Skip to content
Longterm Wiki

China AI Regulations

china-ai-regulations · policy · Path: /knowledge-base/responses/china-ai-regulations/
Entity ID (EID): E58
← Back to page · 2 backlinks · Quality: 57 · Updated: 2026-02-11
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "china-ai-regulations",
  "wikiId": "E58",
  "path": "/knowledge-base/responses/china-ai-regulations/",
  "filePath": "knowledge-base/responses/china-ai-regulations.mdx",
  "title": "China AI Regulations",
  "quality": 57,
  "readerImportance": 71.5,
  "researchImportance": 37,
  "tacticalValue": null,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-02-11",
  "dateCreated": "2026-02-15",
  "summary": "Comprehensive analysis of China's AI regulatory framework covering 5+ major regulations affecting 50,000+ companies, with enforcement focusing on content control and social stability rather than capability restrictions. Documents China's emerging AI safety engagement through CnAISDA launch in February 2025 and growing international cooperation despite strategic competition barriers.",
  "description": "Comprehensive analysis of China's iterative, sector-specific AI regulatory framework, covering 5+ major regulations affecting 50,000+ companies, with enforcement focusing on content control and social stability rather than capability restrictions.",
  "ratings": {
    "focus": 7.5,
    "novelty": 4.2,
    "rigor": 5.8,
    "completeness": 8,
    "concreteness": 6.5,
    "actionability": 4.5,
    "objectivity": 4
  },
  "category": "responses",
  "subcategory": "legislation",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 3254,
    "tableCount": 6,
    "diagramCount": 1,
    "internalLinks": 1,
    "externalLinks": 61,
    "footnoteCount": 0,
    "bulletRatio": 0.09,
    "sectionCount": 36,
    "hasOverview": true,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": 7,
  "evergreen": true,
  "wordCount": 3254,
  "unconvertedLinks": [
    {
      "text": "Geneva talks in May 2024",
      "url": "https://carnegieendowment.org/research/2024/08/china-artificial-intelligence-ai-safety-regulation",
      "resourceId": "d0e36601100c356d",
      "resourceTitle": "Carnegie Endowment analysis"
    },
    {
      "text": "CnAISDA launched February 2025",
      "url": "https://carnegieendowment.org/research/2025/06/how-some-of-chinas-top-ai-thinkers-built-their-own-ai-safety-institute?lang=en",
      "resourceId": "0f17105b7e24c08a",
      "resourceTitle": "CnAISDA launched February 2025"
    },
    {
      "text": "China's self-described counterpart to AI safety institutes",
      "url": "https://carnegieendowment.org/research/2025/06/how-some-of-chinas-top-ai-thinkers-built-their-own-ai-safety-institute?lang=en",
      "resourceId": "0f17105b7e24c08a",
      "resourceTitle": "CnAISDA launched February 2025"
    },
    {
      "text": "limited public engagement with catastrophic AI risks or existential threats",
      "url": "https://carnegieendowment.org/research/2024/08/china-artificial-intelligence-ai-safety-regulation",
      "resourceId": "d0e36601100c356d",
      "resourceTitle": "Carnegie Endowment analysis"
    },
    {
      "text": "significant barriers to international coordination",
      "url": "https://carnegieendowment.org/research/2024/08/china-artificial-intelligence-ai-safety-regulation",
      "resourceId": "d0e36601100c356d",
      "resourceTitle": "Carnegie Endowment analysis"
    },
    {
      "text": "Strategic competition and trust deficits",
      "url": "https://www.rand.org/pubs/research_reports/RRA2680-1.html",
      "resourceId": "0532c540957038e6",
      "resourceTitle": "Why AI Projects Fail and How They Can Succeed"
    },
    {
      "text": "Emerging (CnAISDA 2025)",
      "url": "https://carnegieendowment.org/research/2025/06/how-some-of-chinas-top-ai-thinkers-built-their-own-ai-safety-institute?lang=en",
      "resourceId": "0f17105b7e24c08a",
      "resourceTitle": "CnAISDA launched February 2025"
    },
    {
      "text": "November 2024 Biden-Xi agreement",
      "url": "https://www.sandia.gov/app/uploads/sites/148/2025/04/Challenges-and-Opportunities-for-US-China-Collaboration-on-Artificial-Intelligence-Governance.pdf",
      "resourceId": "331246d11298126e",
      "resourceTitle": "Sandia National Labs: US-China AI Collaboration Challenges"
    },
    {
      "text": "UN General Assembly resolution 'Enhancing International Cooperation on Capacity-building of Artificial Intelligence'",
      "url": "https://www.techpolicy.press/from-competition-to-cooperation-can-uschina-engagement-overcome-geopolitical-barriers-in-ai-governance/",
      "resourceId": "8de95bad7d533f03",
      "resourceTitle": "called for explicit US-China collaboration"
    },
    {
      "text": "Ramping up rapidly",
      "url": "https://carnegieendowment.org/research/2024/08/china-artificial-intelligence-ai-safety-regulation",
      "resourceId": "d0e36601100c356d",
      "resourceTitle": "Carnegie Endowment analysis"
    },
    {
      "text": "Chinese scientists have been ramping up technical research",
      "url": "https://carnegieendowment.org/research/2024/08/china-artificial-intelligence-ai-safety-regulation",
      "resourceId": "d0e36601100c356d",
      "resourceTitle": "Carnegie Endowment analysis"
    },
    {
      "text": "relatively little safety work has been published by China's leading AI companies",
      "url": "https://carnegieendowment.org/research/2024/08/china-artificial-intelligence-ai-safety-regulation",
      "resourceId": "d0e36601100c356d",
      "resourceTitle": "Carnegie Endowment analysis"
    },
    {
      "text": "decentralized network",
      "url": "https://carnegieendowment.org/research/2025/06/how-some-of-chinas-top-ai-thinkers-built-their-own-ai-safety-institute?lang=en",
      "resourceId": "0f17105b7e24c08a",
      "resourceTitle": "CnAISDA launched February 2025"
    },
    {
      "text": "TC260 National Information Security Standardization",
      "url": "https://carnegieendowment.org/research/2024/08/china-artificial-intelligence-ai-safety-regulation",
      "resourceId": "d0e36601100c356d",
      "resourceTitle": "Carnegie Endowment analysis"
    },
    {
      "text": "17 companies sign AI Safety Commitments",
      "url": "https://carnegieendowment.org/research/2025/06/how-some-of-chinas-top-ai-thinkers-built-their-own-ai-safety-institute?lang=en",
      "resourceId": "0f17105b7e24c08a",
      "resourceTitle": "CnAISDA launched February 2025"
    },
    {
      "text": "CnAISDA launched",
      "url": "https://carnegieendowment.org/research/2025/06/how-some-of-chinas-top-ai-thinkers-built-their-own-ai-safety-institute?lang=en",
      "resourceId": "0f17105b7e24c08a",
      "resourceTitle": "CnAISDA launched February 2025"
    },
    {
      "text": "Trump administration's uncertain position",
      "url": "https://www.sandia.gov/app/uploads/sites/148/2025/04/Challenges-and-Opportunities-for-US-China-Collaboration-on-Artificial-Intelligence-Governance.pdf",
      "resourceId": "331246d11298126e",
      "resourceTitle": "Sandia National Labs: US-China AI Collaboration Challenges"
    },
    {
      "text": "How Some of China's Top AI Thinkers Built Their Own AI Safety Institute",
      "url": "https://carnegieendowment.org/research/2025/06/how-some-of-chinas-top-ai-thinkers-built-their-own-ai-safety-institute?lang=en",
      "resourceId": "0f17105b7e24c08a",
      "resourceTitle": "CnAISDA launched February 2025"
    },
    {
      "text": "Challenges and Opportunities for US-China AI Collaboration",
      "url": "https://www.sandia.gov/app/uploads/sites/148/2025/04/Challenges-and-Opportunities-for-US-China-Collaboration-on-Artificial-Intelligence-Governance.pdf",
      "resourceId": "331246d11298126e",
      "resourceTitle": "Sandia National Labs: US-China AI Collaboration Challenges"
    },
    {
      "text": "From Competition to Cooperation: US-China AI Governance",
      "url": "https://www.techpolicy.press/from-competition-to-cooperation-can-uschina-engagement-overcome-geopolitical-barriers-in-ai-governance/",
      "resourceId": "8de95bad7d533f03",
      "resourceTitle": "called for explicit US-China collaboration"
    }
  ],
  "unconvertedLinkCount": 20,
  "convertedLinkCount": 0,
  "backlinkCount": 2,
  "hallucinationRisk": {
    "level": "medium",
    "score": 55,
    "factors": [
      "no-citations"
    ]
  },
  "entityType": "policy",
  "redundancy": {
    "maxSimilarity": 21,
    "similarPages": [
      {
        "id": "us-aisi",
        "title": "US AI Safety Institute (now CAISI)",
        "path": "/knowledge-base/organizations/us-aisi/",
        "similarity": 21
      },
      {
        "id": "standards-bodies",
        "title": "AI Standards Bodies",
        "path": "/knowledge-base/responses/standards-bodies/",
        "similarity": 21
      },
      {
        "id": "structural-risks",
        "title": "AI Structural Risk Cruxes",
        "path": "/knowledge-base/cruxes/structural-risks/",
        "similarity": 20
      },
      {
        "id": "coordination-mechanisms",
        "title": "International Coordination Mechanisms",
        "path": "/knowledge-base/responses/coordination-mechanisms/",
        "similarity": 20
      },
      {
        "id": "failed-stalled-proposals",
        "title": "Failed and Stalled AI Policy Proposals",
        "path": "/knowledge-base/responses/failed-stalled-proposals/",
        "similarity": 20
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 13,
      "diagrams": 1,
      "internalLinks": 26,
      "externalLinks": 16,
      "footnotes": 10,
      "references": 10
    },
    "actuals": {
      "tables": 6,
      "diagrams": 1,
      "internalLinks": 1,
      "externalLinks": 61,
      "footnotes": 0,
      "references": 30,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "green",
      "internalLinks": "amber",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.2 R:5.8 A:4.5 C:8"
  },
  "readerRank": 155,
  "researchRank": 364,
  "recommendedScore": 164.59
}
External Links

No external links

Backlinks (2)
id · title · type · relationship
international-summits · International AI Safety Summit Series · event
governance-overview · AI Governance & Policy (Overview) · concept
Longterm Wiki