International AI Safety Summits
international-summits (event) — Path: /knowledge-base/responses/international-summits/
E173 — Entity ID (EID)
Page Record — database.json (merged from MDX frontmatter + Entity YAML + computed metrics at build time)
{
"id": "international-summits",
"wikiId": "E173",
"path": "/knowledge-base/responses/international-summits/",
"filePath": "knowledge-base/responses/international-summits.mdx",
"title": "International AI Safety Summits",
"quality": 63,
"readerImportance": 66.5,
"researchImportance": 32,
"tacticalValue": null,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-02-16",
"dateCreated": "2026-02-15",
"summary": "Three international AI safety summits (2023-2025) achieved first formal recognition of catastrophic AI risks from 28+ countries, established 10+ AI Safety Institutes with $100-400M combined budgets, and secured voluntary commitments from 16 companies covering ~80% of frontier AI development. However, all commitments remain non-binding with no enforcement mechanisms, and the coalition is fracturing (US/UK refused Paris 2025 declaration), with estimated 15-30% probability of binding frameworks by 2030.",
"description": "Global diplomatic initiatives bringing together 28+ countries and major AI companies to establish international coordination on AI safety.",
"ratings": {
"novelty": 5.2,
"rigor": 6.8,
"completeness": 7.5,
"actionability": 5.5
},
"category": "responses",
"subcategory": "international",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 4742,
"tableCount": 9,
"diagramCount": 1,
"internalLinks": 22,
"externalLinks": 12,
"footnoteCount": 0,
"bulletRatio": 0.07,
"sectionCount": 25,
"hasOverview": false,
"structuralScore": 14
},
"suggestedQuality": 93,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 4742,
"unconvertedLinks": [
{
"text": "Bletchley Park Summit",
"url": "https://www.gov.uk/government/topical-events/ai-safety-summit-2023",
"resourceId": "254bcdc7bfcdcd73",
"resourceTitle": "AI Safety Summit 2023"
},
{
"text": "Carnegie Endowment's analysis",
"url": "https://carnegieendowment.org/research/2024/10/the-ai-governance-arms-race-from-summit-pageantry-to-progress",
"resourceId": "a7f69bbad6cd82c0",
"resourceTitle": "Carnegie analysis warns"
},
{
"text": "European Policy Centre termed",
"url": "https://www.epc.eu/publication/The-Paris-Summit-Au-Revoir-global-AI-Safety-61ea68/",
"resourceId": "bffb6233e3238589",
"resourceTitle": "The Paris Summit: Au Revoir, global AI Safety?"
},
{
"text": "initially received only $10 million",
"url": "https://en.wikipedia.org/wiki/AI_Safety_Institute",
"resourceId": "89860462901f56f7",
"resourceTitle": "UK AI Safety Institute Wikipedia"
},
{
"text": "CSIS analysis of the AI Safety Institute International Network",
"url": "https://www.csis.org/analysis/ai-safety-institute-international-network-next-steps-and-recommendations",
"resourceId": "0572f91896f52377",
"resourceTitle": "The AI Safety Institute International Network: Next Steps"
},
{
"text": "Center for AI Safety Newsletter noted",
"url": "https://newsletter.safe.ai/p/ai-safety-newsletter-35-voluntary",
"resourceId": "2f90f810999eda1b",
"resourceTitle": "AI Safety Newsletter"
},
{
"text": "Max Tegmark of MIT and the Future of Life Institute",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
}
],
"unconvertedLinkCount": 7,
"convertedLinkCount": 13,
"backlinkCount": 6,
"hallucinationRisk": {
"level": "high",
"score": 70,
"factors": [
"specific-factual-claims",
"no-citations"
]
},
"entityType": "event",
"redundancy": {
"maxSimilarity": 25,
"similarPages": [
{
"id": "coordination-mechanisms",
"title": "International Coordination Mechanisms",
"path": "/knowledge-base/responses/coordination-mechanisms/",
"similarity": 25
},
{
"id": "international-regimes",
"title": "International Compute Regimes",
"path": "/knowledge-base/responses/international-regimes/",
"similarity": 24
},
{
"id": "us-aisi",
"title": "US AI Safety Institute (now CAISI)",
"path": "/knowledge-base/organizations/us-aisi/",
"similarity": 23
},
{
"id": "ai-safety-institutes",
"title": "AI Safety Institutes",
"path": "/knowledge-base/responses/ai-safety-institutes/",
"similarity": 23
},
{
"id": "voluntary-commitments",
"title": "Voluntary Industry Commitments",
"path": "/knowledge-base/responses/voluntary-commitments/",
"similarity": 23
}
]
},
"changeHistory": [
{
"date": "2026-02-16",
"branch": "claude/investigate-arxiv-paper-UmGPu",
"title": "Singapore Consensus on AI Safety",
"summary": "Investigated arXiv:2506.20702 (The Singapore Consensus on Global AI Safety Research Priorities) and integrated it into the wiki. Updated the international-summits page with a new SCAI section and Mermaid diagram, fixed the broken Singapore Consensus resource in web-other.yaml, updated Bengio/Russell/Tegmark pages with references, created a new dedicated singapore-consensus page with entity E694, and registered the entity in responses.yaml.",
"pr": 157
}
],
"coverage": {
"passing": 5,
"total": 13,
"targets": {
"tables": 19,
"diagrams": 2,
"internalLinks": 38,
"externalLinks": 24,
"footnotes": 14,
"references": 14
},
"actuals": {
"tables": 9,
"diagrams": 1,
"internalLinks": 22,
"externalLinks": 12,
"footnotes": 0,
"references": 20,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "red",
"tables": "amber",
"diagrams": "amber",
"internalLinks": "amber",
"externalLinks": "amber",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 1,
"ratingsString": "N:5.2 R:6.8 A:5.5 C:7.5"
},
"readerRank": 184,
"researchRank": 401,
"recommendedScore": 172.77
}

External Links
{
"eaForum": "https://forum.effectivealtruism.org/topics/ai-safety-summit"
}

Backlinks (6)
| id | title | type | relationship |
|---|---|---|---|
| china-ai-regulations | China AI Regulatory Framework | policy | — |
| voluntary-commitments | Voluntary AI Safety Commitments | policy | — |
| singapore-consensus | Singapore Consensus on AI Safety Research Priorities | policy | — |
| carnegie-endowment | Carnegie Endowment for International Peace | organization | — |
| ai-control | AI Control | research-area | — |
| governance-overview | AI Governance & Policy (Overview) | concept | — |