The Case FOR AI Existential Risk
ID: case-for-xrisk
Type: argument
Path: /knowledge-base/debates/case-for-xrisk/
Entity ID (EID): E56
Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time.
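Before the record itself, a minimal sketch of how such a merge might be assembled at build time. Every helper name, regex, and field formula below is an illustrative assumption, not the site's actual pipeline:

```ts
// Hypothetical build-time merge of MDX frontmatter, entity YAML, and computed
// metrics into a single page record. Helper names and regexes are assumptions.
import { readFileSync } from "node:fs";
import matter from "gray-matter";            // parses MDX frontmatter
import { load as loadYaml } from "js-yaml";  // parses the entity YAML file

interface PageRecord {
  id: string;
  wikiId: string;
  path: string;
  [key: string]: unknown;
}

function buildPageRecord(mdxPath: string, entityYamlPath: string): PageRecord {
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));
  const entity = loadYaml(readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;

  const lines = content.split("\n");
  const nonEmpty = lines.filter((l) => l.trim().length > 0);

  // One plausible way to compute the fields that appear under "metrics" below.
  const metrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (content.match(/\]\(https?:\/\//g) ?? []).length,
    sectionCount: lines.filter((l) => /^#{2,}\s/.test(l)).length,
    bulletRatio:
      nonEmpty.filter((l) => /^\s*[-*]\s/.test(l)).length / Math.max(1, nonEmpty.length),
  };

  // Later spreads win on key collisions: entity fields override frontmatter.
  return { ...frontmatter, ...entity, metrics } as PageRecord;
}
```

The merged record for this page: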
```json
{
"id": "case-for-xrisk",
"wikiId": "E56",
"path": "/knowledge-base/debates/case-for-xrisk/",
"filePath": "knowledge-base/debates/case-for-xrisk.mdx",
"title": "The Case FOR AI Existential Risk",
"quality": 66,
"readerImportance": 52.5,
"researchImportance": 81,
"tacticalValue": null,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-01-29",
"dateCreated": "2026-02-15",
"summary": "Comprehensive formal argument that AI poses 5-14% median extinction risk by 2100 (per 2,788 researcher survey), structured around four premises: capabilities will advance, alignment is hard (with documented reward hacking and sleeper agent persistence), misaligned AI is dangerous (via instrumental convergence), and alignment funding (\\$180-200M/year) lags capabilities investment (\\$100B+/year) by 200-500x. The argument synthesizes theoretical foundations (orthogonality thesis, instrumental convergence) with empirical evidence (Anthropic's sleeper agents, specification gaming) to conclude significant x-risk probability.",
"description": "The strongest formal argument that AI poses existential risk to humanity. Expert surveys find median extinction probability of 5-14% by 2100, with Geoffrey Hinton estimating 10-20% within 30 years.",
"ratings": {
"novelty": 4.5,
"rigor": 6.8,
"completeness": 7.5,
"actionability": 5.2
},
"category": "debates",
"subcategory": "formal-arguments",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 6590,
"tableCount": 12,
"diagramCount": 1,
"internalLinks": 59,
"externalLinks": 15,
"footnoteCount": 0,
"bulletRatio": 0.46,
"sectionCount": 47,
"hasOverview": false,
"structuralScore": 13
},
"suggestedQuality": 87,
"updateFrequency": 90,
"evergreen": true,
"wordCount": 6590,
"unconvertedLinks": [
{
"text": "AI Impacts 2023",
"url": "https://wiki.aiimpacts.org/ai_timelines/predictions_of_human-level_ai_timelines/ai_timeline_surveys/2023_expert_survey_on_progress_in_ai",
"resourceId": "b4342da2ca0d2721",
"resourceTitle": "AI Impacts 2023 survey"
},
{
"text": "late 2026/early 2027",
"url": "https://blog.redwoodresearch.org/p/whats-up-with-anthropic-predicting",
"resourceId": "03c0d7873d860ee3",
"resourceTitle": "What's up with Anthropic predicting AGI by early 2027?"
},
{
"text": "power-seeking as optimal policy",
"url": "https://arxiv.org/abs/1912.01683",
"resourceId": "a93d9acd21819d62",
"resourceTitle": "Turner et al. formal results"
},
{
"text": "AI Safety Clock",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
},
{
"text": "Geoffrey Hinton (2025)",
"url": "https://en.wikipedia.org/wiki/Existential_risk_from_artificial_intelligence",
"resourceId": "9f9f0a463013941f",
"resourceTitle": "2023 AI researcher survey"
},
{
"text": "Shane Legg (2025)",
"url": "https://en.wikipedia.org/wiki/P(doom",
"resourceId": "ffb7dcedaa0a8711",
"resourceTitle": "Survey of AI researchers"
},
{
"text": "2025 survey",
"url": "https://arxiv.org/html/2502.14870v1",
"resourceId": "4e7f0e37bace9678",
"resourceTitle": "Roman Yampolskiy"
},
{
"text": "Anthropic (March 2025)",
"url": "https://blog.redwoodresearch.org/p/whats-up-with-anthropic-predicting",
"resourceId": "03c0d7873d860ee3",
"resourceTitle": "What's up with Anthropic predicting AGI by early 2027?"
},
{
"text": "Research Report (Aug 2025)",
"url": "https://research.aimultiple.com/artificial-general-intelligence-singularity-timing/",
"resourceId": "2f2cf65315f48c6b",
"resourceTitle": "AGI/Singularity Timing: 9,800 Predictions Analyzed"
},
{
"text": "Median of 8,590 predictions",
"url": "https://research.aimultiple.com/artificial-general-intelligence-singularity-timing/",
"resourceId": "2f2cf65315f48c6b",
"resourceTitle": "AGI/Singularity Timing: 9,800 Predictions Analyzed"
},
{
"text": "Polymarket (Jan 2026)",
"url": "https://research.aimultiple.com/artificial-general-intelligence-singularity-timing/",
"resourceId": "2f2cf65315f48c6b",
"resourceTitle": "AGI/Singularity Timing: 9,800 Predictions Analyzed"
},
{
"text": "2025 AI Safety Index",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
}
],
"unconvertedLinkCount": 12,
"convertedLinkCount": 37,
"backlinkCount": 3,
"hallucinationRisk": {
"level": "medium",
"score": 45,
"factors": [
"no-citations",
"conceptual-content"
]
},
"entityType": "argument",
"redundancy": {
"maxSimilarity": 23,
"similarPages": [
{
"id": "case-against-xrisk",
"title": "The Case AGAINST AI Existential Risk",
"path": "/knowledge-base/debates/case-against-xrisk/",
"similarity": 23
},
{
"id": "why-alignment-easy",
"title": "Why Alignment Might Be Easy",
"path": "/knowledge-base/debates/why-alignment-easy/",
"similarity": 22
},
{
"id": "why-alignment-hard",
"title": "Why Alignment Might Be Hard",
"path": "/knowledge-base/debates/why-alignment-hard/",
"similarity": 22
},
{
"id": "accident-risks",
"title": "AI Accident Risk Cruxes",
"path": "/knowledge-base/cruxes/accident-risks/",
"similarity": 20
},
{
"id": "is-ai-xrisk-real",
"title": "Is AI Existential Risk Real?",
"path": "/knowledge-base/debates/is-ai-xrisk-real/",
"similarity": 20
}
]
},
"coverage": {
"passing": 5,
"total": 13,
"targets": {
"tables": 26,
"diagrams": 3,
"internalLinks": 53,
"externalLinks": 33,
"footnotes": 20,
"references": 20
},
"actuals": {
"tables": 12,
"diagrams": 1,
"internalLinks": 59,
"externalLinks": 15,
"footnotes": 0,
"references": 26,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "red",
"tables": "amber",
"diagrams": "amber",
"internalLinks": "green",
"externalLinks": "amber",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:4.5 R:6.8 A:5.2 C:7.5"
},
"readerRank": 274,
"researchRank": 81,
"recommendedScore": 171.77
}
```
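The `coverage.items` statuses in the record follow from comparing `actuals` against `targets`. A sketch of one thresholding rule that reproduces the rows shown; the one-third amber cutoff is an inferred assumption, not a documented rule:

```ts
type Status = "green" | "amber" | "red";

// Assumed rule: meeting the target is green; reaching at least a third of it
// is amber; anything less is red. This matches the record's visible rows
// (tables 12/26 → amber, footnotes 0/20 → red, internalLinks 59/53 → green),
// but the real build may use different bands.
function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green";
  if (actual >= target / 3) return "amber";
  return "red";
}
```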
External Links
```json
{
"lesswrong": "https://www.lesswrong.com/tag/existential-risk",
"eaForum": "https://forum.effectivealtruism.org/topics/existential-risk"
}
```
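The record's `redundancy.similarPages` percentages imply a pairwise text-similarity measure. A minimal sketch, assuming cosine similarity over term-frequency vectors scaled to 0-100; the real pipeline may use embeddings or another measure entirely:

```ts
// Assumed stand-in for the similarity metric behind "redundancy.maxSimilarity".
function termFreq(text: string): Map<string, number> {
  const tf = new Map<string, number>();
  for (const w of text.toLowerCase().match(/[a-z0-9']+/g) ?? []) {
    tf.set(w, (tf.get(w) ?? 0) + 1);
  }
  return tf;
}

// Cosine similarity of two documents' term-frequency vectors, as a 0-100 score.
function similarityPct(a: string, b: string): number {
  const ta = termFreq(a), tb = termFreq(b);
  let dot = 0, na = 0, nb = 0;
  for (const [w, f] of ta) { dot += f * (tb.get(w) ?? 0); na += f * f; }
  for (const [, f] of tb) nb += f * f;
  return Math.round((100 * dot) / Math.sqrt(na * nb || 1));
}
```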
Backlinks (3)
| id | title | type | relationship |
|---|---|---|---|
| __index__/knowledge-base/debates | Key Debates | concept | — |
| why-alignment-easy | Why Alignment Might Be Easy | argument | — |
| trump-ai-preemption-eo | EO: Ensuring a National Policy Framework for AI (State Preemption) | policy | — |
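Each entry in the record's `unconvertedLinks` array pairs a raw URL with a `resourceId`, which suggests a conversion pass that rewrites plain markdown links into internal resource references. A hedged sketch; the `[[resource:...]]` target syntax is an assumed convention, not confirmed by the record:

```ts
interface UnconvertedLink {
  text: string;
  url: string;
  resourceId: string;
  resourceTitle: string;
}

// Hypothetical conversion pass: replace every raw markdown link whose URL has
// a known resourceId with an internal resource reference.
function convertLinks(mdx: string, links: UnconvertedLink[]): string {
  let out = mdx;
  for (const { text, url, resourceId } of links) {
    const raw = `[${text}](${url})`;
    out = out.split(raw).join(`[[resource:${resourceId}|${text}]]`);
  }
  return out;
}
```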