Skip to content
Longterm Wiki

Existential Risk from AI

existential-risk · concept · Path: /knowledge-base/risks/existential-risk/
Entity ID (EID): E131
← Back to page · 7 backlinks · Quality: 92 · Updated: 2026-03-14
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "existential-risk",
  "wikiId": "E131",
  "path": "/knowledge-base/risks/existential-risk/",
  "filePath": "knowledge-base/risks/existential-risk.mdx",
  "title": "Existential Risk from AI",
  "quality": 92,
  "readerImportance": 95,
  "researchImportance": 18.5,
  "tacticalValue": 45,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-03-14",
  "dateCreated": "2026-02-17",
  "summary": null,
  "description": "Hypotheses concerning risks from advanced AI systems that some researchers believe could result in human extinction or permanent global catastrophe, including institutional frameworks developed by frontier labs to address these risks",
  "ratings": null,
  "category": "risks",
  "subcategory": "accident",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 4050,
    "tableCount": 1,
    "diagramCount": 0,
    "internalLinks": 60,
    "externalLinks": 0,
    "footnoteCount": 51,
    "bulletRatio": 0.05,
    "sectionCount": 21,
    "hasOverview": false,
    "structuralScore": 12
  },
  "suggestedQuality": 80,
  "updateFrequency": 180,
  "evergreen": true,
  "wordCount": 4050,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 0,
  "backlinkCount": 7,
  "hallucinationRisk": {
    "level": "low",
    "score": 15,
    "factors": [
      "few-external-sources",
      "well-cited",
      "conceptual-content",
      "high-quality"
    ]
  },
  "entityType": "concept",
  "redundancy": {
    "maxSimilarity": 21,
    "similarPages": [
      {
        "id": "is-ai-xrisk-real",
        "title": "Is AI Existential Risk Real?",
        "path": "/knowledge-base/debates/is-ai-xrisk-real/",
        "similarity": 21
      },
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 20
      },
      {
        "id": "ai-timelines",
        "title": "AI Timelines",
        "path": "/knowledge-base/models/ai-timelines/",
        "similarity": 19
      },
      {
        "id": "deep-learning-era",
        "title": "Deep Learning Revolution (2012-2020)",
        "path": "/knowledge-base/history/deep-learning-era/",
        "similarity": 18
      },
      {
        "id": "research-agendas",
        "title": "AI Alignment Research Agenda Comparison",
        "path": "/knowledge-base/responses/research-agendas/",
        "similarity": 18
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-03-14",
      "branch": "auto-update/2026-03-14",
      "title": "Auto-improve (standard): Existential Risk from AI",
      "summary": "Improved \"Existential Risk from AI\" via standard pipeline (1087.0s). Quality score: 82. Issues resolved: Bare URL in footnote rc-3b8d: no URL provided but acceptable; Bare URL in footnote rc-166b: URL appears as raw text 'https; Bare URL in footnote rc-d541: raw URL not wrapped in markdow.",
      "duration": "1087.0s",
      "cost": "$5-8"
    },
    {
      "date": "2026-03-10",
      "branch": "auto-update/2026-03-10",
      "title": "Auto-improve (standard): Existential Risk from AI",
      "summary": "Improved \"Existential Risk from AI\" via standard pipeline (1302.5s). Quality score: 88. Issues resolved: Footnote [^rc-2f55] cites Birhane et al. (2022) FAccT paper ; Footnote [^rc-f540] attributes 'offense-defense balance' pap; EntityLink id='E26' for 'arc-evals' and EntityLink id='E25' .",
      "duration": "1302.5s",
      "cost": "$5-8"
    },
    {
      "date": "2026-02-18",
      "branch": "claude/fix-issue-240-N5irU",
      "title": "Surface tacticalValue in /wiki table and score 53 pages",
      "summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
      "model": "sonnet-4",
      "duration": "~30min"
    },
    {
      "date": "2026-02-17",
      "branch": "claude/top-priority-update-WurDM",
      "title": "Improve top 5 foundational wiki pages",
      "summary": "Improved the 5 highest-importance, lowest-quality wiki pages using the Crux content pipeline. All were stubs (7 words) or had quality=0 and are now comprehensive articles with citations, EntityLinks, and balanced perspectives.",
      "pr": 188
    }
  ],
  "coverage": {
    "passing": 5,
    "total": 13,
    "targets": {
      "tables": 16,
      "diagrams": 2,
      "internalLinks": 32,
      "externalLinks": 20,
      "footnotes": 12,
      "references": 12
    },
    "actuals": {
      "tables": 1,
      "diagrams": 0,
      "internalLinks": 60,
      "externalLinks": 0,
      "footnotes": 51,
      "references": 0,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "red",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "red",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "green",
      "references": "red",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 4
  },
  "readerRank": 2,
  "researchRank": 499,
  "recommendedScore": 250.12
}
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/existential-risk",
  "eaForum": "https://forum.effectivealtruism.org/topics/existential-risk",
  "stampy": "https://aisafety.info/questions/8mTg/What-is-existential-risk",
  "wikidata": "https://www.wikidata.org/wiki/Q16830153",
  "eightyK": "https://80000hours.org/articles/existential-risks/"
}
Backlinks (7)
id · title · type · relationship
cais · Center for AI Safety (CAIS) · organization
fhi · Future of Humanity Institute · organization
early-warnings · Early Warnings (1950s-2000) · historical
miri-era · The MIRI Era (2000-2015) · historical
longtermist-value-comparisons · Relative Longtermist Value Comparisons · analysis
xai · xAI · organization
epistemic-systemic-risk · Epistemic Systemic Risk · risk
Longterm Wiki