Skip to content
Longterm Wiki

Toby Ord

toby-ord · person · Path: /knowledge-base/people/toby-ord/
E355 — Entity ID (EID)
← Back to page · 19 backlinks · Quality: 41 · Updated: 2026-01-29
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "toby-ord",
  "wikiId": "E355",
  "path": "/knowledge-base/people/toby-ord/",
  "filePath": "knowledge-base/people/toby-ord.mdx",
  "title": "Toby Ord",
  "quality": 41,
  "readerImportance": 26,
  "researchImportance": 11.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-01-29",
  "dateCreated": "2026-02-15",
  "summary": "Comprehensive biographical profile of Toby Ord documenting his 10% AI extinction estimate and role founding effective altruism, with detailed tables on risk assessments, academic background, and influence metrics. While thorough on his contributions, provides limited original analysis beyond summarizing publicly available information about his work and impact.",
  "description": "Oxford philosopher and author of 'The Precipice' who provided foundational quantitative estimates for existential risks (10% for AI, 1/6 total this century) and philosophical frameworks for long-term thinking that shaped modern AI risk discourse.",
  "ratings": {
    "novelty": 2,
    "rigor": 4.5,
    "completeness": 6,
    "actionability": 2
  },
  "category": "people",
  "subcategory": "ea-figures",
  "clusters": [
    "community",
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 2452,
    "tableCount": 19,
    "diagramCount": 0,
    "internalLinks": 40,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.16,
    "sectionCount": 47,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 2452,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 25,
  "backlinkCount": 19,
  "hallucinationRisk": {
    "level": "high",
    "score": 80,
    "factors": [
      "biographical-claims",
      "no-citations",
      "few-external-sources"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 15,
    "similarPages": [
      {
        "id": "holden-karnofsky",
        "title": "Holden Karnofsky",
        "path": "/knowledge-base/people/holden-karnofsky/",
        "similarity": 15
      },
      {
        "id": "ai-impacts",
        "title": "AI Impacts",
        "path": "/knowledge-base/organizations/ai-impacts/",
        "similarity": 13
      },
      {
        "id": "geoffrey-hinton",
        "title": "Geoffrey Hinton",
        "path": "/knowledge-base/people/geoffrey-hinton/",
        "similarity": 13
      },
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 12
      },
      {
        "id": "corrigibility-failure-pathways",
        "title": "Corrigibility Failure Pathways",
        "path": "/knowledge-base/models/corrigibility-failure-pathways/",
        "similarity": 12
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 10,
      "diagrams": 1,
      "internalLinks": 20,
      "externalLinks": 12,
      "footnotes": 7,
      "references": 7
    },
    "actuals": {
      "tables": 19,
      "diagrams": 0,
      "internalLinks": 40,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 21,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2 R:4.5 A:2 C:6"
  },
  "readerRank": 480,
  "researchRank": 541,
  "recommendedScore": 108.41
}
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/toby-ord",
  "wikidata": "https://www.wikidata.org/wiki/Q7811863",
  "grokipedia": "https://grokipedia.com/page/Toby_Ord"
}
Backlinks (19)
id | title | type | relationship
fhi | Future of Humanity Institute | organization | research
giving-what-we-can | Giving What We Can | organization |
holden-karnofsky | Holden Karnofsky | person |
case-for-xrisk | The Case FOR AI Existential Risk | argument |
is-ai-xrisk-real | Is AI Existential Risk Real? | crux |
ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept |
earning-to-give | Earning to Give: The EA Strategy and Its Limits | concept |
longtermism-credibility-after-ftx | Longtermism's Philosophical Credibility After FTX | concept |
longtermist-value-comparisons | Relative Longtermist Value Comparisons | analysis |
cea | Centre for Effective Altruism | organization |
__index__/knowledge-base/people | People | concept |
nick-beckstead | Nick Beckstead | person |
nick-bostrom | Nick Bostrom | person |
will-macaskill | Will MacAskill | person |
governance-policy | AI Governance and Policy | crux |
bioweapons | Bioweapons | risk |
existential-risk | Existential Risk from AI | concept |
irreversibility | AI-Induced Irreversibility | risk |
lock-in | AI Value Lock-in | risk |
Longterm Wiki