Toby Ord
toby-ord (person) — Path: /knowledge-base/people/toby-ord/
E355 — Entity ID (EID)
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "toby-ord",
"wikiId": "E355",
"path": "/knowledge-base/people/toby-ord/",
"filePath": "knowledge-base/people/toby-ord.mdx",
"title": "Toby Ord",
"quality": 41,
"readerImportance": 26,
"researchImportance": 11.5,
"tacticalValue": null,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-02-15",
"dateCreated": "2026-01-29",
"summary": "Comprehensive biographical profile of Toby Ord documenting his 10% AI extinction estimate and role founding effective altruism, with detailed tables on risk assessments, academic background, and influence metrics. While thorough on his contributions, provides limited original analysis beyond summarizing publicly available information about his work and impact.",
"description": "Oxford philosopher and author of 'The Precipice' who provided foundational quantitative estimates for existential risks (10% for AI, 1/6 total this century) and philosophical frameworks for long-term thinking that shaped modern AI risk discourse.",
"ratings": {
"novelty": 2,
"rigor": 4.5,
"completeness": 6,
"actionability": 2
},
"category": "people",
"subcategory": "ea-figures",
"clusters": [
"community",
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 2452,
"tableCount": 19,
"diagramCount": 0,
"internalLinks": 40,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.16,
"sectionCount": 47,
"hasOverview": true,
"structuralScore": 11
},
"suggestedQuality": 73,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 2452,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 25,
"backlinkCount": 19,
"hallucinationRisk": {
"level": "high",
"score": 80,
"factors": [
"biographical-claims",
"no-citations",
"few-external-sources"
]
},
"entityType": "person",
"redundancy": {
"maxSimilarity": 15,
"similarPages": [
{
"id": "holden-karnofsky",
"title": "Holden Karnofsky",
"path": "/knowledge-base/people/holden-karnofsky/",
"similarity": 15
},
{
"id": "ai-impacts",
"title": "AI Impacts",
"path": "/knowledge-base/organizations/ai-impacts/",
"similarity": 13
},
{
"id": "geoffrey-hinton",
"title": "Geoffrey Hinton",
"path": "/knowledge-base/people/geoffrey-hinton/",
"similarity": 13
},
{
"id": "miri-era",
"title": "The MIRI Era (2000-2015)",
"path": "/knowledge-base/history/miri-era/",
"similarity": 12
},
{
"id": "corrigibility-failure-pathways",
"title": "Corrigibility Failure Pathways",
"path": "/knowledge-base/models/corrigibility-failure-pathways/",
"similarity": 12
}
]
},
"coverage": {
"passing": 7,
"total": 13,
"targets": {
"tables": 10,
"diagrams": 1,
"internalLinks": 20,
"externalLinks": 12,
"footnotes": 7,
"references": 7
},
"actuals": {
"tables": 19,
"diagrams": 0,
"internalLinks": 40,
"externalLinks": 0,
"footnotes": 0,
"references": 21,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "green",
"diagrams": "red",
"internalLinks": "green",
"externalLinks": "red",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:2 R:4.5 A:2 C:6"
},
"readerRank": 480,
"researchRank": 541,
"recommendedScore": 108.41
}
External Links
{
"eaForum": "https://forum.effectivealtruism.org/topics/toby-ord",
"wikidata": "https://www.wikidata.org/wiki/Q7811863",
"grokipedia": "https://grokipedia.com/page/Toby_Ord"
}
Backlinks (19)
| id | title | type | relationship |
|---|---|---|---|
| fhi | Future of Humanity Institute | organization | research |
| giving-what-we-can | Giving What We Can | organization | — |
| holden-karnofsky | Holden Karnofsky | person | — |
| case-for-xrisk | The Case FOR AI Existential Risk | argument | — |
| is-ai-xrisk-real | Is AI Existential Risk Real? | crux | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| earning-to-give | Earning to Give: The EA Strategy and Its Limits | concept | — |
| longtermism-credibility-after-ftx | Longtermism's Philosophical Credibility After FTX | concept | — |
| longtermist-value-comparisons | Relative Longtermist Value Comparisons | analysis | — |
| cea | Centre for Effective Altruism | organization | — |
| __index__/knowledge-base/people | People | concept | — |
| nick-beckstead | Nick Beckstead | person | — |
| nick-bostrom | Nick Bostrom | person | — |
| will-macaskill | Will MacAskill | person | — |
| governance-policy | AI Governance and Policy | crux | — |
| bioweapons | Bioweapons | risk | — |
| existential-risk | Existential Risk from AI | concept | — |
| irreversibility | AI-Induced Irreversibility | risk | — |
| lock-in | AI Value Lock-in | risk | — |