Eliezer Yudkowsky
eliezer-yudkowsky · person
Path: /knowledge-base/people/eliezer-yudkowsky/
Entity ID (EID): E114
Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
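The merge step itself is not shown on this page. The sketch below shows how a record shaped like the one that follows could be assembled at build time, assuming the `gray-matter` and `js-yaml` packages for parsing; the helper and field handling are assumptions, not the project's actual implementation.

```ts
// Hypothetical sketch of the build-time merge into database.json.
// Helper shape and field handling are assumptions; only the record format below is given.
import fs from "node:fs";
import matter from "gray-matter"; // assumed frontmatter parser
import yaml from "js-yaml";       // assumed entity-YAML parser

type PageRecord = Record<string, unknown>;

function buildPageRecord(mdxPath: string, entityYamlPath: string): PageRecord {
  // 1. MDX frontmatter: editorial fields (title, summary, ratings, quality, ...).
  const { data: frontmatter, content } = matter(fs.readFileSync(mdxPath, "utf8"));

  // 2. Entity YAML: identity fields (wikiId, entityType, external links, ...).
  const entity = yaml.load(fs.readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;

  // 3. Computed metrics: derived from the page body at build time.
  const metrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (content.match(/\]\(https?:\/\//g) ?? []).length,
  };

  // Merge in the order named above: frontmatter, then entity YAML, then computed metrics.
  return { filePath: mdxPath, ...frontmatter, ...entity, metrics };
}
```

Cross-page fields such as `backlinkCount`, `redundancy`, and `coverage` would presumably be filled in by a second pass once every page record exists, since they depend on the whole link graph.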
{
"id": "eliezer-yudkowsky",
"wikiId": "E114",
"path": "/knowledge-base/people/eliezer-yudkowsky/",
"filePath": "knowledge-base/people/eliezer-yudkowsky.mdx",
"title": "Eliezer Yudkowsky",
"quality": 35,
"readerImportance": 82,
"researchImportance": 12,
"tacticalValue": 75,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-02-23",
"dateCreated": "2026-02-15",
"summary": "Comprehensive biographical profile of Eliezer Yudkowsky covering his foundational contributions to AI safety (CEV, early problem formulation, agent foundations) and notably pessimistic views on AI risk. Includes detailed 'Statements & Track Record' section analyzing his mixed prediction accuracy—noting early timeline errors, his position on AI generalization in the Hanson debate, and the unfalsifiability of his core doom predictions.",
"description": "Co-founder of MIRI, early AI safety researcher and rationalist community founder",
"ratings": {
"novelty": 3,
"rigor": 4,
"completeness": 6.5,
"actionability": 2
},
"category": "people",
"subcategory": "safety-researchers",
"clusters": [
"ai-safety"
],
"metrics": {
"wordCount": 3237,
"tableCount": 3,
"diagramCount": 0,
"internalLinks": 30,
"externalLinks": 5,
"footnoteCount": 26,
"bulletRatio": 0.25,
"sectionCount": 29,
"hasOverview": true,
"structuralScore": 14
},
"suggestedQuality": 93,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 3237,
"unconvertedLinks": [
{
"text": "en.wikipedia.org",
"url": "https://en.wikipedia.org/wiki/Eliezer_Yudkowsky",
"resourceId": "d8d60a1c46155a15",
"resourceTitle": "Eliezer Yudkowsky - Wikipedia"
},
{
"text": "[PDF",
"url": "https://intelligence.org/files/IEM.pdf",
"resourceId": "a1186c87f23ab9ce",
"resourceTitle": "Intelligence Explosion Microeconomics"
}
],
"unconvertedLinkCount": 2,
"convertedLinkCount": 0,
"backlinkCount": 58,
"citationHealth": {
"total": 10,
"withQuotes": 9,
"verified": 9,
"accuracyChecked": 9,
"accurate": 5,
"inaccurate": 0,
"avgScore": 0.8912713891930051
},
"hallucinationRisk": {
"level": "medium",
"score": 50,
"factors": [
"biographical-claims",
"low-quality-score",
"well-cited"
]
},
"entityType": "person",
"redundancy": {
"maxSimilarity": 22,
"similarPages": [
{
"id": "miri-era",
"title": "The MIRI Era (2000-2015)",
"path": "/knowledge-base/history/miri-era/",
"similarity": 22
},
{
"id": "is-ai-xrisk-real",
"title": "Is AI Existential Risk Real?",
"path": "/knowledge-base/debates/is-ai-xrisk-real/",
"similarity": 17
},
{
"id": "deep-learning-era",
"title": "Deep Learning Revolution (2012-2020)",
"path": "/knowledge-base/history/deep-learning-era/",
"similarity": 17
},
{
"id": "early-warnings",
"title": "Early Warnings (1950s-2000)",
"path": "/knowledge-base/history/early-warnings/",
"similarity": 17
},
{
"id": "ai-timelines",
"title": "AI Timelines",
"path": "/knowledge-base/models/ai-timelines/",
"similarity": 17
}
]
},
"changeHistory": [
{
"date": "2026-02-24",
"branch": "feat/stale-fact-detection-581-582",
"title": "Batch content fixes + stale-facts validator + 2 new validation rules",
"summary": "(fill in)",
"pr": 924,
"model": "claude-sonnet-4-6"
},
{
"date": "2026-02-23",
"branch": "feat/batch-improve-high-risk-pages",
"title": "Auto-improve (standard): Eliezer Yudkowsky",
"summary": "Improved \"Eliezer Yudkowsky\" via standard pipeline (1505.9s).",
"duration": "1505.9s",
"cost": "$5-8"
},
{
"date": "2026-02-18",
"branch": "claude/fix-issue-240-N5irU",
"title": "Surface tacticalValue in /wiki table and score 53 pages",
"summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
"model": "sonnet-4",
"duration": "~30min"
}
],
"coverage": {
"passing": 9,
"total": 13,
"targets": {
"tables": 13,
"diagrams": 1,
"internalLinks": 26,
"externalLinks": 16,
"footnotes": 10,
"references": 10
},
"actuals": {
"tables": 3,
"diagrams": 0,
"internalLinks": 30,
"externalLinks": 5,
"footnotes": 26,
"references": 5,
"quotesWithQuotes": 9,
"quotesTotal": 10,
"accuracyChecked": 9,
"accuracyTotal": 10
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "amber",
"diagrams": "red",
"internalLinks": "green",
"externalLinks": "amber",
"footnotes": "green",
"references": "amber",
"quotes": "green",
"accuracy": "green"
},
"editHistoryCount": 3,
"ratingsString": "N:3 R:4 A:2 C:6.5"
},
"readerRank": 72,
"researchRank": 535,
"recommendedScore": 127.19
}
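The green/amber/red values in `coverage.items` track how the actuals compare to the targets in the record above (internalLinks 30 vs 26 is green, externalLinks 5 vs 16 is amber, diagrams 0 vs 1 is red). Below is a minimal sketch of one rule consistent with those pairs; the exact cutoffs are assumptions and are not documented on this page.

```ts
// Plausible reconstruction of the coverage status rule; the thresholds are assumptions.
type Status = "green" | "amber" | "red";

function coverageStatus(actual: number, target: number): Status {
  if (target === 0 || actual >= target) return "green"; // target met (or nothing required)
  if (actual === 0) return "red";                        // nothing present at all
  return "amber";                                        // partial coverage
}

// Spot checks against the record above:
// coverageStatus(30, 26) -> "green"  internalLinks
// coverageStatus(5, 16)  -> "amber"  externalLinks
// coverageStatus(3, 13)  -> "amber"  tables
// coverageStatus(0, 1)   -> "red"    diagrams
```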
External Links

{
"wikipedia": "https://en.wikipedia.org/wiki/Eliezer_Yudkowsky",
"lesswrong": "https://www.lesswrong.com/tag/eliezer-yudkowsky",
"wikidata": "https://www.wikidata.org/wiki/Q704195",
"grokipedia": "https://grokipedia.com/page/Eliezer_Yudkowsky"
}

Backlinks (58)
| id | title | type | relationship |
|---|---|---|---|
| miri | Machine Intelligence Research Institute (MIRI) | organization | — |
| miri | Machine Intelligence Research Institute (MIRI) | organization | — |
| lesswrong | LessWrong | organization | leads-to |
| paul-christiano | Paul Christiano | person | — |
| corrigibility | Corrigibility | research-area | research |
| self-improvement | Self-Improvement and Recursive Enhancement | capability | — |
| accident-risks | AI Accident Risk Cruxes | crux | — |
| case-against-xrisk | The Case AGAINST AI Existential Risk | argument | — |
| case-for-xrisk | The Case FOR AI Existential Risk | argument | — |
| is-ai-xrisk-real | Is AI Existential Risk Real? | crux | — |
| open-vs-closed | Open vs Closed Source AI | crux | — |
| pause-debate | Should We Pause AI Development? | crux | — |
| why-alignment-hard | Why Alignment Might Be Hard | argument | — |
| __index__/knowledge-base/history | History | concept | — |
| miri-era | The MIRI Era (2000-2015) | historical | — |
| ai-timelines | AI Timelines | concept | — |
| power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis | — |
| worldview-intervention-mapping | Worldview-Intervention Mapping | analysis | — |
| center-for-applied-rationality | Center for Applied Rationality | organization | — |
| community-building-overview | Community Building Organizations (Overview) | concept | — |
| fli | Future of Life Institute (FLI) | organization | — |
| manifold | Manifold (Prediction Market) | organization | — |
| manifund | Manifund | organization | — |
| pause-ai | Pause AI | organization | — |
| peter-thiel-philanthropy | Peter Thiel (Funder) | organization | — |
| sff | Survival and Flourishing Fund (SFF) | organization | — |
| the-sequences | The Sequences by Eliezer Yudkowsky | organization | — |
| tsmc | TSMC | organization | — |
| connor-leahy | Connor Leahy | person | — |
| dario-amodei | Dario Amodei | person | — |
| eliezer-yudkowsky-predictions | Eliezer Yudkowsky: Track Record | concept | — |
| elon-musk | Elon Musk | person | — |
| evan-hubinger | Evan Hubinger | person | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| greg-brockman | Greg Brockman | person | — |
| holden-karnofsky | Holden Karnofsky | person | — |
| __index__/knowledge-base/people | People | concept | — |
| jaan-tallinn | Jaan Tallinn | person | — |
| robin-hanson | Robin Hanson | person | — |
| yann-lecun | Yann LeCun | person | — |
| yoshua-bengio | Yoshua Bengio | person | — |
| ai-non-extremization-coordination | AI Non-Extremization Coordination | approach | — |
| alignment | AI Alignment | approach | — |
| constitutional-ai | Constitutional AI | approach | — |
| corporate | Corporate AI Safety Responses | approach | — |
| research-agendas | AI Alignment Research Agenda Comparison | crux | — |
| stampy-aisafety-info | Stampy / AISafety.info | project | — |
| timelines-wiki | Timelines Wiki | project | — |
| corrigibility-failure | Corrigibility Failure | risk | — |
| existential-risk | Existential Risk from AI | concept | — |
| instrumental-convergence | Instrumental Convergence | risk | — |
| lock-in | AI Value Lock-in | risk | — |
| sharp-left-turn | Sharp Left Turn | risk | — |
| superintelligence | Superintelligence | concept | — |
| doomer | AI Doomer Worldview | concept | — |
| __index__/knowledge-base/worldviews | Worldviews | concept | — |
| optimistic | Optimistic Alignment Worldview | concept | — |
| similar-projects | Similar Projects to LongtermWiki: Research Report | concept | — |
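The backlink rows above are the kind of output produced by inverting the wiki's internal-link graph. Below is a minimal sketch, assuming each loaded page exposes a `linksTo` list; that field does not appear in the record on this page and is an assumption.

```ts
// Hypothetical backlink derivation: invert the forward internal-link graph.
// `linksTo` is an assumed field; only id, title, and entityType appear in the record above.
interface LoadedPage {
  id: string;
  title: string;
  entityType: string;
  linksTo: { id: string; relationship?: string }[];
}

interface BacklinkRow {
  id: string;
  title: string;
  type: string;
  relationship: string;
}

function backlinksFor(targetId: string, pages: LoadedPage[]): BacklinkRow[] {
  const rows: BacklinkRow[] = [];
  for (const page of pages) {
    for (const link of page.linksTo) {
      if (link.id !== targetId) continue;
      rows.push({
        id: page.id,
        title: page.title,
        type: page.entityType,
        relationship: link.relationship ?? "—", // the table above uses "—" when no relationship is set
      });
    }
  }
  return rows;
}

// backlinksFor("eliezer-yudkowsky", allPages) would yield rows such as
// { id: "lesswrong", title: "LessWrong", type: "organization", relationship: "leads-to" }.
```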