Eliezer Yudkowsky: Track Record
eliezer-yudkowsky-predictions
Path: /knowledge-base/people/eliezer-yudkowsky-predictions/
Entity ID (EID): E643
Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "eliezer-yudkowsky-predictions",
"wikiId": "E643",
"path": "/knowledge-base/people/eliezer-yudkowsky-predictions/",
"filePath": "knowledge-base/people/eliezer-yudkowsky-predictions.mdx",
"title": "Eliezer Yudkowsky: Track Record",
"quality": 61,
"readerImportance": 26,
"researchImportance": 43,
"tacticalValue": null,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-02-01",
"dateCreated": "2026-02-15",
"summary": "Comprehensive tracking of Eliezer Yudkowsky's predictions shows clear early errors (Singularity by 2021, nanotech timelines), vindication on AI generalization (2008 FOOM debate), and acknowledged updates on deep learning. Core doom predictions (99% p(doom)) remain unfalsifiable; IMO bet won against Christiano, but pattern shows overconfidence on capabilities timelines while maintaining extreme confidence on catastrophic outcomes.",
"description": "Documenting Eliezer Yudkowsky's AI predictions and claims - assessing accuracy, patterns of over/underconfidence, and epistemic track record",
"ratings": {
"focus": 8.5,
"novelty": 2.5,
"rigor": 6.5,
"completeness": 8,
"concreteness": 7.5,
"actionability": 1
},
"category": "people",
"subcategory": "track-records",
"clusters": [
"community",
"ai-safety"
],
"metrics": {
"wordCount": 4152,
"tableCount": 24,
"diagramCount": 0,
"internalLinks": 19,
"externalLinks": 45,
"footnoteCount": 0,
"bulletRatio": 0.28,
"sectionCount": 36,
"hasOverview": false,
"structuralScore": 13
},
"suggestedQuality": 87,
"updateFrequency": 90,
"evergreen": true,
"wordCount": 4152,
"unconvertedLinks": [
{
"text": "LessWrong",
"url": "https://www.lesswrong.com/posts/tcEFh3vPS6zEANTFZ/transcript-and-brief-response-to-twitter-conversation",
"resourceId": "68db44ed009d7b6d",
"resourceTitle": "Transcript and Brief Response to Twitter Conversation between Yann LeCunn and Eliezer Yudkowsky"
},
{
"text": "MIRI",
"url": "https://intelligence.org/ai-foom-debate/",
"resourceId": "bfb6662776fe5f08",
"resourceTitle": "The Hanson-Yudkowsky AI-Foom Debate"
},
{
"text": "LessWrong",
"url": "https://www.lesswrong.com/posts/gGSvwd62TJAxxhcGh/yudkowsky-vs-hanson-on-foom-whose-predictions-were-better",
"resourceId": "a9ebc8d14e5ef11b",
"resourceTitle": "Yudkowsky vs Hanson on FOOM: Whose Predictions Were Better?"
},
{
"text": "AI-FOOM Debate",
"url": "https://intelligence.org/ai-foom-debate/",
"resourceId": "bfb6662776fe5f08",
"resourceTitle": "The Hanson-Yudkowsky AI-Foom Debate"
},
{
"text": "LessWrong",
"url": "https://www.lesswrong.com/posts/sWLLdG6DWJEy3CH7n/imo-challenge-bet-with-eliezer",
"resourceId": "608dd3d09eb51892",
"resourceTitle": "IMO challenge bet with Eliezer"
},
{
"text": "LessWrong",
"url": "https://www.lesswrong.com/posts/7im8at9PmhbT4JHsW/ngo-and-yudkowsky-on-alignment-difficulty",
"resourceId": "8ce8d9c37151fbb5",
"resourceTitle": "Ngo and Yudkowsky on alignment difficulty"
},
{
"text": "TIME",
"url": "https://time.com/6266923/ai-eliezer-yudkowsky-open-letter-not-enough/",
"resourceId": "d0c81bbfe41efe44",
"resourceTitle": "Pausing AI Development Isn't Enough. We Need to Shut it All Down"
},
{
"text": "EA Forum",
"url": "https://forum.effectivealtruism.org/posts/NBgpPaz5vYe3tH4ga/on-deference-and-yudkowsky-s-ai-risk-estimates",
"resourceId": "e1fe34e189cc4c55",
"resourceTitle": "On Deference and Yudkowsky's AI Risk Estimates"
},
{
"text": "LessWrong",
"url": "https://www.lesswrong.com/posts/WyJKqCNiT7HJ6cHRB/when-did-eliezer-yudkowsky-change-his-mind-about-neural",
"resourceId": "ea8445e1a4052378",
"resourceTitle": "When did Eliezer Yudkowsky change his mind about neural networks?"
},
{
"text": "LessWrong",
"url": "https://www.lesswrong.com/posts/sWLLdG6DWJEy3CH7n/imo-challenge-bet-with-eliezer",
"resourceId": "608dd3d09eb51892",
"resourceTitle": "IMO challenge bet with Eliezer"
},
{
"text": "AI-FOOM Debate",
"url": "https://intelligence.org/ai-foom-debate/",
"resourceId": "bfb6662776fe5f08",
"resourceTitle": "The Hanson-Yudkowsky AI-Foom Debate"
},
{
"text": "EA Forum",
"url": "https://forum.effectivealtruism.org/posts/NBgpPaz5vYe3tH4ga/on-deference-and-yudkowsky-s-ai-risk-estimates",
"resourceId": "e1fe34e189cc4c55",
"resourceTitle": "On Deference and Yudkowsky's AI Risk Estimates"
},
{
"text": "LessWrong",
"url": "https://www.lesswrong.com/posts/j9Q8bRmwCgXRYAgcJ/miri-announces-new-death-with-dignity-strategy",
"resourceId": "79b5b7f6113c8a6c",
"resourceTitle": "MIRI announces new \"Death With Dignity\" strategy"
},
{
"text": "MIRI: No Fire Alarm",
"url": "https://intelligence.org/2017/10/13/fire-alarm/",
"resourceId": "599472695a5fba70",
"resourceTitle": "There's No Fire Alarm for Artificial General Intelligence"
},
{
"text": "TIME",
"url": "https://time.com/6266923/ai-eliezer-yudkowsky-open-letter-not-enough/",
"resourceId": "d0c81bbfe41efe44",
"resourceTitle": "Pausing AI Development Isn't Enough. We Need to Shut it All Down"
},
{
"text": "Alignment Forum",
"url": "https://www.alignmentforum.org/",
"resourceId": "2e0c662574087c2a",
"resourceTitle": "AI Alignment Forum"
},
{
"text": "EA Forum: On Deference and Yudkowsky's AI Risk Estimates",
"url": "https://forum.effectivealtruism.org/posts/NBgpPaz5vYe3tH4ga/on-deference-and-yudkowsky-s-ai-risk-estimates",
"resourceId": "e1fe34e189cc4c55",
"resourceTitle": "On Deference and Yudkowsky's AI Risk Estimates"
},
{
"text": "LessWrong: Yudkowsky vs Hanson on FOOM",
"url": "https://www.lesswrong.com/posts/gGSvwd62TJAxxhcGh/yudkowsky-vs-hanson-on-foom-whose-predictions-were-better",
"resourceId": "a9ebc8d14e5ef11b",
"resourceTitle": "Yudkowsky vs Hanson on FOOM: Whose Predictions Were Better?"
},
{
"text": "LessWrong: When did Eliezer change his mind about neural networks?",
"url": "https://www.lesswrong.com/posts/WyJKqCNiT7HJ6cHRB/when-did-eliezer-yudkowsky-change-his-mind-about-neural",
"resourceId": "ea8445e1a4052378",
"resourceTitle": "When did Eliezer Yudkowsky change his mind about neural networks?"
},
{
"text": "TIME: The Only Way to Deal With AI? Shut It Down",
"url": "https://time.com/6266923/ai-eliezer-yudkowsky-open-letter-not-enough/",
"resourceId": "d0c81bbfe41efe44",
"resourceTitle": "Pausing AI Development Isn't Enough. We Need to Shut it All Down"
},
{
"text": "MIRI: Death with Dignity",
"url": "https://www.lesswrong.com/posts/j9Q8bRmwCgXRYAgcJ/miri-announces-new-death-with-dignity-strategy",
"resourceId": "79b5b7f6113c8a6c",
"resourceTitle": "MIRI announces new \"Death With Dignity\" strategy"
},
{
"text": "MIRI: Hanson-Yudkowsky AI-FOOM Debate",
"url": "https://intelligence.org/ai-foom-debate/",
"resourceId": "bfb6662776fe5f08",
"resourceTitle": "The Hanson-Yudkowsky AI-Foom Debate"
},
{
"text": "LessWrong: IMO Challenge Bet",
"url": "https://www.lesswrong.com/posts/sWLLdG6DWJEy3CH7n/imo-challenge-bet-with-eliezer",
"resourceId": "608dd3d09eb51892",
"resourceTitle": "IMO challenge bet with Eliezer"
},
{
"text": "LessWrong: Ngo and Yudkowsky on Alignment Difficulty",
"url": "https://www.lesswrong.com/posts/7im8at9PmhbT4JHsW/ngo-and-yudkowsky-on-alignment-difficulty",
"resourceId": "8ce8d9c37151fbb5",
"resourceTitle": "Ngo and Yudkowsky on alignment difficulty"
},
{
"text": "MIRI: There's No Fire Alarm for AGI",
"url": "https://intelligence.org/2017/10/13/fire-alarm/",
"resourceId": "599472695a5fba70",
"resourceTitle": "There's No Fire Alarm for Artificial General Intelligence"
}
],
"unconvertedLinkCount": 25,
"convertedLinkCount": 0,
"backlinkCount": 1,
"hallucinationRisk": {
"level": "medium",
"score": 55,
"factors": [
"no-citations"
]
},
"redundancy": {
"maxSimilarity": 15,
"similarPages": [
{
"id": "eliezer-yudkowsky",
"title": "Eliezer Yudkowsky",
"path": "/knowledge-base/people/eliezer-yudkowsky/",
"similarity": 15
},
{
"id": "case-against-xrisk",
"title": "The Case AGAINST AI Existential Risk",
"path": "/knowledge-base/debates/case-against-xrisk/",
"similarity": 13
},
{
"id": "case-for-xrisk",
"title": "The Case FOR AI Existential Risk",
"path": "/knowledge-base/debates/case-for-xrisk/",
"similarity": 13
},
{
"id": "ai-timelines",
"title": "AI Timelines",
"path": "/knowledge-base/models/ai-timelines/",
"similarity": 13
},
{
"id": "yann-lecun",
"title": "Yann LeCun",
"path": "/knowledge-base/people/yann-lecun/",
"similarity": 13
}
]
},
"coverage": {
"passing": 4,
"total": 13,
"targets": {
"tables": 17,
"diagrams": 2,
"internalLinks": 33,
"externalLinks": 21,
"footnotes": 12,
"references": 12
},
"actuals": {
"tables": 24,
"diagrams": 0,
"internalLinks": 19,
"externalLinks": 45,
"footnotes": 0,
"references": 3,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "red",
"editHistory": "red",
"overview": "red",
"tables": "green",
"diagrams": "red",
"internalLinks": "amber",
"externalLinks": "green",
"footnotes": "red",
"references": "amber",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:2.5 R:6.5 A:1 C:8"
},
"readerRank": 479,
"researchRank": 321,
"recommendedScore": 148.81
}External Links
No external links
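The record header above states that database.json is merged from MDX frontmatter, an entity YAML file, and metrics computed at build time. Below is a minimal sketch of that merge, assuming a Node build script; the loader packages (gray-matter, yaml), the frontmatter field names, and the buildPageRecord function are illustrative assumptions, not the site's actual build code.

```typescript
// Hypothetical build-time merge producing a record shaped like the one above.
// Assumes gray-matter for MDX frontmatter, the "yaml" package for entity files,
// and that frontmatter carries id, title, and category. Not the site's real code.
import { readFileSync } from "node:fs";
import matter from "gray-matter";
import { parse as parseYaml } from "yaml";

interface PageRecord {
  id: string;
  path: string;
  filePath: string;
  title: string;
  wordCount: number;
  metrics: { wordCount: number; tableCount: number; externalLinks: number };
  [key: string]: unknown; // quality, ratings, coverage, etc.
}

function buildPageRecord(mdxPath: string, entityYamlPath?: string): PageRecord {
  const source = readFileSync(mdxPath, "utf8");
  const { data: frontmatter, content } = matter(source); // frontmatter + MDX body

  // Entity YAML contributes identifiers such as wikiId ("E643").
  const entity = entityYamlPath
    ? parseYaml(readFileSync(entityYamlPath, "utf8"))
    : {};

  // Metrics computed from the MDX body (rough proxies for illustration).
  const wordCount = content.split(/\s+/).filter(Boolean).length;
  const tableCount = (content.match(/^\|[\s:|-]+\|\s*$/gm) ?? []).length; // one separator row per table
  const externalLinks = (content.match(/\]\(https?:\/\//g) ?? []).length;

  return {
    ...entity,
    ...frontmatter, // frontmatter wins over entity fields on conflict (assumed precedence)
    id: frontmatter.id,
    path: `/knowledge-base/${frontmatter.category}/${frontmatter.id}/`,
    filePath: mdxPath,
    title: frontmatter.title,
    wordCount,
    metrics: { wordCount, tableCount, externalLinks },
  };
}
```

A real implementation would also compute the structural and graph-dependent fields shown above (bulletRatio, sectionCount, backlinkCount, redundancy), which require the full site corpus rather than a single file.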
Backlinks (1)
| id | title | type | relationship |
|---|---|---|---|
| track-records-overview | Track Records (Overview) | concept | — |
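The coverage block in the record pairs per-metric targets with actuals and assigns each item a green/amber/red status. The sketch below shows one way those statuses could be derived; the 25% amber cutoff is an assumption chosen because it happens to reproduce this page's six numeric statuses, not a documented rule of the build system.

```typescript
// Hypothetical status derivation for the coverage "items" map in the record above.
// The 0.25 amber threshold is an assumption, not taken from the site's build code.
type Status = "green" | "amber" | "red";

function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green";        // met or exceeded the target
  if (actual >= target * 0.25) return "amber"; // partial progress (assumed cutoff)
  return "red";
}

const targets = { tables: 17, diagrams: 2, internalLinks: 33, externalLinks: 21, footnotes: 12, references: 12 };
const actuals = { tables: 24, diagrams: 0, internalLinks: 19, externalLinks: 45, footnotes: 0, references: 3 };

const items = Object.fromEntries(
  (Object.keys(targets) as (keyof typeof targets)[]).map((key) => [
    key,
    coverageStatus(actuals[key], targets[key]),
  ])
);
// items -> { tables: "green", diagrams: "red", internalLinks: "amber",
//            externalLinks: "green", footnotes: "red", references: "amber" }
// matching the numeric coverage items in the record above.
```

The non-numeric items in the record (summary, schedule, entity, editHistory, overview, quotes, accuracy) would presumably come from separate boolean or count checks rather than this target/actual ratio.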