Yann LeCun
yann-lecun · person · Path: /knowledge-base/people/yann-lecun/
E582 — Entity ID (EID)
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "yann-lecun",
"wikiId": "E582",
"path": "/knowledge-base/people/yann-lecun/",
"filePath": "knowledge-base/people/yann-lecun.mdx",
"title": "Yann LeCun",
"quality": 41,
"readerImportance": 61.5,
"researchImportance": 29,
"tacticalValue": null,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-02-15",
"dateCreated": "2026-02-01",
"summary": "Comprehensive biographical profile of Yann LeCun documenting his technical contributions (CNNs, JEPA), his ~0% AI extinction risk estimate, and his opposition to AI safety regulation including SB 1047. Includes detailed 'Statements & Track Record' section analyzing his prediction accuracy—noting strength in long-term architectural intuitions but pattern of underestimating near-term LLM capabilities. Catalogs debates with Hinton, Bengio, and Yudkowsky, and tracks his November 2025 departure from Meta to found AMI Labs.",
"description": "Turing Award winner and 'Godfather of AI' who remains one of the most prominent skeptics of AI existential risk, arguing that concerns about superintelligent AI are premature and that AI systems can be designed to remain under human control",
"ratings": {
"novelty": 3.5,
"rigor": 4.5,
"completeness": 7.5,
"actionability": 2
},
"category": "people",
"subcategory": "lab-leadership",
"clusters": [
"ai-safety"
],
"metrics": {
"wordCount": 4400,
"tableCount": 25,
"diagramCount": 1,
"internalLinks": 27,
"externalLinks": 18,
"footnoteCount": 0,
"bulletRatio": 0.1,
"sectionCount": 54,
"hasOverview": true,
"structuralScore": 15
},
"suggestedQuality": 100,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 4400,
"unconvertedLinks": [
{
"text": "en.wikipedia.org",
"url": "https://en.wikipedia.org/wiki/Yann_LeCun",
"resourceId": "914e07c146555ae9",
"resourceTitle": "Yann LeCun - Wikipedia"
},
{
"text": "Meta's Yann LeCun says worries about AI's existential threat are 'complete B.S.'",
"url": "https://techcrunch.com/2024/10/12/metas-yann-lecun-says-worries-about-a-i-s-existential-threat-are-complete-b-s/",
"resourceId": "61b8ab42c6b32b27",
"resourceTitle": "Meta's Yann LeCun Says Worries About AI's Existential Threat Are 'Complete B.S.'"
},
{
"text": "Transcript of Twitter Conversation Between Yann LeCun and Eliezer Yudkowsky",
"url": "https://www.lesswrong.com/posts/tcEFh3vPS6zEANTFZ/transcript-and-brief-response-to-twitter-conversation",
"resourceId": "68db44ed009d7b6d",
"resourceTitle": "Transcript and Brief Response to Twitter Conversation between Yann LeCunn and Eliezer Yudkowsky"
},
{
"text": "AI whiz Yann LeCun is already targeting a $1.5 billion valuation",
"url": "https://fortune.com/2025/12/19/yann-lecun-ami-labs-ai-startup-valuation-meta-departure/",
"resourceId": "96212024a0dc8d36",
"resourceTitle": "Fortune - Yann LeCun AMI Valuation"
},
{
"text": "Yann LeCun - Wikipedia",
"url": "https://en.wikipedia.org/wiki/Yann_LeCun",
"resourceId": "914e07c146555ae9",
"resourceTitle": "Yann LeCun - Wikipedia"
}
],
"unconvertedLinkCount": 5,
"convertedLinkCount": 0,
"backlinkCount": 26,
"hallucinationRisk": {
"level": "high",
"score": 75,
"factors": [
"biographical-claims",
"no-citations"
]
},
"entityType": "person",
"redundancy": {
"maxSimilarity": 16,
"similarPages": [
{
"id": "miri-era",
"title": "The MIRI Era (2000-2015)",
"path": "/knowledge-base/history/miri-era/",
"similarity": 16
},
{
"id": "eliezer-yudkowsky",
"title": "Eliezer Yudkowsky",
"path": "/knowledge-base/people/eliezer-yudkowsky/",
"similarity": 16
},
{
"id": "ilya-sutskever",
"title": "Ilya Sutskever",
"path": "/knowledge-base/people/ilya-sutskever/",
"similarity": 16
},
{
"id": "case-against-xrisk",
"title": "The Case AGAINST AI Existential Risk",
"path": "/knowledge-base/debates/case-against-xrisk/",
"similarity": 15
},
{
"id": "is-ai-xrisk-real",
"title": "Is AI Existential Risk Real?",
"path": "/knowledge-base/debates/is-ai-xrisk-real/",
"similarity": 15
}
]
},
"coverage": {
"passing": 5,
"total": 13,
"targets": {
"tables": 18,
"diagrams": 2,
"internalLinks": 35,
"externalLinks": 22,
"footnotes": 13,
"references": 13
},
"actuals": {
"tables": 25,
"diagrams": 1,
"internalLinks": 27,
"externalLinks": 18,
"footnotes": 0,
"references": 3,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "green",
"diagrams": "amber",
"internalLinks": "amber",
"externalLinks": "amber",
"footnotes": "red",
"references": "amber",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:3.5 R:4.5 A:2 C:7.5"
},
"readerRank": 223,
"researchRank": 421,
"recommendedScore": 126.56
}
External Links
{
"grokipedia": "https://grokipedia.com/page/Yann_LeCun"
}
Backlinks (26)
| id | title | type | relationship |
|---|---|---|---|
| meta-ai | Meta AI (FAIR) | organization | research |
| meta-ai | Meta AI (FAIR) | organization | — |
| case-against-xrisk | The Case AGAINST AI Existential Risk | argument | — |
| is-ai-xrisk-real | Is AI Existential Risk Real? | crux | — |
| open-vs-closed | Open vs Closed Source AI | crux | — |
| pause-debate | Should We Pause AI Development? | crux | — |
| scaling-debate | Is Scaling All You Need? | crux | — |
| why-alignment-hard | Why Alignment Might Be Hard | argument | — |
| agi-timeline | AGI Timeline | concept | — |
| miri-era | The MIRI Era (2000-2015) | historical | — |
| world-models | World Models + Planning | capability | — |
| power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis | — |
| arc | Alignment Research Center (ARC) | organization | — |
| fli | Future of Life Institute (FLI) | organization | — |
| frontier-ai-comparison | Frontier AI Company Comparison (2026) | concept | — |
| daniela-amodei | Daniela Amodei | person | — |
| eliezer-yudkowsky-predictions | Eliezer Yudkowsky: Track Record | concept | — |
| eliezer-yudkowsky | Eliezer Yudkowsky | person | — |
| elon-musk | Elon Musk | person | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| stuart-russell | Stuart Russell | person | — |
| yann-lecun-predictions | Yann LeCun: Track Record | concept | — |
| yoshua-bengio | Yoshua Bengio | person | — |
| california-sb1047 | California SB 1047 | policy | — |
| existential-risk | Existential Risk from AI | concept | — |
| optimistic | Optimistic Alignment Worldview | concept | — |