
Yann LeCun

ID: yann-lecun · Type: person · Path: /knowledge-base/people/yann-lecun/
Entity ID (EID): E582
Backlinks: 26 · Quality: 41 · Updated: 2026-02-01
Page Record (database.json): merged from MDX frontmatter + Entity YAML + computed metrics at build time. A sketch of this merge step follows the record below.
{
  "id": "yann-lecun",
  "wikiId": "E582",
  "path": "/knowledge-base/people/yann-lecun/",
  "filePath": "knowledge-base/people/yann-lecun.mdx",
  "title": "Yann LeCun",
  "quality": 41,
  "readerImportance": 61.5,
  "researchImportance": 29,
  "tacticalValue": null,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-02-01",
  "dateCreated": "2026-02-15",
  "summary": "Comprehensive biographical profile of Yann LeCun documenting his technical contributions (CNNs, JEPA), his ~0% AI extinction risk estimate, and his opposition to AI safety regulation including SB 1047. Includes detailed 'Statements & Track Record' section analyzing his prediction accuracy—noting strength in long-term architectural intuitions but pattern of underestimating near-term LLM capabilities. Catalogs debates with Hinton, Bengio, and Yudkowsky, and tracks his November 2025 departure from Meta to found AMI Labs.",
  "description": "Turing Award winner and 'Godfather of AI' who remains one of the most prominent skeptics of AI existential risk, arguing that concerns about superintelligent AI are premature and that AI systems can be designed to remain under human control",
  "ratings": {
    "novelty": 3.5,
    "rigor": 4.5,
    "completeness": 7.5,
    "actionability": 2
  },
  "category": "people",
  "subcategory": "lab-leadership",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 4400,
    "tableCount": 25,
    "diagramCount": 1,
    "internalLinks": 27,
    "externalLinks": 18,
    "footnoteCount": 0,
    "bulletRatio": 0.1,
    "sectionCount": 54,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 4400,
  "unconvertedLinks": [
    {
      "text": "en.wikipedia.org",
      "url": "https://en.wikipedia.org/wiki/Yann_LeCun",
      "resourceId": "914e07c146555ae9",
      "resourceTitle": "Yann LeCun - Wikipedia"
    },
    {
      "text": "Meta's Yann LeCun says worries about AI's existential threat are 'complete B.S.'",
      "url": "https://techcrunch.com/2024/10/12/metas-yann-lecun-says-worries-about-a-i-s-existential-threat-are-complete-b-s/",
      "resourceId": "61b8ab42c6b32b27",
      "resourceTitle": "Meta's Yann LeCun Says Worries About AI's Existential Threat Are 'Complete B.S.'"
    },
    {
      "text": "Transcript of Twitter Conversation Between Yann LeCun and Eliezer Yudkowsky",
      "url": "https://www.lesswrong.com/posts/tcEFh3vPS6zEANTFZ/transcript-and-brief-response-to-twitter-conversation",
      "resourceId": "68db44ed009d7b6d",
      "resourceTitle": "Transcript and Brief Response to Twitter Conversation between Yann LeCunn and Eliezer Yudkowsky"
    },
    {
      "text": "AI whiz Yann LeCun is already targeting a \\$1.5 billion valuation",
      "url": "https://fortune.com/2025/12/19/yann-lecun-ami-labs-ai-startup-valuation-meta-departure/",
      "resourceId": "96212024a0dc8d36",
      "resourceTitle": "Fortune - Yann LeCun AMI Valuation"
    },
    {
      "text": "Yann LeCun - Wikipedia",
      "url": "https://en.wikipedia.org/wiki/Yann_LeCun",
      "resourceId": "914e07c146555ae9",
      "resourceTitle": "Yann LeCun - Wikipedia"
    }
  ],
  "unconvertedLinkCount": 5,
  "convertedLinkCount": 0,
  "backlinkCount": 26,
  "hallucinationRisk": {
    "level": "high",
    "score": 75,
    "factors": [
      "biographical-claims",
      "no-citations"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 16,
    "similarPages": [
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 16
      },
      {
        "id": "eliezer-yudkowsky",
        "title": "Eliezer Yudkowsky",
        "path": "/knowledge-base/people/eliezer-yudkowsky/",
        "similarity": 16
      },
      {
        "id": "ilya-sutskever",
        "title": "Ilya Sutskever",
        "path": "/knowledge-base/people/ilya-sutskever/",
        "similarity": 16
      },
      {
        "id": "case-against-xrisk",
        "title": "The Case AGAINST AI Existential Risk",
        "path": "/knowledge-base/debates/case-against-xrisk/",
        "similarity": 15
      },
      {
        "id": "is-ai-xrisk-real",
        "title": "Is AI Existential Risk Real?",
        "path": "/knowledge-base/debates/is-ai-xrisk-real/",
        "similarity": 15
      }
    ]
  },
  "coverage": {
    "passing": 5,
    "total": 13,
    "targets": {
      "tables": 18,
      "diagrams": 2,
      "internalLinks": 35,
      "externalLinks": 22,
      "footnotes": 13,
      "references": 13
    },
    "actuals": {
      "tables": 25,
      "diagrams": 1,
      "internalLinks": 27,
      "externalLinks": 18,
      "footnotes": 0,
      "references": 3,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "amber",
      "internalLinks": "amber",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:3.5 R:4.5 A:2 C:7.5"
  },
  "readerRank": 223,
  "researchRank": 421,
  "recommendedScore": 126.56
}
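How this record is assembled is not documented here, so the following is a minimal TypeScript sketch, not the wiki's actual implementation. It assumes the build step merges parsed MDX frontmatter and Entity YAML into one object and derives each green/amber/red coverage status from actuals vs. targets; the rule below (zero maps to red, below target to amber, at or above target to green) is inferred from the record above. All field names and the merge precedence are assumptions.

type Status = "green" | "amber" | "red";

// Inferred status rule (an assumption): it matches the record above, e.g.
// tables 25/18 -> green, diagrams 1/2 -> amber, footnotes 0/13 -> red,
// references 3/13 -> amber.
function statusFor(actual: number, target: number): Status {
  if (actual === 0 && target > 0) return "red";
  return actual >= target ? "green" : "amber";
}

// Hypothetical merge step. Frontmatter and entity YAML are assumed to be
// parsed elsewhere into plain objects; computed metrics (word count, link
// counts) come from the rendered page at build time.
function buildPageRecord(
  frontmatter: Record<string, unknown>,
  entityYaml: Record<string, unknown>,
  metrics: Record<string, number>,
  targets: Record<string, number>,
) {
  const items: Record<string, Status> = {};
  for (const [metric, target] of Object.entries(targets)) {
    items[metric] = statusFor(metrics[metric] ?? 0, target);
  }
  // Assumed precedence: frontmatter overrides entity YAML on conflicts.
  return {
    ...entityYaml,
    ...frontmatter,
    metrics,
    coverage: { targets, actuals: metrics, items },
  };
}

Applied to the targets and actuals above, statusFor reproduces the six metric-backed entries in the items map (tables through references); the non-metric items (summary, quotes, accuracy, and so on) evidently follow other checks not shown here.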
External Links
{
  "grokipedia": "https://grokipedia.com/page/Yann_LeCun"
}
Backlinks (26)
| id | title | type | relationship |
| --- | --- | --- | --- |
| meta-ai | Meta AI (FAIR) | organization | research |
| meta-ai | Meta AI (FAIR) | organization | |
| case-against-xrisk | The Case AGAINST AI Existential Risk | argument | |
| is-ai-xrisk-real | Is AI Existential Risk Real? | crux | |
| open-vs-closed | Open vs Closed Source AI | crux | |
| pause-debate | Should We Pause AI Development? | crux | |
| scaling-debate | Is Scaling All You Need? | crux | |
| why-alignment-hard | Why Alignment Might Be Hard | argument | |
| agi-timeline | AGI Timeline | concept | |
| miri-era | The MIRI Era (2000-2015) | historical | |
| world-models | World Models + Planning | capability | |
| power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis | |
| arc | Alignment Research Center (ARC) | organization | |
| fli | Future of Life Institute (FLI) | organization | |
| frontier-ai-comparison | Frontier AI Company Comparison (2026) | concept | |
| daniela-amodei | Daniela Amodei | person | |
| eliezer-yudkowsky-predictions | Eliezer Yudkowsky: Track Record | concept | |
| eliezer-yudkowsky | Eliezer Yudkowsky | person | |
| elon-musk | Elon Musk | person | |
| geoffrey-hinton | Geoffrey Hinton | person | |
| stuart-russell | Stuart Russell | person | |
| yann-lecun-predictions | Yann LeCun: Track Record | concept | |
| yoshua-bengio | Yoshua Bengio | person | |
| california-sb1047 | California SB 1047 | policy | |
| existential-risk | Existential Risk from AI | concept | |
| optimistic | Optimistic Alignment Worldview | concept | |