Skip to content
Longterm Wiki

Dan Hendrycks

dan-hendrycks · person · Path: /knowledge-base/people/dan-hendrycks/
E89 — Entity ID (EID)
← Back to page · 15 backlinks · Quality: 19 · Updated: 2026-03-16
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "dan-hendrycks",
  "wikiId": "E89",
  "path": "/knowledge-base/people/dan-hendrycks/",
  "filePath": "knowledge-base/people/dan-hendrycks.mdx",
  "title": "Dan Hendrycks",
  "quality": 19,
  "readerImportance": 87,
  "researchImportance": 39.5,
  "tacticalValue": 78,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-03-16",
  "dateCreated": "2026-02-15",
  "summary": "Comprehensive reference biography of Dan Hendrycks (CAIS director), covering his academic career (GELU, MMLU, OOD detection), CAIS founding and funding (including \\$6.5M FTX, Open Philanthropy/Coefficient Giving grants), policy work (SB 1047, NIST RMF input), and 2025 Superintelligence Strategy paper co-authored with Eric Schmidt and Alexandr Wang. The page is well-sourced and largely neutral but is purely descriptive reference material with no original synthesis or actionable guidance.",
  "description": "Director of CAIS, focuses on catastrophic AI risk reduction through research, education, and policy advocacy",
  "ratings": {
    "novelty": 1.5,
    "rigor": 2,
    "completeness": 4,
    "actionability": 1
  },
  "category": "people",
  "subcategory": "safety-researchers",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 3983,
    "tableCount": 2,
    "diagramCount": 0,
    "internalLinks": 49,
    "externalLinks": 11,
    "footnoteCount": 45,
    "bulletRatio": 0.14,
    "sectionCount": 22,
    "hasOverview": true,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": null,
  "evergreen": true,
  "wordCount": 3983,
  "unconvertedLinks": [
    {
      "text": "course.mlsafety.org",
      "url": "https://course.mlsafety.org/",
      "resourceId": "65c9fe2d57a4eb4c",
      "resourceTitle": "Intro to ML Safety Course"
    },
    {
      "text": "A Baseline for Detecting Misclassified and Out-of-Distribution Examples",
      "url": "https://arxiv.org/abs/1610.02136",
      "resourceId": "e607f629ec7bed70",
      "resourceTitle": "Hendrycks and Gimpel (2017)"
    },
    {
      "text": "Measuring Massive Multitask Language Understanding",
      "url": "https://arxiv.org/abs/2009.03300",
      "resourceId": "0635974beafcf9c5",
      "resourceTitle": "[2009.03300] Measuring Massive Multitask Language Understanding"
    },
    {
      "text": "Aligning AI With Shared Human Values",
      "url": "https://arxiv.org/abs/2008.02275",
      "resourceId": "57379f24535e9c04",
      "resourceTitle": "ICLR 2021"
    },
    {
      "text": "Unsolved Problems in ML Safety",
      "url": "https://arxiv.org/abs/2109.13916",
      "resourceId": "f94e705023d45765",
      "resourceTitle": "Unsolved Problems in ML Safety"
    },
    {
      "text": "Actionable Guidance for High-Consequence AI Risk Management",
      "url": "https://arxiv.org/abs/2206.08966",
      "resourceId": "b88263a70cbf743e",
      "resourceTitle": "Barrett, A.M., Hendrycks, D., Newman, J., & Nonnecke, B."
    },
    {
      "text": "Superintelligence Strategy",
      "url": "https://arxiv.org/abs/2503.05628",
      "resourceId": "a15589d5e604d864",
      "resourceTitle": "Hendrycks, D., Schmidt, E., & Wang, A."
    }
  ],
  "unconvertedLinkCount": 7,
  "convertedLinkCount": 0,
  "backlinkCount": 15,
  "hallucinationRisk": {
    "level": "medium",
    "score": 60,
    "factors": [
      "biographical-claims",
      "low-rigor-score",
      "low-quality-score",
      "well-cited"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 18,
    "similarPages": [
      {
        "id": "cais",
        "title": "Center for AI Safety (CAIS)",
        "path": "/knowledge-base/organizations/cais/",
        "similarity": 18
      },
      {
        "id": "is-ai-xrisk-real",
        "title": "Is AI Existential Risk Real?",
        "path": "/knowledge-base/debates/is-ai-xrisk-real/",
        "similarity": 17
      },
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 17
      },
      {
        "id": "deep-learning-era",
        "title": "Deep Learning Revolution (2012-2020)",
        "path": "/knowledge-base/history/deep-learning-era/",
        "similarity": 16
      },
      {
        "id": "anthropic",
        "title": "Anthropic",
        "path": "/knowledge-base/organizations/anthropic/",
        "similarity": 16
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-18",
      "branch": "claude/fix-issue-240-N5irU",
      "title": "Surface tacticalValue in /wiki table and score 53 pages",
      "summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
      "model": "sonnet-4",
      "duration": "~30min"
    },
    {
      "date": "2026-02-17",
      "branch": "claude/review-wiki-editing-scCul",
      "title": "Wiki editing system refactoring",
      "summary": "Six refactors to the wiki editing pipeline: (1) extracted shared regex patterns to `crux/lib/patterns.ts`, (2) refactored validation in page-improver to use in-process engine calls instead of subprocess spawning, (3) split the 694-line `phases.ts` into 7 individual phase modules under `phases/`, (4) created shared LLM abstraction `crux/lib/llm.ts` unifying duplicated streaming/retry/tool-loop code, (5) added Zod schemas for LLM JSON response validation, (6) decomposed 820-line mermaid validation into `crux/lib/mermaid-checks.ts` (604 lines) + slim orchestrator (281 lines). Follow-up review integrated patterns.ts across 19+ files, fixed dead imports, corrected ToolHandler type, wired mdx-utils.ts to use shared patterns, replaced hardcoded model strings with MODELS constants, replaced `new Anthropic()` with `createLlmClient()`, replaced inline `extractText` implementations with shared `extractText()` from llm.ts, integrated `MARKDOWN_LINK_RE` into link validators, added `objectivityIssues` to the `AnalysisResult` type (removing an unsafe cast in utils.ts), fixed CI failure from eager client creation, and tested the full pipeline by improving 3 wiki pages. After manual review of 3 improved pages, fixed 8 systematic pipeline issues: (1) added content preservation instructions to prevent polish-tier content loss, (2) made auto-grading default after --apply, (3) added polish-tier citation suppression to prevent fabricated citations, (4) added Quick Assessment table requirement for person pages, (5) added required Overview section enforcement, (6) added section deduplication and content repetition checks to review phase, (7) added bare URL→markdown link conversion instruction, (8) extended biographical claim checker to catch publication/co-authorship and citation count claims.\n\nSubsequent iterative testing and prompt refinement: ran pipeline on jan-leike, chris-olah, far-ai pages. 
Discovered and fixed: (a) `<!-- NEEDS CITATION -->` HTML comments break MDX compilation (changed to `{/* NEEDS CITATION */}`), (b) excessive citation markers at polish tier — added instruction to only mark NEW claims (max 3-5 per page), (c) editorial meta-comments cluttering output — added no-meta-comments instruction, (d) thin padding sections — added anti-padding instruction, (e) section deduplication needed stronger emphasis — added merge instruction with common patterns. Final test results: jan-leike 1254→1997 words, chris-olah 1187→1687 words, far-ai 1519→2783 words, miri-era 2678→4338 words; all MDX compile, zero critical issues.",
      "pr": 184
    }
  ],
  "coverage": {
    "passing": 6,
    "total": 13,
    "targets": {
      "tables": 16,
      "diagrams": 2,
      "internalLinks": 32,
      "externalLinks": 20,
      "footnotes": 12,
      "references": 12
    },
    "actuals": {
      "tables": 2,
      "diagrams": 0,
      "internalLinks": 49,
      "externalLinks": 11,
      "footnotes": 45,
      "references": 9,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "red",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "amber",
      "footnotes": "green",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 2,
    "ratingsString": "N:1.5 R:2 A:1 C:4"
  },
  "readerRank": 38,
  "researchRank": 345,
  "recommendedScore": 100.4
}
External Links
{
  "grokipedia": "https://grokipedia.com/page/Dan_Hendrycks"
}
Backlinks (15)
id · title · type · relationship
caisCenter for AI Safety (CAIS)organizationleads-to
far-aiFAR AIorganization
evalsAI Evaluationsresearch-arearesearch
maimMAIM (Mutually Assured AI Malfunction)approach
is-ai-xrisk-realIs AI Existential Risk Real?crux
warning-signs-modelWarning Signs Modelanalysis
ai-impactsAI Impactsorganization
coefficient-givingCoefficient Givingorganization
manifoldManifold (Prediction Market)organization
manifundManifundorganization
__index__/knowledge-base/peoplePeopleconcept
california-sb1047California SB 1047policy
lab-cultureAI Lab Safety Cultureapproach
training-programsAI Safety Training Programsapproach
emergent-capabilitiesEmergent Capabilitiesrisk
Longterm Wiki