Skip to content
Longterm Wiki

Paul Christiano

paul-christiano · person · Path: /knowledge-base/people/paul-christiano/
Entity ID (EID): E220
← Back to page · 52 backlinks · Quality: 39 · Updated: 2026-01-02
Page Record — database.json, merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "paul-christiano",
  "wikiId": "E220",
  "path": "/knowledge-base/people/paul-christiano/",
  "filePath": "knowledge-base/people/paul-christiano.mdx",
  "title": "Paul Christiano",
  "quality": 39,
  "readerImportance": 28,
  "researchImportance": 36,
  "tacticalValue": null,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-01-02",
  "dateCreated": "2026-02-15",
  "summary": "Comprehensive biography of Paul Christiano documenting his technical contributions (IDA, debate, scalable oversight), risk assessment (~10-20% P(doom), AGI 2030s-2040s), and evolution from higher optimism to current moderate concern. Documents implementation of his ideas at major labs (RLHF at OpenAI, Constitutional AI at Anthropic) with specific citation to papers and organizational impact.",
  "description": "Founder of ARC, creator of iterated amplification and AI safety via debate. Current risk assessment ~10-20% P(doom), AGI 2030s-2040s. Pioneered prosaic alignment approach focusing on scalable oversight mechanisms.",
  "ratings": {
    "novelty": 2,
    "rigor": 4.5,
    "completeness": 6,
    "actionability": 2
  },
  "category": "people",
  "subcategory": "safety-researchers",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 1111,
    "tableCount": 12,
    "diagramCount": 0,
    "internalLinks": 47,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.1,
    "sectionCount": 25,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 1111,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 18,
  "backlinkCount": 52,
  "hallucinationRisk": {
    "level": "high",
    "score": 85,
    "factors": [
      "biographical-claims",
      "no-citations",
      "low-quality-score",
      "few-external-sources"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 13,
    "similarPages": [
      {
        "id": "chai",
        "title": "Center for Human-Compatible AI (CHAI)",
        "path": "/knowledge-base/organizations/chai/",
        "similarity": 13
      },
      {
        "id": "holden-karnofsky",
        "title": "Holden Karnofsky",
        "path": "/knowledge-base/people/holden-karnofsky/",
        "similarity": 12
      },
      {
        "id": "safety-research-value",
        "title": "Expected Value of AI Safety Research",
        "path": "/knowledge-base/models/safety-research-value/",
        "similarity": 11
      },
      {
        "id": "conjecture",
        "title": "Conjecture",
        "path": "/knowledge-base/organizations/conjecture/",
        "similarity": 11
      },
      {
        "id": "scheming-likelihood-model",
        "title": "Scheming Likelihood Assessment",
        "path": "/knowledge-base/models/scheming-likelihood-model/",
        "similarity": 10
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 4,
      "diagrams": 0,
      "internalLinks": 9,
      "externalLinks": 6,
      "footnotes": 3,
      "references": 3
    },
    "actuals": {
      "tables": 12,
      "diagrams": 0,
      "internalLinks": 47,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 14,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2 R:4.5 A:2 C:6"
  },
  "readerRank": 467,
  "researchRank": 371,
  "recommendedScore": 102.75
}
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/paul-christiano",
  "wikidata": "https://www.wikidata.org/wiki/Q64769299"
}
Backlinks (52)
id | title | type | relationship
capability-alignment-race | Capability-Alignment Race Model | analysis
model-organisms-of-misalignment | Model Organisms of Misalignment | analysis
metr | METR | organization
arc | Alignment Research Center (ARC) | organization
miri | Machine Intelligence Research Institute (MIRI) | organization
us-aisi | US AI Safety Institute (now CAISI) | organization
arc-evals | ARC Evaluations | organization | leads-to
long-term-benefit-trust | Anthropic Long-Term Benefit Trust | organization
nist-ai | NIST and AI Safety | organization
eliezer-yudkowsky | Eliezer Yudkowsky | person
rlhf | RLHF | research-area | research
scalable-oversight | Scalable Oversight | research-area | research
accident-risks | AI Accident Risk Cruxes | crux
is-ai-xrisk-real | Is AI Existential Risk Real? | crux
why-alignment-easy | Why Alignment Might Be Easy | argument
why-alignment-hard | Why Alignment Might Be Hard | argument
deep-learning-era | Deep Learning Revolution (2012-2020) | historical
miri-era | The MIRI Era (2000-2015) | historical
__index__/knowledge-base | Knowledge Base | concept
ai-timelines | AI Timelines | concept
anthropic-pledge-enforcement | Anthropic Founder Pledges: Interventions to Increase Follow-Through | analysis
defense-in-depth-model | Defense in Depth Model | analysis
power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis
scheming-likelihood-model | Scheming Likelihood Assessment | analysis
anthropic-investors | Anthropic (Funder) | analysis
fli | Future of Life Institute (FLI) | organization
__index__/knowledge-base/organizations | Organizations | concept
ltff | Long-Term Future Fund (LTFF) | organization
manifund | Manifund | organization
redwood-research | Redwood Research | organization
safety-orgs-overview | AI Safety Organizations (Overview) | concept
tsmc | TSMC | organization
ajeya-cotra | Ajeya Cotra | person
dario-amodei | Dario Amodei | person
dustin-moskovitz | Dustin Moskovitz | person
eliezer-yudkowsky-predictions | Eliezer Yudkowsky: Track Record | concept
evan-hubinger | Evan Hubinger | person
gwern | Gwern Branwen | person
helen-toner | Helen Toner | person
holden-karnofsky | Holden Karnofsky | person
__index__/knowledge-base/people | People | concept
jan-leike | Jan Leike | person
jared-kaplan | Jared Kaplan | person
tom-brown | Tom Brown | person
ai-control | AI Control | research-area
alignment | AI Alignment | approach
research-agendas | AI Alignment Research Agenda Comparison | crux
sleeper-agent-detection | Sleeper Agent Detection | approach
existential-risk | Existential Risk from AI | concept
superintelligence | Superintelligence | concept
doomer | AI Doomer Worldview | concept
optimistic | Optimistic Alignment Worldview | concept
Longterm Wiki