Paul Christiano
paul-christiano (person)
Path: /knowledge-base/people/paul-christiano/
Entity ID (EID): E220
Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
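A minimal sketch of how such a record might be assembled at build time, ahead of the full record for this page below. The split of fields between frontmatter and entity YAML, the "---" frontmatter delimiter, and the metric definitions (bulletRatio, tableCount, and so on) are assumptions for illustration, not documented behavior of this knowledge base's build step.

```python
# Sketch: merge MDX frontmatter, entity YAML, and computed metrics into one
# page record. Field routing and metric definitions are assumed, not documented.
import re
import yaml  # PyYAML


def split_frontmatter(mdx_text: str):
    """Split an MDX file into (frontmatter dict, body), assuming '---' delimiters."""
    match = re.match(r"^---\n(.*?)\n---\n(.*)$", mdx_text, re.DOTALL)
    if not match:
        return {}, mdx_text
    return yaml.safe_load(match.group(1)) or {}, match.group(2)


def compute_metrics(body: str) -> dict:
    """Recompute a few of the metrics visible in the record (assumed definitions)."""
    lines = [line for line in body.splitlines() if line.strip()]
    bullets = [line for line in lines if line.lstrip().startswith(("-", "*"))]
    return {
        "wordCount": len(body.split()),
        "tableCount": sum(1 for line in lines if line.lstrip().startswith("|---")),
        "internalLinks": len(re.findall(r"\]\(/knowledge-base/", body)),
        "externalLinks": len(re.findall(r"\]\(https?://", body)),
        "sectionCount": sum(1 for line in lines if line.startswith("#")),
        "bulletRatio": round(len(bullets) / len(lines), 2) if lines else 0.0,
    }


def build_page_record(mdx_text: str, entity_yaml: str) -> dict:
    """Merge the three sources; frontmatter values win over entity values in this sketch."""
    frontmatter, body = split_frontmatter(mdx_text)
    entity = yaml.safe_load(entity_yaml) or {}
    record = {**entity, **frontmatter}
    record["metrics"] = compute_metrics(body)
    record["wordCount"] = record["metrics"]["wordCount"]
    return record
```

Corpus-level fields in the record below (backlinkCount, redundancy, readerRank, researchRank, recommendedScore) cannot be computed from a single file and would come from a later pass over all pages.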
{
"id": "paul-christiano",
"wikiId": "E220",
"path": "/knowledge-base/people/paul-christiano/",
"filePath": "knowledge-base/people/paul-christiano.mdx",
"title": "Paul Christiano",
"quality": 39,
"readerImportance": 28,
"researchImportance": 36,
"tacticalValue": null,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-01-02",
"dateCreated": "2026-02-15",
"summary": "Comprehensive biography of Paul Christiano documenting his technical contributions (IDA, debate, scalable oversight), risk assessment (~10-20% P(doom), AGI 2030s-2040s), and evolution from higher optimism to current moderate concern. Documents implementation of his ideas at major labs (RLHF at OpenAI, Constitutional AI at Anthropic) with specific citation to papers and organizational impact.",
"description": "Founder of ARC, creator of iterated amplification and AI safety via debate. Current risk assessment ~10-20% P(doom), AGI 2030s-2040s. Pioneered prosaic alignment approach focusing on scalable oversight mechanisms.",
"ratings": {
"novelty": 2,
"rigor": 4.5,
"completeness": 6,
"actionability": 2
},
"category": "people",
"subcategory": "safety-researchers",
"clusters": [
"ai-safety"
],
"metrics": {
"wordCount": 1111,
"tableCount": 12,
"diagramCount": 0,
"internalLinks": 47,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.1,
"sectionCount": 25,
"hasOverview": true,
"structuralScore": 11
},
"suggestedQuality": 73,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 1111,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 18,
"backlinkCount": 52,
"hallucinationRisk": {
"level": "high",
"score": 85,
"factors": [
"biographical-claims",
"no-citations",
"low-quality-score",
"few-external-sources"
]
},
"entityType": "person",
"redundancy": {
"maxSimilarity": 13,
"similarPages": [
{
"id": "chai",
"title": "Center for Human-Compatible AI (CHAI)",
"path": "/knowledge-base/organizations/chai/",
"similarity": 13
},
{
"id": "holden-karnofsky",
"title": "Holden Karnofsky",
"path": "/knowledge-base/people/holden-karnofsky/",
"similarity": 12
},
{
"id": "safety-research-value",
"title": "Expected Value of AI Safety Research",
"path": "/knowledge-base/models/safety-research-value/",
"similarity": 11
},
{
"id": "conjecture",
"title": "Conjecture",
"path": "/knowledge-base/organizations/conjecture/",
"similarity": 11
},
{
"id": "scheming-likelihood-model",
"title": "Scheming Likelihood Assessment",
"path": "/knowledge-base/models/scheming-likelihood-model/",
"similarity": 10
}
]
},
"coverage": {
"passing": 7,
"total": 13,
"targets": {
"tables": 4,
"diagrams": 0,
"internalLinks": 9,
"externalLinks": 6,
"footnotes": 3,
"references": 3
},
"actuals": {
"tables": 12,
"diagrams": 0,
"internalLinks": 47,
"externalLinks": 0,
"footnotes": 0,
"references": 14,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "green",
"diagrams": "red",
"internalLinks": "green",
"externalLinks": "red",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:2 R:4.5 A:2 C:6"
},
"readerRank": 467,
"researchRank": 371,
"recommendedScore": 102.75
}
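The coverage block above pairs per-item targets with actuals and rolls them up into passing: 7 of total: 13. A minimal sketch of one plausible pass rule, assuming a numeric item is green only when its actual meets a nonzero target; this happens to reproduce the six numeric items in this record, while the remaining seven items (summary, schedule, entity, editHistory, overview, quotes, accuracy) appear to be presence checks not modeled here.

```python
# Sketch: roll numeric coverage targets/actuals up into green/red items and a
# passing count. The pass rule (actual >= target, with target > 0) is assumed.
def coverage_items(targets: dict, actuals: dict) -> dict:
    return {
        name: "green" if target > 0 and actuals.get(name, 0) >= target else "red"
        for name, target in targets.items()
    }


targets = {"tables": 4, "diagrams": 0, "internalLinks": 9,
           "externalLinks": 6, "footnotes": 3, "references": 3}
actuals = {"tables": 12, "diagrams": 0, "internalLinks": 47,
           "externalLinks": 0, "footnotes": 0, "references": 14}

items = coverage_items(targets, actuals)
print(items)  # matches the six numeric items in the record above
print(sum(status == "green" for status in items.values()))  # 3 of 6; the record's 7/13 adds presence checks
```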
External Links
{
"eaForum": "https://forum.effectivealtruism.org/topics/paul-christiano",
"wikidata": "https://www.wikidata.org/wiki/Q64769299"
}
Backlinks (52)
| id | title | type | relationship |
|---|---|---|---|
| capability-alignment-race | Capability-Alignment Race Model | analysis | — |
| model-organisms-of-misalignment | Model Organisms of Misalignment | analysis | — |
| metr | METR | organization | — |
| arc | Alignment Research Center (ARC) | organization | — |
| miri | Machine Intelligence Research Institute (MIRI) | organization | — |
| us-aisi | US AI Safety Institute (now CAISI) | organization | — |
| arc-evals | ARC Evaluations | organization | leads-to |
| long-term-benefit-trust | Anthropic Long-Term Benefit Trust | organization | — |
| nist-ai | NIST and AI Safety | organization | — |
| eliezer-yudkowsky | Eliezer Yudkowsky | person | — |
| rlhf | RLHF | research-area | research |
| scalable-oversight | Scalable Oversight | research-area | research |
| accident-risks | AI Accident Risk Cruxes | crux | — |
| is-ai-xrisk-real | Is AI Existential Risk Real? | crux | — |
| why-alignment-easy | Why Alignment Might Be Easy | argument | — |
| why-alignment-hard | Why Alignment Might Be Hard | argument | — |
| deep-learning-era | Deep Learning Revolution (2012-2020) | historical | — |
| miri-era | The MIRI Era (2000-2015) | historical | — |
| __index__/knowledge-base | Knowledge Base | concept | — |
| ai-timelines | AI Timelines | concept | — |
| anthropic-pledge-enforcement | Anthropic Founder Pledges: Interventions to Increase Follow-Through | analysis | — |
| defense-in-depth-model | Defense in Depth Model | analysis | — |
| power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis | — |
| scheming-likelihood-model | Scheming Likelihood Assessment | analysis | — |
| anthropic-investors | Anthropic (Funder) | analysis | — |
| fli | Future of Life Institute (FLI) | organization | — |
| __index__/knowledge-base/organizations | Organizations | concept | — |
| ltff | Long-Term Future Fund (LTFF) | organization | — |
| manifund | Manifund | organization | — |
| redwood-research | Redwood Research | organization | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |
| tsmc | TSMC | organization | — |
| ajeya-cotra | Ajeya Cotra | person | — |
| dario-amodei | Dario Amodei | person | — |
| dustin-moskovitz | Dustin Moskovitz | person | — |
| eliezer-yudkowsky-predictions | Eliezer Yudkowsky: Track Record | concept | — |
| evan-hubinger | Evan Hubinger | person | — |
| gwern | Gwern Branwen | person | — |
| helen-toner | Helen Toner | person | — |
| holden-karnofsky | Holden Karnofsky | person | — |
| __index__/knowledge-base/people | People | concept | — |
| jan-leike | Jan Leike | person | — |
| jared-kaplan | Jared Kaplan | person | — |
| tom-brown | Tom Brown | person | — |
| ai-control | AI Control | research-area | — |
| alignment | AI Alignment | approach | — |
| research-agendas | AI Alignment Research Agenda Comparison | crux | — |
| sleeper-agent-detection | Sleeper Agent Detection | approach | — |
| existential-risk | Existential Risk from AI | concept | — |
| superintelligence | Superintelligence | concept | — |
| doomer | AI Doomer Worldview | concept | — |
| optimistic | Optimistic Alignment Worldview | concept | — |
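The 52 rows above match the record's backlinkCount of 52. A minimal sketch of how backlinks might be aggregated by inverting each page's outgoing links across the corpus; the per-page links field and the toy pages are assumptions, since the record above exposes only convertedLinkCount, not the link targets themselves.

```python
# Sketch: derive backlinks for every page by inverting outgoing links across all
# page records. The "links" field holding outgoing target ids is an assumption.
from collections import defaultdict


def build_backlinks(pages: list[dict]) -> dict[str, list[dict]]:
    """Map each page id to the pages that link to it."""
    backlinks: dict[str, list[dict]] = defaultdict(list)
    for page in pages:
        for target_id in page.get("links", []):
            backlinks[target_id].append({
                "id": page["id"],
                "title": page["title"],
                "type": page.get("entityType", "concept"),
            })
    return backlinks


# Toy corpus: two of the linking pages listed in the table above.
pages = [
    {"id": "arc", "title": "Alignment Research Center (ARC)",
     "entityType": "organization", "links": ["paul-christiano"]},
    {"id": "rlhf", "title": "RLHF", "entityType": "research-area",
     "links": ["paul-christiano"]},
]
print(len(build_backlinks(pages)["paul-christiano"]))  # 2 in this toy corpus; 52 in the real one
```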