Center for Human-Compatible AI (CHAI)
ID: chai
Type: organization
Path: /knowledge-base/organizations/chai/
Entity ID (EID): E57

Page Record (database.json), merged from MDX frontmatter + Entity YAML + computed metrics at build time
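
The heading describes a three-source merge. Here is a minimal sketch of that step under stated assumptions: `buildPageRecord`, `computeMetrics`, and the gray-matter/js-yaml loaders are illustrative stand-ins, not the site's actual build code.

```ts
// Minimal sketch of the build-time merge; all names here are hypothetical.
import { readFileSync } from "fs";
import matter from "gray-matter"; // parses MDX frontmatter
import yaml from "js-yaml";       // parses the Entity YAML

function buildPageRecord(mdxPath: string, entityYamlPath: string) {
  // 1. Editorial fields (title, summary, ratings, ...) come from MDX frontmatter.
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));
  // 2. Identity fields (wikiId, entityType, ...) come from the Entity YAML.
  const entity = yaml.load(readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;
  // 3. Metrics are computed from the MDX body at build time.
  const metrics = computeMetrics(content);
  // Spread order decides collisions: later sources win.
  return { ...frontmatter, ...entity, metrics };
}

// Two of the simpler metrics in the record, derived from the raw body.
function computeMetrics(content: string) {
  const lines = content.split("\n");
  const bullets = lines.filter((l) => /^\s*[-*+]\s/.test(l)).length;
  return {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    bulletRatio: lines.length ? Number((bullets / lines.length).toFixed(2)) : 0,
  };
}
```

Under that spread order, a `wikiId` set in the Entity YAML would override one set in frontmatter, matching the source order in the heading. The merged record: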
```json
{
  "id": "chai",
  "wikiId": "E57",
  "path": "/knowledge-base/organizations/chai/",
  "filePath": "knowledge-base/organizations/chai.mdx",
  "title": "Center for Human-Compatible AI (CHAI)",
  "quality": 37,
  "readerImportance": 68.5,
  "researchImportance": 61,
  "tacticalValue": null,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2025-12-24",
  "dateCreated": "2026-02-15",
  "summary": "CHAI is UC Berkeley's AI safety research center founded by Stuart Russell in 2016, pioneering cooperative inverse reinforcement learning and human-compatible AI frameworks. The center has trained 30+ PhD students and influenced major labs (OpenAI's RLHF, Anthropic's Constitutional AI), though it faces scalability challenges in preference learning approaches.",
  "description": "UC Berkeley research center founded by Stuart Russell developing cooperative AI frameworks and preference learning approaches to ensure AI systems remain beneficial and deferential to humans",
  "ratings": {
    "novelty": 2.5,
    "rigor": 4.5,
    "completeness": 6.5,
    "actionability": 2
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "ai-safety",
    "community"
  ],
  "metrics": {
    "wordCount": 1237,
    "tableCount": 11,
    "diagramCount": 0,
    "internalLinks": 23,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.24,
    "sectionCount": 26,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 1237,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 10,
  "backlinkCount": 33,
  "hallucinationRisk": {
    "level": "high",
    "score": 85,
    "factors": [
      "biographical-claims",
      "no-citations",
      "low-quality-score",
      "few-external-sources"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 15,
    "similarPages": [
      {
        "id": "cirl",
        "title": "Cooperative IRL (CIRL)",
        "path": "/knowledge-base/responses/cirl/",
        "similarity": 15
      },
      {
        "id": "holden-karnofsky",
        "title": "Holden Karnofsky",
        "path": "/knowledge-base/people/holden-karnofsky/",
        "similarity": 14
      },
      {
        "id": "dario-amodei",
        "title": "Dario Amodei",
        "path": "/knowledge-base/people/dario-amodei/",
        "similarity": 13
      },
      {
        "id": "paul-christiano",
        "title": "Paul Christiano",
        "path": "/knowledge-base/people/paul-christiano/",
        "similarity": 13
      },
      {
        "id": "cooperative-ai",
        "title": "Cooperative AI",
        "path": "/knowledge-base/responses/cooperative-ai/",
        "similarity": 13
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 5,
      "diagrams": 0,
      "internalLinks": 10,
      "externalLinks": 6,
      "footnotes": 4,
      "references": 4
    },
    "actuals": {
      "tables": 11,
      "diagrams": 0,
      "internalLinks": 23,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 9,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2.5 R:4.5 A:2 C:6.5"
  },
  "readerRank": 172,
  "researchRank": 215,
  "recommendedScore": 118.38
}
```
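
Within the record, `coverage.items` maps each check to green or red. Comparing `targets` to `actuals`, the numeric rule looks like "meets target and is nonzero": diagrams meets its target of 0 yet still reads red. A sketch of that inferred check follows; the rule is reverse-engineered from this single record, not confirmed against the build code.

```ts
type Status = "green" | "red";

// Inferred rule: a numeric coverage item is green when it meets its target
// AND the actual is nonzero (diagrams: target 0, actual 0, yet red above).
function checkItem(actual: number, target: number): Status {
  return actual >= target && actual > 0 ? "green" : "red";
}

const targets = { tables: 5, diagrams: 0, internalLinks: 10, externalLinks: 6, footnotes: 4, references: 4 };
const actuals = { tables: 11, diagrams: 0, internalLinks: 23, externalLinks: 0, footnotes: 0, references: 9 };

const items: Record<string, Status> = {};
for (const k of Object.keys(targets) as (keyof typeof targets)[]) {
  items[k] = checkItem(actuals[k], targets[k]);
}
// Reproduces the record: tables/internalLinks/references green,
// diagrams/externalLinks/footnotes red. "passing": 7 then counts the
// greens across all 13 items, including the non-numeric checks.
```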
External Links

```json
{
  "lesswrong": "https://www.lesswrong.com/tag/center-for-human-compatible-ai-chai",
  "eaForum": "https://forum.effectivealtruism.org/topics/center-for-human-compatible-ai",
  "wikidata": "https://www.wikidata.org/wiki/Q85751153"
}
```

Backlinks (33)

| id | title | type | relationship |
|---|---|---|---|
| beri | Berkeley Existential Risk Initiative | organization | — |
| stuart-russell | Stuart Russell | person | — |
| value-learning | Value Learning | research-area | research |
| deep-learning-era | Deep Learning Revolution (2012-2020) | historical | — |
| ai-compute-scaling-metrics | AI Compute Scaling Metrics | analysis | — |
| ai-talent-market-dynamics | AI Talent Market Dynamics | analysis | — |
| corrigibility-failure-pathways | Corrigibility Failure Pathways | analysis | — |
| goal-misgeneralization-probability | Goal Misgeneralization Probability Model | analysis | — |
| intervention-effectiveness-matrix | Intervention Effectiveness Matrix | analysis | — |
| mesa-optimization-analysis | Mesa-Optimization Risk Analysis | analysis | — |
| risk-interaction-matrix | Risk Interaction Matrix Model | analysis | — |
| risk-interaction-network | Risk Interaction Network | analysis | — |
| safety-research-allocation | Safety Research Allocation Model | analysis | — |
| safety-research-value | Expected Value of AI Safety Research | analysis | — |
| safety-researcher-gap | AI Safety Talent Supply/Demand Gap Model | analysis | — |
| worldview-intervention-mapping | Worldview-Intervention Mapping | analysis | — |
| apollo-research | Apollo Research | organization | — |
| cais | Center for AI Safety (CAIS) | organization | — |
| carnegie-endowment | Carnegie Endowment for International Peace | organization | — |
| center-for-applied-rationality | Center for Applied Rationality | organization | — |
| __index__/knowledge-base/organizations | Organizations | concept | — |
| lionheart-ventures | Lionheart Ventures | organization | — |
| mats | MATS (ML Alignment Theory Scholars program) | organization | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |
| secure-ai-project | Secure AI Project | organization | — |
| holden-karnofsky | Holden Karnofsky | person | — |
| vipul-naik | Vipul Naik | person | — |
| alignment | AI Alignment | approach | — |
| cirl | Cooperative IRL (CIRL) | approach | — |
| cooperative-ai | Cooperative AI | approach | — |
| evaluation | AI Evaluation | approach | — |
| red-teaming | Red Teaming | research-area | — |
| training-programs | AI Safety Training Programs | approach | — |
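
The table suggests how `backlinkCount` is derived: presumably the inverse of each source page's converted internal links. A sketch under that assumption; the `convertedLinks` array is a hypothetical field, since the record above only exposes `convertedLinkCount` and `backlinkCount`.

```ts
// Sketch of backlink derivation; the convertedLinks array is a hypothetical
// field, since the record only exposes the converted/backlink counts.
interface PageStub {
  id: string;
  title: string;
  entityType: string;
  convertedLinks: string[];
}

// Backlinks to a target = every page whose converted internal links include it.
function backlinksTo(targetId: string, pages: PageStub[]): PageStub[] {
  return pages.filter((p) => p.convertedLinks.includes(targetId));
}

// For this page, backlinksTo("chai", allPages) would yield the 33 rows above.
```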