Center for AI Safety (CAIS)
cais · organization · Path: /knowledge-base/organizations/cais/
Entity ID (EID): E47
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
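A minimal sketch of that merge step, assuming a simplified `PageRecord` shape and a last-spread-wins precedence; the names and the precedence order are illustrative, not the site's actual build code:

```typescript
// Hypothetical build-time merge. Frontmatter and entity YAML are authored;
// metrics are computed from the rendered page. Precedence here (frontmatter
// over entity YAML) is an assumption, not a documented rule.
interface PageRecord {
  id: string;
  wikiId: string;
  title: string;
  metrics: Record<string, number | boolean>;
  [key: string]: unknown;
}

function buildPageRecord(
  entityYaml: Record<string, unknown>,
  frontmatter: Record<string, unknown>,
  metrics: Record<string, number | boolean>,
): PageRecord {
  // Spread order makes frontmatter win on shared keys; computed metrics
  // live under their own key so they never clobber authored fields.
  return { ...entityYaml, ...frontmatter, metrics } as PageRecord;
}
```

The merged record for this page: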
```json
{
"id": "cais",
"wikiId": "E47",
"path": "/knowledge-base/organizations/cais/",
"filePath": "knowledge-base/organizations/cais.mdx",
"title": "Center for AI Safety (CAIS)",
"quality": 42,
"readerImportance": 88.5,
"researchImportance": 17.5,
"tacticalValue": 72,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-04-02",
"dateCreated": "2026-02-15",
"summary": "CAIS is a nonprofit research organization founded by Dan Hendrycks that has distributed compute grants to researchers, published technical AI safety papers including the representation engineering and MACHIAVELLI benchmark papers, and organized the May 2023 Statement on AI Risk signed by over 350 AI researchers and industry leaders. The organization focuses on technical safety research, field-building, and policy communication.",
"description": "Research organization focused on AI safety through technical research, field-building, and public communication, including the May 2023 Statement on AI Risk signed by prominent AI researchers and industry leaders",
"ratings": {
"novelty": 2.5,
"rigor": 4,
"completeness": 5.5,
"actionability": 3.5
},
"category": "organizations",
"subcategory": "safety-orgs",
"clusters": [
"community",
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 2955,
"tableCount": 6,
"diagramCount": 0,
"internalLinks": 61,
"externalLinks": 21,
"footnoteCount": 0,
"bulletRatio": 0.26,
"sectionCount": 27,
"hasOverview": true,
"structuralScore": 14
},
"suggestedQuality": 93,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 2955,
"unconvertedLinks": [
{
"text": "co-founded CAIS with Oliver Zhang",
"url": "https://en.wikipedia.org/wiki/Center_for_AI_Safety",
"resourceId": "0c57ac12fb1e760b",
"resourceTitle": "Center for AI Safety – Wikipedia."
},
{
"text": "2021–present",
"url": "https://forum.effectivealtruism.org/posts/9RYvJu2iNJMXgWCBn/introducing-the-ml-safety-scholars-program",
"resourceId": "65d92d482b71030d",
"resourceTitle": "Introducing the ML Safety Scholars Program"
},
{
"text": "introduced in 2021",
"url": "https://forum.effectivealtruism.org/posts/9RYvJu2iNJMXgWCBn/introducing-the-ml-safety-scholars-program",
"resourceId": "65d92d482b71030d",
"resourceTitle": "Introducing the ML Safety Scholars Program"
},
{
"text": "adversarial attacks on large language models",
"url": "https://en.wikipedia.org/wiki/Center_for_AI_Safety",
"resourceId": "0c57ac12fb1e760b",
"resourceTitle": "Center for AI Safety – Wikipedia."
},
{
"text": "California SB 1047",
"url": "https://en.wikipedia.org/wiki/Center_for_AI_Safety",
"resourceId": "0c57ac12fb1e760b",
"resourceTitle": "Center for AI Safety – Wikipedia."
},
{
"text": "organized into four functional teams",
"url": "https://safe.ai/about",
"resourceId": "kb-cf6c0895df42bac5",
"resourceTitle": "About Us | CAIS"
}
],
"unconvertedLinkCount": 6,
"convertedLinkCount": 15,
"backlinkCount": 45,
"hallucinationRisk": {
"level": "high",
"score": 75,
"factors": [
"biographical-claims",
"no-citations"
]
},
"entityType": "organization",
"redundancy": {
"maxSimilarity": 18,
"similarPages": [
{
"id": "dan-hendrycks",
"title": "Dan Hendrycks",
"path": "/knowledge-base/people/dan-hendrycks/",
"similarity": 18
},
{
"id": "ea-longtermist-wins-losses",
"title": "EA and Longtermist Wins and Losses",
"path": "/knowledge-base/history/ea-longtermist-wins-losses/",
"similarity": 17
},
{
"id": "miri-era",
"title": "The MIRI Era (2000-2015)",
"path": "/knowledge-base/history/miri-era/",
"similarity": 17
},
{
"id": "ford-foundation",
"title": "Ford Foundation",
"path": "/knowledge-base/organizations/ford-foundation/",
"similarity": 17
},
{
"id": "is-ai-xrisk-real",
"title": "Is AI Existential Risk Real?",
"path": "/knowledge-base/debates/is-ai-xrisk-real/",
"similarity": 16
}
]
},
"changeHistory": [
{
"date": "2026-02-18",
"branch": "claude/fix-issue-240-N5irU",
"title": "Surface tacticalValue in /wiki table and score 53 pages",
"summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
"model": "sonnet-4",
"duration": "~30min"
}
],
"coverage": {
"passing": 8,
"total": 13,
"targets": {
"tables": 12,
"diagrams": 1,
"internalLinks": 24,
"externalLinks": 15,
"footnotes": 9,
"references": 9
},
"actuals": {
"tables": 6,
"diagrams": 0,
"internalLinks": 61,
"externalLinks": 21,
"footnotes": 0,
"references": 16,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "amber",
"diagrams": "red",
"internalLinks": "green",
"externalLinks": "green",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 1,
"ratingsString": "N:2.5 R:4 A:3.5 C:5.5"
},
"readerRank": 28,
"researchRank": 506,
"recommendedScore": 149.7
}
```
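The `coverage.items` colors look like a simple threshold rule against `coverage.targets`: every green item meets its target, the lone amber (`tables`, 6 of 12) sits at exactly half, and the reds fall below half, while `quotes` and `accuracy` (0 measured of 0) appear to fail outright. A sketch of that inferred rule; the thresholds fit this record but are reverse-engineered, not a confirmed spec:

```typescript
type Status = "green" | "amber" | "red";

// Inferred thresholds: meeting the target is green, at least half is amber,
// below half is red. Fits this record (tables 6/12 -> amber, diagrams 0/1
// -> red, internalLinks 61/24 -> green) but is an assumption, not documented.
// `quotes` and `accuracy` (0 of 0) show red above, so zero-data items
// presumably bypass this ratio check entirely.
function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green";
  if (actual >= target / 2) return "amber";
  return "red";
}
```

`passing: 8` then matches the count of green items out of the 13 listed.

External Links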
```json
{
"eaForum": "https://forum.effectivealtruism.org/topics/center-for-ai-safety",
"wikidata": "https://www.wikidata.org/wiki/Q119084607"
}
```
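The `backlinkCount` of 45 matches the 45 rows in the table below, which suggests the count is derived by scanning every page record for internal links targeting this page. A minimal sketch of that kind of pass; every name here is invented for illustration, not taken from the site's pipeline:

```typescript
// Hypothetical aggregation pass over all page records.
interface LinkedRecord {
  id: string;
  title: string;
  entityType: string;
  internalLinkTargets: string[]; // ids of pages this record links to (assumed field)
}

// Records linking to `id` become the backlinks table; the array's length
// would populate `backlinkCount`.
function backlinksTo(records: LinkedRecord[], id: string): LinkedRecord[] {
  return records.filter((r) => r.internalLinkTargets.includes(id));
}
```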
Backlinks (45)

| id | title | type | relationship |
|---|---|---|---|
| cais-action-fund | Center for AI Safety Action Fund | organization | related |
| dan-hendrycks | Dan Hendrycks | person | — |
| california-sb1047 | Safe and Secure Innovation for Frontier Artificial Intelligence Models Act | policy | — |
| capability-unlearning | Capability Unlearning / Removal | approach | — |
| pause | Pause Advocacy | approach | — |
| maim | MAIM (Mutually Assured AI Malfunction) | approach | — |
| representation-engineering | Representation Engineering | approach | — |
| power-seeking | Power-Seeking AI | risk | — |
| is-ai-xrisk-real | Is AI Existential Risk Real? | crux | — |
| ai-compute-scaling-metrics | AI Compute Scaling Metrics | analysis | — |
| ai-risk-portfolio-analysis | AI Risk Portfolio Analysis | analysis | — |
| bioweapons-ai-uplift | AI Uplift Assessment Model | analysis | — |
| intervention-effectiveness-matrix | Intervention Effectiveness Matrix | analysis | — |
| risk-activation-timeline | Risk Activation Timeline Model | analysis | — |
| risk-interaction-matrix | Risk Interaction Matrix Model | analysis | — |
| risk-interaction-network | Risk Interaction Network | analysis | — |
| ai-impacts | AI Impacts | organization | — |
| ai-now-institute | AI Now Institute | organization | — |
| americans-for-responsible-innovation | Americans for Responsible Innovation | organization | — |
| center-for-democracy-and-technology | Center for Democracy and Technology | organization | — |
| chai | Center for Human-Compatible AI (CHAI) | organization | — |
| cnas | Center for a New American Security (CNAS) | organization | — |
| deepmind | Google DeepMind | organization | — |
| elon-musk-philanthropy | Elon Musk (Funder) | analysis | — |
| ford-foundation | Ford Foundation | organization | — |
| funders-overview | Longtermist Funders (Overview) | concept | — |
| __index__/knowledge-base/organizations | Organizations | concept | — |
| longview-philanthropy | Longview Philanthropy | organization | — |
| mats | MATS ML Alignment Theory Scholars program | organization | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |
| secure-ai-project | Secure AI Project | organization | — |
| sff | Survival and Flourishing Fund (SFF) | organization | — |
| dustin-moskovitz | Dustin Moskovitz | person | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| __index__/knowledge-base/people | People | concept | — |
| jaan-tallinn | Jaan Tallinn | person | — |
| nick-beckstead | Nick Beckstead | person | — |
| stuart-russell | Stuart Russell | person | — |
| ai-forecasting | AI-Augmented Forecasting | approach | — |
| corporate | Corporate AI Safety Responses | approach | — |
| eval-saturation | Eval Saturation & The Evals Gap | approach | — |
| failed-stalled-proposals | Failed and Stalled AI Policy Proposals | analysis | — |
| intervention-evaluation-for-political-stability | Intervention Evaluation for Political Stability | approach | — |
| us-state-legislation | US State AI Legislation | analysis | — |
| existential-risk | Existential Risk from AI | concept | — |
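Finally, the `redundancy` block in the record reports pairwise similarity scores (max 18, against the Dan Hendrycks page) without specifying the method. One common choice for this kind of page-overlap score is cosine similarity over term-frequency vectors, scaled to 0–100; the sketch below is purely illustrative and almost certainly not the site's exact formula:

```typescript
// Illustrative only: the record's `redundancy.similarity` metric is
// unspecified; this is plain cosine similarity over term counts, scaled 0-100.
function termCounts(text: string): Map<string, number> {
  const counts = new Map<string, number>();
  for (const word of text.toLowerCase().split(/\W+/).filter(Boolean)) {
    counts.set(word, (counts.get(word) ?? 0) + 1);
  }
  return counts;
}

function similarity(a: string, b: string): number {
  const ca = termCounts(a);
  const cb = termCounts(b);
  let dot = 0;
  for (const [word, n] of ca) dot += n * (cb.get(word) ?? 0);
  const norm = (m: Map<string, number>) =>
    Math.sqrt([...m.values()].reduce((s, v) => s + v * v, 0));
  const denom = norm(ca) * norm(cb);
  return denom === 0 ? 0 : Math.round((dot / denom) * 100);
}
```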