Alignment Research Center (ARC)
arc · organization · Path: /knowledge-base/organizations/arc/
Entity ID (EID): E25
Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
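As a rough illustration of the merge described above (the actual build script is not part of this page, and every name and field split below is hypothetical), the record shown below could be assembled along these lines:

```typescript
// Hypothetical sketch only: names and field splits are illustrative,
// not taken from the real build code.

interface Ratings {
  focus: number;
  novelty: number;
  rigor: number;
  completeness: number;
  concreteness: number;
  actionability: number;
  objectivity: number;
}

// Fields the MDX frontmatter might contribute (assumed split of fields).
interface Frontmatter {
  title: string;
  description: string;
  summary: string;
  quality: number;
  ratings: Ratings;
}

// Fields the Entity YAML might contribute (assumed split of fields).
interface EntityYaml {
  wikiId: string;       // e.g. "E25"
  entityType: string;   // e.g. "organization"
  category: string;
  subcategory: string;
  clusters: string[];
}

// Metrics computed from the rendered page at build time.
interface ComputedMetrics {
  wordCount: number;
  tableCount: number;
  internalLinks: number;
  externalLinks: number;
  footnoteCount: number;
}

// Compact ratings summary such as "N:2.5 R:7 A:3.5 C:8", which appears to
// abbreviate novelty, rigor, actionability, and completeness.
function ratingsString(r: Ratings): string {
  return `N:${r.novelty} R:${r.rigor} A:${r.actionability} C:${r.completeness}`;
}

// Merge the three sources into a single page record.
function buildPageRecord(
  id: string,
  path: string,
  fm: Frontmatter,
  entity: EntityYaml,
  metrics: ComputedMetrics,
) {
  return { id, path, ...fm, ...entity, metrics, ratingsString: ratingsString(fm.ratings) };
}
```

Called with the values in the record below, a function like this would reproduce derived fields such as the ratings summary "N:2.5 R:7 A:3.5 C:8".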
{
"id": "arc",
"wikiId": "E25",
"path": "/knowledge-base/organizations/arc/",
"filePath": "knowledge-base/organizations/arc.mdx",
"title": "Alignment Research Center (ARC)",
"quality": 57,
"readerImportance": 38.5,
"researchImportance": 17.5,
"tacticalValue": 62,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-02-27",
"dateCreated": "2026-02-15",
"summary": "Comprehensive reference page on ARC (Alignment Research Center), covering its evolution from a dual theory/evals organization to ARC Theory (3 permanent researchers) plus the METR spin-out (December 2023), with specific funding figures (\\$265K Coefficient Giving (formerly Open Philanthropy) grant, \\$1.25M returned FTX grant), ELK prize details (\\$274K total), and Christiano's 20%/46% doom estimates. Content is well-sourced compilation of publicly available information with no original analysis.",
"description": "AI safety research nonprofit operating as ARC Theory, investigating fundamental alignment problems including Eliciting Latent Knowledge and heuristic arguments for neural network behavior.",
"ratings": {
"focus": 7.5,
"novelty": 2.5,
"rigor": 7,
"completeness": 8,
"concreteness": 7.5,
"actionability": 3.5,
"objectivity": 7
},
"category": "organizations",
"subcategory": "safety-orgs",
"clusters": [
"ai-safety",
"community",
"governance"
],
"metrics": {
"wordCount": 3666,
"tableCount": 11,
"diagramCount": 0,
"internalLinks": 43,
"externalLinks": 28,
"footnoteCount": 22,
"bulletRatio": 0.12,
"sectionCount": 29,
"hasOverview": true,
"structuralScore": 14
},
"suggestedQuality": 93,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 3666,
"unconvertedLinks": [
{
"text": "ELK Report",
"url": "https://docs.google.com/document/d/1WwsnJQstPq91_Yh-Ch2XRL8H_EpsnjrC1dwZXR37PC8/edit",
"resourceId": "ecd797db5ba5d02c",
"resourceTitle": "eliciting latent knowledge"
},
{
"text": "GPT-4 System Card",
"url": "https://cdn.openai.com/papers/gpt-4-system-card.pdf",
"resourceId": "ebab6e05661645c5",
"resourceTitle": "GPT-4 System Card"
},
{
"text": "ARC Official Homepage",
"url": "https://www.alignment.org/",
"resourceId": "0562f8c207d8b63f",
"resourceTitle": "Alignment Research Center"
},
{
"text": "GPT-4 System Card",
"url": "https://cdn.openai.com/papers/gpt-4-system-card.pdf",
"resourceId": "ebab6e05661645c5",
"resourceTitle": "GPT-4 System Card"
},
{
"text": "More Information About the Dangerous Capability Evaluations We Did With GPT-4 and Claude",
"url": "https://www.lesswrong.com/posts/4Gt42jX7RiaNaxCwP/more-information-about-the-dangerous-capability-evaluations",
"resourceId": "483cab550255b00f",
"resourceTitle": "More information about the dangerous capability evaluations we did with GPT-4 and Claude."
},
{
"text": "Advanced AI Evaluations at AISI: May Update",
"url": "https://www.aisi.gov.uk/blog/advanced-ai-evaluations-may-update",
"resourceId": "4e56cdf6b04b126b",
"resourceTitle": "UK AI Safety Institute renamed to AI Security Institute"
},
{
"text": "My views on \"doom\"",
"url": "https://www.lesswrong.com/posts/xWMqsvHapP3nwdSW8/my-views-on-doom",
"resourceId": "ed73cbbe5dec0db9",
"resourceTitle": "My views on “doom”"
},
{
"text": "An Update on METR's Preliminary Evaluations of Claude 3.5 Sonnet and o1",
"url": "https://metr.org/blog/2025-01-31-update-sonnet-o1-evals/",
"resourceId": "89b92e6423256fc4",
"resourceTitle": "METR Capability Evaluations Update: Claude Sonnet and OpenAI o1"
},
{
"text": "Common Elements of Frontier AI Safety Policies",
"url": "https://metr.org/blog/2025-12-09-common-elements-of-frontier-ai-safety-policies/",
"resourceId": "c8782940b880d00f",
"resourceTitle": "METR's analysis of 12 companies"
}
],
"unconvertedLinkCount": 9,
"convertedLinkCount": 2,
"backlinkCount": 43,
"hallucinationRisk": {
"level": "medium",
"score": 35,
"factors": [
"biographical-claims",
"moderately-cited",
"high-rigor"
]
},
"entityType": "organization",
"redundancy": {
"maxSimilarity": 18,
"similarPages": [
{
"id": "research-agendas",
"title": "AI Alignment Research Agenda Comparison",
"path": "/knowledge-base/responses/research-agendas/",
"similarity": 18
},
{
"id": "cais",
"title": "Center for AI Safety (CAIS)",
"path": "/knowledge-base/organizations/cais/",
"similarity": 16
},
{
"id": "frontier-model-forum",
"title": "Frontier Model Forum",
"path": "/knowledge-base/organizations/frontier-model-forum/",
"similarity": 16
},
{
"id": "anthropic-core-views",
"title": "Anthropic Core Views",
"path": "/knowledge-base/responses/anthropic-core-views/",
"similarity": 16
},
{
"id": "technical-research",
"title": "Technical AI Safety Research",
"path": "/knowledge-base/responses/technical-research/",
"similarity": 16
}
]
},
"coverage": {
"passing": 8,
"total": 13,
"targets": {
"tables": 15,
"diagrams": 1,
"internalLinks": 29,
"externalLinks": 18,
"footnotes": 11,
"references": 11
},
"actuals": {
"tables": 11,
"diagrams": 0,
"internalLinks": 43,
"externalLinks": 28,
"footnotes": 22,
"references": 15,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "amber",
"diagrams": "red",
"internalLinks": "green",
"externalLinks": "green",
"footnotes": "green",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:2.5 R:7 A:3.5 C:8"
},
"readerRank": 384,
"researchRank": 505,
"recommendedScore": 149.92
}
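In the coverage block above, each item pairs a target with an actual count and a green/amber/red status. As an illustration only, one way such statuses could be derived is sketched here; the thresholds are assumed, not taken from the build code:

```typescript
// Hypothetical status derivation for coverage items; the 0.6 cutoff is assumed.
type Status = "green" | "amber" | "red";

function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green";        // target met or exceeded
  if (actual >= 0.6 * target) return "amber";  // close to target (assumed cutoff)
  return "red";                                // well short of target
}

// Checked against the record above: tables 11 of a 15 target, diagrams 0 of 1,
// internal links 43 of 29.
const examples = {
  tables: coverageStatus(11, 15),
  diagrams: coverageStatus(0, 1),
  internalLinks: coverageStatus(43, 29),
};
console.log(examples); // { tables: "amber", diagrams: "red", internalLinks: "green" }
```

With these assumed thresholds the three examples come out as amber, red, and green, matching the statuses recorded above, though the real cutoffs may differ.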
External Links

{
"eaForum": "https://forum.effectivealtruism.org/topics/alignment-research-center"
}Backlinks (43)
| id | title | type | relationship |
|---|---|---|---|
| situational-awareness | Situational Awareness | capability | — |
| apollo-research | Apollo Research | organization | — |
| metr | METR | organization | — |
| miri | Machine Intelligence Research Institute (MIRI) | organization | — |
| redwood-research | Redwood Research | organization | — |
| paul-christiano | Paul Christiano | person | — |
| scalable-oversight | Scalable Oversight | research-area | research |
| sandbagging | AI Capability Sandbagging | risk | — |
| coding | Autonomous Coding | capability | — |
| language-models | Large Language Models | capability | — |
| accident-risks | AI Accident Risk Cruxes | crux | — |
| is-ai-xrisk-real | Is AI Existential Risk Real? | crux | — |
| why-alignment-hard | Why Alignment Might Be Hard | argument | — |
| ea-epistemic-failures-in-the-ftx-era | EA Epistemic Failures in the FTX Era | concept | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| ai-talent-market-dynamics | AI Talent Market Dynamics | analysis | — |
| capability-alignment-race | Capability-Alignment Race Model | analysis | — |
| deceptive-alignment-decomposition | Deceptive Alignment Decomposition Model | analysis | — |
| goal-misgeneralization-probability | Goal Misgeneralization Probability Model | analysis | — |
| instrumental-convergence-framework | Instrumental Convergence Framework | analysis | — |
| model-organisms-of-misalignment | Model Organisms of Misalignment | analysis | — |
| planning-for-frontier-lab-scaling | Planning for Frontier Lab Scaling | analysis | — |
| power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis | — |
| risk-interaction-network | Risk Interaction Network | analysis | — |
| safety-research-value | Expected Value of AI Safety Research | analysis | — |
| scheming-likelihood-model | Scheming Likelihood Assessment | analysis | — |
| carnegie-endowment | Carnegie Endowment for International Peace | organization | — |
| conjecture | Conjecture | organization | — |
| far-ai | FAR AI | organization | — |
| ftx-collapse-ea-funding-lessons | FTX Collapse: Lessons for EA Funding Resilience | concept | — |
| __index__/knowledge-base/organizations | Organizations | concept | — |
| long-term-benefit-trust | Anthropic Long-Term Benefit Trust | organization | — |
| mats | MATS ML Alignment Theory Scholars program | organization | — |
| nist-ai | NIST and AI Safety | organization | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |
| vara | Value Aligned Research Advisors | organization | — |
| dustin-moskovitz | Dustin Moskovitz | person | — |
| elon-musk | Elon Musk | person | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| ilya-sutskever | Ilya Sutskever | person | — |
| ai-control | AI Control | research-area | — |
| alignment | AI Alignment | approach | — |
| x-com-epistemics | X.com Platform Epistemics | approach | — |