Longterm Wiki

Alignment Research Center (ARC)

ID: arc · Type: organization · Path: /knowledge-base/organizations/arc/
Entity ID (EID): E25
43 backlinks · Quality: 57 · Updated: 2026-02-27
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time; a minimal sketch of this merge follows the record below.
{
  "id": "arc",
  "wikiId": "E25",
  "path": "/knowledge-base/organizations/arc/",
  "filePath": "knowledge-base/organizations/arc.mdx",
  "title": "Alignment Research Center (ARC)",
  "quality": 57,
  "readerImportance": 38.5,
  "researchImportance": 17.5,
  "tacticalValue": 62,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-02-27",
  "dateCreated": "2026-02-15",
  "summary": "Comprehensive reference page on ARC (Alignment Research Center), covering its evolution from a dual theory/evals organization to ARC Theory (3 permanent researchers) plus the METR spin-out (December 2023), with specific funding figures (\\$265K Coefficient Giving (formerly Open Philanthropy) grant, \\$1.25M returned FTX grant), ELK prize details (\\$274K total), and Christiano's 20%/46% doom estimates. Content is well-sourced compilation of publicly available information with no original analysis.",
  "description": "AI safety research nonprofit operating as ARC Theory, investigating fundamental alignment problems including Eliciting Latent Knowledge and heuristic arguments for neural network behavior.",
  "ratings": {
    "focus": 7.5,
    "novelty": 2.5,
    "rigor": 7,
    "completeness": 8,
    "concreteness": 7.5,
    "actionability": 3.5,
    "objectivity": 7
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "ai-safety",
    "community",
    "governance"
  ],
  "metrics": {
    "wordCount": 3666,
    "tableCount": 11,
    "diagramCount": 0,
    "internalLinks": 43,
    "externalLinks": 28,
    "footnoteCount": 22,
    "bulletRatio": 0.12,
    "sectionCount": 29,
    "hasOverview": true,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 3666,
  "unconvertedLinks": [
    {
      "text": "ELK Report",
      "url": "https://docs.google.com/document/d/1WwsnJQstPq91_Yh-Ch2XRL8H_EpsnjrC1dwZXR37PC8/edit",
      "resourceId": "ecd797db5ba5d02c",
      "resourceTitle": "eliciting latent knowledge"
    },
    {
      "text": "GPT-4 System Card",
      "url": "https://cdn.openai.com/papers/gpt-4-system-card.pdf",
      "resourceId": "ebab6e05661645c5",
      "resourceTitle": "GPT-4 System Card"
    },
    {
      "text": "ARC Official Homepage",
      "url": "https://www.alignment.org/",
      "resourceId": "0562f8c207d8b63f",
      "resourceTitle": "Alignment Research Center"
    },
    {
      "text": "GPT-4 System Card",
      "url": "https://cdn.openai.com/papers/gpt-4-system-card.pdf",
      "resourceId": "ebab6e05661645c5",
      "resourceTitle": "GPT-4 System Card"
    },
    {
      "text": "More Information About the Dangerous Capability Evaluations We Did With GPT-4 and Claude",
      "url": "https://www.lesswrong.com/posts/4Gt42jX7RiaNaxCwP/more-information-about-the-dangerous-capability-evaluations",
      "resourceId": "483cab550255b00f",
      "resourceTitle": "More information about the dangerous capability evaluations we did with GPT-4 and Claude."
    },
    {
      "text": "Advanced AI Evaluations at AISI: May Update",
      "url": "https://www.aisi.gov.uk/blog/advanced-ai-evaluations-may-update",
      "resourceId": "4e56cdf6b04b126b",
      "resourceTitle": "UK AI Safety Institute renamed to AI Security Institute"
    },
    {
      "text": "My views on \"doom\"",
      "url": "https://www.lesswrong.com/posts/xWMqsvHapP3nwdSW8/my-views-on-doom",
      "resourceId": "ed73cbbe5dec0db9",
      "resourceTitle": "My views on “doom”"
    },
    {
      "text": "An Update on METR's Preliminary Evaluations of Claude 3.5 Sonnet and o1",
      "url": "https://metr.org/blog/2025-01-31-update-sonnet-o1-evals/",
      "resourceId": "89b92e6423256fc4",
      "resourceTitle": "METR Capability Evaluations Update: Claude Sonnet and OpenAI o1"
    },
    {
      "text": "Common Elements of Frontier AI Safety Policies",
      "url": "https://metr.org/blog/2025-12-09-common-elements-of-frontier-ai-safety-policies/",
      "resourceId": "c8782940b880d00f",
      "resourceTitle": "METR's analysis of 12 companies"
    }
  ],
  "unconvertedLinkCount": 9,
  "convertedLinkCount": 2,
  "backlinkCount": 43,
  "hallucinationRisk": {
    "level": "medium",
    "score": 35,
    "factors": [
      "biographical-claims",
      "moderately-cited",
      "high-rigor"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 18,
    "similarPages": [
      {
        "id": "research-agendas",
        "title": "AI Alignment Research Agenda Comparison",
        "path": "/knowledge-base/responses/research-agendas/",
        "similarity": 18
      },
      {
        "id": "cais",
        "title": "Center for AI Safety (CAIS)",
        "path": "/knowledge-base/organizations/cais/",
        "similarity": 16
      },
      {
        "id": "frontier-model-forum",
        "title": "Frontier Model Forum",
        "path": "/knowledge-base/organizations/frontier-model-forum/",
        "similarity": 16
      },
      {
        "id": "anthropic-core-views",
        "title": "Anthropic Core Views",
        "path": "/knowledge-base/responses/anthropic-core-views/",
        "similarity": 16
      },
      {
        "id": "technical-research",
        "title": "Technical AI Safety Research",
        "path": "/knowledge-base/responses/technical-research/",
        "similarity": 16
      }
    ]
  },
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 15,
      "diagrams": 1,
      "internalLinks": 29,
      "externalLinks": 18,
      "footnotes": 11,
      "references": 11
    },
    "actuals": {
      "tables": 11,
      "diagrams": 0,
      "internalLinks": 43,
      "externalLinks": 28,
      "footnotes": 22,
      "references": 15,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "green",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2.5 R:7 A:3.5 C:8"
  },
  "readerRank": 384,
  "researchRank": 505,
  "recommendedScore": 149.92
}
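The record above is described as merged from MDX frontmatter, an entity YAML file, and metrics computed at build time. The sketch below shows roughly what such a merge could look like; it is a minimal illustration, assuming hypothetical file locations, package choices (gray-matter, js-yaml), and helper names — none of which are confirmed by this record — with only the field names taken from the data above.

```ts
// Hypothetical sketch of the build-time merge described above; file layout,
// package choices (gray-matter, js-yaml), and helper names are assumptions.
import fs from "node:fs";
import matter from "gray-matter";
import yaml from "js-yaml";

interface ComputedMetrics {
  wordCount: number;
  internalLinks: number;
  externalLinks: number;
  sectionCount: number;
}

interface PageRecord {
  [key: string]: unknown;   // frontmatter + entity fields (id, wikiId, title, quality, ...)
  metrics: ComputedMetrics; // computed at build time
}

function buildPageRecord(mdxPath: string, entityYamlPath: string): PageRecord {
  // 1. MDX frontmatter: title, summary, ratings, update schedule, etc.
  const { data: frontmatter, content } = matter(fs.readFileSync(mdxPath, "utf8"));

  // 2. Entity YAML: stable identifiers such as wikiId ("E25") and entityType.
  const entity = yaml.load(fs.readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;

  // 3. Metrics computed from the page body (rough approximations for illustration).
  const metrics: ComputedMetrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (content.match(/\]\(https?:\/\//g) ?? []).length,
    sectionCount: (content.match(/^#{2,}\s/gm) ?? []).length,
  };

  // Later sources override earlier ones on key collisions: frontmatter < entity < metrics.
  return { ...frontmatter, ...entity, metrics };
}
```

The entity YAML path is illustrative; only the MDX path (filePath) appears in the record. The coverage statuses in the record (green/amber/red) look consistent with comparing the listed actuals against the listed targets, though the exact thresholds the build uses are not documented here.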
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/alignment-research-center"
}
Backlinks (43)
| id | title | type | relationship |
| --- | --- | --- | --- |
| situational-awareness | Situational Awareness | capability | |
| apollo-research | Apollo Research | organization | |
| metr | METR | organization | |
| miri | Machine Intelligence Research Institute (MIRI) | organization | |
| redwood-research | Redwood Research | organization | |
| paul-christiano | Paul Christiano | person | |
| scalable-oversight | Scalable Oversight | research-area | research |
| sandbagging | AI Capability Sandbagging | risk | |
| coding | Autonomous Coding | capability | |
| language-models | Large Language Models | capability | |
| accident-risks | AI Accident Risk Cruxes | crux | |
| is-ai-xrisk-real | Is AI Existential Risk Real? | crux | |
| why-alignment-hard | Why Alignment Might Be Hard | argument | |
| ea-epistemic-failures-in-the-ftx-era | EA Epistemic Failures in the FTX Era | concept | |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | |
| ai-talent-market-dynamics | AI Talent Market Dynamics | analysis | |
| capability-alignment-race | Capability-Alignment Race Model | analysis | |
| deceptive-alignment-decomposition | Deceptive Alignment Decomposition Model | analysis | |
| goal-misgeneralization-probability | Goal Misgeneralization Probability Model | analysis | |
| instrumental-convergence-framework | Instrumental Convergence Framework | analysis | |
| model-organisms-of-misalignment | Model Organisms of Misalignment | analysis | |
| planning-for-frontier-lab-scaling | Planning for Frontier Lab Scaling | analysis | |
| power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis | |
| risk-interaction-network | Risk Interaction Network | analysis | |
| safety-research-value | Expected Value of AI Safety Research | analysis | |
| scheming-likelihood-model | Scheming Likelihood Assessment | analysis | |
| carnegie-endowment | Carnegie Endowment for International Peace | organization | |
| conjecture | Conjecture | organization | |
| far-ai | FAR AI | organization | |
| ftx-collapse-ea-funding-lessons | FTX Collapse: Lessons for EA Funding Resilience | concept | |
| __index__/knowledge-base/organizations | Organizations | concept | |
| long-term-benefit-trust | Anthropic Long-Term Benefit Trust | organization | |
| mats | MATS ML Alignment Theory Scholars program | organization | |
| nist-ai | NIST and AI Safety | organization | |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | |
| vara | Value Aligned Research Advisors | organization | |
| dustin-moskovitz | Dustin Moskovitz | person | |
| elon-musk | Elon Musk | person | |
| geoffrey-hinton | Geoffrey Hinton | person | |
| ilya-sutskever | Ilya Sutskever | person | |
| ai-control | AI Control | research-area | |
| alignment | AI Alignment | approach | |
| x-com-epistemics | X.com Platform Epistemics | approach | |