
Center for AI Safety (CAIS)

ID: cais · Type: organization · Path: /knowledge-base/organizations/cais/
Entity ID (EID): E47
45 backlinks · Quality: 42 · Updated: 2026-04-02
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "cais",
  "wikiId": "E47",
  "path": "/knowledge-base/organizations/cais/",
  "filePath": "knowledge-base/organizations/cais.mdx",
  "title": "Center for AI Safety (CAIS)",
  "quality": 42,
  "readerImportance": 88.5,
  "researchImportance": 17.5,
  "tacticalValue": 72,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-04-02",
  "dateCreated": "2026-02-15",
  "summary": "CAIS is a nonprofit research organization founded by Dan Hendrycks that has distributed compute grants to researchers, published technical AI safety papers including the representation engineering and MACHIAVELLI benchmark papers, and organized the May 2023 Statement on AI Risk signed by over 350 AI researchers and industry leaders. The organization focuses on technical safety research, field-building, and policy communication.",
  "description": "Research organization focused on AI safety through technical research, field-building, and public communication, including the May 2023 Statement on AI Risk signed by prominent AI researchers and industry leaders",
  "ratings": {
    "novelty": 2.5,
    "rigor": 4,
    "completeness": 5.5,
    "actionability": 3.5
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "community",
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 2955,
    "tableCount": 6,
    "diagramCount": 0,
    "internalLinks": 61,
    "externalLinks": 21,
    "footnoteCount": 0,
    "bulletRatio": 0.26,
    "sectionCount": 27,
    "hasOverview": true,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 2955,
  "unconvertedLinks": [
    {
      "text": "co-founded CAIS with Oliver Zhang",
      "url": "https://en.wikipedia.org/wiki/Center_for_AI_Safety",
      "resourceId": "0c57ac12fb1e760b",
      "resourceTitle": "Center for AI Safety – Wikipedia."
    },
    {
      "text": "2021–present",
      "url": "https://forum.effectivealtruism.org/posts/9RYvJu2iNJMXgWCBn/introducing-the-ml-safety-scholars-program",
      "resourceId": "65d92d482b71030d",
      "resourceTitle": "Introducing the ML Safety Scholars Program"
    },
    {
      "text": "introduced in 2021",
      "url": "https://forum.effectivealtruism.org/posts/9RYvJu2iNJMXgWCBn/introducing-the-ml-safety-scholars-program",
      "resourceId": "65d92d482b71030d",
      "resourceTitle": "Introducing the ML Safety Scholars Program"
    },
    {
      "text": "adversarial attacks on large language models",
      "url": "https://en.wikipedia.org/wiki/Center_for_AI_Safety",
      "resourceId": "0c57ac12fb1e760b",
      "resourceTitle": "Center for AI Safety – Wikipedia."
    },
    {
      "text": "California SB 1047",
      "url": "https://en.wikipedia.org/wiki/Center_for_AI_Safety",
      "resourceId": "0c57ac12fb1e760b",
      "resourceTitle": "Center for AI Safety – Wikipedia."
    },
    {
      "text": "organized into four functional teams",
      "url": "https://safe.ai/about",
      "resourceId": "kb-cf6c0895df42bac5",
      "resourceTitle": "About Us | CAIS"
    }
  ],
  "unconvertedLinkCount": 6,
  "convertedLinkCount": 15,
  "backlinkCount": 45,
  "hallucinationRisk": {
    "level": "high",
    "score": 75,
    "factors": [
      "biographical-claims",
      "no-citations"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 18,
    "similarPages": [
      {
        "id": "dan-hendrycks",
        "title": "Dan Hendrycks",
        "path": "/knowledge-base/people/dan-hendrycks/",
        "similarity": 18
      },
      {
        "id": "ea-longtermist-wins-losses",
        "title": "EA and Longtermist Wins and Losses",
        "path": "/knowledge-base/history/ea-longtermist-wins-losses/",
        "similarity": 17
      },
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 17
      },
      {
        "id": "ford-foundation",
        "title": "Ford Foundation",
        "path": "/knowledge-base/organizations/ford-foundation/",
        "similarity": 17
      },
      {
        "id": "is-ai-xrisk-real",
        "title": "Is AI Existential Risk Real?",
        "path": "/knowledge-base/debates/is-ai-xrisk-real/",
        "similarity": 16
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-18",
      "branch": "claude/fix-issue-240-N5irU",
      "title": "Surface tacticalValue in /wiki table and score 53 pages",
      "summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
      "model": "sonnet-4",
      "duration": "~30min"
    }
  ],
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 12,
      "diagrams": 1,
      "internalLinks": 24,
      "externalLinks": 15,
      "footnotes": 9,
      "references": 9
    },
    "actuals": {
      "tables": 6,
      "diagrams": 0,
      "internalLinks": 61,
      "externalLinks": 21,
      "footnotes": 0,
      "references": 16,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 1,
    "ratingsString": "N:2.5 R:4 A:3.5 C:5.5"
  },
  "readerRank": 28,
  "researchRank": 506,
  "recommendedScore": 149.7
}
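The green/amber/red values under coverage.items look derivable from the targets and actuals blocks. A minimal TypeScript sketch of one plausible threshold rule, inferred from this record's numbers (6/12 tables → amber, 0/1 diagrams → red, 61/24 internal links → green); the coverageStatus name and the exact cutoffs are assumptions, not the wiki's actual build code:

// Hypothetical reconstruction of the coverage colour rule: meeting the
// target is green, reaching at least half of it is amber, otherwise red.
type Status = "green" | "amber" | "red";

function coverageStatus(actual: number, target: number): Status {
  const ratio = target > 0 ? actual / target : 0; // cutoffs are assumed
  if (ratio >= 1) return "green";
  if (ratio >= 0.5) return "amber";
  return "red";
}

// Values copied from this record's coverage block.
const targets = { tables: 12, diagrams: 1, internalLinks: 24, externalLinks: 15, footnotes: 9, references: 9 };
const actuals = { tables: 6, diagrams: 0, internalLinks: 61, externalLinks: 21, footnotes: 0, references: 16 };

for (const key of Object.keys(targets) as (keyof typeof targets)[]) {
  console.log(key, coverageStatus(actuals[key], targets[key]));
}
// tables amber, diagrams red, internalLinks green,
// externalLinks green, footnotes red, references green

Under this rule the six computed statuses match the record's items exactly; the quotes and accuracy items (both red with totals of 0) would need a separate rule, which is not reconstructable from this page alone.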
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/center-for-ai-safety",
  "wikidata": "https://www.wikidata.org/wiki/Q119084607"
}
Backlinks (45)
id | title | type | relationship
cais-action-fund | Center for AI Safety Action Fund | organization | related
dan-hendrycks | Dan Hendrycks | person |
california-sb1047 | Safe and Secure Innovation for Frontier Artificial Intelligence Models Act | policy |
capability-unlearning | Capability Unlearning / Removal | approach |
pause | Pause Advocacy | approach |
maim | MAIM (Mutually Assured AI Malfunction) | approach |
representation-engineering | Representation Engineering | approach |
power-seeking | Power-Seeking AI | risk |
is-ai-xrisk-real | Is AI Existential Risk Real? | crux |
ai-compute-scaling-metrics | AI Compute Scaling Metrics | analysis |
ai-risk-portfolio-analysis | AI Risk Portfolio Analysis | analysis |
bioweapons-ai-uplift | AI Uplift Assessment Model | analysis |
intervention-effectiveness-matrix | Intervention Effectiveness Matrix | analysis |
risk-activation-timeline | Risk Activation Timeline Model | analysis |
risk-interaction-matrix | Risk Interaction Matrix Model | analysis |
risk-interaction-network | Risk Interaction Network | analysis |
ai-impacts | AI Impacts | organization |
ai-now-institute | AI Now Institute | organization |
americans-for-responsible-innovation | Americans for Responsible Innovation | organization |
center-for-democracy-and-technology | Center for Democracy and Technology | organization |
chai | Center for Human-Compatible AI (CHAI) | organization |
cnas | Center for a New American Security (CNAS) | organization |
deepmind | Google DeepMind | organization |
elon-musk-philanthropy | Elon Musk (Funder) | analysis |
ford-foundation | Ford Foundation | organization |
funders-overview | Longtermist Funders (Overview) | concept |
__index__/knowledge-base/organizations | Organizations | concept |
longview-philanthropy | Longview Philanthropy | organization |
mats | MATS ML Alignment Theory Scholars program | organization |
safety-orgs-overview | AI Safety Organizations (Overview) | concept |
secure-ai-project | Secure AI Project | organization |
sff | Survival and Flourishing Fund (SFF) | organization |
dustin-moskovitz | Dustin Moskovitz | person |
geoffrey-hinton | Geoffrey Hinton | person |
__index__/knowledge-base/people | People | concept |
jaan-tallinn | Jaan Tallinn | person |
nick-beckstead | Nick Beckstead | person |
stuart-russell | Stuart Russell | person |
ai-forecasting | AI-Augmented Forecasting | approach |
corporate | Corporate AI Safety Responses | approach |
eval-saturation | Eval Saturation & The Evals Gap | approach |
failed-stalled-proposals | Failed and Stalled AI Policy Proposals | analysis |
intervention-evaluation-for-political-stability | Intervention Evaluation for Political Stability | approach |
us-state-legislation | US State AI Legislation | analysis |
existential-risk | Existential Risk from AI | concept |
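For context on where these rows come from: a minimal sketch, assuming the build step derives backlinks by scanning every page record's converted internal links for references to this page's id. The Page and InternalLink shapes and the backlinksFor helper are hypothetical names, not the wiki's actual schema:

// Hypothetical shapes; the real database.json records are richer (see above).
interface InternalLink {
  targetId: string;      // id of the page the link points at
  relationship?: string; // e.g. "related", when the source declares one
}

interface Page {
  id: string;
  title: string;
  type: string; // organization, person, approach, analysis, ...
  links: InternalLink[];
}

// Collect every page that links to `targetId`, carrying the declared
// relationship when present; applied to "cais" this would reproduce
// the 45-row table above.
function backlinksFor(targetId: string, pages: Page[]) {
  return pages
    .filter((p) => p.links.some((l) => l.targetId === targetId))
    .map((p) => ({
      id: p.id,
      title: p.title,
      type: p.type,
      relationship: p.links.find((l) => l.targetId === targetId)?.relationship ?? "",
    }));
}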