Skip to content
Longterm Wiki

US AI Safety Institute (now CAISI)

us-aisi · organization · Path: /knowledge-base/organizations/us-aisi/
E365 — Entity ID (EID)
← Back to page · 39 backlinks · Quality: 91 · Updated: 2026-03-19
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "us-aisi",
  "wikiId": "E365",
  "path": "/knowledge-base/organizations/us-aisi/",
  "filePath": "knowledge-base/organizations/us-aisi.mdx",
  "title": "US AI Safety Institute (now CAISI)",
  "quality": 91,
  "readerImportance": 32,
  "researchImportance": 48,
  "tacticalValue": null,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-03-19",
  "dateCreated": "2026-02-15",
  "summary": "The US AI Safety Institute (AISI), established November 2023 within NIST with $10M budget (FY2025 request $82.7M), conducted pre-deployment evaluations of frontier models through MOUs with OpenAI and Anthropic. Co-led International Network of AI Safety Institutes (11 member nations). Director Elizabeth Kelly named to TIME's 100 Most Influential in AI (2024) but departed February 2025. Renamed to CAISI June 2025 with shift to innovation/competitiveness focus following Trump administration's revocation of EO 14110 and NIST layoffs affecting 73 staff.",
  "description": "US government agency for AI safety research and standard-setting under NIST, established November 2023 with $10M initial budget (FY2025 request of $82.7M) and 290+ consortium members.",
  "ratings": {
    "novelty": 4,
    "rigor": 6,
    "completeness": 7.5,
    "actionability": 5
  },
  "category": "organizations",
  "subcategory": "government",
  "clusters": [
    "ai-safety",
    "governance",
    "community"
  ],
  "metrics": {
    "wordCount": 4842,
    "tableCount": 12,
    "diagramCount": 1,
    "internalLinks": 35,
    "externalLinks": 18,
    "footnoteCount": 0,
    "bulletRatio": 0.07,
    "sectionCount": 28,
    "hasOverview": false,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 4842,
  "unconvertedLinks": [
    {
      "text": "International Network of AI Safety Institutes",
      "url": "https://www.nist.gov/news-events/news/2024/11/fact-sheet-us-department-commerce-us-department-state-launch-international",
      "resourceId": "a65ad4f1a30f1737",
      "resourceTitle": "International Network of AI Safety Institutes"
    },
    {
      "text": "NIST announcement",
      "url": "https://www.nist.gov/artificial-intelligence/artificial-intelligence-safety-institute-consortium-aisic",
      "resourceId": "bfe77d043707ba19",
      "resourceTitle": "AI Safety Institute Consortium (AISIC)"
    },
    {
      "text": "5-10x higher compensation",
      "url": "https://www.brookings.edu/articles/a-technical-ai-government-agency-plays-a-vital-role-in-advancing-ai-innovation-and-trustworthiness/",
      "resourceId": "f7d2ebb409b056f9",
      "resourceTitle": "U.S. AI Safety Institute"
    },
    {
      "text": "$1 billion from Amazon",
      "url": "https://www.nist.gov/news-events/news/2024/11/fact-sheet-us-department-commerce-us-department-state-launch-international",
      "resourceId": "a65ad4f1a30f1737",
      "resourceTitle": "International Network of AI Safety Institutes"
    },
    {
      "text": "TIME's 100 Most Influential People in AI",
      "url": "https://time.com/7012783/elizabeth-kelly/",
      "resourceId": "0694bc71bc9daac0",
      "resourceTitle": "Elizabeth Kelly: Leading America's AI Safety Institute (TIME 100 Most Influential in AI 2024)"
    }
  ],
  "unconvertedLinkCount": 5,
  "convertedLinkCount": 26,
  "backlinkCount": 39,
  "hallucinationRisk": {
    "level": "high",
    "score": 70,
    "factors": [
      "biographical-claims",
      "no-citations",
      "high-quality"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 25,
    "similarPages": [
      {
        "id": "ai-safety-institutes",
        "title": "AI Safety Institutes",
        "path": "/knowledge-base/responses/ai-safety-institutes/",
        "similarity": 25
      },
      {
        "id": "uk-aisi",
        "title": "UK AI Safety Institute",
        "path": "/knowledge-base/organizations/uk-aisi/",
        "similarity": 24
      },
      {
        "id": "international-summits",
        "title": "International AI Safety Summits",
        "path": "/knowledge-base/responses/international-summits/",
        "similarity": 23
      },
      {
        "id": "metr",
        "title": "METR",
        "path": "/knowledge-base/organizations/metr/",
        "similarity": 22
      },
      {
        "id": "standards-bodies",
        "title": "AI Standards Bodies",
        "path": "/knowledge-base/responses/standards-bodies/",
        "similarity": 22
      }
    ]
  },
  "coverage": {
    "passing": 4,
    "total": 13,
    "targets": {
      "tables": 19,
      "diagrams": 2,
      "internalLinks": 39,
      "externalLinks": 24,
      "footnotes": 15,
      "references": 15
    },
    "actuals": {
      "tables": 12,
      "diagrams": 1,
      "internalLinks": 35,
      "externalLinks": 18,
      "footnotes": 0,
      "references": 18,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "red",
      "tables": "amber",
      "diagrams": "amber",
      "internalLinks": "amber",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4 R:6 A:5 C:7.5"
  },
  "readerRank": 435,
  "researchRank": 294,
  "recommendedScore": 217.33
}
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/us-ai-safety-institute"
}
Backlinks (39)
id · title · type · relationship
uk-aisi · UK AI Safety Institute · organization
japan-aisi · Japan AI Safety Institute · organization
singapore-aisi · Singapore AI Safety Institute · organization
canada-aisi · Canadian AI Safety Institute · organization
france-inesia · INESIA (France National Institute for AI Evaluation and Security) · organization
south-korea-aisi · South Korea AI Safety Institute · organization
australia-aisi · Australian AI Safety Institute · organization
india-aisi · IndiaAI Safety Institute · organization
eu-ai-office · EU AI Office · organization
joe-biden · Joe Biden · person
bletchley-declaration · Bletchley Declaration · policy
coding · Autonomous Coding · capability
agi-development · AGI Development · concept
ea-longtermist-wins-losses · EA and Longtermist Wins and Losses · concept
corrigibility-failure-pathways · Corrigibility Failure Pathways · analysis
mesa-optimization-analysis · Mesa-Optimization Risk Analysis · analysis
power-seeking-conditions · Power-Seeking Emergence Conditions Model · analysis
safety-research-allocation · Safety Research Allocation Model · analysis
safety-researcher-gap · AI Safety Talent Supply/Demand Gap Model · analysis
worldview-intervention-mapping · Worldview-Intervention Mapping · analysis
anthropic · Anthropic · organization
arc · Alignment Research Center (ARC) · organization
cais · Center for AI Safety (CAIS) · organization
frontier-model-forum · Frontier Model Forum · organization
govai · GovAI · organization
government-orgs-overview · Government AI Safety Organizations (Overview) · concept
__index__/knowledge-base/organizations · Organizations · concept
metr · METR · organization
geoffrey-hinton · Geoffrey Hinton · person
holden-karnofsky · Holden Karnofsky · person
ai-safety-institutes · AI Safety Institutes · policy
anthropic-core-views · Anthropic Core Views · safety-agenda
coordination-tech · AI Governance Coordination Technologies · approach
effectiveness-assessment · Policy Effectiveness Assessment · analysis
evaluation · AI Evaluation · approach
international-summits · International AI Safety Summits · event
lab-culture · AI Lab Safety Culture · approach
scalable-eval-approaches · Scalable Eval Approaches · approach
us-executive-order · US Executive Order on Safe, Secure, and Trustworthy AI · policy
Longterm Wiki