Skip to content
Longterm Wiki

GovAI

govai · organization · Path: /knowledge-base/organizations/govai/
E153 — Entity ID (EID)
← Back to page · 36 backlinks · Quality: 43 · Updated: 2025-12-28
Page Recorddatabase.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "govai",
  "wikiId": "E153",
  "path": "/knowledge-base/organizations/govai/",
  "filePath": "knowledge-base/organizations/govai.mdx",
  "title": "GovAI",
  "quality": 43,
  "readerImportance": 50.5,
  "researchImportance": 55.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2025-12-28",
  "dateCreated": "2026-02-15",
  "summary": "GovAI is an AI policy research organization with ~40-45 staff, funded primarily by Coefficient Giving ($1.8M+ in 2023-2024), that has trained 100+ governance researchers through fellowships and currently holds Vice-Chair position in EU GPAI Code drafting. Their compute governance research has influenced regulatory thresholds across US, UK, and EU, with alumni now occupying key positions in frontier labs, think tanks, and government.",
  "description": "The Centre for the Governance of AI is a leading AI policy research organization that has shaped compute governance frameworks, trained 100+ AI governance researchers, and now directly influences EU AI Act implementation through Vice-Chair roles in GPAI Code drafting.",
  "ratings": {
    "novelty": 3.5,
    "rigor": 5,
    "completeness": 6.5,
    "actionability": 4
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "ai-safety",
    "governance",
    "community"
  ],
  "metrics": {
    "wordCount": 1681,
    "tableCount": 14,
    "diagramCount": 1,
    "internalLinks": 14,
    "externalLinks": 7,
    "footnoteCount": 0,
    "bulletRatio": 0.08,
    "sectionCount": 24,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 1681,
  "unconvertedLinks": [
    {
      "text": "GovAI Homepage",
      "url": "https://www.governance.ai/",
      "resourceId": "f35c467b353f990f",
      "resourceTitle": "GovAI | Home"
    }
  ],
  "unconvertedLinkCount": 1,
  "convertedLinkCount": 0,
  "backlinkCount": 36,
  "hallucinationRisk": {
    "level": "high",
    "score": 75,
    "factors": [
      "biographical-claims",
      "no-citations"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 16,
    "similarPages": [
      {
        "id": "cset",
        "title": "CSET (Center for Security and Emerging Technology)",
        "path": "/knowledge-base/organizations/cset/",
        "similarity": 16
      },
      {
        "id": "safety-orgs-overview",
        "title": "AI Safety Organizations (Overview)",
        "path": "/knowledge-base/organizations/safety-orgs-overview/",
        "similarity": 13
      },
      {
        "id": "training-programs",
        "title": "AI Safety Training Programs",
        "path": "/knowledge-base/responses/training-programs/",
        "similarity": 13
      },
      {
        "id": "safety-research-allocation",
        "title": "Safety Research Allocation Model",
        "path": "/knowledge-base/models/safety-research-allocation/",
        "similarity": 12
      },
      {
        "id": "cser",
        "title": "CSER (Centre for the Study of Existential Risk)",
        "path": "/knowledge-base/organizations/cser/",
        "similarity": 12
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 7,
      "diagrams": 1,
      "internalLinks": 13,
      "externalLinks": 8,
      "footnotes": 5,
      "references": 5
    },
    "actuals": {
      "tables": 14,
      "diagrams": 1,
      "internalLinks": 14,
      "externalLinks": 7,
      "footnotes": 0,
      "references": 1,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:3.5 R:5 A:4 C:6.5"
  },
  "readerRank": 298,
  "researchRank": 252,
  "recommendedScore": 121.8
}
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/centre-for-the-governance-of-ai"
}
Backlinks (36)
id · title · type · relationship
governance-policyAI Governance and Policycrux
ben-garfinkelBen Garfinkelpersonleads-to
compute-governanceCompute Governanceconcept
eu-ai-actEU AI Actpolicy
ai-governance-researchAI Governance Research and Analysisapproachrelated
racing-dynamicsAI Development Racing Dynamicsrisk
accident-risksAI Accident Risk Cruxescrux
ai-risk-portfolio-analysisAI Risk Portfolio Analysisanalysis
capability-alignment-raceCapability-Alignment Race Modelanalysis
deceptive-alignment-decompositionDeceptive Alignment Decomposition Modelanalysis
intervention-effectiveness-matrixIntervention Effectiveness Matrixanalysis
risk-interaction-matrixRisk Interaction Matrix Modelanalysis
ai-now-instituteAI Now Instituteorganization
ai-policy-instituteAI Policy Instituteorganization
americans-for-responsible-innovationAmericans for Responsible Innovationorganization
brookings-aiBrookings Institution AI and Emerging Technology Initiativeorganization
caisCenter for AI Safety (CAIS)organization
ceaCentre for Effective Altruismorganization
conjectureConjectureorganization
csetCSET (Center for Security and Emerging Technology)organization
far-aiFAR AIorganization
fhiFuture of Humanity Institute (FHI)organization
foresight-instituteForesight Instituteorganization
__index__/knowledge-base/organizationsOrganizationsconcept
lionheart-venturesLionheart Venturesorganization
longview-philanthropyLongview Philanthropyorganization
matsMATS ML Alignment Theory Scholars programorganization
safety-orgs-overviewAI Safety Organizations (Overview)concept
sffSurvival and Flourishing Fund (SFF)organization
swift-centreSwift Centreorganization
the-future-societyThe Future Societyorganization
dario-amodeiDario Amodeiperson
dustin-moskovitzDustin Moskovitzperson
structured-accessStructured Access / API-Onlyapproach
thresholdsCompute Thresholdsconcept
__index__/knowledge-base/worldviewsWorldviewsconcept
Longterm Wiki