GovAI
govai · organization — Path: /knowledge-base/organizations/govai/
Entity ID (EID): E153
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "govai",
"wikiId": "E153",
"path": "/knowledge-base/organizations/govai/",
"filePath": "knowledge-base/organizations/govai.mdx",
"title": "GovAI",
"quality": 43,
"readerImportance": 50.5,
"researchImportance": 55.5,
"tacticalValue": null,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2025-12-28",
"dateCreated": "2026-02-15",
"summary": "GovAI is an AI policy research organization with ~40-45 staff, funded primarily by Coefficient Giving (\\$1.8M+ in 2023-2024), that has trained 100+ governance researchers through fellowships and currently holds Vice-Chair position in EU GPAI Code drafting. Their compute governance research has influenced regulatory thresholds across US, UK, and EU, with alumni now occupying key positions in frontier labs, think tanks, and government.",
"description": "The Centre for the Governance of AI is a leading AI policy research organization that has shaped compute governance frameworks, trained 100+ AI governance researchers, and now directly influences EU AI Act implementation through Vice-Chair roles in GPAI Code drafting.",
"ratings": {
"novelty": 3.5,
"rigor": 5,
"completeness": 6.5,
"actionability": 4
},
"category": "organizations",
"subcategory": "safety-orgs",
"clusters": [
"ai-safety",
"governance",
"community"
],
"metrics": {
"wordCount": 1681,
"tableCount": 14,
"diagramCount": 1,
"internalLinks": 14,
"externalLinks": 7,
"footnoteCount": 0,
"bulletRatio": 0.08,
"sectionCount": 24,
"hasOverview": true,
"structuralScore": 15
},
"suggestedQuality": 100,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 1681,
"unconvertedLinks": [
{
"text": "GovAI Homepage",
"url": "https://www.governance.ai/",
"resourceId": "f35c467b353f990f",
"resourceTitle": "GovAI helps decision-makers navigate the transition to a world with advanced AI, by producing rigorous research and fostering talent.\" name=\"description\"/><meta content=\"GovAI | Home"
}
],
"unconvertedLinkCount": 1,
"convertedLinkCount": 0,
"backlinkCount": 36,
"hallucinationRisk": {
"level": "high",
"score": 75,
"factors": [
"biographical-claims",
"no-citations"
]
},
"entityType": "organization",
"redundancy": {
"maxSimilarity": 16,
"similarPages": [
{
"id": "cset",
"title": "CSET (Center for Security and Emerging Technology)",
"path": "/knowledge-base/organizations/cset/",
"similarity": 16
},
{
"id": "safety-orgs-overview",
"title": "AI Safety Organizations (Overview)",
"path": "/knowledge-base/organizations/safety-orgs-overview/",
"similarity": 13
},
{
"id": "training-programs",
"title": "AI Safety Training Programs",
"path": "/knowledge-base/responses/training-programs/",
"similarity": 13
},
{
"id": "safety-research-allocation",
"title": "Safety Research Allocation Model",
"path": "/knowledge-base/models/safety-research-allocation/",
"similarity": 12
},
{
"id": "cser",
"title": "CSER (Centre for the Study of Existential Risk)",
"path": "/knowledge-base/organizations/cser/",
"similarity": 12
}
]
},
"coverage": {
"passing": 7,
"total": 13,
"targets": {
"tables": 7,
"diagrams": 1,
"internalLinks": 13,
"externalLinks": 8,
"footnotes": 5,
"references": 5
},
"actuals": {
"tables": 14,
"diagrams": 1,
"internalLinks": 14,
"externalLinks": 7,
"footnotes": 0,
"references": 1,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "green",
"diagrams": "green",
"internalLinks": "green",
"externalLinks": "amber",
"footnotes": "red",
"references": "amber",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:3.5 R:5 A:4 C:6.5"
},
"readerRank": 298,
"researchRank": 252,
"recommendedScore": 121.8
}
External Links
{
"eaForum": "https://forum.effectivealtruism.org/topics/centre-for-the-governance-of-ai"
}
Backlinks (36)
| id | title | type | relationship |
|---|---|---|---|
| governance-policy | AI Governance and Policy | crux | — |
| ben-garfinkel | Ben Garfinkel | person | leads-to |
| compute-governance | Compute Governance | concept | — |
| eu-ai-act | EU AI Act | policy | — |
| ai-governance-research | AI Governance Research and Analysis | approach | related |
| racing-dynamics | AI Development Racing Dynamics | risk | — |
| accident-risks | AI Accident Risk Cruxes | crux | — |
| ai-risk-portfolio-analysis | AI Risk Portfolio Analysis | analysis | — |
| capability-alignment-race | Capability-Alignment Race Model | analysis | — |
| deceptive-alignment-decomposition | Deceptive Alignment Decomposition Model | analysis | — |
| intervention-effectiveness-matrix | Intervention Effectiveness Matrix | analysis | — |
| risk-interaction-matrix | Risk Interaction Matrix Model | analysis | — |
| ai-now-institute | AI Now Institute | organization | — |
| ai-policy-institute | AI Policy Institute | organization | — |
| americans-for-responsible-innovation | Americans for Responsible Innovation | organization | — |
| brookings-ai | Brookings Institution AI and Emerging Technology Initiative | organization | — |
| cais | Center for AI Safety (CAIS) | organization | — |
| cea | Centre for Effective Altruism | organization | — |
| conjecture | Conjecture | organization | — |
| cset | CSET (Center for Security and Emerging Technology) | organization | — |
| far-ai | FAR AI | organization | — |
| fhi | Future of Humanity Institute (FHI) | organization | — |
| foresight-institute | Foresight Institute | organization | — |
| __index__/knowledge-base/organizations | Organizations | concept | — |
| lionheart-ventures | Lionheart Ventures | organization | — |
| longview-philanthropy | Longview Philanthropy | organization | — |
| mats | MATS ML Alignment Theory Scholars program | organization | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |
| sff | Survival and Flourishing Fund (SFF) | organization | — |
| swift-centre | Swift Centre | organization | — |
| the-future-society | The Future Society | organization | — |
| dario-amodei | Dario Amodei | person | — |
| dustin-moskovitz | Dustin Moskovitz | person | — |
| structured-access | Structured Access / API-Only | approach | — |
| thresholds | Compute Thresholds | concept | — |
| __index__/knowledge-base/worldviews | Worldviews | concept | — |