Future of Humanity Institute (FHI)
ID: fhi · Type: organization · Path: /knowledge-base/organizations/fhi/
Entity ID (EID): E140
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "fhi",
"wikiId": "E140",
"path": "/knowledge-base/organizations/fhi/",
"filePath": "knowledge-base/organizations/fhi.mdx",
"title": "Future of Humanity Institute (FHI)",
"quality": 51,
"readerImportance": 50.5,
"researchImportance": 49.5,
"tacticalValue": null,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-01-29",
"dateCreated": "2026-02-15",
"summary": "The Future of Humanity Institute (2005-2024) was a pioneering Oxford research center that founded existential risk studies and AI alignment research, growing from 3 to ~50 researchers and receiving \\$10M+ in funding before closing due to administrative conflicts. FHI produced seminal works (Superintelligence, The Precipice), trained leaders now at Anthropic/DeepMind/GovAI, and advised UN/UK government, demonstrating both transformative intellectual impact and the challenges of housing speculative research in traditional academia.",
"description": "The Future of Humanity Institute was a pioneering interdisciplinary research center at Oxford University (2005-2024) that founded the fields of existential risk studies and AI alignment research.",
"ratings": {
"novelty": 3.2,
"rigor": 5.8,
"completeness": 7.3,
"actionability": 2.1
},
"category": "organizations",
"subcategory": "safety-orgs",
"clusters": [
"community",
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 4181,
"tableCount": 32,
"diagramCount": 2,
"internalLinks": 19,
"externalLinks": 24,
"footnoteCount": 0,
"bulletRatio": 0.05,
"sectionCount": 57,
"hasOverview": true,
"structuralScore": 15
},
"suggestedQuality": 100,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 4181,
"unconvertedLinks": [
{
"text": "Future of Humanity Institute",
"url": "https://en.wikipedia.org/wiki/Future_of_Humanity_Institute",
"resourceId": "d04582635c8c0ce4",
"resourceTitle": "Future of Humanity Institute - Wikipedia"
},
{
"text": "Nick Bostrom",
"url": "https://nickbostrom.com/",
"resourceId": "9cf1412a293bfdbe",
"resourceTitle": "Nick Bostrom's Homepage"
},
{
"text": "Nick Bostrom's Homepage",
"url": "https://nickbostrom.com/",
"resourceId": "9cf1412a293bfdbe",
"resourceTitle": "Nick Bostrom's Homepage"
},
{
"text": "Future of Humanity Institute - Wikipedia",
"url": "https://en.wikipedia.org/wiki/Future_of_Humanity_Institute",
"resourceId": "d04582635c8c0ce4",
"resourceTitle": "Future of Humanity Institute - Wikipedia"
},
{
"text": "Nick Bostrom - Wikipedia",
"url": "https://en.wikipedia.org/wiki/Nick_Bostrom",
"resourceId": "kb-bab966a212f1bc8b",
"resourceTitle": "Nick Bostrom - Wikipedia"
},
{
"text": "Superintelligence: Paths, Dangers, Strategies - Wikipedia",
"url": "https://en.wikipedia.org/wiki/Superintelligence:_Paths,_Dangers,_Strategies",
"resourceId": "0151481d5dc82963",
"resourceTitle": "Superintelligence: Paths, Dangers, Strategies - Wikipedia"
},
{
"text": "Daily Nous: The End of the Future of Humanity Institute",
"url": "https://dailynous.com/2024/04/18/end-future-of-humanity-institute/",
"resourceId": "73a866cd6278fc9b",
"resourceTitle": "The End of the Future of Humanity Institute — Daily Nous (April 18, 2024)"
},
{
"text": "EA Forum: FHI Final Report Discussion",
"url": "https://forum.effectivealtruism.org/posts/uK27pds7J36asqJPt/future-of-humanity-institute-2005-2024-final-report",
"resourceId": "87c472d68e8a2845",
"resourceTitle": "Future of Humanity Institute 2005-2024: Final Report"
},
{
"text": "LessWrong: FHI has shut down",
"url": "https://www.lesswrong.com/posts/tu3CH22nFLLKouMKw/fhi-future-of-humanity-institute-has-shut-down-2005-2024",
"resourceId": "59abe40a529ff678",
"resourceTitle": "FHI (Future of Humanity Institute) has shut down (2005–2024)"
}
],
"unconvertedLinkCount": 9,
"convertedLinkCount": 0,
"backlinkCount": 39,
"hallucinationRisk": {
"level": "high",
"score": 75,
"factors": [
"biographical-claims",
"no-citations"
]
},
"entityType": "organization",
"redundancy": {
"maxSimilarity": 16,
"similarPages": [
{
"id": "nick-bostrom",
"title": "Nick Bostrom",
"path": "/knowledge-base/people/nick-bostrom/",
"similarity": 16
},
{
"id": "cser",
"title": "CSER (Centre for the Study of Existential Risk)",
"path": "/knowledge-base/organizations/cser/",
"similarity": 15
},
{
"id": "miri-era",
"title": "The MIRI Era (2000-2015)",
"path": "/knowledge-base/history/miri-era/",
"similarity": 14
},
{
"id": "fli",
"title": "Future of Life Institute (FLI)",
"path": "/knowledge-base/organizations/fli/",
"similarity": 14
},
{
"id": "nick-beckstead",
"title": "Nick Beckstead",
"path": "/knowledge-base/people/nick-beckstead/",
"similarity": 14
}
]
},
"changeHistory": [
{
"date": "2026-02-18",
"branch": "claude/audit-webpage-errors-X4jHg",
"title": "Audit wiki pages for factual errors and hallucinations",
"summary": "Systematic audit of ~20 wiki pages for factual errors, hallucinations, and inconsistencies. Found and fixed 25+ confirmed errors across 17 pages, including wrong dates, fabricated statistics, false attributions, missing major events, broken entity references, misattributed techniques, and internal inconsistencies."
}
],
"coverage": {
"passing": 8,
"total": 13,
"targets": {
"tables": 17,
"diagrams": 2,
"internalLinks": 33,
"externalLinks": 21,
"footnotes": 13,
"references": 13
},
"actuals": {
"tables": 32,
"diagrams": 2,
"internalLinks": 19,
"externalLinks": 24,
"footnotes": 0,
"references": 5,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "green",
"diagrams": "green",
"internalLinks": "amber",
"externalLinks": "green",
"footnotes": "red",
"references": "amber",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 1,
"ratingsString": "N:3.2 R:5.8 A:2.1 C:7.3"
},
"readerRank": 297,
"researchRank": 281,
"recommendedScore": 140.77
}
External Links
{
"wikidata": "https://www.wikidata.org/wiki/Q5510826"
}
Backlinks (39)
| id | title | type | relationship |
|---|---|---|---|
| miri-era | The MIRI Era | historical | — |
| josh-jacobson | Josh Jacobson | person | — |
| accident-risks | AI Accident Risk Cruxes | crux | — |
| is-ai-xrisk-real | Is AI Existential Risk Real? | crux | — |
| deep-learning-era | Deep Learning Revolution (2012-2020) | historical | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| epstein-ai-connections | Jeffrey Epstein's Connections to AI Researchers | concept | — |
| ftx-red-flags-pre-collapse-warning-signs-that-were-overlooked | FTX Red Flags: Pre-Collapse Warning Signs That Were Overlooked | concept | — |
| longtermism-credibility-after-ftx | Longtermism's Philosophical Credibility After FTX | concept | — |
| capability-alignment-race | Capability-Alignment Race Model | analysis | — |
| risk-activation-timeline | Risk Activation Timeline Model | analysis | — |
| safety-researcher-gap | AI Safety Talent Supply/Demand Gap Model | analysis | — |
| anthropic-ipo | Anthropic IPO | analysis | — |
| cnas | Center for a New American Security (CNAS) | organization | — |
| cset | CSET (Center for Security and Emerging Technology) | organization | — |
| deepmind | Google DeepMind | organization | — |
| foresight-institute | Foresight Institute | organization | — |
| ftx | FTX (cryptocurrency exchange) | organization | — |
| govai | GovAI | organization | — |
| ibbis | IBBIS (International Biosecurity and Biosafety Initiative for Science) | organization | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |
| secure-ai-project | Secure AI Project | organization | — |
| sentinel | Sentinel (Catastrophic Risk Foresight) | organization | — |
| connor-leahy | Connor Leahy | person | — |
| david-dalrymple | David Dalrymple | person | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| issa-rice | Issa Rice | person | — |
| jan-leike | Jan Leike | person | — |
| nick-beckstead | Nick Beckstead | person | — |
| nick-bostrom | Nick Bostrom | person | — |
| nuno-sempere | Nuño Sempere | person | — |
| robin-hanson | Robin Hanson | person | — |
| toby-ord | Toby Ord | person | — |
| corrigibility | Corrigibility Research | research-area | — |
| intervention-evaluation-for-political-stability | Intervention Evaluation for Political Stability | approach | — |
| disinformation | Disinformation | risk | — |
| existential-risk | Existential Risk from AI | concept | — |
| knowledge-monopoly | AI Knowledge Monopoly | risk | — |
| superintelligence | Superintelligence | concept | — |