Longterm Wiki

Future of Humanity Institute (FHI)

ID: fhi · Type: organization · Path: /knowledge-base/organizations/fhi/
Entity ID (EID): E140
39 backlinks · Quality: 51 · Updated: 2026-01-29
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "fhi",
  "wikiId": "E140",
  "path": "/knowledge-base/organizations/fhi/",
  "filePath": "knowledge-base/organizations/fhi.mdx",
  "title": "Future of Humanity Institute (FHI)",
  "quality": 51,
  "readerImportance": 50.5,
  "researchImportance": 49.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-01-29",
  "dateCreated": "2026-02-15",
  "summary": "The Future of Humanity Institute (2005-2024) was a pioneering Oxford research center that founded existential risk studies and AI alignment research, growing from 3 to ~50 researchers and receiving \\$10M+ in funding before closing due to administrative conflicts. FHI produced seminal works (Superintelligence, The Precipice), trained leaders now at Anthropic/DeepMind/GovAI, and advised UN/UK government, demonstrating both transformative intellectual impact and the challenges of housing speculative research in traditional academia.",
  "description": "The Future of Humanity Institute was a pioneering interdisciplinary research center at Oxford University (2005-2024) that founded the fields of existential risk studies and AI alignment research.",
  "ratings": {
    "novelty": 3.2,
    "rigor": 5.8,
    "completeness": 7.3,
    "actionability": 2.1
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "community",
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 4181,
    "tableCount": 32,
    "diagramCount": 2,
    "internalLinks": 19,
    "externalLinks": 24,
    "footnoteCount": 0,
    "bulletRatio": 0.05,
    "sectionCount": 57,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 4181,
  "unconvertedLinks": [
    {
      "text": "Future of Humanity Institute",
      "url": "https://en.wikipedia.org/wiki/Future_of_Humanity_Institute",
      "resourceId": "d04582635c8c0ce4",
      "resourceTitle": "Future of Humanity Institute - Wikipedia"
    },
    {
      "text": "Nick Bostrom",
      "url": "https://nickbostrom.com/",
      "resourceId": "9cf1412a293bfdbe",
      "resourceTitle": "Nick Bostrom's Homepage"
    },
    {
      "text": "Nick Bostrom's Homepage",
      "url": "https://nickbostrom.com/",
      "resourceId": "9cf1412a293bfdbe",
      "resourceTitle": "Nick Bostrom's Homepage"
    },
    {
      "text": "Future of Humanity Institute - Wikipedia",
      "url": "https://en.wikipedia.org/wiki/Future_of_Humanity_Institute",
      "resourceId": "d04582635c8c0ce4",
      "resourceTitle": "Future of Humanity Institute - Wikipedia"
    },
    {
      "text": "Nick Bostrom - Wikipedia",
      "url": "https://en.wikipedia.org/wiki/Nick_Bostrom",
      "resourceId": "kb-bab966a212f1bc8b",
      "resourceTitle": "Nick Bostrom - Wikipedia"
    },
    {
      "text": "Superintelligence: Paths, Dangers, Strategies - Wikipedia",
      "url": "https://en.wikipedia.org/wiki/Superintelligence:_Paths,_Dangers,_Strategies",
      "resourceId": "0151481d5dc82963",
      "resourceTitle": "Superintelligence: Paths, Dangers, Strategies - Wikipedia"
    },
    {
      "text": "Daily Nous: The End of the Future of Humanity Institute",
      "url": "https://dailynous.com/2024/04/18/end-future-of-humanity-institute/",
      "resourceId": "73a866cd6278fc9b",
      "resourceTitle": "The End of the Future of Humanity Institute — Daily Nous (April 18, 2024)"
    },
    {
      "text": "EA Forum: FHI Final Report Discussion",
      "url": "https://forum.effectivealtruism.org/posts/uK27pds7J36asqJPt/future-of-humanity-institute-2005-2024-final-report",
      "resourceId": "87c472d68e8a2845",
      "resourceTitle": "Future of Humanity Institute 2005-2024: Final Report"
    },
    {
      "text": "LessWrong: FHI has shut down",
      "url": "https://www.lesswrong.com/posts/tu3CH22nFLLKouMKw/fhi-future-of-humanity-institute-has-shut-down-2005-2024",
      "resourceId": "59abe40a529ff678",
      "resourceTitle": "FHI (Future of Humanity Institute) has shut down (2005–2024)"
    }
  ],
  "unconvertedLinkCount": 9,
  "convertedLinkCount": 0,
  "backlinkCount": 39,
  "hallucinationRisk": {
    "level": "high",
    "score": 75,
    "factors": [
      "biographical-claims",
      "no-citations"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 16,
    "similarPages": [
      {
        "id": "nick-bostrom",
        "title": "Nick Bostrom",
        "path": "/knowledge-base/people/nick-bostrom/",
        "similarity": 16
      },
      {
        "id": "cser",
        "title": "CSER (Centre for the Study of Existential Risk)",
        "path": "/knowledge-base/organizations/cser/",
        "similarity": 15
      },
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 14
      },
      {
        "id": "fli",
        "title": "Future of Life Institute (FLI)",
        "path": "/knowledge-base/organizations/fli/",
        "similarity": 14
      },
      {
        "id": "nick-beckstead",
        "title": "Nick Beckstead",
        "path": "/knowledge-base/people/nick-beckstead/",
        "similarity": 14
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-18",
      "branch": "claude/audit-webpage-errors-X4jHg",
      "title": "Audit wiki pages for factual errors and hallucinations",
      "summary": "Systematic audit of ~20 wiki pages for factual errors, hallucinations, and inconsistencies. Found and fixed 25+ confirmed errors across 17 pages, including wrong dates, fabricated statistics, false attributions, missing major events, broken entity references, misattributed techniques, and internal inconsistencies."
    }
  ],
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 17,
      "diagrams": 2,
      "internalLinks": 33,
      "externalLinks": 21,
      "footnotes": 13,
      "references": 13
    },
    "actuals": {
      "tables": 32,
      "diagrams": 2,
      "internalLinks": 19,
      "externalLinks": 24,
      "footnotes": 0,
      "references": 5,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "amber",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 1,
    "ratingsString": "N:3.2 R:5.8 A:2.1 C:7.3"
  },
  "readerRank": 297,
  "researchRank": 281,
  "recommendedScore": 140.77
}
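As a rough illustration of the build step described above, here is a minimal TypeScript sketch of how such a record could be merged from its three sources and how the coverage statuses could be derived. The function names, merge order, and thresholds are assumptions, not the wiki's actual implementation; only the field names and example values come from the record itself.

```typescript
// Sketch: merging MDX frontmatter, entity YAML, and computed metrics into a
// single page record, then deriving coverage statuses.
// Field names follow the record above; everything else is an illustrative assumption.

interface PageMetrics {
  wordCount: number;
  tableCount: number;
  diagramCount: number;
  internalLinks: number;
  externalLinks: number;
  footnoteCount: number;
}

type CoverageStatus = "green" | "amber" | "red";

// Merge the three sources; the assumed order means later sources win on key collisions.
function buildPageRecord(
  frontmatter: Record<string, unknown>, // parsed from the page's MDX file (title, quality, summary, ...)
  entity: Record<string, unknown>,      // parsed from the entity YAML (wikiId, entityType, ...)
  metrics: PageMetrics,                 // computed from the rendered page at build time
): Record<string, unknown> {
  return { ...frontmatter, ...entity, metrics, wordCount: metrics.wordCount };
}

// Coverage rule inferred from the statuses shown above (internalLinks 19/33 -> amber,
// footnotes 0/13 -> red, tables 32/17 -> green); the wiki's real thresholds may differ.
function coverageStatus(actual: number, target: number): CoverageStatus {
  if (actual >= target) return "green";
  if (actual > 0) return "amber";
  return "red";
}

// Example using values from this page's record:
const record = buildPageRecord(
  { id: "fhi", title: "Future of Humanity Institute (FHI)", quality: 51 },
  { wikiId: "E140", entityType: "organization" },
  { wordCount: 4181, tableCount: 32, diagramCount: 2, internalLinks: 19, externalLinks: 24, footnoteCount: 0 },
);
console.log(record.wikiId, coverageStatus(19, 33)); // "E140" "amber"
```

The spread-based merge means the order of the sources decides which value wins when the same key appears in both frontmatter and entity YAML; the order shown here is a guess.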
External Links
{
  "wikidata": "https://www.wikidata.org/wiki/Q5510826"
}
Backlinks (39)
id | title | type/relationship
miri-era | The MIRI Era | historical
josh-jacobson | Josh Jacobson | person
accident-risks | AI Accident Risk Cruxes | crux
is-ai-xrisk-real | Is AI Existential Risk Real? | crux
deep-learning-era | Deep Learning Revolution (2012-2020) | historical
ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept
epstein-ai-connections | Jeffrey Epstein's Connections to AI Researchers | concept
ftx-red-flags-pre-collapse-warning-signs-that-were-overlooked | FTX Red Flags: Pre-Collapse Warning Signs That Were Overlooked | concept
longtermism-credibility-after-ftx | Longtermism's Philosophical Credibility After FTX | concept
capability-alignment-race | Capability-Alignment Race Model | analysis
risk-activation-timeline | Risk Activation Timeline Model | analysis
safety-researcher-gap | AI Safety Talent Supply/Demand Gap Model | analysis
anthropic-ipo | Anthropic IPO | analysis
cnas | Center for a New American Security (CNAS) | organization
cset | CSET (Center for Security and Emerging Technology) | organization
deepmind | Google DeepMind | organization
foresight-institute | Foresight Institute | organization
ftx | FTX (cryptocurrency exchange) | organization
govai | GovAI | organization
ibbis | IBBIS (International Biosecurity and Biosafety Initiative for Science) | organization
safety-orgs-overview | AI Safety Organizations (Overview) | concept
secure-ai-project | Secure AI Project | organization
sentinel | Sentinel (Catastrophic Risk Foresight) | organization
connor-leahy | Connor Leahy | person
david-dalrymple | David Dalrymple | person
geoffrey-hinton | Geoffrey Hinton | person
issa-rice | Issa Rice | person
jan-leike | Jan Leike | person
nick-beckstead | Nick Beckstead | person
nick-bostrom | Nick Bostrom | person
nuno-sempere | Nuño Sempere | person
robin-hanson | Robin Hanson | person
toby-ord | Toby Ord | person
corrigibility | Corrigibility Research | research-area
intervention-evaluation-for-political-stability | Intervention Evaluation for Political Stability | approach
disinformation | Disinformation | risk
existential-risk | Existential Risk from AI | concept
knowledge-monopoly | AI Knowledge Monopoly | risk
superintelligence | Superintelligence | concept
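A backlinks table like the one above can be produced by inverting the wiki's internal-link graph. The sketch below is a hedged illustration; the page shape, field names, and example entries are assumptions rather than the wiki's actual data model.

```typescript
// Sketch: inverting the internal-link graph to produce a backlinks table.
// PageStub and its fields are illustrative assumptions.

interface PageStub {
  id: string;
  title: string;
  type: string;       // e.g. "person", "organization", "crux"
  linksTo: string[];  // ids of pages this page links to internally
}

interface BacklinkRow {
  id: string;
  title: string;
  type: string;
}

// Every page that links to `targetId` becomes one row in the target's backlinks table.
function collectBacklinks(allPages: PageStub[], targetId: string): BacklinkRow[] {
  return allPages
    .filter((page) => page.linksTo.includes(targetId))
    .map(({ id, title, type }) => ({ id, title, type }));
}

// Example with two entries from the table above and one hypothetical non-linking page:
const rows = collectBacklinks(
  [
    { id: "toby-ord", title: "Toby Ord", type: "person", linksTo: ["fhi"] },
    { id: "govai", title: "GovAI", type: "organization", linksTo: ["fhi"] },
    { id: "some-unrelated-page", title: "Unrelated Page", type: "concept", linksTo: [] },
  ],
  "fhi",
);
console.log(rows.length); // 2
```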