Future of Life Institute (FLI)
ID: fli
Type: organization
Path: /knowledge-base/organizations/fli/
Entity ID (EID): E528
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
```json
{
  "id": "fli",
  "wikiId": "E528",
  "path": "/knowledge-base/organizations/fli/",
  "filePath": "knowledge-base/organizations/fli.mdx",
  "title": "Future of Life Institute (FLI)",
  "quality": 46,
  "readerImportance": 76,
  "researchImportance": 53.5,
  "tacticalValue": 72,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-01-29",
  "dateCreated": "2026-02-15",
  "summary": "Comprehensive profile of FLI documenting $25M+ in grants distributed (2015: $7M to 37 projects, 2021: $25M program), major public campaigns (Asilomar Principles with 5,700+ signatories, 2023 Pause Letter with 33,000+ signatories), and $665.8M Buterin donation (2021). Organization operates primarily through advocacy and grantmaking rather than direct research, with active EU/UN/US policy engagement.",
  "description": "The Future of Life Institute is a nonprofit organization focused on reducing existential risks from advanced AI and other transformative technologies.",
  "ratings": {
    "novelty": 2.5,
    "rigor": 4,
    "completeness": 6.5,
    "actionability": 2
  },
  "category": "organizations",
  "subcategory": "funders",
  "clusters": [
    "community",
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 6031,
    "tableCount": 32,
    "diagramCount": 2,
    "internalLinks": 51,
    "externalLinks": 52,
    "footnoteCount": 0,
    "bulletRatio": 0.15,
    "sectionCount": 51,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 6031,
  "unconvertedLinks": [
    {
      "text": "futureoflife.org",
      "url": "https://futureoflife.org/",
      "resourceId": "786a68a91a7d5712",
      "resourceTitle": "Future of Life Institute"
    },
    {
      "text": "Future of Life Institute",
      "url": "https://futureoflife.org/",
      "resourceId": "786a68a91a7d5712",
      "resourceTitle": "Future of Life Institute"
    },
    {
      "text": "\"Pause Giant AI Experiments\"",
      "url": "https://futureoflife.org/open-letter/pause-giant-ai-experiments/",
      "resourceId": "531f55cee64f6509",
      "resourceTitle": "Pause Giant AI Experiments: An Open Letter (FLI, 2023)"
    },
    {
      "text": "Metaculus",
      "url": "https://www.metaculus.com/",
      "resourceId": "d99a6d0fb1edc2db",
      "resourceTitle": "Metaculus Forecasting Platform"
    },
    {
      "text": "FLI Official Website",
      "url": "https://futureoflife.org/",
      "resourceId": "786a68a91a7d5712",
      "resourceTitle": "Future of Life Institute"
    },
    {
      "text": "Pause Giant AI Experiments: An Open Letter",
      "url": "https://futureoflife.org/open-letter/pause-giant-ai-experiments/",
      "resourceId": "531f55cee64f6509",
      "resourceTitle": "Pause Giant AI Experiments: An Open Letter (FLI, 2023)"
    },
    {
      "text": "Pause Giant AI Experiments - Wikipedia",
      "url": "https://en.wikipedia.org/wiki/Pause_Giant_AI_Experiments:_An_Open_Letter",
      "resourceId": "4fc41c1e8720f41f",
      "resourceTitle": "Pause Giant AI Experiments: An Open Letter (Wikipedia)"
    },
    {
      "text": "LessWrong: Elon Musk Donates $10M to FLI",
      "url": "https://www.lesswrong.com/posts/FuCZdbQ3h6782bnY6/elon-musk-donates-usd10m-to-the-future-of-life-institute-to",
      "resourceId": "4c3febc0d0c2d304",
      "resourceTitle": "Elon Musk donates $10M to the Future of Life Institute to keep AI beneficial"
    },
    {
      "text": "FLI Website",
      "url": "https://futureoflife.org/",
      "resourceId": "786a68a91a7d5712",
      "resourceTitle": "Future of Life Institute"
    },
    {
      "text": "Pause Giant AI Experiments Letter",
      "url": "https://futureoflife.org/open-letter/pause-giant-ai-experiments/",
      "resourceId": "531f55cee64f6509",
      "resourceTitle": "Pause Giant AI Experiments: An Open Letter (FLI, 2023)"
    }
  ],
  "unconvertedLinkCount": 10,
  "convertedLinkCount": 0,
  "backlinkCount": 27,
  "hallucinationRisk": {
    "level": "high",
    "score": 75,
    "factors": [
      "biographical-claims",
      "no-citations"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 16,
    "similarPages": [
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 16
      },
      {
        "id": "ea-longtermist-wins-losses",
        "title": "EA and Longtermist Wins and Losses",
        "path": "/knowledge-base/history/ea-longtermist-wins-losses/",
        "similarity": 15
      },
      {
        "id": "mainstream-era",
        "title": "Mainstream Era (2020-Present)",
        "path": "/knowledge-base/history/mainstream-era/",
        "similarity": 15
      },
      {
        "id": "ai-futures-project",
        "title": "AI Futures Project",
        "path": "/knowledge-base/organizations/ai-futures-project/",
        "similarity": 15
      },
      {
        "id": "cais",
        "title": "Center for AI Safety (CAIS)",
        "path": "/knowledge-base/organizations/cais/",
        "similarity": 15
      }
    ]
  },
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 24,
      "diagrams": 2,
      "internalLinks": 48,
      "externalLinks": 30,
      "footnotes": 18,
      "references": 18
    },
    "actuals": {
      "tables": 32,
      "diagrams": 2,
      "internalLinks": 51,
      "externalLinks": 52,
      "footnotes": 0,
      "references": 4,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2.5 R:4 A:2 C:6.5"
  },
  "readerRank": 114,
  "researchRank": 264,
  "recommendedScore": 143.52
}
```
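The header note says this record is merged at build time from MDX frontmatter, an Entity YAML file, and computed metrics. A minimal TypeScript sketch of how such a merge might work is below; the parser choices (`gray-matter`, `yaml`), the entity file path, and the `computeMetrics` heuristics are illustrative assumptions, not this site's actual build code.

```typescript
// Hypothetical sketch: MDX frontmatter + Entity YAML + computed
// metrics -> one database.json record. All helpers and heuristics
// here are assumptions for illustration.
import { readFileSync } from "node:fs";
import matter from "gray-matter";
import { parse as parseYaml } from "yaml";

interface PageRecord {
  id: string;
  wikiId: string;
  path: string;
  filePath: string;
  title: string;
  metrics: Record<string, number | boolean>;
  [key: string]: unknown;
}

// Derive simple content metrics from the MDX body (heuristics assumed).
function computeMetrics(body: string): Record<string, number | boolean> {
  return {
    wordCount: body.split(/\s+/).filter(Boolean).length,
    internalLinks: (body.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (body.match(/\]\(https?:\/\//g) ?? []).length,
    hasOverview: /^#+\s*Overview/m.test(body),
  };
}

export function buildRecord(mdxPath: string, entityYamlPath: string): PageRecord {
  // matter() splits the MDX file into frontmatter data and body content.
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));
  const entity = parseYaml(readFileSync(entityYamlPath, "utf8"));
  // Later spreads win on key conflicts; computed metrics are attached last.
  return {
    ...entity,
    ...frontmatter,
    filePath: mdxPath,
    metrics: computeMetrics(content),
  } as PageRecord;
}
```

For this page the call would look something like `buildRecord("knowledge-base/organizations/fli.mdx", "entities/E528.yaml")`, where the YAML path is hypothetical.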
External Links
No external links
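The `coverage.items` traffic lights appear to follow a simple rule when checked against the `targets` and `actuals` in the record: green when the target is met, amber on partial progress, red when nothing is present. This inferred rule is an assumption, but it reproduces every numeric status above; a sketch:

```typescript
// Hypothetical reconstruction of the coverage.items statuses.
// Inferred rule (an assumption, not a documented one): green when the
// target is met or exceeded, amber on partial progress, red when absent.
type Status = "green" | "amber" | "red";

function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green"; // target met or exceeded
  if (actual > 0) return "amber";       // some progress, target missed
  return "red";                         // nothing present at all
}

// Checked against the record above:
coverageStatus(32, 24); // tables     -> "green"
coverageStatus(4, 18);  // references -> "amber"
coverageStatus(0, 18);  // footnotes  -> "red"
```

Under this reading, `"passing": 8` of 13 matches the eight green items in the record.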
Backlinks (27)
| id | title | type | relationship |
|---|---|---|---|
| future-of-life-foundation | Future of Life Foundation (FLF) | organization | related |
| carma | Center for AI Risk Management & Alignment (CARMA) | organization | — |
| wise-ancestors | Wise Ancestors | organization | — |
| max-tegmark | Max Tegmark | person | — |
| california-sb1047 | Safe and Secure Innovation for Frontier Artificial Intelligence Models Act | policy | — |
| pause-moratorium | Pause / Moratorium | concept | — |
| pause | Pause Advocacy | approach | — |
| ai-for-human-reasoning-fellowship | AI for Human Reasoning Fellowship | approach | — |
| ai-policy-lobbying | AI Safety Policy Lobbying | approach | related |
| grassroots-ai-activism | Grassroots AI Safety and Democracy Activism | approach | related |
| situational-awareness | Situational Awareness | capability | — |
| solutions | AI Safety Solution Cruxes | crux | — |
| miri-era | The MIRI Era (2000-2015) | historical | — |
| capability-threshold-model | Capability Threshold Model | analysis | — |
| feedback-loops | Feedback Loop & Cascade Model | analysis | — |
| safety-research-allocation | Safety Research Allocation Model | analysis | — |
| anthropic-investors | Anthropic (Funder) | analysis | — |
| cser | CSER (Centre for the Study of Existential Risk) | organization | — |
| foresight-institute | Foresight Institute | organization | — |
| funders-overview | Longtermist Funders (Overview) | concept | — |
| leading-the-future | Leading the Future super PAC | organization | — |
| lionheart-ventures | Lionheart Ventures | organization | — |
| metaculus | Metaculus | organization | — |
| openai-foundation | OpenAI Foundation | organization | — |
| elon-musk | Elon Musk | person | — |
| jaan-tallinn | Jaan Tallinn | person | — |
| irreversibility | AI-Induced Irreversibility | risk | — |
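The `redundancy` block in the record above reports 0-100 similarity scores against other pages, with `maxSimilarity` equal to the top score (16, for miri-era). The actual metric is not documented here; a common stand-in for this kind of score is cosine similarity over term-frequency vectors, sketched below purely as an illustration.

```typescript
// Illustrative page-similarity score in the style of the redundancy
// block: cosine similarity over word counts, scaled to 0-100. This is
// an assumed method, not the site's documented one.
function termCounts(text: string): Map<string, number> {
  const counts = new Map<string, number>();
  for (const word of text.toLowerCase().match(/[a-z0-9']+/g) ?? []) {
    counts.set(word, (counts.get(word) ?? 0) + 1);
  }
  return counts;
}

function similarity(a: string, b: string): number {
  const ca = termCounts(a);
  const cb = termCounts(b);
  let dot = 0;
  for (const [word, n] of ca) dot += n * (cb.get(word) ?? 0);
  const norm = (c: Map<string, number>) =>
    Math.sqrt([...c.values()].reduce((s, n) => s + n * n, 0));
  const denom = norm(ca) * norm(cb);
  return denom === 0 ? 0 : Math.round((dot / denom) * 100);
}

// maxSimilarity would then be the top similarity() score across all
// compared pages, matching the 16 reported for miri-era above.
```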