Agent Foundations
agent-foundations · approach
Path: /knowledge-base/responses/agent-foundations/
Entity ID (EID): E584
Page Record
database.json — merged from MDX frontmatter, Entity YAML, and computed metrics at build time; a sketch of the merge follows, then the record itself.
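A minimal sketch of that merge, assuming a gray-matter-style frontmatter parser. Everything here except the output field names is an assumption about the build pipeline, not its actual API.

```ts
import { readFileSync } from "node:fs";
import matter from "gray-matter";

type Json = Record<string, unknown>;

// Stand-in for the real metrics pass: derive a few counts from the MDX body.
function computeMetrics(body: string): Json {
  return {
    wordCount: body.split(/\s+/).filter(Boolean).length,
    internalLinks: (body.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (body.match(/\]\(https?:\/\//g) ?? []).length,
  };
}

// Merge order is a guess: frontmatter first, entity YAML second, computed
// metrics last, so later sources win on key collisions. The record also
// duplicates wordCount at the top level, mirrored here.
export function buildPageRecord(mdxPath: string, entity: Json): Json {
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));
  const metrics = computeMetrics(content);
  return { ...frontmatter, ...entity, metrics, wordCount: metrics.wordCount };
}
```

The merged record for this page: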
{
"id": "agent-foundations",
"wikiId": "E584",
"path": "/knowledge-base/responses/agent-foundations/",
"filePath": "knowledge-base/responses/agent-foundations.mdx",
"title": "Agent Foundations",
"quality": 59,
"readerImportance": 26,
"researchImportance": 38.5,
"tacticalValue": null,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2025-12-28",
"dateCreated": "2026-02-15",
"summary": "Agent foundations research (MIRI's mathematical frameworks for aligned agency) faces low tractability after 10+ years with core problems unsolved, leading to MIRI's 2024 strategic pivot away from the field. Assessment shows ~15-25% probability the work is essential, 60-75% confidence in low tractability, with value 3-5x higher under long timeline assumptions.",
"description": "Agent foundations research develops mathematical frameworks for understanding aligned agency, including embedded agency, decision theory, logical induction, and corrigibility.",
"ratings": {
"novelty": 4.5,
"rigor": 6,
"completeness": 7,
"actionability": 5.5
},
"category": "responses",
"subcategory": "alignment-theoretical",
"clusters": [
"ai-safety"
],
"metrics": {
"wordCount": 2156,
"tableCount": 9,
"diagramCount": 1,
"internalLinks": 38,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.12,
"sectionCount": 25,
"hasOverview": true,
"structuralScore": 12
},
"suggestedQuality": 80,
"updateFrequency": 90,
"evergreen": true,
"wordCount": 2156,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 20,
"backlinkCount": 8,
"hallucinationRisk": {
"level": "medium",
"score": 50,
"factors": [
"no-citations",
"few-external-sources",
"conceptual-content"
]
},
"entityType": "approach",
"redundancy": {
"maxSimilarity": 16,
"similarPages": [
{
"id": "research-agendas",
"title": "AI Alignment Research Agenda Comparison",
"path": "/knowledge-base/responses/research-agendas/",
"similarity": 16
},
{
"id": "technical-research",
"title": "Technical AI Safety Research",
"path": "/knowledge-base/responses/technical-research/",
"similarity": 16
},
{
"id": "corrigibility",
"title": "Corrigibility Research",
"path": "/knowledge-base/responses/corrigibility/",
"similarity": 15
},
{
"id": "corrigibility-failure",
"title": "Corrigibility Failure",
"path": "/knowledge-base/risks/corrigibility-failure/",
"similarity": 15
},
{
"id": "instrumental-convergence",
"title": "Instrumental Convergence",
"path": "/knowledge-base/risks/instrumental-convergence/",
"similarity": 15
}
]
},
"coverage": {
"passing": 8,
"total": 13,
"targets": {
"tables": 9,
"diagrams": 1,
"internalLinks": 17,
"externalLinks": 11,
"footnotes": 6,
"references": 6
},
"actuals": {
"tables": 9,
"diagrams": 1,
"internalLinks": 38,
"externalLinks": 0,
"footnotes": 0,
"references": 12,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "green",
"diagrams": "green",
"internalLinks": "green",
"externalLinks": "red",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:4.5 R:6 A:5.5 C:7"
},
"readerRank": 481,
"researchRank": 355,
"recommendedScore": 141.66
}
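Some of the computed blocks in the record imply simple derivations. For hallucinationRisk, one plausible reading is an additive factor score; the weights and level thresholds below are invented, chosen only so the example reproduces this page's score of 50 and level "medium".

```ts
// Hypothetical weights per risk factor; not the site's actual values.
const RISK_WEIGHTS: Record<string, number> = {
  "no-citations": 20,
  "few-external-sources": 15,
  "conceptual-content": 15,
};

function hallucinationRisk(factors: string[]) {
  const score = Math.min(
    100,
    factors.reduce((sum, f) => sum + (RISK_WEIGHTS[f] ?? 0), 0),
  );
  // Assumed thresholds: 70+ high, 40+ medium, else low.
  const level = score >= 70 ? "high" : score >= 40 ? "medium" : "low";
  return { level, score, factors };
}

// For this page: { level: "medium", score: 50, factors: [...] }
hallucinationRisk(["no-citations", "few-external-sources", "conceptual-content"]);
```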
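The redundancy block reports pairwise page similarity on a 0-100 scale. The record does not say how it is measured; a bag-of-words cosine similarity is one plausible stand-in.

```ts
function termFreqs(text: string): Map<string, number> {
  const freqs = new Map<string, number>();
  for (const word of text.toLowerCase().match(/[a-z]+/g) ?? []) {
    freqs.set(word, (freqs.get(word) ?? 0) + 1);
  }
  return freqs;
}

// Cosine similarity of term-frequency vectors, scaled to 0-100.
function similarity(a: string, b: string): number {
  const fa = termFreqs(a);
  const fb = termFreqs(b);
  let dot = 0, na = 0, nb = 0;
  for (const [word, count] of fa) {
    dot += count * (fb.get(word) ?? 0);
    na += count * count;
  }
  for (const count of fb.values()) nb += count * count;
  return Math.round((100 * dot) / Math.sqrt(na * nb || 1));
}
```

maxSimilarity would then be this page's highest score against any other page, and similarPages the top few matches.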
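The coverage block looks the most mechanical: each count-based item appears to go green when the actual meets or exceeds the target (internalLinks at 38 against a target of 17 is green; externalLinks at 0 against a target of 11 is red). A sketch under that assumption:

```ts
type Status = "green" | "red";

// Assumed rule, consistent with the record: actual >= target passes.
function coverageItems(
  targets: Record<string, number>,
  actuals: Record<string, number>,
): Record<string, Status> {
  const items: Record<string, Status> = {};
  for (const [key, target] of Object.entries(targets)) {
    items[key] = (actuals[key] ?? 0) >= target ? "green" : "red";
  }
  return items;
}
```

The non-count items (summary, editHistory, quotes, accuracy, and so on) presumably have their own checks; with this page's eight greens across thirteen items, the rule matches the reported passing: 8, total: 13.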
External Links
{
"lesswrong": "https://www.lesswrong.com/tag/agent-foundations",
"stampy": "https://aisafety.info/questions/8Iup/What-is-agent-foundations",
"alignmentForum": "https://www.alignmentforum.org/tag/agent-foundations"
}
Backlinks (8)
| id | title | type | relationship |
|---|---|---|---|
| deep-learning-era | Deep Learning Revolution (2012-2020) | historical | — |
| miri-era | The MIRI Era (2000-2015) | historical | — |
| capability-alignment-race | Capability-Alignment Race Model | analysis | — |
| miri | Machine Intelligence Research Institute (MIRI) | organization | — |
| alignment-theoretical-overview | Theoretical Foundations (Overview) | concept | — |
| research-agendas | AI Alignment Research Agenda Comparison | crux | — |
| doomer | AI Doomer Worldview | concept | — |
| long-timelines | Long-Timelines Technical Worldview | concept | — |
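The backlink list is presumably the inverse of the converted internal links: any page whose links include this page's id appears here. A sketch with an invented Page shape:

```ts
interface Page {
  id: string;
  title: string;
  links: string[]; // ids of pages this page links to
}

// All pages that link to the target page, excluding the page itself.
function backlinksTo(targetId: string, pages: Page[]): Page[] {
  return pages.filter((p) => p.id !== targetId && p.links.includes(targetId));
}
```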