Open Source AI Safety
ID: open-source · Entity type: approach
Path: /knowledge-base/responses/open-source/
Entity ID (EID): E474
Page Record: database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
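The merge step itself is not shown on this page. A minimal TypeScript sketch of how such a record could be assembled at build time is below; the function name, the field subset, and the last-wins precedence (computed metrics over Entity YAML over MDX frontmatter) are assumptions for illustration, not the site's actual build code.

```ts
// Hypothetical sketch: build a page record by shallow-merging three sources.
interface PageRecord {
  id: string;
  [key: string]: unknown; // remaining fields as in the record shown below
}

function buildPageRecord(
  frontmatter: Record<string, unknown>,     // parsed from the page's .mdx frontmatter
  entityYaml: Record<string, unknown>,      // parsed from the Entity YAML file
  computedMetrics: Record<string, unknown>, // word counts, link counts, coverage, etc.
): PageRecord {
  // Later sources override earlier ones; the exact precedence is an assumption.
  return { ...frontmatter, ...entityYaml, ...computedMetrics } as PageRecord;
}

// Example: assembling a fragment of this page's record.
const record = buildPageRecord(
  { id: "open-source", title: "Open Source AI Safety" },
  { wikiId: "E474", entityType: "approach" },
  { wordCount: 1966, internalLinks: 58 },
);
console.log(JSON.stringify(record, null, 2));
```

The full merged record for this page follows.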
{
  "id": "open-source",
  "wikiId": "E474",
  "path": "/knowledge-base/responses/open-source/",
  "filePath": "knowledge-base/responses/open-source.mdx",
  "title": "Open Source AI Safety",
  "quality": 62,
  "readerImportance": 48.5,
  "researchImportance": 47.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2025-12-28",
  "dateCreated": "2026-02-15",
  "summary": "Comprehensive analysis showing open-source AI poses irreversible safety risks (fine-tuning removes safeguards with just 200 examples) while providing research access and reducing concentration—with current U.S. policy (July 2024 NTIA) recommending monitoring without restrictions. The page identifies four key cruxes (marginal risk assessment, capability thresholds, compute bottlenecks, concentration risk) that determine whether open release is net positive, concluding that evidence is contested but risks are quantifiable and non-trivial.",
  "description": "This analysis evaluates whether releasing AI model weights publicly is net positive or negative for safety.",
  "ratings": {
    "novelty": 4.5,
    "rigor": 6.5,
    "completeness": 7.5,
    "actionability": 7
  },
  "category": "responses",
  "subcategory": "organizational-practices",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 1966,
    "tableCount": 18,
    "diagramCount": 1,
    "internalLinks": 58,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.09,
    "sectionCount": 33,
    "hasOverview": true,
    "structuralScore": 12
  },
  "suggestedQuality": 80,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 1966,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 56,
  "backlinkCount": 0,
  "hallucinationRisk": {
    "level": "medium",
    "score": 50,
    "factors": [
      "no-citations",
      "few-external-sources",
      "conceptual-content"
    ]
  },
  "entityType": "approach",
  "redundancy": {
    "maxSimilarity": 12,
    "similarPages": [
      {
        "id": "open-vs-closed",
        "title": "Open vs Closed Source AI",
        "path": "/knowledge-base/debates/open-vs-closed/",
        "similarity": 12
      },
      {
        "id": "output-filtering",
        "title": "AI Output Filtering",
        "path": "/knowledge-base/responses/output-filtering/",
        "similarity": 12
      },
      {
        "id": "collective-intelligence",
        "title": "Collective Intelligence / Coordination",
        "path": "/knowledge-base/intelligence-paradigms/collective-intelligence/",
        "similarity": 11
      },
      {
        "id": "anthropic-impact",
        "title": "Anthropic Impact Assessment Model",
        "path": "/knowledge-base/models/anthropic-impact/",
        "similarity": 11
      },
      {
        "id": "corrigibility-failure-pathways",
        "title": "Corrigibility Failure Pathways",
        "path": "/knowledge-base/models/corrigibility-failure-pathways/",
        "similarity": 11
      }
    ]
  },
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 8,
      "diagrams": 1,
      "internalLinks": 16,
      "externalLinks": 10,
      "footnotes": 6,
      "references": 6
    },
    "actuals": {
      "tables": 18,
      "diagrams": 1,
      "internalLinks": 58,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 22,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.5 R:6.5 A:7 C:7.5"
  },
  "readerRank": 310,
  "researchRank": 297,
  "recommendedScore": 158.87
}

External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/open-source-ai"
}

Backlinks (0)
No backlinks