AI Misuse Risk Cruxes
Slug: misuse-risks · Type: crux · Path: /knowledge-base/cruxes/misuse-risks/
Entity ID (EID): E392
Page Record — database.json, merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "misuse-risks",
"wikiId": "E392",
"path": "/knowledge-base/cruxes/misuse-risks/",
"filePath": "knowledge-base/cruxes/misuse-risks.mdx",
"title": "AI Misuse Risk Cruxes",
"quality": 65,
"readerImportance": 81.5,
"researchImportance": 86,
"tacticalValue": 58,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-03-17",
"dateCreated": "2026-02-15",
"summary": "Comprehensive analysis of AI misuse cruxes with quantified evidence across bioweapons (RAND bio study found no significant difference; novice uplift studies show modest gains on in silico tasks), cyber (CTF scores improved 27%→76%; state actors confirmed using AI for lateral movement), disinformation (8M deepfakes projected 2025; human detection at 24.5%), agentic/prompt injection risks (new vector), and open-weight model worst-case analysis. Covers preparedness frameworks, instruction hierarchy improvements, and the commercial-pressure erosion problem. Framework maps uncertainties to policy responses with probability ranges.",
"description": "Key uncertainties that determine views on AI misuse risks, including capability uplift, offense-defense balance, agentic misuse vectors, open-weight model risks, and mitigation effectiveness across bioweapons, cyberweapons, disinformation, and autonomous systems",
"ratings": {
"novelty": 5.8,
"rigor": 6.5,
"completeness": 7,
"actionability": 7.2
},
"category": "cruxes",
"subcategory": null,
"clusters": [
"ai-safety",
"biorisks",
"cyber",
"governance"
],
"metrics": {
"wordCount": 5925,
"tableCount": 18,
"diagramCount": 1,
"internalLinks": 35,
"externalLinks": 88,
"footnoteCount": 0,
"bulletRatio": 0.06,
"sectionCount": 39,
"hasOverview": true,
"structuralScore": 15
},
"suggestedQuality": 100,
"updateFrequency": 7,
"evergreen": true,
"wordCount": 5925,
"unconvertedLinks": [
{
"text": "AI Incident Database",
"url": "https://incidentdatabase.ai/",
"resourceId": "baac25fa61cb2244",
"resourceTitle": "AI Incident Database"
},
{
"text": "RAND",
"url": "https://www.rand.org/pubs/research_reports/RRA2977-2.html",
"resourceId": "0fe4cfa7ca5f2270",
"resourceTitle": "RAND Corporation study"
},
{
"text": "lesswrong.com",
"url": "https://www.lesswrong.com/posts/DJB82jKwgJE5NsWgT/some-cruxes-on-impactful-alternatives-to-ai-policy-work",
"resourceId": "6633b1a43f70e5fe",
"resourceTitle": "Some cruxes on impactful alternatives to AI policy work"
},
{
"text": "RAND Red-Team Study",
"url": "https://www.rand.org/pubs/research_reports/RRA2977-2.html",
"resourceId": "0fe4cfa7ca5f2270",
"resourceTitle": "RAND Corporation study"
},
{
"text": "Deepstrike Research",
"url": "https://deepstrike.io/blog/deepfake-statistics-2025",
"resourceId": "d786af9f7b112dc6",
"resourceTitle": "Deepfake Statistics 2025 – Deepstrike"
},
{
"text": "Deepstrike 2025",
"url": "https://deepstrike.io/blog/deepfake-statistics-2025",
"resourceId": "d786af9f7b112dc6",
"resourceTitle": "Deepfake Statistics 2025 – Deepstrike"
},
{
"text": "C2PA",
"url": "https://c2pa.org/",
"resourceId": "ff89bed1f7960ab2",
"resourceTitle": "C2PA Explainer Videos"
},
{
"text": "Congressional Research Service analysis",
"url": "https://www.congress.gov/crs-product/IF11150",
"resourceId": "65548750e4511847",
"resourceTitle": "Section 1066 of the FY2025 NDAA"
},
{
"text": "ASIL Insights",
"url": "https://www.asil.org/insights/volume/29/issue/1",
"resourceId": "461296b9a5df30f5",
"resourceTitle": "December 2024 UN General Assembly resolution"
},
{
"text": "RAND Corporation",
"url": "https://www.rand.org/topics/artificial-intelligence.html",
"resourceId": "cf5fd74e8db11565",
"resourceTitle": "RAND: AI and National Security"
},
{
"text": "Georgetown CSET",
"url": "https://cset.georgetown.edu/",
"resourceId": "f0d95954b449240a",
"resourceTitle": "CSET: AI Market Dynamics"
},
{
"text": "CNAS",
"url": "https://www.cnas.org/",
"resourceId": "58f6946af0177ca5",
"resourceTitle": "Center for a New American Security (CNAS) - Homepage"
},
{
"text": "UN CCW GGE on LAWS",
"url": "https://meetings.unoda.org/ccw/convention-on-certain-conventional-weapons-group-of-governmental-experts-on-lethal-autonomous-weapons-systems-2025",
"resourceId": "c5cc338fe2a44f23",
"resourceTitle": "March and September 2025"
},
{
"text": "Congressional Research Service",
"url": "https://www.congress.gov/crs-product/IF11150",
"resourceId": "65548750e4511847",
"resourceTitle": "Section 1066 of the FY2025 NDAA"
},
{
"text": "ASIL",
"url": "https://www.asil.org/insights/volume/29/issue/1",
"resourceId": "461296b9a5df30f5",
"resourceTitle": "December 2024 UN General Assembly resolution"
},
{
"text": "Deepstrike Research",
"url": "https://deepstrike.io/blog/deepfake-statistics-2025",
"resourceId": "d786af9f7b112dc6",
"resourceTitle": "Deepfake Statistics 2025 – Deepstrike"
},
{
"text": "C2PA Coalition",
"url": "https://c2pa.org/",
"resourceId": "ff89bed1f7960ab2",
"resourceTitle": "C2PA Explainer Videos"
}
],
"unconvertedLinkCount": 17,
"convertedLinkCount": 12,
"backlinkCount": 13,
"hallucinationRisk": {
"level": "medium",
"score": 45,
"factors": [
"no-citations",
"conceptual-content"
]
},
"entityType": "crux",
"redundancy": {
"maxSimilarity": 20,
"similarPages": [
{
"id": "agentic-ai",
"title": "Agentic AI",
"path": "/knowledge-base/capabilities/agentic-ai/",
"similarity": 20
},
{
"id": "bioweapons-ai-uplift",
"title": "AI Uplift Assessment Model",
"path": "/knowledge-base/models/bioweapons-ai-uplift/",
"similarity": 20
},
{
"id": "bioweapons-attack-chain",
"title": "Bioweapons Attack Chain Model",
"path": "/knowledge-base/models/bioweapons-attack-chain/",
"similarity": 20
},
{
"id": "accident-risks",
"title": "AI Accident Risk Cruxes",
"path": "/knowledge-base/cruxes/accident-risks/",
"similarity": 19
},
{
"id": "solutions",
"title": "AI Safety Solution Cruxes",
"path": "/knowledge-base/cruxes/solutions/",
"similarity": 19
}
]
},
"changeHistory": [
{
"date": "2026-03-17",
"branch": "auto-update/2026-03-17",
"title": "Auto-improve (standard): AI Misuse Risk Cruxes",
"summary": "Improved \"AI Misuse Risk Cruxes\" via standard pipeline (482.6s).",
"duration": "482.6s",
"cost": "$5-8"
}
],
"coverage": {
"passing": 7,
"total": 13,
"targets": {
"tables": 24,
"diagrams": 2,
"internalLinks": 47,
"externalLinks": 30,
"footnotes": 18,
"references": 18
},
"actuals": {
"tables": 18,
"diagrams": 1,
"internalLinks": 35,
"externalLinks": 88,
"footnotes": 0,
"references": 21,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "amber",
"diagrams": "amber",
"internalLinks": "amber",
"externalLinks": "green",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 1,
"ratingsString": "N:5.8 R:6.5 A:7.2 C:7"
},
"readerRank": 76,
"researchRank": 47,
"recommendedScore": 189.79
}

External Links
{
"lesswrong": "https://www.lesswrong.com/tag/ai-misuse",
"eightyK": "https://80000hours.org/problem-profiles/catastrophic-ai-misuse/"
}

Backlinks (13)
| id | title | type | relationship |
|---|---|---|---|
| epistemic-risks | AI Epistemic Cruxes | crux | — |
| __index__/knowledge-base/cruxes | Key Cruxes | concept | — |
| openclaw-matplotlib-incident-2026 | OpenClaw Matplotlib Incident (2026) | concept | — |
| 1day-sooner | 1Day Sooner | organization | — |
| nti-bio | NTI \| bio (Nuclear Threat Initiative - Biological Program) | organization | — |
| securebio | SecureBio | organization | — |
| situational-awareness-lp | Situational Awareness LP | organization | — |
| ssi | Safe Superintelligence Inc. (SSI) | organization | — |
| california-sb53 | California SB 53 | policy | — |
| compute-governance | Compute Governance: AI Chips Export Controls Policy | concept | — |
| model-registries | Model Registries | concept | — |
| deepfakes | Deepfakes | risk | — |
| fraud | AI-Powered Fraud | risk | — |