AI Uplift Assessment Model
bioweapons-ai-uplift · analysis · Path: /knowledge-base/models/bioweapons-ai-uplift/
E43 — Entity ID (EID)
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "bioweapons-ai-uplift",
"wikiId": "E43",
"path": "/knowledge-base/models/bioweapons-ai-uplift/",
"filePath": "knowledge-base/models/bioweapons-ai-uplift.mdx",
"title": "AI Uplift Assessment Model",
"quality": 70,
"readerImportance": 75.5,
"researchImportance": 61.5,
"tacticalValue": null,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-03-17",
"dateCreated": "2026-02-15",
"summary": "Quantitative assessment estimating AI provides modest knowledge uplift for bioweapons (1.0-1.2x per RAND 2024) but more substantial evasion capabilities (2-3x, potentially 7-10x by 2028). The Virology Capabilities Test (VCT, 2025) found frontier models outperform most human virology experts on tacit knowledge tasks (o3 at 95th percentile), though a concurrent wet-lab RCT found mid-2025 LLMs did not substantially increase novice completion of complex laboratory procedures. OpenAI's April 2025 Preparedness Framework v2 anticipates models reaching 'High' biological capability thresholds in the near term. The OpenAI-LANL partnership extended biosecurity evaluation from text tasks to physical lab settings, and OpenAI's 'Building an Early Warning System for LLM-Aided Biological Threat Creation' study found GPT-4 provides at most mild uplift under controlled conditions. Recommends prioritizing adaptive DNA synthesis screening over information restriction, given asymmetry where evasion capabilities advance faster than synthesis knowledge.",
"description": "This model estimates AI's marginal contribution to bioweapons risk over time. It projects uplift increasing from 1.3-2.5x (2024) to 3-5x by 2030, with biosecurity evasion capabilities posing the greatest concern as they could undermine existing defenses before triggering policy response. Recent benchmarks show frontier models surpassing expert-level virology knowledge, while wet-lab RCTs suggest a gap between in silico performance and physical laboratory utility.",
"ratings": {
"focus": 8.5,
"novelty": 6.5,
"rigor": 7,
"completeness": 8,
"concreteness": 7.5,
"actionability": 7
},
"category": "models",
"subcategory": "domain-models",
"clusters": [
"ai-safety",
"biorisks",
"governance"
],
"metrics": {
"wordCount": 7887,
"tableCount": 14,
"diagramCount": 1,
"internalLinks": 46,
"externalLinks": 38,
"footnoteCount": 2,
"bulletRatio": 0.13,
"sectionCount": 41,
"hasOverview": true,
"structuralScore": 15
},
"suggestedQuality": 100,
"updateFrequency": 90,
"evergreen": true,
"wordCount": 7887,
"unconvertedLinks": [
{
"text": "collaborated on a first-of-its-kind joint evaluation",
"url": "https://openai.com/index/openai-anthropic-safety-evaluation/",
"resourceId": "cc554bd1593f0504",
"resourceTitle": "2025 OpenAI-Anthropic joint evaluation"
},
{
"text": "Future of Life Institute 2025 AI Safety Index",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
},
{
"text": "RAND Corporation (2024)",
"url": "https://www.rand.org/pubs/research_reports/RRA2977-2.html",
"resourceId": "0fe4cfa7ca5f2270",
"resourceTitle": "RAND Corporation study"
},
{
"text": "\"Building an Early Warning System for LLM-Aided Biological Threat Creation\"",
"url": "https://openai.com/index/building-an-early-warning-system-for-llm-aided-biological-threat-creation/",
"resourceId": "2f918741de446a84",
"resourceTitle": "Building an early warning system for LLM-aided biological threat creation"
},
{
"text": "Preparedness Framework Version 2",
"url": "https://openai.com/index/updating-our-preparedness-framework/",
"resourceId": "ded0b05862511312",
"resourceTitle": "Preparedness Framework"
},
{
"text": "OpenAI-Anthropic Joint Evaluation",
"url": "https://openai.com/index/openai-anthropic-safety-evaluation/",
"resourceId": "cc554bd1593f0504",
"resourceTitle": "2025 OpenAI-Anthropic joint evaluation"
},
{
"text": "Future of Life Institute",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
},
{
"text": "\"Building an Early Warning System for LLM-Aided Biological Threat Creation.\"",
"url": "https://openai.com/index/building-an-early-warning-system-for-llm-aided-biological-threat-creation/",
"resourceId": "2f918741de446a84",
"resourceTitle": "Building an early warning system for LLM-aided biological threat creation"
}
],
"unconvertedLinkCount": 8,
"convertedLinkCount": 12,
"backlinkCount": 4,
"hallucinationRisk": {
"level": "medium",
"score": 35,
"factors": [
"low-citation-density",
"high-rigor"
]
},
"entityType": "analysis",
"redundancy": {
"maxSimilarity": 25,
"similarPages": [
{
"id": "bioweapons",
"title": "Bioweapons",
"path": "/knowledge-base/risks/bioweapons/",
"similarity": 25
},
{
"id": "agentic-ai",
"title": "Agentic AI",
"path": "/knowledge-base/capabilities/agentic-ai/",
"similarity": 20
},
{
"id": "scientific-research",
"title": "Scientific Research Capabilities",
"path": "/knowledge-base/capabilities/scientific-research/",
"similarity": 20
},
{
"id": "misuse-risks",
"title": "AI Misuse Risk Cruxes",
"path": "/knowledge-base/cruxes/misuse-risks/",
"similarity": 20
},
{
"id": "solutions",
"title": "AI Safety Solution Cruxes",
"path": "/knowledge-base/cruxes/solutions/",
"similarity": 20
}
]
},
"changeHistory": [
{
"date": "2026-03-17",
"branch": "auto-update/2026-03-17",
"title": "Auto-improve (standard): AI Uplift Assessment Model",
"summary": "Improved \"AI Uplift Assessment Model\" via standard pipeline (1473.8s). Quality score: 82. Issues resolved: Footnote [^3] is referenced multiple times in prose but neve; EntityLink on 'openai' appears twice with slightly different; The 'Sources' section uses inline markdown links for some re.",
"duration": "1473.8s",
"cost": "$5-8"
}
],
"coverage": {
"passing": 5,
"total": 13,
"targets": {
"tables": 32,
"diagrams": 3,
"internalLinks": 63,
"externalLinks": 39,
"footnotes": 24,
"references": 24
},
"actuals": {
"tables": 14,
"diagrams": 1,
"internalLinks": 46,
"externalLinks": 38,
"footnotes": 2,
"references": 5,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "amber",
"diagrams": "amber",
"internalLinks": "amber",
"externalLinks": "amber",
"footnotes": "amber",
"references": "amber",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 1,
"ratingsString": "N:6.5 R:7 A:7 C:8"
},
"readerRank": 116,
"researchRank": 209,
"recommendedScore": 196.79
}
External Links
No external links
Backlinks (4)
| id | title | type | relationship |
|---|---|---|---|
| bioweapons-attack-chain | Bioweapons Attack Chain Model | analysis | — |
| bioweapons-timeline | AI-Bioweapons Timeline Model | analysis | — |
| risk-activation-timeline | Risk Activation Timeline Model | analysis | — |
| risk-interaction-matrix | Risk Interaction Matrix Model | analysis | — |