AI Surveillance and US Democratic Erosion
us-ai-surveillance-democratic-erosion (risk)
Path: /knowledge-base/risks/us-ai-surveillance-democratic-erosion/
E1003 — Entity ID (EID)
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "us-ai-surveillance-democratic-erosion",
"wikiId": "E1003",
"path": "/knowledge-base/risks/us-ai-surveillance-democratic-erosion/",
"filePath": "knowledge-base/risks/us-ai-surveillance-democratic-erosion.mdx",
"title": "AI Surveillance and US Democratic Erosion",
"quality": 55,
"readerImportance": 85,
"researchImportance": 75,
"tacticalValue": 90,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-03-20",
"dateCreated": null,
"summary": "Analysis of how data centralization, oversight dismantlement, and AI capability acquisition by the US government create near-term threats to democratic processes. Documents the Anthropic-Pentagon standoff as a crystallizing moment, current administration actions (100+ targeted opponents, national citizenship database, Palantir contracts, DOGE AI surveillance of federal workers, gutted oversight boards), legal loopholes enabling warrantless bulk data collection, how AI changes surveillance economics, five threat scenarios for the 2026 midterms with probability estimates, and countervailing forces including courts and betting-market-favored Democratic House win.",
"description": "The convergence of data centralization, oversight dismantlement, and AI surveillance capability acquisition by the current US administration poses near-term risks to democratic processes.",
"ratings": {
"focus": 9,
"novelty": 8,
"rigor": 6.5,
"completeness": 7,
"concreteness": 9,
"actionability": 7,
"objectivity": 6.5
},
"category": "risks",
"subcategory": "governance",
"clusters": [
"ai-safety",
"governance",
"community"
],
"metrics": {
"wordCount": 3733,
"tableCount": 2,
"diagramCount": 0,
"internalLinks": 11,
"externalLinks": 9,
"footnoteCount": 0,
"bulletRatio": 0.37,
"sectionCount": 21,
"hasOverview": true,
"structuralScore": 13
},
"suggestedQuality": 87,
"updateFrequency": 7,
"evergreen": true,
"wordCount": 3733,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 0,
"backlinkCount": 1,
"hallucinationRisk": {
"level": "medium",
"score": 55,
"factors": [
"no-citations"
]
},
"entityType": "risk",
"redundancy": {
"maxSimilarity": 20,
"similarPages": [
{
"id": "disinformation-electoral-impact",
"title": "Electoral Impact Assessment Model",
"path": "/knowledge-base/models/disinformation-electoral-impact/",
"similarity": 20
},
{
"id": "authoritarian-tools-diffusion",
"title": "Authoritarian Tools Diffusion Model",
"path": "/knowledge-base/models/authoritarian-tools-diffusion/",
"similarity": 15
},
{
"id": "whistleblower-dynamics",
"title": "Whistleblower Dynamics Model",
"path": "/knowledge-base/models/whistleblower-dynamics/",
"similarity": 15
},
{
"id": "epistemic-security",
"title": "AI-Era Epistemic Security",
"path": "/knowledge-base/responses/epistemic-security/",
"similarity": 15
},
{
"id": "failed-stalled-proposals",
"title": "Failed and Stalled AI Policy Proposals",
"path": "/knowledge-base/responses/failed-stalled-proposals/",
"similarity": 15
}
]
},
"coverage": {
"passing": 4,
"total": 13,
"targets": {
"tables": 15,
"diagrams": 1,
"internalLinks": 30,
"externalLinks": 19,
"footnotes": 11,
"references": 11
},
"actuals": {
"tables": 2,
"diagrams": 0,
"internalLinks": 11,
"externalLinks": 9,
"footnotes": 0,
"references": 0,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "amber",
"diagrams": "red",
"internalLinks": "amber",
"externalLinks": "amber",
"footnotes": "red",
"references": "red",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:8 R:6.5 A:7 C:7"
},
"readerRank": 53,
"researchRank": 120,
"recommendedScore": 171.98
}
External Links
No external links
Backlinks (1)
| id | title | type | relationship |
|---|---|---|---|
| near-term-risks | Key Near-Term AI Risks | risk | — |