Corporate Influence on AI Policy
Slug: corporate-influence
Entity type: crux
Path: /knowledge-base/responses/corporate-influence/
Entity ID (EID): E78

Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
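In rough terms, that merge could look like the sketch below. This is illustrative only: the actual build script is not shown on this page, and the function name (`buildPageRecord`), parameter names, and package choices (`gray-matter`, `js-yaml`) are assumptions.

```typescript
// Hypothetical sketch of the build-time merge described above; names and
// package choices are assumed, not taken from the site's actual build code.
import fs from "node:fs";
import matter from "gray-matter"; // frontmatter parser (assumed choice)
import yaml from "js-yaml";       // YAML parser (assumed choice)

function buildPageRecord(mdxPath: string, entityYamlPath: string) {
  // 1. Authored fields (title, summary, ratings, ...) come from MDX frontmatter.
  const { data: frontmatter, content } = matter(fs.readFileSync(mdxPath, "utf8"));

  // 2. Identity fields (wikiId, entityType, ...) come from the Entity YAML.
  const entity = yaml.load(
    fs.readFileSync(entityYamlPath, "utf8"),
  ) as Record<string, unknown>;

  // 3. Metrics are computed from the page content at build time.
  const metrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\]\(\/knowledge-base\//g) ?? []).length,
  };

  // Later sources win on key collisions: frontmatter < entity < computed.
  return { ...frontmatter, ...entity, metrics };
}
```

The merged record for this page: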
{
  "id": "corporate-influence",
  "wikiId": "E78",
  "path": "/knowledge-base/responses/corporate-influence/",
  "filePath": "knowledge-base/responses/corporate-influence.mdx",
  "title": "Corporate Influence on AI Policy",
  "quality": 66,
  "readerImportance": 23,
  "researchImportance": 35.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "causalLevel": null,
"lastUpdated": "2025-12-28",
"dateCreated": "2026-02-15",
"summary": "Comprehensive analysis of corporate influence pathways (working inside labs, shareholder activism, whistleblowing) showing mixed effectiveness: safety teams influenced GPT-4 delays and responsible scaling policies, but ~50% of OpenAI's safety staff departed in 2024 and the November 2023 board crisis demonstrated commercial pressures override safety concerns. Provides specific compensation data (\\$115K-\\$190K for researchers), talent flow metrics (8x more likely to leave OpenAI for Anthropic), and detailed assessment that 1,500-2,500 people work in safety roles globally with 60% in SF Bay Area.",
"description": "A comprehensive analysis of directly influencing frontier AI labs through working inside them, shareholder activism, whistleblowing, and transparency advocacy.",
"ratings": {
"novelty": 4.5,
"rigor": 6.5,
"completeness": 7.5,
"actionability": 7
},
"category": "responses",
"subcategory": "field-building",
"clusters": [
"ai-safety",
"community",
"governance"
],
"metrics": {
"wordCount": 3300,
"tableCount": 6,
"diagramCount": 1,
"internalLinks": 39,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.1,
"sectionCount": 17,
"hasOverview": true,
"structuralScore": 12
},
"suggestedQuality": 80,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 3300,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 25,
"backlinkCount": 3,
"hallucinationRisk": {
"level": "medium",
"score": 50,
"factors": [
"no-citations",
"few-external-sources",
"conceptual-content"
]
},
"entityType": "crux",
"redundancy": {
"maxSimilarity": 20,
"similarPages": [
{
"id": "lab-culture",
"title": "AI Lab Safety Culture",
"path": "/knowledge-base/responses/lab-culture/",
"similarity": 20
},
{
"id": "whistleblower-dynamics",
"title": "Whistleblower Dynamics Model",
"path": "/knowledge-base/models/whistleblower-dynamics/",
"similarity": 19
},
{
"id": "metr",
"title": "METR",
"path": "/knowledge-base/organizations/metr/",
"similarity": 19
},
{
"id": "us-aisi",
"title": "US AI Safety Institute (now CAISI)",
"path": "/knowledge-base/organizations/us-aisi/",
"similarity": 19
},
{
"id": "voluntary-commitments",
"title": "Voluntary Industry Commitments",
"path": "/knowledge-base/responses/voluntary-commitments/",
"similarity": 19
}
]
},
"coverage": {
"passing": 7,
"total": 13,
"targets": {
"tables": 13,
"diagrams": 1,
"internalLinks": 26,
"externalLinks": 17,
"footnotes": 10,
"references": 10
},
"actuals": {
"tables": 6,
"diagrams": 1,
"internalLinks": 39,
"externalLinks": 0,
"footnotes": 0,
"references": 20,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "amber",
"diagrams": "green",
"internalLinks": "green",
"externalLinks": "red",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:4.5 R:6.5 A:7 C:7.5"
},
"readerRank": 502,
"researchRank": 373,
"recommendedScore": 154.33
}
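How the green/amber/red statuses in coverage.items relate to the targets and actuals is not spelled out here, but a simple rule reproduces every numeric case in the record above. A minimal sketch, assuming that rule (a guess, not documented behavior):

```typescript
// Assumed status rule, chosen only because it matches the values in this
// record: green when the target is met, amber on partial progress, red when
// the element is missing entirely.
type Status = "green" | "amber" | "red";

function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green"; // target met or exceeded
  if (actual > 0) return "amber";       // some progress, below target
  return "red";                         // nothing present
}

// Spot checks against this record:
coverageStatus(39, 26); // internalLinks -> "green"
coverageStatus(6, 13);  // tables        -> "amber"
coverageStatus(0, 17);  // externalLinks -> "red"
```

Under this rule, counting the green items in coverage.items gives 7 of 13, matching "passing": 7.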
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/working-at-ai-labs"
}

Backlinks (3)
| id | title | type | relationship |
|---|---|---|---|
| ea-shareholder-diversification-anthropic | EA Shareholder Diversification from Anthropic | concept | — |
| __index__/knowledge-base/responses | Safety Responses | concept | — |
| training-programs | AI Safety Training Programs | approach | — |