Skip to content
Longterm Wiki

OpenAI

openai · organization · Path: /knowledge-base/organizations/openai/
E218 — Entity ID (EID)
← Back to page · 259 backlinks · Quality: 62 · Updated: 2026-03-18
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "openai",
  "wikiId": "E218",
  "path": "/knowledge-base/organizations/openai/",
  "filePath": "knowledge-base/organizations/openai.mdx",
  "title": "OpenAI",
  "quality": 62,
  "readerImportance": 72.4,
  "researchImportance": 44.5,
  "tacticalValue": 92,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-03-18",
  "dateCreated": "2026-02-15",
  "summary": "Comprehensive organizational profile of OpenAI documenting evolution from 2015 non-profit to Public Benefit Corporation, with detailed analysis of governance crisis, 2024-2025 ownership restructuring (conversion from capped-profit LLC to PBC, completed October 2025), key leadership departures, and capability advancement (o1/o3 reasoning models). Updated with 2025 developments including o3-mini release, 800M weekly active users, Altman's AGI timeline statements, enterprise market share decline from 50% to 25% between 2023 and 2025, and joint safety evaluation with Anthropic in summer 2025.",
  "description": "Leading AI lab that developed GPT models and ChatGPT, analyzing organizational evolution from non-profit research to commercial AGI development amid safety-commercialization tensions",
  "ratings": {
    "focus": 7.2,
    "novelty": 3.5,
    "rigor": 5.8,
    "completeness": 7.5,
    "concreteness": 7.8,
    "actionability": 4.5,
    "objectivity": 6.5
  },
  "category": "organizations",
  "subcategory": "labs",
  "clusters": [
    "ai-safety",
    "community",
    "governance"
  ],
  "metrics": {
    "wordCount": 3991,
    "tableCount": 16,
    "diagramCount": 0,
    "internalLinks": 40,
    "externalLinks": 14,
    "footnoteCount": 41,
    "bulletRatio": 0.34,
    "sectionCount": 46,
    "hasOverview": true,
    "structuralScore": 13
  },
  "suggestedQuality": 87,
  "updateFrequency": 3,
  "evergreen": true,
  "wordCount": 3991,
  "unconvertedLinks": [
    {
      "text": "OpenAI GPT-4 System Card",
      "url": "https://cdn.openai.com/papers/gpt-4-system-card.pdf",
      "resourceId": "ebab6e05661645c5",
      "resourceTitle": "GPT-4 System Card"
    },
    {
      "text": "OpenAI Deliberative Alignment",
      "url": "https://openai.com/index/deliberative-alignment/",
      "resourceId": "ee7628aa3f6282e5",
      "resourceTitle": "Deliberative alignment: reasoning enables safer language models"
    },
    {
      "text": "Global Affairs Initiative",
      "url": "https://openai.com/global-affairs/openai-for-countries/",
      "resourceId": "238f28c96d8780f6",
      "resourceTitle": "Introducing OpenAI for Countries"
    },
    {
      "text": "Sora 2 Launch",
      "url": "https://openai.com/index/sora-2/",
      "resourceId": "edc1663b7d3b8ac2",
      "resourceTitle": "Sora 2: OpenAI's Flagship Video and Audio Generation Model"
    },
    {
      "text": "TIME Magazine Interview",
      "url": "https://time.com/7205596/sam-altman-superintelligence-agi/",
      "resourceId": "358ab98ce38cdd9c",
      "resourceTitle": "How OpenAI's Sam Altman Is Thinking About AGI and Superintelligence in 2025"
    },
    {
      "text": "OpenAI Announcement",
      "url": "https://openai.com/index/softbank-openai-joint-announcement/",
      "resourceId": "kb-b4d3ddb177146075"
    },
    {
      "text": "OpenAI and SoftBank Joint Announcement",
      "url": "https://openai.com/index/softbank-openai-joint-announcement/",
      "resourceId": "kb-b4d3ddb177146075"
    },
    {
      "text": "arXiv:2005.14165",
      "url": "https://arxiv.org/abs/2005.14165",
      "resourceId": "2cab3ea10b8b7ae2",
      "resourceTitle": "Brown et al. (2020)"
    },
    {
      "text": "arXiv:2203.02155",
      "url": "https://arxiv.org/abs/2203.02155",
      "resourceId": "1098fc60be7ca2b0",
      "resourceTitle": "Training Language Models to Follow Instructions with Human Feedback"
    },
    {
      "text": "arXiv:2312.09390",
      "url": "https://arxiv.org/abs/2312.09390",
      "resourceId": "0ba98ae3a8a72270",
      "resourceTitle": "[2312.09390] Weak-to-Strong Generalization: Eliciting Strong Capabilities With Weak Supervision"
    },
    {
      "text": "arXiv:2303.08774",
      "url": "https://arxiv.org/abs/2303.08774",
      "resourceId": "29a0882390ee7063",
      "resourceTitle": "OpenAI's GPT-4"
    }
  ],
  "unconvertedLinkCount": 11,
  "convertedLinkCount": 0,
  "backlinkCount": 259,
  "citationHealth": {
    "total": 34,
    "withQuotes": 31,
    "verified": 30,
    "accuracyChecked": 34,
    "accurate": 25,
    "inaccurate": 0,
    "avgScore": 0.9180523262869927
  },
  "hallucinationRisk": {
    "level": "medium",
    "score": 45,
    "factors": [
      "biographical-claims",
      "well-cited"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 18,
    "similarPages": [
      {
        "id": "anthropic-ipo",
        "title": "Anthropic IPO",
        "path": "/knowledge-base/organizations/anthropic-ipo/",
        "similarity": 18
      },
      {
        "id": "anthropic",
        "title": "Anthropic",
        "path": "/knowledge-base/organizations/anthropic/",
        "similarity": 18
      },
      {
        "id": "meta-ai",
        "title": "Meta AI (FAIR)",
        "path": "/knowledge-base/organizations/meta-ai/",
        "similarity": 18
      },
      {
        "id": "ssi",
        "title": "Safe Superintelligence Inc. (SSI)",
        "path": "/knowledge-base/organizations/ssi/",
        "similarity": 18
      },
      {
        "id": "xai",
        "title": "xAI",
        "path": "/knowledge-base/organizations/xai/",
        "similarity": 18
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-26",
      "branch": "claude/claims-driven-improvements",
      "title": "Auto-improve (standard): OpenAI",
      "summary": "Improved \"OpenAI\" via standard pipeline (403.2s). Quality score: 74. Issues resolved: Footnote [^4] is missing — footnotes skip from [^3] to [^5],; Footnote [^24], [^25], [^26] are missing — footnotes skip fr; Footnotes [^40] and [^41] cite sources (LessWrong OpenAI los.",
      "duration": "403.2s",
      "cost": "$5-8"
    },
    {
      "date": "2026-02-19",
      "branch": "claude/add-wiki-tables-VhyKT",
      "title": "Add concrete shareable data tables to high-value pages",
      "summary": "Added three concrete, screenshot-worthy data tables to high-value wiki pages: (1) OpenAI ownership/stakeholder table to openai.mdx showing the 2024-2025 PBC restructuring with Foundation ~26%, Microsoft transitioning from 49% profit share to ~2.5% equity, and Sam Altman's proposed 7% grant; (2) Budget and headcount comparison table to safety-orgs-overview.mdx covering MIRI, ARC, METR, Redwood Research, CAIS, Apollo Research, GovAI, Conjecture, and FAR AI with annual budgets, headcounts, and cost-per-researcher; (3) Per-company compensation comparison table to ai-talent-market-dynamics.mdx comparing Anthropic, OpenAI, Google DeepMind, xAI, Meta AI, and Microsoft Research by total comp range, base salary, equity type, and benefits including Anthropic's unique DAF matching program.",
      "model": "sonnet-4",
      "duration": "~45min"
    },
    {
      "date": "2026-02-18",
      "branch": "claude/source-unsourced-facts-RecGw",
      "title": "Source unsourced facts",
      "summary": "Sourced 25 of 30 previously unsourced facts across all 4 fact files (anthropic, sam-altman, openai, jaan-tallinn). Created 21 new resource entries in news-media.yaml and ai-labs.yaml with proper SHA256-based IDs. Added 8 new publications (Bloomberg, The Information, Quartz, Benzinga, Britannica, World, Sherwood News). Fixed date accuracy issues (Worldcoin stats from 2024 to 2025-05, OpenAI revenue from Oct to Jun 2024) and improved notes. Source coverage improved from 29% to 88%.",
      "model": "opus-4-6",
      "duration": "~45min"
    },
    {
      "date": "2026-02-18",
      "branch": "claude/review-pr-216-P4Fcu",
      "title": "Fix audit report findings from PR #216",
      "summary": "Reviewed PR #216 (comprehensive wiki audit report) and implemented fixes for the major issues it identified: fixed 181 path-style EntityLink IDs across 33 files, converted 164 broken EntityLinks (referencing non-existent entities) to plain text across 38 files, fixed a temporal inconsistency in anthropic.mdx, and added missing description fields to 53 ai-transition-model pages."
    },
    {
      "date": "2026-02-18",
      "branch": "claude/audit-webpage-errors-X4jHg",
      "title": "Audit wiki pages for factual errors and hallucinations",
      "summary": "Systematic audit of ~20 wiki pages for factual errors, hallucinations, and inconsistencies. Found and fixed 25+ confirmed errors across 17 pages, including wrong dates, fabricated statistics, false attributions, missing major events, broken entity references, misattributed techniques, and internal inconsistencies."
    }
  ],
  "coverage": {
    "passing": 11,
    "total": 13,
    "targets": {
      "tables": 16,
      "diagrams": 2,
      "internalLinks": 32,
      "externalLinks": 20,
      "footnotes": 12,
      "references": 12
    },
    "actuals": {
      "tables": 16,
      "diagrams": 0,
      "internalLinks": 40,
      "externalLinks": 14,
      "footnotes": 41,
      "references": 29,
      "quotesWithQuotes": 31,
      "quotesTotal": 34,
      "accuracyChecked": 34,
      "accuracyTotal": 34
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "amber",
      "footnotes": "green",
      "references": "green",
      "quotes": "green",
      "accuracy": "green"
    },
    "editHistoryCount": 5,
    "ratingsString": "N:3.5 R:5.8 A:4.5 C:7.5"
  },
  "readerRank": 148,
  "researchRank": 308,
  "recommendedScore": 179.39
}
External Links
{
  "wikipedia": "https://en.wikipedia.org/wiki/OpenAI",
  "lesswrong": "https://www.lesswrong.com/tag/openai",
  "wikidata": "https://www.wikidata.org/wiki/Q21708200",
  "grokipedia": "https://grokipedia.com/page/OpenAI"
}
Backlinks (259)
id | title | type | relationship
gptGPTai-modelcreated-by
gpt-3-5-turboGPT-3.5 Turboai-modelcreated-by
gpt-4GPT-4ai-modelcreated-by
gpt-4-turboGPT-4 Turboai-modelcreated-by
gpt-4oGPT-4oai-modelcreated-by
gpt-4o-miniGPT-4o miniai-modelcreated-by
o1-previewo1-previewai-modelcreated-by
o1o1ai-modelcreated-by
o1-minio1-miniai-modelcreated-by
o3-minio3-miniai-modelcreated-by
o3o3ai-modelcreated-by
gpt-4-1GPT-4.1ai-modelcreated-by
gpt-4-1-miniGPT-4.1 miniai-modelcreated-by
gpt-4-1-nanoGPT-4.1 nanoai-modelcreated-by
o4-minio4-miniai-modelcreated-by
codingAutonomous Codingcapability
language-modelsLarge Language Modelscapability
reasoningReasoning and Planningcapability
corporate-influenceCorporate Influence on AI Policycrux
governance-focusedGovernance-Focused Worldviewconcept
heavy-scaffoldingHeavy Scaffolding / Agentic Systemsconcept
deep-learning-eraDeep Learning Revolution Erahistorical
mainstream-eraMainstream Erahistorical
ai-military-deployment-iran-2026AI Military Deployment in the 2026 Iran Warevent
anthropic-government-standoffAnthropic-Pentagon Standoff (2026)event
openai-foundation-governanceOpenAI Foundation Governance Paradoxanalysis
anthropic-valuationAnthropic Valuation Analysisanalysis
musk-openai-lawsuitMusk v. OpenAI Lawsuitanalysis
elon-musk-philanthropyElon Musk (Funder)analysis
anthropic-impactAnthropic Impact Assessment Modelanalysis
technical-pathwaysAI Safety Technical Pathway Decompositionanalysis
multi-actor-landscapeAI Safety Multi-Actor Strategic Landscapeanalysis
anthropicAnthropicorganization
deepmindGoogle DeepMindorganization
xaixAIorganization
metrMETRorganization
arcAlignment Research Center (ARC)organization
uk-aisiUK AI Safety Instituteorganization
us-aisiUS AI Safety Institute (now CAISI)organization
openai-foundationOpenAI Foundationorganizationpart-of
leading-the-futureLeading the Future super PACorganization
nist-aiNIST and AI Safetyorganization
ssiSafe Superintelligence Inc. (SSI)organization
frontier-model-forumFrontier Model Forumorganization
goodfireGoodfireorganization
microsoftMicrosoft AIorganization
partnership-on-aiPartnership on AIorganization
dario-amodeiDario Amodeiperson
yonadav-shavitYonadav Shavitperson
rlhfRLHFresearch-arearesearch
scalable-oversightScalable Oversightresearch-arearesearch
voluntary-commitmentsVoluntary AI Safety Commitmentspolicy
eval-saturationEval Saturation & The Evals Gapapproach
alignmentAI Alignmentapproach
scheming-detectionScheming & Deception Detectionapproach
dangerous-cap-evalsDangerous Capability Evaluationsapproach
safety-casesAI Safety Casesapproach
ai-assistedAI-Assisted Alignmentapproach
alignment-evalsAlignment Evaluationsapproach
weak-to-strongWeak-to-Strong Generalizationapproach
preference-optimizationPreference Optimization Methodsapproach
process-supervisionProcess Supervisionapproach
refusal-trainingRefusal Trainingapproach
rspResponsible Scaling Policiesapproach
corporateCorporate AI Safety Responsesapproach
open-sourceOpen Source AI Safetyapproach
whistleblower-protectionsAI Whistleblower Protectionspolicy
debateAI Safety via Debateapproach
structured-accessStructured Access / API-Onlyapproach
tool-restrictionsTool-Use Restrictionsapproach
agentic-aiAgentic AIcapability
large-language-modelsLarge Language Modelsconcept
long-horizonLong-Horizon Autonomous Taskscapability
scientific-researchScientific Research Capabilitiescapability
situational-awarenessSituational Awarenesscapability
tool-useTool Use and Computer Usecapability
accident-risksAI Accident Risk Cruxescrux
elite-coordination-infrastructureElite Coordination Infrastructureconcept
intra-coalition-engagement-strategiesIntra-Coalition Engagement Strategiesconcept
misuse-risksAI Misuse Risk Cruxescrux
solutionsAI Safety Solution Cruxescrux
structural-risksAI Structural Risk Cruxescrux
interpretability-sufficientIs Interpretability Sufficient for Safety?crux
is-ai-xrisk-realIs AI Existential Risk Real?crux
pause-debateShould We Pause AI Development?crux
regulation-debateGovernment Regulation vs Industry Self-Governancecrux
scaling-debateIs Scaling All You Need?crux
why-alignment-hardWhy Alignment Might Be Hardargument
agi-developmentAGI Developmentconcept
agi-timelineAGI Timelineconcept
ea-longtermist-wins-lossesEA and Longtermist Wins and Lossesconcept
__index__/knowledge-base/historyHistoryconcept
miri-eraThe MIRI Era (2000-2015)historical
claude-code-espionage-2025Claude Code Espionage Incident (2025)concept
__index__/knowledge-baseKnowledge Baseconcept
light-scaffoldingLight Scaffoldingcapability
ai-compute-scaling-metricsAI Compute Scaling Metricsanalysis
ai-megaproject-infrastructureAI Megaproject Infrastructureanalysis
ai-talent-market-dynamicsAI Talent Market Dynamicsanalysis
ai-timelinesAI Timelinesconcept
bioweapons-ai-upliftAI Uplift Assessment Modelanalysis
capabilities-to-safety-pipelineCapabilities-to-Safety Pipeline Modelanalysis
capability-alignment-raceCapability-Alignment Race Modelanalysis
critical-uncertaintiesAI Risk Critical Uncertainties Modelcrux
frontier-lab-cost-structureFrontier Lab Cost Structureanalysis
goal-misgeneralization-probabilityGoal Misgeneralization Probability Modelanalysis
instrumental-convergence-frameworkInstrumental Convergence Frameworkanalysis
international-coordination-gameInternational AI Coordination Gameanalysis
intervention-effectiveness-matrixIntervention Effectiveness Matrixanalysis
intervention-timing-windowsIntervention Timing Windowsanalysis
model-organisms-of-misalignmentModel Organisms of Misalignmentanalysis
planning-for-frontier-lab-scalingPlanning for Frontier Lab Scalinganalysis
power-seeking-conditionsPower-Seeking Emergence Conditions Modelanalysis
pre-tai-capital-deploymentPre-TAI Capital Deployment: $100B-$300B+ Spending Analysisanalysis
projecting-compute-spendingProjecting Compute Spendinganalysis
racing-dynamics-impactRacing Dynamics Impact Modelanalysis
risk-activation-timelineRisk Activation Timeline Modelanalysis
risk-interaction-matrixRisk Interaction Matrix Modelanalysis
risk-interaction-networkRisk Interaction Networkanalysis
safety-research-allocationSafety Research Allocation Modelanalysis
safety-researcher-gapAI Safety Talent Supply/Demand Gap Modelanalysis
safety-spending-at-scaleSafety Spending at Scaleanalysis
scaling-lawsAI Scaling Lawsconcept
scheming-likelihood-modelScheming Likelihood Assessmentanalysis
ai-futures-projectAI Futures Projectorganization
ai-revenue-sourcesAI Revenue Sourcesorganization
anthropic-investorsAnthropic (Funder)analysis
anthropic-ipoAnthropic IPOanalysis
apart-researchApart Researchorganization
apollo-researchApollo Researchorganization
biosecurity-orgs-overviewBiosecurity Organizations (Overview)concept
bridgewater-aia-labsBridgewater AIA Labsorganization
caisCenter for AI Safety (CAIS)organization
chaiCenter for Human-Compatible AI (CHAI)organization
coefficient-givingCoefficient Givingorganization
conjectureConjectureorganization
controlaiControlAIorganization
ea-globalEA Globalorganization
epoch-aiEpoch AIorganization
far-aiFAR AIorganization
fhiFuture of Humanity Institute (FHI)organization
ford-foundationFord Foundationorganization
foresight-instituteForesight Instituteorganization
founders-fundFounders Fundorganization
frontier-ai-comparisonFrontier AI Company Comparison (2026)concept
ftxFTX (cryptocurrency exchange)organization
futuresearchFutureSearchorganization
govaiGovAIorganization
__index__/knowledge-base/organizationsOrganizationsconcept
kalshiKalshi (Prediction Market)organization
labs-overviewFrontier AI Labs (Overview)concept
lesswrongLessWrongorganization
lionheart-venturesLionheart Venturesorganization
long-term-benefit-trustAnthropic Long-Term Benefit Trustorganization
matsMATS ML Alignment Theory Scholars programorganization
meta-aiMeta AI (FAIR)organization
palisade-researchPalisade Researchorganization
pause-aiPause AIorganization
red-queen-bioRed Queen Bioorganization
redwood-researchRedwood Researchorganization
safety-orgs-overviewAI Safety Organizations (Overview)concept
schmidt-futuresSchmidt Futuresorganization
securebioSecureBioorganization
situational-awareness-lpSituational Awareness LPorganization
stanford-haiStanford HAI (Human-Centered Artificial Intelligence)organization
tsmcTSMCorganization
chris-olahChris Olahperson
connor-leahyConnor Leahyperson
dan-hendrycksDan Hendrycksperson
daniela-amodeiDaniela Amodeiperson
david-sacksDavid Sacksperson
demis-hassabisDemis Hassabisperson
dustin-moskovitzDustin Moskovitzperson
eli-liflandEli Liflandperson
eliezer-yudkowsky-predictionsEliezer Yudkowsky: Track Recordconcept
elon-musk-predictionsElon Musk: Track Recordconcept
elon-muskElon Muskperson
evan-hubingerEvan Hubingerperson
greg-brockmanGreg Brockmanperson
gwernGwern Branwenperson
helen-tonerHelen Tonerperson
holden-karnofskyHolden Karnofskyperson
ilya-sutskeverIlya Sutskeverperson
__index__/knowledge-base/peoplePeopleconcept
jack-clarkJack Clarkperson
jan-leikeJan Leikeperson
jared-kaplanJared Kaplanperson
leopold-aschenbrennerLeopold Aschenbrennerperson
max-tegmarkMax Tegmarkperson
nick-bostromNick Bostromperson
paul-christianoPaul Christianoperson
sam-altman-predictionsSam Altman: Track Recordconcept
sam-altmanSam Altmanperson
sam-mccandlishSam McCandlishperson
tom-brownTom Brownperson
vidur-kapurVidur Kapurperson
yann-lecun-predictionsYann LeCun: Track Recordconcept
ai-assisted-diplomacy-and-negotiationAI-Assisted Diplomacy and Negotiationapproach
ai-controlAI Controlresearch-area
ai-forecasting-benchmarkAI Forecasting Benchmark Tournamentproject
anthropic-core-viewsAnthropic Core Viewssafety-agenda
bletchley-declarationBletchley Declarationpolicy
california-sb1047California SB 1047policy
california-sb53California SB 53policy
constitutional-aiConstitutional AIapproach
coordination-mechanismsInternational Coordination Mechanismsconcept
coordination-techAI Governance Coordination Technologiesapproach
corrigibilityCorrigibility Researchresearch-area
deliberationAI-Assisted Deliberation Platformsapproach
effectiveness-assessmentPolicy Effectiveness Assessmentanalysis
epistemic-infrastructureAI-Era Epistemic Infrastructureapproach
epistemic-securityAI-Era Epistemic Securityapproach
eu-ai-actEU AI Actpolicy
evals-governanceEvals-Based Deployment Gatesapproach
evalsEvals & Red-teamingresearch-area
evaluation-awarenessEvaluation Awarenessapproach
evaluationAI Evaluationapproach
governance-policyAI Governance and Policycrux
international-summitsInternational AI Safety Summitsevent
interpretabilityMechanistic Interpretabilityresearch-area
intervention-portfolioAI Safety Intervention Portfolioapproach
lab-cultureAI Lab Safety Cultureapproach
maimMAIM (Mutually Assured AI Malfunction)approach
mech-interpMechanistic Interpretabilityresearch-area
model-auditingThird-Party Model Auditingapproach
model-specAI Model Specificationsapproach
output-filteringAI Output Filteringapproach
paris-ai-summitParis AI Action Summit (February 2025)policy
red-teamingRed Teamingresearch-area
research-agendasAI Alignment Research Agenda Comparisoncrux
reward-modelingReward Modelingapproach
sandboxingSandboxing / Containmentapproach
scalable-eval-approachesScalable Eval Approachesapproach
seoul-declarationSeoul AI Safety Summit Declarationpolicy
sparse-autoencodersSparse Autoencoders (SAEs)approach
stampy-aisafety-infoStampy / AISafety.infoproject
technical-researchTechnical AI Safety Researchcrux
thresholdsCompute Thresholdsconcept
training-programsAI Safety Training Programsapproach
trump-eo-14179Executive Order 14179: Removing Barriers to American Leadership in AIpolicy
us-executive-orderUS Executive Order on Safe, Secure, and Trustworthy AIpolicy
us-state-legislationUS State AI Legislationanalysis
ai-enabled-political-polarizationAI-Enabled Political Polarizationrisk
ai-welfareAI Welfare and Digital Mindsconcept
bioweaponsBioweaponsrisk
concentrated-compute-cybersecurity-riskConcentrated Compute as a Cybersecurity Riskrisk
cyber-psychosisAI-Induced Cyber Psychosisrisk
disinformationDisinformationrisk
epistemic-sycophancyEpistemic Sycophancyrisk
existential-riskExistential Risk from AIconcept
financial-stability-risks-ai-capexFinancial Stability Risks from AI Capital Expenditurerisk
knowledge-monopolyAI Knowledge Monopolyrisk
power-seekingPower-Seeking AIrisk
reward-hackingReward Hackingrisk
schemingSchemingrisk
superintelligenceSuperintelligenceconcept
winner-take-allAI Winner-Take-All Dynamicsrisk
long-timelinesLong-Timelines Technical Worldviewconcept
ai-research-workflowsAI-Assisted Research Workflows: Best Practicesconcept
Longterm Wiki