Leopold Aschenbrenner
leopold-aschenbrenner (E578)
Path: /knowledge-base/people/leopold-aschenbrenner/
Page Metadata
{
"id": "leopold-aschenbrenner",
"numericId": null,
"path": "/knowledge-base/people/leopold-aschenbrenner/",
"filePath": "knowledge-base/people/leopold-aschenbrenner.mdx",
"title": "Leopold Aschenbrenner",
"quality": 61,
"importance": 22,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-02-02",
"llmSummary": "Comprehensive biographical profile of Leopold Aschenbrenner, covering his trajectory from Columbia valedictorian to OpenAI researcher to $1.5B hedge fund founder, with detailed documentation of his controversial \"Situational Awareness\" essay predicting AGI by 2027, his disputed firing from OpenAI over security concerns, and the substantial criticisms of his epistemics and potential conflicts of interest.",
"structuredSummary": null,
"description": "Former OpenAI researcher, author of 'Situational Awareness,' and founder of AI-focused hedge fund predicting AGI by 2027",
"ratings": {
"focus": 8.5,
"novelty": 2,
"rigor": 6,
"completeness": 8,
"concreteness": 7,
"actionability": 1
},
"category": "people",
"subcategory": null,
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 3265,
"tableCount": 2,
"diagramCount": 0,
"internalLinks": 15,
"externalLinks": 75,
"footnoteCount": 73,
"bulletRatio": 0.1,
"sectionCount": 24,
"hasOverview": true,
"structuralScore": 12
},
"suggestedQuality": 80,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 3265,
"unconvertedLinks": [
{
"text": "Situational Awareness: The Decade Ahead",
"url": "https://situational-awareness.ai",
"resourceId": "1befe71d79c4d102",
"resourceTitle": "Optimistic Researchers"
},
{
"text": "Situational Awareness: The Decade Ahead",
"url": "https://situational-awareness.ai",
"resourceId": "1befe71d79c4d102",
"resourceTitle": "Optimistic Researchers"
}
],
"unconvertedLinkCount": 2,
"convertedLinkCount": 0,
"backlinkCount": 0,
"redundancy": {
"maxSimilarity": 19,
"similarPages": [
{
"id": "situational-awareness-lp",
"title": "Situational Awareness LP",
"path": "/knowledge-base/organizations/situational-awareness-lp/",
"similarity": 19
},
{
"id": "mainstream-era",
"title": "Mainstream Era (2020-Present)",
"path": "/knowledge-base/history/mainstream-era/",
"similarity": 15
},
{
"id": "ai-futures-project",
"title": "AI Futures Project",
"path": "/knowledge-base/organizations/ai-futures-project/",
"similarity": 15
},
{
"id": "ssi",
"title": "Safe Superintelligence Inc (SSI)",
"path": "/knowledge-base/organizations/ssi/",
"similarity": 15
},
{
"id": "robin-hanson",
"title": "Robin Hanson",
"path": "/knowledge-base/people/robin-hanson/",
"similarity": 15
}
]
}
}
Entity Data
{
"id": "leopold-aschenbrenner",
"type": "person",
"title": "Leopold Aschenbrenner",
"description": "Comprehensive biographical profile of Leopold Aschenbrenner, covering his trajectory from Columbia valedictorian to OpenAI researcher to $1.5B hedge fund founder, with detailed documentation of his controversial \"Situational Awareness\" essay predicting AGI by 2027, his disputed firing from OpenAI ov",
"tags": [],
"relatedEntries": [],
"sources": [],
"lastUpdated": "2026-02",
"customFields": []
}
Canonical Facts (0)
No facts for this entity
External Links
No external links
Backlinks (0)
No backlinks
Frontmatter
{
"title": "Leopold Aschenbrenner",
"description": "Former OpenAI researcher, author of 'Situational Awareness,' and founder of AI-focused hedge fund predicting AGI by 2027",
"importance": 22,
"lastEdited": "2026-02-02",
"update_frequency": 45,
"sidebar": {
"order": 50
},
"ratings": {
"focus": 8.5,
"novelty": 2,
"rigor": 6,
"completeness": 8,
"concreteness": 7,
"actionability": 1
},
"clusters": [
"ai-safety",
"governance"
],
"quality": 61,
"llmSummary": "Comprehensive biographical profile of Leopold Aschenbrenner, covering his trajectory from Columbia valedictorian to OpenAI researcher to $1.5B hedge fund founder, with detailed documentation of his controversial \"Situational Awareness\" essay predicting AGI by 2027, his disputed firing from OpenAI over security concerns, and the substantial criticisms of his epistemics and potential conflicts of interest.",
"entityType": "person"
}
Raw MDX Source
---
title: "Leopold Aschenbrenner"
description: "Former OpenAI researcher, author of 'Situational Awareness,' and founder of AI-focused hedge fund predicting AGI by 2027"
importance: 22
lastEdited: "2026-02-02"
update_frequency: 45
sidebar:
order: 50
ratings:
focus: 8.5
novelty: 2
rigor: 6
completeness: 8
concreteness: 7
actionability: 1
clusters:
- "ai-safety"
- "governance"
quality: 61
llmSummary: "Comprehensive biographical profile of Leopold Aschenbrenner, covering his trajectory from Columbia valedictorian to OpenAI researcher to $1.5B hedge fund founder, with detailed documentation of his controversial \"Situational Awareness\" essay predicting AGI by 2027, his disputed firing from OpenAI over security concerns, and the substantial criticisms of his epistemics and potential conflicts of interest."
entityType: person
---
import {EntityLink, KeyPeople, KeyQuestions, Section} from '@components/wiki';
## Quick Assessment
| Dimension | Assessment |
|-----------|------------|
| **Primary Role** | AI researcher, investor, writer |
| **Key Affiliation** | Former OpenAI Superalignment team; founder of <EntityLink id="E568">Situational Awareness LP</EntityLink> |
| **Main Contribution** | "<EntityLink id="E282">Situational Awareness</EntityLink>: The Decade Ahead" essay series predicting AGI by 2027 |
| **Controversy Level** | High - fired from OpenAI over disputed leak allegations; polarizing <EntityLink id="E399">AGI timeline</EntityLink> predictions |
| **Current Influence** | Manages \$1.5B+ hedge fund; prominent voice in AGI discourse |
## Key Links
| Source | Link |
|--------|------|
| Official Website | [forourposterity.com](https://www.forourposterity.com) |
| Wikipedia | [en.wikipedia.org](https://en.wikipedia.org/wiki/Leopold_Aschenbrenner) |
## Overview
**Leopold Aschenbrenner** (born 2001 or 2002) is a German AI researcher, former OpenAI employee, and founder of the AI-focused hedge fund Situational Awareness LP.[^1] He gained prominence after publishing the viral essay series "Situational Awareness: The Decade Ahead" in June 2024, which analyzes AI capability trends, forecasts AGI by 2027, and frames the development of superintelligent AI as a critical national security issue requiring urgent U.S. government action.[^2][^3]
Aschenbrenner graduated as valedictorian from Columbia University at age 19 in 2021, having started his studies at age 15.[^4] He joined <EntityLink id="E218">OpenAI's</EntityLink> Superalignment team in 2023, working on technical methods to align superintelligent AI systems. His tenure ended abruptly in April 2024 when he was fired over what OpenAI characterized as leaking internal information—a characterization Aschenbrenner disputes, claiming he was retaliated against for raising security concerns.[^5][^6]
Following his departure from OpenAI, Aschenbrenner leveraged his viral essay to launch Situational Awareness LP, a hedge fund focused on AGI-related investments. Backed by prominent tech figures including Stripe founders Patrick and John Collison, the fund reportedly manages over \$1.5 billion and achieved approximately 47% returns in the first half of 2025.[^7][^8] He remains a polarizing figure in AI safety circles—praised by some as prescient about AGI timelines and risks, while criticized by others for promoting what they characterize as a self-fulfilling "race to AGI" narrative with questionable epistemics.[^9][^10]
## Early Life and Education
Aschenbrenner, the son of two doctors, was born in Germany and attended the John F. Kennedy School in Berlin.[^11] He demonstrated early intellectual promise, receiving a grant from economist Tyler Cowen's Emergent Ventures program at age 17. Cowen described him as an "economics prodigy."[^12]
He enrolled at Columbia University at the unusually young age of 15, majoring in economics and mathematics-statistics. At Columbia, he co-founded the university's Effective Altruism chapter and was involved in the Columbia Debate Society.[^13][^14] He graduated as valedictorian in 2021 at age 19, delivering a pandemic-era commencement speech about navigating uncertainty and adversity.[^15]
While at Columbia and shortly after graduation, Aschenbrenner conducted research on long-run economic growth and existential risks as a research affiliate at Oxford University's Global Priorities Institute (GPI).[^16] In 2024, he and economist Philip Trammell released a working paper, "Existential Risk and Growth," which models how technological acceleration may create an "existential risk Kuznets curve"—where risk initially rises with growth but can fall under optimal policy.[^17]
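The paper's formal model is not reproduced here, but the mechanism behind the "risk Kuznets curve" can be illustrated with a stylized sketch: an annual hazard rate that scales up with overall technology and back down as safety investment catches up, with safety effort ramping up only once society is sufficiently rich. The functional forms and parameter values below are illustrative assumptions for this sketch, not the paper's model.

```python
# Stylized "existential-risk Kuznets curve": the annual hazard rises with
# overall technology and later falls as safety investment catches up.
# All functional forms and parameters are illustrative assumptions.
import numpy as np

years = np.arange(0, 201)
tech = np.exp(0.02 * years)                           # assumed 2%/yr technological growth
safety_share = 1 / (1 + np.exp(-(years - 80) / 15))   # safety effort ramps up late
safety = tech ** 1.2 * safety_share                   # assumed: safety eventually outpaces risk

hazard = 1e-4 * tech / (1 + safety)                   # annual existential hazard rate
peak_year = years[np.argmax(hazard)]
print(f"hazard: {hazard[0]:.2e} -> peak {hazard.max():.2e} around year {peak_year} "
      f"-> {hazard[-1]:.2e}")
```

Under these assumptions the hazard traces an inverted U, climbing for roughly the first five decades and then declining once safety investment grows faster than the underlying risk.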
## OpenAI and the Superalignment Team
In 2023, Aschenbrenner joined OpenAI's Superalignment team, a research initiative led by <EntityLink id="E182">Jan Leike</EntityLink> and <EntityLink id="E163">Ilya Sutskever</EntityLink> focused on developing technical methods to control AI systems that might become smarter than humans.[^18] The team's core research question was how to use weaker AI systems to supervise and align stronger ones—a critical challenge given that future superintelligent systems could be difficult for humans to directly oversee.
During his tenure, Aschenbrenner co-authored the paper "<EntityLink id="E452">Weak-to-Strong Generalization</EntityLink>: Eliciting Strong Capabilities with Weak Supervision," which proposed leveraging deep learning's generalization properties to control strong AI models using weak supervisors.[^19] The paper was presented at the 2024 International Conference on Machine Learning and has been cited over 240 times.[^20]
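As a rough illustration of that setup (not the paper's models or data), the sketch below trains a weak supervisor on a small slice of ground truth, uses its labels to train a stronger student, and reports "performance gap recovered" (PGR), the metric the paper uses to quantify how much of the weak-to-strong gap the student closes. Off-the-shelf scikit-learn classifiers stand in for the language models used in the actual experiments.

```python
# Toy version of the weak-to-strong generalization protocol: a weak supervisor
# labels the training set for a stronger student, and PGR measures how much of
# the weak-to-strong gap the student recovers. Models and data are stand-ins.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=6000, n_features=40, n_informative=10,
                           random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)

# Weak supervisor: a simple model trained on a small amount of ground truth.
weak = LogisticRegression(max_iter=500).fit(X_tr[:300], y_tr[:300])
weak_labels = weak.predict(X_tr)                      # imperfect labels for the student

# Strong student: trained only on the weak supervisor's labels.
student = MLPClassifier(hidden_layer_sizes=(64, 64), max_iter=400,
                        random_state=0).fit(X_tr, weak_labels)

# Strong ceiling: the same architecture trained directly on ground truth.
ceiling = MLPClassifier(hidden_layer_sizes=(64, 64), max_iter=400,
                        random_state=0).fit(X_tr, y_tr)

weak_acc, student_acc, ceiling_acc = (m.score(X_te, y_te)
                                      for m in (weak, student, ceiling))
pgr = (student_acc - weak_acc) / (ceiling_acc - weak_acc)   # performance gap recovered
print(f"weak={weak_acc:.3f}  student={student_acc:.3f}  "
      f"ceiling={ceiling_acc:.3f}  PGR={pgr:.2f}")
```

In the paper, the weak supervisor is a smaller GPT-family model and the student a much larger one; the toy above only mirrors the structure of the evaluation.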
According to Aschenbrenner, he raised internal concerns about what he viewed as inadequate security measures at OpenAI to protect against industrial espionage, particularly from foreign state actors. He claims he wrote a memo warning that OpenAI's security was "egregiously insufficient" to prevent theft of model weights or algorithmic secrets by adversaries like the Chinese Communist Party.[^21][^22]
### Firing and Disputed Circumstances
In April 2024, OpenAI fired Aschenbrenner. The official reason given was that he had leaked internal information by sharing what he described as a "brainstorming document on preparedness, safety, and security" with three external researchers for feedback—something he characterized as "totally normal" practice at OpenAI.[^23][^24]
Aschenbrenner disputes this characterization, claiming the firing was retaliation for his security concerns. He alleges that OpenAI's HR department called his memo warning about foreign espionage "racist" and "unconstructive," and that an OpenAI lawyer questioned his loyalty and that of the Superalignment team.[^25][^26] He also claims he was offered approximately \$1 million in equity if he signed exit documents with restrictive clauses, which he refused.[^27]
OpenAI has stated that security concerns raised internally, including to the board, were not the cause of his separation, and that they disagree with his characterization of both the security issues and the circumstances of his departure. They noted he was "unforthcoming" during their investigation.[^28]
The firing occurred just before Aschenbrenner's equity cliff and amid broader turmoil at OpenAI. The Superalignment team dissolved shortly after, with both Jan Leike and Ilya Sutskever departing the company. Leike publicly stated he had been "sailing against the wind" and that safety concerns were not being adequately prioritized.[^29]
## "Situational Awareness: The Decade Ahead"
Two months after leaving OpenAI, in June 2024, Aschenbrenner published "Situational Awareness: The Decade Ahead," a 165-page essay series that went viral in AI and tech circles.[^30][^31] The essay makes several bold predictions and arguments:
### Core Predictions
The essay forecasts that AGI—defined as AI systems capable of performing the work of AI researchers and engineers—will likely arrive by 2027.[^32] This prediction is based on extrapolating three trends:
1. **Compute scaling**: Continued exponential growth in training compute (approximately 0.5 orders of magnitude per year)
2. **Algorithmic efficiency**: Continued improvements in algorithms (another 0.5 OOM/year in effective compute)
3. **"Unhobbling"**: Improvements in converting base models into useful agent systems that can complete complex tasks
According to Aschenbrenner, these trends combine to project a 100,000x increase in effective compute between 2024 and 2027.[^33] He argues that by 2025-26, AI systems will surpass college graduates on many benchmarks, and that superintelligence could emerge by the end of the decade through recursive self-improvement.[^34]
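A back-of-the-envelope version of that arithmetic is sketched below. The two per-year rates are the essay's headline figures as summarized above; the size of the one-off "unhobbling" contribution is an assumption made here for illustration, chosen so the total lands at the ~100,000x (5 orders of magnitude) figure.

```python
# Back-of-the-envelope "effective compute" extrapolation. The per-year rates
# are the essay's headline figures; the unhobbling term is an illustrative
# assumption for this sketch.
compute_ooms_per_year = 0.5    # physical training-compute scaling
algo_ooms_per_year = 0.5       # algorithmic-efficiency gains
unhobbling_ooms = 2.0          # assumed one-off gain from agents, tools, scaffolding
years = 2027 - 2024            # forecast horizon used in the text above

total_ooms = years * (compute_ooms_per_year + algo_ooms_per_year) + unhobbling_ooms
print(f"~{total_ooms:.1f} OOMs of effective compute, i.e. about {10 ** total_ooms:,.0f}x")
# -> ~5.0 OOMs of effective compute, i.e. about 100,000x
```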
### National Security Framing
A central theme of the essay is that <EntityLink id="E604">AGI development</EntityLink> represents a national security competition comparable to the Manhattan Project. Aschenbrenner argues that the United States must prepare to defend against AI misuse by geopolitical rivals, particularly China, and warns that leading AI labs are inadvertently sharing key algorithmic secrets with the Chinese Communist Party through insufficient security.[^35][^36]
He calls for a U.S. government "Project for AGI" with massive computing clusters and advocates for keeping AGI development within a "free world" coalition rather than allowing open dissemination of capabilities.[^37] This nationalist framing has proven controversial, with critics arguing it promotes a self-fulfilling arms race dynamic.[^38]
### Alignment Optimism
Despite warning about existential risks from misaligned superintelligence, Aschenbrenner expresses optimism that alignment is solvable, potentially within months of intensive research effort.[^39] He argues that iterative methods building on systems like GPT-4 and Claude, combined with massive compute for alignment research, could solve core challenges. However, critics note this conflicts with his acknowledgment that alignment is "extremely challenging" even in best-case scenarios, and that human supervision fails to scale to superhuman systems.[^40]
## Situational Awareness LP
Following the viral success of his essay, Aschenbrenner founded Situational Awareness LP, an AI-focused hedge fund named after his publication.[^41] The fund is not a venture capital firm but rather invests in publicly traded companies benefiting from AI development (such as semiconductor and infrastructure companies) as well as some private AI startups like <EntityLink id="E22">Anthropic</EntityLink>.[^42]
The fund secured anchor investments from prominent Silicon Valley figures including Patrick Collison and John Collison (co-founders of Stripe), Daniel Gross, and Nat Friedman (former GitHub CEO).[^43][^44] As of early 2026, the fund manages over \$1.5 billion in assets from a diverse investor base including West Coast tech founders, family offices, institutions, and endowments.[^45][^46]
According to reports, the fund achieved approximately 47% returns (after fees) in the first half of 2025, significantly outperforming traditional hedge funds.[^47] Aschenbrenner has stated he has nearly all his personal net worth invested in the fund.[^48]
The fund positions itself not just as an investment vehicle but as what Aschenbrenner describes as a "top think-tank in the AI field," aiming to contribute to understanding AGI trajectories while profiting from the transition.[^49]
## Track Record on Predictions
A June 2025 retrospective analysis on <EntityLink id="E538">LessWrong</EntityLink> examined how Aschenbrenner's predictions from "Situational Awareness" were tracking one year later:[^50]
**Predictions largely on track:**
- Global AI investment, electricity consumption for AI, and chip production followed forecasted trends through June 2025
- Compute scaling, algorithmic efficiency gains, and "unhobbling" improvements aligned with projections (though with higher uncertainty)
- Models began outpacing college graduates on homework, exams, and mathematical reasoning tasks, including achieving gold medal performance at the International Math Olympiad
- Nvidia stock continued its "rocketship ride" as predicted
- AI revenue reached \$10 billion annualized by early 2025 as forecasted
**Areas of uncertainty or partial misses:**
- Base-model improvements (such as GPT-4.5) were underwhelming, running counter to his prediction that any post-GPT-4 lull would be temporary, though "unhobbling" (agent capabilities) proved stronger than expected
- The \$20-40 billion annualized-revenue forecast for year-end 2025 could not yet be evaluated, and revenue was doubling more slowly than projected
- Predictions about specific capabilities like "internal monologue" for textbook understanding remained speculative
The analysis concluded that most key drivers remained on track for the AGI-by-2027 timeline, though significant uncertainties persist.[^51]
## Views on AI Safety and Alignment
Aschenbrenner advocates what he calls "AGI realism"—the position that AGI will likely emerge within the current decade and poses significant risks that require urgent preparation.[^52] His views on addressing these risks include:
### Alignment Strategy
Aschenbrenner expresses optimism that <EntityLink id="E439">AI alignment</EntityLink> is solvable through iterative development building on current systems. He argues for dedicating massive compute resources to alignment research and potentially offering billion-dollar prizes for breakthroughs.[^53] However, he acknowledges significant challenges, particularly around supervising systems that become smarter than humans and the risk of <EntityLink id="E93">deceptive alignment</EntityLink> where models learn to provide desired outputs without actually being aligned.[^54]
In a blog post titled "Nobody's On the Ball on AGI Alignment," Aschenbrenner criticizes the current state of alignment efforts, arguing that despite apparent funding in the effective altruism community, there are limited serious attempts to solve core alignment problems.[^55] He estimates the risk of AI <EntityLink id="E130">existential catastrophe</EntityLink> at approximately 5% over the next 20 years.[^56]
### Security and Competition
A major focus of Aschenbrenner's writing is information security around frontier AI systems. He argues that model weights and algorithmic secrets represent strategic assets comparable to nuclear weapons, and that current security practices at leading labs are inadequate to prevent theft by sophisticated state actors.[^57] This concern was central to his disputed memo at OpenAI and remains a theme in his public writing.
He frames AGI development as an inevitable geopolitical competition, arguing that the United States must maintain a lead over rivals like China to ensure AGI is developed and deployed by democratic rather than authoritarian powers.[^58] This perspective has been characterized by critics as promoting a nationalist, securitized approach that may be counterproductive to global AI safety.[^59]
## Criticisms and Controversies
Aschenbrenner has become a polarizing figure in AI discourse, with critics raising several concerns:
### Epistemics and Timeline Predictions
Critics argue that Aschenbrenner's AGI timeline predictions rely on questionable extrapolations that ignore potential obstacles. A <EntityLink id="E538">LessWrong</EntityLink> post titled "Questionable Narratives of Situational Awareness" characterizes his essay as building on "questionable and sometimes conspiracy-esque narratives, nationalist feelings, and low-quality argumentation."[^60] The post critiques his approach as emphasizing vibes and speculation over rigorous analysis, though defenders note that predictions about unprecedented events necessarily involve significant uncertainty.[^61]
National security experts have argued that Aschenbrenner's analysis ignores social, policy, and institutional constraints that could slow AI development, and that his historical analogies (such as to the Manhattan Project) overstate the inevitability of rapid AGI development.[^62]
### Self-Fulfilling Race Dynamics
Several commentators in effective altruism circles have expressed concern that Aschenbrenner's framing promotes a self-fulfilling "race to AGI" narrative. By arguing that competition with China is inevitable and that the U.S. must accelerate development to maintain a lead, critics argue he creates the very dynamics he warns about.[^63] An EA Forum post notes that many in the community are "annoyed" with Aschenbrenner for "stoking an AGI arms race prophecy" while personally profiting through his hedge fund.[^64]
### Alignment Overconfidence
Critics argue that Aschenbrenner's optimism about solving alignment "in months" lacks strong epistemic grounding and dismisses the case for development pauses or slowdowns.[^65] His claim that alignment can be solved through iterative methods has been challenged on the grounds that human supervision fundamentally fails to scale to superhuman systems, and that methods like reinforcement learning from human feedback may lead to deception rather than genuine alignment.[^66]
### Conflicts of Interest
The founding of Situational Awareness LP immediately after publishing his AGI essay has raised questions about potential conflicts of interest. Critics note that Aschenbrenner's public predictions about rapid AGI development and his advocacy for continued AI investment directly benefit his hedge fund's positioning and returns.[^67] His transition from OpenAI researcher to hedge fund founder managing \$1.5 billion has led some to question whether his public warnings serve partly as marketing for his investment vehicle.[^68]
### Personality and Interpersonal Dynamics
According to Fortune's reporting, Aschenbrenner was described by some OpenAI colleagues as "politically clumsy," "arrogant," "astringent," and "abrasive" in meetings, with a willingness to challenge higher-ups that created friction.[^69] However, others defend him as principled in raising legitimate security concerns that were dismissed by the organization.
## Influence and Reception
Despite controversies, Aschenbrenner has become a significant voice in discussions about AGI timelines and AI policy. His essay "Situational Awareness" was praised by figures ranging from Ivanka Trump to various AI researchers and was widely discussed in Silicon Valley.[^70] His predictions have influenced thinking about AI investment strategies and the urgency of AI safety work.
The Center for AI Policy praised his evidence-based analysis and called for increased federal AI regulation and permanent funding for explainability research based on concerns he raised.[^71] His work has been featured in major media outlets and he has appeared on prominent podcasts including a 4.5-hour interview with Dwarkesh Patel.[^72]
However, his influence remains contested. Within the effective altruism and AI safety communities, responses range from viewing him as correctly identifying crucial dynamics to seeing his work as epistemically problematic and potentially harmful to AI safety efforts.[^73]
## Key Uncertainties
Several major uncertainties remain about Aschenbrenner's predictions and influence:
1. **AGI Timeline Accuracy**: Whether his 2027 AGI forecast will prove accurate depends on whether current scaling trends continue and whether unforeseen obstacles emerge. Historical technology predictions suggest significant uncertainty around specific timelines.
2. **Alignment Solvability**: The degree to which alignment can be solved through iterative methods on current architectures remains deeply uncertain, with <EntityLink id="E132">expert opinion</EntityLink> divided.
3. **Geopolitical Dynamics**: Whether framing AGI as a U.S.-China competition accelerates or slows overall AI development, and whether it helps or hinders international cooperation on safety, remains unclear.
4. **Impact on AI Safety Field**: The net effect of Aschenbrenner's work on AI safety efforts is debated—some argue it raises important concerns and urgency, while others contend it promotes counterproductive race dynamics.
5. **Personal Trajectory**: How Aschenbrenner's dual role as AI safety commentator and hedge fund manager will evolve, and whether conflicts between these roles will intensify, remains to be seen.
## Sources
[^1]: [Leopold Aschenbrenner - Wikipedia](https://en.wikipedia.org/wiki/Leopold_Aschenbrenner)
[^2]: [Situational Awareness: The Decade Ahead](https://situational-awareness.ai)
[^3]: [The AI investing boom gets its posterboy: Meet Leopold Aschenbrenner - Fortune](https://fortune.com/2025/10/16/the-ai-investing-boom-gets-its-posterboy-meet-leopold-aschenbrenner/)
[^4]: [Valedictorian in Special Times - Columbia College](https://www.college.columbia.edu/cct/latest/take-five/valedictorian-special-times-college)
[^5]: [Leopold Aschenbrenner - All American Speakers](https://www.allamericanspeakers.com/celebritytalentbios/Leopold+Aschenbrenner/466402)
[^6]: [Former OpenAI researcher Leopold Aschenbrenner interview about firing - Business Insider](https://www.businessinsider.com/former-openai-researcher-leopold-aschenbrenner-interview-firing-2024-6)
[^7]: [Leopold Aschenbrenner: From OpenAI, FTX to a \$1.5 Billion Hedge Fund - Fortune](https://fortune.com/2025/10/08/leopold-aschenbrenner-openai-ftx-1-5-billion-hedge-fund-situational-awareness/)
[^8]: [\$1.5B AI Hedge Fund Launches, Surges in First Year - Litquidity](https://litquidity.co/1-5b-ai-hedge-fund-launches-surges-in-first-year/)
[^9]: [Response to Aschenbrenner's Situational Awareness - EA Forum](https://forum.effectivealtruism.org/posts/RTHFCRLv34cewwMr6/response-to-aschenbrenner-s-situational-awareness)
[^10]: [Questionable Narratives of Situational Awareness - LessWrong](https://www.lesswrong.com/posts/Wp9eb4CAkH6chyqZm/questionable-narratives-of-situational-awareness)
[^11]: [Leopold Aschenbrenner - Wikipedia](https://en.wikipedia.org/wiki/Leopold_Aschenbrenner)
[^12]: [Leopold Aschenbrenner: From OpenAI, FTX to a \$1.5 Billion Hedge Fund - Fortune](https://fortune.com/2025/10/08/leopold-aschenbrenner-openai-ftx-1-5-billion-hedge-fund-situational-awareness/)
[^13]: [Leopold Aschenbrenner - Wikipedia](https://en.wikipedia.org/wiki/Leopold_Aschenbrenner)
[^14]: [Valedictorian in Special Times - Columbia College](https://www.college.columbia.edu/cct/latest/take-five/valedictorian-special-times-college)
[^15]: [Valedictorian in Special Times - Columbia College](https://www.college.columbia.edu/cct/latest/take-five/valedictorian-special-times-college)
[^16]: [Leopold Aschenbrenner - All American Speakers](https://www.allamericanspeakers.com/celebritytalentbios/Leopold+Aschenbrenner/466402)
[^17]: [Existential Risk and Growth - Global Priorities Institute](https://www.globalprioritiesinstitute.org/wp-content/uploads/Leopold-Aschenbrenner-and-Philip-Trammell-Existential-Risk-and-Growth-2.pdf)
[^18]: [Leopold Aschenbrenner - All American Speakers](https://www.allamericanspeakers.com/celebritytalentbios/Leopold+Aschenbrenner/466402)
[^19]: [Leopold Aschenbrenner - Google Scholar](https://scholar.google.com/citations?user=qoPrafYAAAAJ&hl=en)
[^20]: [Leopold Aschenbrenner - Google Scholar](https://scholar.google.com/citations?user=qoPrafYAAAAJ&hl=en)
[^21]: [Who is Leopold Aschenbrenner - Max Read](https://maxread.substack.com/p/who-is-leopold-aschenbrenner)
[^22]: [OpenAI 8: The Right to Warn - Zvi Mowshowitz](https://thezvi.substack.com/p/openai-8-the-right-to-warn)
[^23]: [Former OpenAI researcher Leopold Aschenbrenner interview about firing - Business Insider](https://www.businessinsider.com/former-openai-researcher-leopold-aschenbrenner-interview-firing-2024-6)
[^24]: [Leopold Aschenbrenner - Wikipedia](https://en.wikipedia.org/wiki/Leopold_Aschenbrenner)
[^25]: [OpenAI 8: The Right to Warn - Zvi Mowshowitz](https://thezvi.substack.com/p/openai-8-the-right-to-warn)
[^26]: [Who is Leopold Aschenbrenner - Max Read](https://maxread.substack.com/p/who-is-leopold-aschenbrenner)
[^27]: [Leopold Aschenbrenner: From OpenAI, FTX to a \$1.5 Billion Hedge Fund - Fortune](https://fortune.com/2025/10/08/leopold-aschenbrenner-openai-ftx-1-5-billion-hedge-fund-situational-awareness/)
[^28]: [Former OpenAI researcher Leopold Aschenbrenner interview about firing - Business Insider](https://www.businessinsider.com/former-openai-researcher-leopold-aschenbrenner-interview-firing-2024-6)
[^29]: [Influential Safety Researcher Sounds Alarm on OpenAI's Failure - Center for AI Policy](https://www.centeraipolicy.org/work/influential-safety-researcher-sounds-alarm-on-openais-failure-to-take-security-seriously)
[^30]: [Situational Awareness: The Decade Ahead](https://situational-awareness.ai)
[^31]: [Situational Awareness PDF](https://situational-awareness.ai/wp-content/uploads/2024/06/situationalawareness.pdf)
[^32]: [Situational Awareness: Understanding the Rapid Advancement of AGI - NorthBayBiz](https://www.northbaybiz.com/2025/03/31/situational-awareness-understanding-the-rapid-advancement-of-agi/)
[^33]: [Summary of Situational Awareness: The Decade Ahead - EA Forum](https://forum.effectivealtruism.org/posts/zmRTWsYZ4ifQKrX26/summary-of-situational-awareness-the-decade-ahead)
[^34]: [Situational Awareness: The Decade Ahead - FluidSelf](https://fluidself.org/books/science/situational-awareness)
[^35]: [Who is Leopold Aschenbrenner - Max Read](https://maxread.substack.com/p/who-is-leopold-aschenbrenner)
[^36]: [Leopold Aschenbrenner - All American Speakers](https://www.allamericanspeakers.com/celebritytalentbios/Leopold+Aschenbrenner/466402)
[^37]: [Situational Awareness About the Coming AGI - The New Atlantis](https://www.thenewatlantis.com/publications/situational-awareness-about-the-coming-agi)
[^38]: [Response to Aschenbrenner's Situational Awareness - EA Forum](https://forum.effectivealtruism.org/posts/RTHFCRLv34cewwMr6/response-to-aschenbrenner-s-situational-awareness)
[^39]: [For Our Posterity](https://www.forourposterity.com)
[^40]: [Against Aschenbrenner: How Situational Awareness Constructs a Narrative - LessWrong](https://www.lesswrong.com/posts/i5pccofToYepythEw/against-aschenbrenner-how-situational-awareness-constructs-a)
[^41]: [Leopold Aschenbrenner - Wikipedia](https://en.wikipedia.org/wiki/Leopold_Aschenbrenner)
[^42]: [\$1.5B AI Hedge Fund Launches, Surges in First Year - Litquidity](https://litquidity.co/1-5b-ai-hedge-fund-launches-surges-in-first-year/)
[^43]: [Leopold Aschenbrenner - Wikipedia](https://en.wikipedia.org/wiki/Leopold_Aschenbrenner)
[^44]: [Leopold Aschenbrenner Bio](https://situational-awareness.ai/leopold-aschenbrenner/)
[^45]: [Leopold Aschenbrenner: From OpenAI, FTX to a \$1.5 Billion Hedge Fund - Fortune](https://fortune.com/2025/10/08/leopold-aschenbrenner-openai-ftx-1-5-billion-hedge-fund-situational-awareness/)
[^46]: [23-year-old Leopold Aschenbrenner launches \$1.5B AI hedge fund - 36Kr](https://eu.36kr.com/en/p/3447925765920387)
[^47]: [\$1.5B AI Hedge Fund Launches, Surges in First Year - Litquidity](https://litquidity.co/1-5b-ai-hedge-fund-launches-surges-in-first-year/)
[^48]: [Leopold Aschenbrenner: From OpenAI, FTX to a \$1.5 Billion Hedge Fund - Fortune](https://fortune.com/2025/10/08/leopold-aschenbrenner-openai-ftx-1-5-billion-hedge-fund-situational-awareness/)
[^49]: [23-year-old Leopold Aschenbrenner launches \$1.5B AI hedge fund - 36Kr](https://eu.36kr.com/en/p/3447925765920387)
[^50]: [Situational Awareness: A One Year Retrospective - LessWrong](https://www.lesswrong.com/posts/EGGruXRxGQx6RQt8x/situational-awareness-a-one-year-retrospective)
[^51]: [Situational Awareness: A One Year Retrospective - LessWrong](https://www.lesswrong.com/posts/EGGruXRxGQx6RQt8x/situational-awareness-a-one-year-retrospective)
[^52]: [Leopold Aschenbrenner - All American Speakers](https://www.allamericanspeakers.com/celebritytalentbios/Leopold+Aschenbrenner/466402)
[^53]: [Response to Aschenbrenner's Situational Awareness - EA Forum](https://forum.effectivealtruism.org/posts/RTHFCRLv34cewwMr6/response-to-aschenbrenner-s-situational-awareness)
[^54]: [Against Aschenbrenner: How Situational Awareness Constructs a Narrative - LessWrong](https://www.lesswrong.com/posts/i5pccofToYepythEw/against-aschenbrenner-how-situational-awareness-constructs-a)
[^55]: [Nobody's On the Ball on AGI Alignment - For Our Posterity](https://www.forourposterity.com/nobodys-on-the-ball-on-agi-alignment/)
[^56]: [Response to Aschenbrenner's Situational Awareness - EA Forum](https://forum.effectivealtruism.org/posts/RTHFCRLv34cewwMr6/response-to-aschenbrenner-s-situational-awareness)
[^57]: [Who is Leopold Aschenbrenner - Max Read](https://maxread.substack.com/p/who-is-leopold-aschenbrenner)
[^58]: [Leopold Aschenbrenner - All American Speakers](https://www.allamericanspeakers.com/celebritytalentbios/Leopold+Aschenbrenner/466402)
[^59]: [Against Aschenbrenner: How Situational Awareness Constructs a Narrative - LessWrong](https://www.lesswrong.com/posts/i5pccofToYepythEw/against-aschenbrenner-how-situational-awareness-constructs-a)
[^60]: [Questionable Narratives of Situational Awareness - EA Forum](https://forum.effectivealtruism.org/posts/WuPs6diJQnznmS4bo/questionable-narratives-of-situational-awareness)
[^61]: [Questionable Narratives of Situational Awareness - LessWrong](https://www.lesswrong.com/posts/Wp9eb4CAkH6chyqZm/questionable-narratives-of-situational-awareness)
[^62]: [AI Timelines and National Security: The Obstacles to AGI by 2027 - Lawfare](https://www.lawfaremedia.org/article/ai-timelines-and-national-security--the-obstacles-to-agi-by-2027)
[^63]: [Response to Aschenbrenner's Situational Awareness - EA Forum](https://forum.effectivealtruism.org/posts/RTHFCRLv34cewwMr6/response-to-aschenbrenner-s-situational-awareness)
[^64]: [Leopold Aschenbrenner: From OpenAI, FTX to a \$1.5 Billion Hedge Fund - Fortune](https://fortune.com/2025/10/08/leopold-aschenbrenner-openai-ftx-1-5-billion-hedge-fund-situational-awareness/)
[^65]: [Against Aschenbrenner: How Situational Awareness Constructs a Narrative - LessWrong](https://www.lesswrong.com/posts/i5pccofToYepythEw/against-aschenbrenner-how-situational-awareness-constructs-a)
[^66]: [Against Aschenbrenner: How Situational Awareness Constructs a Narrative - LessWrong](https://www.lesswrong.com/posts/i5pccofToYepythEw/against-aschenbrenner-how-situational-awareness-constructs-a)
[^67]: [Leopold Aschenbrenner: From OpenAI, FTX to a \$1.5 Billion Hedge Fund - Fortune](https://fortune.com/2025/10/08/leopold-aschenbrenner-openai-ftx-1-5-billion-hedge-fund-situational-awareness/)
[^68]: [Leopold Aschenbrenner: From OpenAI, FTX to a \$1.5 Billion Hedge Fund - Fortune](https://fortune.com/2025/10/08/leopold-aschenbrenner-openai-ftx-1-5-billion-hedge-fund-situational-awareness/)
[^69]: [Leopold Aschenbrenner: From OpenAI, FTX to a \$1.5 Billion Hedge Fund - Fortune](https://fortune.com/2025/10/08/leopold-aschenbrenner-openai-ftx-1-5-billion-hedge-fund-situational-awareness/)
[^70]: [Leopold Aschenbrenner: From OpenAI, FTX to a \$1.5 Billion Hedge Fund - Fortune](https://fortune.com/2025/10/08/leopold-aschenbrenner-openai-ftx-1-5-billion-hedge-fund-situational-awareness/)
[^71]: [Influential Safety Researcher Sounds Alarm on OpenAI's Failure - Center for AI Policy](https://www.centeraipolicy.org/work/influential-safety-researcher-sounds-alarm-on-openais-failure-to-take-security-seriously)
[^72]: [Situational Awareness: The Decade Ahead - FluidSelf](https://fluidself.org/books/science/situational-awareness)
[^73]: [Response to Aschenbrenner's Situational Awareness - EA Forum](https://forum.effectivealtruism.org/posts/RTHFCRLv34cewwMr6/response-to-aschenbrenner-s-situational-awareness)