Max Tegmark
max-tegmark (E433)
Path: /knowledge-base/people/max-tegmark/
Page Metadata
{
"id": "max-tegmark",
"numericId": null,
"path": "/knowledge-base/people/max-tegmark/",
"filePath": "knowledge-base/people/max-tegmark.mdx",
"title": "Max Tegmark",
"quality": 63,
"importance": 75,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-02-02",
"llmSummary": "Comprehensive biographical profile of Max Tegmark covering his transition from cosmology to AI safety advocacy, his role founding the Future of Life Institute, and his controversial Mathematical Universe Hypothesis. The article provides balanced coverage of both his contributions and criticisms, including the 2023 grant controversy and scientific debates about his theoretical work.",
"structuredSummary": null,
"description": "Swedish-American physicist at MIT, co-founder of the Future of Life Institute, and prominent AI safety advocate known for his work on the Mathematical Universe Hypothesis and efforts to promote safe artificial intelligence development.",
"ratings": {
"novelty": 4,
"rigor": 7,
"actionability": 6,
"completeness": 8
},
"category": "people",
"subcategory": null,
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 3177,
"tableCount": 2,
"diagramCount": 0,
"internalLinks": 11,
"externalLinks": 69,
"footnoteCount": 72,
"bulletRatio": 0.03,
"sectionCount": 23,
"hasOverview": true,
"structuralScore": 12
},
"suggestedQuality": 80,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 3177,
"unconvertedLinks": [
{
"text": "FLI AI Safety Index: Summer 2025",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
},
{
"text": "FLI AI Safety Index: Summer 2025",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
},
{
"text": "FLI AI Safety Index: Summer 2025",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
},
{
"text": "FLI AI Safety Index: Summer 2025",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
},
{
"text": "FLI AI Safety Index: Summer 2025",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
}
],
"unconvertedLinkCount": 5,
"convertedLinkCount": 0,
"backlinkCount": 0,
"redundancy": {
"maxSimilarity": 16,
"similarPages": [
{
"id": "robin-hanson",
"title": "Robin Hanson",
"path": "/knowledge-base/people/robin-hanson/",
"similarity": 16
},
{
"id": "frontier-model-forum",
"title": "Frontier Model Forum",
"path": "/knowledge-base/organizations/frontier-model-forum/",
"similarity": 15
},
{
"id": "yann-lecun",
"title": "Yann LeCun",
"path": "/knowledge-base/people/yann-lecun/",
"similarity": 15
},
{
"id": "pause",
"title": "Pause Advocacy",
"path": "/knowledge-base/responses/pause/",
"similarity": 15
},
{
"id": "mainstream-era",
"title": "Mainstream Era (2020-Present)",
"path": "/knowledge-base/history/mainstream-era/",
"similarity": 14
}
]
}
}
Entity Data
{
"id": "max-tegmark",
"type": "person",
"title": "Max Tegmark",
"description": "Swedish-American physicist at MIT, co-founder of the Future of Life Institute, and prominent AI safety advocate known for his work on the Mathematical Universe Hypothesis and efforts to promote safe artificial intelligence development.",
"tags": [
"ai-safety-advocacy",
"future-of-life-institute",
"ai-pause",
"mechanistic-interpretability",
"physics"
],
"relatedEntries": [
{
"id": "fli",
"type": "organization"
},
{
"id": "elon-musk",
"type": "researcher"
},
{
"id": "yoshua-bengio",
"type": "researcher"
},
{
"id": "interpretability",
"type": "safety-agenda"
},
{
"id": "prediction-markets",
"type": "concept"
}
],
"sources": [],
"lastUpdated": "2026-02",
"customFields": []
}
Canonical Facts (0)
No facts for this entity
External Links
{
"wikidata": "https://www.wikidata.org/wiki/Q2076321"
}
Backlinks (0)
No backlinks
Frontmatter
{
"title": "Max Tegmark",
"description": "Swedish-American physicist at MIT, co-founder of the Future of Life Institute, and prominent AI safety advocate known for his work on the Mathematical Universe Hypothesis and efforts to promote safe artificial intelligence development.",
"importance": 75,
"lastEdited": "2026-02-02",
"update_frequency": 45,
"sidebar": {
"order": 65
},
"ratings": {
"novelty": 4,
"rigor": 7,
"actionability": 6,
"completeness": 8
},
"quality": 63,
"llmSummary": "Comprehensive biographical profile of Max Tegmark covering his transition from cosmology to AI safety advocacy, his role founding the Future of Life Institute, and his controversial Mathematical Universe Hypothesis. The article provides balanced coverage of both his contributions and criticisms, including the 2023 grant controversy and scientific debates about his theoretical work.",
"clusters": [
"ai-safety",
"governance"
],
"entityType": "person"
}
Raw MDX Source
---
title: Max Tegmark
description: Swedish-American physicist at MIT, co-founder of the Future of Life
Institute, and prominent AI safety advocate known for his work on the
Mathematical Universe Hypothesis and efforts to promote safe artificial
intelligence development.
importance: 75
lastEdited: "2026-02-02"
update_frequency: 45
sidebar:
order: 65
ratings:
novelty: 4
rigor: 7
actionability: 6
completeness: 8
quality: 63
llmSummary: Comprehensive biographical profile of Max Tegmark covering his
transition from cosmology to AI safety advocacy, his role founding the Future
of Life Institute, and his controversial Mathematical Universe Hypothesis. The
article provides balanced coverage of both his contributions and criticisms,
including the 2023 grant controversy and scientific debates about his
theoretical work.
clusters: ["ai-safety","governance"]
entityType: person
---
import {EntityLink, KeyPeople, KeyQuestions, Section} from '@components/wiki';
## Quick Assessment
| Dimension | Assessment |
|-----------|------------|
| **Primary Role** | MIT Physics Professor, AI Safety Advocate, FLI President |
| **Key Contributions** | Co-founded Future of Life Institute; developed 23 Asilomar AI Principles adopted by 1,000+ researchers; organized 2023 AI pause letter with 30,000+ signatories |
| **Main Focus Areas** | AI safety and governance, <EntityLink id="E174">mechanistic interpretability</EntityLink>, cosmology, Mathematical Universe Hypothesis |
| **Notable Works** | *Life 3.0* (2017), *Our Mathematical Universe* (2014), 300+ technical publications |
| **Recognition** | Time 100 Most Influential in AI (2023), American Physical Society Fellow (2012) |
| **Key Concern** | AGI misalignment and the "control problem": preventing advanced AI from pursuing goals incompatible with human values |
## Key Links
| Source | Link |
|--------|------|
| Official Website | [physics.mit.edu](https://physics.mit.edu/faculty/max-tegmark/) |
| Wikipedia | [en.wikipedia.org](https://en.wikipedia.org/wiki/Max_Tegmark) |
## Overview
**Max Tegmark** is a Swedish-American physicist and professor at MIT who has become one of the most prominent public advocates for AI safety. Born on May 5, 1967, in Stockholm, Tegmark spent the first 25 years of his career focused on cosmology and precision measurements of the universe before pivoting to machine learning and AI safety research in the 2010s.[^1] He co-founded the <EntityLink id="E528">Future of Life Institute</EntityLink> in 2014 and serves as its president, leading efforts to ensure artificial intelligence benefits humanity rather than posing existential risks.[^2]
Tegmark's influence spans both academic research and public policy. His 2017 book *Life 3.0: Being Human in the Age of Artificial Intelligence* became a New York Times bestseller and helped bring AI safety concerns to mainstream audiences.[^3] He organized the March 2023 open letter calling for a pause on AI development, which garnered over 30,000 signatures, with signatories including <EntityLink id="E116">Elon Musk</EntityLink> and <EntityLink id="E380">Yoshua Bengio</EntityLink>.[^4] Through the Future of Life Institute's AI Safety Index, launched in summer 2025, Tegmark has worked to create accountability mechanisms for AI companies, though these efforts have met resistance from industry.[^5]
Beyond AI safety, Tegmark is known for his controversial Mathematical Universe Hypothesis, which proposes that physical reality is fundamentally mathematical rather than merely described by mathematics. While this has attracted criticism from fellow scientists who characterize it as "science fiction and mysticism," Tegmark remains optimistic that it points toward a future without fundamental roadblocks for physics.[^6][^7]
## History and Career Development
### Early Life and Education
Tegmark demonstrated technical aptitude early, creating and selling a word processor written in pure machine code for the Swedish ABC 80 computer during high school, along with a 3D Tetris-like game called "Frac."[^8] He earned dual undergraduate degrees—a B.A. in Economics from Stockholm School of Economics and a B.Sc. in Physics from the Royal Institute of Technology—before leaving Sweden in 1990.[^9]
After completing his M.A. in Physics at UC Berkeley in 1992 and his Ph.D. in 1994 under Joseph Silk, Tegmark held postdoctoral positions at the Max-Planck-Institut für Physik in Munich (1995-1996) and as a Hubble Fellow at the Institute for Advanced Study in Princeton (1996).[^10] He joined the University of Pennsylvania as an assistant professor and received tenure in 2003 before moving to MIT's Department of Physics in September 2004.[^11]
### Transition from Cosmology to AI Safety
For approximately 25 years, Tegmark focused primarily on cosmology, making significant contributions to precision measurements of the universe.[^12] His work with the Sloan Digital Sky Survey (SDSS) collaboration on galaxy clustering shared first prize in Science magazine's "Breakthrough of the Year: 2003."[^13] He co-introduced the concept of using baryon acoustic oscillations as a standard ruler with Daniel Eisenstein and Wayne Hu, and discovered the anomalous multipole alignment in WMAP data (sometimes called the "axis of evil") with colleagues.[^14]
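As background (standard cosmology relations, not taken from the cited sources), a "standard ruler" works because the BAO feature has a known comoving length, the sound horizon at the drag epoch, so its apparent size on the sky or in redshift measures cosmic distances and the expansion rate:

```latex
% Standard-ruler relations for BAO (textbook forms; r_d is the comoving
% sound horizon at the drag epoch, roughly 150 Mpc in standard cosmology).
\theta_{\mathrm{BAO}}(z) = \frac{r_d}{D_M(z)}
% transverse: the observed angular scale yields the comoving distance D_M(z)
\qquad
\Delta z_{\mathrm{BAO}}(z) = \frac{r_d \, H(z)}{c}
% line of sight: the observed redshift extent yields the expansion rate H(z)
```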
Tegmark dates his turn toward AI safety to January 1, 2015, when he adopted a personal "put-up-or-shut-up" resolution to stop complaining about problems without attempting to fix them.[^15] The same impulse had already led him to co-found the Future of Life Institute in 2014 with Anthony Aguirre and others, following outreach to Elon Musk after Musk's 2014 remark comparing AI development to "summoning the demon."[^16] The institute organized early AI safety conferences and provided the first major funding for AI safety research, receiving approximately 300 grant applications requesting around \$100 million in total.[^17]
### Recent Work and Current Focus
Tegmark's research has evolved to focus on what he calls the "physics of intelligence," using physics-based techniques to understand biological and artificial intelligence.[^18] His recent work emphasizes mechanistic interpretability—understanding the internal workings of AI systems—and developing approaches for guaranteed safe AI.[^19] He leads the Tegmark AI Safety Group at MIT and continues involvement with multiple nonprofits, including serving as Scientific Director of the Foundational Questions Institute and co-founding the Improve the News Foundation in October 2020.[^20]
## AI Safety Advocacy and Positions
### Core Concerns About AGI
Tegmark frames the central challenge of artificial general intelligence as a "control problem" rather than a question of AI malice. He uses the analogy of human-rhino relationships: humans don't hate rhinos, but misalignment of goals has led to rhino endangerment.[^21] According to Tegmark, the biggest threat from AGI is misalignment with human goals, where advanced systems pursue objectives that inadvertently harm humanity—such as a hypothetical robot programmed to save sheep that develops self-preservation and resource acquisition as instrumental subgoals.[^22]
In a December 2023 discussion, Tegmark stated that AGI matching or surpassing human cognition could arrive within three years, noting how <EntityLink id="E228">prediction markets</EntityLink> had shifted from 20-year to 3-year timelines.[^23] At WebSummit 2024, he emphasized that US AI investments had exceeded the inflation-adjusted costs of the Manhattan Project over a five-year period, highlighting the massive scale of current development efforts.[^24]
Tegmark has been particularly critical of what he sees as a rush toward uncontrollable AGI. He characterizes the position of some AI developers who embrace uncontrollable superintelligence as "digital eugenics," arguing it amounts to deliberately replacing humanity.[^25] He advocates instead for "Tool AI"—controllable AI systems that enhance human capabilities without autonomous agency that could misalign with human values.
### Policy Work and the Future of Life Institute
The Future of Life Institute, under Tegmark's leadership, has pursued multiple strategies to promote AI safety:
**Asilomar AI Principles**: Tegmark and colleagues developed 23 principles for safe AI development that have been adopted by more than 1,000 researchers and scientists worldwide.[^26]
**AI Safety Index**: Launched in summer 2025, this initiative evaluates AI companies' safety practices, covering flagship models such as <EntityLink id="E22">Anthropic</EntityLink>'s Claude 4 Opus, <EntityLink id="E98">Google DeepMind</EntityLink>'s Gemini 2.5 Pro, <EntityLink id="E218">OpenAI</EntityLink>'s o3, and xAI's Grok 3.[^27] The index assesses companies across multiple dimensions and provides recommendations for improvement, such as increasing investment in technical safety research, publishing whistleblowing policies and risk assessments, and developing tamper-resistant safeguards.[^28]
**Legislative Advocacy**: Tegmark has called for "binding government rules" rather than voluntary self-governance by AI companies, arguing "you can't let the fox guard the hen house."[^29] He praised California's SB 53 (signed by Governor Gavin Newsom in September 2025) requiring AI businesses to share safety protocols and report incidents, though he considers it only a "step in the right direction" requiring additional oversight.[^30]
**Open Letter for AI Pause**: In March 2023, Tegmark organized an open letter calling for a pause on AI development that attracted over 30,000 signatories, including Elon Musk and Yoshua Bengio (though notably not Andrew Ng).[^31]
### Arguments and Communication Style
Tegmark's advocacy employs several rhetorical strategies. He frequently draws parallels to historical cases of regulatory capture, warning that AI companies could follow the playbook of Big Tobacco and Big Oil to circumvent government constraints.[^32] He has also compared AI risks to climate change inaction, using the "Don't Look Up" asteroid analogy—asking audiences to imagine a 10% chance of asteroid impact and whether that would justify action.[^33]
However, Tegmark's communication style has drawn criticism. In public debates he has been characterized as over-relying on speculation and appeals to authority, and opponents have faulted his use of insider jargon like "alignment/safety" and probability-framed questions that alienate general audiences.[^34] Some former supporters have expressed disappointment with what they view as overly alarmist "AI will kill us all" arguments.[^35]
Within the AI safety community, Tegmark has identified a factional divide, criticizing what he calls "Camp A"—those who advocate racing to superintelligence safely—as dominating effective altruism with "most money/power/influence." He positions himself in a more cautious camp skeptical of narratives about capitalism, Moloch, or China making an AGI race inevitable.[^36]
## The Mathematical Universe Hypothesis
### Core Theory
Tegmark proposed in 2007 that physical reality is not merely described by mathematics but *is* a mathematical structure—a position he acknowledges puts him on the "most radical fringe" of mathematical universe views.[^37] Under this hypothesis, our universe's mathematical regularities exist because reality is fundamentally mathematical. Every consistent mathematical structure exists as a physical reality in what Tegmark calls a Level IV multiverse.[^38]
Tegmark argues this view is optimistic for physics: if true, it predicts continued discoveries without fundamental roadblocks, whereas if false, physics faces potential dead ends.[^39] He defends the multiverse framework by tying its levels to falsifiable theories: Level II multiverses follow from inflation (falsifiable if inflation is wrong), Level III from quantum mechanics (falsifiable if quantum mechanics is wrong), while Level IV is the claim that all mathematical structures exist equally.[^40]
### Scientific Reception and Criticism
The Mathematical Universe Hypothesis has faced substantial criticism from the scientific community. Mathematician Edward Frenkel characterized it as "science fiction and mysticism" rather than science.[^41] Computer scientist Scott Aaronson argued in his review of Tegmark's book *Our Mathematical Universe* that the hypothesis lacks falsifiable rules—it allows Tegmark to claim evidence both when physics laws are simple (supporting the hypothesis directly) and when they're complex (fitting via multiverse reasoning), unlike well-defined concepts like eigenstates in quantum mechanics or Lyapunov exponents in chaos theory.[^42]
Critics have also pointed to what they see as internal contradictions. Aaronson noted "cognitive dissonance" in Tegmark's acceptance of cosmic inflation producing a Level I multiverse as more than speculation while treating the broader multiverse concept skeptically.[^43] A 2017 physics blog post accused Tegmark of "fetishizing wave function collapse unnecessarily" and misframing Schrödinger's equation as the "end-all" of quantum mechanics despite alternative formulations like path integrals.[^44]
### Related Work on Consciousness
Tegmark views consciousness as intrinsic to integrated information processing and as separable from intelligence, and he has criticized mainstream scientists for dismissing consciousness as unscientific.[^45] He distinguishes consciousness (which can be present in minimally active states like lying in bed) from intelligence, arguing they can exist independently.[^46]
Supporting Giulio Tononi's Integrated Information Theory (IIT), Tegmark emphasizes integration as measured by phi (Φ): a system can support unified conscious experience only if it cannot be decomposed into non-communicating parts.[^47] He rejects both panpsychism and theories suggesting consciousness depends on external-world interaction, arguing instead that conscious experience arises from internal world models—as evidenced by dreaming with eyes closed.[^48]
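Schematically (a simplified sketch of IIT's central quantity, not taken from the cited talk, and glossing over differences between versions of the theory), Φ asks how much information is lost at the system's weakest bipartition:

```latex
% Simplified schematic of integrated information (IIT); exact definitions
% differ across IIT versions. EI denotes effective information across a cut.
\Phi(S) \;=\; \min_{\{A,B\}:\,A \cup B = S} \mathrm{EI}(A \leftrightarrow B)
% Phi > 0 means no bipartition leaves the parts informationally independent,
% i.e., the system is integrated rather than a sum of separate parts.
```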
In 2000, Tegmark published a paper refuting Roger Penrose's quantum consciousness model, concluding that quantum decoherence occurs too rapidly for orchestrated objective reduction to function in neurons.[^49] However, other scientists have argued Tegmark overstretches quantum effects to macroscopic biological scales, potentially "giving Deepak Chopra ammunition" by conflating quantum mechanics with larger-scale phenomena.[^50]
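The core of the 2000 argument is an order-of-magnitude timescale comparison (exponents approximate, as reported in that paper):

```latex
% Order-of-magnitude comparison from Tegmark (2000); exponents approximate.
\tau_{\mathrm{decoherence}} \sim 10^{-20}\,\mathrm{s} \text{ to } 10^{-13}\,\mathrm{s}
\qquad
\tau_{\mathrm{neural\ dynamics}} \sim 10^{-3}\,\mathrm{s} \text{ to } 10^{-1}\,\mathrm{s}
% Superpositions in neurons and microtubules decohere at least ten orders of
% magnitude faster than neurons fire, leaving no window for quantum computation.
```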
## Criticisms and Controversies
### The 2023 Grant Controversy
In 2023, Tegmark drew criticism for signing, on behalf of the Future of Life Institute, a letter of intent for a \$100,000 grant to *Nya Dagbladet*, a far-right Swedish media outlet to which his brother had contributed articles.[^51] The outlet had been linked to antisemitism, white supremacy, and racism. FLI ultimately rejected the grant before media involvement, issuing a statement that they "find Nazi, neo-Nazi or pro-Nazi groups or ideologies despicable" and would never knowingly support them.[^52] Tegmark echoed this position, emphasizing that the rejection resulted from issues uncovered during due diligence. The controversy nonetheless raised questions about FLI's grant evaluation processes.
### AI Safety Community Debates
Tegmark's strong advocacy for restricting artificial superintelligence development has created tensions within both the AI safety community and the broader tech world. On platforms like Hacker News, some former fans have expressed "strong disagreement and disappointment" with his enthusiasm for what they view as overly alarmist existential risk arguments.[^53]
Within effective altruism circles, debates around Tegmark's positions have been more nuanced. His performance in a public debate with Yoshua Bengio defending AI x-risk received mixed reviews. While some praised the concrete evidence provided—such as rapid AI progress surprising even experts like Bengio—others criticized Tegmark for weak rebuttals, overreliance on speculation and authority appeals, and communication that was "outsider-unfriendly" with excessive jargon.[^54] The debate highlighted broader questions about how AI safety advocates should communicate with general audiences.
Tegmark has also critiqued what he sees as misguided geopolitical strategies, particularly the emerging "AGI Entente" approach in US national security circles, under which allied nations would cooperate in <EntityLink id="E604">AGI development</EntityLink> to outpace geopolitical rivals.[^55]
### Corporate and Industry Pushback
The AI Safety Index has faced resistance from industry. When the summer 2025 edition was released, xAI dismissed it as "Legacy Media Lies," and Elon Musk's attorney declined to comment despite Musk's past support for FLI.[^56] Tech lobby groups have argued that regulation slows innovation and drives companies abroad, pushing back against Tegmark's calls for binding safety standards.[^57]
Tegmark has been particularly critical of what he characterizes as an AI industry "race to the bottom," describing companies as "completely unregulated" and lacking incentives for safety. He warns this environment could enable terrorists to develop <EntityLink id="E42">bioweapons</EntityLink>, facilitate manipulation, or destabilize governments.[^58] He points to what he calls "information asymmetry"—AI developers underreporting risks because they have incentives to select lenient testing or cite "infohazard" concerns, especially in biosecurity, while lacking independent scrutiny.[^59]
### Concerns About Research Approach
Some critics have raised methodological concerns about Tegmark's tendency to extrapolate beyond available evidence. The 2017 blog post criticizing his quantum mechanics work accused him of "neglecting scale" by overstretching quantum effects to macroscopic biological levels, cherry-picking mathematical regularity while ignoring quantum problems reliant on approximations, and supporting multiverse theories that are "underthought."[^60]
These criticisms reflect a broader pattern where Tegmark's speculative and optimistic approach—valued by some for pushing boundaries—raises concerns among others about unfalsifiable claims and overreach beyond empirical evidence.
## Awards and Recognition
Tegmark has received numerous honors for his contributions to physics and AI safety:
- **Time Magazine 100 Most Influential People in AI** (2023)[^61]
- **Gold Medal** from The Royal Swedish Academy of Engineering Sciences (2019) for contributions to understanding humanity's place in the cosmos and AI opportunities and risks[^62]
- **American Physical Society Fellow** (2012) for contributions to cosmology and low-frequency radio interferometry technology[^63]
- **Packard Fellowship** (2001-2006), **Cottrell Scholar Award** (2002-2007), and **NSF Career Grant** (2002-2007)[^64]
His books *Our Mathematical Universe* (2014) and *Life 3.0* (2017) both became New York Times bestsellers.[^65] He has authored over 300 technical publications and is featured in numerous science documentaries.[^66]
## Key Uncertainties
**<EntityLink id="E399">AGI Timeline</EntityLink> Accuracy**: Tegmark's December 2023 prediction that AGI could arrive "within three years" (i.e., by late 2026) had, as of early 2026, been neither clearly confirmed nor refuted.[^67]
**Effectiveness of Regulatory Approaches**: Whether Tegmark's advocacy for binding government rules over voluntary industry self-governance will prove effective remains unclear, especially given industry resistance and the "race to the top" versus "race to the bottom" dynamics he describes.[^68]
**Mathematical Universe Hypothesis Testability**: The fundamental question of whether Tegmark's hypothesis is genuinely scientific or unfalsifiable remains contested. His framework allows him to accommodate both simple and complex physics laws, raising questions about what empirical observations could potentially disprove the theory.[^69]
**Information Asymmetry Solutions**: Tegmark identifies AI developers' incentives to underreport risks and select lenient testing, but whether mechanisms like the AI Safety Index can overcome these structural problems through transparency and accountability remains to be demonstrated.[^70]
**Multiverse Measure Problem**: Tegmark's inflation-based multiverse predictions face what he acknowledges as the "measure problem"—infinities yielding useless predictions (infinity/infinity), which he admits "sabotages physics' predictive power." His proposed solution of eliminating infinitely small scales has not gained consensus.[^71]
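A toy illustration of why such ratios are ill-defined (our example, not Tegmark's): even for the integers, the "fraction" of even numbers depends on how the infinite limit is regularized:

```latex
% Toy example (ours, not Tegmark's): relative frequencies in an infinite
% ensemble depend on the regularization scheme.
% Natural ordering 1, 2, 3, ..., N:
\lim_{N\to\infty} \frac{\#\{\text{evens} \leq N\}}{N} = \frac{1}{2}
% Reordering the same set as (odd, odd, even, odd, odd, even, ...) makes the
% analogous limit converge to 1/3; with no preferred ordering or measure,
% "infinity over infinity" has no unique answer.
```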
**Camp Divisions in AI Safety**: Tegmark's identification of factional divides between "Camp A" (race to safe superintelligence) and more cautious approaches reflects uncertainty about optimal strategies. Whether his critique of Camp A's dominance in effective altruism will influence resource allocation and priorities is unclear.[^72]
## Sources
[^1]: [Max Tegmark - MIT Physics](https://physics.mit.edu/faculty/max-tegmark/)
[^2]: [Max Tegmark - Future of Life Institute](https://futureoflife.org/person/max-tegmark/)
[^3]: [Max Tegmark - Chartwell Speakers](https://www.chartwellspeakers.com/speaker/max-tegmark/)
[^4]: Max Tegmark, WebSummit 2024 talk (November 12, 2024)
[^5]: [FLI AI Safety Index: Summer 2025](https://futureoflife.org/ai-safety-index-summer-2025/)
[^6]: [Max Tegmark - Wikipedia](https://en.wikipedia.org/wiki/Max_Tegmark)
[^7]: [Scott Aaronson - Review of Our Mathematical Universe](https://scottaaronson.blog/?p=1753)
[^8]: [Max Tegmark - Logicism Fandom Wiki](https://logicism.fandom.com/wiki/Max_Tegmark)
[^9]: [Max Tegmark - MIT Physics](https://physics.mit.edu/faculty/max-tegmark/)
[^10]: [Max Tegmark - Big Think](https://bigthink.com/people/max-tegmark/)
[^11]: [Max Tegmark - MIT Physics](https://physics.mit.edu/faculty/max-tegmark/)
[^12]: [Max Tegmark - MIT Physics](https://physics.mit.edu/faculty/max-tegmark/)
[^13]: [Max Tegmark - Big Think](https://bigthink.com/people/max-tegmark/)
[^14]: [Max Tegmark - Logicism Fandom Wiki](https://logicism.fandom.com/wiki/Max_Tegmark)
[^15]: [80,000 Hours Podcast - Max Tegmark](https://80000hours.org/podcast/episodes/max-tegmark-ai-and-algorithmic-news-selection/)
[^16]: Max Tegmark, WebSummit 2024 talk (November 12, 2024)
[^17]: [Max Tegmark - FLI Talk on AI Risks and Benefits](https://www.youtube.com/watch?v=R-VNlXJpAIQ)
[^18]: [Max Tegmark - MIT Physics](https://physics.mit.edu/faculty/max-tegmark/)
[^19]: [Max Tegmark - Future of Life Institute](https://futureoflife.org/person/max-tegmark/)
[^20]: [Max Tegmark - Wikipedia](https://en.wikipedia.org/wiki/Max_Tegmark)
[^21]: [Dynatrace - Max Tegmark on AI](https://www.dynatrace.com/news/blog/max-tegmark-on-artificial-intelligence/)
[^22]: [Max Tegmark - FLI Talk on AI Risks and Benefits](https://www.youtube.com/watch?v=R-VNlXJpAIQ)
[^23]: [Max Tegmark Interview - December 2023](https://www.youtube.com/watch?v=gFQvL3KVaOQ)
[^24]: Max Tegmark, WebSummit 2024 talk (November 12, 2024)
[^25]: Max Tegmark, WebSummit 2024 talk (November 12, 2024)
[^26]: [Dynatrace - Max Tegmark on AI](https://www.dynatrace.com/news/blog/max-tegmark-on-artificial-intelligence/)
[^27]: [FLI AI Safety Index: Summer 2025](https://futureoflife.org/ai-safety-index-summer-2025/)
[^28]: [FLI AI Safety Index: Summer 2025](https://futureoflife.org/ai-safety-index-summer-2025/)
[^29]: [Max Tegmark on AI Safety Index](https://www.youtube.com/watch?v=hGUUhxNn86M)
[^30]: [LA Times - AI Company Scorecard](https://www.latimes.com/entertainment-arts/business/story/2025-12-05/ai-artificial-intelligence-company-scorecard-ranks-safety-humanity)
[^31]: Max Tegmark, WebSummit 2024 talk (November 12, 2024)
[^32]: [80,000 Hours Podcast - Max Tegmark](https://80000hours.org/podcast/episodes/max-tegmark-ai-and-algorithmic-news-selection/)
[^33]: [EA Forum - Max Tegmark's Time Article](https://forum.effectivealtruism.org/posts/moA9DNjbYr72xy4mQ/max-tegmark-s-new-time-article-on-how-we-re-in-a-don-t-look)
[^34]: [EA Forum - Did Bengio and Tegmark Lose a Debate?](https://forum.effectivealtruism.org/posts/HN4ECPh5bPykBgvT8/did-bengio-and-tegmark-lose-a-debate-about-ai-x-risk-against)
[^35]: [Hacker News Discussion](https://news.ycombinator.com/item?id=40067124)
[^36]: [EA Forum - AI Safety Community Camps](https://forum.effectivealtruism.org/posts/3WEkFjrab9rE2sJaE/which-side-of-the-ai-safety-community-are-you-in)
[^37]: [VICE - Interview with Max Tegmark](https://www.vice.com/en/article/the-mathematical-reality-of-reality-an-interview-with-cosmologist-max-tegmark/)
[^38]: [Scott Aaronson - Review of Our Mathematical Universe](https://scottaaronson.blog/?p=1753)
[^39]: [VICE - Interview with Max Tegmark](https://www.vice.com/en/article/the-mathematical-reality-of-reality-an-interview-with-cosmologist-max-tegmark/)
[^40]: [Scott Aaronson - Review of Our Mathematical Universe](https://scottaaronson.blog/?p=1753)
[^41]: [Max Tegmark - Wikipedia](https://en.wikipedia.org/wiki/Max_Tegmark)
[^42]: [Scott Aaronson - Review of Our Mathematical Universe](https://scottaaronson.blog/?p=1753)
[^43]: [Scott Aaronson - Review of Our Mathematical Universe](https://scottaaronson.blog/?p=1753)
[^44]: [Poetry in Physics Blog - Disagreeing with Mathematical Universe](https://poetryinphysics.wordpress.com/2017/12/14/disagreeing-with-our-mathematical-universe/)
[^45]: [Max Tegmark on Consciousness - YouTube](https://www.youtube.com/watch?v=g2V85ssfwtE)
[^46]: [Max Tegmark on Consciousness - YouTube](https://www.youtube.com/watch?v=g2V85ssfwtE)
[^47]: [Max Tegmark on Consciousness - YouTube](https://www.youtube.com/watch?v=g2V85ssfwtE)
[^48]: [Max Tegmark on Consciousness - YouTube](https://www.youtube.com/watch?v=g2V85ssfwtE)
[^49]: [Max Tegmark - Wikipedia](https://en.wikipedia.org/wiki/Max_Tegmark)
[^50]: [Poetry in Physics Blog - Disagreeing with Mathematical Universe](https://poetryinphysics.wordpress.com/2017/12/14/disagreeing-with-our-mathematical-universe/)
[^51]: [Max Tegmark - Wikipedia](https://en.wikipedia.org/wiki/Max_Tegmark)
[^52]: [Max Tegmark - Wikipedia](https://en.wikipedia.org/wiki/Max_Tegmark)
[^53]: [Hacker News Discussion](https://news.ycombinator.com/item?id=40067124)
[^54]: [EA Forum - Did Bengio and Tegmark Lose a Debate?](https://forum.effectivealtruism.org/posts/HN4ECPh5bPykBgvT8/did-bengio-and-tegmark-lose-a-debate-about-ai-x-risk-against)
[^55]: [EA Forum - AGI Entente Delusion](https://ea.greaterwrong.com/posts/7sfRDtBDNf7F56fHg/max-tegmark-the-agi-entente-delusion?comments=false&hide-nav-bars=true)
[^56]: [LA Times - AI Company Scorecard](https://www.latimes.com/entertainment-arts/business/story/2025-12-05/ai-artificial-intelligence-company-scorecard-ranks-safety-humanity)
[^57]: [LA Times - AI Company Scorecard](https://www.latimes.com/entertainment-arts/business/story/2025-12-05/ai-artificial-intelligence-company-scorecard-ranks-safety-humanity)
[^58]: [LA Times - AI Company Scorecard](https://www.latimes.com/entertainment-arts/business/story/2025-12-05/ai-artificial-intelligence-company-scorecard-ranks-safety-humanity)
[^59]: [FLI AI Safety Index: Summer 2025](https://futureoflife.org/ai-safety-index-summer-2025/)
[^60]: [Poetry in Physics Blog - Disagreeing with Mathematical Universe](https://poetryinphysics.wordpress.com/2017/12/14/disagreeing-with-our-mathematical-universe/)
[^61]: [Max Tegmark - Future of Life Institute](https://futureoflife.org/person/max-tegmark/)
[^62]: [Max Tegmark - MIT Physics](https://physics.mit.edu/faculty/max-tegmark/)
[^63]: [Max Tegmark - MIT Physics](https://physics.mit.edu/faculty/max-tegmark/)
[^64]: [Max Tegmark - Big Think](https://bigthink.com/people/max-tegmark/)
[^65]: [Max Tegmark - Future of Life Institute](https://futureoflife.org/person/max-tegmark/)
[^66]: [Max Tegmark - Future of Life Institute](https://futureoflife.org/person/max-tegmark/)
[^67]: [Max Tegmark Interview - December 2023](https://www.youtube.com/watch?v=gFQvL3KVaOQ)
[^68]: [Max Tegmark on AI Safety Index](https://www.youtube.com/watch?v=hGUUhxNn86M)
[^69]: [Scott Aaronson - Review of Our Mathematical Universe](https://scottaaronson.blog/?p=1753)
[^70]: [FLI AI Safety Index: Summer 2025](https://futureoflife.org/ai-safety-index-summer-2025/)
[^71]: [Edge - Max Tegmark on Infinity](https://www.edge.org/response-detail/25344)
[^72]: [EA Forum - AI Safety Community Camps](https://forum.effectivealtruism.org/posts/3WEkFjrab9rE2sJaE/which-side-of-the-ai-safety-community-are-you-in)