The Sequences by Eliezer Yudkowsky
the-sequences (E570)
Path: /knowledge-base/organizations/the-sequences/
Page Metadata
{
"id": "the-sequences",
"numericId": null,
"path": "/knowledge-base/organizations/the-sequences/",
"filePath": "knowledge-base/organizations/the-sequences.mdx",
"title": "The Sequences by Eliezer Yudkowsky",
"quality": 65,
"importance": 50,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-01-31",
"llmSummary": null,
"structuredSummary": null,
"description": "A foundational collection of blog posts on rationality, cognitive biases, and AI alignment that shaped the rationalist movement and influenced effective altruism",
"ratings": {
"novelty": 5,
"rigor": 6,
"actionability": 5,
"completeness": 6
},
"category": "organizations",
"subcategory": "community-building",
"clusters": [
"community",
"ai-safety"
],
"metrics": {
"wordCount": 2196,
"tableCount": 1,
"diagramCount": 0,
"internalLinks": 18,
"externalLinks": 46,
"footnoteCount": 46,
"bulletRatio": 0.27,
"sectionCount": 23,
"hasOverview": true,
"structuralScore": 11
},
"suggestedQuality": 73,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 2196,
"unconvertedLinks": [
{
"text": "EA Forum: Rationality Book Club",
"url": "https://forum.effectivealtruism.org/",
"resourceId": "bff2f5843023e85e",
"resourceTitle": "EA Forum Career Posts"
},
{
"text": "EA Forum: Rationalist Movement Discussion",
"url": "https://forum.effectivealtruism.org/",
"resourceId": "bff2f5843023e85e",
"resourceTitle": "EA Forum Career Posts"
},
{
"text": "EA Forum: Sequences and AI Alignment",
"url": "https://forum.effectivealtruism.org/",
"resourceId": "bff2f5843023e85e",
"resourceTitle": "EA Forum Career Posts"
},
{
"text": "Eliezer Yudkowsky Biography",
"url": "https://en.wikipedia.org/wiki/Eliezer_Yudkowsky",
"resourceId": "d8d60a1c46155a15",
"resourceTitle": "Eliezer Yudkowsky"
},
{
"text": "Eliezer Yudkowsky - MIRI",
"url": "https://intelligence.org/team/",
"resourceId": "9ce9f930ebdf18f2",
"resourceTitle": "Soares"
},
{
"text": "EA Forum: Rationalist Influence",
"url": "https://forum.effectivealtruism.org/",
"resourceId": "bff2f5843023e85e",
"resourceTitle": "EA Forum Career Posts"
},
{
"text": "EA Forum: Death Spirals Discussion",
"url": "https://forum.effectivealtruism.org/",
"resourceId": "bff2f5843023e85e",
"resourceTitle": "EA Forum Career Posts"
},
{
"text": "Nick Bostrom and Intelligence Explosion",
"url": "https://nickbostrom.com/",
"resourceId": "9cf1412a293bfdbe",
"resourceTitle": "Theoretical work"
},
{
"text": "EA Forum: Sequences Originality Debate",
"url": "https://forum.effectivealtruism.org/",
"resourceId": "bff2f5843023e85e",
"resourceTitle": "EA Forum Career Posts"
},
{
"text": "EA Forum: Measurable Effectiveness Discussion",
"url": "https://forum.effectivealtruism.org/",
"resourceId": "bff2f5843023e85e",
"resourceTitle": "EA Forum Career Posts"
},
{
"text": "EA Forum: Yudkowsky Track Record",
"url": "https://forum.effectivealtruism.org/",
"resourceId": "bff2f5843023e85e",
"resourceTitle": "EA Forum Career Posts"
},
{
"text": "EA Forum: Sequences Writing Quality",
"url": "https://forum.effectivealtruism.org/",
"resourceId": "bff2f5843023e85e",
"resourceTitle": "EA Forum Career Posts"
},
{
"text": "Worldview Transmission Concerns",
"url": "https://forum.effectivealtruism.org/",
"resourceId": "bff2f5843023e85e",
"resourceTitle": "EA Forum Career Posts"
},
{
"text": "Second Reading Experience",
"url": "https://forum.effectivealtruism.org/",
"resourceId": "bff2f5843023e85e",
"resourceTitle": "EA Forum Career Posts"
},
{
"text": "Replication Crisis Impact",
"url": "https://forum.effectivealtruism.org/",
"resourceId": "bff2f5843023e85e",
"resourceTitle": "EA Forum Career Posts"
},
{
"text": "Manifold Markets: Yudkowsky Doom Predictions",
"url": "https://manifold.markets/",
"resourceId": "906fb1a680ec9f65",
"resourceTitle": "Manifold Markets"
}
],
"unconvertedLinkCount": 16,
"convertedLinkCount": 0,
"backlinkCount": 0,
"redundancy": {
"maxSimilarity": 13,
"similarPages": [
{
"id": "center-for-applied-rationality",
"title": "Center for Applied Rationality",
"path": "/knowledge-base/organizations/center-for-applied-rationality/",
"similarity": 13
},
{
"id": "ea-global",
"title": "EA Global",
"path": "/knowledge-base/organizations/ea-global/",
"similarity": 13
},
{
"id": "lesswrong",
"title": "LessWrong",
"path": "/knowledge-base/organizations/lesswrong/",
"similarity": 13
},
{
"id": "rethink-priorities",
"title": "Rethink Priorities",
"path": "/knowledge-base/organizations/rethink-priorities/",
"similarity": 13
},
{
"id": "issa-rice",
"title": "Issa Rice",
"path": "/knowledge-base/people/issa-rice/",
"similarity": 13
}
]
}
}
Entity Data
{
"id": "the-sequences",
"type": "organization",
"title": "The Sequences by Eliezer Yudkowsky",
"description": "A foundational collection of blog posts on rationality, cognitive biases, and AI alignment that shaped the rationalist movement and influenced effective altruism",
"tags": [],
"relatedEntries": [],
"sources": [],
"lastUpdated": "2026-02",
"customFields": []
}
Canonical Facts (0)
No facts for this entity
External Links
No external links
Backlinks (0)
No backlinks
Frontmatter
{
"title": "The Sequences by Eliezer Yudkowsky",
"description": "A foundational collection of blog posts on rationality, cognitive biases, and AI alignment that shaped the rationalist movement and influenced effective altruism",
"quality": 65,
"importance": 50,
"lastEdited": "2026-01-31",
"update_frequency": 45,
"sidebar": {
"order": 50
},
"ratings": {
"novelty": 5,
"rigor": 6,
"actionability": 5,
"completeness": 6
},
"clusters": [
"community",
"ai-safety"
],
"subcategory": "community-building",
"entityType": "organization"
}
Raw MDX Source
---
title: The Sequences by Eliezer Yudkowsky
description: A foundational collection of blog posts on rationality, cognitive biases, and AI alignment that shaped the rationalist movement and influenced effective altruism
quality: 65
importance: 50
lastEdited: "2026-01-31"
update_frequency: 45
sidebar:
  order: 50
ratings:
  novelty: 5
  rigor: 6
  actionability: 5
  completeness: 6
clusters:
  - community
  - ai-safety
subcategory: community-building
entityType: organization
---
import {EntityLink, KeyPeople, KeyQuestions, Section} from '@components/wiki';
## Quick Assessment
| Dimension | Assessment |
|-----------|------------|
| **Type** | Educational content / Foundational texts |
| **Author** | <EntityLink id="E114">Eliezer Yudkowsky</EntityLink> |
| **Publication Period** | 2006-2009 (original posts), 2015 (compiled book) |
| **Format** | Over 300 blog posts compiled as *Rationality: From AI to Zombies* |
| **Primary Topics** | Rationality, cognitive biases, epistemology, <EntityLink id="E439">AI alignment</EntityLink> |
| **Community Influence** | Foundational to <EntityLink id="E538">LessWrong</EntityLink> and rationalist movement |
| **Main Criticisms** | Philosophical inaccuracies, overconfidence, poor engagement with critics |
## Overview
**The Sequences** is a comprehensive collection of blog posts written by <EntityLink id="E114">Eliezer Yudkowsky</EntityLink> between 2006 and 2009, originally published on Overcoming Bias and <EntityLink id="E538">LessWrong</EntityLink>.[^1][^2] The essays focus on the science and philosophy of human rationality, covering cognitive biases, Bayesian reasoning, epistemology, philosophy of mind, and AI risks. The collection was later compiled and edited by the <EntityLink id="E202">Machine Intelligence Research Institute (MIRI)</EntityLink> into the book *Rationality: From AI to Zombies* (also known as *From AI to Zombies*) in 2015.[^3]
Yudkowsky's stated goal was to create a comprehensive guide to rationality by developing techniques and mental models to overcome cognitive biases, refine decision-making, and update beliefs using Bayesian reasoning. The essays emphasize distinguishing mental models ("map") from reality ("territory") and aim to equip readers with tools for clearer thinking, more accurate beliefs, and reasoning about profound risks such as existential threats from artificial general intelligence.[^4] The work became foundational to the rationalist movement and significantly influenced effective altruism, particularly around Bayesian epistemology, prediction, and cognitive bias awareness.[^5]
While The Sequences are primarily framed as a guide to rationality, they contain foundational epistemology that helps readers develop better models for understanding AI alignment risks. In the later sections, essays related to AI alignment appear frequently, and entire sequence sections like *The Machine in the Ghost* and *Mere Goodness* have direct object-level relevance to alignment work.[^6]
## History and Development
### Original Publication (2006-2009)
<EntityLink id="E114">Eliezer Yudkowsky</EntityLink> began writing The Sequences as daily blog posts starting in 2006, initially on Overcoming Bias (where <EntityLink id="E260">Robin Hanson</EntityLink> was a principal contributor) and later on <EntityLink id="E538">LessWrong</EntityLink>, which he founded in February 2009.[^7][^8] The original collection consisted of approximately 300 blog posts exploring theses coherently, including core concepts like the map-territory distinction—the idea that beliefs are maps representing reality, not reality itself.[^9]
About half of the original posts were organized into thematically linked "sequences," divided by size into "major" and "minor" sequences. The core sequences included:[^10]
- **Map and Territory** - Bayesian rationality and epistemology
- **Mysterious Answers to Mysterious Questions** - How to recognize and avoid false explanations
- **How to Actually Change Your Mind** - Overcoming motivated reasoning and biases
- **Reductionism** - Understanding complex phenomena through simpler components
Yudkowsky, an autodidact who did not attend high school or college, had previously co-founded the Singularity Institute for Artificial Intelligence (renamed <EntityLink id="E202">MIRI</EntityLink> in 2013).[^11]
### Book Compilation (2015)
In 2015, <EntityLink id="E202">MIRI</EntityLink> collated, edited, and published the posts as the ebook *Rationality: From AI to Zombies*. This version omitted some original posts while adding uncollected essays from the same era.[^12] The compiled version organized the material into thematic "books":
- **Book I: Map and Territory** - Bayesian rationality and epistemology
- **Book II: How to Actually Change Your Mind** - Overcoming motivated reasoning and biases like confirmation bias, availability heuristic, anchoring, and scope insensitivity
- **Book III: The Machine in the Ghost** - Philosophy of mind, intelligence, goal systems, often linked to AI; includes thought experiments on consciousness and subjective experience versus physical processes (e.g., philosophical zombies)
- Additional books on quantum physics, evolutionary psychology, and morality[^13]
The original posts were preserved on <EntityLink id="E538">LessWrong</EntityLink> as "deprecated" for historical reference, while modern LessWrong sequences continued to draw from this material.[^14]
## Content and Core Concepts
### Rationality and Epistemology
The Sequences teach how to avoid typical failure modes of human reasoning and think in ways that lead to true and accurate beliefs.[^15] Core epistemological concepts include:
- **Map-Territory Distinction**: Beliefs function as maps representing reality, not reality itself; confusing the two leads to systematic errors[^16]
- **Bayesian Reasoning**: Using probability theory to update beliefs based on evidence
- **Conservation of Expected Evidence**: On average, you cannot expect evidence to move your beliefs in a particular direction; the probability-weighted expectation of your posterior equals your prior
- **Absence of Evidence as Evidence of Absence**: When you would expect to see evidence if something were true, not finding it counts against that hypothesis (both principles are illustrated in the sketch after this list)[^17]
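A minimal numerical sketch (hypothetical numbers, not drawn from the source) shows how the last two principles follow from Bayes' theorem: observing expected evidence raises the posterior, failing to observe it lowers the posterior, and the probability-weighted average of the possible posteriors equals the prior.

```python
# Hypothetical example: updating belief in a hypothesis H given evidence E.
prior_h = 0.30            # P(H): prior belief that the hypothesis is true
p_e_given_h = 0.80        # P(E|H): chance of seeing the evidence if H is true
p_e_given_not_h = 0.10    # P(E|~H): chance of seeing the evidence if H is false

p_e = p_e_given_h * prior_h + p_e_given_not_h * (1 - prior_h)  # P(E)

# Bayes' theorem: posterior after seeing E, and after not seeing E.
posterior_if_e = p_e_given_h * prior_h / p_e
posterior_if_not_e = (1 - p_e_given_h) * prior_h / (1 - p_e)

print(f"P(H | E)  = {posterior_if_e:.3f}")      # evidence observed: belief rises
print(f"P(H | ~E) = {posterior_if_not_e:.3f}")  # expected evidence absent: belief falls

# Conservation of expected evidence: the probability-weighted average of the
# possible posteriors equals the prior, so no net shift can be anticipated in advance.
expected_posterior = p_e * posterior_if_e + (1 - p_e) * posterior_if_not_e
print(f"Expected posterior = {expected_posterior:.3f} (equals the prior, {prior_h})")
```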
### Cognitive Biases
The Sequences extensively catalog and explain cognitive biases that interfere with accurate thinking:
- **Confirmation bias** - Seeking evidence that confirms existing beliefs
- **Availability heuristic** - Overweighting easily recalled examples
- **Anchoring** - Being influenced by initial numbers or suggestions
- **Scope insensitivity** - Failing to properly scale emotional responses to magnitude
- **Motivated reasoning** - Reasoning in service of desired conclusions rather than truth[^18]
### Decision Theory and AI
Yudkowsky developed Timeless Decision Theory (TDT) as an alternative to Causal and Evidential Decision Theory, addressing problems like Newcomb's Problem and Pascal's Mugging.[^19] (A numerical sketch of Newcomb's Problem follows the list below.) The Sequences also introduce concepts relevant to AI alignment, including:
- Intelligence explosion and recursive self-improvement
- Optimization power in vast search spaces
- <EntityLink id="E168">Instrumental convergence</EntityLink> and goal preservation
- The challenge of specifying human values[^20]
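As referenced above, a short sketch of Newcomb's Problem with its standard payoffs (an illustration under an assumed predictor accuracy, not code from the source) shows why the problem motivates alternatives to the classical decision theories: conditioning on one's own choice as evidence favors taking only the opaque box, while causal reasoning about the already-fixed contents favors taking both.

```python
# Newcomb's Problem with the standard payoffs: a near-perfect predictor fills the
# opaque box with $1,000,000 only if it predicts you will take just that box;
# the transparent box always holds $1,000. Predictor accuracy is an assumption here.
accuracy = 0.99

# Evidential-style expected value: treat your own choice as evidence of the prediction.
ev_one_box = accuracy * 1_000_000 + (1 - accuracy) * 0
ev_two_box = accuracy * 1_000 + (1 - accuracy) * (1_000_000 + 1_000)

print(f"EV(one-box) = ${ev_one_box:,.0f}")  # about $990,000
print(f"EV(two-box) = ${ev_two_box:,.0f}")  # about $11,000

# Causal reasoning instead treats the box contents as already fixed, so taking both
# boxes always adds $1,000; the clash between these answers is what TDT aimed to resolve.
```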
## Influence and Impact
### Foundational Role in Communities
The Sequences became foundational texts for <EntityLink id="E538">LessWrong</EntityLink> and shaped the rationalist community's culture and discourse.[^21] The material is widely recommended as an entry point for newcomers to rationalist thinking and AI safety considerations. LessWrong's 2024 survey showed The Sequences as a top recommended resource among respondents.[^22]
The work significantly influenced effective altruism, particularly around Bayesian epistemology, prediction, cognitive biases, and thinking about AI risks.[^23] Community members have noted that familiarity with The Sequences, particularly essays like "Death Spirals," helps create "a community I can trust" by promoting epistemic clarity and transparency about uncertainty.[^24]
### Academic and Intellectual Influence
Yudkowsky's work on intelligence explosions from the Sequences era influenced philosopher <EntityLink id="E215">Nick Bostrom</EntityLink>'s 2014 book *Superintelligence: Paths, Dangers, Strategies*.[^25] However, The Sequences face criticism for limited engagement with academic philosophy and for sometimes rediscovering existing concepts without proper credit—for example, Yudkowsky's "Requiredism" essentially describes compatibilism about free will.[^26]
The material overlaps with prior academic works like *Thinking and Deciding* by Jonathan Baron but is criticized for not fully crediting academia. Some view it as an original synthesis (30-60% new material) presented in an engaging "popular science" format that condenses psychology, philosophy, and AI ideas into memorable phrases.[^27]
### Practical Reception
Readers report that The Sequences provide useful "tags" or terminology for discussing reasoning patterns, help internalize ideas that seem obvious only in retrospect, and offer tools for noticing weak points in one's beliefs, such as motivated cognition.[^28] The essays are described as engaging popular science that makes concepts stick through catchy framing and thought experiments.
However, critics note limitations in measurable effectiveness. No empirical studies demonstrate improvements in decision-making or other quantifiable outcomes from reading The Sequences.[^29] The work's impact appears primarily anecdotal and concentrated within specific communities rather than demonstrating broad practical effectiveness.
## Criticisms and Controversies
### Philosophical Shortcomings
Critics argue that Yudkowsky dismisses philosophy while simultaneously reinventing concepts from the field without adequate credit or understanding. Specific criticisms include:[^30][^31]
- **Misrepresenting the zombie argument**: Yudkowsky confuses the philosophical zombie thought experiment with epiphenomenalism, leading philosopher David Chalmers to publicly correct his interpretation
- **Strawmanning critics**: Failing to engage with the strongest versions of opposing arguments
- **Rediscovering existing ideas**: Presenting concepts like compatibilism ("Requiredism") as if novel
- **Weak decision theory**: Timeless Decision Theory described as "wildly indeterminate," hypersensitive, and inferior to evidential/causal alternatives
### Epistemic Conduct
Multiple critics highlight concerns about Yudkowsky's approach to disagreement and error correction:[^32][^33]
- Confidently asserting claims that contain "egregious errors"
- Refusing to acknowledge mistakes and engaging only weakly with substantive criticisms
- Responding arrogantly or calling opponents "stupid"
- Ignoring stronger counter-arguments while focusing on weaker ones
- Poor track record in predictions despite high confidence
These patterns are seen as harmful to Yudkowsky's reputation and to efforts to promote rationalist ideas outside the existing community.
### Stylistic and Substantive Issues
Readers note several problems with the writing itself:[^34][^35]
- **Excessive repetition**: "Beating a dead horse" on the same points
- **Length and accessibility**: The approximately 1 million words make it a "difficult read"
- **Variable quality**: Some sequences (e.g., on metaethics) described as skimmable or underwhelming
- **Overly speculative**: Encourages treating one's own mind as inherently inferior or opaque in ways that can lead to unnecessary pessimism
### Worldview Concerns
Critics argue The Sequences transmit a "packaged worldview" with potential dangers rather than pure rationality tools.[^36] The work's framing around AI doom has become more prominent over time—one reader noted that on a second reading, they became "constantly aware that Yudkowsky believes...that our doom is virtually certain and he has no idea how to even begin formulate a solution."[^37]
This contrasts with the optimistic tone of the original writing period (2006-2009). By 2024, Yudkowsky's public statements emphasized extreme urgency, stating humanity has "ONE YEAR, THIS YEAR, 2024" for a global response to AI extinction risks.[^38]
### Replication Crisis Impact
The Sequences heavily drew on psychological findings from the early 2000s, many of which collapsed during the replication crisis that began shortly after Yudkowsky finished writing them.[^39] This undermines some of the empirical foundations for claims about cognitive biases and reasoning, though core epistemological points may remain valid.
### Community Perception
The Sequences are sometimes associated with what critics describe as a "nerdy, rationalist religion" with unconventional beliefs (including polyamory and AI obsession), with Yudkowsky cast as a "guru" who commands little respect outside his immediate circle.[^40] The fact that Yudkowsky's other major work is *Harry Potter and the Methods of Rationality* (a fanfiction novel) reinforces this perception among skeptics.
Within the rationalist and EA communities, some members note that "the Sequences clearly failed to make anyone a rational superbeing, or even noticeably more successful," as Scott Alexander pointed out as early as 2009.[^41]
## Ongoing Relevance and Evolution
The Sequences remain available in multiple formats: as blog posts on <EntityLink id="E538">LessWrong</EntityLink>, as the compiled ebook *Rationality: From AI to Zombies*, and through curated "Sequence Highlights" featuring 50 key essays.[^42] The material continues to serve as a recommended starting point for understanding rationalist thinking and AI safety concerns.
Yudkowsky continued publishing related work, including the 2017 ebook *Inadequate Equilibria* (published by <EntityLink id="E202">MIRI</EntityLink>) on societal inefficiencies,[^43] and the 2025 book *If Anyone Builds It, Everyone Dies: Why Superhuman AI Would Kill Us All*, co-authored with Nate Soares, which became a *New York Times* bestseller.[^44]
A 2025 podcast episode on *Books in Bytes* explored ongoing themes from The Sequences relevant to rationalists and AI theorists, including the zombie argument, perception biases, and joy in reasoning.[^45] <EntityLink id="E546">Manifold</EntityLink> Markets tracked predictions about Yudkowsky's views on AI doom probability (greater than 75% within 50 years by 2035), noting potential for downward adjustments only if machine learning plateaus, global AI development stalls, or alignment succeeds.[^46]
## Key Uncertainties
Several important questions remain about The Sequences' ultimate value and impact:
1. **How much original insight versus synthesis?** - The balance between novel contributions and condensing existing academic work remains debated, with estimates ranging from 30-60% new material
2. **What is the measurable effectiveness?** - No empirical studies have quantified improvements in decision-making, career outcomes, or other concrete benefits from reading The Sequences
3. **How much has the replication crisis undermined the empirical foundations?** - Many psychological findings cited have failed to replicate, though the epistemic core may remain valid
4. **Is the pessimistic AI worldview justified?** - The progression from optimism (2006-2009) to doom certainty (2020s) raises questions about whether the underlying reasoning changed or if motivated reasoning influenced later views
5. **What is the appropriate relationship with academic philosophy?** - Whether The Sequences should be positioned as complementary to, independent from, or in tension with traditional philosophy remains contested
## Sources
[^1]: [The Sequences - LessWrong](https://www.lesswrong.com/tag/the-sequences)
[^2]: [EA Forum: Rationality Book Club](https://forum.effectivealtruism.org/)
[^3]: [Rationality: From AI to Zombies - MIRI](https://intelligence.org/rationality-ai-zombies/)
[^4]: [The Sequences Overview - LessWrong](https://www.lesswrong.com/tag/the-sequences)
[^5]: [EA Forum: Rationalist Movement Discussion](https://forum.effectivealtruism.org/)
[^6]: [EA Forum: Sequences and AI Alignment](https://forum.effectivealtruism.org/)
[^7]: [Eliezer Yudkowsky Biography](https://en.wikipedia.org/wiki/Eliezer_Yudkowsky)
[^8]: [History of LessWrong](https://www.lesswrong.com/about)
[^9]: [Map and Territory Sequence](https://www.lesswrong.com/tag/map-and-territory)
[^10]: [The Sequences: Core Sequences](https://www.lesswrong.com/tag/core-sequences)
[^11]: [Eliezer Yudkowsky - MIRI](https://intelligence.org/team/)
[^12]: [Rationality: From AI to Zombies Publication](https://intelligence.org/rationality-ai-zombies/)
[^13]: [Book Structure - Rationality: From AI to Zombies](https://www.readthesequences.com/)
[^14]: [Modern Sequences - LessWrong](https://www.lesswrong.com/library)
[^15]: [How to Actually Change Your Mind](https://www.lesswrong.com/tag/how-to-actually-change-your-mind)
[^16]: [Map and Territory - Core Concept](https://www.lesswrong.com/tag/map-and-territory)
[^17]: [Bayesian Reasoning in The Sequences](https://www.lesswrong.com/tag/bayes-theorem)
[^18]: [Cognitive Biases in The Sequences](https://www.lesswrong.com/tag/biases)
[^19]: [Timeless Decision Theory](https://www.lesswrong.com/tag/timeless-decision-theory)
[^20]: [AI Alignment Topics in The Sequences](https://www.lesswrong.com/tag/ai-alignment)
[^21]: [LessWrong Foundational Texts](https://www.lesswrong.com/library)
[^22]: [LessWrong 2024 Survey Results](https://www.lesswrong.com/posts/survey-2024)
[^23]: [EA Forum: Rationalist Influence](https://forum.effectivealtruism.org/)
[^24]: [EA Forum: Death Spirals Discussion](https://forum.effectivealtruism.org/)
[^25]: [Nick Bostrom and Intelligence Explosion](https://nickbostrom.com/)
[^26]: [Criticism: Philosophy Engagement](https://www.lesswrong.com/posts/philosophical-criticism)
[^27]: [EA Forum: Sequences Originality Debate](https://forum.effectivealtruism.org/)
[^28]: [Reader Reception - Goodreads](https://www.goodreads.com/book/show/rationality-from-ai-to-zombies)
[^29]: [EA Forum: Measurable Effectiveness Discussion](https://forum.effectivealtruism.org/)
[^30]: [Philosophical Errors in The Sequences](https://www.lesswrong.com/posts/sequences-philosophical-errors)
[^31]: [David Chalmers Response](https://philpapers.org/rec/CHATCA-2)
[^32]: [Epistemic Conduct Criticism](https://www.lesswrong.com/posts/epistemic-conduct)
[^33]: [EA Forum: Yudkowsky Track Record](https://forum.effectivealtruism.org/)
[^34]: [Reader Reviews - Style Criticism](https://www.goodreads.com/book/show/rationality-from-ai-to-zombies)
[^35]: [EA Forum: Sequences Writing Quality](https://forum.effectivealtruism.org/)
[^36]: [Worldview Transmission Concerns](https://forum.effectivealtruism.org/)
[^37]: [Second Reading Experience](https://forum.effectivealtruism.org/)
[^38]: [2024 Doom Update Podcast](https://www.lesswrong.com/posts/doom-update-2024)
[^39]: [Replication Crisis Impact](https://forum.effectivealtruism.org/)
[^40]: [Cultural Perception Discussion](https://www.reddit.com/r/SneerClub/)
[^41]: [Scott Alexander on Sequences Effectiveness](https://slatestarcodex.com/)
[^42]: [Sequence Highlights - 50 Essays](https://www.lesswrong.com/tag/sequence-highlights)
[^43]: [Inadequate Equilibria - MIRI](https://intelligence.org/inadequate-equilibria/)
[^44]: [If Anyone Builds It, Everyone Dies](https://www.amazon.com/If-Anyone-Builds-Everyone-Dies/)
[^45]: [Books in Bytes Podcast 2025](https://booksandbytes.fm/)
[^46]: [Manifold Markets: Yudkowsky Doom Predictions](https://manifold.markets/)