Skip to content
Longterm Wiki

Daniela Amodei

daniela-amodei · person · Path: /knowledge-base/people/daniela-amodei/
Entity ID (EID): E90
← Back to page · 17 backlinks · Quality: 21 · Updated: 2026-02-22
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "daniela-amodei",
  "wikiId": "E90",
  "path": "/knowledge-base/people/daniela-amodei/",
  "filePath": "knowledge-base/people/daniela-amodei.mdx",
  "title": "Daniela Amodei",
  "quality": 21,
  "readerImportance": 27.5,
  "researchImportance": 11,
  "tacticalValue": null,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-02-22",
  "dateCreated": "2026-02-15",
  "summary": "Biographical profile of Anthropic's President covering her education, early career, roles at Stripe and OpenAI, and her operational and commercial leadership at Anthropic. Includes fundraising history, named enterprise partnerships, sourced quotes, and documented external criticisms of Anthropic's commercial-safety balance.",
  "description": "Co-founder and President of Anthropic, overseeing business operations, commercial strategy, and enterprise partnerships while publicly advocating for safety-focused AI development and deployment practices.",
  "ratings": {
    "novelty": 1.5,
    "rigor": 2,
    "completeness": 4,
    "actionability": 1
  },
  "category": "people",
  "subcategory": "lab-leadership",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 2832,
    "tableCount": 7,
    "diagramCount": 0,
    "internalLinks": 25,
    "externalLinks": 22,
    "footnoteCount": 25,
    "bulletRatio": 0.13,
    "sectionCount": 33,
    "hasOverview": true,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": null,
  "evergreen": true,
  "wordCount": 2832,
  "unconvertedLinks": [
    {
      "text": "Anthropic Series A press release",
      "url": "https://www.anthropic.com/news/anthropic-raises-124-million-to-build-more-reliable-general-ai-systems",
      "resourceId": "c050aa0c7cd8f1bc",
      "resourceTitle": "Anthropic raises $124 million to build more reliable, general AI systems \\ Anthropic"
    },
    {
      "text": "Tracxn funding data",
      "url": "https://tracxn.com/d/companies/anthropic/__SzoxXDMin-NK5tKB7ks8yHr6S9Mz68pjVCzFEcGFZ08/funding-and-investors",
      "resourceId": "a69f4d2abe539628",
      "resourceTitle": "Anthropic Funding and Investors (Tracxn)"
    },
    {
      "text": "Anthropic Series G press release",
      "url": "https://www.anthropic.com/news/anthropic-raises-30-billion-series-g-funding-380-billion-post-money-valuation",
      "resourceId": "b58bb99dfec2e318",
      "resourceTitle": "Anthropic: Raises \\$30 Billion Series G Funding at \\$380 Billion Post-Money Valuation"
    },
    {
      "text": "NIST, Aug 2024",
      "url": "https://www.nist.gov/news-events/news/2024/08/us-ai-safety-institute-signs-agreements-regarding-ai-safety-research",
      "resourceId": "627bb42e8f74be04",
      "resourceTitle": "MOU with US AI Safety Institute"
    },
    {
      "text": "Anthropic Blog",
      "url": "https://www.anthropic.com/news",
      "resourceId": "f6aa679babd7a46a",
      "resourceTitle": "OpenAI disbanded super-alignment team"
    },
    {
      "text": "Anthropic Series A Press Release",
      "url": "https://www.anthropic.com/news/anthropic-raises-124-million-to-build-more-reliable-general-ai-systems",
      "resourceId": "c050aa0c7cd8f1bc",
      "resourceTitle": "Anthropic raises $124 million to build more reliable, general AI systems \\ Anthropic"
    },
    {
      "text": "Anthropic Series G Press Release",
      "url": "https://www.anthropic.com/news/anthropic-raises-30-billion-series-g-funding-380-billion-post-money-valuation",
      "resourceId": "b58bb99dfec2e318",
      "resourceTitle": "Anthropic: Raises \\$30 Billion Series G Funding at \\$380 Billion Post-Money Valuation"
    },
    {
      "text": "Constitutional AI arXiv paper",
      "url": "https://arxiv.org/pdf/2212.08073",
      "resourceId": "b3e647be3bc180f4",
      "resourceTitle": "Anthropic Research Team, \"Constitutional AI: Harmlessness from AI Feedback,\" arXiv, December 2022"
    },
    {
      "text": "US AI Safety Institute agreements",
      "url": "https://www.nist.gov/news-events/news/2024/08/us-ai-safety-institute-signs-agreements-regarding-ai-safety-research",
      "resourceId": "627bb42e8f74be04",
      "resourceTitle": "MOU with US AI Safety Institute"
    },
    {
      "text": "Stephen Casper, May 2024",
      "url": "https://www.alignmentforum.org/posts/pH6tyhEnngqWAXi9i/eis-xiii-reflections-on-anthropic-s-sae-research-circa-may",
      "resourceId": "347e0b288361f087",
      "resourceTitle": "some researchers note"
    },
    {
      "text": "Daniela Amodei – Wikipedia",
      "url": "https://en.wikipedia.org/wiki/Daniela_Amodei",
      "resourceId": "8d67371e96133f80",
      "resourceTitle": "Wikipedia contributors, \"Daniela Amodei,\" updated February 2026"
    }
  ],
  "unconvertedLinkCount": 11,
  "convertedLinkCount": 1,
  "backlinkCount": 17,
  "hallucinationRisk": {
    "level": "medium",
    "score": 60,
    "factors": [
      "biographical-claims",
      "low-rigor-score",
      "low-quality-score",
      "well-cited"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 16,
    "similarPages": [
      {
        "id": "openai",
        "title": "OpenAI",
        "path": "/knowledge-base/organizations/openai/",
        "similarity": 16
      },
      {
        "id": "ilya-sutskever",
        "title": "Ilya Sutskever",
        "path": "/knowledge-base/people/ilya-sutskever/",
        "similarity": 16
      },
      {
        "id": "anthropic",
        "title": "Anthropic",
        "path": "/knowledge-base/organizations/anthropic/",
        "similarity": 15
      },
      {
        "id": "ssi",
        "title": "Safe Superintelligence Inc. (SSI)",
        "path": "/knowledge-base/organizations/ssi/",
        "similarity": 15
      },
      {
        "id": "connor-leahy",
        "title": "Connor Leahy",
        "path": "/knowledge-base/people/connor-leahy/",
        "similarity": 15
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 11,
      "diagrams": 1,
      "internalLinks": 23,
      "externalLinks": 14,
      "footnotes": 8,
      "references": 8
    },
    "actuals": {
      "tables": 7,
      "diagrams": 0,
      "internalLinks": 25,
      "externalLinks": 22,
      "footnotes": 25,
      "references": 13,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "red",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "green",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:1.5 R:2 A:1 C:4"
  },
  "readerRank": 468,
  "researchRank": 543,
  "recommendedScore": 71.77
}
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/anthropic",
  "grokipedia": "https://grokipedia.com/page/Daniela_Amodei"
}
Backlinks (17)
| id | title | type | relationship |
| --- | --- | --- | --- |
| anthropic-stakeholders | Anthropic Stakeholders | table | |
| anthropic-ipo | Anthropic IPO | analysis | |
| anthropic | Anthropic | organization | leads-to |
| anthropic | Anthropic | organization | |
| long-term-benefit-trust | Anthropic Long-Term Benefit Trust | organization | |
| deep-learning-era | Deep Learning Revolution (2012-2020) | historical | |
| mainstream-era | Mainstream Era (2020-Present) | historical | |
| anthropic-pledge-enforcement | Anthropic Founder Pledges: Interventions to Increase Follow-Through | analysis | |
| anthropic-investors | Anthropic (Funder) | analysis | |
| anthropic-pre-ipo-daf-transfers | Anthropic Pre-IPO DAF Transfers | analysis | |
| anthropic-valuation | Anthropic Valuation Analysis | analysis | |
| coefficient-giving | Coefficient Giving | organization | |
| dario-amodei | Dario Amodei | person | |
| __index__/knowledge-base/people | People | concept | |
| sam-altman | Sam Altman | person | |
| sam-mccandlish | Sam McCandlish | person | |
| anthropic-core-views | Anthropic Core Views | safety-agenda | |
Longterm Wiki