Longterm Wiki

Palisade Research

ID: palisade-research · Type: organization · Path: /knowledge-base/organizations/palisade-research/
Entity ID (EID): E428
5 backlinks · Quality: 65 · Updated: 2026-02-01
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "palisade-research",
  "wikiId": "E428",
  "path": "/knowledge-base/organizations/palisade-research/",
  "filePath": "knowledge-base/organizations/palisade-research.mdx",
  "title": "Palisade Research",
  "quality": 65,
  "readerImportance": 87.5,
  "researchImportance": 50.5,
  "tacticalValue": 75,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-02-01",
  "dateCreated": "2026-02-15",
  "summary": "Palisade Research is a 2023-founded nonprofit conducting empirical research on AI shutdown resistance and autonomous hacking capabilities, with notable findings that some frontier models resist shutdown commands but current systems cannot execute complex long-term plans. Their work provides concrete demonstrations of AI risks for policymakers but faces methodological criticism regarding prompt design and potential dual-use concerns.",
  "description": "Nonprofit organization investigating offensive AI capabilities and controllability of frontier AI models through empirical research on autonomous hacking, shutdown resistance, and agentic misalignment",
  "ratings": {
    "novelty": 6,
    "rigor": 5,
    "completeness": 8,
    "actionability": 7
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "ai-safety",
    "community",
    "cyber"
  ],
  "metrics": {
    "wordCount": 2037,
    "tableCount": 2,
    "diagramCount": 0,
    "internalLinks": 31,
    "externalLinks": 3,
    "footnoteCount": 26,
    "bulletRatio": 0.27,
    "sectionCount": 22,
    "hasOverview": true,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 2037,
  "unconvertedLinks": [
    {
      "text": "lesswrong.com",
      "url": "https://www.lesswrong.com/posts/7Jr7matwXHj2Chugw/help-keep-ai-under-human-control-palisade-research-2026",
      "resourceId": "db8efd724b178326",
      "resourceTitle": "Help keep AI under human control: Palisade Research 2026 fundraiser "
    }
  ],
  "unconvertedLinkCount": 1,
  "convertedLinkCount": 0,
  "backlinkCount": 5,
  "citationHealth": {
    "total": 24,
    "withQuotes": 20,
    "verified": 19,
    "accuracyChecked": 19,
    "accurate": 10,
    "inaccurate": 0,
    "avgScore": 0.9122591450810432
  },
  "hallucinationRisk": {
    "level": "medium",
    "score": 45,
    "factors": [
      "biographical-claims",
      "well-cited"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 15,
    "similarPages": [
      {
        "id": "ai-futures-project",
        "title": "AI Futures Project",
        "path": "/knowledge-base/organizations/ai-futures-project/",
        "similarity": 15
      },
      {
        "id": "frontier-model-forum",
        "title": "Frontier Model Forum",
        "path": "/knowledge-base/organizations/frontier-model-forum/",
        "similarity": 15
      },
      {
        "id": "nist-ai",
        "title": "NIST and AI Safety",
        "path": "/knowledge-base/organizations/nist-ai/",
        "similarity": 15
      },
      {
        "id": "apollo-research",
        "title": "Apollo Research",
        "path": "/knowledge-base/organizations/apollo-research/",
        "similarity": 14
      },
      {
        "id": "cais",
        "title": "Center for AI Safety (CAIS)",
        "path": "/knowledge-base/organizations/cais/",
        "similarity": 14
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-18",
      "branch": "claude/fix-issue-240-N5irU",
      "title": "Surface tacticalValue in /wiki table and score 53 pages",
      "summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
      "model": "sonnet-4",
      "duration": "~30min"
    }
  ],
  "coverage": {
    "passing": 9,
    "total": 13,
    "targets": {
      "tables": 8,
      "diagrams": 1,
      "internalLinks": 16,
      "externalLinks": 10,
      "footnotes": 6,
      "references": 6
    },
    "actuals": {
      "tables": 2,
      "diagrams": 0,
      "internalLinks": 31,
      "externalLinks": 3,
      "footnotes": 26,
      "references": 1,
      "quotesWithQuotes": 20,
      "quotesTotal": 24,
      "accuracyChecked": 19,
      "accuracyTotal": 24
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "amber",
      "footnotes": "green",
      "references": "amber",
      "quotes": "green",
      "accuracy": "green"
    },
    "editHistoryCount": 1,
    "ratingsString": "N:6 R:5 A:7 C:8"
  },
  "readerRank": 34,
  "researchRank": 277,
  "recommendedScore": 187.37
}
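
The record above is labelled as database.json, merged at build time from MDX frontmatter, entity YAML, and computed metrics. A minimal sketch of that merge, assuming gray-matter and js-yaml as parsers; the wiki's actual loader, field handling, and metric definitions are not shown in this record:

import { readFileSync } from "node:fs";
import matter from "gray-matter";           // assumed frontmatter parser
import { load as loadYaml } from "js-yaml"; // assumed YAML parser

interface PageRecord {
  id: string;
  wikiId: string;
  path: string;
  title: string;
  metrics: Record<string, number | boolean>;
  [key: string]: unknown;
}

function buildPageRecord(mdxPath: string, entityYamlPath: string): PageRecord {
  // 1. MDX frontmatter: title, summary, quality, ratings, clusters, ...
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));

  // 2. Entity YAML: stable identifiers such as wikiId (E428) and entityType.
  const entity = loadYaml(readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;

  // 3. Computed metrics: derived by scanning the MDX body itself.
  //    These counting rules are illustrative guesses, not the wiki's formulas.
  const metrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\]\(\/knowledge-base\//g) ?? []).length,
    footnoteCount: (content.match(/\[\^[^\]]+\]:/g) ?? []).length,
  };

  // Later spreads win on key collisions; the merged object is what lands in database.json.
  return { ...frontmatter, ...entity, metrics } as PageRecord;
}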
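The coverage block compares per-page targets against actuals and reports each item as green, amber, or red (9 of 13 passing here). One plausible derivation, assuming the simple rule "meets target → green, nonzero but short of target → amber, zero → red"; this reproduces the statuses shown above (tables 2/8 → amber, diagrams 0/1 → red, internalLinks 31/16 → green) but is a guess at the real thresholds:

type CoverageStatus = "green" | "amber" | "red";

// Assumed rule: meeting the target is green, any nonzero shortfall is amber,
// and nothing at all is red.
function coverageStatus(actual: number, target: number): CoverageStatus {
  if (actual >= target) return "green";
  if (actual > 0) return "amber";
  return "red";
}

const targets = { tables: 8, diagrams: 1, internalLinks: 16, externalLinks: 10, footnotes: 6, references: 6 };
const actuals = { tables: 2, diagrams: 0, internalLinks: 31, externalLinks: 3, footnotes: 26, references: 1 };

const items = Object.fromEntries(
  (Object.keys(targets) as (keyof typeof targets)[]).map((k) => [k, coverageStatus(actuals[k], targets[k])]),
);
// items -> { tables: "amber", diagrams: "red", internalLinks: "green",
//            externalLinks: "amber", footnotes: "green", references: "amber" }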
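The single change-history entry describes surfacing tacticalValue in the /wiki explore table. A rough sketch of the shape that change implies, assuming a simplified ExploreItem; the real interface and mapping function carry more fields than shown here:

// Assumed, simplified shape; the actual ExploreItem interface is not included
// in this record. The change adds tacticalValue so the /wiki table can expose
// a sortable "Tact." column.
interface ExploreItem {
  id: string;
  title: string;
  path: string;
  quality: number;
  readerImportance: number;
  tacticalValue?: number; // newly surfaced; optional because only 53 pages are scored
}

function getExploreItems(records: Array<Record<string, any>>): ExploreItem[] {
  return records.map((r) => ({
    id: r.id,
    title: r.title,
    path: r.path,
    quality: r.quality,
    readerImportance: r.readerImportance,
    tacticalValue: r.tacticalValue, // the mapping the change-history entry added
  }));
}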
External Links
{
  "grokipedia": "https://grokipedia.com/page/Palisade_Research"
}
Backlinks (5)
id | title | type | relationship
why-alignment-hard | Why Alignment Might Be Hard | argument |
lionheart-ventures | Lionheart Ventures | organization |
safety-orgs-overview | AI Safety Organizations (Overview) | concept |
jaan-tallinn | Jaan Tallinn | person |
power-seeking | Power-Seeking AI | risk |