Longterm Wiki

Tool-Use Restrictions

tool-restrictions (approach)
Path: /knowledge-base/responses/tool-restrictions/
Entity ID (EID): E487
Backlinks: 3 · Quality: 91 · Updated: 2026-01-29

Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "tool-restrictions",
  "wikiId": "E487",
  "path": "/knowledge-base/responses/tool-restrictions/",
  "filePath": "knowledge-base/responses/tool-restrictions.mdx",
  "title": "Tool-Use Restrictions",
  "quality": 91,
  "readerImportance": 57.5,
  "researchImportance": 26,
  "tacticalValue": null,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-01-29",
  "dateCreated": "2026-02-15",
  "summary": "Tool-use restrictions provide hard limits on AI agent capabilities through defense-in-depth approaches combining permissions, sandboxing, and human-in-the-loop controls. Empirical evidence shows METR task horizons doubling every 7 months and incident data (EchoLeak CVE-2025-32711, Shai-Hulud campaign) demonstrating real-world exploitation, with security effectiveness ranging 60-95% across threat categories depending on control type.",
  "description": "Tool-use restrictions limit what actions and APIs AI systems can access, directly constraining their potential for harm.",
  "ratings": {
    "novelty": 4.5,
    "rigor": 6.5,
    "completeness": 7,
    "actionability": 7.5
  },
  "category": "responses",
  "subcategory": "alignment-deployment",
  "clusters": [
    "ai-safety",
    "governance",
    "cyber"
  ],
  "metrics": {
    "wordCount": 3894,
    "tableCount": 24,
    "diagramCount": 2,
    "internalLinks": 4,
    "externalLinks": 75,
    "footnoteCount": 0,
    "bulletRatio": 0.1,
    "sectionCount": 42,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 3894,
  "unconvertedLinks": [
    {
      "text": "METR",
      "url": "https://metr.org/blog/2025-03-19-measuring-ai-ability-to-complete-long-tasks/",
      "resourceId": "271fc5f73a8304b2",
      "resourceTitle": "Measuring AI Ability to Complete Long Tasks - METR"
    },
    {
      "text": "METR",
      "url": "https://metr.org/",
      "resourceId": "45370a5153534152",
      "resourceTitle": "METR: Model Evaluation and Threat Research"
    },
    {
      "text": "Agentic AI Security Survey",
      "url": "https://arxiv.org/html/2510.23883v1",
      "resourceId": "307088cd981d31e1",
      "resourceTitle": "Engineered prompts in emails"
    },
    {
      "text": "Palo Alto Unit 42",
      "url": "https://unit42.paloaltonetworks.com/agentic-ai-threats/",
      "resourceId": "d6f4face14780e85",
      "resourceTitle": "EchoLeak exploit (CVE-2025-32711)"
    },
    {
      "text": "Palo Alto Unit 42 research",
      "url": "https://unit42.paloaltonetworks.com/agentic-ai-threats/",
      "resourceId": "d6f4face14780e85",
      "resourceTitle": "EchoLeak exploit (CVE-2025-32711)"
    },
    {
      "text": "AI Lab Watch",
      "url": "https://ailabwatch.org/resources/commitments",
      "resourceId": "91ca6b1425554e9a",
      "resourceTitle": "AI Lab Watch: Commitments Tracker"
    },
    {
      "text": "AI Lab Watch Commitments",
      "url": "https://ailabwatch.org/resources/commitments",
      "resourceId": "91ca6b1425554e9a",
      "resourceTitle": "AI Lab Watch: Commitments Tracker"
    },
    {
      "text": "EA Forum Safety Plan Analysis",
      "url": "https://forum.effectivealtruism.org/posts/fsxQGjhYecDoHshxX/i-read-every-major-ai-lab-s-safety-plan-so-you-don-t-have-to",
      "resourceId": "d564401cd5e38340",
      "resourceTitle": "I read every major AI lab’s safety plan so you don’t have to"
    },
    {
      "text": "April 2025",
      "url": "https://forum.effectivealtruism.org/posts/fsxQGjhYecDoHshxX/i-read-every-major-ai-lab-s-safety-plan-so-you-don-t-have-to",
      "resourceId": "d564401cd5e38340",
      "resourceTitle": "I read every major AI lab’s safety plan so you don’t have to"
    },
    {
      "text": "UK AI Safety Institute",
      "url": "https://www.aisi.gov.uk/blog/advanced-ai-evaluations-may-update",
      "resourceId": "4e56cdf6b04b126b",
      "resourceTitle": "UK AI Safety Institute renamed to AI Security Institute"
    },
    {
      "text": "METR",
      "url": "https://metr.org/",
      "resourceId": "45370a5153534152",
      "resourceTitle": "METR: Model Evaluation and Threat Research"
    },
    {
      "text": "UK AISI May 2025 Update",
      "url": "https://www.aisi.gov.uk/blog/advanced-ai-evaluations-may-update",
      "resourceId": "4e56cdf6b04b126b",
      "resourceTitle": "UK AI Safety Institute renamed to AI Security Institute"
    },
    {
      "text": "Evidently AI Benchmarks",
      "url": "https://www.evidentlyai.com/blog/ai-agent-benchmarks",
      "resourceId": "f8832ce349126f66",
      "resourceTitle": "AI Agent Benchmarks 2025"
    },
    {
      "text": "UK AI Safety Institute",
      "url": "https://www.aisi.gov.uk/",
      "resourceId": "fdf68a8f30f57dee",
      "resourceTitle": "UK AI Safety Institute (AISI)"
    },
    {
      "text": "Advanced AI Evaluations",
      "url": "https://www.aisi.gov.uk/blog/advanced-ai-evaluations-may-update",
      "resourceId": "4e56cdf6b04b126b",
      "resourceTitle": "UK AI Safety Institute renamed to AI Security Institute"
    },
    {
      "text": "METR",
      "url": "https://metr.org/",
      "resourceId": "45370a5153534152",
      "resourceTitle": "METR: Model Evaluation and Threat Research"
    },
    {
      "text": "NIST",
      "url": "https://www.nist.gov/",
      "resourceId": "25fd927348343183",
      "resourceTitle": "US AI Safety Institute"
    },
    {
      "text": "Future of Life Institute",
      "url": "https://futureoflife.org/",
      "resourceId": "786a68a91a7d5712",
      "resourceTitle": "Future of Life Institute"
    },
    {
      "text": "2025 AI Safety Index",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "ailabwatch.org/resources/commitments",
      "url": "https://ailabwatch.org/resources/commitments",
      "resourceId": "91ca6b1425554e9a",
      "resourceTitle": "AI Lab Watch: Commitments Tracker"
    },
    {
      "text": "Agentic AI Security: Threats, Defenses, Evaluation, and Open Challenges",
      "url": "https://arxiv.org/html/2510.23883v1",
      "resourceId": "307088cd981d31e1",
      "resourceTitle": "Engineered prompts in emails"
    },
    {
      "text": "EA Forum: AI Lab Safety Plans Analysis",
      "url": "https://forum.effectivealtruism.org/posts/fsxQGjhYecDoHshxX/i-read-every-major-ai-lab-s-safety-plan-so-you-don-t-have-to",
      "resourceId": "d564401cd5e38340",
      "resourceTitle": "I read every major AI lab’s safety plan so you don’t have to"
    }
  ],
  "unconvertedLinkCount": 22,
  "convertedLinkCount": 0,
  "backlinkCount": 3,
  "hallucinationRisk": {
    "level": "medium",
    "score": 40,
    "factors": [
      "no-citations",
      "conceptual-content",
      "high-quality"
    ]
  },
  "entityType": "approach",
  "redundancy": {
    "maxSimilarity": 19,
    "similarPages": [
      {
        "id": "sandboxing",
        "title": "Sandboxing / Containment",
        "path": "/knowledge-base/responses/sandboxing/",
        "similarity": 19
      },
      {
        "id": "tool-use",
        "title": "Tool Use and Computer Use",
        "path": "/knowledge-base/capabilities/tool-use/",
        "similarity": 18
      },
      {
        "id": "capability-elicitation",
        "title": "Capability Elicitation",
        "path": "/knowledge-base/responses/capability-elicitation/",
        "similarity": 16
      },
      {
        "id": "dangerous-cap-evals",
        "title": "Dangerous Capability Evaluations",
        "path": "/knowledge-base/responses/dangerous-cap-evals/",
        "similarity": 16
      },
      {
        "id": "model-auditing",
        "title": "Third-Party Model Auditing",
        "path": "/knowledge-base/responses/model-auditing/",
        "similarity": 16
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 16,
      "diagrams": 2,
      "internalLinks": 31,
      "externalLinks": 19,
      "footnotes": 12,
      "references": 12
    },
    "actuals": {
      "tables": 24,
      "diagrams": 2,
      "internalLinks": 4,
      "externalLinks": 75,
      "footnotes": 0,
      "references": 11,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "amber",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.5 R:6.5 A:7.5 C:7"
  },
  "readerRank": 248,
  "researchRank": 443,
  "recommendedScore": 224.27
}
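
The record above is the page's database.json entry, described as merged at build time from MDX frontmatter, Entity YAML, and computed metrics. A minimal TypeScript sketch of one way that merge could work, assuming a gray-matter/js-yaml toolchain; every name here (PageRecord, mergePageRecord, the entity file path) is illustrative rather than the wiki's actual build code:

import { readFileSync } from "node:fs";
import matter from "gray-matter";
import { load } from "js-yaml";

// Shape of the merged record; only a few fields are typed explicitly.
interface PageRecord {
  id: string;
  wikiId: string;
  title: string;
  quality: number;
  metrics: Record<string, number | boolean>;
  [key: string]: unknown;
}

function mergePageRecord(
  mdxPath: string,                                   // e.g. knowledge-base/responses/tool-restrictions.mdx
  entityYamlPath: string,                            // hypothetical path to the Entity YAML
  computedMetrics: Record<string, number | boolean>  // word/link counts computed by the build
): PageRecord {
  // 1. MDX frontmatter supplies authored fields (title, summary, ratings, ...).
  const { data: frontmatter } = matter(readFileSync(mdxPath, "utf8"));
  // 2. Entity YAML supplies identity fields (wikiId, entityType, ...).
  const entity = load(readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;
  // 3. Later spreads win on key collisions, so entity identity fields override
  //    frontmatter, and computed metrics are attached last as their own field.
  return { ...frontmatter, ...entity, metrics: computedMetrics } as PageRecord;
}

The precedence ordering (entity over frontmatter, metrics attached separately) is one plausible reading of "merged from MDX frontmatter + Entity YAML + computed metrics", not a documented guarantee.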
External Links

No external links

Backlinks (3)

id                             title                            type      relationship
sandboxing                     Sandboxing / Containment         approach
sandboxing                     Sandboxing / Containment         approach
alignment-deployment-overview  Deployment & Control (Overview)  concept
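
The coverage block in the record pairs per-item targets with actuals to produce the green/amber/red statuses. A minimal sketch of a rule that reproduces every status shown in this record; the wiki's real thresholds are not documented on this page, so the function below is an inference, not its actual code:

type Status = "green" | "amber" | "red";

function coverageStatus(target: number, actual: number): Status {
  if (actual >= target) return "green"; // target met or exceeded (tables: 24 vs 16)
  if (actual > 0) return "amber";       // partial coverage (internalLinks: 4 vs 31)
  return "red";                         // nothing present (footnotes: 0 vs 12)
}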