Safety Culture Strength
safety-culture-strength (E264)
← Back to page
Path: /ai-transition-model/safety-culture-strength/
Page Metadata
{
"id": "safety-culture-strength",
"numericId": null,
"path": "/ai-transition-model/safety-culture-strength/",
"filePath": "ai-transition-model/safety-culture-strength.mdx",
"title": "Safety Culture Strength",
"quality": null,
"importance": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": null,
"llmSummary": "This page contains only a React component import with no actual content displayed. Cannot assess the substantive content about safety culture strength in AI development.",
"structuredSummary": null,
"description": null,
"ratings": {
"novelty": 0,
"rigor": 0,
"actionability": 0,
"completeness": 0
},
"category": "ai-transition-model",
"subcategory": "factors-misalignment-potential",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 0,
"tableCount": 0,
"diagramCount": 0,
"internalLinks": 0,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0,
"sectionCount": 0,
"hasOverview": false,
"structuralScore": 2
},
"suggestedQuality": 13,
"updateFrequency": null,
"evergreen": true,
"wordCount": 0,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 0,
"backlinkCount": 7,
"redundancy": {
"maxSimilarity": 0,
"similarPages": []
}
}
Entity Data
{
"id": "safety-culture-strength",
"type": "ai-transition-model-parameter",
"title": "Safety Culture Strength",
"description": "The degree to which AI organizations genuinely prioritize safety in decisions, resource allocation, and personnel incentives.",
"tags": [
"governance",
"safety",
"organizational"
],
"relatedEntries": [
{
"id": "racing-dynamics",
"type": "risk",
"relationship": "related"
},
{
"id": "safety-research",
"type": "ai-transition-model-metric",
"relationship": "measured-by"
},
{
"id": "lab-behavior",
"type": "ai-transition-model-metric",
"relationship": "measured-by"
},
{
"id": "racing-dynamics-model",
"type": "model",
"relationship": "analyzed-by"
},
{
"id": "lab-incentives-model",
"type": "model",
"relationship": "analyzed-by"
}
],
"sources": [],
"lastUpdated": "2025-12",
"customFields": [
{
"label": "Direction",
"value": "Higher is better"
},
{
"label": "Current Trend",
"value": "Mixed (some labs lead, others decline under competitive pressure)"
},
{
"label": "Key Measurement",
"value": "Safety budget trends, deployment veto authority, incident transparency"
}
]
}
Canonical Facts (0)
No facts for this entity
External Links
{
"eaForum": "https://forum.effectivealtruism.org/topics/ai-lab-safety"
}
Backlinks (7)
| id | title | type | relationship |
|---|---|---|---|
| misalignment-potential | Misalignment Potential | ai-transition-model-factor | composed-of |
| safety-research | Safety Research | ai-transition-model-metric | measures |
| lab-behavior | Lab Behavior | ai-transition-model-metric | measures |
| safety-capability-gap | Safety-Capability Gap | ai-transition-model-parameter | related |
| racing-dynamics-model | Racing Dynamics Game Theory Model | model | affects |
| lab-incentives-model | AI Lab Incentives Model | model | models |
| safety-culture-equilibrium | AI Safety Culture Equilibrium Model | model | models |
Frontmatter
{
"title": "Safety Culture Strength",
"sidebar": {
"order": 18
},
"importance": 0,
"quality": 0,
"llmSummary": "This page contains only a React component import with no actual content displayed. Cannot assess the substantive content about safety culture strength in AI development.",
"ratings": {
"novelty": 0,
"rigor": 0,
"actionability": 0,
"completeness": 0
},
"clusters": [
"ai-safety",
"governance"
],
"subcategory": "factors-misalignment-potential"
}Raw MDX Source
---
title: Safety Culture Strength
sidebar:
order: 18
importance: 0
quality: 0
llmSummary: This page contains only a React component import with no actual content displayed. Cannot assess the substantive content about safety culture strength in AI development.
ratings:
novelty: 0
rigor: 0
actionability: 0
completeness: 0
clusters:
- ai-safety
- governance
subcategory: factors-misalignment-potential
---
import {TransitionModelContent} from '@components/wiki/TransitionModelContent';
<TransitionModelContent entityId="E345" />