source: 2026-01-01-aisi-sketch-ai-control-safety-case.md → null-result
Pentagon-Agent: Epimetheus <PIPELINE>
This commit is contained in:
parent
af8e374aaf
commit
4ab4c24b0d
1 changed file with 2 additions and 1 deletion
|
|
@@ -7,10 +7,11 @@ date: 2026-01-01
|
|||
domain: ai-alignment
|
||||
secondary_domains: [grand-strategy]
|
||||
format: paper
|
||||
status: unprocessed
|
||||
status: null-result
|
||||
priority: medium
|
||||
tags: [AISI, control-safety-case, safety-argument, loss-of-control, governance-framework, institutional]
|
||||
flagged_for_leo: ["this is the governance architecture side — AISI is building not just evaluation tools but a structured argument framework for claiming AI is safe to deploy; the gap between this framework and the sandbagging/detection-failure findings in other AISI papers is itself a governance signal"]
|
||||
extraction_model: "anthropic/claude-sonnet-4.5"
|
||||
---
|
||||
|
||||
## Content
|
||||
Loading…
Reference in a new issue