From b41a80ab0e9f50d5bcf301fc3861faf1ed37522e Mon Sep 17 00:00:00 2001
From: Teleo Agents
Date: Tue, 24 Mar 2026 04:31:45 +0000
Subject: [PATCH 1/3] extract: 2025-11-01-jmir-knowledge-practice-gap-39-benchmarks-systematic-review

Pentagon-Agent: Epimetheus <3D35839A-7722-4740-B93D-51157F7D5E70>
---
 ...iagnostic accuracy in randomized trials.md |  6 ++++
 ...e-gap-39-benchmarks-systematic-review.json | 32 +++++++++++++++++++
 ...ice-gap-39-benchmarks-systematic-review.md | 17 +++++++++-
 3 files changed, 54 insertions(+), 1 deletion(-)
 create mode 100644 inbox/queue/.extraction-debug/2025-11-01-jmir-knowledge-practice-gap-39-benchmarks-systematic-review.json

diff --git a/domains/health/medical LLM benchmark performance does not translate to clinical impact because physicians with and without AI access achieve similar diagnostic accuracy in randomized trials.md b/domains/health/medical LLM benchmark performance does not translate to clinical impact because physicians with and without AI access achieve similar diagnostic accuracy in randomized trials.md
index 6c4e105c9..bb919b4c9 100644
--- a/domains/health/medical LLM benchmark performance does not translate to clinical impact because physicians with and without AI access achieve similar diagnostic accuracy in randomized trials.md
+++ b/domains/health/medical LLM benchmark performance does not translate to clinical impact because physicians with and without AI access achieve similar diagnostic accuracy in randomized trials.md
@@ -35,6 +35,12 @@ OpenEvidence's medRxiv preprint (November 2025) showed 24% accuracy for relevant
 
 ARISE report identifies specific failure modes: real-world performance 'breaks down when systems must manage uncertainty, incomplete information, or multi-step workflows.' This provides mechanistic detail for why benchmark performance doesn't translate — benchmarks test pattern recognition on complete data while clinical care requires uncertainty management.
 
+### Additional Evidence (extend)
+*Source: [[2025-11-01-jmir-knowledge-practice-gap-39-benchmarks-systematic-review]] | Added: 2026-03-24*
+
+JMIR systematic review of 761 studies provides the methodological foundation: 95% of clinical LLM evaluation uses medical exam questions rather than real patient data, with only 5% assessing performance on actual patient care. Traditional benchmarks show saturation at 84-90% USMLE accuracy, but conversational frameworks reveal a 19.3pp accuracy drop (82% → 62.7%) when moving from case vignettes to multi-turn dialogues. The review concludes: 'substantial disconnects from clinical reality and foundational gaps in construct validity, data integrity, and safety coverage.' This establishes that the Oxford/Nature Medicine RCT deployment gap (94.9% → 34.5%) is part of a systematic field-wide pattern, not an isolated finding.
+
+
 
 
 Relevant Notes:
diff --git a/inbox/queue/.extraction-debug/2025-11-01-jmir-knowledge-practice-gap-39-benchmarks-systematic-review.json b/inbox/queue/.extraction-debug/2025-11-01-jmir-knowledge-practice-gap-39-benchmarks-systematic-review.json
new file mode 100644
index 000000000..cf3d8577a
--- /dev/null
+++ b/inbox/queue/.extraction-debug/2025-11-01-jmir-knowledge-practice-gap-39-benchmarks-systematic-review.json
@@ -0,0 +1,32 @@
+{
+  "rejected_claims": [
+    {
+      "filename": "clinical-llm-evaluation-uses-medical-exam-questions-not-real-patient-data-creating-systematic-benchmark-validity-gap.md",
+      "issues": [
+        "missing_attribution_extractor"
+      ]
+    },
+    {
+      "filename": "conversational-clinical-ai-shows-19-point-accuracy-drop-versus-single-turn-questions-revealing-interaction-complexity-gap.md",
+      "issues": [
+        "missing_attribution_extractor"
+      ]
+    }
+  ],
+  "validation_stats": {
+    "total": 2,
+    "kept": 0,
+    "fixed": 2,
+    "rejected": 2,
+    "fixes_applied": [
+      "clinical-llm-evaluation-uses-medical-exam-questions-not-real-patient-data-creating-systematic-benchmark-validity-gap.md:set_created:2026-03-24",
+      "conversational-clinical-ai-shows-19-point-accuracy-drop-versus-single-turn-questions-revealing-interaction-complexity-gap.md:set_created:2026-03-24"
+    ],
+    "rejections": [
+      "clinical-llm-evaluation-uses-medical-exam-questions-not-real-patient-data-creating-systematic-benchmark-validity-gap.md:missing_attribution_extractor",
+      "conversational-clinical-ai-shows-19-point-accuracy-drop-versus-single-turn-questions-revealing-interaction-complexity-gap.md:missing_attribution_extractor"
+    ]
+  },
+  "model": "anthropic/claude-sonnet-4.5",
+  "date": "2026-03-24"
+}
\ No newline at end of file
diff --git a/inbox/queue/2025-11-01-jmir-knowledge-practice-gap-39-benchmarks-systematic-review.md b/inbox/queue/2025-11-01-jmir-knowledge-practice-gap-39-benchmarks-systematic-review.md
index 269145614..377c7e8e1 100644
--- a/inbox/queue/2025-11-01-jmir-knowledge-practice-gap-39-benchmarks-systematic-review.md
+++ b/inbox/queue/2025-11-01-jmir-knowledge-practice-gap-39-benchmarks-systematic-review.md
@@ -7,9 +7,13 @@ date: 2025-11-01
 domain: health
 secondary_domains: [ai-alignment]
 format: research-paper
-status: unprocessed
+status: enrichment
 priority: medium
 tags: [clinical-ai-safety, benchmark-performance-gap, llm-evaluation, knowledge-practice-gap, real-world-deployment, belief-5, systematic-review]
+processed_by: vida
+processed_date: 2026-03-24
+enrichments_applied: ["medical LLM benchmark performance does not translate to clinical impact because physicians with and without AI access achieve similar diagnostic accuracy in randomized trials.md"]
+extraction_model: "anthropic/claude-sonnet-4.5"
 ---
 
 ## Content
@@ -53,3 +57,14 @@ Published in *Journal of Medical Internet Research* (JMIR), 2025, Vol. 2025, e84
 PRIMARY CONNECTION: Belief 5 — clinical AI safety evaluation methodology gap
 WHY ARCHIVED: Provides systematic evidence that the KB's reliance on benchmark performance data (e.g., "OE scores 100% on USMLE") is epistemically weak — and establishes that the Oxford RCT deployment gap finding is part of a systematic pattern
 EXTRACTION HINT: Extract the 5%/95% finding as a standalone methodological claim about the clinical AI evaluation field; pair with Oxford Nature Medicine RCT as empirical confirmation
+
+
+## Key Facts
+- JMIR systematic review analyzed 761 LLM evaluation studies across 39 benchmarks
+- Only 5% of 761 studies assessed performance on real patient care data
+- 95% of studies relied on medical examination questions (USMLE-style) or case vignettes
+- Leading models achieve 84-90% accuracy on USMLE benchmarks
+- Diagnostic accuracy drops from 82% on case vignettes to 62.7% on multi-turn dialogues (19.3pp decrease)
+- npj Digital Medicine study: six LLMs averaged 57.2% total score, 54.7% safety score, 62.3% effectiveness
+- 13.3% performance drop in high-risk scenarios versus average scenarios (npj Digital Medicine)
+- LLMs show markedly lower performance on script concordance testing than on multiple-choice benchmarks
-- 
2.45.2

From 961ad0ee00610496543ad847e54d83b8e177fe9f Mon Sep 17 00:00:00 2001
From: Teleo Agents
Date: Tue, 24 Mar 2026 04:33:13 +0000
Subject: [PATCH 2/3] pipeline: archive 1 source(s) post-merge

Pentagon-Agent: Epimetheus <3D35839A-7722-4740-B93D-51157F7D5E70>
---
 ...ice-gap-39-benchmarks-systematic-review.md | 55 +++++++++++++++++++
 1 file changed, 55 insertions(+)
 create mode 100644 inbox/archive/health/2025-11-01-jmir-knowledge-practice-gap-39-benchmarks-systematic-review.md

diff --git a/inbox/archive/health/2025-11-01-jmir-knowledge-practice-gap-39-benchmarks-systematic-review.md b/inbox/archive/health/2025-11-01-jmir-knowledge-practice-gap-39-benchmarks-systematic-review.md
new file mode 100644
index 000000000..93c0f9b54
--- /dev/null
+++ b/inbox/archive/health/2025-11-01-jmir-knowledge-practice-gap-39-benchmarks-systematic-review.md
@@ -0,0 +1,55 @@
+---
+type: source
+title: "JMIR 2025 Systematic Review: Knowledge-Practice Performance Gap in Clinical LLMs — Only 5% of 761 Studies Used Real Patient Data"
+author: "JMIR authors (systematic review team)"
+url: https://www.jmir.org/2025/1/e84120
+date: 2025-11-01
+domain: health
+secondary_domains: [ai-alignment]
+format: research-paper
+status: processed
+priority: medium
+tags: [clinical-ai-safety, benchmark-performance-gap, llm-evaluation, knowledge-practice-gap, real-world-deployment, belief-5, systematic-review]
+---
+
+## Content
+
+Published in *Journal of Medical Internet Research* (JMIR), 2025, Vol. 2025, e84120. Available in PMC as PMC12706444. Systematic review of 761 LLM evaluation studies across clinical medicine, analyzing 39 benchmarks.
+
+**Key findings:**
+- **Only 5%** of 761 LLM evaluation studies assessed performance on real patient care data
+- Remaining 95%: relied on medical examination questions (USMLE-style) or case vignettes
+- Traditional knowledge-based benchmarks show saturation: leading models achieve 84-90% accuracy on USMLE
+- **Conversational frameworks:** Diagnostic accuracy drops from 82% on traditional case vignettes to 62.7% on multi-turn patient dialogues — **a 19.3 percentage point decrease**
+- LLMs demonstrate "markedly lower performance on script concordance testing (evaluating clinical reasoning) than on medical multiple-choice benchmarks"
+- Review conclusion: "Recent audits reveal substantial disconnects from clinical reality and foundational gaps in construct validity, data integrity, and safety coverage"
+
+**Related findings (npj Digital Medicine benchmark study):**
+- Six LLMs evaluated: average total score 57.2%, safety score 54.7%, effectiveness 62.3%
+- **13.3% performance drop in high-risk scenarios** vs. average scenarios
+
+## Agent Notes
+
+**Why this matters:** This is the methodological foundation under both the Oxford/Nature Medicine RCT (94.9% → 34.5% deployment gap) and the broader claim that OE's USMLE 100% benchmark performance doesn't predict clinical outcomes. The systematic review establishes that the benchmark-to-reality gap is systematic across the field, not anomalous. The 5% real-patient-data figure is particularly striking: 95% of clinical AI evaluation is done with exam-style questions rather than with actual clinical workflows.
+
+**What surprised me:** The 19.3 percentage point drop from case vignettes to multi-turn dialogues. This is the conversational complexity gap — the same model that answers discrete questions well fails in the back-and-forth of real clinical interaction. OE users query OE in conversational clinical language, making this gap directly relevant.
+
+**What I expected but didn't find:** Any indication that the field is systematically correcting this — moving toward real-patient-data evaluation. The review documents the problem but doesn't identify a trend toward better evaluation practices.
+
+**KB connections:**
+- Methodological foundation for the Oxford/Nature Medicine RCT deployment gap finding
+- Directly explains why OE's USMLE 100% benchmark performance (cited in Session 9) doesn't predict clinical safety
+- Connects to NOHARM's finding that real clinical scenario evaluation (31 LLMs, complex vignettes) shows 22% severe error rates — vs. USMLE saturation at 84-90%
+- The 13.3% performance drop in high-risk scenarios (npj Digital Medicine) maps to NOHARM's finding that omissions cluster in complex, high-acuity scenarios
+
+**Extraction hints:**
+- Primary claim: "95% of clinical LLM evaluation uses medical examination questions rather than real patient care data — a systematic evaluation methodology gap that makes benchmark performance (84-90% USMLE) uninterpretable as a clinical safety signal"
+- Secondary: "Conversational frameworks reveal 19.3pp accuracy drop vs. case vignettes, demonstrating that LLMs fail in the back-and-forth interaction that defines actual clinical use"
+- This could merge with the Oxford/Nature Medicine source as a unified "benchmark saturation and real-world deployment gap" claim
+
+**Context:** JMIR is a leading peer-reviewed journal in digital health and health informatics. A systematic review of 761 studies is a large corpus. The PMC availability confirms peer review.
+
+## Curator Notes
+PRIMARY CONNECTION: Belief 5 — clinical AI safety evaluation methodology gap
+WHY ARCHIVED: Provides systematic evidence that the KB's reliance on benchmark performance data (e.g., "OE scores 100% on USMLE") is epistemically weak — and establishes that the Oxford RCT deployment gap finding is part of a systematic pattern
+EXTRACTION HINT: Extract the 5%/95% finding as a standalone methodological claim about the clinical AI evaluation field; pair with Oxford Nature Medicine RCT as empirical confirmation
-- 
2.45.2

From 8f8f8adf005636b208be25b63b70de03fdc43a1b Mon Sep 17 00:00:00 2001
From: Teleo Agents
Date: Tue, 24 Mar 2026 04:33:27 +0000
Subject: [PATCH 3/3] extract: 2026-01-23-obbba-medicaid-work-requirements-implementation-2026-states

Pentagon-Agent: Epimetheus <3D35839A-7722-4740-B93D-51157F7D5E70>
---
 ...-requirements-implementation-2026-states.md | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/inbox/queue/2026-01-23-obbba-medicaid-work-requirements-implementation-2026-states.md b/inbox/queue/2026-01-23-obbba-medicaid-work-requirements-implementation-2026-states.md
index 6c6c928b5..1805bce7c 100644
--- a/inbox/queue/2026-01-23-obbba-medicaid-work-requirements-implementation-2026-states.md
+++ b/inbox/queue/2026-01-23-obbba-medicaid-work-requirements-implementation-2026-states.md
@@ -7,9 +7,13 @@ date: 2026-01-23
 domain: health
 secondary_domains: []
 format: news
-status: unprocessed
+status: null-result
 priority: medium
 tags: [obbba, medicaid, work-requirements, vbc, belief-3, structural-misalignment, enrollment-stability, vbc-attractor-state, state-policy]
+processed_by: vida
+processed_date: 2026-03-24
+extraction_model: "anthropic/claude-sonnet-4.5"
+extraction_notes: "LLM returned 0 claims, 0 rejected by validator"
 ---
 
 ## Content
@@ -56,3 +60,15 @@ Supporting sources: Georgetown Center for Children and Families (CCF) analysis o
 PRIMARY CONNECTION: Belief 3 "structural misalignment" + OBBBA enrollment stability mechanism from Session 8
 WHY ARCHIVED: Implementation update confirming that the December 2026 OBBBA enrollment disruption is on track — the KB needs to update confidence from "projected" to "in-progress"
 EXTRACTION HINT: Update the existing OBBBA claim rather than creating a new one; the observation period is Q1 2027 when work requirements take full effect
+
+
+## Key Facts
+- As of January 23, 2026, 7 states have pending Section 1115 waivers for Medicaid work requirements: Arizona, Arkansas, Iowa, Montana, Ohio, South Carolina, Utah
+- Nebraska is implementing work requirements via state plan amendment without a waiver
+- Federal mandate requires all states to implement by December 31, 2026, or request an extension to 2028
+- Work requirements: ages 19-64 must work or participate in qualifying activities ≥80 hours/month
+- Exemptions include parents of children ≤13 and medically frail individuals
+- Federal funding: $200M for HHS implementation, $200M for states in FY2026
+- Required state outreach to beneficiaries: June-August 2026
+- CBO projected 5.3M people losing Medicaid coverage
+- Mandatory start date for states without extension: January 1, 2027
-- 
2.45.2