Compare commits

..

6 commits

Author SHA1 Message Date
6361c7e9e8 Merge branch 'epimetheus/eval-cost-tracking' 2026-04-14 12:25:46 +01:00
5f287ae9c8 epimetheus: fix connect.py title→slug mismatch in vector-search edges
claim_title payloads wrote unresolvable human-readable titles into
frontmatter related fields. Switched to claim_path with slug extraction
so reciprocal edges in merge.py can resolve targets. Renamed
neighbor_titles→neighbor_slugs throughout for consistency.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-14 12:25:41 +01:00
5b9ce01412 epimetheus: wire LLM connections into typed frontmatter edges
Extract.py was discarding LLM-provided connections — related_claims went into
frontmatter as wiki-links but supports/challenges/depends_on from the
connections field were ignored entirely. This is the primary driver of 50%+
orphan ratio.

Now: connections[] → typed edge fields (supports/challenges/related) in YAML
frontmatter. related_claims fall back to related edges. Post-write
connect_new_claims() adds vector-search edges for claims the LLM missed.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-14 12:01:21 +01:00
154f36f2d3 epimetheus: fix eval crash + wire per-PR cost tracking
Three bugs fixed:
1. triage_pr() returns 3 values but line 611 unpacked 2 → ValueError on every
   non-deterministic PR (circuit breaker opened, 5 PRs stuck)
2. costs import was inside triage else-block → NameError on deterministic routes
3. pr_cost never written to prs.cost_usd → 0% cost tracking across 1,118 PRs

Cost tracking now covers all 4 exit paths: domain failed, domain rejected,
Leo failed, and normal completion. Uses additive UPDATE (cost_usd + ?) so
re-evals accumulate correctly.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-14 12:01:13 +01:00
d8a64d479f epimetheus: remove dead disagreement_types UI card
Ganymede review finding — the review-summary API no longer returns
disagreement_types, so the card always showed "No disagreements."
Removed the JS loop and HTML table.

Pentagon-Agent: Epimetheus <0144398e-4ed3-4fe2-95a3-3d72e1abf887>
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-14 11:57:01 +01:00
143adb09e9 epimetheus: merge root/diagnostics fixes into canonical ops/diagnostics
dashboard_routes.py — root copy is superset:
  - Extraction yield query: source_url→path, s.url→s.path (truth audit)
  - insufficient_data flag on cascade-coverage endpoint
  - Rejection reasons fallback to prs.eval_issues when review_records empty
  - rejection_source field replaces disagreement_types in review-summary
  - New /api/agent-scorecard endpoint (Argus truth audit)
  - Route registration for agent-scorecard

alerting.py — merged from both copies:
  - FROM ROOT: "unknown" agent filter in check_agent_health (bug #3)
  - FROM ROOT: prs.eval_issues queries in check_rejection_spike,
    check_stuck_loops, check_domain_rejection_patterns,
    generate_failure_report (truth audit correction Apr 2)
  - FROM CANONICAL: _ALLOWED_DIM_EXPRS SQL whitelist + validation
    in _check_approval_by_dimension (Ganymede security fix)

Files verified canonical=newer (no changes needed):
  IDENTICAL: dashboard_prs.py, shared_ui.py, dashboard_ops.py,
    dashboard_health.py, research_tracking.py, response_audit_routes.py
  CANONICAL WINS: dashboard_epistemic.py, tier1_metrics.py,
    dashboard_agents.py, alerting_routes.py, tier1_routes.py

NOTE: dashboard_routes.py review-summary API no longer returns
disagreement_types, but canonical dashboard_epistemic.py still renders
it — UI will show empty data. Flag for Ganymede review.

Root /diagnostics/ copies are now safe to delete for these 2 files.
Remaining root files already match or are older than canonical.

Pentagon-Agent: Epimetheus <0144398E-4ED3-4FE2-95A3-3D72E1ABF887>
2026-04-14 11:37:12 +01:00
3 changed files with 90 additions and 29 deletions

View file

@@ -63,7 +63,7 @@ def _build_search_text(content: str) -> str:
return " ".join(parts) return " ".join(parts)
def _add_related_edges(claim_path: str, neighbor_titles: list[str]) -> bool: def _add_related_edges(claim_path: str, neighbor_slugs: list[str]) -> bool:
"""Add related edges to a claim's frontmatter. Returns True if modified.""" """Add related edges to a claim's frontmatter. Returns True if modified."""
try: try:
with open(claim_path) as f: with open(claim_path) as f:
@@ -87,10 +87,10 @@ def _add_related_edges(claim_path: str, neighbor_titles: list[str]) -> bool:
# Add new edges # Add new edges
added = [] added = []
for title in neighbor_titles: for slug in neighbor_slugs:
if title.strip().lower() not in existing_lower: if slug.strip().lower() not in existing_lower:
added.append(title) added.append(slug)
existing_lower.add(title.strip().lower()) existing_lower.add(slug.strip().lower())
if not added: if not added:
return False return False
@@ -167,27 +167,28 @@ def connect_new_claims(
stats["skipped_no_neighbors"] += 1 stats["skipped_no_neighbors"] += 1
continue continue
# Extract neighbor titles # Extract neighbor slugs (filename stems, not titles — reciprocal edges need resolvable names)
neighbor_titles = [] neighbor_slugs = []
for hit in hits: for hit in hits:
payload = hit.get("payload", {}) payload = hit.get("payload", {})
title = payload.get("claim_title", "") claim_path_qdrant = payload.get("claim_path", "")
if title: if claim_path_qdrant:
neighbor_titles.append(title) slug = claim_path_qdrant.rsplit("/", 1)[-1].replace(".md", "")
neighbor_slugs.append(slug)
if not neighbor_titles: if not neighbor_slugs:
stats["skipped_no_neighbors"] += 1 stats["skipped_no_neighbors"] += 1
continue continue
# Add edges to the new claim's frontmatter # Add edges to the new claim's frontmatter
if _add_related_edges(claim_path, neighbor_titles): if _add_related_edges(claim_path, neighbor_slugs):
stats["connected"] += 1 stats["connected"] += 1
stats["edges_added"] += len(neighbor_titles) stats["edges_added"] += len(neighbor_slugs)
stats["connections"].append({ stats["connections"].append({
"claim": os.path.basename(claim_path), "claim": os.path.basename(claim_path),
"neighbors": neighbor_titles, "neighbors": neighbor_slugs,
}) })
logger.info("Connected %s%d neighbors", os.path.basename(claim_path), len(neighbor_titles)) logger.info("Connected %s%d neighbors", os.path.basename(claim_path), len(neighbor_slugs))
else: else:
stats["skipped_no_neighbors"] += 1 stats["skipped_no_neighbors"] += 1

View file

@@ -493,6 +493,9 @@ async def _dispose_rejected_pr(conn, pr_number: int, eval_attempts: int, all_iss
async def evaluate_pr(conn, pr_number: int, tier: str = None) -> dict: async def evaluate_pr(conn, pr_number: int, tier: str = None) -> dict:
"""Evaluate a single PR. Returns result dict.""" """Evaluate a single PR. Returns result dict."""
from . import costs
pr_cost = 0.0
# Check eval attempt budget before claiming # Check eval attempt budget before claiming
row = conn.execute("SELECT eval_attempts FROM prs WHERE number = ?", (pr_number,)).fetchone() row = conn.execute("SELECT eval_attempts FROM prs WHERE number = ?", (pr_number,)).fetchone()
eval_attempts = (row["eval_attempts"] or 0) if row else 0 eval_attempts = (row["eval_attempts"] or 0) if row else 0
@@ -608,10 +611,8 @@ async def evaluate_pr(conn, pr_number: int, tier: str = None) -> dict:
json.dumps({"pr": pr_number, "tier": tier}), json.dumps({"pr": pr_number, "tier": tier}),
) )
else: else:
tier, triage_usage = await triage_pr(diff) tier, triage_usage, _triage_reason = await triage_pr(diff)
# Record triage cost pr_cost += costs.record_usage(
from . import costs
costs.record_usage(
conn, config.TRIAGE_MODEL, "eval_triage", conn, config.TRIAGE_MODEL, "eval_triage",
input_tokens=triage_usage.get("prompt_tokens", 0), input_tokens=triage_usage.get("prompt_tokens", 0),
output_tokens=triage_usage.get("completion_tokens", 0), output_tokens=triage_usage.get("completion_tokens", 0),
@@ -674,6 +675,8 @@ async def evaluate_pr(conn, pr_number: int, tier: str = None) -> dict:
# OpenRouter failure (timeout, error) — revert to open for retry. # OpenRouter failure (timeout, error) — revert to open for retry.
# NOT a rate limit — don't trigger 15-min backoff, just skip this PR. # NOT a rate limit — don't trigger 15-min backoff, just skip this PR.
conn.execute("UPDATE prs SET status = 'open' WHERE number = ?", (pr_number,)) conn.execute("UPDATE prs SET status = 'open' WHERE number = ?", (pr_number,))
if pr_cost > 0:
conn.execute("UPDATE prs SET cost_usd = cost_usd + ? WHERE number = ?", (pr_cost, pr_number))
return {"pr": pr_number, "skipped": True, "reason": "openrouter_failed"} return {"pr": pr_number, "skipped": True, "reason": "openrouter_failed"}
domain_verdict = _parse_verdict(domain_review, agent) domain_verdict = _parse_verdict(domain_review, agent)
@@ -714,6 +717,15 @@ async def evaluate_pr(conn, pr_number: int, tier: str = None) -> dict:
# Disposition: check if this PR should be terminated or kept open # Disposition: check if this PR should be terminated or kept open
await _dispose_rejected_pr(conn, pr_number, eval_attempts, domain_issues) await _dispose_rejected_pr(conn, pr_number, eval_attempts, domain_issues)
if domain_verdict != "skipped":
pr_cost += costs.record_usage(
conn, config.EVAL_DOMAIN_MODEL, "eval_domain",
input_tokens=domain_usage.get("prompt_tokens", 0),
output_tokens=domain_usage.get("completion_tokens", 0),
backend="openrouter",
)
if pr_cost > 0:
conn.execute("UPDATE prs SET cost_usd = cost_usd + ? WHERE number = ?", (pr_cost, pr_number))
return { return {
"pr": pr_number, "pr": pr_number,
"domain_verdict": domain_verdict, "domain_verdict": domain_verdict,
@@ -731,6 +743,15 @@ async def evaluate_pr(conn, pr_number: int, tier: str = None) -> dict:
if leo_review is None: if leo_review is None:
# DEEP: Opus rate limited (queue for later). STANDARD: OpenRouter failed (skip, retry next cycle). # DEEP: Opus rate limited (queue for later). STANDARD: OpenRouter failed (skip, retry next cycle).
conn.execute("UPDATE prs SET status = 'open' WHERE number = ?", (pr_number,)) conn.execute("UPDATE prs SET status = 'open' WHERE number = ?", (pr_number,))
if domain_verdict != "skipped":
pr_cost += costs.record_usage(
conn, config.EVAL_DOMAIN_MODEL, "eval_domain",
input_tokens=domain_usage.get("prompt_tokens", 0),
output_tokens=domain_usage.get("completion_tokens", 0),
backend="openrouter",
)
if pr_cost > 0:
conn.execute("UPDATE prs SET cost_usd = cost_usd + ? WHERE number = ?", (pr_cost, pr_number))
reason = "opus_rate_limited" if tier == "DEEP" else "openrouter_failed" reason = "opus_rate_limited" if tier == "DEEP" else "openrouter_failed"
return {"pr": pr_number, "skipped": True, "reason": reason} return {"pr": pr_number, "skipped": True, "reason": reason}
@@ -834,10 +855,8 @@ async def evaluate_pr(conn, pr_number: int, tier: str = None) -> dict:
await _dispose_rejected_pr(conn, pr_number, eval_attempts, all_issues) await _dispose_rejected_pr(conn, pr_number, eval_attempts, all_issues)
# Record cost (only for reviews that actually ran) # Record cost (only for reviews that actually ran)
from . import costs
if domain_verdict != "skipped": if domain_verdict != "skipped":
costs.record_usage( pr_cost += costs.record_usage(
conn, config.EVAL_DOMAIN_MODEL, "eval_domain", conn, config.EVAL_DOMAIN_MODEL, "eval_domain",
input_tokens=domain_usage.get("prompt_tokens", 0), input_tokens=domain_usage.get("prompt_tokens", 0),
output_tokens=domain_usage.get("completion_tokens", 0), output_tokens=domain_usage.get("completion_tokens", 0),
@@ -845,15 +864,23 @@ async def evaluate_pr(conn, pr_number: int, tier: str = None) -> dict:
) )
if leo_verdict not in ("skipped",): if leo_verdict not in ("skipped",):
if tier == "DEEP": if tier == "DEEP":
costs.record_usage(conn, config.EVAL_LEO_MODEL, "eval_leo", backend="max") pr_cost += costs.record_usage(
conn, config.EVAL_LEO_MODEL, "eval_leo",
input_tokens=leo_usage.get("prompt_tokens", 0),
output_tokens=leo_usage.get("completion_tokens", 0),
backend="max",
)
else: else:
costs.record_usage( pr_cost += costs.record_usage(
conn, config.EVAL_LEO_STANDARD_MODEL, "eval_leo", conn, config.EVAL_LEO_STANDARD_MODEL, "eval_leo",
input_tokens=leo_usage.get("prompt_tokens", 0), input_tokens=leo_usage.get("prompt_tokens", 0),
output_tokens=leo_usage.get("completion_tokens", 0), output_tokens=leo_usage.get("completion_tokens", 0),
backend="openrouter", backend="openrouter",
) )
if pr_cost > 0:
conn.execute("UPDATE prs SET cost_usd = cost_usd + ? WHERE number = ?", (pr_cost, pr_number))
return { return {
"pr": pr_number, "pr": pr_number,
"tier": tier, "tier": tier,

View file

@@ -37,6 +37,7 @@ from .domains import agent_for_domain
from .extraction_prompt import build_extraction_prompt from .extraction_prompt import build_extraction_prompt
from .forgejo import api as forgejo_api from .forgejo import api as forgejo_api
from .llm import openrouter_call from .llm import openrouter_call
from .connect import connect_new_claims
from .post_extract import load_existing_claims_from_repo, validate_and_fix_claims from .post_extract import load_existing_claims_from_repo, validate_and_fix_claims
from .worktree_lock import async_main_worktree_lock from .worktree_lock import async_main_worktree_lock
@@ -225,7 +226,29 @@ def _build_claim_content(claim: dict, agent: str) -> str:
body = claim.get("body", "") body = claim.get("body", "")
scope = claim.get("scope", "") scope = claim.get("scope", "")
sourcer = claim.get("sourcer", "") sourcer = claim.get("sourcer", "")
related = claim.get("related_claims", []) related_claims = claim.get("related_claims", [])
connections = claim.get("connections", [])
edge_fields = {"supports": [], "challenges": [], "related": []}
for conn in connections:
target = conn.get("target", "")
rel = conn.get("relationship", "related")
if target and rel in edge_fields:
target = target.replace(".md", "")
if target not in edge_fields[rel]:
edge_fields[rel].append(target)
for r in related_claims[:5]:
r_clean = r.replace(".md", "")
if r_clean not in edge_fields["related"]:
edge_fields["related"].append(r_clean)
edge_lines = []
for edge_type in ("supports", "challenges", "related"):
targets = edge_fields[edge_type]
if targets:
edge_lines.append(f"{edge_type}:")
for t in targets:
edge_lines.append(f" - {t}")
lines = [ lines = [
"---", "---",
@@ -242,10 +265,7 @@ def _build_claim_content(claim: dict, agent: str) -> str:
lines.append(f"scope: {scope}") lines.append(f"scope: {scope}")
if sourcer: if sourcer:
lines.append(f'sourcer: "{sourcer}"') lines.append(f'sourcer: "{sourcer}"')
if related: lines.extend(edge_lines)
lines.append("related_claims:")
for r in related:
lines.append(f' - "[[{r}]]"')
lines.append("---") lines.append("---")
lines.append("") lines.append("")
lines.append(f"# {title}") lines.append(f"# {title}")
@@ -456,6 +476,19 @@ async def _extract_one_source(
await _archive_source(source_path, domain, "null-result") await _archive_source(source_path, domain, "null-result")
return 0, 0 return 0, 0
# Post-write: connect new claims to existing KB via vector search (non-fatal)
claim_paths = [str(worktree / f) for f in files_written if f.startswith("domains/")]
if claim_paths:
try:
connect_stats = connect_new_claims(claim_paths)
if connect_stats["connected"] > 0:
logger.info(
"Extract-connect: %d/%d claims → %d edges",
connect_stats["connected"], len(claim_paths), connect_stats["edges_added"],
)
except Exception:
logger.warning("Extract-connect failed (non-fatal)", exc_info=True)
# Stage and commit # Stage and commit
for f in files_written: for f in files_written:
await _git("add", f, cwd=str(EXTRACT_WORKTREE)) await _git("add", f, cwd=str(EXTRACT_WORKTREE))