Imports 67 files from VPS (/opt/teleo-eval/) into repo as the single source of truth. Previously only 8 of 67 files existed in repo — the rest were deployed directly to VPS via SCP, causing massive drift. Includes: - pipeline/lib/: 33 Python modules (daemon core, extraction, evaluation, merge, cascade, cross-domain, costs, attribution, etc.) - pipeline/: main daemon (teleo-pipeline.py), reweave.py, batch-extract-50.sh - diagnostics/: 19 files (4-page dashboard, alerting, daily digest, review queue, tier1 metrics) - agent-state/: bootstrap, lib-state, cascade inbox processor, schema - systemd/: service unit files for reference - deploy.sh: rsync-based deploy with --dry-run, syntax checks, dirty-tree gate - research-session.sh: updated with Step 8.5 digest + cascade inbox processing No new code written — all files are exact copies from VPS as of 2026-04-06. From this point forward: edit in repo, commit, then deploy.sh. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
62 lines · 1.8 KiB · Python
"""Route handlers for /api/daily-digest endpoint.
|
|
|
|
Import into app.py and register routes in create_app().
|
|
"""
|
|
|
|
import logging
|
|
|
|
from aiohttp import web
|
|
from daily_digest import fetch_daily_digest
|
|
|
|
logger = logging.getLogger("argus.daily_digest")
|
|
|
|
|
|
async def handle_daily_digest(request) -> web.Response:
    """GET /api/daily-digest — structured data for Telegram daily digest.

    Query params:
        hours: lookback period in hours (default: 24, clamped to 1-168)

    Returns JSON with:
        claims_merged: merged claims with summaries
        pipeline_stats: PRs merged/opened/rejected, approval rate, rejection reasons
        agent_activity: per-agent contribution breakdown
        pending_review: open PR count
        knowledge_base: total claims, domain breakdown, orphan ratio

    Responds 500 if the database path was never configured (see
    register_daily_digest_routes) or if the digest fetch raises.
    """
    # Validate the hours param; fall back to the default on garbage input.
    # TypeError is unreachable here (query.get returns str or the int
    # default), but keeping it makes the fallback robust to refactors.
    try:
        hours = int(request.query.get("hours", 24))
        hours = max(1, min(hours, 168))  # clamp to 1h-7d
    except (ValueError, TypeError):
        hours = 24

    db_path = request.app.get("_db_path")
    if not db_path:
        return web.json_response({"error": "database not configured"}, status=500)

    # Token is optional — fetch_daily_digest accepts None.
    token = request.app.get("_forgejo_token")

    try:
        digest = await fetch_daily_digest(
            db_path=db_path,
            forgejo_token=token,
            hours=hours,
        )
    except Exception:
        # logger.exception records the full traceback, unlike logger.error
        # with just the message — essential for diagnosing fetch failures.
        logger.exception("Daily digest fetch failed")
        return web.json_response({"error": "daily digest fetch failed"}, status=500)

    return web.json_response(digest)
|
|
|
|
|
|
def register_daily_digest_routes(app, db_path: str, forgejo_token: str | None = None):
    """Wire the daily-digest endpoint into an aiohttp application.

    Stores configuration on the app under private keys that
    handle_daily_digest reads back at request time.

    db_path: path to pipeline.db
    forgejo_token: optional Forgejo API token (only stored when truthy)
    """
    app["_db_path"] = db_path
    # A falsy token (None / empty string) is treated as "not configured"
    # and simply never stored, so the handler sees .get(...) -> None.
    if forgejo_token:
        app["_forgejo_token"] = forgejo_token
    app.router.add_get("/api/daily-digest", handle_daily_digest)
|