teleo-codex/ops/pipeline-v2/telegram/market_data.py
m3taversal 7bfce6b706 commit telegram bot module from VPS — 20 files never previously in repo
Pulled from /opt/teleo-eval/telegram/ on VPS. Includes:
- bot.py (92K), kb_retrieval.py, kb_tools.py (agentic retrieval)
- retrieval.py (RRF merge, query decomposition, entity traversal)
- response.py (system prompt builder, response parser)
- agent_config.py, agent_runner.py (multi-agent template unit support)
- approval_stages.py, approvals.py, digest.py (approval workflow)
- eval_checks.py, eval.py (response quality checks)
- output_gate.py, x_publisher.py, x_client.py, x_search.py (X pipeline)
- market_data.py, worktree_lock.py (utilities)
- rio.yaml, theseus.yaml (agent configs)

These files were deployed to VPS but never committed to the repo.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-13 11:02:32 +02:00

112 lines
3.4 KiB
Python

#!/usr/bin/env python3
"""Market data API client for live token prices.
Calls Ben's teleo-ai-api endpoint for ownership coin prices.
Used by the Telegram bot to give Rio real-time market context.
Epimetheus owns this module. Rhea: static API key pattern.
"""
import logging
from pathlib import Path
import aiohttp
logger = logging.getLogger("market-data")
API_URL = "https://teleo-ai-api-257133920458.us-east4.run.app/v0/chat/tool/market-data"
API_KEY_FILE = "/opt/teleo-eval/secrets/market-data-key"
# Cache: avoid hitting the API on every message
_cache: dict[str, dict] = {} # token_name → {data, timestamp}
CACHE_TTL = 300 # 5 minutes
def _load_api_key() -> str | None:
    """Load the market-data API key from the secrets file.

    Returns:
        The key with surrounding whitespace stripped, or ``None`` if the
        file is missing or unreadable (a warning is logged).
    """
    try:
        return Path(API_KEY_FILE).read_text().strip()
    except OSError:
        # Narrowed from bare `except Exception`: only file-access errors are
        # expected here; anything else (e.g. a programming error) should surface.
        logger.warning("Market data API key not found at %s", API_KEY_FILE)
        return None
async def get_token_price(token_name: str) -> dict | None:
    """Fetch live market data for a token via the internal market-data API.

    Args:
        token_name: Token symbol, optionally wrapped in "$" (any case).

    Returns:
        Parsed JSON dict (price, market_cap, volume, ...) or ``None`` on any
        failure (missing API key, HTTP >= 400, network/timeout error).

    Successful results are cached in-process for CACHE_TTL seconds per token.
    """
    import time

    token_upper = token_name.upper().strip("$")

    # Serve from cache while the entry is still fresh.
    cached = _cache.get(token_upper)
    if cached and time.time() - cached["timestamp"] < CACHE_TTL:
        return cached["data"]

    key = _load_api_key()
    if not key:
        return None

    try:
        async with aiohttp.ClientSession() as session:
            async with session.post(
                API_URL,
                headers={
                    "X-Internal-Key": key,
                    "Content-Type": "application/json",
                },
                json={"token": token_upper},
                timeout=aiohttp.ClientTimeout(total=10),
            ) as resp:
                if resp.status >= 400:
                    # BUG FIX: format string was "%s%d", which fused the token
                    # and status into one unreadable blob (e.g. "BTC404").
                    logger.warning(
                        "Market data API returned HTTP %d for %s",
                        resp.status,
                        token_upper,
                    )
                    return None
                data = await resp.json()
                # Cache only successful responses.
                _cache[token_upper] = {
                    "data": data,
                    "timestamp": time.time(),
                }
                return data
    except Exception as e:
        # Best-effort by design: market context is optional, so never let a
        # network hiccup propagate into the bot's message handler.
        logger.warning("Market data API error for %s: %s", token_upper, e)
        return None
def format_price_context(data: dict, token_name: str) -> str:
    """Build a compact market-data summary line for the LLM prompt.

    Prefers the API's pre-formatted "result" text; otherwise assembles a
    pipe-separated summary from structured fields. Returns "" when there
    is nothing worth showing.
    """
    if not data:
        return ""

    # The API usually ships ready-made text under "result" — pass it through.
    preformatted = data.get("result", "")
    if preformatted:
        return preformatted

    def _first(*keys):
        # Return the first truthy value among alternative field names
        # (upstream sources disagree on key naming).
        for k in keys:
            value = data.get(k)
            if value:
                return value
        return None

    pieces = [f"Live market data for {token_name}:"]

    price = _first("price", "current_price")
    if price:
        pieces.append(f"Price: ${price}")

    mcap = _first("market_cap", "marketCap")
    if mcap:
        if isinstance(mcap, (int, float)) and mcap > 1_000_000:
            pieces.append(f"Market cap: ${mcap/1_000_000:.1f}M")
        else:
            pieces.append(f"Market cap: {mcap}")

    volume = _first("volume", "volume_24h")
    if volume:
        pieces.append(f"24h volume: ${volume}")

    change = _first("price_change_24h", "change_24h")
    if change:
        pieces.append(f"24h change: {change}")

    # Header alone means no data fields were present — emit nothing.
    return " | ".join(pieces) if len(pieces) > 1 else ""