Pulled from /opt/teleo-eval/telegram/ on VPS. Includes: - bot.py (92K), kb_retrieval.py, kb_tools.py (agentic retrieval) - retrieval.py (RRF merge, query decomposition, entity traversal) - response.py (system prompt builder, response parser) - agent_config.py, agent_runner.py (multi-agent template unit support) - approval_stages.py, approvals.py, digest.py (approval workflow) - eval_checks.py, eval.py (response quality checks) - output_gate.py, x_publisher.py, x_client.py, x_search.py (X pipeline) - market_data.py, worktree_lock.py (utilities) - rio.yaml, theseus.yaml (agent configs) These files were deployed to VPS but never committed to the repo. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
85 lines
2.6 KiB
Python
"""File-based lock for ALL processes writing to the main worktree.
|
|
|
|
One lock, one mechanism (Ganymede: Option C). Used by:
|
|
- Pipeline daemon stages (entity_batch, source archiver, substantive_fixer) via async wrapper
|
|
- Telegram bot (sync context manager)
|
|
|
|
Protects: /opt/teleo-eval/workspaces/main/
|
|
|
|
flock auto-releases on process exit (even crash/kill). No stale lock cleanup needed.
|
|
"""
|
|
|
|
import asyncio
|
|
import fcntl
|
|
import logging
|
|
import time
|
|
from contextlib import asynccontextmanager, contextmanager
|
|
from pathlib import Path
|
|
|
|
logger = logging.getLogger("worktree-lock")
|
|
|
|
LOCKFILE = Path("/opt/teleo-eval/workspaces/.main-worktree.lock")
|
|
|
|
|
|
@contextmanager
def main_worktree_lock(timeout: float = 10.0):
    """Sync context manager — use in telegram bot and other external processes.

    Polls for an exclusive flock on LOCKFILE every 0.1s until acquired or
    *timeout* seconds have elapsed.

    Args:
        timeout: Maximum seconds to wait for the lock before giving up.

    Raises:
        TimeoutError: If the lock cannot be acquired within *timeout* seconds.

    Usage:
        with main_worktree_lock():
            # write to inbox/queue/, git add/commit/push, etc.
    """
    LOCKFILE.parent.mkdir(parents=True, exist_ok=True)
    fp = open(LOCKFILE, "w")
    try:
        start = time.monotonic()
        while True:
            try:
                # Non-blocking attempt so we can enforce our own timeout.
                fcntl.flock(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
                break
            except BlockingIOError:
                if time.monotonic() - start > timeout:
                    logger.warning("Main worktree lock timeout after %.0fs", timeout)
                    raise TimeoutError(f"Could not acquire main worktree lock in {timeout}s")
                time.sleep(0.1)
        yield
    finally:
        # LOCK_UN on an unheld lock is a harmless no-op, so this finally is
        # safe even when we never acquired (timeout / unexpected error) —
        # and it guarantees the fd is closed on every exit path.
        try:
            fcntl.flock(fp, fcntl.LOCK_UN)
        finally:
            fp.close()
|
|
|
|
|
|
@asynccontextmanager
async def async_main_worktree_lock(timeout: float = 10.0):
    """Async context manager — use in pipeline daemon stages.

    Acquires the same file lock via run_in_executor (Ganymede: <1ms overhead).

    Args:
        timeout: Maximum seconds to wait for the lock before giving up.

    Raises:
        TimeoutError: If the lock cannot be acquired within *timeout* seconds.

    Usage:
        async with async_main_worktree_lock():
            await _git("fetch", "origin", "main", cwd=main_dir)
            await _git("reset", "--hard", "origin/main", cwd=main_dir)
            # ... write files, commit, push ...
    """
    # get_running_loop() is the supported API inside a coroutine;
    # get_event_loop() is deprecated here since Python 3.10.
    loop = asyncio.get_running_loop()
    LOCKFILE.parent.mkdir(parents=True, exist_ok=True)
    fp = open(LOCKFILE, "w")

    def _acquire():
        # Runs in a worker thread, so blocking poll + sleep is acceptable.
        start = time.monotonic()
        while True:
            try:
                fcntl.flock(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
                return
            except BlockingIOError:
                if time.monotonic() - start > timeout:
                    # Log for parity with the sync variant before giving up.
                    logger.warning("Main worktree lock timeout after %.0fs", timeout)
                    raise TimeoutError(f"Could not acquire main worktree lock in {timeout}s")
                time.sleep(0.1)

    try:
        await loop.run_in_executor(None, _acquire)
        yield
    finally:
        # LOCK_UN on an unheld lock is a harmless no-op, so this is safe even
        # when acquisition failed; the nested finally guarantees close().
        try:
            fcntl.flock(fp, fcntl.LOCK_UN)
        finally:
            fp.close()
|