epimetheus: fix double research message + Haiku query tuning
- Skip RESEARCH: tag when the Haiku pre-pass already searched (prevents double-fire)
- Haiku instructed to use 2-3 word queries (it was generating 6+ word queries that returned 0 results)
- Engagement filter dropped to 0 (niche crypto tweets have low engagement)
- systemd ProtectSystem paths fixed (root cause of ALL write failures)

Pentagon-Agent: Epimetheus <3D35839A-7722-4740-B93D-51157F7D5E70>
This commit is contained in:
parent
7086bcacb1
commit
ed46c0674b
1 changed file with 10 additions and 6 deletions
|
|
@ -288,7 +288,7 @@ async def handle_research(msg, query: str, user):
|
|||
await msg.chat.send_action("typing")
|
||||
|
||||
logger.info("Research: searching X for '%s'", query)
|
||||
tweets = await search_x(query, max_results=15, min_engagement=3)
|
||||
tweets = await search_x(query, max_results=15, min_engagement=0)
|
||||
logger.info("Research: got %d tweets for '%s'", len(tweets), query)
|
||||
if not tweets:
|
||||
await msg.reply_text(f"No recent tweets found for '{query}'.")
|
||||
|
|
@ -490,10 +490,12 @@ async def handle_tagged(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
|||
haiku_prompt = (
|
||||
f"Does this Telegram message need a live X/Twitter search to answer well? "
|
||||
f"Only say YES if the user is asking about recent sentiment, community takes, "
|
||||
f"what people are saying, or emerging discussions that wouldn't be in a knowledge base.\n\n"
|
||||
f"what people are saying, or emerging discussions.\n\n"
|
||||
f"Message: {text}\n\n"
|
||||
f"If YES, provide a SHORT search query (2-3 words max, like 'P2P.me' or 'MetaDAO buyback'). "
|
||||
f"Twitter search works best with simple queries — too many words returns nothing.\n\n"
|
||||
f"Respond with ONLY one of:\n"
|
||||
f"YES: [search query]\n"
|
||||
f"YES: [2-3 word query]\n"
|
||||
f"NO"
|
||||
)
|
||||
haiku_result = await call_openrouter("anthropic/claude-haiku-4.5", haiku_prompt, max_tokens=50)
|
||||
|
|
@ -616,12 +618,14 @@ IMPORTANT: Two special tags you can append at the end of your response (after yo
|
|||
logger.info("Auto-learned [%s]: %s", category, correction[:80])
|
||||
|
||||
# Auto-research (Ganymede: LLM-driven research trigger)
|
||||
# Skip if Haiku pre-pass already searched (prevents double-fire + duplicate "No tweets found" messages)
|
||||
research_lines = re.findall(r'^RESEARCH:\s+(.+)$', response, re.MULTILINE)
|
||||
if research_lines:
|
||||
display_response = re.sub(r'\nRESEARCH:\s+.+$', '', display_response, flags=re.MULTILINE).rstrip()
|
||||
for query in research_lines:
|
||||
asyncio.get_event_loop().create_task(handle_research(msg, query.strip(), user))
|
||||
logger.info("Auto-research triggered: %s", query[:80])
|
||||
if not research_context: # Only fire if Haiku didn't already search
|
||||
for query in research_lines:
|
||||
asyncio.get_event_loop().create_task(handle_research(msg, query.strip(), user))
|
||||
logger.info("Auto-research triggered: %s", query[:80])
|
||||
|
||||
# Post response (without LEARNING lines)
|
||||
await msg.reply_text(display_response)
|
||||
|
|
|
|||
Loading…
Reference in a new issue