diff --git a/telegram/bot.py b/telegram/bot.py
index 2edd3cb..de6fd8f 100644
--- a/telegram/bot.py
+++ b/telegram/bot.py
@@ -306,6 +306,64 @@ def _format_conversation_history(chat_id: int, user_id: int) -> str:
 
 RESEARCH_PATTERN = re.compile(r'/research(?:@\w+)?\s+(.+)', re.IGNORECASE)
 
 
+async def _research_and_followup(msg, query: str, user):
+    """Run X search and send a follow-up message with findings.
+
+    Used when Opus triggers RESEARCH: tag — the user expects results back,
+    not silent archival.
+    """
+    from x_client import search_tweets as _search
+    logger.info("Research follow-up: searching X for '%s'", query)
+    try:
+        tweets = await _search(query, max_results=10, min_engagement=0)
+    except Exception:
+        # This coroutine runs as a fire-and-forget task: an unhandled error
+        # would only surface as "Task exception was never retrieved".
+        logger.exception("Research follow-up failed for '%s'", query)
+        await msg.reply_text(f"Sorry — the X search for '{query}' failed.")
+        return
+    if not tweets:
+        await msg.reply_text(f"Searched X for '{query}' — nothing recent found.")
+        return
+
+    # Build concise summary of findings
+    lines = [f"Found {len(tweets)} recent posts about '{query}':\n"]
+    for t in tweets[:5]:
+        author = t.get("author", "?")
+        text = t.get("text", "")[:200]
+        url = t.get("url", "")
+        lines.append(f"@{author}: {text}")
+        if url:
+            lines.append(f"  {url}")
+        lines.append("")
+
+    followup = "\n".join(lines)
+    # Split if needed (Telegram caps messages at 4096 chars)
+    if len(followup) <= 4096:
+        await msg.reply_text(followup)
+    else:
+        chunks = []
+        remaining = followup
+        while remaining:
+            if len(remaining) <= 4096:
+                chunks.append(remaining)
+                break
+            # Prefer breaking at a paragraph, then a line, then hard-cut.
+            split_at = remaining.rfind("\n\n", 0, 4000)
+            if split_at == -1:
+                split_at = remaining.rfind("\n", 0, 4096)
+            if split_at == -1:
+                split_at = 4096
+            chunks.append(remaining[:split_at])
+            remaining = remaining[split_at:].lstrip("\n")
+        for chunk in chunks:
+            if chunk.strip():
+                await msg.reply_text(chunk)
+
+    # Also archive for pipeline
+    await handle_research(msg, query, user, silent=True)
+
+
 async def handle_research(msg, query: str, user, silent: bool = False):
     """Handle a research request — search X and archive results as sources.
@@ -716,8 +766,10 @@ IMPORTANT: Two special tags you can append at the end of your response (after yo
         display_response = re.sub(r'\nRESEARCH:\s+.+$', '', display_response, flags=re.MULTILINE).rstrip()
         if not research_context:  # Only fire if Haiku didn't already search
             for query in research_lines:
-                asyncio.get_event_loop().create_task(handle_research(msg, query.strip(), user, silent=True))
-                logger.info("Auto-research triggered: %s", query[:80])
+                # Send follow-up with findings (not silent — user expects results)
+                asyncio.create_task(
+                    _research_and_followup(msg, query.strip(), user))
+                logger.info("Auto-research triggered (will follow up): %s", query[:80])
 
     # Post response (without LEARNING lines)
     # Telegram has a 4096 char limit — split long messages