diff --git a/telegram/bot.py b/telegram/bot.py
index c865884..141e7b7 100644
--- a/telegram/bot.py
+++ b/telegram/bot.py
@@ -528,7 +528,7 @@ async def handle_tagged(update: Update, context: ContextTypes.DEFAULT_TYPE):
         research_context = f"\n## Fresh X Research Results for '{search_query}'\n"
         for t in tweets[:7]:
             research_context += f"- @{t['author']}: {t['text'][:150]}\n"
-        record_research_usage(user.id if user else 0)
+        # Don't burn user's rate limit on autonomous searches (Ganymede)
         # Archive as source
         try:
             slug = re.sub(r"[^a-z0-9]+", "-", search_query[:60].lower()).strip("-")
diff --git a/telegram/x_search.py b/telegram/x_search.py
index 2aca9fa..d8a29c7 100644
--- a/telegram/x_search.py
+++ b/telegram/x_search.py
@@ -203,28 +203,50 @@ async def fetch_tweet_by_url(url: str) -> dict | None:
             }
 
         # Fallback: search from:username and match by ID
-        async with session.get(
-            API_URL,
-            params={"query": f"from:{username}", "queryType": "Latest"},
-            headers={"X-API-Key": key},
-            timeout=aiohttp.ClientTimeout(total=10),
-        ) as resp:
-            if resp.status >= 400:
-                return None
-            data = await resp.json()
-            for tweet in data.get("tweets", []):
-                if str(tweet.get("id")) == tweet_id:
-                    author = tweet.get("author", {})
-                    return {
-                        "text": tweet.get("text", ""),
-                        "url": url,
-                        "author": author.get("userName", username),
-                        "author_name": author.get("name", ""),
-                        "author_followers": author.get("followers", 0),
-                        "engagement": (tweet.get("likeCount", 0) or 0) + (tweet.get("retweetCount", 0) or 0),
-                        "tweet_date": tweet.get("createdAt", ""),
-                        "is_article": False,
-                    }
+        # NOTE: fallback only finds recent tweets (~7 days). Older tweets fail. (Ganymede)
+        # Try with cursor pagination to search deeper
+        cursor = ""
+        for page in range(3):  # Up to 3 pages
+            params = {"query": f"from:{username}", "queryType": "Latest"}
+            if cursor:
+                params["cursor"] = cursor
+            async with session.get(
+                API_URL,
+                params=params,
+                headers={"X-API-Key": key},
+                timeout=aiohttp.ClientTimeout(total=10),
+            ) as resp:
+                if resp.status >= 400:
+                    break
+                data = await resp.json()
+                for tweet in data.get("tweets", []):
+                    if str(tweet.get("id")) == tweet_id:
+                        author = tweet.get("author", {})
+                        return {
+                            "text": tweet.get("text", ""),
+                            "url": url,
+                            "author": author.get("userName", username),
+                            "author_name": author.get("name", ""),
+                            "author_followers": author.get("followers", 0),
+                            "engagement": (tweet.get("likeCount", 0) or 0) + (tweet.get("retweetCount", 0) or 0),
+                            "tweet_date": tweet.get("createdAt", ""),
+                            "is_article": False,
+                        }
+                cursor = data.get("next_cursor", "")
+                if not cursor:
+                    break
+
+        # If still not found, return placeholder (Ganymede: surface failure)
+        return {
+            "text": f"[Could not fetch tweet content from @{username}]",
+            "url": url,
+            "author": username,
+            "author_name": "",
+            "author_followers": 0,
+            "engagement": 0,
+            "tweet_date": "",
+            "is_article": False,
+        }
     except Exception as e:
         logger.warning("Tweet fetch error for %s: %s", url, e)