@heylemon/lemonade 0.2.2 → 0.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,565 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ šŸ”„ HOT SCANNER v2 - Find viral stocks & crypto trends
4
+ Now with Twitter/X, Reddit, and improved Yahoo Finance
5
+ """
6
+
7
+ import json
8
+ import urllib.request
9
+ import urllib.error
10
+ import xml.etree.ElementTree as ET
11
+ import gzip
12
+ import io
13
+ import subprocess
14
+ import os
15
+ from datetime import datetime, timezone
16
+ from pathlib import Path
17
+ import re
18
+ import ssl
19
+ from collections import defaultdict
20
+ from concurrent.futures import ThreadPoolExecutor, as_completed
21
+
22
# Load simple KEY=VALUE pairs from a sibling .env file into the process
# environment.  Keys and values are stripped of surrounding whitespace and
# one layer of matching quotes, so lines like `KEY = "value"` work too.
ENV_FILE = Path(__file__).parent.parent / ".env"
if ENV_FILE.exists():
    with open(ENV_FILE) as f:
        for line in f:
            line = line.strip()
            # Skip blanks and comments; only lines containing '=' are assignments.
            if line and not line.startswith("#") and "=" in line:
                key, value = line.split("=", 1)
                key = key.strip()
                value = value.strip()
                # Drop a single pair of matching surrounding quotes, if present.
                if len(value) >= 2 and value[0] == value[-1] and value[0] in ('"', "'"):
                    value = value[1:-1]
                if key:
                    os.environ[key] = value

# Directory where scan summaries are cached (created on first run;
# parents=True so a missing intermediate directory doesn't crash startup).
CACHE_DIR = Path(__file__).parent.parent / "cache"
CACHE_DIR.mkdir(parents=True, exist_ok=True)

# Shared TLS context for all HTTPS requests.
SSL_CONTEXT = ssl.create_default_context()
38
+
39
+
40
class HotScanner:
    """Aggregates trending-ticker signals from market APIs, news feeds and social media."""

    def __init__(self, include_social=True):
        """Prepare empty result buckets, the mention tally, and shared HTTP headers."""
        self.include_social = include_social
        # Raw findings, grouped by origin.
        self.results = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "crypto": [],
            "stocks": [],
            "news": [],
            "movers": [],
            "social": [],
        }
        # Per-ticker buzz tally: how often it appeared, where, and any sentiment hints.
        self.mentions = defaultdict(lambda: {"count": 0, "sources": [], "sentiment_hints": []})
        # Browser-like headers so scraped endpoints serve their normal responses.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Accept-Language": "en-US,en;q=0.5",
            "Accept-Encoding": "gzip, deflate",
        }
58
+
59
def _fetch(self, url, timeout=15):
    """Download *url* and return the response body decoded as UTF-8 text.

    Transparently decompresses gzip payloads, detected either via the
    Content-Encoding header or the gzip magic bytes.  Undecodable bytes
    are replaced rather than raising.
    """
    request = urllib.request.Request(url, headers=self.headers)
    with urllib.request.urlopen(request, timeout=timeout, context=SSL_CONTEXT) as response:
        body = response.read()
        gzipped = (response.info().get('Content-Encoding') == 'gzip'
                   or body[:2] == b'\x1f\x8b')
        if gzipped:
            body = gzip.decompress(body)
    return body.decode('utf-8', errors='replace')
68
+
69
+ def _fetch_json(self, url, timeout=15):
70
+ """Fetch and parse JSON."""
71
+ return json.loads(self._fetch(url, timeout))
72
+
73
def scan_all(self):
    """Run every enabled scan concurrently and return the raw results dict.

    Social-media scans are included only when ``include_social`` is set.
    A failure in one source is reported to stdout but never aborts the
    overall scan.
    """
    print("šŸ” Scanning for hot trends...\n")

    tasks = [
        ("CoinGecko Trending", self.scan_coingecko_trending),
        ("CoinGecko Movers", self.scan_coingecko_gainers_losers),
        ("Google News Finance", self.scan_google_news_finance),
        ("Google News Crypto", self.scan_google_news_crypto),
        ("Yahoo Movers", self.scan_yahoo_movers),
    ]
    if self.include_social:
        tasks += [
            ("Reddit WSB", self.scan_reddit_wsb),
            ("Reddit Crypto", self.scan_reddit_crypto),
            ("Twitter/X", self.scan_twitter),
        ]

    with ThreadPoolExecutor(max_workers=8) as pool:
        pending = {pool.submit(fn): label for label, fn in tasks}
        for done in as_completed(pending):
            label = pending[done]
            try:
                done.result()
            except Exception as e:
                # One failed source shouldn't sink the whole scan.
                print(f"  āŒ {label}: {str(e)[:50]}")

    return self.results
102
+
103
def scan_coingecko_trending(self):
    """Record CoinGecko's currently-trending coins (double mention weight)."""
    print("  šŸ“Š CoinGecko Trending...")
    try:
        payload = self._fetch_json("https://api.coingecko.com/api/v3/search/trending")

        for wrapper in payload.get("coins", [])[:10]:
            info = wrapper.get("item", {})
            change = info.get("data", {}).get("price_change_percentage_24h", {}).get("usd", 0)

            record = {
                "symbol": info.get("symbol", "").upper(),
                "name": info.get("name", ""),
                "rank": info.get("market_cap_rank"),
                "price_change_24h": round(change, 2) if change else None,
                "source": "coingecko_trending",
            }
            self.results["crypto"].append(record)

            symbol = record["symbol"]
            buzz = self.mentions[symbol]
            buzz["count"] += 2  # Trending gets extra weight
            buzz["sources"].append("CoinGecko Trending")
            if change:
                mood = "šŸš€ bullish" if change > 0 else "šŸ“‰ bearish"
                buzz["sentiment_hints"].append(f"{mood} ({change:+.1f}%)")

        print(f"    āœ… {len(payload.get('coins', []))} trending coins")
    except Exception as e:
        print(f"    āŒ CoinGecko trending: {e}")
134
+
135
def scan_coingecko_gainers_losers(self):
    """Record top-100 coins whose 24h move exceeds ±3%, biggest swings first."""
    print("  šŸ“ˆ CoinGecko Movers...")
    try:
        url = ("https://api.coingecko.com/api/v3/coins/markets"
               "?vs_currency=usd&order=market_cap_desc&per_page=100&page=1"
               "&price_change_percentage=24h")
        markets = self._fetch_json(url)

        # Largest absolute 24h move first; missing values sort as 0.
        by_magnitude = sorted(
            markets,
            key=lambda c: abs(c.get("price_change_percentage_24h") or 0),
            reverse=True,
        )

        significant = 0
        for coin in by_magnitude[:20]:
            change = coin.get("price_change_percentage_24h", 0)
            if abs(change or 0) <= 3:
                continue  # not a significant move

            record = {
                "symbol": coin.get("symbol", "").upper(),
                "name": coin.get("name", ""),
                "price": coin.get("current_price"),
                "change_24h": round(change, 2) if change else None,
                "volume": coin.get("total_volume"),
                "source": "coingecko_movers",
            }
            self.results["movers"].append(record)
            significant += 1

            symbol = record["symbol"]
            self.mentions[symbol]["count"] += 1
            self.mentions[symbol]["sources"].append("CoinGecko Movers")
            mood = "šŸš€ pumping" if change > 0 else "šŸ“‰ dumping"
            self.mentions[symbol]["sentiment_hints"].append(f"{mood} ({change:+.1f}%)")

        print(f"    āœ… {significant} significant movers")
    except Exception as e:
        print(f"    āŒ CoinGecko movers: {e}")
168
+
169
def scan_google_news_finance(self):
    """Pull top business headlines from the Google News RSS feed and tally tickers."""
    print("  šŸ“° Google News Finance...")
    try:
        url = "https://news.google.com/rss/topics/CAAqJggKIiBDQkFTRWdvSUwyMHZNRGx6TVdZU0FtVnVHZ0pWVXlnQVAB?hl=en-US&gl=US&ceid=US:en"
        feed = ET.fromstring(self._fetch(url))
        items = feed.findall(".//item")

        for item in items[:15]:
            node = item.find("title")
            headline = node.text if node is not None else ""
            symbols = self._extract_tickers(headline)

            self.results["news"].append({
                "title": headline,
                "tickers_mentioned": symbols,
                "source": "google_news_finance",
            })
            for symbol in symbols:
                self.mentions[symbol]["count"] += 1
                self.mentions[symbol]["sources"].append("Google News")
                self.mentions[symbol]["sentiment_hints"].append(f"šŸ“° {headline[:40]}...")

        print(f"    āœ… {len(items)} news items")
    except Exception as e:
        print(f"    āŒ Google News Finance: {e}")
198
+
199
def scan_google_news_crypto(self):
    """Search Google News for crypto headlines, mapping coin names to tickers."""
    print("  šŸ“° Google News Crypto...")
    try:
        url = "https://news.google.com/rss/search?q=bitcoin+OR+ethereum+OR+crypto+crash+OR+crypto+pump&hl=en-US&gl=US&ceid=US:en"
        feed = ET.fromstring(self._fetch(url))
        items = feed.findall(".//item")

        # Plain-language coin names -> ticker symbols.
        name_to_ticker = {
            "bitcoin": "BTC", "btc": "BTC", "ethereum": "ETH", "eth": "ETH",
            "solana": "SOL", "xrp": "XRP", "ripple": "XRP", "dogecoin": "DOGE",
            "cardano": "ADA", "polkadot": "DOT", "avalanche": "AVAX",
        }

        for item in items[:12]:
            node = item.find("title")
            headline = node.text if node is not None else ""
            symbols = self._extract_tickers(headline)

            lowered = headline.lower()
            for name, symbol in name_to_ticker.items():
                if name in lowered:
                    symbols.append(symbol)
            symbols = list(set(symbols))

            if not symbols:
                continue  # only keep headlines that actually name something

            self.results["news"].append({
                "title": headline,
                "tickers_mentioned": symbols,
                "source": "google_news_crypto",
            })
            for symbol in symbols:
                self.mentions[symbol]["count"] += 1
                self.mentions[symbol]["sources"].append("Google News Crypto")

        print("    āœ… Processed crypto news")
    except Exception as e:
        print(f"    āŒ Google News Crypto: {e}")
239
+
240
def scan_yahoo_movers(self):
    """Scrape ticker symbols from Yahoo Finance gainers/losers/most-active pages."""
    print("  šŸ“ˆ Yahoo Finance Movers...")
    pages = [
        ("gainers", "https://finance.yahoo.com/gainers/"),
        ("losers", "https://finance.yahoo.com/losers/"),
        ("most_active", "https://finance.yahoo.com/most-active/"),
    ]
    # Uppercase words that pattern-match as tickers but aren't.
    noise = ['USA', 'CEO', 'IPO', 'ETF', 'SEC', 'FDA', 'NYSE', 'API']

    for label, url in pages:
        try:
            html = self._fetch(url, timeout=12)

            # Several markup variants carry the symbol; collect them all.
            found = []
            for pattern in (r'data-symbol="([A-Z]{1,5})"',
                            r'/quote/([A-Z]{1,5})[/"\?]',
                            r'fin-streamer[^>]*symbol="([A-Z]{1,5})"'):
                found.extend(re.findall(pattern, html))

            # De-duplicate preserving first-seen order, capped at 15.
            deduped = list(dict.fromkeys(found))[:15]

            for symbol in deduped:
                if symbol in noise:
                    continue
                self.results["stocks"].append({
                    "symbol": symbol,
                    "category": label,
                    "source": f"yahoo_{label}",
                })
                self.mentions[symbol]["count"] += 1
                self.mentions[symbol]["sources"].append(f"Yahoo {label.replace('_', ' ').title()}")

            if deduped:
                print(f"    āœ… Yahoo {label}: {len(deduped)} tickers")
        except Exception as e:
            print(f"    āš ļø Yahoo {label}: {str(e)[:30]}")
275
+
276
def scan_reddit_wsb(self):
    """Tally ticker mentions from hot r/wallstreetbets post titles."""
    print("  šŸ¦ Reddit r/wallstreetbets...")
    try:
        url = "https://old.reddit.com/r/wallstreetbets/hot/.json"
        request = urllib.request.Request(
            url, headers={**self.headers, "Accept": "application/json"})

        with urllib.request.urlopen(request, timeout=15, context=SSL_CONTEXT) as resp:
            raw = resp.read()
        if raw[:2] == b'\x1f\x8b':  # gzip magic bytes
            raw = gzip.decompress(raw)
        listing = json.loads(raw.decode('utf-8'))

        seen = []
        for child in listing.get("data", {}).get("children", [])[:25]:
            post = child.get("data", {})
            headline = post.get("title", "")
            upvotes = post.get("score", 0)

            symbols = self._extract_tickers(headline)
            for symbol in symbols:
                # Ignore WSB jargon that pattern-matches as a ticker.
                if symbol in ['USA', 'CEO', 'IPO', 'DD', 'WSB', 'YOLO', 'FD']:
                    continue
                weight = 2 if upvotes > 1000 else 1  # popular posts count double
                self.mentions[symbol]["count"] += weight
                self.mentions[symbol]["sources"].append("Reddit WSB")
                self.mentions[symbol]["sentiment_hints"].append(f"šŸ¦ WSB: {headline[:35]}...")
                seen.append(symbol)

            self.results["social"].append({
                "platform": "reddit_wsb",
                "title": headline[:100],
                "score": upvotes,
                "tickers": symbols,
            })

        print(f"    āœ… WSB: {len(set(seen))} tickers mentioned")
    except Exception as e:
        print(f"    āŒ Reddit WSB: {str(e)[:40]}")
314
+
315
def scan_reddit_crypto(self):
    """Tally coin-name mentions from hot r/cryptocurrency post titles."""
    print("  šŸ’Ž Reddit r/cryptocurrency...")
    try:
        url = "https://old.reddit.com/r/cryptocurrency/hot/.json"
        request = urllib.request.Request(
            url, headers={**self.headers, "Accept": "application/json"})

        with urllib.request.urlopen(request, timeout=15, context=SSL_CONTEXT) as resp:
            raw = resp.read()
        if raw[:2] == b'\x1f\x8b':  # gzip magic bytes
            raw = gzip.decompress(raw)
        listing = json.loads(raw.decode('utf-8'))

        # Plain-language coin names -> ticker symbols.
        name_to_ticker = {
            "bitcoin": "BTC", "btc": "BTC", "ethereum": "ETH", "eth": "ETH",
            "solana": "SOL", "sol": "SOL", "xrp": "XRP", "cardano": "ADA",
            "dogecoin": "DOGE", "doge": "DOGE", "shiba": "SHIB", "pepe": "PEPE",
            "avalanche": "AVAX", "polkadot": "DOT", "chainlink": "LINK",
        }

        seen = []
        for child in listing.get("data", {}).get("children", [])[:20]:
            post = child.get("data", {})
            headline = post.get("title", "").lower()
            upvotes = post.get("score", 0)

            for name, symbol in name_to_ticker.items():
                if name in headline:
                    weight = 2 if upvotes > 500 else 1  # popular posts count double
                    self.mentions[symbol]["count"] += weight
                    self.mentions[symbol]["sources"].append("Reddit Crypto")
                    seen.append(symbol)

        print(f"    āœ… r/crypto: {len(set(seen))} coins mentioned")
    except Exception as e:
        print(f"    āŒ Reddit Crypto: {str(e)[:40]}")
351
+
352
def scan_twitter(self):
    """Search Twitter/X via the external ``bird`` CLI for finance/crypto chatter.

    Silently degrades when the CLI is missing, times out, or returns
    non-JSON output (typically an auth problem).
    """
    print("  🐦 Twitter/X...")
    try:
        # Known install locations, with a bare "bird" PATH fallback.
        candidates = [
            "/home/clawdbot/.nvm/versions/node/v24.12.0/bin/bird",
            "/usr/local/bin/bird",
            "bird",
        ]
        bird_bin = None
        for candidate in candidates:
            if Path(candidate).exists() or candidate == "bird":
                bird_bin = candidate
                break

        if not bird_bin:
            print("    āš ļø Twitter: bird not found")
            return

        searches = [
            ("stocks", "stock OR $SPY OR $QQQ OR earnings"),
            ("crypto", "bitcoin OR ethereum OR crypto OR $BTC"),
        ]

        for category, query in searches:
            try:
                proc = subprocess.run(
                    [bird_bin, "search", query, "-n", "15", "--json"],
                    capture_output=True, text=True, timeout=30,
                    env=os.environ.copy(),
                )

                if proc.returncode == 0 and proc.stdout.strip():
                    tweets = json.loads(proc.stdout)
                    for tweet in tweets[:10]:
                        body = tweet.get("text", "")
                        symbols = self._extract_tickers(body)

                        lowered = body.lower()
                        for name, symbol in {"bitcoin": "BTC", "ethereum": "ETH", "solana": "SOL"}.items():
                            if name in lowered:
                                symbols.append(symbol)

                        for symbol in set(symbols):
                            self.mentions[symbol]["count"] += 1
                            self.mentions[symbol]["sources"].append("Twitter/X")
                            self.mentions[symbol]["sentiment_hints"].append(f"🐦 {body[:35]}...")

                        self.results["social"].append({
                            "platform": "twitter",
                            "text": body[:100],
                            "tickers": list(set(symbols)),
                        })

                print(f"    āœ… Twitter {category}: processed")
            except subprocess.TimeoutExpired:
                print(f"    āš ļø Twitter {category}: timeout")
            except json.JSONDecodeError:
                print(f"    āš ļø Twitter {category}: no auth?")
    except FileNotFoundError:
        print("    āš ļø Twitter: bird CLI not found")
    except Exception as e:
        print(f"    āŒ Twitter: {str(e)[:40]}")
415
+
416
+ def _extract_tickers(self, text):
417
+ """Extract stock/crypto tickers from text."""
418
+ patterns = [
419
+ r'\$([A-Z]{1,5})\b',
420
+ r'\(([A-Z]{2,5})\)',
421
+ r'(?:^|\s)([A-Z]{2,4})(?:\s|$|[,.])',
422
+ ]
423
+
424
+ tickers = []
425
+ for pattern in patterns:
426
+ matches = re.findall(pattern, text)
427
+ tickers.extend(matches)
428
+
429
+ companies = {
430
+ "Apple": "AAPL", "Microsoft": "MSFT", "Google": "GOOGL", "Alphabet": "GOOGL",
431
+ "Amazon": "AMZN", "Tesla": "TSLA", "Nvidia": "NVDA", "Meta": "META",
432
+ "Netflix": "NFLX", "GameStop": "GME", "AMD": "AMD", "Intel": "INTC",
433
+ "Palantir": "PLTR", "Coinbase": "COIN", "MicroStrategy": "MSTR",
434
+ }
435
+
436
+ for company, ticker in companies.items():
437
+ if company.lower() in text.lower():
438
+ tickers.append(ticker)
439
+
440
+ skip = {'USA', 'CEO', 'IPO', 'ETF', 'SEC', 'FDA', 'NYSE', 'API', 'USD', 'EU',
441
+ 'UK', 'US', 'AI', 'IT', 'AT', 'TO', 'IN', 'ON', 'IS', 'IF', 'OR', 'AN',
442
+ 'DD', 'WSB', 'YOLO', 'FD', 'OP', 'PM', 'AM'}
443
+
444
+ return list(set(t for t in tickers if t not in skip and len(t) >= 2))
445
+
446
def get_hot_summary(self):
    """Condense the raw scan results into a ranked, de-duplicated summary dict."""
    ranked = sorted(self.mentions.items(), key=lambda kv: kv[1]["count"], reverse=True)

    summary = {
        "scan_time": self.results["timestamp"],
        "top_trending": [],
        "crypto_highlights": [],
        "stock_highlights": [],
        "social_buzz": [],
        "breaking_news": [],
    }

    # Most-buzzed symbols first, capped at 20, with up to three signals each.
    for symbol, info in ranked[:20]:
        summary["top_trending"].append({
            "symbol": symbol,
            "mentions": info["count"],
            "sources": list(set(info["sources"])),
            "signals": info["sentiment_hints"][:3],
        })

    # First occurrence wins when a coin appears as both trending and mover.
    seen_coins = set()
    for coin in self.results["crypto"] + self.results["movers"]:
        if coin["symbol"] in seen_coins:
            continue
        summary["crypto_highlights"].append(coin)
        seen_coins.add(coin["symbol"])

    seen_stocks = set()
    for stock in self.results["stocks"]:
        if stock["symbol"] in seen_stocks:
            continue
        summary["stock_highlights"].append(stock)
        seen_stocks.add(stock["symbol"])

    summary["social_buzz"].extend(self.results["social"][:15])

    # Only headlines that actually name a ticker make the cut.
    for article in self.results["news"][:10]:
        if article.get("tickers_mentioned"):
            summary["breaking_news"].append({
                "title": article["title"],
                "tickers": article["tickers_mentioned"],
            })

    return summary
494
+
495
+
496
def main():
    """CLI entry point: scan all sources, cache the summary JSON, print a report.

    Flags:
        --no-social  skip the Reddit/Twitter scans
        --json       emit only the summary JSON (no human-readable report)

    The summary is always written to ``CACHE_DIR/hot_scan_latest.json`` so
    other tools can consume the latest scan.
    """
    import argparse
    parser = argparse.ArgumentParser(description="šŸ”„ Hot Scanner - Find trending stocks & crypto")
    parser.add_argument("--no-social", action="store_true", help="Skip social media scans")
    parser.add_argument("--json", action="store_true", help="Output only JSON")
    args = parser.parse_args()

    scanner = HotScanner(include_social=not args.no_social)

    if not args.json:
        print("=" * 60)
        print("šŸ”„ HOT SCANNER v2 - What's Trending Right Now?")
        # BUG FIX: the banner is labeled "UTC", so print an actual UTC
        # timestamp; the original used datetime.now(), which is local time.
        print(f"šŸ“… {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S')} UTC")
        print("=" * 60)
        print()

    scanner.scan_all()
    summary = scanner.get_hot_summary()

    # Persist the latest summary for other tools (default=str stringifies
    # anything json can't serialize natively).
    output_file = CACHE_DIR / "hot_scan_latest.json"
    with open(output_file, "w") as f:
        json.dump(summary, f, indent=2, default=str)

    if args.json:
        print(json.dumps(summary, indent=2, default=str))
        return

    print()
    print("=" * 60)
    print("šŸ”„ RESULTS")
    print("=" * 60)

    print("\nšŸ“Š TOP TRENDING (by buzz):\n")
    for i, item in enumerate(summary["top_trending"][:12], 1):
        sources = ", ".join(item["sources"][:2])
        signal = item["signals"][0][:30] if item["signals"] else ""
        print(f"  {i:2}. {item['symbol']:8} ({item['mentions']:2} pts) [{sources}] {signal}")

    print("\nšŸŖ™ CRYPTO:\n")
    for coin in summary["crypto_highlights"][:8]:
        # Trending entries use "price_change_24h", movers use "change_24h".
        change = coin.get("change_24h") or coin.get("price_change_24h")
        change_str = f"{change:+.1f}%" if change else "šŸ”„"
        emoji = "šŸš€" if (change or 0) > 0 else "šŸ“‰" if (change or 0) < 0 else "šŸ”„"
        print(f"  {emoji} {coin.get('symbol', '?'):8} {coin.get('name', '')[:16]:16} {change_str:>8}")

    print("\nšŸ“ˆ STOCKS:\n")
    cat_emoji = {"gainers": "🟢", "losers": "šŸ”“", "most_active": "šŸ“Š"}
    for stock in summary["stock_highlights"][:10]:
        emoji = cat_emoji.get(stock.get("category"), "•")
        print(f"  {emoji} {stock['symbol']:6} ({stock.get('category', 'N/A').replace('_', ' ')})")

    if summary["social_buzz"]:
        print("\n🐦 SOCIAL BUZZ:\n")
        for item in summary["social_buzz"][:5]:
            platform = item.get("platform", "?")
            # Reddit entries carry "title", Twitter entries carry "text".
            text = item.get("title") or item.get("text", "")
            text = text[:55] + "..." if len(text) > 55 else text
            print(f"  [{platform}] {text}")

    print("\nšŸ“° NEWS:\n")
    for news in summary["breaking_news"][:5]:
        tickers = ", ".join(news["tickers"][:3])
        title = news["title"][:55] + "..." if len(news["title"]) > 55 else news["title"]
        print(f"  [{tickers}] {title}")

    print(f"\nšŸ’¾ Saved: {output_file}\n")
562
+
563
+
564
# Run the scanner when executed as a script.
if __name__ == "__main__":
    main()