@heylemon/lemonade 0.2.2 → 0.2.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/build-info.json +3 -3
- package/dist/canvas-host/a2ui/.bundle.hash +1 -1
- package/package.json +1 -1
- package/skills/frontend-design/SKILL.md +39 -0
- package/skills/self-improving-agent/SKILL.md +128 -0
- package/skills/stock-analysis/SKILL.md +131 -0
- package/skills/stock-analysis/scripts/analyze_stock.py +2532 -0
- package/skills/stock-analysis/scripts/dividends.py +365 -0
- package/skills/stock-analysis/scripts/hot_scanner.py +565 -0
- package/skills/stock-analysis/scripts/portfolio.py +528 -0
- package/skills/stock-analysis/scripts/rumor_scanner.py +330 -0
- package/skills/stock-analysis/scripts/watchlist.py +318 -0
- package/skills/youtube-watcher/SKILL.md +51 -0
- package/skills/youtube-watcher/scripts/get_transcript.py +81 -0
|
@@ -0,0 +1,330 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
🔮 RUMOR & BUZZ SCANNER
|
|
4
|
+
Scans for early signals, rumors, and whispers before they become mainstream news.
|
|
5
|
+
|
|
6
|
+
Sources:
|
|
7
|
+
- Twitter/X: "hearing", "rumor", "sources say", unusual buzz
|
|
8
|
+
- Google News: M&A, insider, upgrade/downgrade
|
|
9
|
+
- Unusual keywords detection
|
|
10
|
+
|
|
11
|
+
Usage: python3 rumor_scanner.py
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import json
|
|
15
|
+
import os
|
|
16
|
+
import subprocess
|
|
17
|
+
import sys
|
|
18
|
+
import re
|
|
19
|
+
from datetime import datetime, timezone
|
|
20
|
+
from pathlib import Path
|
|
21
|
+
from urllib.request import urlopen, Request
|
|
22
|
+
from urllib.parse import quote_plus
|
|
23
|
+
import gzip
|
|
24
|
+
|
|
25
|
+
CACHE_DIR = Path(__file__).parent.parent / "cache"
|
|
26
|
+
CACHE_DIR.mkdir(exist_ok=True)
|
|
27
|
+
|
|
28
|
+
# Bird CLI path
|
|
29
|
+
BIRD_CLI = "/home/clawdbot/.nvm/versions/node/v24.12.0/bin/bird"
|
|
30
|
+
BIRD_ENV = Path(__file__).parent.parent / ".env"
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def load_env():
    """Populate os.environ with KEY=VALUE pairs read from the .env file.

    Comment lines (starting with '#') and lines without '=' are skipped.
    Surrounding single or double quotes around values are stripped.
    """
    if not BIRD_ENV.exists():
        return
    for raw in BIRD_ENV.read_text().splitlines():
        if raw.startswith('#') or '=' not in raw:
            continue
        name, _, raw_value = raw.partition('=')
        os.environ[name.strip()] = raw_value.strip().strip('"').strip("'")
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def fetch_url(url, timeout=15):
    """Fetch *url* and return the response body as text, or None on any error.

    Sends browser-like headers and transparently decompresses gzip bodies;
    decoding errors are ignored rather than raised.
    """
    browser_headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'en-US,en;q=0.9',
    }
    try:
        with urlopen(Request(url, headers=browser_headers), timeout=timeout) as resp:
            body = resp.read()
            if resp.info().get('Content-Encoding') == 'gzip':
                body = gzip.decompress(body)
            return body.decode('utf-8', errors='ignore')
    except Exception:
        # Best effort: network/HTTP failures are reported as "no content".
        return None
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def search_twitter_rumors():
    """Search Twitter (via the bird CLI) for rumor-style chatter.

    Runs the first four rumor-oriented queries, keeps tweets whose text
    contains a rumor keyword, and returns a de-duplicated list of dicts
    (source/type/text/author/likes/retweets/query). CLI or JSON failures
    are swallowed so one bad query cannot kill the whole scan.
    """
    results = []

    queries = [
        '"hearing that" stock OR $',
        '"sources say" stock OR company',
        '"rumor" merger OR acquisition',
        'insider buying stock',
        '"upgrade" OR "downgrade" stock tomorrow',
        '$AAPL OR $TSLA OR $NVDA rumor',
        '"breaking" stock market',
        'M&A rumor',
    ]

    # Keywords are matched against text.lower(), so they must themselves be
    # lowercase.  BUG FIX: 'M&A' was uppercase and could never match.
    rumor_keywords = ['hearing', 'rumor', 'source', 'insider', 'upgrade',
                      'downgrade', 'breaking', 'm&a', 'merger', 'acquisition']

    load_env()

    for query in queries[:4]:
        try:
            cmd = [BIRD_CLI, 'search', query, '-n', '10', '--json']
            env = os.environ.copy()

            result = subprocess.run(cmd, capture_output=True, text=True, timeout=30, env=env)

            if result.returncode == 0 and result.stdout:
                try:
                    tweets = json.loads(result.stdout)
                    for tweet in tweets:
                        text = tweet.get('text', '')
                        if any(kw in text.lower() for kw in rumor_keywords):
                            results.append({
                                'source': 'twitter',
                                'type': 'rumor',
                                'text': text[:300],
                                'author': tweet.get('author', {}).get('username', 'unknown'),
                                'likes': tweet.get('likes', 0),
                                'retweets': tweet.get('retweets', 0),
                                'query': query
                            })
                except json.JSONDecodeError:
                    pass
        except Exception:
            # Missing CLI, timeout, etc. — skip this query and continue.
            pass

    # De-duplicate near-identical tweets by their first 100 characters.
    seen = set()
    unique = []
    for r in results:
        key = r['text'][:100]
        if key not in seen:
            seen.add(key)
            unique.append(r)

    return unique
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def search_twitter_buzz():
    """Search Twitter for broad stock chatter and return the buzziest tweets.

    Only tweets mentioning at least one $TICKER cashtag are kept; results
    are ranked by engagement (likes + 2 * retweets) and capped at 20.
    """
    queries = [
        '$SPY OR $QQQ',
        'stock to buy',
        'calls OR puts expiring',
        'earnings play',
        'short squeeze',
    ]

    load_env()
    buzz = []

    for query in queries[:3]:
        try:
            proc = subprocess.run(
                [BIRD_CLI, 'search', query, '-n', '15', '--json'],
                capture_output=True, text=True, timeout=30, env=os.environ.copy(),
            )
            if proc.returncode != 0 or not proc.stdout:
                continue
            try:
                tweets = json.loads(proc.stdout)
            except json.JSONDecodeError:
                continue
            for tweet in tweets:
                text = tweet.get('text', '')
                tickers = re.findall(r'\$([A-Z]{1,5})\b', text)
                if not tickers:
                    continue
                buzz.append({
                    'source': 'twitter',
                    'type': 'buzz',
                    'text': text[:300],
                    'symbols': tickers,
                    'author': tweet.get('author', {}).get('username', 'unknown'),
                    'engagement': tweet.get('likes', 0) + tweet.get('retweets', 0) * 2
                })
        except Exception:
            # Best effort: a failed query contributes nothing.
            pass

    buzz.sort(key=lambda item: item.get('engagement', 0), reverse=True)
    return buzz[:20]
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def search_news_rumors():
    """Search Google News RSS for M&A / insider / analyst-action headlines.

    Returns a list of dicts (source/type/title/link/date/query) with up
    to five items per query. Unreachable feeds and malformed XML are
    skipped silently.
    """
    # IMPROVED: hoisted out of the per-query loop — the import statement
    # used to re-execute on every iteration (harmless but redundant).
    import xml.etree.ElementTree as ET

    results = []

    queries = [
        'merger acquisition rumor',
        'insider buying stock',
        'analyst upgrade stock',
        'takeover bid company',
        'SEC investigation company',
    ]

    for query in queries:
        url = f"https://news.google.com/rss/search?q={quote_plus(query)}&hl=en-US&gl=US&ceid=US:en"
        content = fetch_url(url)
        if not content:
            continue

        try:
            root = ET.fromstring(content)
        except ET.ParseError:
            continue

        for item in root.findall('.//item')[:5]:
            title = item.find('title')
            link = item.find('link')
            pub_date = item.find('pubDate')

            if title is None:
                continue
            results.append({
                'source': 'google_news',
                'type': 'news_rumor',
                'title': title.text or '',
                'link': link.text if link is not None else '',
                'date': pub_date.text if pub_date is not None else '',
                'query': query
            })

    return results
|
|
200
|
+
|
|
201
|
+
|
|
202
|
+
def extract_symbols_from_text(text):
    """Return a de-duplicated list of ticker symbols mentioned in *text*.

    Picks up both cashtags ($AAPL) and a small hard-coded set of
    well-known company names (matched case-insensitively). The order of
    the returned list is unspecified (set semantics).
    """
    cashtags = re.findall(r'\$([A-Z]{1,5})\b', text)

    known_companies = {
        'apple': 'AAPL', 'tesla': 'TSLA', 'nvidia': 'NVDA', 'microsoft': 'MSFT',
        'google': 'GOOGL', 'amazon': 'AMZN', 'meta': 'META', 'netflix': 'NFLX',
        'coinbase': 'COIN', 'robinhood': 'HOOD', 'disney': 'DIS', 'intel': 'INTC',
        'amd': 'AMD', 'palantir': 'PLTR', 'gamestop': 'GME', 'amc': 'AMC',
    }

    lowered = text.lower()
    named = [ticker for company, ticker in known_companies.items() if company in lowered]

    return list(set(cashtags + named))
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
def calculate_rumor_score(item):
    """Score a rumor/news item by likely market impact (higher = hotter).

    Combines the item's 'text' and 'title' (lowercased) and adds points
    per keyword group matched, plus small bonuses for social traction.
    """
    combined = (item.get('text', '') + item.get('title', '')).lower()

    # (points, trigger keywords) — any single match in a group adds its points.
    keyword_groups = (
        (5, ('merger', 'acquisition', 'takeover', 'buyout')),
        (4, ('insider', 'ceo buying', 'director buying')),
        (3, ('upgrade', 'price target raised')),
        (3, ('downgrade', 'sec investigation', 'fraud')),
        (2, ('hearing', 'sources say', 'rumor')),
        (2, ('breaking', 'just in', 'alert')),
    )

    total = sum(points for points, words in keyword_groups
                if any(word in combined for word in words))

    # Engagement bonuses.
    if item.get('engagement', 0) > 100:
        total += 2
    if item.get('likes', 0) > 50:
        total += 1

    return total
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
def main():
    """Run the rumor/buzz scan end to end: collect, score, report, cache."""
    line = "=" * 60
    print(line)
    print("🔮 RUMOR & BUZZ SCANNER")
    print(f"📅 {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S')} UTC")
    print(line)
    print()
    print("🔍 Scanning for early signals...")
    print()

    all_rumors = []
    all_buzz = []

    # Collect from each source; all collectors are best-effort and may
    # return empty lists on failure.
    print(" 🐦 Twitter rumors...")
    rumors = search_twitter_rumors()
    print(f" ✅ {len(rumors)} potential rumors")
    all_rumors.extend(rumors)

    print(" 🐦 Twitter buzz...")
    buzz = search_twitter_buzz()
    print(f" ✅ {len(buzz)} buzz items")
    all_buzz.extend(buzz)

    print(" 📰 News rumors...")
    news = search_news_rumors()
    print(f" ✅ {len(news)} news items")
    all_rumors.extend(news)

    # Score every rumor and attach any ticker symbols found in its text/title.
    for item in all_rumors:
        item['score'] = calculate_rumor_score(item)
        item['symbols'] = extract_symbols_from_text(item.get('text', '') + item.get('title', ''))

    all_rumors.sort(key=lambda r: r['score'], reverse=True)

    # Tally how often each symbol shows up in the buzz stream.
    symbol_counts = {}
    for item in all_buzz:
        for sym in item.get('symbols', []):
            symbol_counts[sym] = symbol_counts.get(sym, 0) + 1

    print()
    print(line)
    print("🔮 RESULTS")
    print(line)
    print()

    print("🚨 TOP RUMORS (by potential impact):")
    print()
    for item in all_rumors[:10]:
        if item['score'] > 0:
            source = item['source']
            symbols = ', '.join(item.get('symbols', [])) or 'N/A'
            text = item.get('text', item.get('title', ''))[:80]
            print(f" [{item['score']}] [{source}] {symbols}")
            print(f" {text}...")
            print()

    print("📊 BUZZ LEADERBOARD (most discussed):")
    print()
    ranked = sorted(symbol_counts.items(), key=lambda kv: kv[1], reverse=True)
    for symbol, count in ranked[:15]:
        bar = "█" * min(count, 20)
        print(f" ${symbol:5} {bar} ({count})")

    print()
    print("💬 WHAT PEOPLE ARE SAYING:")
    print()
    for item in all_buzz[:8]:
        author = item.get('author', 'anon')
        text = item.get('text', '')[:120]
        engagement = item.get('engagement', 0)
        print(f" @{author} ({engagement}♥): {text}...")
        print()

    # Persist a machine-readable snapshot for downstream consumers.
    output = {
        'timestamp': datetime.now(timezone.utc).isoformat(),
        'rumors': all_rumors[:20],
        'buzz': all_buzz[:30],
        'symbol_counts': symbol_counts,
    }

    output_file = CACHE_DIR / 'rumor_scan_latest.json'
    output_file.write_text(json.dumps(output, indent=2, default=str))
    print(f"💾 Saved: {output_file}")


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,318 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# /// script
|
|
3
|
+
# requires-python = ">=3.10"
|
|
4
|
+
# dependencies = [
|
|
5
|
+
# "yfinance>=0.2.40",
|
|
6
|
+
# ]
|
|
7
|
+
# ///
|
|
8
|
+
"""
|
|
9
|
+
Stock Watchlist with Price Alerts.
|
|
10
|
+
|
|
11
|
+
Usage:
|
|
12
|
+
uv run watchlist.py add AAPL # Add to watchlist
|
|
13
|
+
uv run watchlist.py add AAPL --target 200 # With price target
|
|
14
|
+
uv run watchlist.py add AAPL --stop 150 # With stop loss
|
|
15
|
+
uv run watchlist.py add AAPL --alert-on signal # Alert on signal change
|
|
16
|
+
uv run watchlist.py remove AAPL # Remove from watchlist
|
|
17
|
+
uv run watchlist.py list # Show watchlist
|
|
18
|
+
uv run watchlist.py check # Check for triggered alerts
|
|
19
|
+
uv run watchlist.py check --notify # Check and format for notification
|
|
20
|
+
"""
|
|
21
|
+
|
|
22
|
+
import argparse
|
|
23
|
+
import json
|
|
24
|
+
import sys
|
|
25
|
+
from dataclasses import dataclass, asdict
|
|
26
|
+
from datetime import datetime, timezone
|
|
27
|
+
from pathlib import Path
|
|
28
|
+
from typing import Literal
|
|
29
|
+
|
|
30
|
+
import yfinance as yf
|
|
31
|
+
|
|
32
|
+
# Storage
|
|
33
|
+
WATCHLIST_DIR = Path.home() / ".clawdbot" / "skills" / "stock-analysis"
|
|
34
|
+
WATCHLIST_FILE = WATCHLIST_DIR / "watchlist.json"
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
@dataclass
class WatchlistItem:
    """One watched ticker plus its alert configuration and check state."""
    ticker: str
    added_at: str  # ISO-8601 UTC timestamp set when the item is created
    price_at_add: float | None = None  # price snapshot taken at add time
    target_price: float | None = None  # Alert when price >= target
    stop_price: float | None = None    # Alert when price <= stop
    alert_on_signal: bool = False      # Alert when recommendation changes
    last_signal: str | None = None     # BUY/HOLD/SELL
    last_check: str | None = None      # ISO timestamp of the last alert check
    notes: str | None = None           # free-form user notes
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
@dataclass
class Alert:
    """A triggered watchlist alert, ready for display or notification."""
    ticker: str
    alert_type: Literal["target_hit", "stop_hit", "signal_change"]
    message: str            # human-readable, emoji-prefixed summary line
    current_price: float    # price observed when the alert fired
    trigger_value: float | str  # threshold crossed, or "OLD → NEW" for signal changes
    timestamp: str          # ISO-8601 UTC time of the check
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def ensure_dirs():
    """Create storage directories (idempotent; creates parents as needed)."""
    WATCHLIST_DIR.mkdir(parents=True, exist_ok=True)
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def load_watchlist() -> list[WatchlistItem]:
    """Read the persisted watchlist, returning [] when no file exists yet."""
    if not WATCHLIST_FILE.exists():
        return []
    raw_items = json.loads(WATCHLIST_FILE.read_text())
    return [WatchlistItem(**raw) for raw in raw_items]
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def save_watchlist(items: list[WatchlistItem]):
    """Serialize *items* to the watchlist JSON file, creating dirs first."""
    ensure_dirs()
    serialized = json.dumps([asdict(entry) for entry in items], indent=2)
    WATCHLIST_FILE.write_text(serialized)
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def get_current_price(ticker: str) -> float | None:
    """Look up the latest market price for *ticker*; None if unavailable."""
    try:
        info = yf.Ticker(ticker).info
        quote = info.get("regularMarketPrice") or info.get("currentPrice")
        # A missing or zero quote is treated as "no price available".
        return float(quote) if quote else None
    except Exception:
        return None
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def add_to_watchlist(
    ticker: str,
    target_price: float | None = None,
    stop_price: float | None = None,
    alert_on_signal: bool = False,
    notes: str | None = None,
) -> dict:
    """Add *ticker* to the watchlist, or update its settings if already there.

    Validates the ticker by fetching a live price first. Returns a result
    dict with a success flag, the action taken ("added"/"updated"), the
    current price and the effective alert settings.
    """
    ticker = ticker.upper()

    current_price = get_current_price(ticker)
    if current_price is None:
        return {"success": False, "error": f"Invalid ticker: {ticker}"}

    watchlist = load_watchlist()

    # Existing entry: merge in any newly supplied settings
    # (unset/falsy arguments keep the previous values).
    existing = next((entry for entry in watchlist if entry.ticker == ticker), None)
    if existing is not None:
        existing.target_price = target_price or existing.target_price
        existing.stop_price = stop_price or existing.stop_price
        existing.alert_on_signal = alert_on_signal or existing.alert_on_signal
        existing.notes = notes or existing.notes
        save_watchlist(watchlist)
        return {
            "success": True,
            "action": "updated",
            "ticker": ticker,
            "current_price": current_price,
            "target_price": existing.target_price,
            "stop_price": existing.stop_price,
            "alert_on_signal": existing.alert_on_signal,
        }

    # New entry: record the add-time price so % change can be shown later.
    new_entry = WatchlistItem(
        ticker=ticker,
        added_at=datetime.now(timezone.utc).isoformat(),
        price_at_add=current_price,
        target_price=target_price,
        stop_price=stop_price,
        alert_on_signal=alert_on_signal,
        notes=notes,
    )
    watchlist.append(new_entry)
    save_watchlist(watchlist)

    return {
        "success": True,
        "action": "added",
        "ticker": ticker,
        "current_price": current_price,
        "target_price": target_price,
        "stop_price": stop_price,
        "alert_on_signal": alert_on_signal,
    }
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def remove_from_watchlist(ticker: str) -> dict:
    """Drop *ticker* from the watchlist; error result if it wasn't listed."""
    ticker = ticker.upper()
    watchlist = load_watchlist()

    remaining = [entry for entry in watchlist if entry.ticker != ticker]
    if len(remaining) == len(watchlist):
        return {"success": False, "error": f"{ticker} not in watchlist"}

    save_watchlist(remaining)
    return {"success": True, "removed": ticker}
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def list_watchlist() -> dict:
    """List all watchlist items enriched with live prices and distances.

    For each item reports the % change since it was added and the %
    distance to the target/stop thresholds (when configured). Returns
    {"success": True, "items": [...], "count": N}.
    """
    watchlist = load_watchlist()

    if not watchlist:
        return {"success": True, "items": [], "count": 0}

    items = []
    for item in watchlist:
        current_price = get_current_price(item.ticker)

        change_pct = None
        if current_price and item.price_at_add:
            change_pct = ((current_price - item.price_at_add) / item.price_at_add) * 100

        to_target = None
        to_stop = None
        if current_price:
            if item.target_price:
                to_target = ((item.target_price - current_price) / current_price) * 100
            if item.stop_price:
                to_stop = ((item.stop_price - current_price) / current_price) * 100

        # BUG FIX: use "is not None" — a legitimate 0.0% value was
        # previously reported as null because 0.0 is falsy.
        items.append({
            "ticker": item.ticker,
            "current_price": current_price,
            "price_at_add": item.price_at_add,
            "change_pct": round(change_pct, 2) if change_pct is not None else None,
            "target_price": item.target_price,
            "to_target_pct": round(to_target, 2) if to_target is not None else None,
            "stop_price": item.stop_price,
            "to_stop_pct": round(to_stop, 2) if to_stop is not None else None,
            "alert_on_signal": item.alert_on_signal,
            "last_signal": item.last_signal,
            "added_at": item.added_at[:10],
            "notes": item.notes,
        })

    return {"success": True, "items": items, "count": len(items)}
|
|
200
|
+
|
|
201
|
+
|
|
202
|
+
def check_alerts(notify_format: bool = False) -> dict:
    """Scan the watchlist and return any triggered price/signal alerts.

    Side effects: updates each item's last_check (and last_signal when the
    analyzer is re-run) and persists the watchlist. With notify_format and
    at least one alert, the result also carries a ready-to-send
    'notification' string.
    """
    watchlist = load_watchlist()
    alerts: list[Alert] = []
    now = datetime.now(timezone.utc).isoformat()

    for item in watchlist:
        current_price = get_current_price(item.ticker)
        if current_price is None:
            # No quote available — skip this item entirely.
            continue

        if item.target_price and current_price >= item.target_price:
            alerts.append(Alert(
                ticker=item.ticker,
                alert_type="target_hit",
                message=f"🎯 {item.ticker} hit target! ${current_price:.2f} >= ${item.target_price:.2f}",
                current_price=current_price,
                trigger_value=item.target_price,
                timestamp=now,
            ))

        if item.stop_price and current_price <= item.stop_price:
            alerts.append(Alert(
                ticker=item.ticker,
                alert_type="stop_hit",
                message=f"🛑 {item.ticker} hit stop! ${current_price:.2f} <= ${item.stop_price:.2f}",
                current_price=current_price,
                trigger_value=item.stop_price,
                timestamp=now,
            ))

        if item.alert_on_signal:
            # Re-run the full analyzer script; best-effort, failures ignored.
            try:
                import subprocess
                analyzer = Path(__file__).parent / "analyze_stock.py"
                proc = subprocess.run(
                    ["uv", "run", str(analyzer), item.ticker, "--output", "json"],
                    capture_output=True,
                    text=True,
                    timeout=60,
                )
                if proc.returncode == 0:
                    new_signal = json.loads(proc.stdout).get("recommendation")

                    if item.last_signal and new_signal and new_signal != item.last_signal:
                        alerts.append(Alert(
                            ticker=item.ticker,
                            alert_type="signal_change",
                            message=f"📊 {item.ticker} signal changed: {item.last_signal} → {new_signal}",
                            current_price=current_price,
                            trigger_value=f"{item.last_signal} → {new_signal}",
                            timestamp=now,
                        ))

                    item.last_signal = new_signal
            except Exception:
                pass

        item.last_check = now

    save_watchlist(watchlist)

    if notify_format and alerts:
        notification = "\n".join(["📢 **Stock Alerts**\n"] + [a.message for a in alerts])
        return {"success": True, "alerts": [asdict(a) for a in alerts], "notification": notification}

    return {"success": True, "alerts": [asdict(a) for a in alerts], "count": len(alerts)}
|
|
271
|
+
|
|
272
|
+
|
|
273
|
+
def main():
    """CLI entry point: dispatch the add/remove/list/check subcommands."""
    parser = argparse.ArgumentParser(description="Stock Watchlist with Alerts")
    subparsers = parser.add_subparsers(dest="command", required=True)

    add_parser = subparsers.add_parser("add", help="Add ticker to watchlist")
    add_parser.add_argument("ticker", help="Stock ticker")
    add_parser.add_argument("--target", type=float, help="Target price for alert")
    add_parser.add_argument("--stop", type=float, help="Stop loss price for alert")
    add_parser.add_argument("--alert-on", choices=["signal"], help="Alert on signal change")
    add_parser.add_argument("--notes", help="Notes")

    remove_parser = subparsers.add_parser("remove", help="Remove ticker from watchlist")
    remove_parser.add_argument("ticker", help="Stock ticker")

    subparsers.add_parser("list", help="List watchlist")

    check_parser = subparsers.add_parser("check", help="Check for triggered alerts")
    check_parser.add_argument("--notify", action="store_true", help="Format for notification")

    args = parser.parse_args()

    # Each handler returns a plain dict; the CLI prints it as pretty JSON.
    if args.command == "add":
        result = add_to_watchlist(
            args.ticker,
            target_price=args.target,
            stop_price=args.stop,
            alert_on_signal=(args.alert_on == "signal"),
            notes=args.notes,
        )
    elif args.command == "remove":
        result = remove_from_watchlist(args.ticker)
    elif args.command == "list":
        result = list_watchlist()
    else:
        # "check" — required=True guarantees one of the four subcommands.
        result = check_alerts(notify_format=args.notify)

    print(json.dumps(result, indent=2))


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: youtube-watcher
|
|
3
|
+
description: Fetch and read transcripts from YouTube videos. Use when you need to summarize a video, answer questions about its content, or extract information from it.
|
|
4
|
+
author: michael gathara
|
|
5
|
+
version: 1.0.0
|
|
6
|
+
triggers:
|
|
7
|
+
- "watch youtube"
|
|
8
|
+
- "summarize video"
|
|
9
|
+
- "video transcript"
|
|
10
|
+
- "youtube summary"
|
|
11
|
+
- "analyze video"
|
|
12
|
+
metadata: {"lemonade":{"emoji":"📺","requires":{"bins":["yt-dlp"]},"install":[{"id":"brew","kind":"brew","formula":"yt-dlp","bins":["yt-dlp"],"label":"Install yt-dlp (brew)"},{"id":"pip","kind":"pip","package":"yt-dlp","bins":["yt-dlp"],"label":"Install yt-dlp (pip)"}]}}
|
|
13
|
+
---
|
|
14
|
+
|
|
15
|
+
# YouTube Watcher
|
|
16
|
+
|
|
17
|
+
Fetch transcripts from YouTube videos to enable summarization, QA, and content extraction.
|
|
18
|
+
|
|
19
|
+
## Usage
|
|
20
|
+
|
|
21
|
+
### Get Transcript
|
|
22
|
+
|
|
23
|
+
```bash
|
|
24
|
+
python3 {baseDir}/scripts/get_transcript.py "https://www.youtube.com/watch?v=VIDEO_ID"
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
## Examples
|
|
28
|
+
|
|
29
|
+
**Summarize a video:**
|
|
30
|
+
|
|
31
|
+
1. Get the transcript:
|
|
32
|
+
```bash
|
|
33
|
+
python3 {baseDir}/scripts/get_transcript.py "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
|
|
34
|
+
```
|
|
35
|
+
2. Read the output and summarize it for the user.
|
|
36
|
+
|
|
37
|
+
**Find specific information:**
|
|
38
|
+
|
|
39
|
+
1. Get the transcript.
|
|
40
|
+
2. Search the text for keywords or answer the user's question based on the content.
|
|
41
|
+
|
|
42
|
+
## Important
|
|
43
|
+
|
|
44
|
+
**If `lemon-youtube` is available**, prefer using `lemon-youtube transcript <url>` instead — it uses the authenticated YouTube API and is more reliable. Use this skill as a fallback when the YouTube integration is not connected or `lemon-youtube` is unavailable.
|
|
45
|
+
|
|
46
|
+
## Notes
|
|
47
|
+
|
|
48
|
+
- Requires `yt-dlp` to be installed and available in the PATH.
|
|
49
|
+
- Works with videos that have closed captions (CC) or auto-generated subtitles.
|
|
50
|
+
- If a video has no subtitles, the script will fail with an error message.
|
|
51
|
+
- No API key required — uses yt-dlp to fetch publicly available subtitles.
|