opentradex 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/.env.example +8 -0
  2. package/CLAUDE.md +98 -0
  3. package/README.md +246 -0
  4. package/SOUL.md +79 -0
  5. package/SPEC.md +317 -0
  6. package/SUBMISSION.md +30 -0
  7. package/architecture.excalidraw +170 -0
  8. package/architecture.png +0 -0
  9. package/bin/opentradex.mjs +4 -0
  10. package/data/.gitkeep +0 -0
  11. package/data/strategy_notes.md +158 -0
  12. package/gossip/__init__.py +0 -0
  13. package/gossip/dashboard.py +150 -0
  14. package/gossip/db.py +358 -0
  15. package/gossip/kalshi.py +492 -0
  16. package/gossip/news.py +235 -0
  17. package/gossip/trader.py +646 -0
  18. package/main.py +287 -0
  19. package/package.json +47 -0
  20. package/requirements.txt +7 -0
  21. package/src/cli.mjs +124 -0
  22. package/src/index.mjs +420 -0
  23. package/web/AGENTS.md +5 -0
  24. package/web/CLAUDE.md +1 -0
  25. package/web/README.md +36 -0
  26. package/web/components.json +25 -0
  27. package/web/eslint.config.mjs +18 -0
  28. package/web/next.config.ts +7 -0
  29. package/web/package-lock.json +11626 -0
  30. package/web/package.json +37 -0
  31. package/web/postcss.config.mjs +7 -0
  32. package/web/public/file.svg +1 -0
  33. package/web/public/globe.svg +1 -0
  34. package/web/public/next.svg +1 -0
  35. package/web/public/vercel.svg +1 -0
  36. package/web/public/window.svg +1 -0
  37. package/web/src/app/api/agent/route.ts +77 -0
  38. package/web/src/app/api/agent/stream/route.ts +87 -0
  39. package/web/src/app/api/markets/route.ts +15 -0
  40. package/web/src/app/api/news/live/route.ts +77 -0
  41. package/web/src/app/api/news/reddit/route.ts +118 -0
  42. package/web/src/app/api/news/route.ts +10 -0
  43. package/web/src/app/api/news/tiktok/route.ts +115 -0
  44. package/web/src/app/api/news/truthsocial/route.ts +116 -0
  45. package/web/src/app/api/news/twitter/route.ts +186 -0
  46. package/web/src/app/api/portfolio/route.ts +50 -0
  47. package/web/src/app/api/prices/route.ts +18 -0
  48. package/web/src/app/api/trades/route.ts +10 -0
  49. package/web/src/app/favicon.ico +0 -0
  50. package/web/src/app/globals.css +170 -0
  51. package/web/src/app/layout.tsx +36 -0
  52. package/web/src/app/page.tsx +366 -0
  53. package/web/src/components/AgentLog.tsx +71 -0
  54. package/web/src/components/LiveStream.tsx +394 -0
  55. package/web/src/components/MarketScanner.tsx +111 -0
  56. package/web/src/components/NewsFeed.tsx +561 -0
  57. package/web/src/components/PortfolioStrip.tsx +139 -0
  58. package/web/src/components/PositionsPanel.tsx +219 -0
  59. package/web/src/components/TopBar.tsx +127 -0
  60. package/web/src/components/ui/badge.tsx +52 -0
  61. package/web/src/components/ui/button.tsx +60 -0
  62. package/web/src/components/ui/card.tsx +103 -0
  63. package/web/src/components/ui/scroll-area.tsx +55 -0
  64. package/web/src/components/ui/separator.tsx +25 -0
  65. package/web/src/components/ui/tabs.tsx +82 -0
  66. package/web/src/components/ui/tooltip.tsx +66 -0
  67. package/web/src/lib/db.ts +81 -0
  68. package/web/src/lib/types.ts +130 -0
  69. package/web/src/lib/utils.ts +6 -0
  70. package/web/tsconfig.json +34 -0
package/gossip/news.py ADDED
@@ -0,0 +1,235 @@
1
+ """
2
+ News intelligence layer — Apify-powered scraping for Google News, Twitter/X, and web search.
3
+
4
+ CLI tool invoked by Claude Code agent:
5
+ python3 gossip/news.py --keywords "bitcoin,tariff,cpi"
6
+ python3 gossip/news.py --keywords "trump tariff" --hours 2
7
+ python3 gossip/news.py --trending
8
+ python3 gossip/news.py --source google --keywords "federal reserve"
9
+ python3 gossip/news.py --source twitter --keywords "kalshi,polymarket"
10
+
11
+ All output is JSON to stdout. Logs go to stderr.
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import argparse
17
+ import json
18
+ import os
19
+ import sys
20
+ from datetime import datetime, timezone, timedelta
21
+ from pathlib import Path
22
+
23
+ from apify_client import ApifyClient
24
+ from dotenv import load_dotenv
25
+
26
+ load_dotenv(Path(__file__).resolve().parent.parent / ".env")
27
+
28
def log(msg: str) -> None:
    """Write *msg* to stderr, keeping stdout reserved for JSON output."""
    sys.stderr.write(msg + "\n")
30
+
31
def get_client() -> ApifyClient:
    """Build an ApifyClient from the APIFY_API_TOKEN environment variable.

    When the variable is unset, warns on stderr and returns a client with an
    empty token, so callers fail at request time rather than at construction.
    """
    token = os.getenv("APIFY_API_TOKEN", "")
    if token:
        return ApifyClient(token)
    log("WARNING: APIFY_API_TOKEN not set")
    return ApifyClient("")
37
+
38
+
39
def scrape_google_news(keywords: list[str], hours_back: int = 4, max_results: int = 30) -> list[dict]:
    """Search Google for recent news headlines matching *keywords*.

    Runs the Apify ``apify/google-search-scraper`` actor with one
    ``"<keyword> news"`` query per keyword and flattens the organic results,
    de-duplicated by URL.

    Args:
        keywords: Search terms; each is queried as ``"<kw> news"``.
        hours_back: Intended recency window. NOTE(review): search results
            carry no timestamp here, so no time filter is actually applied;
            the parameter is kept for interface compatibility.
        max_results: Cap on results-per-page and on the returned list.

    Returns:
        Up to ``max_results`` dicts (title/url/snippet/source/keyword/
        position), or ``[]`` if the actor call fails.
    """
    client = get_client()
    queries = [f"{kw} news" for kw in keywords]

    try:
        run = client.actor("apify/google-search-scraper").call(
            run_input={
                "queries": "\n".join(queries),
                "maxPagesPerQuery": 1,
                "resultsPerPage": max_results,
                "languageCode": "en",
                "countryCode": "us",
            },
            timeout_secs=120,
        )
    except Exception as e:
        log(f"Google News scrape failed: {e}")
        return []

    articles = []
    seen_urls = set()
    # (fix) removed the dead `cutoff = now - timedelta(hours=hours_back)`
    # computation: it was never applied to any result.

    for item in client.dataset(run["defaultDatasetId"]).iterate_items():
        for r in item.get("organicResults", []):
            url = r.get("url", "")
            if url in seen_urls:
                continue
            seen_urls.add(url)

            articles.append({
                "title": r.get("title", ""),
                "url": url,
                "snippet": r.get("description", ""),
                "source": "google",
                "keyword": item.get("searchQuery", {}).get("term", ""),
                "position": r.get("position", 0),
            })

    return articles[:max_results]
80
+
81
+
82
def scrape_twitter(keywords: list[str], hours_back: int = 2, max_results: int = 20) -> list[dict]:
    """Fetch recent high-engagement tweets for *keywords* via Apify.

    Each keyword is queried with a ``min_faves:50`` engagement filter and the
    actor is asked for "Latest" tweets; results are returned most-liked first.

    Args:
        keywords: Search terms.
        hours_back: Intended recency window. NOTE(review): no timestamp
            filter is actually sent to the actor; kept for interface
            compatibility.
        max_results: Max tweets requested from the actor and returned.

    Returns:
        Up to ``max_results`` tweet dicts sorted by like count, or ``[]``
        if the actor call fails.
    """
    client = get_client()
    queries = [f"{kw} min_faves:50" for kw in keywords]

    try:
        run = client.actor("apidojo/tweet-scraper").call(
            run_input={
                "searchTerms": queries,
                "maxTweets": max_results,
                "sort": "Latest",
            },
            timeout_secs=120,
        )
    except Exception as e:
        log(f"Twitter scrape failed: {e}")
        return []

    tweets = []
    for item in client.dataset(run["defaultDatasetId"]).iterate_items():
        # (fix) `author` may be present-but-null in actor output; the old
        # item.get("author", {}) chain would raise AttributeError on None.
        author = item.get("author") or {}
        tweets.append({
            "text": item.get("full_text", item.get("text", "")),
            "author": author.get("screen_name", ""),
            "likes": item.get("favorite_count", 0),
            "retweets": item.get("retweet_count", 0),
            "url": item.get("url", ""),
            "source": "twitter",
            "created_at": item.get("created_at", ""),
        })

    tweets.sort(key=lambda t: t.get("likes", 0), reverse=True)
    return tweets[:max_results]
113
+
114
+
115
def scrape_web_search(keywords: list[str], max_results: int = 20) -> list[dict]:
    """Run a plain Google web search for each keyword via the Apify actor.

    Returns up to ``max_results`` organic results, de-duplicated by URL,
    each tagged with ``source="web"`` and the originating query term.
    Returns ``[]`` if the actor call fails.
    """
    client = get_client()

    run_input = {
        "queries": "\n".join(keywords),
        "maxPagesPerQuery": 1,
        "resultsPerPage": max_results,
        "languageCode": "en",
        "countryCode": "us",
    }
    try:
        run = client.actor("apify/google-search-scraper").call(
            run_input=run_input,
            timeout_secs=120,
        )
    except Exception as exc:
        log(f"Web search failed: {exc}")
        return []

    hits: list[dict] = []
    visited: set[str] = set()
    for page in client.dataset(run["defaultDatasetId"]).iterate_items():
        term = page.get("searchQuery", {}).get("term", "")
        for hit in page.get("organicResults", []):
            link = hit.get("url", "")
            if link in visited:
                continue
            visited.add(link)
            hits.append({
                "title": hit.get("title", ""),
                "url": link,
                "snippet": hit.get("description", ""),
                "source": "web",
                "keyword": term,
            })

    return hits[:max_results]
150
+
151
+
152
def scrape_news_articles(urls: list[str]) -> list[dict]:
    """Extract article text from *urls* using Apify's website-content-crawler.

    Only the first 10 URLs are crawled (actor cost control) and each
    article's text is truncated to 3000 characters.

    Args:
        urls: Article URLs; anything past the first 10 is ignored.

    Returns:
        One dict per crawled page (url/title/text/source), or ``[]`` if the
        actor call fails.
    """
    client = get_client()
    batch = urls[:10]  # hard cap on crawl size

    try:
        run = client.actor("apify/website-content-crawler").call(
            run_input={
                "startUrls": [{"url": u} for u in batch],
                # (fix) cap pages to the batch actually submitted; the old
                # len(urls) could exceed the 10 start URLs sent.
                "maxCrawlPages": len(batch),
                "crawlerType": "cheerio",
            },
            timeout_secs=180,
        )
    except Exception as e:
        log(f"Article scrape failed: {e}")
        return []

    articles = []
    for item in client.dataset(run["defaultDatasetId"]).iterate_items():
        articles.append({
            "url": item.get("url", ""),
            "title": item.get("metadata", {}).get("title", ""),
            "text": item.get("text", "")[:3000],
            "source": "article",
        })

    return articles
179
+
180
+
181
+ # --- Default keyword sets ---
182
+
183
# Fallback search terms used when the caller supplies no --keywords (or
# passes --trending): broad queries aimed at market-moving headlines.
BASE_KEYWORDS = [
    "breaking news today",
    "financial markets today",
]
187
+
188
def main() -> None:
    """CLI entry point: parse args, scrape the chosen source(s), persist, print JSON.

    Results go to stdout as JSON (per the module contract); diagnostics go
    to stderr via log().
    """
    parser = argparse.ArgumentParser(description="News intelligence scraper")
    parser.add_argument("--keywords", type=str, default=None, help="Comma-separated keywords")
    parser.add_argument("--hours", type=int, default=4, help="Hours to look back")
    parser.add_argument("--source", choices=["google", "twitter", "web", "article", "all"], default="google")
    parser.add_argument("--limit", type=int, default=30, help="Max results")
    parser.add_argument("--trending", action="store_true", help="Use base trending keywords")
    parser.add_argument("--urls", type=str, default=None, help="Comma-separated URLs to scrape article text from")

    args = parser.parse_args()

    # --urls short-circuits everything else: fetch article bodies and exit.
    if args.urls:
        urls = [u.strip() for u in args.urls.split(",")]
        print(json.dumps(scrape_news_articles(urls), indent=2))
        return

    # Explicit --keywords win; otherwise fall back to BASE_KEYWORDS (this
    # covers both --trending and the no-flags case, exactly as before —
    # the old three-step resolution always reduced to this).
    if args.keywords:
        keywords = [k.strip() for k in args.keywords.split(",")]
    else:
        keywords = BASE_KEYWORDS

    results: list[dict] = []
    if args.source in ("google", "all"):
        results.extend(scrape_google_news(keywords, args.hours, args.limit))
    if args.source in ("twitter", "all"):
        results.extend(scrape_twitter(keywords, args.hours, args.limit))
    if args.source in ("web", "all"):
        results.extend(scrape_web_search(keywords, args.limit))

    # Best-effort persistence; a DB failure must never block stdout output.
    if results:
        try:
            # (fix) reuse the module-level `sys` instead of the redundant
            # local `import sys as _sys`.
            sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
            from gossip.db import GossipDB

            GossipDB().insert_news(results)
        except Exception as e:
            log(f"DB write failed: {e}")

    print(json.dumps(results[:args.limit], indent=2))


if __name__ == "__main__":
    main()