eightstatecli 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eightstatecli-0.4.0.dist-info/METADATA +177 -0
- eightstatecli-0.4.0.dist-info/RECORD +18 -0
- eightstatecli-0.4.0.dist-info/WHEEL +4 -0
- eightstatecli-0.4.0.dist-info/entry_points.txt +2 -0
- eightstatecli-0.4.0.dist-info/licenses/LICENSE +21 -0
- escli/__init__.py +837 -0
- escli/__main__.py +5 -0
- escli/commands/__init__.py +0 -0
- escli/commands/audio.py +438 -0
- escli/commands/docs.py +354 -0
- escli/commands/research.py +597 -0
- escli/commands/search.py +286 -0
- escli/commands/social.py +243 -0
- escli/commands/usage.py +428 -0
- escli/services/__init__.py +0 -0
- escli/services/credentials.py +117 -0
- escli/services/describe.py +186 -0
- escli/services/output.py +168 -0
escli/commands/search.py
ADDED
|
@@ -0,0 +1,286 @@
|
|
|
1
|
+
"""
|
|
2
|
+
escli search / fetch — web search and URL extraction via Parallel.ai.
|
|
3
|
+
|
|
4
|
+
Uses the free Parallel Search MCP endpoint (no API key required).
|
|
5
|
+
Falls back to the REST API with a key from gate if available.
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
escli search "query about something" Web search
|
|
9
|
+
escli search "query" --queries "q1" "q2" Multiple search queries
|
|
10
|
+
escli fetch <url> [url2 ...] Extract content from URLs
|
|
11
|
+
escli fetch <url> --objective "what to find" Focused extraction
|
|
12
|
+
escli fetch <url> --full Full page content
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import argparse
|
|
16
|
+
import json
|
|
17
|
+
import sys
|
|
18
|
+
import time
|
|
19
|
+
import uuid
|
|
20
|
+
|
|
21
|
+
from ..services.credentials import get_key_for_service
|
|
22
|
+
|
|
23
|
+
# Free Parallel Search MCP endpoint (JSON-RPC over HTTP, no API key required).
MCP_ENDPOINT = "https://search.parallel.ai/mcp"
# Keyed Parallel REST API base URL, used when an API key is available.
REST_ENDPOINT = "https://api.parallel.ai/v1"
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def _session_id() -> str:
|
|
28
|
+
"""Stable session ID for rate limiting."""
|
|
29
|
+
return uuid.uuid4().hex
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def _call_mcp(tool_name: str, arguments: dict) -> dict:
    """Call a tool on the Parallel MCP endpoint via JSON-RPC over HTTP.

    Args:
        tool_name: Name of the MCP tool to invoke (e.g. ``web_search``).
        arguments: Tool arguments, forwarded as the JSON-RPC params.

    Returns:
        The tool result as a dict.  MCP tool results arrive as a
        ``content`` array; the first text item is JSON-decoded when
        possible, otherwise wrapped as ``{"text": ...}``.

    Raises:
        RuntimeError: If the server returns a JSON-RPC error or an
            empty event-stream body.
        httpx.HTTPStatusError: On a non-2xx HTTP response.
    """
    import httpx

    payload = {
        "jsonrpc": "2.0",
        "id": 1,
        "method": "tools/call",
        "params": {
            "name": tool_name,
            "arguments": arguments,
        },
    }

    resp = httpx.post(
        MCP_ENDPOINT,
        json=payload,
        headers={
            "Content-Type": "application/json",
            # Streamable-HTTP MCP servers may answer either way.
            "Accept": "application/json, text/event-stream",
        },
        timeout=60,
    )
    resp.raise_for_status()

    # We advertise text/event-stream above, so the server is allowed to
    # respond with SSE; resp.json() would raise on such a body.  In that
    # case take the last `data:` event as the JSON-RPC response.
    content_type = resp.headers.get("content-type", "")
    if "text/event-stream" in content_type:
        data = None
        for line in resp.text.splitlines():
            if line.startswith("data:"):
                data = json.loads(line[len("data:"):].strip())
        if data is None:
            raise RuntimeError("empty event-stream response from MCP endpoint")
    else:
        data = resp.json()

    if "error" in data:
        raise RuntimeError(data["error"].get("message", "MCP error"))

    result = data.get("result", {})
    # MCP tool results come as content array
    content = result.get("content", [])
    for item in content:
        if item.get("type") == "text":
            try:
                return json.loads(item["text"])
            except json.JSONDecodeError:
                return {"text": item["text"]}

    return result
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _call_rest_search(objective: str, queries: list[str], api_key: str) -> dict:
    """POST a search request to the Parallel REST API and return its JSON body."""
    import httpx

    request_body = {"objective": objective, "search_queries": queries}
    request_headers = {"Content-Type": "application/json", "x-api-key": api_key}

    response = httpx.post(
        f"{REST_ENDPOINT}/search",
        json=request_body,
        headers=request_headers,
        timeout=60,
    )
    response.raise_for_status()
    return response.json()
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def _call_rest_extract(urls: list[str], objective: str | None, full: bool, api_key: str) -> dict:
    """POST an extract request to the Parallel REST API and return its JSON body.

    Optional fields are only included in the request when set.
    """
    import httpx

    request_body: dict = {"urls": urls}
    if objective:
        request_body["objective"] = objective
    if full:
        request_body["full_content"] = True

    response = httpx.post(
        f"{REST_ENDPOINT}/extract",
        json=request_body,
        headers={"Content-Type": "application/json", "x-api-key": api_key},
        timeout=60,
    )
    response.raise_for_status()
    return response.json()
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
# ── Commands ─────────────────────────────────────────────────────
|
|
109
|
+
|
|
110
|
+
def cmd_search(args):
    """Web search via Parallel.

    Tries the keyed REST API when a Parallel key is configured, otherwise
    falls back to the free MCP endpoint.

    Args:
        args: Parsed CLI namespace.  Reads ``objective`` (list of words),
            ``queries`` (optional explicit query list), plus the global
            ``json`` and ``quiet`` flags.

    Returns:
        Process exit code: 0 on success (including no results),
        1 on a missing objective or a failed request.
    """
    # nargs="+" yields a list of words; join into one free-text objective.
    objective = " ".join(args.objective)
    if not objective:
        print(" ✗ search objective required", file=sys.stderr)
        return 1

    # Without explicit --queries, the objective doubles as the single query.
    queries = args.queries if args.queries else [objective]
    t0 = time.time()

    if not args.quiet:
        print(f" ▸ searching: {objective}", file=sys.stderr)

    # Try REST API with key first (better quality), fall back to MCP (free)
    api_key = get_key_for_service("parallel", "PARALLEL_API_KEY")

    try:
        if api_key:
            data = _call_rest_search(objective, queries, api_key)
        else:
            session = _session_id()
            data = _call_mcp("web_search", {
                "objective": objective,
                "search_queries": queries,
                "session_id": session,
            })
    except Exception as e:
        # Report the failure in whichever format the caller asked for.
        if args.json:
            print(json.dumps({"success": False, "error": str(e)}))
        else:
            print(f" ✗ search failed: {e}", file=sys.stderr)
        return 1

    elapsed = round(time.time() - t0, 1)
    results = data.get("results", [])

    # Machine-readable output: dump everything and stop.
    if args.json:
        print(json.dumps({
            "success": True,
            "elapsed_seconds": elapsed,
            "results": results,
            "count": len(results),
        }))
        return 0

    if not results:
        print(" No results found.", file=sys.stderr)
        return 0

    # Human-readable output: title, URL, and up to two short excerpts each.
    for r in results:
        title = r.get("title", "")
        url = r.get("url", "")
        excerpts = r.get("excerpts", [])

        print(f"\n {title}")
        print(f" {url}")
        for ex in excerpts[:2]:
            # Truncate long excerpts
            text = ex.strip().replace("\n", " ")[:200]
            print(f" {text}")

    if not args.quiet:
        print(f"\n {len(results)} results · {elapsed}s", file=sys.stderr)

    return 0
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def cmd_fetch(args):
    """Extract content from URLs via Parallel.

    Tries the keyed REST API when a Parallel key is configured, otherwise
    falls back to the free MCP endpoint.

    Args:
        args: Parsed CLI namespace.  Reads ``urls`` (list), ``objective``
            (optional focus string), ``full`` (bool), plus the global
            ``json`` and ``quiet`` flags.

    Returns:
        Process exit code: 0 on success, 1 on missing URLs or a failed
        request.
    """
    urls = args.urls
    if not urls:
        print(" ✗ at least one URL required", file=sys.stderr)
        return 1

    objective = args.objective
    # getattr guard: --full is only defined on the fetch subparser.
    full = getattr(args, "full", False)
    t0 = time.time()

    if not args.quiet:
        print(f" ▸ fetching {len(urls)} URL(s)...", file=sys.stderr)

    api_key = get_key_for_service("parallel", "PARALLEL_API_KEY")

    try:
        if api_key:
            data = _call_rest_extract(urls, objective, full, api_key)
        else:
            session = _session_id()
            # Optional fields are only sent when set.
            mcp_args: dict = {"urls": urls, "session_id": session}
            if objective:
                mcp_args["objective"] = objective
            if full:
                mcp_args["full_content"] = True
            data = _call_mcp("web_fetch", mcp_args)
    except Exception as e:
        # Report the failure in whichever format the caller asked for.
        if args.json:
            print(json.dumps({"success": False, "error": str(e)}))
        else:
            print(f" ✗ fetch failed: {e}", file=sys.stderr)
        return 1

    elapsed = round(time.time() - t0, 1)
    results = data.get("results", [])

    # Machine-readable output: dump everything and stop.
    if args.json:
        print(json.dumps({
            "success": True,
            "elapsed_seconds": elapsed,
            "results": results,
            "count": len(results),
        }))
        return 0

    # Human-readable output: one section per page.
    for r in results:
        title = r.get("title", "")
        url = r.get("url", "")
        full_content = r.get("full_content")
        excerpts = r.get("excerpts", [])

        if title:
            print(f"\n # {title}")
        print(f" {url}\n")

        if full_content:
            print(full_content)
        else:
            for ex in excerpts:
                print(ex)
                # Blank separator after each excerpt — NOTE(review): the
                # original formatting was ambiguous here; confirm whether
                # the blank line belongs inside or after the loop.
                print()

    if not args.quiet:
        print(f" {len(results)} page(s) · {elapsed}s", file=sys.stderr)

    return 0
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
# ── Parser ───────────────────────────────────────────────────────
|
|
247
|
+
|
|
248
|
+
def register(subparsers):
    """Register search and fetch subcommands.

    Args:
        subparsers: The argparse subparsers object from the main CLI.

    Returns:
        The ``search`` subparser (the ``fetch`` parser is registered but
        not returned).
    """
    # RawDescriptionHelpFormatter preserves the epilog's manual layout.
    F = argparse.RawDescriptionHelpFormatter

    # search
    search_p = subparsers.add_parser(
        "search", aliases=["s"], help="Web search (Parallel.ai)",
        formatter_class=F,
        epilog="""examples:
  escli search "latest cloudflare workers features"
  escli search "react server components" --queries "RSC tutorial" "RSC vs SSR"
  escli --json search "python web frameworks 2026"

Uses the free Parallel Search MCP. No API key needed.
Add a Parallel API key for higher rate limits.
""")
    search_p.add_argument("objective", nargs="+", help="What you're searching for")
    search_p.add_argument("--queries", nargs="+", default=None, metavar="Q",
                          help="Specific search queries (default: uses objective)")
    search_p.set_defaults(func=cmd_search)

    # fetch
    fetch_p = subparsers.add_parser(
        "fetch", aliases=["f"], help="Extract URL content (Parallel.ai)",
        formatter_class=F,
        epilog="""examples:
  escli fetch https://docs.parallel.ai/search/search-quickstart
  escli fetch https://example.com --objective "pricing information"
  escli fetch https://example.com --full
  escli --json fetch https://example.com https://other.com

Uses the free Parallel Extract MCP. No API key needed.
""")
    fetch_p.add_argument("urls", nargs="+", help="URLs to extract content from")
    fetch_p.add_argument("--objective", default=None, help="Focus extraction on this topic")
    fetch_p.add_argument("--full", action="store_true", help="Return full page content instead of excerpts")
    fetch_p.set_defaults(func=cmd_fetch)

    return search_p
|
escli/commands/social.py
ADDED
|
@@ -0,0 +1,243 @@
|
|
|
1
|
+
"""
|
|
2
|
+
escli social — social media search via Tavily API.
|
|
3
|
+
|
|
4
|
+
Searches across Reddit, X/Twitter, TikTok, LinkedIn, Instagram,
|
|
5
|
+
Facebook, or all combined for real-world opinions and discussions.
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
escli social "query" Search all platforms
|
|
9
|
+
escli social "query" --platform reddit Reddit only
|
|
10
|
+
escli social "query" --platform x,linkedin Multiple platforms
|
|
11
|
+
escli social "query" --time week --raw Last week, full content
|
|
12
|
+
escli social "query" --answer Include AI summary
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import argparse
|
|
16
|
+
import json
|
|
17
|
+
import sys
|
|
18
|
+
import time
|
|
19
|
+
|
|
20
|
+
from ..services.credentials import get_key_for_service
|
|
21
|
+
|
|
22
|
+
# Tavily REST API base URL.
TAVILY_API = "https://api.tavily.com"

# Maps a --platform name to the domains Tavily should restrict results to.
# "x" and "twitter" are aliases for the same pair of domains.
PLATFORM_DOMAINS = {
    "reddit": ["reddit.com"],
    "x": ["x.com", "twitter.com"],
    "twitter": ["x.com", "twitter.com"],
    "tiktok": ["tiktok.com"],
    "linkedin": ["linkedin.com"],
    "instagram": ["instagram.com"],
    "facebook": ["facebook.com"],
    "youtube": ["youtube.com"],
    # Default preset used when no --platform is given.
    # NOTE(review): youtube.com is absent from "combined" — confirm whether
    # that is intentional.
    "combined": ["reddit.com", "x.com", "twitter.com", "tiktok.com",
                 "linkedin.com", "instagram.com", "facebook.com"],
}

# Accepted values for --platform, --time, and --depth respectively.
VALID_PLATFORMS = list(PLATFORM_DOMAINS.keys())
VALID_TIME_RANGES = ["day", "week", "month", "year"]
VALID_DEPTHS = ["basic", "advanced", "fast", "ultra-fast"]
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _get_api_key() -> str:
    """Return the configured Tavily API key, exiting if none is available."""
    api_key = get_key_for_service("tavily", "TAVILY_API_KEY")
    if api_key:
        return api_key
    print(" ✗ no Tavily API key. Set TAVILY_API_KEY or add one via the dashboard.", file=sys.stderr)
    sys.exit(1)
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _resolve_domains(platform_str: str | None) -> list[str]:
    """Translate a comma-separated platform list into Tavily include-domains.

    Falls back to the "combined" preset when no platform was given.
    Exits the process with status 2 on an unrecognized platform name.
    """
    if not platform_str:
        return PLATFORM_DOMAINS["combined"]

    resolved: list[str] = []
    for name in (part.strip() for part in platform_str.lower().split(",")):
        try:
            resolved.extend(PLATFORM_DOMAINS[name])
        except KeyError:
            print(f" ✗ unknown platform: {name}", file=sys.stderr)
            print(f" valid: {', '.join(VALID_PLATFORMS)}", file=sys.stderr)
            sys.exit(2)

    # Order-preserving dedupe: dict keys keep insertion order.
    return list(dict.fromkeys(resolved))
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def cmd_social(args):
    """Search social media platforms via the Tavily search API.

    Args:
        args: Parsed CLI namespace.  Reads ``query`` (list of words) plus
            the optional ``platform``, ``time``, ``max_results``, ``depth``,
            ``answer``, ``raw``, ``images``, ``country`` options and the
            global ``json`` / ``quiet`` flags.

    Returns:
        Process exit code: 0 on success, 1 on a missing query or failed
        request.  (``_resolve_domains`` may exit with 2 on a bad platform.)

    Fixes over the original: the ``images`` array Tavily returns when
    ``--images`` is set was silently dropped from both output modes — it
    is now surfaced; an unused ``score`` local was removed.
    """
    import httpx

    api_key = _get_api_key()
    query = " ".join(args.query)
    if not query:
        print(" ✗ search query required", file=sys.stderr)
        return 1

    platform = getattr(args, "platform", None)
    domains = _resolve_domains(platform)
    t0 = time.time()

    if not args.quiet:
        platform_label = platform or "all platforms"
        print(f" ▸ searching {platform_label}: {query}", file=sys.stderr)

    # Build request
    body: dict = {
        "query": query,
        "include_domains": domains,
        "max_results": getattr(args, "max_results", 10) or 10,
        "search_depth": getattr(args, "depth", "advanced") or "advanced",
        "include_answer": getattr(args, "answer", False),
        "include_raw_content": getattr(args, "raw", False),
        "include_images": getattr(args, "images", False),
        "include_usage": True,
    }

    # Optional request fields — only sent when provided.
    time_range = getattr(args, "time", None)
    if time_range:
        body["time_range"] = time_range

    country = getattr(args, "country", None)
    if country:
        body["country"] = country

    try:
        resp = httpx.post(
            f"{TAVILY_API}/search",
            json=body,
            headers={
                "Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json",
            },
            timeout=30,
        )
        resp.raise_for_status()
        data = resp.json()
    except httpx.HTTPStatusError as e:
        # Prefer Tavily's structured error detail when the body parses.
        error_msg = str(e)
        try:
            error_msg = e.response.json().get("detail", {}).get("error", str(e))
        except Exception:
            pass
        if args.json:
            print(json.dumps({"ok": False, "data": None, "error": {"code": f"tavily.{e.response.status_code}", "message": error_msg}, "meta": {}}))
        else:
            print(f" ✗ {error_msg}", file=sys.stderr)
        return 1
    except Exception as e:
        # Transport-level failures (DNS, timeout, ...) are flagged retryable.
        if args.json:
            print(json.dumps({"ok": False, "data": None, "error": {"code": "tavily.network", "message": str(e), "retryable": True}, "meta": {}}))
        else:
            print(f" ✗ {e}", file=sys.stderr)
        return 1

    elapsed = round(time.time() - t0, 1)
    results = data.get("results", [])
    answer = data.get("answer")
    # Tavily returns an images array when include_images was requested.
    images = data.get("images", [])

    if args.json:
        print(json.dumps({
            "ok": True,
            "data": {
                "answer": answer,
                "results": [{
                    "title": r.get("title", ""),
                    "url": r.get("url", ""),
                    "content": r.get("content", ""),
                    "score": r.get("score"),
                    "raw_content": r.get("raw_content") if getattr(args, "raw", False) else None,
                } for r in results],
                "images": images,
                "platform": platform or "combined",
                "query": query,
            },
            "error": None,
            "meta": {
                "elapsed_seconds": elapsed,
                "result_count": len(results),
                "credits": data.get("usage", {}).get("credits"),
            },
        }))
        return 0

    # Human output
    if answer:
        print(f"\n {answer}\n")

    for r in results:
        title = r.get("title", "")
        url = r.get("url", "")
        content = r.get("content", "")

        # Detect platform from URL
        platform_tag = ""
        for p, doms in PLATFORM_DOMAINS.items():
            # Skip the aggregate preset and the "twitter" alias of "x".
            if p in ("combined", "twitter"):
                continue
            if any(d in url for d in doms):
                platform_tag = p
                break

        print(f" {f'[{platform_tag}] ' if platform_tag else ''}{title}")
        print(f" {url}")
        if content:
            text = content.strip().replace("\n", " ")[:200]
            print(f" {text}")
        print()

    # Surface image URLs when --images was requested.
    if getattr(args, "images", False) and images:
        print(" images:")
        for img in images:
            # Tavily may return plain URL strings or {"url": ...} objects.
            print(f" {img.get('url') if isinstance(img, dict) else img}")
        print()

    if not args.quiet:
        credits = data.get("usage", {}).get("credits", "?")
        print(f" {len(results)} results · {elapsed}s · {credits} credit(s)", file=sys.stderr)

    return 0
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
# ── Parser ───────────────────────────────────────────────────────
|
|
198
|
+
|
|
199
|
+
def register(subparsers):
    """Register the social subcommand.

    Args:
        subparsers: The argparse subparsers object from the main CLI.

    Returns:
        The configured ``social`` subparser.
    """
    # RawDescriptionHelpFormatter preserves the epilog's manual layout.
    F = argparse.RawDescriptionHelpFormatter

    p = subparsers.add_parser(
        "social", help="Social media search (Tavily)",
        formatter_class=F,
        epilog="""platforms:
  reddit, x, tiktok, linkedin, instagram, facebook, youtube, combined (default)

examples:
  escli social "what are people saying about Claude Code"
  escli social "AI trends 2026" --platform reddit
  escli social "product reviews" --platform tiktok,instagram --time week
  escli social "company sentiment" --platform linkedin,x --answer
  escli social "breaking news" --platform x --time day --raw --max-results 15
  escli --json --quiet social "brand perception" --platform reddit,x

time ranges:
  day     Last 24 hours (breaking news, reactions)
  week    Last 7 days
  month   Last 30 days (broader trends)
  year    Last 12 months
""")

    p.add_argument("query", nargs="+", help="What to search for on social media")
    p.add_argument("--platform", "-p", default=None, metavar="P",
                   help=f"Platform(s) to search, comma-separated ({', '.join(VALID_PLATFORMS)})")
    p.add_argument("--time", "-t", default=None, choices=VALID_TIME_RANGES, metavar="T",
                   help="Time range: day, week, month, year")
    p.add_argument("--max-results", "-n", type=int, default=10, metavar="N",
                   help="Max results (1-20, default: 10)")
    p.add_argument("--depth", default="advanced", choices=VALID_DEPTHS,
                   help="Search depth (default: advanced)")
    p.add_argument("--answer", "-a", action="store_true",
                   help="Include AI-synthesized answer")
    p.add_argument("--raw", action="store_true",
                   help="Include full post content (raw markdown)")
    p.add_argument("--images", action="store_true",
                   help="Include images from results")
    p.add_argument("--country", default=None, metavar="CC",
                   help="Country for geo-targeted results (e.g. 'united states')")
    p.set_defaults(func=cmd_social)

    return p
|