pomera-ai-commander 1.2.8 → 1.2.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +113 -89
- package/core/database_schema.py +24 -1
- package/core/database_schema_manager.py +4 -2
- package/core/database_settings_manager.py +25 -2
- package/core/dialog_manager.py +4 -4
- package/core/efficient_line_numbers.py +5 -4
- package/core/load_presets_dialog.py +460 -0
- package/core/mcp/tool_registry.py +327 -0
- package/core/settings_defaults_registry.py +159 -15
- package/mcp.json +1 -1
- package/package.json +2 -1
- package/pomera.py +755 -22
- package/tools/case_tool.py +4 -4
- package/tools/curl_settings.py +12 -1
- package/tools/curl_tool.py +176 -11
- package/tools/tool_loader.py +18 -0
- package/tools/url_content_reader.py +402 -0
- package/tools/web_search.py +522 -0
|
@@ -0,0 +1,522 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Web Search Tool
|
|
4
|
+
===============
|
|
5
|
+
Search the web using multiple search engine APIs.
|
|
6
|
+
|
|
7
|
+
API keys are loaded from encrypted database settings (Pomera settings.db).
|
|
8
|
+
Configure keys in Pomera UI: Select "Web Search" tool and enter API keys.
|
|
9
|
+
|
|
10
|
+
## Usage
|
|
11
|
+
|
|
12
|
+
# Basic search (DuckDuckGo - no API key required)
|
|
13
|
+
python web_search.py "your search query"
|
|
14
|
+
|
|
15
|
+
# Use specific engine
|
|
16
|
+
python web_search.py "query" --engine tavily
|
|
17
|
+
|
|
18
|
+
# Save to JSON file
|
|
19
|
+
python web_search.py "query" --output searches/
|
|
20
|
+
python web_search.py "query" -o searches/ --task seo-research
|
|
21
|
+
|
|
22
|
+
## Engine Selection Priority
|
|
23
|
+
|
|
24
|
+
Default order:
|
|
25
|
+
1. duckduckgo - Free, no API key required (default)
|
|
26
|
+
2. tavily - AI-optimized snippets, 1000 free/month
|
|
27
|
+
3. google - Complex queries, 100 free/day
|
|
28
|
+
4. brave - General search, 2000 free/month
|
|
29
|
+
5. serpapi - Multi-engine, 100 free TOTAL
|
|
30
|
+
6. serper - Fast Google SERP, 2500 free TOTAL
|
|
31
|
+
"""
|
|
32
|
+
|
|
33
|
+
import argparse
|
|
34
|
+
import json
|
|
35
|
+
import os
|
|
36
|
+
import sys
|
|
37
|
+
import re
|
|
38
|
+
from pathlib import Path
|
|
39
|
+
from typing import Dict, List, Optional
|
|
40
|
+
import urllib.request
|
|
41
|
+
import urllib.parse
|
|
42
|
+
import urllib.error
|
|
43
|
+
from datetime import datetime, timedelta
|
|
44
|
+
|
|
45
|
+
# Optional async imports for DuckDuckGo
# NOTE(review): this flag probes for httpx/BeautifulSoup, but the DuckDuckGo
# search path in this file (search_duckduckgo_sync) actually imports the
# `ddgs` package instead — DUCKDUCKGO_AVAILABLE is not referenced by any
# code visible here; confirm external importers before removing.
try:
    import httpx
    from bs4 import BeautifulSoup
    DUCKDUCKGO_AVAILABLE = True
except ImportError:
    # Either dependency missing: mark the optional path unavailable.
    DUCKDUCKGO_AVAILABLE = False
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def get_encrypted_api_key(engine_key: str) -> str:
    """
    Fetch and decrypt the stored API key for a search engine.

    Reads the same settings.db the Pomera UI writes to, so keys entered
    in the UI are picked up here.

    Args:
        engine_key: Engine name (e.g., 'tavily', 'google', 'brave')

    Returns:
        Decrypted API key, or an empty string when the key is not
        configured or anything goes wrong while loading it.
    """
    try:
        from tools.ai_tools import decrypt_api_key
        from core.database_settings_manager import DatabaseSettingsManager

        # Resolve the database location the same way the UI does,
        # falling back to a local file when the helper is unavailable.
        try:
            from core.data_directory import get_database_path
            db_path = get_database_path("settings.db")
        except ImportError:
            db_path = "settings.db"

        manager = DatabaseSettingsManager(db_path=db_path)
        settings = manager.get_tool_settings("Web Search")

        # The settings dict stores keys under '<engine>_api_key'.
        stored = settings.get(f"{engine_key}_api_key", "")
        if stored:
            return decrypt_api_key(stored)
    except Exception as e:
        # Best-effort: a missing module or DB problem degrades to "no key".
        print(f"[DEBUG] Failed to load API key for {engine_key}: {e}")
    return ""
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def get_web_search_setting(engine_key: str, setting: str, default: str = "") -> str:
    """Look up a '<engine>_<setting>' value from the Web Search settings.

    Reads the same database path the Pomera UI uses; returns *default*
    on any failure (missing modules, DB errors, absent key).
    """
    try:
        from core.database_settings_manager import DatabaseSettingsManager

        # Same path-resolution fallback used by get_encrypted_api_key.
        try:
            from core.data_directory import get_database_path
            db_path = get_database_path("settings.db")
        except ImportError:
            db_path = "settings.db"

        manager = DatabaseSettingsManager(db_path=db_path)
        settings = manager.get_tool_settings("Web Search")
        return settings.get(f"{engine_key}_{setting}", default)
    except Exception:
        return default
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
def search_google(query: str, count: int = 5) -> List[Dict]:
    """Search using the Google Custom Search JSON API.

    Args:
        query: Search query string.
        count: Number of results to request (the API caps at 10 per call).

    Returns:
        List of dicts with title/snippet/url/source, or [] when the key
        or CSE ID is missing, or on any API/network error.
    """
    api_key = get_encrypted_api_key("google")
    cse_id = get_web_search_setting("google", "cse_id", "")

    if not api_key:
        print("[ERROR] Google API key not configured. Add in Pomera Web Search settings.")
        return []

    if not cse_id:
        print("[ERROR] Google CSE ID not configured. Add in Pomera Web Search settings.")
        return []

    # Fix: urlencode escapes every parameter. The previous f-string
    # interpolated api_key and cse_id unescaped, which produces a broken
    # request URL if either contains a reserved character.
    params = urllib.parse.urlencode({
        "key": api_key,
        "cx": cse_id,
        "q": query,
        "num": min(count, 10),  # API maximum is 10 results per request
    })
    url = f"https://www.googleapis.com/customsearch/v1?{params}"

    try:
        req = urllib.request.Request(url)
        with urllib.request.urlopen(req, timeout=30) as response:
            data = json.loads(response.read().decode())

        # Google reports quota/auth problems in a 200 body as well.
        if "error" in data:
            print(f"[ERROR] Google API: {data['error']['message']}")
            return []

        results = []
        for item in data.get("items", []):
            results.append({
                "title": item.get("title", ""),
                "snippet": item.get("snippet", ""),
                "url": item.get("link", ""),
                "source": "google"
            })

        return results

    except urllib.error.HTTPError as e:
        print(f"[ERROR] HTTP {e.code}: {e.reason}")
        return []
    except Exception as e:
        print(f"[ERROR] {e}")
        return []
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def search_brave(query: str, count: int = 5) -> List[Dict]:
    """Search using the Brave Search API.

    Returns [] when the key is missing or the request fails.
    """
    api_key = get_encrypted_api_key("brave")

    if not api_key:
        print("[ERROR] Brave API key not configured. Add in Pomera Web Search settings.")
        return []

    # Brave caps a single request at 20 results.
    endpoint = (
        "https://api.search.brave.com/res/v1/web/search"
        f"?q={urllib.parse.quote(query)}&count={min(count, 20)}"
    )

    try:
        request = urllib.request.Request(endpoint)
        request.add_header("Accept", "application/json")
        request.add_header("X-Subscription-Token", api_key)

        with urllib.request.urlopen(request, timeout=30) as response:
            payload = json.loads(response.read().decode())

        return [
            {
                "title": entry.get("title", ""),
                "snippet": entry.get("description", ""),
                "url": entry.get("url", ""),
                "source": "brave",
            }
            for entry in payload.get("web", {}).get("results", [])
        ]

    except urllib.error.HTTPError as e:
        print(f"[ERROR] HTTP {e.code}: {e.reason}")
        return []
    except Exception as e:
        print(f"[ERROR] {e}")
        return []
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
def search_duckduckgo_sync(query: str, count: int = 10) -> List[Dict]:
    """
    Search DuckDuckGo via the `ddgs` package.

    Free and reliable; no API key required. Returns [] when ddgs is not
    installed or the search fails.
    """
    try:
        from ddgs import DDGS
    except ImportError:
        print("[ERROR] DuckDuckGo requires: pip install ddgs")
        return []

    try:
        hits: List[Dict] = []
        with DDGS() as client:
            for entry in client.text(query, max_results=count):
                hits.append({
                    "title": entry.get("title", ""),
                    "snippet": entry.get("body", ""),
                    "url": entry.get("href", ""),
                    "source": "duckduckgo",
                    # 1-based rank in the order results arrive.
                    "position": len(hits) + 1,
                })
        return hits
    except Exception as e:
        print(f"[ERROR] DuckDuckGo search failed: {e}")
        return []
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
def search_serper(query: str, count: int = 5) -> List[Dict]:
    """
    Search via Serper.dev's Google SERP API.

    Fast, reliable Google results; 2500 free queries (no CC required).
    Returns [] when the key is missing or the request fails.
    """
    api_key = get_encrypted_api_key("serper")

    if not api_key:
        print("[ERROR] Serper API key not configured. Add in Pomera Web Search settings.")
        return []

    try:
        # Serper takes a JSON POST body; 100 is its per-request maximum.
        payload = json.dumps({"q": query, "num": min(count, 100)}).encode('utf-8')
        request = urllib.request.Request(
            "https://google.serper.dev/search",
            data=payload,
            headers={
                "X-API-KEY": api_key,
                "Content-Type": "application/json"
            }
        )

        with urllib.request.urlopen(request, timeout=30) as response:
            body = json.loads(response.read().decode())

        hits: List[Dict] = []
        for entry in body.get("organic", [])[:count]:
            hits.append({
                "title": entry.get("title", ""),
                "snippet": entry.get("snippet", ""),
                "url": entry.get("link", ""),
                "source": "serper",
                # Prefer the API's own rank; fall back to arrival order.
                "position": entry.get("position", len(hits) + 1)
            })
        return hits

    except urllib.error.HTTPError as e:
        print(f"[ERROR] Serper HTTP {e.code}: {e.reason}")
        return []
    except Exception as e:
        print(f"[ERROR] Serper: {e}")
        return []
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
def search_tavily(query: str, count: int = 5) -> List[Dict]:
    """
    Search using Tavily's AI-optimized search API.

    Designed for AI agents; 1000 free calls/month. Returns [] when the
    key is missing or the request fails.
    """
    api_key = get_encrypted_api_key("tavily")

    if not api_key:
        print("[ERROR] Tavily API key not configured. Add in Pomera Web Search settings.")
        return []

    try:
        # Tavily expects the API key inside the JSON body, not a header.
        body = json.dumps({
            "api_key": api_key,
            "query": query,
            "max_results": min(count, 20),
            "search_depth": "basic"
        }).encode('utf-8')

        request = urllib.request.Request(
            "https://api.tavily.com/search",
            data=body,
            headers={"Content-Type": "application/json"}
        )

        with urllib.request.urlopen(request, timeout=30) as response:
            payload = json.loads(response.read().decode())

        return [
            {
                "title": entry.get("title", ""),
                "snippet": entry.get("content", ""),
                "url": entry.get("url", ""),
                "source": "tavily",
                "position": rank,
                # Tavily's relevance score, when present.
                "score": entry.get("score", None),
            }
            for rank, entry in enumerate(payload.get("results", [])[:count], 1)
        ]

    except urllib.error.HTTPError as e:
        print(f"[ERROR] Tavily HTTP {e.code}: {e.reason}")
        return []
    except Exception as e:
        print(f"[ERROR] Tavily: {e}")
        return []
|
|
314
|
+
|
|
315
|
+
|
|
316
|
+
def search_serpapi(query: str, count: int = 5) -> List[Dict]:
    """
    Search using SerpApi (supports Google, Bing, Yahoo, etc).

    100 free searches total (one-time credit). Returns [] when the key
    is missing or the request fails.
    """
    api_key = get_encrypted_api_key("serpapi")

    if not api_key:
        print("[ERROR] SerpApi key not configured. Add in Pomera Web Search settings.")
        return []

    query_string = urllib.parse.urlencode({
        "q": query,
        "api_key": api_key,
        "num": min(count, 100),
        "engine": "google"
    })

    try:
        request = urllib.request.Request(f"https://serpapi.com/search.json?{query_string}")

        with urllib.request.urlopen(request, timeout=30) as response:
            payload = json.loads(response.read().decode())

        hits: List[Dict] = []
        for entry in payload.get("organic_results", [])[:count]:
            hits.append({
                "title": entry.get("title", ""),
                "snippet": entry.get("snippet", ""),
                "url": entry.get("link", ""),
                "source": "serpapi",
                "position": entry.get("position", len(hits) + 1)
            })
        return hits

    except urllib.error.HTTPError as e:
        print(f"[ERROR] SerpApi HTTP {e.code}: {e.reason}")
        return []
    except Exception as e:
        print(f"[ERROR] SerpApi: {e}")
        return []
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
def search(query: str, engine: str = "duckduckgo", count: int = 5) -> List[Dict]:
    """
    Dispatch a web search to the named engine.

    Args:
        query: Search query string
        engine: Engine name (default: duckduckgo)
        count: Number of results (default: 5)

    Returns:
        List of result dicts with title, snippet, url, source;
        [] (after printing an error) for an unrecognized engine.
    """
    normalized = engine.lower()
    if normalized == "google":
        return search_google(query, count)
    if normalized in ("duckduckgo", "ddg"):
        return search_duckduckgo_sync(query, count)
    if normalized == "serper":
        return search_serper(query, count)
    if normalized == "tavily":
        return search_tavily(query, count)
    if normalized == "serpapi":
        return search_serpapi(query, count)
    if normalized == "brave":
        return search_brave(query, count)
    print(f"[ERROR] Unknown engine: {normalized}")
    return []
|
|
389
|
+
|
|
390
|
+
|
|
391
|
+
# =============================================================================
|
|
392
|
+
# PERSISTENT STORAGE
|
|
393
|
+
# =============================================================================
|
|
394
|
+
|
|
395
|
+
def slugify(text: str, max_length: int = 40) -> str:
    """Reduce *text* to a lowercase, hyphen-separated filename slug."""
    cleaned = re.sub(r'[\s_]+', '-', text.lower().strip())
    cleaned = re.sub(r'[^a-z0-9-]', '', cleaned)
    cleaned = re.sub(r'-+', '-', cleaned).strip('-')
    # Truncate on a hyphen boundary so words are not cut in half.
    if len(cleaned) > max_length:
        cleaned = cleaned[:max_length].rsplit('-', 1)[0]
    return cleaned or 'search'
|
|
404
|
+
|
|
405
|
+
|
|
406
|
+
def save_results(
    results: List[Dict],
    query: str,
    engine: str,
    output_dir: str,
    task: Optional[str] = None,
    count: int = 5
) -> Path:
    """Write search results to <output_dir>/<YYYY-MM-DD>/<timestamped>.json.

    Args:
        results: Result dicts as returned by the search functions.
        query: The query that produced the results (stored in meta).
        engine: Engine name (stored in meta and the filename).
        output_dir: Base directory; a per-day subfolder is created.
        task: Optional task/plan tag embedded in the filename and meta.
        count: Number of results originally requested.

    Returns:
        Path of the JSON file that was written.
    """
    now = datetime.now()
    stamp = now.strftime('%H-%M-%S')
    query_slug = slugify(query)

    # The optional task tag goes into the filename for later grepping.
    if task:
        filename = f"{stamp}-{engine}-task-{slugify(task, 20)}-{query_slug}.json"
    else:
        filename = f"{stamp}-{engine}-{query_slug}.json"

    target_dir = Path(output_dir) / now.strftime('%Y-%m-%d')
    target_dir.mkdir(parents=True, exist_ok=True)
    destination = target_dir / filename

    meta = {
        "query": query,
        "engine": engine,
        "timestamp": now.isoformat(),
        "count_requested": count,
        "count_returned": len(results),
    }
    if task:
        meta["task"] = task

    document = {"meta": meta, "results": results}

    with open(destination, 'w', encoding='utf-8') as handle:
        json.dump(document, handle, indent=2, ensure_ascii=False)

    return destination
|
|
447
|
+
|
|
448
|
+
|
|
449
|
+
def format_results(results: List[Dict], query: str) -> str:
    """Render search results as a human-readable numbered list."""
    if not results:
        return f"No results found for '{query}'"

    engine_name = results[0]['source']
    lines = [f"\n[SEARCH] Results for: \"{query}\" ({engine_name})\n", "=" * 60]

    for index, item in enumerate(results, 1):
        lines.append(f"\n{index}. {item['title']}")
        # Trim long snippets so the console stays readable.
        text = item['snippet']
        if len(text) > 200:
            text = text[:200] + "..."
        lines.append(f" {text}")
        lines.append(f" URL: {item['url']}")

    lines.append("\n" + "=" * 60)
    return "\n".join(lines)
|
|
468
|
+
|
|
469
|
+
|
|
470
|
+
def main():
    """CLI entry point: parse arguments, run the search, print/save results."""
    parser = argparse.ArgumentParser(
        description="Search the web using multiple search engine APIs",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
python web_search.py "how to respond to blog comments"
python web_search.py "python asyncio tutorial" --engine tavily --count 10
python web_search.py "best cycling routes NYC" --output searches/

API keys are loaded from encrypted database settings (settings.db).
Configure keys in Pomera UI: Select "Web Search" tool and enter API keys.
"""
    )

    parser.add_argument("query", help="Search query")
    parser.add_argument("--engine", "-e",
                        choices=["duckduckgo", "ddg", "tavily", "google", "brave", "serpapi", "serper"],
                        default="duckduckgo",
                        help="Search engine to use (default: duckduckgo)")
    parser.add_argument("--count", "-c", type=int, default=5,
                        help="Number of results (default: 5)")
    parser.add_argument("--json", "-j", action="store_true",
                        help="Output raw JSON to console")
    parser.add_argument("--output", "-o", type=str, metavar="DIR",
                        help="Save results as JSON to DIR (organized by date)")
    parser.add_argument("--task", "-t", type=str, metavar="NAME",
                        help="Tag search with task/plan name (used in filename)")

    args = parser.parse_args()

    results = search(args.query, args.engine, args.count)

    # Save first (if requested), then print. Fix: previously, combining
    # --output with --json printed nothing but the "[OK] Saved" line —
    # the --json flag was silently ignored. Console format is now chosen
    # independently of whether results were saved.
    if args.output:
        filepath = save_results(
            results=results,
            query=args.query,
            engine=args.engine,
            output_dir=args.output,
            task=args.task,
            count=args.count
        )
        print(f"[OK] Saved {len(results)} results to: {filepath}")

    if args.json:
        print(json.dumps(results, indent=2))
    else:
        print(format_results(results, args.query))
|
|
519
|
+
|
|
520
|
+
|
|
521
|
+
# Script entry point: run the CLI only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
|