@plusonelabs/cue 0.0.94 → 0.0.95

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53) hide show
  1. package/bin/cue.js +10 -10
  2. package/bin/windows-bootstrap.ps1 +9 -9
  3. package/bin/windows-runtime-artifact.json +2 -2
  4. package/dist/cli.mjs +1087 -820
  5. package/dist/skills/README.md +199 -0
  6. package/dist/skills/_lib/credentials.py +72 -0
  7. package/dist/skills/activity/SKILL.md +97 -0
  8. package/dist/skills/assistant/SKILL.md +173 -0
  9. package/dist/skills/audio/SKILL.md +132 -0
  10. package/dist/skills/elevenlabs-tts/SKILL.md +76 -0
  11. package/dist/skills/elevenlabs-tts/scripts/speak.ts +226 -0
  12. package/dist/skills/event/SKILL.md +98 -0
  13. package/dist/skills/gemini-search/SKILL.md +52 -0
  14. package/dist/skills/gemini-search/generate.py +195 -0
  15. package/dist/skills/image/SKILL.md +169 -0
  16. package/dist/skills/like/SKILL.md +66 -0
  17. package/dist/skills/listen/SKILL.md +57 -0
  18. package/dist/skills/listen/scripts/listen.sh +74 -0
  19. package/dist/skills/listen/scripts/record.swift +94 -0
  20. package/dist/skills/markdown-to-pdf/SKILL.md +31 -0
  21. package/dist/skills/message/SKILL.md +136 -0
  22. package/dist/skills/mini-apps/SKILL.md +256 -0
  23. package/dist/skills/music/SKILL.md +139 -0
  24. package/dist/skills/nano-banana/SKILL.md +70 -0
  25. package/dist/skills/nano-banana/generate.py +191 -0
  26. package/dist/skills/news/SKILL.md +41 -0
  27. package/dist/skills/notify/SKILL.md +123 -0
  28. package/dist/skills/places/SKILL.md +215 -0
  29. package/dist/skills/posts/SKILL.md +440 -0
  30. package/dist/skills/project/SKILL.md +116 -0
  31. package/dist/skills/pulse/SKILL.md +106 -0
  32. package/dist/skills/reddit/SKILL.md +41 -0
  33. package/dist/skills/seeddance/SKILL.md +81 -0
  34. package/dist/skills/seeddance/generate.py +303 -0
  35. package/dist/skills/seedream/SKILL.md +86 -0
  36. package/dist/skills/seedream/generate.py +301 -0
  37. package/dist/skills/social-graph/SKILL.md +119 -0
  38. package/dist/skills/transcribe/SKILL.md +150 -0
  39. package/dist/skills/transcribe/generate.py +389 -0
  40. package/dist/skills/user/SKILL.md +180 -0
  41. package/dist/skills/veo3/SKILL.md +76 -0
  42. package/dist/skills/veo3/generate.py +339 -0
  43. package/dist/skills/video/SKILL.md +163 -0
  44. package/dist/skills/weather/SKILL.md +101 -0
  45. package/dist/skills/web-fetch/SKILL.md +43 -0
  46. package/dist/skills/web-search/SKILL.md +52 -0
  47. package/dist/skills/z-asr/SKILL.md +58 -0
  48. package/dist/skills/z-asr/generate.py +177 -0
  49. package/dist/skills/z-search/SKILL.md +57 -0
  50. package/dist/skills/z-search/generate.py +189 -0
  51. package/dist/skills/z-tts/SKILL.md +51 -0
  52. package/dist/skills/z-tts/generate.py +172 -0
  53. package/package.json +1 -1
@@ -0,0 +1,76 @@
1
+ ---
2
+ name: elevenlabs-tts
3
+ description: ElevenLabs text-to-speech with streaming playback. Use when user asks to speak, say something aloud, or generate voice audio.
4
+ category: media
5
+ type: hybrid
6
+ env:
7
+ - ELEVENLABS_API_KEY
8
+ metadata:
9
+ scope: first-party
10
+ requires:
11
+ bins: [ffplay]
12
+ ---
13
+
14
+ # ElevenLabs TTS
15
+
16
+ Lightweight ElevenLabs text-to-speech with streaming audio playback.
17
+
18
+ ## Requirements
19
+
20
+ - Bun runtime
21
+ - `ffplay` (from ffmpeg) for streaming playback: `brew install ffmpeg`
22
+ - `ELEVENLABS_API_KEY` environment variable
23
+
24
+ ## Usage
25
+
26
+ Run the script with Bun, replacing `<skill-dir>` with the skill's install directory:
27
+
28
+ ```bash
29
+ # Stream to speakers (default)
30
+ bun run <skill-dir>/scripts/speak.ts "Hello, world!"
31
+
32
+ # Choose a voice by name or ID
33
+ bun run <skill-dir>/scripts/speak.ts -v Lily "Hello there"
34
+ bun run <skill-dir>/scripts/speak.ts -v pFZP5JQG7iQjIQuC4Bku "Hello there"
35
+
36
+ # Save to file instead of playing
37
+ bun run <skill-dir>/scripts/speak.ts -o /tmp/output.mp3 "Hello"
38
+
39
+ # Save to file AND play
40
+ bun run <skill-dir>/scripts/speak.ts -o /tmp/output.mp3 --play "Hello"
41
+
42
+ # List available voices
43
+ bun run <skill-dir>/scripts/speak.ts --list
44
+
45
+ # Adjust speed (0.5-2.0)
46
+ bun run <skill-dir>/scripts/speak.ts --speed 1.2 "Speaking faster"
47
+
48
+ # Adjust stability and style
49
+ bun run <skill-dir>/scripts/speak.ts --stability 0.7 --style 0.3 "Styled speech"
50
+
51
+ # Choose model
52
+ bun run <skill-dir>/scripts/speak.ts -m eleven_flash_v2_5 "Fast model"
53
+ ```
54
+
55
+ ## Models
56
+
57
+ | Model | Description |
58
+ | ------------------------ | ------------------------ |
59
+ | `eleven_v3` | Default, most expressive |
60
+ | `eleven_multilingual_v2` | Stable, multilingual |
61
+ | `eleven_flash_v2_5` | Fast, lower latency |
62
+ | `eleven_turbo_v2_5` | Balanced speed/quality |
63
+
64
+ ## Default Voice
65
+
66
+ Jessica (cgSgspJ2msm6clMCkdW9) — playful, bright, warm. Override with `-v`.
67
+
68
+ ## Voice Aliases
69
+
70
+ | Alias | Voice | Style |
71
+ | --------- | ----------------- | ----------------------------------------------------------------- |
72
+ | `jessica` | Jessica (default) | Playful, Bright, Warm |
73
+ | `hope` | Hope | Clear, Relatable, Charismatic — great for podcasts |
74
+ | `ryan` | Ryan | Casual, Unscripted, Real-Talk — like a friend giving honest takes |
75
+
76
+ Usage: `-v hope` or `-v jessica`
@@ -0,0 +1,226 @@
1
+ #!/usr/bin/env bun
2
+
3
import { spawn } from "child_process";
import { writeFileSync, mkdirSync, existsSync, readFileSync } from "fs";
import { join, dirname } from "path";
import { homedir } from "os";
7
+
8
// ElevenLabs REST API root (v1).
const API_BASE = "https://api.elevenlabs.io/v1";
9
+
10
+ function getApiKey(): string {
11
+ const key = process.env.ELEVENLABS_API_KEY;
12
+ if (!key) {
13
+ console.error("Error: ELEVENLABS_API_KEY not set");
14
+ process.exit(1);
15
+ }
16
+ return key;
17
+ }
18
+
19
+ function parseArgs() {
20
+ const args = process.argv.slice(2);
21
+ const opts: {
22
+ text: string;
23
+ voice: string;
24
+ model: string;
25
+ output?: string;
26
+ play: boolean;
27
+ list: boolean;
28
+ speed: number;
29
+ stability?: number;
30
+ style?: number;
31
+ } = {
32
+ text: "",
33
+ voice: "cgSgspJ2msm6clMCkdW9", // Jessica
34
+ model: "eleven_v3",
35
+ play: true,
36
+ list: false,
37
+ speed: 1.0,
38
+ };
39
+
40
+ for (let i = 0; i < args.length; i++) {
41
+ switch (args[i]) {
42
+ case "--list":
43
+ case "-l":
44
+ opts.list = true;
45
+ break;
46
+ case "--voice":
47
+ case "-v":
48
+ opts.voice = args[++i];
49
+ break;
50
+ case "--model":
51
+ case "-m":
52
+ opts.model = args[++i];
53
+ break;
54
+ case "--output":
55
+ case "-o":
56
+ opts.output = args[++i];
57
+ opts.play = false;
58
+ break;
59
+ case "--speed":
60
+ opts.speed = parseFloat(args[++i]);
61
+ break;
62
+ case "--stability":
63
+ opts.stability = parseFloat(args[++i]);
64
+ break;
65
+ case "--style":
66
+ opts.style = parseFloat(args[++i]);
67
+ break;
68
+ case "--play":
69
+ opts.play = true;
70
+ break;
71
+ default:
72
+ if (!args[i].startsWith("-")) {
73
+ opts.text = opts.text ? `${opts.text} ${args[i]}` : args[i];
74
+ }
75
+ }
76
+ }
77
+ return opts;
78
+ }
79
+
80
+ async function listVoices(apiKey: string) {
81
+ const res = await fetch(`${API_BASE}/voices`, {
82
+ headers: { "xi-api-key": apiKey },
83
+ });
84
+ if (!res.ok) {
85
+ console.error(`Failed to list voices: ${res.status} ${res.statusText}`);
86
+ process.exit(1);
87
+ }
88
+ const data = (await res.json()) as { voices: Array<{ voice_id: string; name: string; category: string; labels?: Record<string, string> }> };
89
+ console.log(`${"ID".padEnd(24)} ${"NAME".padEnd(40)} CATEGORY`);
90
+ for (const v of data.voices) {
91
+ console.log(`${v.voice_id.padEnd(24)} ${v.name.padEnd(40)} ${v.category}`);
92
+ }
93
+ }
94
+
95
+ const VOICE_ALIASES: Record<string, string> = {
96
+ hope: "WAhoMTNdLdMoq1j3wf3I",
97
+ jessica: "cgSgspJ2msm6clMCkdW9",
98
+ ryan: "4e32WqNVWRquDa1OcRYZ",
99
+ };
100
+
101
+ async function resolveVoice(apiKey: string, voice: string): Promise<string> {
102
+ const alias = VOICE_ALIASES[voice.toLowerCase()];
103
+ if (alias) {
104
+ console.error(`Using voice: ${voice} (${alias})`);
105
+ return alias;
106
+ }
107
+ if (/^[a-zA-Z0-9]{20}$/.test(voice)) return voice;
108
+
109
+ const res = await fetch(`${API_BASE}/voices`, {
110
+ headers: { "xi-api-key": apiKey },
111
+ });
112
+ if (!res.ok) return voice;
113
+
114
+ const data = (await res.json()) as { voices: Array<{ voice_id: string; name: string }> };
115
+ const match = data.voices.find(
116
+ (v) => v.name.toLowerCase().startsWith(voice.toLowerCase())
117
+ );
118
+ if (match) {
119
+ console.error(`Using voice: ${match.name} (${match.voice_id})`);
120
+ return match.voice_id;
121
+ }
122
+ return voice;
123
+ }
124
+
125
+ async function streamToSpeaker(audioStream: ReadableStream<Uint8Array>) {
126
+ const player = spawn("ffplay", ["-nodisp", "-autoexit", "-loglevel", "quiet", "-i", "pipe:0"], {
127
+ stdio: ["pipe", "ignore", "ignore"],
128
+ });
129
+
130
+ const reader = audioStream.getReader();
131
+ try {
132
+ while (true) {
133
+ const { done, value } = await reader.read();
134
+ if (done) break;
135
+ player.stdin.write(value);
136
+ }
137
+ player.stdin.end();
138
+ await new Promise<void>((resolve) => player.on("close", resolve));
139
+ } catch (e) {
140
+ player.kill();
141
+ throw e;
142
+ }
143
+ }
144
+
145
+ async function saveToFile(audioStream: ReadableStream<Uint8Array>, path: string) {
146
+ const reader = audioStream.getReader();
147
+ const chunks: Uint8Array[] = [];
148
+ while (true) {
149
+ const { done, value } = await reader.read();
150
+ if (done) break;
151
+ chunks.push(value);
152
+ }
153
+ const buffer = Buffer.concat(chunks);
154
+ const dir = path.substring(0, path.lastIndexOf("/"));
155
+ if (dir && !existsSync(dir)) mkdirSync(dir, { recursive: true });
156
+ writeFileSync(path, buffer);
157
+ console.log(`Saved: ${path} (${buffer.length} bytes)`);
158
+ }
159
+
160
+ async function speak(opts: ReturnType<typeof parseArgs>, apiKey: string) {
161
+ if (!opts.text) {
162
+ console.error("Error: No text provided");
163
+ process.exit(1);
164
+ }
165
+
166
+ const voiceId = await resolveVoice(apiKey, opts.voice);
167
+
168
+ const body: Record<string, unknown> = {
169
+ text: opts.text,
170
+ model_id: opts.model,
171
+ };
172
+
173
+ if (opts.speed !== 1.0 || opts.stability !== undefined || opts.style !== undefined) {
174
+ const settings: Record<string, number> = {};
175
+ if (opts.speed !== 1.0) settings.speed = opts.speed;
176
+ if (opts.stability !== undefined) settings.stability = opts.stability;
177
+ if (opts.style !== undefined) settings.style = opts.style;
178
+ body.voice_settings = settings;
179
+ }
180
+
181
+ const endpoint = opts.play
182
+ ? `${API_BASE}/text-to-speech/${voiceId}/stream`
183
+ : `${API_BASE}/text-to-speech/${voiceId}`;
184
+
185
+ const res = await fetch(endpoint, {
186
+ method: "POST",
187
+ headers: {
188
+ "xi-api-key": apiKey,
189
+ "Content-Type": "application/json",
190
+ },
191
+ body: JSON.stringify(body),
192
+ });
193
+
194
+ if (!res.ok) {
195
+ const err = await res.text();
196
+ console.error(`TTS failed (${res.status}): ${err}`);
197
+ process.exit(1);
198
+ }
199
+
200
+ if (!res.body) {
201
+ console.error("No response body");
202
+ process.exit(1);
203
+ }
204
+
205
+ if (opts.output) {
206
+ await saveToFile(res.body as ReadableStream<Uint8Array>, opts.output);
207
+ } else {
208
+ await streamToSpeaker(res.body as ReadableStream<Uint8Array>);
209
+ }
210
+ }
211
+
212
+ async function main() {
213
+ const apiKey = getApiKey();
214
+ const opts = parseArgs();
215
+
216
+ if (opts.list) {
217
+ await listVoices(apiKey);
218
+ } else {
219
+ await speak(opts, apiKey);
220
+ }
221
+ }
222
+
223
// Launch the CLI; print a readable message and exit non-zero on any failure.
main().catch((e) => {
  console.error(e.message || e);
  process.exit(1);
});
@@ -0,0 +1,98 @@
1
+ ---
2
+ name: event
3
+ description: Create and browse events. Ergonomic wrapper around event posts with structured metadata.
4
+ category: comms
5
+ type: context
6
+ metadata:
7
+ short-description: Event creation and browsing
8
+ scope: first-party
9
+ ---
10
+
11
+ Create structured event posts and browse upcoming events. `cue event` is sugar over the post system — it sets `channel: "events"` and builds `metadata.event` automatically.
12
+
13
+ ## Requirements
14
+
15
+ - `cue` CLI installed and authenticated (`cue` then `/auth`)
16
+
17
+ ## Usage
18
+
19
+ ### Create an event
20
+
21
+ ```bash
22
+ cue event create --title "Event Name" --date "YYYY-MM-DD HH:MM" --location "Place" --link https://...
23
+ ```
24
+
25
+ | Flag | Required | Description |
26
+ | ------------ | -------- | ------------------------------------------------------ |
27
+ | `--title` | Yes | Event name |
28
+ | `--date` | Yes | Date and time (e.g., `"2026-02-10 17:30"` or ISO 8601) |
29
+ | `--location` | No | Venue or address |
30
+ | `--link` | No | URL (auto-converts to LinkMediaPart with OG preview) |
31
+
32
+ You can also add a description as a positional argument:
33
+
34
+ ```bash
35
+ cue event create --title "AI Night" --date "2026-02-10 17:30" "Monthly meetup for AI builders."
36
+ ```
37
+
38
+ ### List events
39
+
40
+ ```bash
41
+ cue event list # Upcoming events from global feed
42
+ cue event list 50 # With limit
43
+ ```
44
+
45
+ ## Examples
46
+
47
+ ```bash
48
+ cue event create --title "AI Tinkerers SF" --date "2026-02-13 18:30" --location "GitHub HQ, SF" --link https://lu.ma/ai-tinkerers-sf
49
+
50
+ cue event create --title "WorkOS AI Night" --date "2026-02-10 17:30" --location "WorkOS HQ" --link https://lu.ma/workos-ai "AI Night with The Pragmatic Engineer. 246 registered."
51
+
52
+ cue event list
53
+ ```
54
+
55
+ ## What It Builds
56
+
57
+ `cue event create` generates a post with:
58
+
59
+ ```json
60
+ {
61
+ "content": [
62
+ {
63
+ "type": "post",
64
+ "title": "AI Night",
65
+ "description": "...",
66
+ "media": [{ "type": "link", "url": "..." }]
67
+ }
68
+ ],
69
+ "visibility": "public",
70
+ "channel": "events",
71
+ "role": "assistant",
72
+ "metadata": {
73
+ "event": {
74
+ "name": "AI Night",
75
+ "start_time": "2026-02-10T17:30:00Z",
76
+ "location": "WorkOS HQ",
77
+ "url": "..."
78
+ }
79
+ }
80
+ }
81
+ ```
82
+
83
+ The backend validates that `metadata.event.name` and `metadata.event.start_time` are present for events-channel posts.
84
+
85
+ ## Use a specific account
86
+
87
+ ```bash
88
+ cue --profile cue:<account> event create --title "Event" --date "2026-02-10 17:30"
89
+ cue --profile cue:<account> event list
90
+ ```
91
+
92
+ ## Troubleshooting
93
+
94
+ | Error | Fix |
95
+ | ------------------------------------------------- | ---------------------------------- |
96
+ | Not authenticated | Run `cue` then `/auth` to log in |
97
+ | Events require metadata.event.name and start_time | Missing --title or --date |
98
+ | Invalid date format | Use "YYYY-MM-DD HH:MM" or ISO 8601 |
@@ -0,0 +1,52 @@
1
+ ---
2
+ name: gemini-search
3
+ description: Web search using Gemini API with Google Search grounding. Use when user asks to search the web, look up current information, or needs real-time facts with source citations.
4
+ category: data
5
+ type: hybrid
6
+ env:
7
+ - GEMINI_API_KEY
8
+ metadata:
9
+ short-description: Web search with Gemini
10
+ scope: first-party
11
+ ---
12
+
13
+ Search the web using Gemini's Google Search grounding for AI-synthesized answers with source citations.
14
+
15
+ ## Requirements
16
+
17
+ - Python 3.10+, `httpx` package
18
+ - `GEMINI_API_KEY` environment variable (or configured via `cue skill env set`)
19
+ - Get your API key at: https://aistudio.google.com/apikey
20
+
21
+ ## Usage
22
+
23
+ ```bash
24
+ # Basic search
25
+ python3 <skill-dir>/generate.py "What happened in tech news today?"
26
+
27
+ # Get more sources
28
+ python3 <skill-dir>/generate.py "latest Python release" --max-sources 10
29
+
30
+ # JSON output with full metadata
31
+ python3 <skill-dir>/generate.py "SpaceX launches 2026" --json
32
+
33
+ # Save results
34
+ python3 <skill-dir>/generate.py "climate summit results" --output results.txt
35
+
36
+ # Use pro model for deeper analysis
37
+ python3 <skill-dir>/generate.py "compare React vs Vue in 2026" --pro
38
+ ```
39
+
40
+ **Features:**
41
+
42
+ - AI-synthesized answer grounded in real-time Google Search results
43
+ - Source citations with URLs and titles
44
+ - Supports follow-up context for multi-turn research
45
+
46
+ ## Troubleshooting
47
+
48
+ | Error | Solution |
49
+ | ------------------------ | ---------------------------------------------------- |
50
+ | "GEMINI_API_KEY not set" | Set with: `/skill env set GEMINI_API_KEY <your-key>` |
51
+ | "API error: 400" | Check query is not empty |
52
+ | "API error: 429" | Rate limited, wait and retry |
@@ -0,0 +1,195 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Gemini Search - Web search using Gemini API with Google Search grounding.
4
+
5
+ Usage:
6
+ python generate.py "What's the latest news on AI?"
7
+ python generate.py "Python 3.14 release date" --json
8
+ python generate.py "compare frameworks" --pro
9
+ """
10
+
11
+ import argparse
12
+ import json
13
+ import os
14
+ import sys
15
+ import time
16
+ from pathlib import Path
17
+
18
+ sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '_lib'))
19
+ from credentials import get_credential
20
+
21
+ try:
22
+ import httpx
23
+ except ImportError:
24
+ print("Error: httpx not installed. Run: pip install httpx")
25
+ sys.exit(1)
26
+
27
+
28
# Gemini REST endpoint and model identifiers.
API_BASE = "https://generativelanguage.googleapis.com/v1beta/models"
FLASH_MODEL = "gemini-2.5-flash"  # default: fast, low-cost
PRO_MODEL = "gemini-2.5-pro"      # --pro: deeper analysis
TIMEOUT = 60.0                    # HTTP timeout in seconds (httpx)
32
+
33
+
34
def search(
    query: str,
    use_pro: bool = False,
    context: str | None = None,
) -> dict:
    """Run a grounded Gemini web search and return the synthesized answer.

    Args:
        query: The question or search query.
        use_pro: Use the Pro model instead of Flash.
        context: Optional prior context for follow-up questions.

    Returns:
        dict with the answer text, model, issued search queries, sources,
        source_count, and processing_time_ms.

    Exits the process on API errors or an empty model response.

    Fix: all status/diagnostic output now goes to stderr so stdout stays
    clean for piping results (especially with --json).
    """
    api_key = get_credential("GEMINI_API_KEY", "gemini-search")
    model = PRO_MODEL if use_pro else FLASH_MODEL
    url = f"{API_BASE}/{model}:generateContent?key={api_key}"

    contents = []
    if context:
        # Seed a synthetic prior turn so the model treats `context` as history.
        contents.append({"parts": [{"text": context}], "role": "user"})
        contents.append({"parts": [{"text": "Understood, I have that context."}], "role": "model"})
    contents.append({"parts": [{"text": query}], "role": "user"})

    payload = {
        "contents": contents,
        "tools": [{"google_search": {}}],
    }

    print(f"Searching with {model}...", file=sys.stderr)

    start_time = time.time()

    with httpx.Client(timeout=TIMEOUT) as client:
        response = client.post(
            url,
            headers={"Content-Type": "application/json"},
            json=payload,
        )

    if response.status_code != 200:
        print(f"Error: API returned {response.status_code}", file=sys.stderr)
        try:
            err = response.json()
            msg = err.get("error", {}).get("message", response.text[:500])
            print(f" {msg}", file=sys.stderr)
        except Exception:
            print(response.text[:500], file=sys.stderr)
        sys.exit(1)

    result = response.json()

    processing_time = int((time.time() - start_time) * 1000)

    candidates = result.get("candidates", [])
    if not candidates:
        print("Error: No response from model", file=sys.stderr)
        sys.exit(1)

    candidate = candidates[0]
    content = candidate.get("content", {})
    parts = content.get("parts", [])
    text = "".join(p.get("text", "") for p in parts)

    grounding = candidate.get("groundingMetadata", {})
    search_queries = grounding.get("webSearchQueries", [])
    chunks = grounding.get("groundingChunks", [])

    # Keep only grounding chunks that actually carry a web URI.
    sources = []
    for chunk in chunks:
        web = chunk.get("web", {})
        if web.get("uri"):
            sources.append({
                "title": web.get("title", ""),
                "url": web.get("uri", ""),
            })

    return {
        "query": query,
        "answer": text,
        "model": model,
        "search_queries": search_queries,
        "sources": sources,
        "source_count": len(sources),
        "processing_time_ms": processing_time,
    }
111
+
112
+
113
def format_results(data: dict) -> str:
    """Render a search result dict as human-readable text.

    Emits the answer, then a deduplicated numbered source list, then the
    search queries Gemini issued.

    Fix: numbering previously came from enumerate() over the raw list, so
    skipping a duplicate URL left gaps (e.g. "1., 3."). Numbers now count
    only the unique sources actually printed.
    """
    lines = [data["answer"], ""]

    sources = data["sources"]
    if sources:
        lines.append("--- Sources ---")
        seen = set()
        printed = 0  # count of unique sources emitted so far
        for src in sources:
            url = src["url"]
            if url in seen:
                continue  # duplicate URL: skip without leaving a numbering gap
            seen.add(url)
            printed += 1
            title = src["title"] or "Untitled"
            lines.append(f"{printed}. {title}")
            lines.append(f"   {url}")
        lines.append("")

    if data["search_queries"]:
        lines.append(f"Search queries: {', '.join(data['search_queries'])}")

    return "\n".join(lines)
136
+
137
+
138
def main():
    """CLI entry point: parse arguments, run the search, emit results.

    Fix: the "Saved:" notice and the trailing "N sources in Xms" summary now
    go to stderr; previously they were printed to stdout, which corrupted
    piped output (e.g. `... --json > out.json` produced invalid JSON).
    """
    parser = argparse.ArgumentParser(
        description="Web search using Gemini with Google Search grounding",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s "What happened in tech news today?"
  %(prog)s "latest Python release" --json
  %(prog)s "compare React vs Vue" --pro
  %(prog)s "follow up question" --context "previous context here"
""",
    )
    parser.add_argument("query", help="Search query or question")
    parser.add_argument(
        "--pro",
        action="store_true",
        help="Use Gemini 2.5 Pro (default: Flash)",
    )
    parser.add_argument(
        "--context", "-c",
        help="Previous context for follow-up questions",
    )
    parser.add_argument(
        "--output", "-o",
        help="Output file path (default: print to stdout)",
    )
    parser.add_argument(
        "--json", "-j",
        action="store_true",
        help="Output JSON with full metadata",
    )

    args = parser.parse_args()

    result = search(
        query=args.query,
        use_pro=args.pro,
        context=args.context,
    )

    # Serialize once; both branches produce a single output string.
    if args.json:
        output_content = json.dumps(result, indent=2, ensure_ascii=False)
    else:
        output_content = format_results(result)

    if args.output:
        output_path = Path(args.output)
        output_path.parent.mkdir(parents=True, exist_ok=True)
        output_path.write_text(output_content, encoding="utf-8")
        print(f"Saved: {output_path}", file=sys.stderr)
    else:
        print(output_content)

    # Diagnostic summary: keep it off stdout so piped --json stays valid.
    print(f"\n{result['source_count']} sources in {result['processing_time_ms']}ms", file=sys.stderr)
192
+
193
+
194
# Run the CLI only when executed directly (allows importing without side effects).
if __name__ == "__main__":
    main()