@doufunao123/ai-search 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +53 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +302 -0
- package/package.json +36 -0
package/README.md
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
# @doufunao123/ai-search
|
|
2
|
+
|
|
3
|
+
AI-powered web search CLI (npm package). Uses AI models with native web search capabilities via the xiaomao proxy.
|
|
4
|
+
|
|
5
|
+
## Install
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install -g @doufunao123/ai-search
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Setup
|
|
12
|
+
|
|
13
|
+
```bash
|
|
14
|
+
ai-search config set api_key your_api_key
|
|
15
|
+
# Or
|
|
16
|
+
export AI_SEARCH_KEY=your_api_key
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
## Usage
|
|
20
|
+
|
|
21
|
+
```bash
|
|
22
|
+
# Simple search
|
|
23
|
+
ai-search "latest AI news"
|
|
24
|
+
|
|
25
|
+
# Choose model
|
|
26
|
+
ai-search "query" --model grok-4.1-fast
|
|
27
|
+
|
|
28
|
+
# Complex search with query splitting
|
|
29
|
+
ai-search "comprehensive analysis of X" --split 3
|
|
30
|
+
|
|
31
|
+
# JSON output (default in non-TTY)
|
|
32
|
+
ai-search "query" --json
|
|
33
|
+
|
|
34
|
+
# Human-readable output
|
|
35
|
+
ai-search "query" --human
|
|
36
|
+
|
|
37
|
+
# List models
|
|
38
|
+
ai-search models
|
|
39
|
+
|
|
40
|
+
# Show config
|
|
41
|
+
ai-search config show
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
## Configuration
|
|
45
|
+
|
|
46
|
+
Config file: `~/.config/ai-search/config.json`
|
|
47
|
+
|
|
48
|
+
Environment variables (override config):
|
|
49
|
+
- `AI_SEARCH_KEY` — API key
|
|
50
|
+
- `AI_SEARCH_URL` — API base URL (default: `https://grok.xiaomao.chat`)
|
|
51
|
+
- `AI_SEARCH_MODEL` — Default search model (default: `grok-4.1-expert`)
|
|
52
|
+
|
|
53
|
+
Shares credentials with the Rust CLI (`~/.config/ai-search/config.toml`).
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,302 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
// src/index.ts
|
|
4
|
+
import { Command } from "commander";
|
|
5
|
+
|
|
6
|
+
// src/meta.ts
|
|
7
|
+
// Version string reported by `ai-search --version`.
var CLI_VERSION = "0.1.0";
// Default base URL of the OpenAI-compatible xiaomao proxy (overridable via AI_SEARCH_URL).
var DEFAULT_API_URL = "https://grok.xiaomao.chat";
// Default model used for the actual web search requests (overridable via AI_SEARCH_MODEL).
var DEFAULT_SEARCH_MODEL = "grok-4.1-expert";
// Default model used for query splitting and result merging (see AIClient.splitQuery / mergeResults).
var DEFAULT_ANALYSIS_MODEL = "grok-4.1-fast";
// Known search model ids, printed by the `models` subcommand.
var SEARCH_MODELS = [
  "grok-4.1-expert",
  "grok-4.1-fast",
  "grok-4.20-beta",
  "grok-4",
  "grok-4-thinking"
];
|
|
18
|
+
|
|
19
|
+
// src/config.ts
|
|
20
|
+
import { existsSync, readFileSync, mkdirSync, writeFileSync, chmodSync } from "fs";
|
|
21
|
+
import { homedir } from "os";
|
|
22
|
+
import { join, dirname } from "path";
|
|
23
|
+
// Per-user configuration directory: ~/.config/ai-search
function configDir() {
  const home = homedir();
  return join(home, ".config", "ai-search");
}
|
|
26
|
+
// Path of this CLI's JSON config file inside the config directory.
function configPath() {
  const dir = configDir();
  return join(dir, "config.json");
}
|
|
29
|
+
// Resolve the effective configuration, in increasing priority:
//   1. built-in defaults
//   2. ~/.config/ai-search/config.json (if readable)
//   3. api_key only: the Rust CLI's sibling config.toml (shared credentials)
//   4. AI_SEARCH_* environment variables
function loadConfig() {
  let config = {
    api_url: DEFAULT_API_URL,
    api_key: "",
    search_model: DEFAULT_SEARCH_MODEL,
    analysis_model: DEFAULT_ANALYSIS_MODEL,
    max_split: 1,
    timeout_ms: 120000
  };
  const jsonPath = configPath();
  if (existsSync(jsonPath)) {
    try {
      const parsed = JSON.parse(readFileSync(jsonPath, "utf8"));
      config = { ...config, ...parsed };
    } catch {
      // Best effort: a corrupt/unreadable config file falls back to defaults.
    }
  }
  if (!config.api_key) {
    // Shared-credential fallback: borrow api_key from the Rust CLI's TOML config.
    const tomlPath = join(configDir(), "config.toml");
    if (existsSync(tomlPath)) {
      try {
        const toml = readFileSync(tomlPath, "utf8");
        const keyMatch = toml.match(/^api_key\s*=\s*"([^"]*)"/m);
        if (keyMatch?.[1]) config.api_key = keyMatch[1];
      } catch {
        // Missing/unreadable TOML: leave the key empty.
      }
    }
  }
  // Environment variables override everything read from disk.
  const env = process.env;
  if (env.AI_SEARCH_URL) config.api_url = env.AI_SEARCH_URL;
  if (env.AI_SEARCH_KEY) config.api_key = env.AI_SEARCH_KEY;
  if (env.AI_SEARCH_MODEL) config.search_model = env.AI_SEARCH_MODEL;
  if (env.AI_SEARCH_ANALYSIS_MODEL) config.analysis_model = env.AI_SEARCH_ANALYSIS_MODEL;
  return config;
}
|
|
64
|
+
// Persist the given config keys, merging them over any existing file content.
// The file holds the API key, so it is written with owner-only permissions.
function saveConfig(config) {
  const path = configPath();
  mkdirSync(dirname(path), { recursive: true });
  let current = {};
  if (existsSync(path)) {
    try {
      current = JSON.parse(readFileSync(path, "utf8"));
    } catch {
      // A corrupt existing file is replaced rather than failing the save.
    }
  }
  const next = { ...current, ...config };
  // 0o600 === 384: restrict the file to the owner since it may hold api_key.
  writeFileSync(path, `${JSON.stringify(next, null, 2)}\n`, { mode: 0o600 });
  chmodSync(path, 0o600);
}
|
|
78
|
+
|
|
79
|
+
// src/client.ts
|
|
80
|
+
// Matches <think>…</think> / <thinking>…</thinking> reasoning spans emitted by
// some models; these are stripped before the answer is shown to the user.
var THINKING_RE = /<think(?:ing)?>[\s\S]*?<\/think(?:ing)?>/g;
// System prompt sent with every search request.
var SYSTEM_PROMPT = `You are a real-time web search assistant with direct access to the internet. When the user asks a question:

1. Search the web for the most current, accurate information available
2. Synthesize results into a clear, well-structured answer
3. Always include sources with URLs and dates when available
4. Be concise but comprehensive \u2014 cover key facts without unnecessary padding
5. If the query is in Chinese, respond entirely in Chinese; otherwise match the query language
6. When results conflict, note the discrepancy and indicate which sources are more authoritative
7. Format sources as a numbered list at the end of your response`;
// Remove all reasoning spans from a model response and trim surrounding whitespace.
function stripThinking(text) {
  const cleaned = text.replace(THINKING_RE, "");
  return cleaned.trim();
}
|
|
93
|
+
// OpenAI-compatible chat client with optional query splitting: a complex query
// is fanned out into sub-queries (searched in parallel), then merged into one
// answer by the analysis model.
var AIClient = class {
  /** @param {object} config - resolved config: api_url, api_key, search/analysis model ids, timeout_ms. */
  constructor(config) {
    this.config = config;
  }
  /**
   * POST a single non-streaming chat completion.
   * @param {Array<{role: string, content: string}>} messages
   * @param {string} model - model id to request
   * @returns {Promise<{content: string, tokens: number}>} reasoning-stripped text plus total token usage
   * @throws {Error} on non-2xx responses (body truncated to 500 chars) or on timeout via AbortSignal
   */
  async chat(messages, model) {
    // Trim trailing slashes so the joined path never doubles a "/".
    const url = `${this.config.api_url.replace(/\/+$/, "")}/v1/chat/completions`;
    const resp = await fetch(url, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${this.config.api_key}`
      },
      body: JSON.stringify({ model, messages, stream: false }),
      signal: AbortSignal.timeout(this.config.timeout_ms)
    });
    if (!resp.ok) {
      const body = await resp.text().catch(() => "");
      throw new Error(`AI API HTTP ${resp.status}: ${body.slice(0, 500)}`);
    }
    const data = await resp.json();
    const content = stripThinking(data.choices?.[0]?.message?.content ?? "");
    const tokens = data.usage?.total_tokens ?? 0;
    return { content, tokens };
  }
  /**
   * Run a web search. With split <= 1 this is a single request; otherwise the
   * query is split, sub-queries are searched concurrently, and answers merged.
   * @returns {Promise<{query, model, content, tokens, sub_results?}>}
   */
  async search(query, model, split = 1) {
    if (split <= 1) {
      const { content, tokens } = await this.chat(
        [
          { role: "system", content: SYSTEM_PROMPT },
          { role: "user", content: query }
        ],
        model
      );
      return { query, model, content, tokens };
    }
    const subQueries = await this.splitQuery(query, split);
    // Sub-searches are independent; run them in parallel (fail-fast).
    const subResults = await Promise.all(
      subQueries.map(async (sq) => {
        const { content, tokens } = await this.chat(
          [
            { role: "system", content: SYSTEM_PROMPT },
            { role: "user", content: sq }
          ],
          model
        );
        return { sub_query: sq, content, tokens };
      })
    );
    const totalSubTokens = subResults.reduce((sum, r) => sum + r.tokens, 0);
    const { content: merged, tokens: mergeTokens } = await this.mergeResults(query, subResults);
    return {
      query,
      model,
      content: merged,
      tokens: totalSubTokens + mergeTokens,
      sub_results: subResults
    };
  }
  /**
   * Ask the analysis model to split `query` into at most `maxSplit` sub-queries.
   * Falls back to [query] when the response is not a usable JSON string array.
   */
  async splitQuery(query, maxSplit) {
    const { content } = await this.chat(
      [
        {
          role: "system",
          content: "You are a query analysis assistant. You split complex questions into independent sub-queries for parallel web search. Always respond with a valid JSON array of strings."
        },
        {
          role: "user",
          content: `Split the following complex search query into ${maxSplit} or fewer independent sub-questions that can be searched separately. Return ONLY a JSON array of strings, nothing else.

Query: ${query}`
        }
      ],
      this.config.analysis_model
    );
    try {
      let jsonStr = content.trim();
      if (jsonStr.startsWith("```")) {
        // Strip a Markdown code fence the model may wrap around the JSON.
        jsonStr = jsonStr.split("\n").slice(1).filter((l) => !l.startsWith("```")).join("\n");
      }
      const parsed = JSON.parse(jsonStr);
      if (Array.isArray(parsed)) {
        // FIX: the prompt asks for <= maxSplit string items, but the model's
        // answer was previously used unvalidated — keep only non-empty strings
        // and cap the count so we never fan out more requests than allowed.
        const subs = parsed
          .filter((q) => typeof q === "string" && q.trim() !== "")
          .slice(0, maxSplit);
        if (subs.length > 0) return subs;
      }
    } catch {
      // Unparseable model output: fall through to the unsplit query.
    }
    return [query];
  }
  /**
   * Merge the per-sub-query answers into one coherent response using the
   * analysis model.
   * @returns {Promise<{content: string, tokens: number}>}
   */
  async mergeResults(originalQuery, subResults) {
    const context = subResults.map((r, i) => `--- Sub-query ${i + 1}: ${r.sub_query} ---
${r.content}`).join("\n\n");
    return this.chat(
      [
        {
          role: "system",
          content: "You are a research synthesis assistant. Merge multiple search results into a single coherent answer. Keep all sources and citations. Match the language of the original query."
        },
        {
          role: "user",
          content: `Based on the following search results for sub-queries, provide a comprehensive answer to the original question. Synthesize the information, remove duplicates, and present a coherent response with sources.

Original question: ${originalQuery}

${context}`
        }
      ],
      this.config.analysis_model
    );
  }
};
|
|
200
|
+
|
|
201
|
+
// src/output.ts
|
|
202
|
+
// Wrap a payload in the CLI's success envelope.
function success(command, data) {
  return {
    ok: true,
    command,
    data
  };
}
|
|
205
|
+
// Build the CLI's error envelope; `suggestion` is included only when truthy.
function error(command, code, message, suggestion) {
  const details = { code, message };
  if (suggestion) {
    details.suggestion = suggestion;
  }
  return { ok: false, command, error: details };
}
|
|
208
|
+
// Emit a result envelope. Default is pretty-printed JSON on stdout; in human
// mode, success content goes to stdout and errors (with an optional
// suggestion) go to stderr.
function output(result, human = false) {
  if (!human) {
    process.stdout.write(JSON.stringify(result, null, 2) + "\n");
    return;
  }
  if (result.ok) {
    const data = result.data;
    if (data.content) {
      process.stdout.write(String(data.content) + "\n");
    } else {
      // No content field (e.g. `models`, `config show`): show the raw payload.
      process.stdout.write(JSON.stringify(data, null, 2) + "\n");
    }
    return;
  }
  process.stderr.write(`\u2717 ${result.error.code}: ${result.error.message}\n`);
  if (result.error.suggestion) {
    process.stderr.write(` \u2192 ${result.error.suggestion}\n`);
  }
}
|
|
229
|
+
|
|
230
|
+
// src/index.ts
|
|
231
|
+
// Root command; `--version` prints CLI_VERSION.
var program = new Command().name("ai-search").description("AI-powered web search CLI").version(CLI_VERSION);
// Default action: `ai-search <words...>` — query words are joined with spaces.
program.argument("[query...]", "Search query").option("-m, --model <model>", "Model to use").option("--split <n>", "Max sub-queries for query splitting", "1").option("--json", "Force JSON output").option("--human", "Human-readable output").action(async (queryParts, opts) => {
  const query = queryParts.join(" ");
  if (!query) {
    // No query given: show usage instead of an empty search.
    program.help();
    return;
  }
  await doSearch(query, opts);
});
// Explicit `search` subcommand; adds --stdin for machine callers that pipe
// a JSON object ({query, model?, split?}) instead of passing argv words.
program.command("search <query...>").description("Search the web").option("-m, --model <model>", "Model to use").option("--split <n>", "Max sub-queries for query splitting", "1").option("--json", "Force JSON output").option("--human", "Human-readable output").option("--stdin", "Read query from stdin JSON").action(async (queryParts, opts) => {
  let query;
  if (opts.stdin) {
    // Read all of stdin, then parse it as a JSON request object.
    const chunks = [];
    for await (const chunk of process.stdin) chunks.push(chunk);
    const input = JSON.parse(Buffer.concat(chunks).toString());
    query = input.query ?? "";
    // stdin fields override the corresponding CLI flags.
    if (input.model) opts.model = input.model;
    if (input.split) opts.split = String(input.split);
  } else {
    query = queryParts.join(" ");
  }
  await doSearch(query, opts);
});
// `models`: print the known search model ids.
program.command("models").description("List available search models").action(() => {
  output(success("models", { models: SEARCH_MODELS }));
});
// `config set` / `config show`: manage ~/.config/ai-search/config.json.
program.command("config").description("Manage configuration").addCommand(
  new Command("set").description("Set a config value").argument("<key>", "Config key (api_url, api_key, search_model, analysis_model)").argument("<value>", "Config value").action((key, value) => {
    saveConfig({ [key]: value });
    // Never echo the API key back.
    output(success("config.set", { key, value: key === "api_key" ? "***" : value }));
  })
).addCommand(
  new Command("show").description("Show current configuration").action(() => {
    const config = loadConfig();
    output(
      success("config.show", {
        ...config,
        // Mask the key: show only an 8-char prefix.
        api_key: config.api_key ? `${config.api_key.slice(0, 8)}...` : "(not set)"
      })
    );
  })
);
|
|
273
|
+
/**
 * Run one search end-to-end: load config, call the API, print the result.
 * Exit codes: 1 = missing API key, 3 = search/API failure.
 * @param {string} query - full query text
 * @param {{model?: string, split?: string, json?: boolean, human?: boolean}} opts
 */
async function doSearch(query, opts) {
  const config = loadConfig();
  if (!config.api_key) {
    output(error("search", "NO_API_KEY", "API key not configured", "Run: ai-search config set api_key <key> OR export AI_SEARCH_KEY=<key>"));
    process.exit(1);
  }
  const client = new AIClient(config);
  const model = opts.model ?? config.search_model;
  // FIX: parseInt can return NaN (e.g. `--split abc`); since `NaN <= 1` is
  // false, AIClient.search would then silently take the query-splitting path.
  // Clamp non-numeric or non-positive values to 1 (no splitting).
  const parsedSplit = Number.parseInt(opts.split ?? "1", 10);
  const split = Number.isFinite(parsedSplit) && parsedSplit >= 1 ? parsedSplit : 1;
  try {
    const result = await client.search(query, model, split);
    // Human-readable when forced by --human, or on a TTY unless --json is set.
    const isHuman = opts.human || (process.stdout.isTTY && !opts.json);
    if (isHuman) {
      // Decorations go to stderr so stdout stays pipeable (answer text only).
      process.stderr.write(`
\u{1F50D} [${result.model}] Search: ${result.query}

`);
      process.stdout.write(result.content + "\n");
      process.stderr.write(`
\u{1F4CA} Tokens: ${result.tokens}
`);
    } else {
      output(success("search", result));
    }
  } catch (e) {
    output(error("search", "SEARCH_FAILED", e instanceof Error ? e.message : String(e)));
    process.exit(3);
  }
}
|
|
302
|
+
// Entry point: parse argv and dispatch (top-level await; the package is ESM and requires Node >= 20).
await program.parseAsync(process.argv);
|
package/package.json
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@doufunao123/ai-search",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "AI-powered web search CLI with query splitting",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"bin": {
|
|
7
|
+
"ai-search": "dist/index.js"
|
|
8
|
+
},
|
|
9
|
+
"files": [
|
|
10
|
+
"dist",
|
|
11
|
+
"README.md"
|
|
12
|
+
],
|
|
13
|
+
"repository": {
|
|
14
|
+
"type": "git",
|
|
15
|
+
"url": "git+https://github.com/fran0220/agent-skills.git",
|
|
16
|
+
"directory": "ai-search/npm"
|
|
17
|
+
},
|
|
18
|
+
"author": "doufunao123",
|
|
19
|
+
"scripts": {
|
|
20
|
+
"build": "tsup src/index.ts --format esm --dts --clean",
|
|
21
|
+
"dev": "tsup src/index.ts --format esm --watch",
|
|
22
|
+
"lint": "tsc --noEmit",
|
|
23
|
+
"prepublishOnly": "npm run build"
|
|
24
|
+
},
|
|
25
|
+
"engines": {
|
|
26
|
+
"node": ">=20"
|
|
27
|
+
},
|
|
28
|
+
"dependencies": {
|
|
29
|
+
"commander": "^13.1.0"
|
|
30
|
+
},
|
|
31
|
+
"devDependencies": {
|
|
32
|
+
"@types/node": "^22.15.0",
|
|
33
|
+
"tsup": "^8.4.0",
|
|
34
|
+
"typescript": "^5.8.0"
|
|
35
|
+
}
|
|
36
|
+
}
|