@apmantza/greedysearch-pi 1.7.7 → 1.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/bin/coding-task.mjs +27 -1
- package/bin/search.mjs +260 -1539
- package/index.ts +134 -421
- package/package.json +1 -1
- package/src/github.mjs +6 -1
- package/src/search/chrome.mjs +223 -0
- package/src/search/constants.mjs +38 -0
- package/src/search/defaults.mjs +15 -0
- package/src/search/engines.mjs +58 -0
- package/src/search/fetch-source.mjs +230 -0
- package/src/search/output.mjs +59 -0
- package/src/search/sources.mjs +446 -0
- package/src/search/synthesis-runner.mjs +64 -0
- package/src/search/synthesis.mjs +212 -0
- package/src/tools/deep-research-handler.ts +37 -0
- package/src/tools/greedy-search-handler.ts +58 -0
- package/src/tools/shared.ts +131 -0
- package/src/types.ts +104 -0
package/bin/search.mjs
CHANGED
|
@@ -1,1539 +1,260 @@
|
|
|
1
|
-
#!/usr/bin/env node
|
|
2
|
-
// search.mjs — unified CLI for GreedySearch extractors
|
|
3
|
-
//
|
|
4
|
-
// Usage:
|
|
5
|
-
// node search.mjs <engine> "<query>"
|
|
6
|
-
// node search.mjs all "<query>"
|
|
7
|
-
//
|
|
8
|
-
// Engines:
|
|
9
|
-
// perplexity | pplx | p
|
|
10
|
-
// bing | copilot | b
|
|
11
|
-
// google | g
|
|
12
|
-
// gemini | gem
|
|
13
|
-
// all — fan-out to all engines in parallel
|
|
14
|
-
//
|
|
15
|
-
// Output: JSON to stdout, errors to stderr
|
|
16
|
-
//
|
|
17
|
-
// Examples:
|
|
18
|
-
// node search.mjs p "what is memoization"
|
|
19
|
-
// node search.mjs gem "latest React features"
|
|
20
|
-
// node search.mjs all "how does TCP congestion control work"
|
|
21
|
-
|
|
22
|
-
import {
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
} from "
|
|
31
|
-
import
|
|
32
|
-
import {
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
}
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
"
|
|
78
|
-
"
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
"
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
"
|
|
87
|
-
|
|
88
|
-
"
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
const
|
|
110
|
-
const
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
}
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
const
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
}
|
|
262
|
-
|
|
263
|
-
// Choose the better of two candidate titles for the same source.
// Prefers a human-readable title over one that looks like a raw URL,
// and otherwise keeps the longer of the two (after normalization).
function pickPreferredTitle(currentTitle = "", nextTitle = "") {
  const a = normalizeSourceTitle(currentTitle);
  const b = normalizeSourceTitle(nextTitle);
  if (!b) return a;
  if (!a) return b;
  const urlish = (value) => /^https?:\/\//i.test(value);
  if (urlish(a) !== urlish(b)) {
    // Exactly one candidate looks like a bare URL — keep the other one.
    return urlish(a) ? b : a;
  }
  return b.length > a.length ? b : a;
}
|
|
274
|
-
|
|
275
|
-
/**
 * Canonicalize an http(s) URL for deduplication: drops the fragment,
 * lowercases the host, removes default ports and tracking query params
 * (utm_* plus the TRACKING_PARAMS list), sorts remaining params, and
 * trims trailing slashes from the path.
 * Returns null for anything that is not a parseable http/https URL.
 */
function normalizeUrl(rawUrl) {
  if (!rawUrl) return null;
  try {
    const url = new URL(rawUrl);
    if (url.protocol !== "http:" && url.protocol !== "https:") return null;

    url.hash = "";
    url.hostname = url.hostname.toLowerCase();

    const hasDefaultPort =
      (url.protocol === "https:" && url.port === "443") ||
      (url.protocol === "http:" && url.port === "80");
    if (hasDefaultPort) url.port = "";

    // Copy the keys first — deleting while iterating searchParams is unsafe.
    for (const key of [...url.searchParams.keys()]) {
      const lower = key.toLowerCase();
      if (lower.startsWith("utm_") || TRACKING_PARAMS.includes(lower)) {
        url.searchParams.delete(key);
      }
    }
    url.searchParams.sort();

    const path = url.pathname.replace(/\/+$/, "") || "/";
    url.pathname = path;
    const result = url.toString();
    // URL always serializes a bare root as "host/" — strip that slash too.
    return path === "/" ? result.replace(/\/$/, "") : result;
  } catch {
    return null;
  }
}
|
|
303
|
-
|
|
304
|
-
/**
 * Extract the hostname from a URL, lowercased and with any leading
 * "www." removed. Returns "" when the URL cannot be parsed.
 */
function getDomain(rawUrl) {
  try {
    const { hostname } = new URL(rawUrl);
    return hostname.toLowerCase().replace(/^www\./, "");
  } catch {
    return "";
  }
}
|
|
312
|
-
|
|
313
|
-
// True when `domain` is exactly one of `hosts` or a subdomain of one.
function matchesDomain(domain, hosts) {
  for (const host of hosts) {
    if (domain === host || domain.endsWith(`.${host}`)) return true;
  }
  return false;
}
|
|
316
|
-
|
|
317
|
-
/**
 * Heuristically bucket a source for ranking purposes. Buckets:
 * "repo" | "community" | "news" | "official-docs" | "maintainer-blog" | "website".
 * Classification looks at the domain, the page title, and the URL path.
 */
function classifySourceType(domain, title = "", rawUrl = "") {
  const lowerTitle = title.toLowerCase();
  const lowerUrl = rawUrl.toLowerCase();

  if (domain === "github.com" || domain === "gitlab.com") return "repo";
  if (matchesDomain(domain, COMMUNITY_HOSTS)) return "community";
  if (matchesDomain(domain, NEWS_HOSTS)) return "news";

  // Documentation signals: docs-style subdomain, docs-y title words,
  // or a docs/reference/api path segment.
  const docsSubdomain = ["docs.", "developer.", "developers.", "api."].some(
    (prefix) => domain.startsWith(prefix),
  );
  const docsTitle = ["documentation", "docs", "reference"].some((word) =>
    lowerTitle.includes(word),
  );
  const docsPath = ["/docs/", "/reference/", "/api/"].some((segment) =>
    lowerUrl.includes(segment),
  );
  if (docsSubdomain || docsTitle || docsPath) return "official-docs";

  if (domain.startsWith("blog.") || lowerUrl.includes("/blog/")) {
    return "maintainer-blog";
  }
  return "website";
}
|
|
342
|
-
|
|
343
|
-
/**
 * Ranking weight for a source type — higher values sort earlier.
 * Unknown types fall back to 0 (same weight as "news").
 */
function sourceTypePriority(sourceType) {
  const priorities = {
    "official-docs": 5,
    repo: 4,
    "maintainer-blog": 3,
    website: 2,
    community: 1,
    news: 0,
  };
  return Object.hasOwn(priorities, sourceType) ? priorities[sourceType] : 0;
}
|
|
361
|
-
|
|
362
|
-
// Best (lowest) rank the source achieved across all engines, treating a
// missing/zero rank as 99. Returns 99 when no engine data is present.
function bestRank(source) {
  let best = Infinity;
  for (const entry of Object.values(source.perEngine || {})) {
    const rank = entry?.rank || 99;
    if (rank < best) best = rank;
  }
  return best === Infinity ? 99 : best;
}
|
|
366
|
-
|
|
367
|
-
/**
 * Merge per-engine source lists from `out` into a deduplicated, ranked
 * registry of at most 12 sources, each tagged with a stable id ("S1"…).
 * Dedup key is the normalized URL; ranking combines a query-aware
 * "smart score", cross-engine consensus, source-type priority, and the
 * best per-engine rank.
 *
 * @param {object} out   Per-engine results keyed by engine name; each
 *                       entry may carry a `sources` array of {url, title}.
 * @param {string} query Original user query, used to infer preferred domains.
 * @returns {Array<object>} Ranked, deduplicated source records.
 */
function buildSourceRegistry(out, query = "") {
  // Keyed by canonical URL so the same page found by several engines
  // collapses into one record.
  const seen = new Map();
  const engineOrder = ["perplexity", "bing", "google"];

  // Get preferred domains for this query
  const preferredDomains = inferPreferredDomains(query);

  for (const engine of engineOrder) {
    const result = out[engine];
    if (!result?.sources) continue;

    for (let i = 0; i < result.sources.length; i++) {
      const source = result.sources[i];
      const canonicalUrl = normalizeUrl(source.url);
      // Skip unparseable or implausibly short URLs.
      if (!canonicalUrl || canonicalUrl.length < 10) continue;

      const title = normalizeSourceTitle(source.title || "");
      const domain = getDomain(canonicalUrl);
      const sourceType = classifySourceType(domain, title, canonicalUrl);

      // Calculate smart score boost
      let smartScore = 0;

      // Boost preferred domains for this query
      if (preferredDomains.some((pd) => domainMatches(domain, pd))) {
        smartScore += 10; // Strong boost for query-relevant official docs
      }

      // Boost docs/developer sites
      if (sourceType === "official-docs") {
        smartScore += 3;
      }

      // Boost based on URL path patterns
      const lowerUrl = canonicalUrl.toLowerCase();
      if (
        /\/docs\/|\/documentation\/|\.dev\/|\/api\/|\/reference\//.test(
          lowerUrl,
        )
      ) {
        smartScore += 2;
      }

      // Penalize community/discussion sites for technical queries
      if (sourceType === "community" && preferredDomains.length > 0) {
        smartScore -= 2;
      }

      // First sighting of this URL gets a fresh skeleton record.
      const existing = seen.get(canonicalUrl) || {
        id: "",
        canonicalUrl,
        displayUrl: source.url || canonicalUrl,
        domain,
        title: "",
        engines: [],
        engineCount: 0,
        perEngine: {},
        sourceType,
        isOfficial: sourceType === "official-docs",
        smartScore: 0,
      };

      // Merge this engine's view into the accumulated record: best
      // title wins, official flag is sticky, smart score keeps its max.
      existing.title = pickPreferredTitle(existing.title, title);
      existing.displayUrl = existing.displayUrl || source.url || canonicalUrl;
      existing.sourceType = existing.sourceType || sourceType;
      existing.isOfficial =
        existing.isOfficial || sourceType === "official-docs";
      existing.smartScore = Math.max(existing.smartScore, smartScore);

      if (!existing.engines.includes(engine)) {
        existing.engines.push(engine);
      }
      existing.perEngine[engine] = {
        rank: i + 1,
        title: pickPreferredTitle(
          existing.perEngine[engine]?.title || "",
          title,
        ),
      };

      seen.set(canonicalUrl, existing);
    }
  }

  const sources = Array.from(seen.values())
    .map((source) => ({
      ...source,
      engineCount: source.engines.length,
    }))
    .sort((a, b) => {
      // Primary: smart score (query-aware domain boosting)
      if (b.smartScore !== a.smartScore) return b.smartScore - a.smartScore;

      // Secondary: consensus (sources found by more engines)
      if (b.engineCount !== a.engineCount) return b.engineCount - a.engineCount;

      // Tertiary: source type priority
      if (
        sourceTypePriority(b.sourceType) !== sourceTypePriority(a.sourceType)
      ) {
        return (
          sourceTypePriority(b.sourceType) - sourceTypePriority(a.sourceType)
        );
      }

      // Quaternary: best rank across engines
      if (bestRank(a) !== bestRank(b)) return bestRank(a) - bestRank(b);

      return a.domain.localeCompare(b.domain);
    })
    .slice(0, 12)
    .map((source, index) => ({
      ...source,
      id: `S${index + 1}`,
      title: source.title || source.domain || source.canonicalUrl,
    }));

  return sources;
}
|
|
486
|
-
|
|
487
|
-
/**
 * Attach fetch metadata from `fetchedSources` onto the matching entries
 * of `sources` (matched by id). Entries with no fetch record pass
 * through untouched; matched entries also get their title upgraded when
 * the fetched page exposed a better one. Returns a new array; input
 * records are not mutated.
 */
function mergeFetchDataIntoSources(sources, fetchedSources) {
  const fetchedById = new Map();
  for (const fetched of fetchedSources) {
    fetchedById.set(fetched.id, fetched);
  }

  return sources.map((source) => {
    const fetched = fetchedById.get(source.id);
    if (!fetched) return source;

    const bestTitle = pickPreferredTitle(source.title, fetched.title || "");
    const fetchInfo = {
      attempted: true,
      // "ok" requires both no error and a non-trivial amount of content.
      ok: !fetched.error && fetched.contentChars > 100,
      status: fetched.status || null,
      finalUrl: fetched.finalUrl || fetched.url || source.canonicalUrl,
      contentType: fetched.contentType || "",
      lastModified: fetched.lastModified || "",
      publishedTime: fetched.publishedTime || "",
      byline: fetched.byline || "",
      siteName: fetched.siteName || "",
      lang: fetched.lang || "",
      title: fetched.title || "",
      snippet: fetched.snippet || "",
      contentChars: fetched.contentChars || 0,
      source: fetched.source || "unknown", // "http" | "browser"
      duration: fetched.duration || 0,
      error: fetched.error || "",
    };

    return {
      ...source,
      title: bestTitle || source.title,
      fetch: fetchInfo,
    };
  });
}
|
|
518
|
-
|
|
519
|
-
/**
 * Best-effort JSON extraction from LLM output. Tries, in order: the raw
 * trimmed text, a markdown-fence-stripped variant, and the first
 * `{...}` span embedded in the text. Returns the first candidate that
 * parses, or null when nothing does.
 */
function parseStructuredJson(text) {
  if (!text) return null;
  const trimmed = String(text).trim();

  const withoutFences = trimmed
    .replace(/^```json\s*/i, "")
    .replace(/^```\s*/i, "")
    .replace(/```$/i, "")
    .trim();

  const candidates = [trimmed, withoutFences];
  const embedded = trimmed.match(/\{[\s\S]*\}/);
  if (embedded) candidates.push(embedded[0]);

  for (const candidate of candidates) {
    try {
      return JSON.parse(candidate);
    } catch {
      // try next candidate
    }
  }
  return null;
}
|
|
543
|
-
|
|
544
|
-
/**
 * Sanitize a model-produced synthesis payload into the fixed output
 * schema. Unknown enum values fall back to safe defaults, all free text
 * is length-capped via trimText, and every source id reference is
 * validated against the supplied source registry.
 *
 * @param {object|null} payload      Parsed model output (may be malformed).
 * @param {Array<object>} sources    Registry records; only their ids are used.
 * @param {string} fallbackAnswer    Used when the payload has no answer.
 * @returns {object} Normalized { answer, agreement, differences, caveats,
 *                   claims, recommendedSources }.
 */
function normalizeSynthesisPayload(payload, sources, fallbackAnswer = "") {
  // Valid ids for filtering hallucinated source references.
  const sourceIds = new Set(sources.map((source) => source.id));
  // Agreement level must be one of the known values; default to "mixed".
  const agreementLevel = [
    "high",
    "medium",
    "low",
    "mixed",
    "conflicting",
  ].includes(payload?.agreement?.level)
    ? payload.agreement.level
    : "mixed";
  // Claims: cap text, coerce unknown support levels to "moderate",
  // drop source ids not in the registry, then drop empty claims.
  const claims = Array.isArray(payload?.claims)
    ? payload.claims
        .map((claim) => ({
          claim: trimText(claim?.claim || "", 260),
          support: ["strong", "moderate", "weak", "conflicting"].includes(
            claim?.support,
          )
            ? claim.support
            : "moderate",
          sourceIds: Array.isArray(claim?.sourceIds)
            ? claim.sourceIds.filter((id) => sourceIds.has(id))
            : [],
        }))
        .filter((claim) => claim.claim)
    : [];
  // Recommended sources: only known ids, at most 6.
  const recommendedSources = Array.isArray(payload?.recommendedSources)
    ? payload.recommendedSources.filter((id) => sourceIds.has(id)).slice(0, 6)
    : [];

  return {
    answer: trimText(payload?.answer || fallbackAnswer, 4000),
    agreement: {
      level: agreementLevel,
      summary: trimText(payload?.agreement?.summary || "", 280),
    },
    // Differences/caveats: capped to 220 chars each, max 5 entries.
    differences: Array.isArray(payload?.differences)
      ? payload.differences
          .map((item) => trimText(item, 220))
          .filter(Boolean)
          .slice(0, 5)
      : [],
    caveats: Array.isArray(payload?.caveats)
      ? payload.caveats
          .map((item) => trimText(item, 220))
          .filter(Boolean)
          .slice(0, 5)
      : [],
    claims,
    recommendedSources,
  };
}
|
|
596
|
-
|
|
597
|
-
/**
 * Build the text prompt sent to the synthesis model. The prompt bundles
 * per-engine answers (with their top source ids), a compact source
 * registry, the required JSON output schema, and the user query.
 *
 * @param {string} query    Original user query.
 * @param {object} results  Per-engine results keyed by engine name.
 * @param {Array<object>} sources  Ranked registry from buildSourceRegistry.
 * @param {object} [options]
 * @param {boolean} [options.grounded=false]  When true, include fetched
 *        content snippets and allow longer engine answers.
 * @returns {string} The complete prompt text.
 */
function buildSynthesisPrompt(
  query,
  results,
  sources,
  { grounded = false } = {},
) {
  const engineSummaries = {};
  for (const engine of ["perplexity", "bing", "google"]) {
    const result = results[engine];
    if (!result) continue;
    // Failed engines are still surfaced so the model knows coverage gaps.
    if (result.error) {
      engineSummaries[engine] = {
        status: "error",
        error: String(result.error),
      };
      continue;
    }

    engineSummaries[engine] = {
      status: "ok",
      // Grounded mode allows longer answers since snippets carry evidence.
      answer: trimText(result.answer || "", grounded ? 4500 : 2200),
      // Top 6 source ids for this engine, ordered by its own ranking.
      sourceIds: sources
        .filter((source) => source.engines.includes(engine))
        .sort(
          (a, b) =>
            (a.perEngine[engine]?.rank || 99) -
            (b.perEngine[engine]?.rank || 99),
        )
        .map((source) => source.id)
        .slice(0, 6),
    };
  }

  // Compact registry the model may cite; grounded mode includes more
  // sources plus a content snippet per fetched source.
  const sourceRegistry = sources.slice(0, grounded ? 10 : 8).map((source) => ({
    id: source.id,
    title: source.title,
    domain: source.domain,
    canonicalUrl: source.canonicalUrl,
    sourceType: source.sourceType,
    isOfficial: source.isOfficial,
    engines: source.engines,
    engineCount: source.engineCount,
    perEngine: source.perEngine,
    fetch:
      source.fetch?.attempted
        ? {
            ok: source.fetch.ok,
            status: source.fetch.status,
            publishedTime: source.fetch.publishedTime || "",
            lastModified: source.fetch.lastModified || "",
            byline: source.fetch.byline || "",
            siteName: source.fetch.siteName || "",
            ...(grounded ? { snippet: trimText(source.fetch.snippet || "", 700) } : {}),
          }
        : undefined,
  }));

  return [
    "You are synthesizing results from Perplexity, Bing Copilot, and Google AI.",
    grounded
      ? "Use the fetched source snippets as the strongest evidence. Use engine answers for perspective and conflict detection."
      : "Use the engine answers for perspective. Use the source registry for provenance and citations.",
    "Prefer official docs, release notes, repositories, and maintainer-authored sources when available.",
    "When publishedTime or lastModified is available, flag sources older than 2 years as potentially stale in caveats.",
    "If the engines disagree, say so explicitly.",
    "Do not invent sources. Only reference source IDs from the source registry.",
    "Return valid JSON only. No markdown fences, no prose outside the JSON object.",
    "",
    "JSON schema:",
    "{",
    '  "answer": "short direct answer",',
    '  "agreement": { "level": "high|medium|low|mixed|conflicting", "summary": "..." },',
    '  "differences": ["..."],',
    '  "caveats": ["..."],',
    '  "claims": [',
    '    { "claim": "...", "support": "strong|moderate|weak|conflicting", "sourceIds": ["S1"] }',
    "  ],",
    '  "recommendedSources": ["S1", "S2"]',
    "}",
    "",
    `User query: ${query}`,
    "",
    `Engine results:\n${JSON.stringify(engineSummaries, null, 2)}`,
    "",
    `Source registry:\n${JSON.stringify(sourceRegistry, null, 2)}`,
  ].join("\n");
}
|
|
684
|
-
|
|
685
|
-
/**
 * Summarize how trustworthy the aggregate result looks: which engines
 * responded or failed, top-source consensus, official/first-party
 * coverage, fetch success rate, and a per-type source breakdown.
 */
function buildConfidence(out) {
  const sources = Array.isArray(out._sources) ? out._sources : [];

  // Single pass over the registry gathers all per-source counters.
  let officialSourceCount = 0;
  let firstPartySourceCount = 0;
  let fetchedAttempted = 0;
  let fetchedSucceeded = 0;
  const sourceTypeBreakdown = {};
  for (const source of sources) {
    if (source.isOfficial) officialSourceCount += 1;
    if (source.isOfficial || source.sourceType === "maintainer-blog") {
      firstPartySourceCount += 1;
    }
    if (source.fetch?.attempted) fetchedAttempted += 1;
    if (source.fetch?.ok) fetchedSucceeded += 1;
    sourceTypeBreakdown[source.sourceType] =
      (sourceTypeBreakdown[source.sourceType] || 0) + 1;
  }

  const topConsensus = sources.length > 0 ? sources[0]?.engineCount || 0 : 0;
  const synthesisLevel = out._synthesis?.agreement?.level;
  // Without a synthesis verdict, derive agreement from top-source consensus.
  const fallbackLevel =
    topConsensus >= 3 ? "high" : topConsensus >= 2 ? "medium" : "low";

  return {
    sourcesCount: sources.length,
    topSourceConsensus: topConsensus,
    agreementLevel: synthesisLevel || fallbackLevel,
    enginesResponded: ALL_ENGINES.filter(
      (engine) => out[engine]?.answer && !out[engine]?.error,
    ),
    enginesFailed: ALL_ENGINES.filter((engine) => out[engine]?.error),
    officialSourceCount,
    firstPartySourceCount,
    fetchedSourceSuccessRate:
      fetchedAttempted > 0
        ? Number((fetchedSucceeded / fetchedAttempted).toFixed(2))
        : 0,
    sourceTypeBreakdown,
  };
}
|
|
723
|
-
|
|
724
|
-
/**
 * Look up a cached Chrome tab whose URL belongs to the given engine's
 * domain. Returns the CDP targetId, or null when the cache file is
 * missing/unreadable or no matching tab exists.
 */
function getFullTabFromCache(engine) {
  try {
    if (!existsSync(PAGES_CACHE)) return null;
    const pages = JSON.parse(readFileSync(PAGES_CACHE, "utf8"));
    const match = pages.find((page) =>
      page.url.includes(ENGINE_DOMAINS[engine]),
    );
    return match ? match.targetId : null;
  } catch {
    return null;
  }
}
|
|
734
|
-
|
|
735
|
-
/**
 * Run the CDP helper script with the given args and resolve with its
 * trimmed stdout. Rejects on non-zero exit or after `timeoutMs`
 * elapses (the child process is killed on timeout).
 */
function cdp(args, timeoutMs = 15000) {
  return new Promise((resolve, reject) => {
    const child = spawn("node", [CDP, ...args], {
      stdio: ["ignore", "pipe", "pipe"],
    });
    let stdout = "";
    let stderr = "";
    child.stdout.on("data", (chunk) => {
      stdout += chunk;
    });
    child.stderr.on("data", (chunk) => {
      stderr += chunk;
    });
    const timer = setTimeout(() => {
      child.kill();
      reject(new Error(`cdp timeout: ${args[0]}`));
    }, timeoutMs);
    child.on("close", (code) => {
      clearTimeout(timer);
      if (code === 0) {
        resolve(stdout.trim());
      } else {
        reject(new Error(stderr.trim() || `cdp exit ${code}`));
      }
    });
  });
}
|
|
755
|
-
|
|
756
|
-
/**
 * Return the targetId (first 8 characters of the first `cdp list` line)
 * of any open Chrome tab. Throws when no tabs are listed.
 */
async function getAnyTab() {
  const listing = await cdp(["list"]);
  const [firstLine] = listing.split("\n");
  if (!firstLine) throw new Error("No Chrome tabs found");
  return firstLine.slice(0, 8);
}
|
|
762
|
-
|
|
763
|
-
/**
 * Open a fresh about:blank tab via CDP Target.createTarget, using an
 * existing tab as the command anchor. Resolves with the new targetId.
 */
async function openNewTab() {
  const anchorTab = await getAnyTab();
  const response = await cdp([
    "evalraw",
    anchorTab,
    "Target.createTarget",
    '{"url":"about:blank"}',
  ]);
  const { targetId } = JSON.parse(response);
  return targetId;
}
|
|
774
|
-
|
|
775
|
-
/**
 * Bring the given tab to the foreground via Target.activateTarget.
 * Best-effort: any CDP failure is silently ignored.
 */
async function activateTab(targetId) {
  try {
    const anchorTab = await getAnyTab();
    const params = JSON.stringify({ targetId });
    await cdp(["evalraw", anchorTab, "Target.activateTarget", params]);
  } catch {
    // best-effort
  }
}
|
|
788
|
-
|
|
789
|
-
/**
 * Close each of the given tabs (skipping falsy ids), then pause briefly
 * and refresh the CDP tab listing so stale entries drop out of the cache.
 */
async function closeTabs(targetIds = []) {
  for (const targetId of targetIds) {
    if (targetId) {
      await closeTab(targetId);
    }
  }
  if (targetIds.length > 0) {
    await new Promise((resolve) => setTimeout(resolve, 300));
    await cdp(["list"]).catch(() => null);
  }
}
|
|
799
|
-
|
|
800
|
-
/**
 * Close a single tab via Target.closeTarget, anchored on any open tab.
 * Best-effort: any CDP failure is swallowed.
 */
async function closeTab(targetId) {
  try {
    const anchorTab = await getAnyTab();
    const params = JSON.stringify({ targetId });
    await cdp(["evalraw", anchorTab, "Target.closeTarget", params]);
  } catch {
    /* best-effort */
  }
}
|
|
813
|
-
|
|
814
|
-
/**
 * Spawn an extractor script from ../extractors with the query and
 * optional flags, parse its stdout as JSON, and resolve with the result.
 * Rejects on timeout, non-zero exit, or unparseable output.
 *
 * @param {string} script       Extractor filename.
 * @param {string} query        Search query passed to the extractor.
 * @param {string|null} tabPrefix  Optional --tab value.
 * @param {boolean} short       Pass --short to the extractor.
 * @param {number|null} timeoutMs  null = auto-select based on engine.
 */
function runExtractor(
  script,
  query,
  tabPrefix = null,
  short = false,
  timeoutMs = null, // null = auto-select based on engine
) {
  // Gemini is slower - use longer timeout
  const effectiveTimeout =
    timeoutMs !== null ? timeoutMs : script.includes("gemini") ? 180000 : 90000;

  const extraArgs = [];
  if (tabPrefix) extraArgs.push("--tab", tabPrefix);
  if (short) extraArgs.push("--short");

  return new Promise((resolve, reject) => {
    const child = spawn(
      "node",
      [join(__dir, "..", "extractors", script), query, ...extraArgs],
      {
        stdio: ["ignore", "pipe", "pipe"],
        env: { ...process.env, CDP_PROFILE_DIR: GREEDY_PROFILE_DIR },
      },
    );
    let stdout = "";
    let stderr = "";
    child.stdout.on("data", (chunk) => {
      stdout += chunk;
    });
    child.stderr.on("data", (chunk) => {
      stderr += chunk;
    });
    const timer = setTimeout(() => {
      child.kill();
      reject(
        new Error(`${script} timed out after ${effectiveTimeout / 1000}s`),
      );
    }, effectiveTimeout);
    child.on("close", (code) => {
      clearTimeout(timer);
      if (code !== 0) {
        reject(new Error(stderr.trim() || `extractor exit ${code}`));
        return;
      }
      try {
        resolve(JSON.parse(stdout.trim()));
      } catch {
        reject(new Error(`bad JSON from ${script}: ${stdout.slice(0, 100)}`));
      }
    });
  });
}
|
|
859
|
-
|
|
860
|
-
/**
 * Navigate a temporary tab to `url`, scrape the main readable text via
 * an in-page script, and return { url, content }. On failure returns
 * { url, content: null, error }. The tab is always closed afterwards.
 */
async function fetchTopSource(url) {
  const tab = await openNewTab();
  await cdp(["list"]); // refresh cache so the new tab is findable
  try {
    await cdp(["nav", tab, url], 30000);
    // Fixed settle delay for client-side rendering before scraping.
    await new Promise((r) => setTimeout(r, 1500));
    const content = await cdp([
      "eval",
      tab,
      `
      (function(){
        var el = document.querySelector('article, [role="main"], main, .post-content, .article-body, #content, .content');
        var text = (el || document.body).innerText;
        return text.replace(/\\s+/g, ' ').trim();
      })()
      `,
    ]);
    return { url, content };
  } catch (e) {
    return { url, content: null, error: e.message };
  } finally {
    // Always clean up the temporary tab, even on error.
    await closeTab(tab);
  }
}
|
|
884
|
-
|
|
885
|
-
/**
 * Fetch source content via HTTP with Readability extraction.
 * GitHub repo/tree URLs are served through the GitHub API first;
 * otherwise an HTTP fetch is attempted, and the browser is used as a
 * last resort. Falls back to browser if HTTP fails or content quality
 * is low.
 * @param {string} url - URL to fetch
 * @param {number} maxChars - Max characters to return
 * @returns {Promise<object>} Fetch result
 */
async function fetchSourceContent(url, maxChars = 8000) {
  const start = Date.now();

  // Check if it's a GitHub URL (tree/root - use clone, blob - let fetcher handle via raw).
  // Parse once and reuse (previously parsed twice redundantly).
  const parsed = parseGitHubUrl(url);
  if (
    parsed &&
    (parsed.type === "root" ||
      parsed.type === "tree" ||
      (parsed.type === "blob" && !parsed.path?.includes(".")))
  ) {
    const ghResult = await fetchGitHubContent(url);
    if (ghResult.ok) {
      const content = trimContentHeadTail(ghResult.content, maxChars);
      return {
        url,
        finalUrl: url,
        status: 200,
        contentType: "text/markdown",
        lastModified: "",
        title: ghResult.title,
        snippet: content.slice(0, 320),
        content,
        contentChars: content.length,
        source: "github-api",
        ...(ghResult.tree && { tree: ghResult.tree }),
        duration: Date.now() - start,
      };
    }
    // If GitHub clone failed, fall through to HTTP (which will use raw for blobs)
    process.stderr.write(
      `[greedysearch] GitHub API fetch failed, trying HTTP: ${ghResult.error}\n`,
    );
  }

  // Try HTTP first
  const httpResult = await fetchSourceHttp(url, { timeoutMs: 15000 });

  if (httpResult.ok) {
    const content = trimContentHeadTail(httpResult.markdown, maxChars);
    return {
      url,
      finalUrl: httpResult.finalUrl,
      status: httpResult.status,
      contentType: "text/markdown",
      lastModified: httpResult.lastModified || "",
      publishedTime: httpResult.publishedTime || "",
      byline: httpResult.byline || "",
      siteName: httpResult.siteName || "",
      lang: httpResult.lang || "",
      title: httpResult.title,
      snippet: httpResult.excerpt,
      content,
      contentChars: content.length,
      source: "http",
      duration: Date.now() - start,
    };
  }

  // HTTP failed or blocked - fall back to browser
  process.stderr.write(
    `[greedysearch] HTTP failed for ${url.slice(0, 60)}, trying browser...\n`,
  );
  return await fetchSourceContentBrowser(url, maxChars);
}
|
|
960
|
-
|
|
961
|
-
/**
 * Browser fallback for source fetching (original CDP-based method).
 * Opens a temporary tab, navigates to `url`, extracts the main text via
 * an in-page script, and returns a normalized fetch result. On failure
 * returns a result object carrying `error` instead of throwing. The tab
 * is always closed.
 */
async function fetchSourceContentBrowser(url, maxChars = 8000) {
  const start = Date.now();
  const tab = await openNewTab();

  try {
    await cdp(["nav", tab, url], 30000);
    // Fixed settle delay for client-side rendering before scraping.
    await new Promise((r) => setTimeout(r, 1500));

    const content = await cdp([
      "eval",
      tab,
      `
      (function(){
        var el = document.querySelector('article, [role="main"], main, .post-content, .article-body, #content, .content');
        var text = (el || document.body).innerText;
        return JSON.stringify({
          title: document.title,
          content: text.replace(/\\s+/g, ' ').trim(),
          url: location.href
        });
      })()
      `,
    ]);

    const parsed = JSON.parse(content);
    const finalContent = trimContentHeadTail(parsed.content, maxChars);

    return {
      url,
      finalUrl: parsed.url || url,
      status: 200,
      contentType: "text/plain",
      lastModified: "",
      title: parsed.title,
      snippet: trimText(finalContent, 320),
      content: finalContent,
      contentChars: finalContent.length,
      source: "browser",
      duration: Date.now() - start,
    };
  } catch (error) {
    // Error result keeps the same shape so callers can treat it uniformly.
    return {
      url,
      title: "",
      content: null,
      snippet: "",
      contentChars: 0,
      error: error.message,
      source: "browser",
      duration: Date.now() - start,
    };
  } finally {
    // Always clean up the temporary tab.
    await closeTab(tab);
  }
}
|
|
1019
|
-
|
|
1020
|
-
async function fetchMultipleSources(
|
|
1021
|
-
sources,
|
|
1022
|
-
maxSources = 5,
|
|
1023
|
-
maxChars = 8000,
|
|
1024
|
-
concurrency = SOURCE_FETCH_CONCURRENCY,
|
|
1025
|
-
) {
|
|
1026
|
-
const toFetch = sources.slice(0, maxSources);
|
|
1027
|
-
if (toFetch.length === 0) return [];
|
|
1028
|
-
|
|
1029
|
-
const workerCount = Math.min(
|
|
1030
|
-
toFetch.length,
|
|
1031
|
-
Math.max(1, parseInt(String(concurrency), 10) || SOURCE_FETCH_CONCURRENCY),
|
|
1032
|
-
);
|
|
1033
|
-
|
|
1034
|
-
process.stderr.write(
|
|
1035
|
-
`[greedysearch] Fetching content from ${toFetch.length} sources via HTTP (concurrency ${workerCount})...\n`,
|
|
1036
|
-
);
|
|
1037
|
-
|
|
1038
|
-
const fetched = new Array(toFetch.length);
|
|
1039
|
-
let nextIndex = 0;
|
|
1040
|
-
let completed = 0;
|
|
1041
|
-
|
|
1042
|
-
async function worker() {
|
|
1043
|
-
while (true) {
|
|
1044
|
-
const index = nextIndex++;
|
|
1045
|
-
if (index >= toFetch.length) return;
|
|
1046
|
-
|
|
1047
|
-
const s = toFetch[index];
|
|
1048
|
-
const url = s.canonicalUrl || s.url;
|
|
1049
|
-
process.stderr.write(
|
|
1050
|
-
`[greedysearch] [${index + 1}/${toFetch.length}] Fetching: ${url.slice(0, 60)}...\n`,
|
|
1051
|
-
);
|
|
1052
|
-
|
|
1053
|
-
const result = await fetchSourceContent(url, maxChars);
|
|
1054
|
-
fetched[index] = {
|
|
1055
|
-
id: s.id,
|
|
1056
|
-
...result,
|
|
1057
|
-
};
|
|
1058
|
-
|
|
1059
|
-
if (result.content && result.content.length > 100) {
|
|
1060
|
-
process.stderr.write(
|
|
1061
|
-
`[greedysearch] ✓ ${result.source}: ${result.content.length} chars\n`,
|
|
1062
|
-
);
|
|
1063
|
-
} else if (result.error) {
|
|
1064
|
-
process.stderr.write(`[greedysearch] ✗ ${result.error.slice(0, 80)}\n`);
|
|
1065
|
-
}
|
|
1066
|
-
|
|
1067
|
-
completed += 1;
|
|
1068
|
-
process.stderr.write(`PROGRESS:fetch:${completed}/${toFetch.length}\n`);
|
|
1069
|
-
}
|
|
1070
|
-
}
|
|
1071
|
-
|
|
1072
|
-
await Promise.all(Array.from({ length: workerCount }, () => worker()));
|
|
1073
|
-
|
|
1074
|
-
// Log summary
|
|
1075
|
-
const successful = fetched.filter((f) => f.content && f.content.length > 100);
|
|
1076
|
-
const httpCount = fetched.filter((f) => f.source === "http").length;
|
|
1077
|
-
const browserCount = fetched.filter((f) => f.source === "browser").length;
|
|
1078
|
-
|
|
1079
|
-
process.stderr.write(
|
|
1080
|
-
`[greedysearch] Fetched ${successful.length}/${fetched.length} sources ` +
|
|
1081
|
-
`(HTTP: ${httpCount}, Browser: ${browserCount})\n`,
|
|
1082
|
-
);
|
|
1083
|
-
|
|
1084
|
-
return fetched;
|
|
1085
|
-
}
|
|
1086
|
-
|
|
1087
|
-
function pickTopSource(out) {
|
|
1088
|
-
if (Array.isArray(out._sources) && out._sources.length > 0)
|
|
1089
|
-
return out._sources[0];
|
|
1090
|
-
for (const engine of ["perplexity", "google", "bing"]) {
|
|
1091
|
-
const r = out[engine];
|
|
1092
|
-
if (r?.sources?.length > 0) return r.sources[0];
|
|
1093
|
-
}
|
|
1094
|
-
return null;
|
|
1095
|
-
}
|
|
1096
|
-
|
|
1097
|
-
async function synthesizeWithGemini(
|
|
1098
|
-
query,
|
|
1099
|
-
results,
|
|
1100
|
-
{ grounded = false, tabPrefix = null } = {},
|
|
1101
|
-
) {
|
|
1102
|
-
const sources = Array.isArray(results._sources)
|
|
1103
|
-
? results._sources
|
|
1104
|
-
: buildSourceRegistry(results);
|
|
1105
|
-
const prompt = buildSynthesisPrompt(query, results, sources, { grounded });
|
|
1106
|
-
|
|
1107
|
-
return new Promise((resolve, reject) => {
|
|
1108
|
-
const extraArgs = tabPrefix ? ["--tab", String(tabPrefix)] : [];
|
|
1109
|
-
const proc = spawn(
|
|
1110
|
-
"node",
|
|
1111
|
-
[join(__dir, "..", "extractors", "gemini.mjs"), prompt, ...extraArgs],
|
|
1112
|
-
{
|
|
1113
|
-
stdio: ["ignore", "pipe", "pipe"],
|
|
1114
|
-
env: { ...process.env, CDP_PROFILE_DIR: GREEDY_PROFILE_DIR },
|
|
1115
|
-
},
|
|
1116
|
-
);
|
|
1117
|
-
let out = "";
|
|
1118
|
-
let err = "";
|
|
1119
|
-
proc.stdout.on("data", (d) => (out += d));
|
|
1120
|
-
proc.stderr.on("data", (d) => (err += d));
|
|
1121
|
-
const t = setTimeout(() => {
|
|
1122
|
-
proc.kill();
|
|
1123
|
-
reject(new Error("Gemini synthesis timed out after 180s"));
|
|
1124
|
-
}, 180000);
|
|
1125
|
-
proc.on("close", (code) => {
|
|
1126
|
-
clearTimeout(t);
|
|
1127
|
-
if (code !== 0)
|
|
1128
|
-
reject(new Error(err.trim() || "gemini extractor failed"));
|
|
1129
|
-
else {
|
|
1130
|
-
try {
|
|
1131
|
-
const raw = JSON.parse(out.trim());
|
|
1132
|
-
const structured = parseStructuredJson(raw.answer || "");
|
|
1133
|
-
resolve({
|
|
1134
|
-
...normalizeSynthesisPayload(structured, sources, raw.answer || ""),
|
|
1135
|
-
rawAnswer: raw.answer || "",
|
|
1136
|
-
geminiSources: raw.sources || [],
|
|
1137
|
-
});
|
|
1138
|
-
} catch {
|
|
1139
|
-
reject(new Error(`bad JSON from gemini: ${out.slice(0, 100)}`));
|
|
1140
|
-
}
|
|
1141
|
-
}
|
|
1142
|
-
});
|
|
1143
|
-
});
|
|
1144
|
-
}
|
|
1145
|
-
|
|
1146
|
-
function slugify(query) {
|
|
1147
|
-
return query
|
|
1148
|
-
.toLowerCase()
|
|
1149
|
-
.replace(/[^a-z0-9]+/g, "-")
|
|
1150
|
-
.replace(/^-|-$/g, "")
|
|
1151
|
-
.slice(0, 60);
|
|
1152
|
-
}
|
|
1153
|
-
|
|
1154
|
-
function resultsDir() {
|
|
1155
|
-
const dir = join(__dir, "..", "results");
|
|
1156
|
-
mkdirSync(dir, { recursive: true });
|
|
1157
|
-
return dir;
|
|
1158
|
-
}
|
|
1159
|
-
|
|
1160
|
-
function writeOutput(
|
|
1161
|
-
data,
|
|
1162
|
-
outFile,
|
|
1163
|
-
{ inline = false, synthesize = false, query = "" } = {},
|
|
1164
|
-
) {
|
|
1165
|
-
const json = `${JSON.stringify(data, null, 2)}\n`;
|
|
1166
|
-
|
|
1167
|
-
if (outFile) {
|
|
1168
|
-
writeFileSync(outFile, json, "utf8");
|
|
1169
|
-
process.stderr.write(`Results written to ${outFile}\n`);
|
|
1170
|
-
return;
|
|
1171
|
-
}
|
|
1172
|
-
|
|
1173
|
-
if (inline) {
|
|
1174
|
-
process.stdout.write(json);
|
|
1175
|
-
return;
|
|
1176
|
-
}
|
|
1177
|
-
|
|
1178
|
-
const ts = new Date()
|
|
1179
|
-
.toISOString()
|
|
1180
|
-
.replace("T", "_")
|
|
1181
|
-
.replace(/[:.]/g, "-")
|
|
1182
|
-
.slice(0, 19);
|
|
1183
|
-
const slug = slugify(query);
|
|
1184
|
-
const base = join(resultsDir(), `${ts}_${slug}`);
|
|
1185
|
-
|
|
1186
|
-
writeFileSync(`${base}.json`, json, "utf8");
|
|
1187
|
-
|
|
1188
|
-
if (synthesize && data._synthesis?.answer) {
|
|
1189
|
-
writeFileSync(`${base}-synthesis.md`, data._synthesis.answer, "utf8");
|
|
1190
|
-
process.stdout.write(`${base}-synthesis.md\n`);
|
|
1191
|
-
} else {
|
|
1192
|
-
process.stdout.write(`${base}.json\n`);
|
|
1193
|
-
}
|
|
1194
|
-
}
|
|
1195
|
-
|
|
1196
|
-
const GREEDY_PROFILE_DIR = `${tmpdir().replace(/\\/g, "/")}/greedysearch-chrome-profile`;
|
|
1197
|
-
const ACTIVE_PORT_FILE = `${GREEDY_PROFILE_DIR}/DevToolsActivePort`;
|
|
1198
|
-
|
|
1199
|
-
// Tell cdp.mjs to prefer the GreedySearch Chrome profile's DevToolsActivePort,
|
|
1200
|
-
// so searches never accidentally attach to the user's main Chrome session.
|
|
1201
|
-
process.env.CDP_PROFILE_DIR = GREEDY_PROFILE_DIR;
|
|
1202
|
-
|
|
1203
|
-
function probeGreedyChrome(timeoutMs = 3000) {
|
|
1204
|
-
return new Promise((resolve) => {
|
|
1205
|
-
const req = http.get(
|
|
1206
|
-
`http://localhost:${GREEDY_PORT}/json/version`,
|
|
1207
|
-
(res) => {
|
|
1208
|
-
res.resume();
|
|
1209
|
-
resolve(res.statusCode === 200);
|
|
1210
|
-
},
|
|
1211
|
-
);
|
|
1212
|
-
req.on("error", () => resolve(false));
|
|
1213
|
-
req.setTimeout(timeoutMs, () => {
|
|
1214
|
-
req.destroy();
|
|
1215
|
-
resolve(false);
|
|
1216
|
-
});
|
|
1217
|
-
});
|
|
1218
|
-
}
|
|
1219
|
-
|
|
1220
|
-
// Write (or refresh) the DevToolsActivePort file for the GreedySearch Chrome so
|
|
1221
|
-
// cdp.mjs always connects to the right port rather than the user's main Chrome.
|
|
1222
|
-
// Uses atomic write (write to temp + rename) to prevent corruption from parallel processes.
|
|
1223
|
-
async function refreshPortFile() {
|
|
1224
|
-
const LOCK_FILE = `${ACTIVE_PORT_FILE}.lock`;
|
|
1225
|
-
const TEMP_FILE = `${ACTIVE_PORT_FILE}.tmp`;
|
|
1226
|
-
const LOCK_STALE_MS = 5000;
|
|
1227
|
-
const LOCK_WAIT_MS = 1000;
|
|
1228
|
-
|
|
1229
|
-
// File-based lock with exclusive create + stale lock recovery
|
|
1230
|
-
const lockAcquired = await new Promise((resolve) => {
|
|
1231
|
-
const start = Date.now();
|
|
1232
|
-
const tryLock = () => {
|
|
1233
|
-
try {
|
|
1234
|
-
const payload = JSON.stringify({ pid: process.pid, ts: Date.now() });
|
|
1235
|
-
writeFileSync(LOCK_FILE, payload, { encoding: "utf8", flag: "wx" });
|
|
1236
|
-
resolve(true);
|
|
1237
|
-
} catch (e) {
|
|
1238
|
-
if (e?.code !== "EEXIST") {
|
|
1239
|
-
if (Date.now() - start < LOCK_WAIT_MS) {
|
|
1240
|
-
setTimeout(tryLock, 50);
|
|
1241
|
-
} else {
|
|
1242
|
-
resolve(false);
|
|
1243
|
-
}
|
|
1244
|
-
return;
|
|
1245
|
-
}
|
|
1246
|
-
|
|
1247
|
-
try {
|
|
1248
|
-
const lockRaw = readFileSync(LOCK_FILE, "utf8").trim();
|
|
1249
|
-
const parsed = lockRaw.startsWith("{")
|
|
1250
|
-
? JSON.parse(lockRaw)
|
|
1251
|
-
: { ts: Number(lockRaw) };
|
|
1252
|
-
const lockTime = Number(parsed?.ts) || 0;
|
|
1253
|
-
|
|
1254
|
-
if (lockTime > 0 && Date.now() - lockTime > LOCK_STALE_MS) {
|
|
1255
|
-
try {
|
|
1256
|
-
unlinkSync(LOCK_FILE);
|
|
1257
|
-
} catch {}
|
|
1258
|
-
}
|
|
1259
|
-
|
|
1260
|
-
if (Date.now() - start < LOCK_WAIT_MS) {
|
|
1261
|
-
setTimeout(tryLock, 50);
|
|
1262
|
-
} else {
|
|
1263
|
-
resolve(false);
|
|
1264
|
-
}
|
|
1265
|
-
} catch {
|
|
1266
|
-
if (Date.now() - start < LOCK_WAIT_MS) {
|
|
1267
|
-
setTimeout(tryLock, 50);
|
|
1268
|
-
} else {
|
|
1269
|
-
resolve(false);
|
|
1270
|
-
}
|
|
1271
|
-
}
|
|
1272
|
-
}
|
|
1273
|
-
};
|
|
1274
|
-
tryLock();
|
|
1275
|
-
});
|
|
1276
|
-
|
|
1277
|
-
try {
|
|
1278
|
-
const body = await new Promise((res, rej) => {
|
|
1279
|
-
const req = http.get(
|
|
1280
|
-
`http://localhost:${GREEDY_PORT}/json/version`,
|
|
1281
|
-
(r) => {
|
|
1282
|
-
let b = "";
|
|
1283
|
-
r.on("data", (d) => (b += d));
|
|
1284
|
-
r.on("end", () => res(b));
|
|
1285
|
-
},
|
|
1286
|
-
);
|
|
1287
|
-
req.on("error", rej);
|
|
1288
|
-
req.setTimeout(3000, () => {
|
|
1289
|
-
req.destroy();
|
|
1290
|
-
rej(new Error("timeout"));
|
|
1291
|
-
});
|
|
1292
|
-
});
|
|
1293
|
-
const { webSocketDebuggerUrl } = JSON.parse(body);
|
|
1294
|
-
const wsPath = new URL(webSocketDebuggerUrl).pathname;
|
|
1295
|
-
|
|
1296
|
-
// Atomic write: write to temp file, then rename
|
|
1297
|
-
if (lockAcquired) {
|
|
1298
|
-
writeFileSync(TEMP_FILE, `${GREEDY_PORT}\n${wsPath}`, "utf8");
|
|
1299
|
-
try {
|
|
1300
|
-
unlinkSync(ACTIVE_PORT_FILE);
|
|
1301
|
-
} catch {}
|
|
1302
|
-
renameSync(TEMP_FILE, ACTIVE_PORT_FILE);
|
|
1303
|
-
}
|
|
1304
|
-
} catch {
|
|
1305
|
-
/* best-effort — launch.mjs already wrote the file on first start */
|
|
1306
|
-
} finally {
|
|
1307
|
-
if (lockAcquired) {
|
|
1308
|
-
try {
|
|
1309
|
-
unlinkSync(LOCK_FILE);
|
|
1310
|
-
} catch {}
|
|
1311
|
-
}
|
|
1312
|
-
}
|
|
1313
|
-
}
|
|
1314
|
-
|
|
1315
|
-
async function ensureChrome() {
|
|
1316
|
-
const ready = await probeGreedyChrome();
|
|
1317
|
-
if (!ready) {
|
|
1318
|
-
process.stderr.write(
|
|
1319
|
-
`GreedySearch Chrome not running on port ${GREEDY_PORT} — auto-launching...\n`,
|
|
1320
|
-
);
|
|
1321
|
-
await new Promise((resolve, reject) => {
|
|
1322
|
-
const proc = spawn("node", [join(__dir, "launch.mjs")], {
|
|
1323
|
-
stdio: ["ignore", process.stderr, process.stderr],
|
|
1324
|
-
});
|
|
1325
|
-
proc.on("close", (code) =>
|
|
1326
|
-
code === 0 ? resolve() : reject(new Error("launch.mjs failed")),
|
|
1327
|
-
);
|
|
1328
|
-
});
|
|
1329
|
-
} else {
|
|
1330
|
-
// Chrome already running — refresh the port file so cdp.mjs always picks
|
|
1331
|
-
// up the right port, even if the file was stale from a previous session.
|
|
1332
|
-
await refreshPortFile();
|
|
1333
|
-
}
|
|
1334
|
-
}
|
|
1335
|
-
|
|
1336
|
-
async function main() {
|
|
1337
|
-
const args = process.argv.slice(2);
|
|
1338
|
-
if (args.length < 2 || args[0] === "--help") {
|
|
1339
|
-
process.stderr.write(
|
|
1340
|
-
`${[
|
|
1341
|
-
'Usage: node search.mjs <engine> "<query>"',
|
|
1342
|
-
"",
|
|
1343
|
-
"Engines: perplexity (p), bing (b), google (g), gemini (gem), all",
|
|
1344
|
-
"",
|
|
1345
|
-
"Flags:",
|
|
1346
|
-
" --fast Quick mode: no source fetching or synthesis",
|
|
1347
|
-
" --synthesize Deprecated: synthesis is now default for multi-engine",
|
|
1348
|
-
" --deep-research Deprecated: source fetching is now default",
|
|
1349
|
-
" --fetch-top-source Fetch content from top source",
|
|
1350
|
-
" --inline Output JSON to stdout (for piping)",
|
|
1351
|
-
"",
|
|
1352
|
-
"Examples:",
|
|
1353
|
-
' node search.mjs all "Node.js streams" # Default: sources + synthesis',
|
|
1354
|
-
' node search.mjs all "quick check" --fast # Fast: no sources/synthesis',
|
|
1355
|
-
' node search.mjs p "what is memoization" # Single engine: fast mode',
|
|
1356
|
-
].join("\n")}\n`,
|
|
1357
|
-
);
|
|
1358
|
-
process.exit(1);
|
|
1359
|
-
}
|
|
1360
|
-
|
|
1361
|
-
await ensureChrome();
|
|
1362
|
-
|
|
1363
|
-
// Depth modes: fast (no synthesis/fetch), standard (synthesis+fetch 5 sources)
|
|
1364
|
-
const depthIdx = args.indexOf("--depth");
|
|
1365
|
-
let depth = "standard"; // DEFAULT: all "all" searches now include synthesis + source fetch
|
|
1366
|
-
|
|
1367
|
-
if (depthIdx !== -1 && args[depthIdx + 1]) {
|
|
1368
|
-
depth = args[depthIdx + 1];
|
|
1369
|
-
} else if (args.includes("--fast")) {
|
|
1370
|
-
depth = "fast"; // Explicit fast mode requested
|
|
1371
|
-
}
|
|
1372
|
-
|
|
1373
|
-
// For single engine (not "all"), default to fast unless explicit
|
|
1374
|
-
const engineArg = args.find((a) => !a.startsWith("--"))?.toLowerCase();
|
|
1375
|
-
if (engineArg !== "all" && depthIdx === -1 && !args.includes("--fast")) {
|
|
1376
|
-
// Single engine: default to fast for speed (no synthesis overhead)
|
|
1377
|
-
depth = "fast";
|
|
1378
|
-
}
|
|
1379
|
-
|
|
1380
|
-
// --deep-research / --deep flags map to deep mode (backward compat)
|
|
1381
|
-
if (args.includes("--deep-research")) {
|
|
1382
|
-
depth = "standard";
|
|
1383
|
-
}
|
|
1384
|
-
if (args.includes("--deep")) {
|
|
1385
|
-
depth = "deep";
|
|
1386
|
-
}
|
|
1387
|
-
|
|
1388
|
-
// For "all" engine with no explicit flags, standard is already default
|
|
1389
|
-
|
|
1390
|
-
const full = args.includes("--full");
|
|
1391
|
-
const short = !full;
|
|
1392
|
-
const fetchSource = args.includes("--fetch-top-source");
|
|
1393
|
-
const inline = args.includes("--inline");
|
|
1394
|
-
const outIdx = args.indexOf("--out");
|
|
1395
|
-
const outFile = outIdx !== -1 ? args[outIdx + 1] : null;
|
|
1396
|
-
const rest = args.filter(
|
|
1397
|
-
(a, i) =>
|
|
1398
|
-
a !== "--full" &&
|
|
1399
|
-
a !== "--short" &&
|
|
1400
|
-
a !== "--fast" &&
|
|
1401
|
-
a !== "--fetch-top-source" &&
|
|
1402
|
-
a !== "--synthesize" &&
|
|
1403
|
-
a !== "--deep-research" &&
|
|
1404
|
-
a !== "--deep" &&
|
|
1405
|
-
a !== "--inline" &&
|
|
1406
|
-
a !== "--depth" &&
|
|
1407
|
-
a !== "--out" &&
|
|
1408
|
-
(depthIdx === -1 || i !== depthIdx + 1) &&
|
|
1409
|
-
(outIdx === -1 || i !== outIdx + 1),
|
|
1410
|
-
);
|
|
1411
|
-
const engine = rest[0].toLowerCase();
|
|
1412
|
-
const query = rest.slice(1).join(" ");
|
|
1413
|
-
|
|
1414
|
-
if (engine === "all") {
|
|
1415
|
-
await cdp(["list"]); // refresh pages cache
|
|
1416
|
-
|
|
1417
|
-
// PARALLEL-SAFE: Always create fresh tabs for each engine to avoid race conditions
|
|
1418
|
-
// when multiple "all" searches run concurrently. Previously, reusing cached tabs
|
|
1419
|
-
// caused ERR_ABORTED and Uncaught errors as multiple processes fought over the same tab.
|
|
1420
|
-
const engineTabs = [];
|
|
1421
|
-
for (let i = 0; i < ALL_ENGINES.length; i++) {
|
|
1422
|
-
if (i > 0) await new Promise((r) => setTimeout(r, 300)); // small delay between tab opens
|
|
1423
|
-
const tab = await openNewTab();
|
|
1424
|
-
engineTabs.push(tab);
|
|
1425
|
-
}
|
|
1426
|
-
|
|
1427
|
-
// All tabs assigned — run extractors in parallel
|
|
1428
|
-
try {
|
|
1429
|
-
const results = await Promise.allSettled(
|
|
1430
|
-
ALL_ENGINES.map((e, i) =>
|
|
1431
|
-
runExtractor(ENGINES[e], query, engineTabs[i], short)
|
|
1432
|
-
.then((r) => {
|
|
1433
|
-
process.stderr.write(`PROGRESS:${e}:done\n`);
|
|
1434
|
-
return { engine: e, ...r };
|
|
1435
|
-
})
|
|
1436
|
-
.catch((err) => {
|
|
1437
|
-
process.stderr.write(`PROGRESS:${e}:error\n`);
|
|
1438
|
-
throw err;
|
|
1439
|
-
}),
|
|
1440
|
-
),
|
|
1441
|
-
);
|
|
1442
|
-
|
|
1443
|
-
const out = {};
|
|
1444
|
-
for (let i = 0; i < results.length; i++) {
|
|
1445
|
-
const r = results[i];
|
|
1446
|
-
if (r.status === "fulfilled") {
|
|
1447
|
-
out[r.value.engine] = r.value;
|
|
1448
|
-
} else {
|
|
1449
|
-
out[ALL_ENGINES[i]] = { error: r.reason?.message || "unknown error" };
|
|
1450
|
-
}
|
|
1451
|
-
}
|
|
1452
|
-
|
|
1453
|
-
await closeTabs(engineTabs);
|
|
1454
|
-
|
|
1455
|
-
// Build a canonical source registry across all engines
|
|
1456
|
-
out._sources = buildSourceRegistry(out, query);
|
|
1457
|
-
|
|
1458
|
-
// Source fetching: default for all "all" searches (was deep-research only)
|
|
1459
|
-
if (depth !== "fast" && out._sources.length > 0) {
|
|
1460
|
-
process.stderr.write("PROGRESS:source-fetch:start\n");
|
|
1461
|
-
const fetchedSources = await fetchMultipleSources(
|
|
1462
|
-
out._sources,
|
|
1463
|
-
5,
|
|
1464
|
-
8000,
|
|
1465
|
-
);
|
|
1466
|
-
|
|
1467
|
-
out._sources = mergeFetchDataIntoSources(out._sources, fetchedSources);
|
|
1468
|
-
out._fetchedSources = fetchedSources;
|
|
1469
|
-
process.stderr.write("PROGRESS:source-fetch:done\n");
|
|
1470
|
-
}
|
|
1471
|
-
|
|
1472
|
-
// Synthesize with Gemini for all non-fast modes (now default)
|
|
1473
|
-
if (depth !== "fast") {
|
|
1474
|
-
process.stderr.write("PROGRESS:synthesis:start\n");
|
|
1475
|
-
process.stderr.write(
|
|
1476
|
-
"[greedysearch] Synthesizing results with Gemini...\n",
|
|
1477
|
-
);
|
|
1478
|
-
try {
|
|
1479
|
-
const geminiTab = await openNewTab();
|
|
1480
|
-
await activateTab(geminiTab);
|
|
1481
|
-
const synthesis = await synthesizeWithGemini(query, out, {
|
|
1482
|
-
grounded: depth === "deep",
|
|
1483
|
-
tabPrefix: geminiTab,
|
|
1484
|
-
});
|
|
1485
|
-
out._synthesis = {
|
|
1486
|
-
...synthesis,
|
|
1487
|
-
synthesized: true,
|
|
1488
|
-
};
|
|
1489
|
-
await closeTab(geminiTab);
|
|
1490
|
-
process.stderr.write("PROGRESS:synthesis:done\n");
|
|
1491
|
-
} catch (e) {
|
|
1492
|
-
process.stderr.write(
|
|
1493
|
-
`[greedysearch] Synthesis failed: ${e.message}\n`,
|
|
1494
|
-
);
|
|
1495
|
-
out._synthesis = { error: e.message, synthesized: false };
|
|
1496
|
-
}
|
|
1497
|
-
}
|
|
1498
|
-
|
|
1499
|
-
if (fetchSource) {
|
|
1500
|
-
const top = pickTopSource(out);
|
|
1501
|
-
if (top)
|
|
1502
|
-
out._topSource = await fetchTopSource(top.canonicalUrl || top.url);
|
|
1503
|
-
}
|
|
1504
|
-
|
|
1505
|
-
// Always include confidence metrics for non-fast searches
|
|
1506
|
-
if (depth !== "fast") out._confidence = buildConfidence(out);
|
|
1507
|
-
|
|
1508
|
-
writeOutput(out, outFile, {
|
|
1509
|
-
inline,
|
|
1510
|
-
synthesize: depth !== "fast",
|
|
1511
|
-
query,
|
|
1512
|
-
});
|
|
1513
|
-
return;
|
|
1514
|
-
} finally {
|
|
1515
|
-
await closeTabs(engineTabs);
|
|
1516
|
-
}
|
|
1517
|
-
}
|
|
1518
|
-
|
|
1519
|
-
const script = ENGINES[engine];
|
|
1520
|
-
if (!script) {
|
|
1521
|
-
process.stderr.write(
|
|
1522
|
-
`Unknown engine: "${engine}"\nAvailable: ${Object.keys(ENGINES).join(", ")}\n`,
|
|
1523
|
-
);
|
|
1524
|
-
process.exit(1);
|
|
1525
|
-
}
|
|
1526
|
-
|
|
1527
|
-
try {
|
|
1528
|
-
const result = await runExtractor(script, query, null, short);
|
|
1529
|
-
if (fetchSource && result.sources?.length > 0) {
|
|
1530
|
-
result.topSource = await fetchTopSource(result.sources[0].url);
|
|
1531
|
-
}
|
|
1532
|
-
writeOutput(result, outFile, { inline, synthesize: false, query });
|
|
1533
|
-
} catch (e) {
|
|
1534
|
-
process.stderr.write(`Error: ${e.message}\n`);
|
|
1535
|
-
process.exit(1);
|
|
1536
|
-
}
|
|
1537
|
-
}
|
|
1538
|
-
|
|
1539
|
-
main();
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
// search.mjs — unified CLI for GreedySearch extractors
|
|
3
|
+
//
|
|
4
|
+
// Usage:
|
|
5
|
+
// node search.mjs <engine> "<query>"
|
|
6
|
+
// node search.mjs all "<query>"
|
|
7
|
+
//
|
|
8
|
+
// Engines:
|
|
9
|
+
// perplexity | pplx | p
|
|
10
|
+
// bing | copilot | b
|
|
11
|
+
// google | g
|
|
12
|
+
// gemini | gem
|
|
13
|
+
// all — fan-out to all engines in parallel
|
|
14
|
+
//
|
|
15
|
+
// Output: JSON to stdout, errors to stderr
|
|
16
|
+
//
|
|
17
|
+
// Examples:
|
|
18
|
+
// node search.mjs p "what is memoization"
|
|
19
|
+
// node search.mjs gem "latest React features"
|
|
20
|
+
// node search.mjs all "how does TCP congestion control work"
|
|
21
|
+
|
|
22
|
+
import {
|
|
23
|
+
ALL_ENGINES,
|
|
24
|
+
ENGINES,
|
|
25
|
+
} from "../src/search/constants.mjs";
|
|
26
|
+
import {
|
|
27
|
+
buildSourceRegistry,
|
|
28
|
+
mergeFetchDataIntoSources,
|
|
29
|
+
} from "../src/search/sources.mjs";
|
|
30
|
+
import { buildConfidence } from "../src/search/synthesis.mjs";
|
|
31
|
+
import { synthesizeWithGemini } from "../src/search/synthesis-runner.mjs";
|
|
32
|
+
import {
|
|
33
|
+
cdp,
|
|
34
|
+
ensureChrome,
|
|
35
|
+
openNewTab,
|
|
36
|
+
activateTab,
|
|
37
|
+
closeTab,
|
|
38
|
+
closeTabs,
|
|
39
|
+
} from "../src/search/chrome.mjs";
|
|
40
|
+
import { runExtractor } from "../src/search/engines.mjs";
|
|
41
|
+
import {
|
|
42
|
+
fetchMultipleSources,
|
|
43
|
+
fetchTopSource,
|
|
44
|
+
} from "../src/search/fetch-source.mjs";
|
|
45
|
+
import { writeOutput } from "../src/search/output.mjs";
|
|
46
|
+
|
|
47
|
+
// ─── Main ──────────────────────────────────────────────────────────────────
|
|
48
|
+
|
|
49
|
+
async function main() {
|
|
50
|
+
const args = process.argv.slice(2);
|
|
51
|
+
if (args.length < 2 || args[0] === "--help") {
|
|
52
|
+
process.stderr.write(
|
|
53
|
+
`${[
|
|
54
|
+
'Usage: node search.mjs <engine> "<query>"',
|
|
55
|
+
"",
|
|
56
|
+
"Engines: perplexity (p), bing (b), google (g), gemini (gem), all",
|
|
57
|
+
"",
|
|
58
|
+
"Flags:",
|
|
59
|
+
" --fast Quick mode: no source fetching or synthesis",
|
|
60
|
+
" --synthesize Deprecated: synthesis is now default for multi-engine",
|
|
61
|
+
" --deep-research Deprecated: source fetching is now default",
|
|
62
|
+
" --fetch-top-source Fetch content from top source",
|
|
63
|
+
" --inline Output JSON to stdout (for piping)",
|
|
64
|
+
"",
|
|
65
|
+
"Examples:",
|
|
66
|
+
' node search.mjs all "Node.js streams" # Default: sources + synthesis',
|
|
67
|
+
' node search.mjs all "quick check" --fast # Fast: no sources/synthesis',
|
|
68
|
+
' node search.mjs p "what is memoization" # Single engine: fast mode',
|
|
69
|
+
].join("\n")}\n`,
|
|
70
|
+
);
|
|
71
|
+
process.exit(1);
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
await ensureChrome();
|
|
75
|
+
|
|
76
|
+
// Depth modes: fast (no synthesis/fetch), standard (synthesis+fetch 5 sources)
|
|
77
|
+
const depthIdx = args.indexOf("--depth");
|
|
78
|
+
let depth = "standard"; // DEFAULT: synthesis + source fetch
|
|
79
|
+
|
|
80
|
+
if (depthIdx !== -1 && args[depthIdx + 1]) {
|
|
81
|
+
depth = args[depthIdx + 1];
|
|
82
|
+
} else if (args.includes("--fast")) {
|
|
83
|
+
depth = "fast"; // Explicit fast mode requested
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
// For single engine (not "all"), default to fast unless explicit
|
|
87
|
+
const engineArg = args.find((a) => !a.startsWith("--"))?.toLowerCase();
|
|
88
|
+
if (engineArg !== "all" && depthIdx === -1 && !args.includes("--fast")) {
|
|
89
|
+
depth = "fast";
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
// --deep-research / --deep flags map to deep mode (backward compat)
|
|
93
|
+
if (args.includes("--deep-research")) {
|
|
94
|
+
depth = "standard";
|
|
95
|
+
process.stderr.write("[greedysearch] --deep-research is deprecated; use --depth standard (now default)\n");
|
|
96
|
+
}
|
|
97
|
+
if (args.includes("--deep")) {
|
|
98
|
+
depth = "deep";
|
|
99
|
+
}
|
|
100
|
+
if (args.includes("--synthesize")) {
|
|
101
|
+
process.stderr.write("[greedysearch] --synthesize is deprecated; synthesis is now default for multi-engine\n");
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
const full = args.includes("--full");
|
|
105
|
+
const short = !full;
|
|
106
|
+
const fetchSource = args.includes("--fetch-top-source");
|
|
107
|
+
const inline = args.includes("--inline");
|
|
108
|
+
const outIdx = args.indexOf("--out");
|
|
109
|
+
const outFile = outIdx !== -1 ? args[outIdx + 1] : null;
|
|
110
|
+
const rest = args.filter(
|
|
111
|
+
(a, i) =>
|
|
112
|
+
a !== "--full" &&
|
|
113
|
+
a !== "--short" &&
|
|
114
|
+
a !== "--fast" &&
|
|
115
|
+
a !== "--fetch-top-source" &&
|
|
116
|
+
a !== "--synthesize" &&
|
|
117
|
+
a !== "--deep-research" &&
|
|
118
|
+
a !== "--deep" &&
|
|
119
|
+
a !== "--inline" &&
|
|
120
|
+
a !== "--depth" &&
|
|
121
|
+
a !== "--out" &&
|
|
122
|
+
a !== "--help" &&
|
|
123
|
+
(depthIdx === -1 || i !== depthIdx + 1) &&
|
|
124
|
+
(outIdx === -1 || i !== outIdx + 1),
|
|
125
|
+
);
|
|
126
|
+
const engine = rest[0].toLowerCase();
|
|
127
|
+
const query = rest.slice(1).join(" ");
|
|
128
|
+
|
|
129
|
+
if (engine === "all") {
|
|
130
|
+
await cdp(["list"]); // refresh pages cache
|
|
131
|
+
|
|
132
|
+
// Create fresh tabs for each engine to avoid race conditions
|
|
133
|
+
const engineTabs = [];
|
|
134
|
+
for (let i = 0; i < ALL_ENGINES.length; i++) {
|
|
135
|
+
if (i > 0) await new Promise((r) => setTimeout(r, 300));
|
|
136
|
+
const tab = await openNewTab();
|
|
137
|
+
engineTabs.push(tab);
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
try {
|
|
141
|
+
const results = await Promise.allSettled(
|
|
142
|
+
ALL_ENGINES.map((e, i) =>
|
|
143
|
+
runExtractor(ENGINES[e], query, engineTabs[i], short)
|
|
144
|
+
.then((r) => {
|
|
145
|
+
process.stderr.write(`PROGRESS:${e}:done\n`);
|
|
146
|
+
return { engine: e, ...r };
|
|
147
|
+
})
|
|
148
|
+
.catch((err) => {
|
|
149
|
+
process.stderr.write(`PROGRESS:${e}:error\n`);
|
|
150
|
+
throw err;
|
|
151
|
+
}),
|
|
152
|
+
),
|
|
153
|
+
);
|
|
154
|
+
|
|
155
|
+
const out = {};
|
|
156
|
+
for (let i = 0; i < results.length; i++) {
|
|
157
|
+
const r = results[i];
|
|
158
|
+
if (r.status === "fulfilled") {
|
|
159
|
+
out[r.value.engine] = r.value;
|
|
160
|
+
} else {
|
|
161
|
+
out[ALL_ENGINES[i]] = { error: r.reason?.message || "unknown error" };
|
|
162
|
+
}
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
// Build a canonical source registry across all engines
|
|
166
|
+
out._sources = buildSourceRegistry(out, query);
|
|
167
|
+
|
|
168
|
+
// Source fetching: default for all "all" searches
|
|
169
|
+
if (depth !== "fast" && out._sources.length > 0) {
|
|
170
|
+
process.stderr.write("PROGRESS:source-fetch:start\n");
|
|
171
|
+
const fetchedSources = await fetchMultipleSources(
|
|
172
|
+
out._sources,
|
|
173
|
+
5,
|
|
174
|
+
8000,
|
|
175
|
+
);
|
|
176
|
+
|
|
177
|
+
out._sources = mergeFetchDataIntoSources(out._sources, fetchedSources);
|
|
178
|
+
out._fetchedSources = fetchedSources;
|
|
179
|
+
process.stderr.write("PROGRESS:source-fetch:done\n");
|
|
180
|
+
}
|
|
181
|
+
|
|
182
|
+
// Synthesize with Gemini for all non-fast modes
|
|
183
|
+
if (depth !== "fast") {
|
|
184
|
+
process.stderr.write("PROGRESS:synthesis:start\n");
|
|
185
|
+
process.stderr.write(
|
|
186
|
+
"[greedysearch] Synthesizing results with Gemini...\n",
|
|
187
|
+
);
|
|
188
|
+
try {
|
|
189
|
+
const geminiTab = await openNewTab();
|
|
190
|
+
await activateTab(geminiTab);
|
|
191
|
+
const synthesis = await synthesizeWithGemini(query, out, {
|
|
192
|
+
grounded: depth === "deep",
|
|
193
|
+
tabPrefix: geminiTab,
|
|
194
|
+
});
|
|
195
|
+
out._synthesis = {
|
|
196
|
+
...synthesis,
|
|
197
|
+
synthesized: true,
|
|
198
|
+
};
|
|
199
|
+
await closeTab(geminiTab);
|
|
200
|
+
process.stderr.write("PROGRESS:synthesis:done\n");
|
|
201
|
+
} catch (e) {
|
|
202
|
+
process.stderr.write(
|
|
203
|
+
`[greedysearch] Synthesis failed: ${e.message}\n`,
|
|
204
|
+
);
|
|
205
|
+
out._synthesis = { error: e.message, synthesized: false };
|
|
206
|
+
}
|
|
207
|
+
}
|
|
208
|
+
|
|
209
|
+
if (fetchSource) {
|
|
210
|
+
const top = pickTopSource(out);
|
|
211
|
+
if (top)
|
|
212
|
+
out._topSource = await fetchTopSource(top.canonicalUrl || top.url);
|
|
213
|
+
}
|
|
214
|
+
|
|
215
|
+
// Always include confidence metrics for non-fast searches
|
|
216
|
+
if (depth !== "fast") out._confidence = buildConfidence(out);
|
|
217
|
+
|
|
218
|
+
writeOutput(out, outFile, {
|
|
219
|
+
inline,
|
|
220
|
+
synthesize: depth !== "fast",
|
|
221
|
+
query,
|
|
222
|
+
});
|
|
223
|
+
return;
|
|
224
|
+
} finally {
|
|
225
|
+
await closeTabs(engineTabs);
|
|
226
|
+
}
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
// Single engine
|
|
230
|
+
const script = ENGINES[engine];
|
|
231
|
+
if (!script) {
|
|
232
|
+
process.stderr.write(
|
|
233
|
+
`Unknown engine: "${engine}"\nAvailable: ${Object.keys(ENGINES).join(", ")}\n`,
|
|
234
|
+
);
|
|
235
|
+
process.exit(1);
|
|
236
|
+
}
|
|
237
|
+
|
|
238
|
+
try {
|
|
239
|
+
const result = await runExtractor(script, query, null, short);
|
|
240
|
+
if (fetchSource && result.sources?.length > 0) {
|
|
241
|
+
result.topSource = await fetchTopSource(result.sources[0].url);
|
|
242
|
+
}
|
|
243
|
+
writeOutput(result, outFile, { inline, synthesize: false, query });
|
|
244
|
+
} catch (e) {
|
|
245
|
+
process.stderr.write(`Error: ${e.message}\n`);
|
|
246
|
+
process.exit(1);
|
|
247
|
+
}
|
|
248
|
+
}
|
|
249
|
+
|
|
250
|
+
function pickTopSource(out) {
|
|
251
|
+
if (Array.isArray(out._sources) && out._sources.length > 0)
|
|
252
|
+
return out._sources[0];
|
|
253
|
+
for (const engine of ["perplexity", "google", "bing"]) {
|
|
254
|
+
const r = out[engine];
|
|
255
|
+
if (r?.sources?.length > 0) return r.sources[0];
|
|
256
|
+
}
|
|
257
|
+
return null;
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
main();
|