illuma-agents 1.0.8 → 1.0.9

This diff shows the changes between publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (217)
  1. package/LICENSE +1 -5
  2. package/dist/cjs/common/enum.cjs +1 -2
  3. package/dist/cjs/common/enum.cjs.map +1 -1
  4. package/dist/cjs/instrumentation.cjs.map +1 -1
  5. package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
  6. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +79 -2
  7. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
  8. package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -1
  9. package/dist/cjs/llm/bedrock/index.cjs +99 -0
  10. package/dist/cjs/llm/bedrock/index.cjs.map +1 -0
  11. package/dist/cjs/llm/fake.cjs.map +1 -1
  12. package/dist/cjs/llm/providers.cjs +13 -16
  13. package/dist/cjs/llm/providers.cjs.map +1 -1
  14. package/dist/cjs/llm/text.cjs.map +1 -1
  15. package/dist/cjs/messages/core.cjs +14 -14
  16. package/dist/cjs/messages/core.cjs.map +1 -1
  17. package/dist/cjs/messages/ids.cjs.map +1 -1
  18. package/dist/cjs/messages/prune.cjs.map +1 -1
  19. package/dist/cjs/run.cjs +10 -1
  20. package/dist/cjs/run.cjs.map +1 -1
  21. package/dist/cjs/splitStream.cjs.map +1 -1
  22. package/dist/cjs/stream.cjs +4 -1
  23. package/dist/cjs/stream.cjs.map +1 -1
  24. package/dist/cjs/tools/ToolNode.cjs +10 -1
  25. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  26. package/dist/cjs/tools/handlers.cjs +29 -25
  27. package/dist/cjs/tools/handlers.cjs.map +1 -1
  28. package/dist/cjs/tools/search/anthropic.cjs.map +1 -1
  29. package/dist/cjs/tools/search/content.cjs.map +1 -1
  30. package/dist/cjs/tools/search/firecrawl.cjs.map +1 -1
  31. package/dist/cjs/tools/search/format.cjs.map +1 -1
  32. package/dist/cjs/tools/search/highlights.cjs.map +1 -1
  33. package/dist/cjs/tools/search/rerankers.cjs.map +1 -1
  34. package/dist/cjs/tools/search/schema.cjs +25 -25
  35. package/dist/cjs/tools/search/schema.cjs.map +1 -1
  36. package/dist/cjs/tools/search/search.cjs +6 -1
  37. package/dist/cjs/tools/search/search.cjs.map +1 -1
  38. package/dist/cjs/tools/search/serper-scraper.cjs.map +1 -1
  39. package/dist/cjs/tools/search/tool.cjs +162 -35
  40. package/dist/cjs/tools/search/tool.cjs.map +1 -1
  41. package/dist/cjs/tools/search/utils.cjs.map +1 -1
  42. package/dist/cjs/utils/graph.cjs.map +1 -1
  43. package/dist/cjs/utils/llm.cjs +0 -1
  44. package/dist/cjs/utils/llm.cjs.map +1 -1
  45. package/dist/cjs/utils/misc.cjs.map +1 -1
  46. package/dist/cjs/utils/run.cjs.map +1 -1
  47. package/dist/cjs/utils/title.cjs +7 -7
  48. package/dist/cjs/utils/title.cjs.map +1 -1
  49. package/dist/esm/common/enum.mjs +1 -2
  50. package/dist/esm/common/enum.mjs.map +1 -1
  51. package/dist/esm/instrumentation.mjs.map +1 -1
  52. package/dist/esm/llm/anthropic/types.mjs.map +1 -1
  53. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +79 -2
  54. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
  55. package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -1
  56. package/dist/esm/llm/bedrock/index.mjs +97 -0
  57. package/dist/esm/llm/bedrock/index.mjs.map +1 -0
  58. package/dist/esm/llm/fake.mjs.map +1 -1
  59. package/dist/esm/llm/providers.mjs +2 -5
  60. package/dist/esm/llm/providers.mjs.map +1 -1
  61. package/dist/esm/llm/text.mjs.map +1 -1
  62. package/dist/esm/messages/core.mjs +14 -14
  63. package/dist/esm/messages/core.mjs.map +1 -1
  64. package/dist/esm/messages/ids.mjs.map +1 -1
  65. package/dist/esm/messages/prune.mjs.map +1 -1
  66. package/dist/esm/run.mjs +10 -1
  67. package/dist/esm/run.mjs.map +1 -1
  68. package/dist/esm/splitStream.mjs.map +1 -1
  69. package/dist/esm/stream.mjs +4 -1
  70. package/dist/esm/stream.mjs.map +1 -1
  71. package/dist/esm/tools/ToolNode.mjs +10 -1
  72. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  73. package/dist/esm/tools/handlers.mjs +30 -26
  74. package/dist/esm/tools/handlers.mjs.map +1 -1
  75. package/dist/esm/tools/search/anthropic.mjs.map +1 -1
  76. package/dist/esm/tools/search/content.mjs.map +1 -1
  77. package/dist/esm/tools/search/firecrawl.mjs.map +1 -1
  78. package/dist/esm/tools/search/format.mjs.map +1 -1
  79. package/dist/esm/tools/search/highlights.mjs.map +1 -1
  80. package/dist/esm/tools/search/rerankers.mjs.map +1 -1
  81. package/dist/esm/tools/search/schema.mjs +25 -25
  82. package/dist/esm/tools/search/schema.mjs.map +1 -1
  83. package/dist/esm/tools/search/search.mjs +6 -1
  84. package/dist/esm/tools/search/search.mjs.map +1 -1
  85. package/dist/esm/tools/search/serper-scraper.mjs.map +1 -1
  86. package/dist/esm/tools/search/tool.mjs +162 -35
  87. package/dist/esm/tools/search/tool.mjs.map +1 -1
  88. package/dist/esm/tools/search/utils.mjs.map +1 -1
  89. package/dist/esm/utils/graph.mjs.map +1 -1
  90. package/dist/esm/utils/llm.mjs +0 -1
  91. package/dist/esm/utils/llm.mjs.map +1 -1
  92. package/dist/esm/utils/misc.mjs.map +1 -1
  93. package/dist/esm/utils/run.mjs.map +1 -1
  94. package/dist/esm/utils/title.mjs +7 -7
  95. package/dist/esm/utils/title.mjs.map +1 -1
  96. package/dist/types/common/enum.d.ts +1 -2
  97. package/dist/types/llm/bedrock/index.d.ts +36 -0
  98. package/dist/types/tools/search/types.d.ts +2 -0
  99. package/dist/types/types/llm.d.ts +3 -8
  100. package/package.json +15 -11
  101. package/src/common/enum.ts +1 -2
  102. package/src/common/index.ts +1 -1
  103. package/src/instrumentation.ts +22 -22
  104. package/src/llm/anthropic/llm.spec.ts +1442 -1442
  105. package/src/llm/anthropic/types.ts +140 -140
  106. package/src/llm/anthropic/utils/message_inputs.ts +757 -660
  107. package/src/llm/anthropic/utils/output_parsers.ts +133 -133
  108. package/src/llm/anthropic/utils/tools.ts +29 -29
  109. package/src/llm/bedrock/index.ts +128 -0
  110. package/src/llm/fake.ts +133 -133
  111. package/src/llm/google/utils/tools.ts +160 -160
  112. package/src/llm/openai/types.ts +24 -24
  113. package/src/llm/openai/utils/isReasoningModel.test.ts +90 -90
  114. package/src/llm/providers.ts +2 -7
  115. package/src/llm/text.ts +94 -94
  116. package/src/messages/core.ts +463 -463
  117. package/src/messages/formatAgentMessages.tools.test.ts +400 -400
  118. package/src/messages/formatMessage.test.ts +693 -693
  119. package/src/messages/ids.ts +26 -26
  120. package/src/messages/prune.ts +567 -567
  121. package/src/messages/shiftIndexTokenCountMap.test.ts +81 -81
  122. package/src/mockStream.ts +98 -98
  123. package/src/prompts/collab.ts +5 -5
  124. package/src/prompts/index.ts +1 -1
  125. package/src/prompts/taskmanager.ts +61 -61
  126. package/src/run.ts +13 -4
  127. package/src/scripts/ant_web_search_edge_case.ts +162 -0
  128. package/src/scripts/ant_web_search_error_edge_case.ts +148 -0
  129. package/src/scripts/args.ts +48 -48
  130. package/src/scripts/caching.ts +123 -123
  131. package/src/scripts/code_exec_files.ts +193 -193
  132. package/src/scripts/empty_input.ts +137 -137
  133. package/src/scripts/image.ts +178 -178
  134. package/src/scripts/memory.ts +97 -97
  135. package/src/scripts/thinking.ts +149 -149
  136. package/src/specs/anthropic.simple.test.ts +67 -0
  137. package/src/specs/spec.utils.ts +3 -3
  138. package/src/specs/token-distribution-edge-case.test.ts +316 -316
  139. package/src/specs/tool-error.test.ts +193 -193
  140. package/src/splitStream.test.ts +691 -691
  141. package/src/splitStream.ts +234 -234
  142. package/src/stream.test.ts +94 -94
  143. package/src/stream.ts +4 -1
  144. package/src/tools/ToolNode.ts +12 -1
  145. package/src/tools/handlers.ts +32 -28
  146. package/src/tools/search/anthropic.ts +51 -51
  147. package/src/tools/search/content.test.ts +173 -173
  148. package/src/tools/search/content.ts +147 -147
  149. package/src/tools/search/direct-url.test.ts +530 -0
  150. package/src/tools/search/firecrawl.ts +210 -210
  151. package/src/tools/search/format.ts +250 -250
  152. package/src/tools/search/highlights.ts +320 -320
  153. package/src/tools/search/index.ts +2 -2
  154. package/src/tools/search/jina-reranker.test.ts +126 -126
  155. package/src/tools/search/output.md +2775 -2775
  156. package/src/tools/search/rerankers.ts +242 -242
  157. package/src/tools/search/schema.ts +63 -63
  158. package/src/tools/search/search.ts +766 -759
  159. package/src/tools/search/serper-scraper.ts +155 -155
  160. package/src/tools/search/test.html +883 -883
  161. package/src/tools/search/test.md +642 -642
  162. package/src/tools/search/test.ts +159 -159
  163. package/src/tools/search/tool.ts +619 -471
  164. package/src/tools/search/types.ts +689 -687
  165. package/src/tools/search/utils.ts +79 -79
  166. package/src/types/index.ts +6 -6
  167. package/src/types/llm.ts +2 -8
  168. package/src/utils/graph.ts +10 -10
  169. package/src/utils/llm.ts +26 -27
  170. package/src/utils/llmConfig.ts +5 -3
  171. package/src/utils/logging.ts +48 -48
  172. package/src/utils/misc.ts +57 -57
  173. package/src/utils/run.ts +100 -100
  174. package/src/utils/title.ts +165 -165
  175. package/dist/cjs/llm/ollama/index.cjs +0 -70
  176. package/dist/cjs/llm/ollama/index.cjs.map +0 -1
  177. package/dist/cjs/llm/ollama/utils.cjs +0 -158
  178. package/dist/cjs/llm/ollama/utils.cjs.map +0 -1
  179. package/dist/esm/llm/ollama/index.mjs +0 -68
  180. package/dist/esm/llm/ollama/index.mjs.map +0 -1
  181. package/dist/esm/llm/ollama/utils.mjs +0 -155
  182. package/dist/esm/llm/ollama/utils.mjs.map +0 -1
  183. package/dist/types/llm/ollama/index.d.ts +0 -8
  184. package/dist/types/llm/ollama/utils.d.ts +0 -7
  185. package/src/llm/ollama/index.ts +0 -92
  186. package/src/llm/ollama/utils.ts +0 -193
  187. package/src/proto/CollabGraph.ts +0 -269
  188. package/src/proto/TaskManager.ts +0 -243
  189. package/src/proto/collab.ts +0 -200
  190. package/src/proto/collab_design.ts +0 -184
  191. package/src/proto/collab_design_v2.ts +0 -224
  192. package/src/proto/collab_design_v3.ts +0 -255
  193. package/src/proto/collab_design_v4.ts +0 -220
  194. package/src/proto/collab_design_v5.ts +0 -251
  195. package/src/proto/collab_graph.ts +0 -181
  196. package/src/proto/collab_original.ts +0 -123
  197. package/src/proto/example.ts +0 -93
  198. package/src/proto/example_new.ts +0 -68
  199. package/src/proto/example_old.ts +0 -201
  200. package/src/proto/example_test.ts +0 -152
  201. package/src/proto/example_test_anthropic.ts +0 -100
  202. package/src/proto/log_stream.ts +0 -202
  203. package/src/proto/main_collab_community_event.ts +0 -133
  204. package/src/proto/main_collab_design_v2.ts +0 -96
  205. package/src/proto/main_collab_design_v4.ts +0 -100
  206. package/src/proto/main_collab_design_v5.ts +0 -135
  207. package/src/proto/main_collab_global_analysis.ts +0 -122
  208. package/src/proto/main_collab_hackathon_event.ts +0 -153
  209. package/src/proto/main_collab_space_mission.ts +0 -153
  210. package/src/proto/main_philosophy.ts +0 -210
  211. package/src/proto/original_script.ts +0 -126
  212. package/src/proto/standard.ts +0 -100
  213. package/src/proto/stream.ts +0 -56
  214. package/src/proto/tasks.ts +0 -118
  215. package/src/proto/tools/global_analysis_tools.ts +0 -86
  216. package/src/proto/tools/space_mission_tools.ts +0 -60
  217. package/src/proto/vertexai.ts +0 -54
package/src/tools/search/search.ts
@@ -1,759 +1,766 @@
 import axios from 'axios';
 import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
 import type * as t from './types';
 import { getAttribution, createDefaultLogger } from './utils';
 import { BaseReranker } from './rerankers';
 
 const chunker = {
   cleanText: (text: string): string => {
     if (!text) return '';
 
     /** Normalized all line endings to '\n' */
     const normalizedText = text.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
 
     /** Handle multiple backslashes followed by newlines
      * This replaces patterns like '\\\\\\n' with a single newline */
     const fixedBackslashes = normalizedText.replace(/\\+\n/g, '\n');
 
     /** Cleaned up consecutive newlines, tabs, and spaces around newlines */
     const cleanedNewlines = fixedBackslashes.replace(/[\t ]*\n[\t \n]*/g, '\n');
 
     /** Cleaned up excessive spaces and tabs */
     const cleanedSpaces = cleanedNewlines.replace(/[ \t]+/g, ' ');
 
     return cleanedSpaces.trim();
   },
   splitText: async (
     text: string,
     options?: {
       chunkSize?: number;
       chunkOverlap?: number;
       separators?: string[];
     }
   ): Promise<string[]> => {
     const chunkSize = options?.chunkSize ?? 150;
     const chunkOverlap = options?.chunkOverlap ?? 50;
     const separators = options?.separators || ['\n\n', '\n'];
 
     const splitter = new RecursiveCharacterTextSplitter({
       separators,
       chunkSize,
       chunkOverlap,
     });
 
     return await splitter.splitText(text);
   },
 
   splitTexts: async (
     texts: string[],
     options?: {
       chunkSize?: number;
       chunkOverlap?: number;
       separators?: string[];
     },
     logger?: t.Logger
   ): Promise<string[][]> => {
     // Split multiple texts
     const logger_ = logger || createDefaultLogger();
     const promises = texts.map((text) =>
       chunker.splitText(text, options).catch((error) => {
         logger_.error('Error splitting text:', error);
         return [text];
       })
     );
     return Promise.all(promises);
   },
 };
 
 function createSourceUpdateCallback(sourceMap: Map<string, t.ValidSource>) {
   return (link: string, update?: Partial<t.ValidSource>): void => {
     const source = sourceMap.get(link);
     if (source) {
       sourceMap.set(link, {
         ...source,
         ...update,
       });
     }
   };
 }
 
 const getHighlights = async ({
   query,
   content,
   reranker,
   topResults = 5,
   logger,
 }: {
   content: string;
   query: string;
   reranker?: BaseReranker;
   topResults?: number;
   logger?: t.Logger;
 }): Promise<t.Highlight[] | undefined> => {
   const logger_ = logger || createDefaultLogger();
 
   if (!content) {
     logger_.warn('No content provided for highlights');
     return;
   }
   if (!reranker) {
     logger_.warn('No reranker provided for highlights');
     return;
   }
 
   try {
     const documents = await chunker.splitText(content);
     if (Array.isArray(documents)) {
       return await reranker.rerank(query, documents, topResults);
     } else {
       logger_.error(
         'Expected documents to be an array, got:',
         typeof documents
       );
       return;
     }
   } catch (error) {
     logger_.error('Error in content processing:', error);
     return;
   }
 };
 
 const createSerperAPI = (
   apiKey?: string
 ): {
   getSources: (params: t.GetSourcesParams) => Promise<t.SearchResult>;
 } => {
   const config = {
     apiKey: apiKey ?? process.env.SERPER_API_KEY,
     apiUrl: 'https://google.serper.dev/search',
     timeout: 10000,
   };
 
   if (config.apiKey == null || config.apiKey === '') {
     throw new Error('SERPER_API_KEY is required for SerperAPI');
   }
 
   const getSources = async ({
     query,
     date,
     country,
     safeSearch,
     numResults = 8,
     type,
   }: t.GetSourcesParams): Promise<t.SearchResult> => {
     if (!query.trim()) {
       return { success: false, error: 'Query cannot be empty' };
     }
 
     try {
       const safe = ['off', 'moderate', 'active'] as const;
       const payload: t.SerperSearchPayload = {
         q: query,
         safe: safe[safeSearch ?? 1],
         num: Math.min(Math.max(1, numResults), 10),
       };
 
       // Set the search type if provided
       if (type) {
         payload.type = type;
       }
 
       if (date != null) {
         payload.tbs = `qdr:${date}`;
       }
 
       if (country != null && country !== '') {
         payload['gl'] = country.toLowerCase();
       }
 
       // Determine the API endpoint based on the search type
       let apiEndpoint = config.apiUrl;
       if (type === 'images') {
         apiEndpoint = 'https://google.serper.dev/images';
       } else if (type === 'videos') {
         apiEndpoint = 'https://google.serper.dev/videos';
       } else if (type === 'news') {
         apiEndpoint = 'https://google.serper.dev/news';
       }
 
       const response = await axios.post<t.SerperResultData>(
         apiEndpoint,
         payload,
         {
           headers: {
             'X-API-KEY': config.apiKey,
             'Content-Type': 'application/json',
           },
           timeout: config.timeout,
         }
       );
 
       const data = response.data;
       const results: t.SearchResultData = {
         organic: data.organic,
         images: data.images ?? [],
         answerBox: data.answerBox,
         topStories: data.topStories ?? [],
         peopleAlsoAsk: data.peopleAlsoAsk,
         knowledgeGraph: data.knowledgeGraph,
         relatedSearches: data.relatedSearches,
         videos: data.videos ?? [],
         news: data.news ?? [],
       };
 
       return { success: true, data: results };
     } catch (error) {
       const errorMessage =
         error instanceof Error ? error.message : String(error);
       return { success: false, error: `API request failed: ${errorMessage}` };
     }
   };
 
   return { getSources };
 };
 
 const createSearXNGAPI = (
   instanceUrl?: string,
   apiKey?: string
 ): {
   getSources: (params: t.GetSourcesParams) => Promise<t.SearchResult>;
 } => {
   const config = {
     instanceUrl: instanceUrl ?? process.env.SEARXNG_INSTANCE_URL,
     apiKey: apiKey ?? process.env.SEARXNG_API_KEY,
     defaultLocation: 'all',
     timeout: 10000,
   };
 
   if (config.instanceUrl == null || config.instanceUrl === '') {
     throw new Error('SEARXNG_INSTANCE_URL is required for SearXNG API');
   }
 
   const getSources = async ({
     query,
     numResults = 8,
     safeSearch,
     type,
   }: t.GetSourcesParams): Promise<t.SearchResult> => {
     if (!query.trim()) {
       return { success: false, error: 'Query cannot be empty' };
     }
 
     try {
       // Ensure the instance URL ends with /search
       if (config.instanceUrl == null || config.instanceUrl === '') {
         return { success: false, error: 'Instance URL is not defined' };
       }
 
       let searchUrl = config.instanceUrl;
       if (!searchUrl.endsWith('/search')) {
         searchUrl = searchUrl.replace(/\/$/, '') + '/search';
       }
 
       // Determine the search category based on the type
       let category = 'general';
       if (type === 'images') {
         category = 'images';
       } else if (type === 'videos') {
         category = 'videos';
       } else if (type === 'news') {
         category = 'news';
       }
 
       // Prepare parameters for SearXNG
       const params: t.SearxNGSearchPayload = {
         q: query,
         format: 'json',
         pageno: 1,
         categories: category,
         language: 'all',
         safesearch: safeSearch,
         engines: 'google,bing,duckduckgo',
       };
 
       const headers: Record<string, string> = {
         'Content-Type': 'application/json',
       };
 
       if (config.apiKey != null && config.apiKey !== '') {
         headers['X-API-Key'] = config.apiKey;
       }
 
       const response = await axios.get(searchUrl, {
         headers,
         params,
         timeout: config.timeout,
       });
 
       const data = response.data;
 
       // Helper function to identify news results since SearXNG doesn't provide that classification by default
       const isNewsResult = (result: t.SearXNGResult): boolean => {
         const url = result.url?.toLowerCase() ?? '';
         const title = result.title?.toLowerCase() ?? '';
 
         // News-related keywords in title/content
         const newsKeywords = [
           'breaking news',
           'latest news',
           'top stories',
           'news today',
           'developing story',
           'trending news',
           'news',
         ];
 
         // Check if title/content contains news keywords
         const hasNewsKeywords = newsKeywords.some(
           (keyword) => title.toLowerCase().includes(keyword) // just title probably fine, content parsing is overkill for what we need: || content.includes(keyword)
         );
 
         // Check if URL contains news-related paths
         const hasNewsPath =
           url.includes('/news/') ||
           url.includes('/world/') ||
           url.includes('/politics/') ||
           url.includes('/breaking/');
 
         return hasNewsKeywords || hasNewsPath;
       };
 
       // Transform SearXNG results to match SerperAPI format
       const organicResults = (data.results ?? [])
         .slice(0, numResults)
         .map((result: t.SearXNGResult, index: number) => {
           let attribution = '';
           try {
             attribution = new URL(result.url ?? '').hostname;
           } catch {
             attribution = '';
           }
 
           return {
             position: index + 1,
             title: result.title ?? '',
             link: result.url ?? '',
             snippet: result.content ?? '',
             date: result.publishedDate ?? '',
             attribution,
           };
         });
 
       const imageResults = (data.results ?? [])
         .filter((result: t.SearXNGResult) => result.img_src)
         .slice(0, 6)
         .map((result: t.SearXNGResult, index: number) => ({
           title: result.title ?? '',
           imageUrl: result.img_src ?? '',
           position: index + 1,
           source: new URL(result.url ?? '').hostname,
           domain: new URL(result.url ?? '').hostname,
           link: result.url ?? '',
         }));
 
       // Extract news results from organic results
       const newsResults = (data.results ?? [])
         .filter(isNewsResult)
         .map((result: t.SearXNGResult, index: number) => {
           let attribution = '';
           try {
             attribution = new URL(result.url ?? '').hostname;
           } catch {
             attribution = '';
           }
 
           return {
             title: result.title ?? '',
             link: result.url ?? '',
             snippet: result.content ?? '',
             date: result.publishedDate ?? '',
             source: attribution,
             imageUrl: result.img_src ?? '',
             position: index + 1,
           };
         });
 
       const topStories = newsResults.slice(0, 5);
 
       const relatedSearches = Array.isArray(data.suggestions)
         ? data.suggestions.map((suggestion: string) => ({ query: suggestion }))
         : [];
 
       const results: t.SearchResultData = {
         organic: organicResults,
         images: imageResults,
         topStories: topStories, // Use first 5 extracted news as top stories
         relatedSearches,
         videos: [],
         news: newsResults,
         // Add empty arrays for other Serper fields to maintain parity
         places: [],
         shopping: [],
         peopleAlsoAsk: [],
         knowledgeGraph: undefined,
         answerBox: undefined,
       };
 
       return { success: true, data: results };
     } catch (error) {
       const errorMessage =
         error instanceof Error ? error.message : String(error);
       return {
         success: false,
         error: `SearXNG API request failed: ${errorMessage}`,
       };
     }
   };
 
   return { getSources };
 };
 
 export const createSearchAPI = (
   config: t.SearchConfig
 ): {
   getSources: (params: t.GetSourcesParams) => Promise<t.SearchResult>;
 } => {
   const {
     searchProvider = 'serper',
     serperApiKey,
     searxngInstanceUrl,
     searxngApiKey,
   } = config;
 
   if (searchProvider.toLowerCase() === 'serper') {
     return createSerperAPI(serperApiKey);
   } else if (searchProvider.toLowerCase() === 'searxng') {
     return createSearXNGAPI(searxngInstanceUrl, searxngApiKey);
   } else {
     throw new Error(
       `Invalid search provider: ${searchProvider}. Must be 'serper' or 'searxng'`
     );
   }
 };
 
 export const createSourceProcessor = (
   config: t.ProcessSourcesConfig = {},
   scraperInstance?: t.BaseScraper
 ): {
   processSources: (
     fields: t.ProcessSourcesFields
   ) => Promise<t.SearchResultData>;
   topResults: number;
 } => {
   if (!scraperInstance) {
     throw new Error('Scraper instance is required');
   }
   const {
     topResults = 5,
     // strategies = ['no_extraction'],
     // filterContent = true,
     reranker,
     logger,
   } = config;
 
   const logger_ = logger || createDefaultLogger();
   const scraper = scraperInstance;
 
   const webScraper = {
     scrapeMany: async ({
       query,
       links,
       onGetHighlights,
     }: {
       query: string;
       links: string[];
       onGetHighlights: t.SearchToolConfig['onGetHighlights'];
     }): Promise<Array<t.ScrapeResult>> => {
       logger_.debug(`Scraping ${links.length} links`);
       const promises: Array<Promise<t.ScrapeResult>> = [];
       try {
         for (let i = 0; i < links.length; i++) {
           const currentLink = links[i];
           const promise: Promise<t.ScrapeResult> = scraper
             .scrapeUrl(currentLink, {})
             .then(([url, response]) => {
               const attribution = getAttribution(
                 url,
                 response.data?.metadata,
                 logger_
               );
               if (response.success && response.data) {
                 const [content, references] = scraper.extractContent(response);
                 return {
                   url,
                   references,
                   attribution,
                   content: chunker.cleanText(content),
                 } as t.ScrapeResult;
               } else {
                 logger_.error(
                   `Error scraping ${url}: ${response.error ?? 'Unknown error'}`
                 );
               }
 
               return {
                 url,
                 attribution,
                 error: true,
                 content: '',
               } as t.ScrapeResult;
             })
             .then(async (result) => {
               try {
                 if (result.error != null) {
                   logger_.error(
                     `Error scraping ${result.url}: ${result.content}`
                   );
                   return {
                     ...result,
                   };
                 }
                 const highlights = await getHighlights({
                   query,
                   reranker,
                   content: result.content,
                   logger: logger_,
                 });
                 if (onGetHighlights) {
                   onGetHighlights(result.url);
                 }
                 return {
                   ...result,
                   highlights,
                 };
               } catch (error) {
                 logger_.error('Error processing scraped content:', error);
                 return {
                   ...result,
                 };
               }
             })
             .catch((error) => {
               logger_.error(`Error scraping ${currentLink}:`, error);
               return {
                 url: currentLink,
                 error: true,
                 content: '',
               };
             });
           promises.push(promise);
         }
         return await Promise.all(promises);
       } catch (error) {
         logger_.error('Error in scrapeMany:', error);
         return [];
       }
     },
   };
 
   const fetchContents = async ({
     links,
     query,
     target,
     onGetHighlights,
     onContentScraped,
   }: {
     links: string[];
     query: string;
     target: number;
     onGetHighlights: t.SearchToolConfig['onGetHighlights'];
     onContentScraped?: (link: string, update?: Partial<t.ValidSource>) => void;
   }): Promise<void> => {
     const initialLinks = links.slice(0, target);
     // const remainingLinks = links.slice(target).reverse();
     const results = await webScraper.scrapeMany({
       query,
       links: initialLinks,
       onGetHighlights,
     });
     for (const result of results) {
       if (result.error === true) {
         continue;
       }
       const { url, content, attribution, references, highlights } = result;
       onContentScraped?.(url, {
         content,
         attribution,
         references,
         highlights,
       });
     }
   };
 
   const processSources = async ({
     result,
     numElements,
     query,
     news,
     proMode = true,
     onGetHighlights,
+    skipScraping = false,
   }: t.ProcessSourcesFields): Promise<t.SearchResultData> => {
     try {
       if (!result.data) {
         return {
           organic: [],
           topStories: [],
           images: [],
           relatedSearches: [],
         };
       } else if (!result.data.organic) {
         return result.data;
       }
 
+      // If content was already extracted directly (e.g., direct URL extraction), skip scraping
+      if (skipScraping) {
+        logger_.debug('Skipping additional scraping - content already extracted');
+        return result.data;
+      }
+
       if (!proMode) {
         const wikiSources = result.data.organic.filter((source) =>
           source.link.includes('wikipedia.org')
         );
 
         if (!wikiSources.length) {
           return result.data;
         }
 
         const wikiSourceMap = new Map<string, t.ValidSource>();
         wikiSourceMap.set(wikiSources[0].link, wikiSources[0]);
         const onContentScraped = createSourceUpdateCallback(wikiSourceMap);
         await fetchContents({
           query,
           target: 1,
           onGetHighlights,
           onContentScraped,
           links: [wikiSources[0].link],
         });
 
         for (let i = 0; i < result.data.organic.length; i++) {
           const source = result.data.organic[i];
           const updatedSource = wikiSourceMap.get(source.link);
           if (updatedSource) {
             result.data.organic[i] = {
               ...source,
               ...updatedSource,
             };
           }
         }
 
         return result.data;
       }
 
       const sourceMap = new Map<string, t.ValidSource>();
       const organicLinksSet = new Set<string>();
 
       // Collect organic links
       const organicLinks = collectLinks(
         result.data.organic,
         sourceMap,
         organicLinksSet
       );
 
       // Collect top story links, excluding any that are already in organic links
       const topStories = result.data.topStories ?? [];
       const topStoryLinks = collectLinks(
         topStories,
         sourceMap,
         organicLinksSet
       );
 
       if (organicLinks.length === 0 && (topStoryLinks.length === 0 || !news)) {
         return result.data;
       }
 
       const onContentScraped = createSourceUpdateCallback(sourceMap);
       const promises: Promise<void>[] = [];
 
       // Process organic links
       if (organicLinks.length > 0) {
         promises.push(
           fetchContents({
             query,
             onGetHighlights,
             onContentScraped,
             links: organicLinks,
             target: numElements,
           })
         );
       }
 
       // Process top story links
       if (news && topStoryLinks.length > 0) {
         promises.push(
           fetchContents({
             query,
             onGetHighlights,
             onContentScraped,
             links: topStoryLinks,
             target: numElements,
           })
         );
       }
 
       await Promise.all(promises);
 
       if (result.data.organic.length > 0) {
         updateSourcesWithContent(result.data.organic, sourceMap);
       }
 
       if (news && topStories.length > 0) {
         updateSourcesWithContent(topStories, sourceMap);
       }
 
       return result.data;
     } catch (error) {
       logger_.error('Error in processSources:', error);
       return {
         organic: [],
         topStories: [],
         images: [],
         relatedSearches: [],
         ...result.data,
         error: error instanceof Error ? error.message : String(error),
       };
     }
   };
 
   return {
     processSources,
     topResults,
   };
 };
 
 /** Helper function to collect links and update sourceMap */
 function collectLinks(
   sources: Array<t.OrganicResult | t.TopStoryResult>,
   sourceMap: Map<string, t.ValidSource>,
   existingLinksSet?: Set<string>
 ): string[] {
   const links: string[] = [];
 
   for (const source of sources) {
     if (source.link) {
       // For topStories, only add if not already in organic links
       if (existingLinksSet && existingLinksSet.has(source.link)) {
         continue;
       }
 
       links.push(source.link);
       if (existingLinksSet) {
         existingLinksSet.add(source.link);
       }
       sourceMap.set(source.link, source as t.ValidSource);
     }
   }
 
   return links;
 }
 
 /** Helper function to update sources with scraped content */
 function updateSourcesWithContent<T extends t.ValidSource>(
   sources: T[],
   sourceMap: Map<string, t.ValidSource>
 ): void {
   for (let i = 0; i < sources.length; i++) {
     const source = sources[i];
     const updatedSource = sourceMap.get(source.link);
     if (updatedSource) {
       sources[i] = {
         ...source,
         ...updatedSource,
       } as T;
     }
   }
 }
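The only functional change in this hunk is the new optional skipScraping flag on processSources, which returns result.data unchanged instead of re-scraping links whose content was already extracted (e.g., by direct URL extraction). A minimal caller sketch, assuming an import path of './search' and an available t.BaseScraper instance; the function name, query string, and config values below are illustrative, not taken from this package:

import type * as t from './types';
import { createSearchAPI, createSourceProcessor } from './search';

// Hypothetical helper showing the new 1.0.9 option in use.
async function searchWithoutRescrape(scraper: t.BaseScraper) {
  const searchApi = createSearchAPI({ searchProvider: 'serper' });
  const { processSources } = createSourceProcessor({ topResults: 5 }, scraper);

  const result = await searchApi.getSources({ query: 'example query' });
  return processSources({
    result,
    query: 'example query',
    numElements: 5,
    news: false,
    // New in 1.0.9: skip scraping when source content was already extracted.
    skipScraping: true,
  });
}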