prepia 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +312 -0
  3. package/bin/prepia.mjs +119 -0
  4. package/package.json +53 -0
  5. package/skill/SKILL.md +148 -0
  6. package/skill/config.json +29 -0
  7. package/src/analytics/dashboard.mjs +84 -0
  8. package/src/analytics/tracker.mjs +131 -0
  9. package/src/api/middleware.mjs +219 -0
  10. package/src/api/routes.mjs +142 -0
  11. package/src/api/server.mjs +150 -0
  12. package/src/cache/disk-store.mjs +199 -0
  13. package/src/cache/manager.mjs +142 -0
  14. package/src/cache/memory-store.mjs +205 -0
  15. package/src/chain/dag.mjs +209 -0
  16. package/src/chain/executor.mjs +103 -0
  17. package/src/chain/scheduler.mjs +89 -0
  18. package/src/client/adapters.mjs +483 -0
  19. package/src/client/connector.mjs +391 -0
  20. package/src/client/index.mjs +483 -0
  21. package/src/client/websocket.mjs +353 -0
  22. package/src/core/context-packager.mjs +169 -0
  23. package/src/core/engine.mjs +338 -0
  24. package/src/core/event-bus.mjs +84 -0
  25. package/src/core/prepimshot.mjs +120 -0
  26. package/src/core/task-decomposer.mjs +158 -0
  27. package/src/edge/lite.mjs +90 -0
  28. package/src/guard/checker.mjs +123 -0
  29. package/src/guard/fact-checker.mjs +105 -0
  30. package/src/guard/hallucination.mjs +108 -0
  31. package/src/index.mjs +67 -0
  32. package/src/models/local-model.mjs +171 -0
  33. package/src/models/provider.mjs +192 -0
  34. package/src/models/router.mjs +156 -0
  35. package/src/morph/optimizer.mjs +142 -0
  36. package/src/network/p2p.mjs +146 -0
  37. package/src/persona/detector.mjs +118 -0
  38. package/src/plugins/loader.mjs +120 -0
  39. package/src/plugins/registry.mjs +164 -0
  40. package/src/plugins/sandbox.mjs +79 -0
  41. package/src/rate/limiter.mjs +145 -0
  42. package/src/rate/shield.mjs +150 -0
  43. package/src/script/executor.mjs +164 -0
  44. package/src/script/parser.mjs +134 -0
  45. package/src/security/privacy.mjs +108 -0
  46. package/src/security/sanitizer.mjs +133 -0
  47. package/src/shadow/daemon.mjs +128 -0
  48. package/src/stream/handler.mjs +204 -0
  49. package/src/tools/calculator.mjs +312 -0
  50. package/src/tools/file-ops.mjs +138 -0
  51. package/src/tools/http-client.mjs +127 -0
  52. package/src/tools/orchestrator.mjs +205 -0
  53. package/src/tools/web-scraper.mjs +159 -0
  54. package/src/tools/web-search.mjs +129 -0
  55. package/src/vault/knowledge-base.mjs +207 -0
  56. package/src/vault/pattern-learner.mjs +192 -0
  57. package/workflows/analyze.json +32 -0
  58. package/workflows/automate.json +32 -0
  59. package/workflows/research.json +37 -0
  60. package/workflows/summarize.json +32 -0
@@ -0,0 +1,338 @@
1
+ /**
2
+ * @fileoverview Main Prepia engine - orchestrates the full pipeline.
3
+ * @module core/engine
4
+ */
5
+
6
+ import { EventEmitter } from 'node:events';
7
+ import { decompose, partitionTasks } from './task-decomposer.mjs';
8
+ import { packageContext } from './context-packager.mjs';
9
+ import { generatePrompt, mergePrompts } from './prepimshot.mjs';
10
+ import { classify, canHandleLocally, getLocalResponse } from '../models/local-model.mjs';
11
+ import { Orchestrator } from '../tools/orchestrator.mjs';
12
+ import { CacheManager } from '../cache/manager.mjs';
13
+ import { Tracker } from '../analytics/tracker.mjs';
14
+ import { StreamHandler } from '../stream/handler.mjs';
15
+ import { sanitize, validateParams } from '../security/sanitizer.mjs';
16
+ import { detectPersona } from '../persona/detector.mjs';
17
+ import { checkQuality } from '../guard/checker.mjs';
18
+ import { RateShield } from '../rate/shield.mjs';
19
+
20
/**
 * Core engine that orchestrates the Prepia pipeline:
 * sanitize -> cache lookup -> (flash local answer) -> decompose ->
 * local tool execution -> prompt packaging -> LLM call -> quality check.
 * Progress is published through the StreamHandler and every run is
 * recorded with the Tracker.
 */
export class PrepiaEngine extends EventEmitter {
  /**
   * @param {Object} [options]
   * @param {Object} [options.cache] - Options forwarded to CacheManager.
   * @param {Object} [options.rate] - Options forwarded to RateShield.
   * @param {Object} [options.modelRouter] - Router used for LLM calls; when
   *   absent the engine degrades to local-only answers.
   * @param {Object} [options.config] - Overrides merged over engine defaults.
   */
  constructor(options = {}) {
    super();
    this._cache = new CacheManager(options.cache || { enableDisk: false });
    this._orchestrator = new Orchestrator();
    this._tracker = new Tracker();
    this._stream = new StreamHandler();
    this._shield = new RateShield(options.rate || {});
    this._modelRouter = options.modelRouter || null;
    this._config = {
      maxContextTokens: 4000,
      defaultMode: 'shot',
      enableLocalModel: true,
      enableCache: true,
      enableQualityCheck: true,
      ...options.config,
    };
  }

  /**
   * Process a task through the full pipeline.
   * @param {string} query - User query.
   * @param {Object} [options]
   * @param {string} [options.mode='shot'] - Processing mode: flash, shot, stream.
   * @param {string} [options.workflow] - Workflow template to use.
   * @returns {Promise<Object>} Processing result.
   * @throws Re-throws any pipeline error after recording the failure.
   */
  async process(query, options = {}) {
    const start = Date.now();
    const taskId = `task_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`;
    const mode = options.mode || this._config.defaultMode;

    this._stream.start(taskId, query);

    try {
      // Sanitize input before it touches cache keys, tools, or prompts.
      const sanitized = sanitize(query);
      if (sanitized.modified) {
        this.emit('input:sanitized', { issues: sanitized.issues });
      }

      // Fix: the key is stable for (query, mode), so compute it once and
      // reuse it for both the lookup and the later store.
      const cacheKey = this._config.enableCache
        ? this._cache.generateKey({ query: sanitized.text, mode })
        : null;

      // Serve from cache when possible.
      if (cacheKey) {
        const cached = await this._cache.get(cacheKey);
        if (cached) {
          this._stream.complete(taskId, cached);
          this._tracker.record({
            id: taskId, type: 'cached', usedLLM: false,
            duration: Date.now() - start, cached: true, success: true,
          });
          return { ...cached, fromCache: true };
        }
      }

      // Flash mode: try the local model before doing any real work.
      // Fix: honor the enableLocalModel flag, which was previously ignored.
      if (mode === 'flash' && this._config.enableLocalModel) {
        const localResult = this._tryLocal(sanitized.text);
        if (localResult) {
          const duration = Date.now() - start;
          const resultWithDuration = { ...localResult, duration, taskId };
          this._stream.complete(taskId, resultWithDuration);
          this._tracker.record({
            id: taskId, type: 'flash', usedLLM: false,
            duration, success: true,
          });
          return resultWithDuration;
        }
      }

      // Break the task into sub-tasks and split local vs LLM work.
      this._stream.update(taskId, 'analyzing', 'Analyzing task...');
      const decomposed = decompose(sanitized.text);
      const { local, llm } = partitionTasks(decomposed.subtasks);

      // Execute everything that does not need an LLM.
      this._stream.update(taskId, 'processing', 'Processing locally...');
      const localResults = await this._executeLocalTasks(local);

      // If every sub-task was local, assemble the answer without an LLM call.
      if (llm.length === 0 && localResults.length > 0) {
        const answerParts = localResults
          .map((r) => this._formatResult(r.result))
          .filter(Boolean);

        const result = {
          answer: answerParts.join('\n'),
          mode: 'flash',
          usedLLM: false,
          taskId,
          duration: Date.now() - start,
        };
        this._stream.complete(taskId, result);
        this._tracker.record({
          id: taskId, type: 'flash', usedLLM: false,
          duration: Date.now() - start, success: true,
        });
        return result;
      }

      // Package the minimal prompt for the LLM.
      this._stream.update(taskId, 'synthesizing', 'Preparing LLM prompt...');
      const persona = detectPersona(sanitized.text);
      const prompt = mode === 'shot'
        ? mergePrompts(sanitized.text, localResults, { maxTokens: this._config.maxContextTokens })
        : generatePrompt({
            query: sanitized.text,
            context: localResults.map((r) => JSON.stringify(r.result)).join('\n'),
            systemPrompt: persona.systemPrompt,
            options: { maxTokens: this._config.maxContextTokens },
          });

      // Send to the LLM, or degrade gracefully when none is configured.
      let llmResponse;
      if (this._modelRouter) {
        this._stream.update(taskId, 'synthesizing', 'Calling LLM...');
        llmResponse = await this._modelRouter.send({
          prompt: prompt.user,
          system: prompt.system,
          maxTokens: 2048,
        });
      } else {
        // Fix: only claim "Based on available information" when the local
        // results actually format to something non-empty.
        const parts = localResults
          .map((r) => this._formatResult(r.result))
          .filter(Boolean);
        llmResponse = {
          content: parts.length > 0
            ? `Based on available information:\n${parts.join('\n')}`
            : 'No LLM configured. Please configure a model provider.',
          provider: 'none',
          usage: { prompt: 0, completion: 0, total: 0 },
        };
      }

      // Optional answer quality check.
      let quality = null;
      if (this._config.enableQualityCheck && llmResponse.content) {
        quality = checkQuality(llmResponse.content, { query: sanitized.text });
      }

      const result = {
        answer: llmResponse.content,
        mode,
        usedLLM: true,
        taskId,
        provider: llmResponse.provider,
        tokens: llmResponse.usage,
        quality,
        duration: Date.now() - start,
      };

      // Cache the successful result for identical follow-up queries.
      if (cacheKey && llmResponse.content) {
        await this._cache.set(cacheKey, result);
      }

      this._stream.complete(taskId, result);
      this._tracker.record({
        id: taskId, type: mode, usedLLM: true,
        tokensUsed: llmResponse.usage?.total || 0,
        tokensSaved: prompt.tokens || 0,
        duration: Date.now() - start, success: true,
      });

      return result;
    } catch (err) {
      this._stream.error(taskId, err.message);
      this._tracker.record({
        id: taskId, type: mode, usedLLM: false,
        duration: Date.now() - start, success: false,
      });
      throw err;
    }
  }

  /**
   * Normalize a tool/sub-task result into display text.
   * Strings pass through, arrays are formatted element-wise and joined with
   * newlines, objects prefer their `value` field (else JSON), and null or
   * undefined map to null so callers can filter empties out.
   * (Replaces two previously duplicated, slightly divergent copies of this
   * logic inside process().)
   * @param {*} res
   * @returns {string|null}
   * @private
   */
  _formatResult(res) {
    if (res === null || res === undefined) return null;
    if (typeof res === 'string') return res;
    if (Array.isArray(res)) {
      return res
        .map((item) => this._formatResult(item))
        .filter(Boolean)
        .join('\n');
    }
    if (typeof res === 'object') {
      if (res.value !== undefined) return String(res.value);
      return JSON.stringify(res);
    }
    return String(res);
  }

  /**
   * Try to handle a query entirely with the local model.
   * @param {string} query
   * @returns {Object|null} A flash-mode result, or null when the local model
   *   has no answer.
   * @private
   */
  _tryLocal(query) {
    const localResult = getLocalResponse(query);
    if (localResult !== null) {
      return {
        answer: localResult,
        mode: 'flash',
        usedLLM: false,
      };
    }
    return null;
  }

  /**
   * Execute local (non-LLM) sub-tasks via the tool orchestrator.
   * Failures are captured per-task as `{ taskId, error }` instead of
   * aborting the whole batch.
   * @param {Object[]} tasks
   * @returns {Promise<Object[]>}
   * @private
   */
  async _executeLocalTasks(tasks) {
    const results = [];
    for (const task of tasks) {
      try {
        const toolCalls = this._orchestrator.route(task.type, task.params);
        if (toolCalls.length > 0) {
          const toolResults = await this._orchestrator.executeMany(toolCalls);
          results.push({
            taskId: task.id,
            // Fix: keep legitimate falsy values such as 0 or '' — the old
            // filter(Boolean) silently discarded numeric-zero tool results.
            result: toolResults
              .map((r) => r.result)
              .filter((value) => value !== null && value !== undefined),
          });
        }
      } catch (err) {
        results.push({ taskId: task.id, error: err.message });
      }
    }
    return results;
  }

  /**
   * Aggregate analytics: tracker metrics, estimated savings, cache stats.
   * @returns {Object}
   */
  getAnalytics() {
    return {
      metrics: this._tracker.getMetrics(),
      savings: this._tracker.estimateSavings(),
      cache: this._cache.stats(),
    };
  }

  /** Clear all cache layers. */
  async clearCache() {
    await this._cache.clear();
  }

  /**
   * List tools registered with the orchestrator.
   * @returns {Object[]}
   */
  getPlugins() {
    return this._orchestrator.listTools();
  }

  /**
   * Merge new settings into the engine configuration.
   * @param {Object} config
   */
  updateConfig(config) {
    Object.assign(this._config, config);
  }

  /** @returns {StreamHandler} Stream handler for progress tracking. */
  getStream() {
    return this._stream;
  }

  /** @returns {Tracker} Analytics tracker. */
  getTracker() {
    return this._tracker;
  }

  /** @returns {CacheManager} Cache manager. */
  getCache() {
    return this._cache;
  }

  /** @returns {Orchestrator} Tool orchestrator. */
  getOrchestrator() {
    return this._orchestrator;
  }
}

export default PrepiaEngine;
@@ -0,0 +1,84 @@
1
/**
 * @fileoverview Global event bus for inter-module communication.
 * Centralizes all event handling across Prepia modules.
 * @module core/event-bus
 */

import { EventEmitter } from 'node:events';

/** @type {EventEmitter} Shared singleton bus backing every helper below. */
const bus = new EventEmitter();

// Complex workflows attach many listeners; raise the cap so Node does not
// print MaxListenersExceededWarning.
bus.setMaxListeners(100);

/**
 * Emit an event on the global bus.
 * @param {string} event - Event name
 * @param {...*} args - Event arguments
 * @returns {boolean} Whether the event had listeners
 */
export const emit = (event, ...args) => bus.emit(event, ...args);

/**
 * Register a listener on the global bus.
 * @param {string} event - Event name
 * @param {Function} listener - Callback function
 * @returns {EventEmitter} The bus instance for chaining
 */
export const on = (event, listener) => bus.on(event, listener);

/**
 * Register a one-time listener on the global bus.
 * @param {string} event - Event name
 * @param {Function} listener - Callback function
 * @returns {EventEmitter} The bus instance for chaining
 */
export const once = (event, listener) => bus.once(event, listener);

/**
 * Remove a listener from the global bus.
 * @param {string} event - Event name
 * @param {Function} listener - Callback function to remove
 * @returns {EventEmitter} The bus instance for chaining
 */
export const off = (event, listener) => bus.off(event, listener);

/**
 * Remove all listeners for a specific event, or for every event when no
 * event name is given.
 * @param {string} [event] - Optional event name
 */
export const removeAll = (event) => {
  if (event) {
    bus.removeAllListeners(event);
  } else {
    bus.removeAllListeners();
  }
};

/**
 * Get the underlying EventEmitter for advanced usage.
 * @returns {EventEmitter}
 */
export const getBus = () => bus;

/**
 * Get listener count for an event.
 * @param {string} event - Event name
 * @returns {number}
 */
export const listenerCount = (event) => bus.listenerCount(event);

export default { emit, on, once, off, removeAll, getBus, listenerCount };
@@ -0,0 +1,120 @@
1
+ /**
2
+ * @fileoverview PrepiShot - hyper-focused prompt generation for one-shot LLM calls.
3
+ * Strips all unnecessary context to get the absolute minimum the LLM needs.
4
+ * @module core/prepimshot
5
+ */
6
+
7
+ import { estimateTokens } from './context-packager.mjs';
8
+
9
/**
 * Generate a minimal, focused prompt for a single LLM call.
 * @param {Object} params
 * @param {string} params.query - Original user query
 * @param {string} [params.context] - Gathered context text
 * @param {string} [params.systemPrompt] - Custom system prompt
 * @param {Object} [params.options] - Additional options
 * @param {number} [params.options.maxTokens=4000] - Max context tokens
 * @param {string} [params.options.outputFormat='text'] - Desired output format
 * @returns {Object} `{ system, user, tokens, optimized, format }`
 * @throws {Error} When `params.query` is missing or empty.
 */
export function generatePrompt(params) {
  const { query, context = '', systemPrompt = '', options = {} } = params;
  const { maxTokens = 4000, outputFormat = 'text' } = options;

  if (!query) {
    throw new Error('Query is required for prompt generation');
  }

  // Assemble the two halves of the prompt, then estimate their cost.
  const system = buildSystemPrompt(systemPrompt, outputFormat);
  const user = buildUserPrompt(query, context, maxTokens);

  return {
    system,
    user,
    tokens: estimateTokens(system) + estimateTokens(user),
    optimized: context.length > 0,
    format: outputFormat,
  };
}
44
+
45
/**
 * Assemble the system prompt from a custom base plus a format directive.
 * @param {string} custom - Custom system prompt (falsy falls back to default)
 * @param {string} format - Output format: 'json', 'markdown', 'list', or other
 * @returns {string}
 */
function buildSystemPrompt(custom, format) {
  const base = custom || 'You are a helpful assistant. Provide accurate, concise answers.';

  let directive = '';
  switch (format) {
    case 'json':
      directive = 'Respond in valid JSON format.';
      break;
    case 'markdown':
      directive = 'Use markdown formatting.';
      break;
    case 'list':
      directive = 'Respond as a bulleted list.';
      break;
    default:
      break;
  }

  return directive ? `${base} ${directive}` : base;
}
70
+
71
/**
 * Build the user prompt, prepending trimmed context when available.
 * Context is cut to fit the token budget using a ~4 chars/token heuristic
 * (mirroring estimateTokens), with a 50-token buffer for formatting.
 * @param {string} query
 * @param {string} context
 * @param {number} maxTokens
 * @returns {string}
 */
function buildUserPrompt(query, context, maxTokens) {
  if (!context || context.trim().length === 0) {
    return query;
  }

  // Budget left for context after the query and a 50-token safety buffer.
  const queryTokens = estimateTokens(query);
  const availableTokens = maxTokens - queryTokens - 50;

  // Fix: a long query could drive the budget negative; substring(0, negative)
  // then produced a useless "Context:\n\n[...]" stub. With no budget, send
  // the bare query instead.
  if (availableTokens <= 0) {
    return query;
  }

  let trimmedContext = context;
  if (estimateTokens(context) > availableTokens) {
    // ~4 characters per token heuristic — TODO confirm against estimateTokens.
    trimmedContext = context.substring(0, availableTokens * 4) + '\n[...]';
  }

  return `Context:\n${trimmedContext}\n\nQuestion: ${query}`;
}
94
+
95
/**
 * Merge sub-task results into a single context block and generate one
 * prompt for the whole task.
 * @param {string} query - Original query
 * @param {Object[]} subtaskResults - Results from sub-tasks; entries with an
 *   `error` or an empty `result` are skipped
 * @param {Object} [options] - Forwarded to generatePrompt
 * @returns {Object} Merged prompt
 */
export function mergePrompts(query, subtaskResults, options = {}) {
  const context = subtaskResults
    .filter((entry) => !entry.error && entry.result)
    .map((entry) =>
      typeof entry.result === 'string' ? entry.result : JSON.stringify(entry.result),
    )
    .join('\n\n---\n\n');

  return generatePrompt({ query, context, options });
}
119
+
120
+ export default { generatePrompt, mergePrompts };
@@ -0,0 +1,158 @@
1
+ /**
2
+ * @fileoverview Task decomposer - breaks complex tasks into sub-tasks.
3
+ * @module core/task-decomposer
4
+ */
5
+
6
+ /**
7
+ * @typedef {Object} SubTask
8
+ * @property {string} id - Sub-task ID
9
+ * @property {string} type - Task type
10
+ * @property {string} description - What this sub-task does
11
+ * @property {boolean} needsLLM - Whether this requires an LLM call
12
+ * @property {string[]} dependencies - IDs of tasks this depends on
13
+ * @property {Object} params - Task parameters
14
+ */
15
+
16
/**
 * Decompose a complex task into sub-tasks.
 * Heuristically analyzes the task text, emits search/scrape/calculate
 * sub-tasks for the local toolchain, and always finishes with exactly one
 * LLM step (a 'synthesize' step, or a bare 'llm' task when nothing local
 * applies).
 * @param {string} task - The task description
 * @param {Object} [options] - Currently unused; kept for API stability
 * @returns {Object} Decomposed task tree `{ subtasks, root, analysis }`
 *   (no `analysis` key for empty/invalid input)
 */
export function decompose(task, options = {}) {
  // Invalid input yields an empty plan.
  if (typeof task !== 'string' || task.length === 0) {
    return { subtasks: [], root: null };
  }

  let nextId = 0;
  // Ids are sequential (task_1, task_2, ...) in creation order.
  const makeTask = (fields) => ({ id: `task_${++nextId}`, ...fields });

  const analysis = analyzeTask(task);
  const subtasks = [];

  if (analysis.needsSearch) {
    const search = makeTask({
      type: 'search',
      description: `Search for information about: ${analysis.topic}`,
      needsLLM: false,
      dependencies: [],
      params: { query: analysis.searchQuery || analysis.topic },
    });
    subtasks.push(search);

    // Scraping only makes sense downstream of a search.
    if (analysis.needsScraping) {
      subtasks.push(makeTask({
        type: 'scrape',
        description: 'Extract content from search results',
        needsLLM: false,
        dependencies: [search.id],
        params: {},
      }));
    }
  }

  if (analysis.needsCalculation) {
    subtasks.push(makeTask({
      type: 'calculate',
      description: `Perform calculation: ${analysis.calcExpression || 'compute results'}`,
      needsLLM: false,
      dependencies: [],
      params: { expression: analysis.calcExpression },
    }));
  }

  if (subtasks.length === 0) {
    // Simple task: a single LLM call with no local preparation.
    subtasks.push(makeTask({
      type: 'llm',
      description: task,
      needsLLM: true,
      dependencies: [],
      params: { query: task },
    }));
  } else {
    // Fold every local result into one final LLM synthesis step.
    subtasks.push(makeTask({
      type: 'synthesize',
      description: 'Combine results into final answer',
      needsLLM: true,
      dependencies: subtasks.map((t) => t.id),
      params: { originalQuery: task },
    }));
  }

  return {
    subtasks,
    root: subtasks[subtasks.length - 1]?.id || null,
    analysis,
  };
}
99
+
100
/**
 * Inspect a task description and flag which capabilities it needs.
 * Pure keyword/regex heuristic: no I/O, no LLM. Also extracts a topic,
 * an optional search query, and an optional math expression.
 * @param {string} task
 * @returns {Object} Analysis flags plus extracted fragments
 */
function analyzeTask(task) {
  const normalized = task.toLowerCase();

  const needsSearch =
    /\b(search|find|look\s+up|what\s+is|who\s+is|when\s+did|latest|current|news)\b/i.test(normalized);
  const needsScraping =
    /\b(read|extract|scrape|get\s+content\s+from|fetch\s+page)\b/i.test(normalized) ||
    /https?:\/\//i.test(task);
  const needsCalculation =
    /\b(calculate|compute|how\s+much|how\s+many|percentage|convert|math)\b/i.test(normalized) ||
    /\d+\s*[+\-*/^]\s*\d+/.test(task);
  const needsFileOps = /\b(read\s+file|write\s+file|save|load|file)\b/i.test(normalized);

  // Topic: strip a polite prefix and anything after the first question mark.
  const topic = task
    .replace(/^(please\s+)?(can\s+you\s+)?/i, '')
    .replace(/\?.*$/, '')
    .trim();

  // Search query: everything after a search verb (optional "for").
  const searchMatch = task.match(/(?:search|find|look\s+up)\s+(?:for\s+)?(.+)/i);
  const searchQuery = searchMatch ? searchMatch[1] : null;

  // Math expression: percentage form wins, otherwise a raw arithmetic run.
  let calcExpression = null;
  const pctMatch = task.match(/(\d+\.?\d*)\s*%\s*(?:of|off)?\s*(\d+\.?\d*)/);
  if (pctMatch) {
    calcExpression = `${pctMatch[1]}% of ${pctMatch[2]}`;
  } else {
    const mathMatch = task.match(/(\d+[\s+\-*/^()\d.]*\d+)/);
    if (mathMatch) calcExpression = mathMatch[1];
  }

  const complexity =
    needsSearch && needsCalculation ? 'complex'
      : needsSearch || needsCalculation ? 'medium'
        : 'simple';

  return {
    topic,
    needsSearch,
    needsScraping,
    needsCalculation,
    needsFileOps,
    searchQuery,
    calcExpression,
    complexity,
  };
}
146
+
147
/**
 * Split sub-tasks by whether they can be handled locally (no LLM).
 * Relative order within each bucket matches the input order.
 * @param {SubTask[]} subtasks
 * @returns {{ local: SubTask[], llm: SubTask[] }}
 */
export function partitionTasks(subtasks) {
  const local = [];
  const llm = [];
  for (const subtask of subtasks) {
    (subtask.needsLLM ? llm : local).push(subtask);
  }
  return { local, llm };
}
157
+
158
+ export default { decompose, partitionTasks };