@ryanfw/prompt-orchestration-pipeline 0.5.0 → 0.7.0
- package/README.md +1 -2
- package/package.json +1 -2
- package/src/api/validators/json.js +39 -0
- package/src/components/DAGGrid.jsx +392 -303
- package/src/components/JobCard.jsx +14 -12
- package/src/components/JobDetail.jsx +54 -51
- package/src/components/JobTable.jsx +72 -23
- package/src/components/Layout.jsx +145 -42
- package/src/components/LiveText.jsx +47 -0
- package/src/components/PageSubheader.jsx +75 -0
- package/src/components/TaskDetailSidebar.jsx +216 -0
- package/src/components/TimerText.jsx +82 -0
- package/src/components/UploadSeed.jsx +0 -70
- package/src/components/ui/Logo.jsx +16 -0
- package/src/components/ui/RestartJobModal.jsx +140 -0
- package/src/components/ui/toast.jsx +138 -0
- package/src/config/models.js +322 -0
- package/src/config/statuses.js +119 -0
- package/src/core/config.js +4 -34
- package/src/core/file-io.js +13 -28
- package/src/core/module-loader.js +54 -40
- package/src/core/pipeline-runner.js +65 -26
- package/src/core/status-writer.js +213 -58
- package/src/core/symlink-bridge.js +57 -0
- package/src/core/symlink-utils.js +94 -0
- package/src/core/task-runner.js +321 -437
- package/src/llm/index.js +258 -86
- package/src/pages/Code.jsx +351 -0
- package/src/pages/PipelineDetail.jsx +124 -15
- package/src/pages/PromptPipelineDashboard.jsx +20 -88
- package/src/providers/anthropic.js +83 -69
- package/src/providers/base.js +52 -0
- package/src/providers/deepseek.js +20 -21
- package/src/providers/gemini.js +226 -0
- package/src/providers/openai.js +36 -106
- package/src/providers/zhipu.js +136 -0
- package/src/ui/client/adapters/job-adapter.js +42 -28
- package/src/ui/client/api.js +134 -0
- package/src/ui/client/hooks/useJobDetailWithUpdates.js +65 -179
- package/src/ui/client/index.css +15 -0
- package/src/ui/client/index.html +2 -1
- package/src/ui/client/main.jsx +19 -14
- package/src/ui/client/time-store.js +161 -0
- package/src/ui/config-bridge.js +15 -24
- package/src/ui/config-bridge.node.js +15 -24
- package/src/ui/dist/assets/{index-CxcrauYR.js → index-DqkbzXZ1.js} +2132 -1086
- package/src/ui/dist/assets/style-DBF9NQGk.css +62 -0
- package/src/ui/dist/index.html +4 -3
- package/src/ui/job-reader.js +0 -108
- package/src/ui/public/favicon.svg +12 -0
- package/src/ui/server.js +252 -0
- package/src/ui/sse-enhancer.js +0 -1
- package/src/ui/transformers/list-transformer.js +32 -12
- package/src/ui/transformers/status-transformer.js +29 -42
- package/src/utils/dag.js +8 -4
- package/src/utils/duration.js +13 -19
- package/src/utils/formatters.js +27 -0
- package/src/utils/geometry-equality.js +83 -0
- package/src/utils/pipelines.js +5 -1
- package/src/utils/time-utils.js +40 -0
- package/src/utils/token-cost-calculator.js +294 -0
- package/src/utils/ui.jsx +18 -20
- package/src/components/ui/select.jsx +0 -27
- package/src/lib/utils.js +0 -6
- package/src/ui/client/hooks/useTicker.js +0 -26
- package/src/ui/config-bridge.browser.js +0 -149
- package/src/ui/dist/assets/style-D6K_oQ12.css +0 -62
package/src/llm/index.js
CHANGED
@@ -1,7 +1,15 @@
 import { openaiChat } from "../providers/openai.js";
 import { deepseekChat } from "../providers/deepseek.js";
+import { anthropicChat } from "../providers/anthropic.js";
+import { geminiChat } from "../providers/gemini.js";
+import { zhipuChat } from "../providers/zhipu.js";
 import { EventEmitter } from "node:events";
 import { getConfig } from "../core/config.js";
+import {
+  MODEL_CONFIG,
+  DEFAULT_MODEL_BY_PROVIDER,
+  aliasToFunctionName,
+} from "../config/models.js";
 import fs from "node:fs";

 // Global mock provider instance (for demo/testing)
@@ -18,8 +26,12 @@ export function registerMockProvider(provider) {

 // Auto-register mock provider in test mode when default provider is "mock"
 function autoRegisterMockProvider() {
-
-
+  // Skip config check in tests to avoid PO_ROOT requirement
+  const isTest =
+    process.env.NODE_ENV === "test" || process.env.VITEST === "true";
+  const defaultProvider = isTest ? "mock" : getConfig().llm.defaultProvider;
+
+  if (defaultProvider === "mock" && !mockProviderInstance) {
     // Auto-register a basic mock provider for testing
     mockProviderInstance = {
       chat: async () => ({
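The new test gate means `autoRegisterMockProvider` no longer calls `getConfig()` (which requires `PO_ROOT`) when `NODE_ENV=test` or `VITEST=true`. A hypothetical test setup exercising that path; the relative import path and the mock's snake_case usage shape are assumptions based on this diff, not documented API:

```js
// Hypothetical test setup: register a mock provider so chat() never hits a
// real API. registerMockProvider and chat are this module's exports; the
// import path and response shape are assumptions.
import { registerMockProvider, chat } from "./src/llm/index.js";

process.env.NODE_ENV = "test"; // trips the new isTest gate, so getConfig() is skipped

registerMockProvider({
  chat: async () => ({
    content: JSON.stringify({ ok: true }),
    usage: { prompt_tokens: 3, completion_tokens: 5, total_tokens: 8 },
  }),
});

const res = await chat({
  provider: "mock",
  messages: [{ role: "user", content: "ping" }],
});
console.log(res.content); // {"ok":true}
```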
@@ -40,6 +52,8 @@ export function getAvailableProviders() {
     openai: !!process.env.OPENAI_API_KEY,
     deepseek: !!process.env.DEEPSEEK_API_KEY,
     anthropic: !!process.env.ANTHROPIC_API_KEY,
+    gemini: !!process.env.GEMINI_API_KEY,
+    zhipu: !!process.env.ZHIPU_API_KEY,
     mock: !!mockProviderInstance,
   };
 }
@@ -49,36 +63,28 @@ export function estimateTokens(text) {
   return Math.ceil((text || "").length / 4);
 }

-// Calculate cost based on provider and model
+// Calculate cost based on provider and model, derived from config
 export function calculateCost(provider, model, usage) {
-
-
-
-
-
-
-
-
-
-
-
-
-    deepseek: {
-      "deepseek-reasoner": { prompt: 0.001, completion: 0.002 },
-      "deepseek-chat": { prompt: 0.0005, completion: 0.001 },
-    },
-    anthropic: {
-      "claude-3-opus": { prompt: 0.015, completion: 0.075 },
-      "claude-3-sonnet": { prompt: 0.003, completion: 0.015 },
-    },
-  };
+  if (!usage) {
+    // Fallback for missing usage
+    return 0;
+  }
+
+  const modelConfig = Object.values(MODEL_CONFIG).find(
+    (cfg) => cfg.provider === provider && cfg.model === model
+  );
+
+  if (!modelConfig) {
+    return 0;
+  }

-
-
+  // Convert per-million pricing to per-1k for calculation
+  const promptCostPer1k = modelConfig.tokenCostInPerMillion / 1000;
+  const completionCostPer1k = modelConfig.tokenCostOutPerMillion / 1000;

-  const promptCost = ((usage.promptTokens || 0) / 1000) *
+  const promptCost = ((usage.promptTokens || 0) / 1000) * promptCostPer1k;
   const completionCost =
-    ((usage.completionTokens || 0) / 1000) *
+    ((usage.completionTokens || 0) / 1000) * completionCostPer1k;

   return promptCost + completionCost;
 }
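The hardcoded per-1k pricing table is gone; cost now derives from `MODEL_CONFIG`, whose entries price tokens per million. A minimal sketch of the arithmetic, using an invented registry entry (only the field names `provider`, `model`, `tokenCostInPerMillion`, and `tokenCostOutPerMillion` come from the diff):

```js
// Invented MODEL_CONFIG entry; field names match the diff, the prices are
// illustrative only.
const MODEL_CONFIG = {
  "openai:gpt-5-chat-latest": {
    provider: "openai",
    model: "gpt-5-chat-latest",
    tokenCostInPerMillion: 5, // $5 per 1M prompt tokens
    tokenCostOutPerMillion: 15, // $15 per 1M completion tokens
  },
};

// Same math as the new calculateCost: per-million -> per-1k, then scale by usage.
function cost(cfg, usage) {
  const promptCostPer1k = cfg.tokenCostInPerMillion / 1000;
  const completionCostPer1k = cfg.tokenCostOutPerMillion / 1000;
  return (
    ((usage.promptTokens || 0) / 1000) * promptCostPer1k +
    ((usage.completionTokens || 0) / 1000) * completionCostPer1k
  );
}

// 2,000 prompt + 1,000 completion tokens -> 2 * 0.005 + 1 * 0.015 = 0.025
console.log(
  cost(MODEL_CONFIG["openai:gpt-5-chat-latest"], {
    promptTokens: 2000,
    completionTokens: 1000,
  })
);
```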
@@ -92,6 +98,11 @@ export async function chat(options) {
     temperature,
     maxTokens,
     metadata = {},
+    topP,
+    frequencyPenalty,
+    presencePenalty,
+    stop,
+    responseFormat,
     ...rest
   } = options;

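Destructuring the five new options out of `options` keeps them from leaking into provider calls via `...rest`; each branch below re-attaches only what its API supports (openai and deepseek get the penalty knobs; anthropic, gemini, and zhipu only `topP` and `stop`). A hypothetical call passing the new parameters:

```js
// Hypothetical call; option names match the destructuring above, the import
// path is an assumption.
import { chat } from "./src/llm/index.js";

const res = await chat({
  provider: "openai",
  messages: [{ role: "user", content: "List three prime numbers." }],
  temperature: 0.2,
  topP: 0.9,
  frequencyPenalty: 0.1,
  presencePenalty: 0,
  stop: ["\n\n"],
});
```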
@@ -107,12 +118,15 @@ export async function chat(options) {
   const startTime = Date.now();
   const requestId = `req_${Date.now()}_${Math.random().toString(36).substring(7)}`;

+  // Default to JSON mode if not specified
+  const finalResponseFormat = responseFormat ?? "json";
+
   // Extract system and user messages
   const systemMsg = messages.find((m) => m.role === "system")?.content || "";
   const userMessages = messages.filter((m) => m.role === "user");
   const userMsg = userMessages.map((m) => m.content).join("\n");

-  // DEBUG
+  // DEBUG write_to_file messages to /tmp/messages.log for debugging
   fs.writeFileSync(
     "/tmp/messages.log",
     JSON.stringify({ messages, systemMsg, userMsg, provider, model }, null, 2)
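Two behaviors fall out of this hunk: calls now default to `responseFormat: "json"` unless the caller overrides it, and all user messages are flattened into one newline-joined string before reaching a provider. The extraction, reproduced standalone with the diff's own expressions:

```js
// Standalone reproduction of the system/user extraction; note that a
// multi-turn conversation collapses into a single user string.
const messages = [
  { role: "system", content: "You are terse." },
  { role: "user", content: "First question" },
  { role: "user", content: "Second question" },
];

const systemMsg = messages.find((m) => m.role === "system")?.content || "";
const userMsg = messages
  .filter((m) => m.role === "user")
  .map((m) => m.content)
  .join("\n");

console.log({ systemMsg, userMsg });
// { systemMsg: "You are terse.", userMsg: "First question\nSecond question" }
```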
@@ -157,52 +171,210 @@ export async function chat(options) {
         totalTokens: result.usage.total_tokens,
       };
     } else if (provider === "openai") {
-      const
+      const openaiArgs = {
        messages,
        model: model || "gpt-5-chat-latest",
+        temperature,
        maxTokens,
+        ...rest,
+      };
+      openaiArgs.responseFormat = finalResponseFormat;
+      if (topP !== undefined) openaiArgs.topP = topP;
+      if (frequencyPenalty !== undefined)
+        openaiArgs.frequencyPenalty = frequencyPenalty;
+      if (presencePenalty !== undefined)
+        openaiArgs.presencePenalty = presencePenalty;
+      if (stop !== undefined) openaiArgs.stop = stop;
+
+      const result = await openaiChat(openaiArgs);
+
+      response = {
+        content:
+          result?.content ??
+          (typeof result === "string" ? result : String(result)),
+        raw: result?.raw ?? result,
+      };
+
+      // Use provider usage if available; otherwise estimate tokens
+      if (result?.usage) {
+        const { prompt_tokens, completion_tokens, total_tokens } = result.usage;
+        usage = {
+          promptTokens: prompt_tokens,
+          completionTokens: completion_tokens,
+          totalTokens: total_tokens,
+        };
+      } else {
+        const promptTokens = estimateTokens(systemMsg + userMsg);
+        const completionTokens = estimateTokens(response.content);
+        usage = {
+          promptTokens,
+          completionTokens,
+          totalTokens: promptTokens + completionTokens,
+        };
+      }
+    } else if (provider === "deepseek") {
+      const deepseekArgs = {
+        messages,
+        model: model || "deepseek-reasoner",
        temperature,
+        maxTokens,
        ...rest,
-      }
+      };
+      if (topP !== undefined) deepseekArgs.topP = topP;
+      if (frequencyPenalty !== undefined)
+        deepseekArgs.frequencyPenalty = frequencyPenalty;
+      if (presencePenalty !== undefined)
+        deepseekArgs.presencePenalty = presencePenalty;
+      if (stop !== undefined) deepseekArgs.stop = stop;
+      deepseekArgs.responseFormat = finalResponseFormat;
+
+      const result = await deepseekChat(deepseekArgs);

       response = {
-        content:
-        raw: result,
+        content: result.content,
       };

-      //
-
-
-
-
-
+      // Use actual usage from deepseek API if available; otherwise estimate
+      if (result?.usage) {
+        const { prompt_tokens, completion_tokens, total_tokens } = result.usage;
+        usage = {
+          promptTokens: prompt_tokens,
+          completionTokens: completion_tokens,
+          totalTokens: total_tokens,
+        };
+      } else {
+        const promptTokens = estimateTokens(systemMsg + userMsg);
+        const completionTokens = estimateTokens(
+          typeof result === "string" ? result : JSON.stringify(result)
+        );
+        usage = {
+          promptTokens,
+          completionTokens,
+          totalTokens: promptTokens + completionTokens,
+        };
+      }
+    } else if (provider === "anthropic") {
+      const defaultAlias = DEFAULT_MODEL_BY_PROVIDER.anthropic;
+      const defaultModelConfig = MODEL_CONFIG[defaultAlias];
+      const defaultModel = defaultModelConfig?.model;
+
+      const anthropicArgs = {
+        messages,
+        model: model || defaultModel,
+        temperature,
+        maxTokens,
+        ...rest,
       };
-
-
-
-
-
-      }
-
-      // systemMsg,
-      // userMsg,
-      // model || "deepseek-reasoner"
-      );
+      if (topP !== undefined) anthropicArgs.topP = topP;
+      if (stop !== undefined) anthropicArgs.stop = stop;
+      anthropicArgs.responseFormat = finalResponseFormat;
+
+      const result = await anthropicChat(anthropicArgs);

       response = {
         content: result.content,
+        raw: result.raw,
       };

-
-
-
-
-
-
-
-
+      // Use actual usage from anthropic API if available; otherwise estimate
+      if (result?.usage) {
+        const { prompt_tokens, completion_tokens, total_tokens } = result.usage;
+        usage = {
+          promptTokens: prompt_tokens,
+          completionTokens: completion_tokens,
+          totalTokens: total_tokens,
+        };
+      } else {
+        const promptTokens = estimateTokens(systemMsg + userMsg);
+        const completionTokens = estimateTokens(
+          typeof result === "string" ? result : JSON.stringify(result)
+        );
+        usage = {
+          promptTokens,
+          completionTokens,
+          totalTokens: promptTokens + completionTokens,
+        };
+      }
+    } else if (provider === "gemini") {
+      const geminiArgs = {
+        messages,
+        model: model || "gemini-2.5-flash",
+        temperature,
+        maxTokens,
+        ...rest,
       };
+      if (topP !== undefined) geminiArgs.topP = topP;
+      if (stop !== undefined) geminiArgs.stop = stop;
+      geminiArgs.responseFormat = finalResponseFormat;
+
+      const result = await geminiChat(geminiArgs);
+
+      response = {
+        content: result.content,
+        raw: result.raw,
+      };
+
+      // Use actual usage from gemini API if available; otherwise estimate
+      if (result?.usage) {
+        const { prompt_tokens, completion_tokens, total_tokens } = result.usage;
+        usage = {
+          promptTokens: prompt_tokens,
+          completionTokens: completion_tokens,
+          totalTokens: total_tokens,
+        };
+      } else {
+        const promptTokens = estimateTokens(systemMsg + userMsg);
+        const completionTokens = estimateTokens(
+          typeof result === "string" ? result : JSON.stringify(result)
+        );
+        usage = {
+          promptTokens,
+          completionTokens,
+          totalTokens: promptTokens + completionTokens,
+        };
+      }
+    } else if (provider === "zhipu") {
+      const defaultAlias = DEFAULT_MODEL_BY_PROVIDER.zhipu;
+      const defaultModelConfig = MODEL_CONFIG[defaultAlias];
+      const defaultModel = defaultModelConfig?.model;
+
+      const zhipuArgs = {
+        messages,
+        model: model || defaultModel,
+        temperature,
+        maxTokens,
+        ...rest,
+      };
+      if (topP !== undefined) zhipuArgs.topP = topP;
+      if (stop !== undefined) zhipuArgs.stop = stop;
+      zhipuArgs.responseFormat = finalResponseFormat;
+
+      const result = await zhipuChat(zhipuArgs);
+
+      response = {
+        content: result.content,
+        raw: result.raw,
+      };
+
+      // Use actual usage from zhipu API if available; otherwise estimate
+      if (result?.usage) {
+        const { prompt_tokens, completion_tokens, total_tokens } = result.usage;
+        usage = {
+          promptTokens: prompt_tokens,
+          completionTokens: completion_tokens,
+          totalTokens: total_tokens,
+        };
+      } else {
+        const promptTokens = estimateTokens(systemMsg + userMsg);
+        const completionTokens = estimateTokens(
+          typeof result === "string" ? result : JSON.stringify(result)
+        );
+        usage = {
+          promptTokens,
+          completionTokens,
+          totalTokens: promptTokens + completionTokens,
+        };
+      }
     } else {
       throw new Error(`Provider ${provider} not yet implemented`);
     }
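Each provider branch repeats the same normalization: prefer the API's snake_case usage counts, otherwise estimate from text length. A hypothetical helper capturing the shared pattern (not part of the package; `estimateTokens` mirrors the module's own length/4 implementation shown earlier):

```js
// Hypothetical refactor of the usage pattern repeated in every branch above.
const estimateTokens = (text) => Math.ceil((text || "").length / 4);

function normalizeUsage(result, promptText, completionText) {
  if (result?.usage) {
    const { prompt_tokens, completion_tokens, total_tokens } = result.usage;
    return {
      promptTokens: prompt_tokens,
      completionTokens: completion_tokens,
      totalTokens: total_tokens,
    };
  }
  const promptTokens = estimateTokens(promptText);
  const completionTokens = estimateTokens(completionText);
  return {
    promptTokens,
    completionTokens,
    totalTokens: promptTokens + completionTokens,
  };
}

// No usage on the result -> estimated from text lengths.
console.log(normalizeUsage(undefined, "four char chunks", "okay"));
// { promptTokens: 4, completionTokens: 1, totalTokens: 5 }
```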
@@ -222,8 +394,11 @@ export async function chat(options) {
       timestamp: new Date().toISOString(),
     });

-    // Return clean response - no metrics attached!
-    return
+    // Return clean response with usage - no metrics attached!
+    return {
+      ...response,
+      usage,
+    };
   } catch (error) {
     const duration = Date.now() - startTime;

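Since `chat()` now resolves to `{ ...response, usage }`, callers can price a call directly with the exported `calculateCost`. A hypothetical pairing of the two changes:

```js
// Hypothetical caller; chat and calculateCost are the module's exports, the
// import path is an assumption.
import { chat, calculateCost } from "./src/llm/index.js";

const { content, usage } = await chat({
  provider: "deepseek",
  model: "deepseek-reasoner",
  messages: [{ role: "user", content: "Hello" }],
});

console.log(content);
console.log(calculateCost("deepseek", "deepseek-reasoner", usage));
```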
@@ -242,19 +417,6 @@ export async function chat(options) {
   }
 }

-// Helper to convert model alias to camelCase function name
-function toCamelCase(alias) {
-  const [provider, ...modelParts] = alias.split(":");
-  const model = modelParts.join("-");
-
-  // Convert to camelCase (handle both letters and numbers after hyphens)
-  const camelModel = model.replace(/-([a-z0-9])/g, (match, char) =>
-    char.toUpperCase()
-  );
-
-  return camelModel;
-}
-
 // Build provider-grouped functions from registry
 function buildProviderFunctions(models) {
   const functions = {};
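For reference, this is the deleted local helper, reassembled from the removed lines; `aliasToFunctionName` in `src/config/models.js` presumably performs the same alias-to-camelCase mapping (an assumption; only its import is visible in this diff):

```js
// The removed helper, verbatim from the deleted lines above.
function toCamelCase(alias) {
  const [provider, ...modelParts] = alias.split(":");
  const model = modelParts.join("-");

  // Convert to camelCase (handle both letters and numbers after hyphens)
  const camelModel = model.replace(/-([a-z0-9])/g, (match, char) =>
    char.toUpperCase()
  );

  return camelModel;
}

console.log(toCamelCase("openai:gpt-5-chat-latest")); // "gpt5ChatLatest"
```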
@@ -274,7 +436,7 @@ function buildProviderFunctions(models) {
     functions[provider] = {};

     for (const [alias, modelConfig] of Object.entries(providerModels)) {
-      const functionName =
+      const functionName = aliasToFunctionName(alias);

       functions[provider][functionName] = (options = {}) => {
         // Respect provider overrides in options (last-write-wins)
@@ -299,8 +461,11 @@

 // Helper function for single prompt completion
 export async function complete(prompt, options = {}) {
-
-  const
+  // Skip config check in tests to avoid PO_ROOT requirement
+  const isTest =
+    process.env.NODE_ENV === "test" || process.env.VITEST === "true";
+  const defaultProvider =
+    options.provider || (isTest ? "openai" : getConfig().llm.defaultProvider);

   return chat({
     provider: defaultProvider,
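`complete()` keeps its single-prompt signature; only provider resolution changed (explicit option, then the test default, then configuration). A hypothetical call:

```js
// Hypothetical usage; complete() wraps chat() around one prompt. The import
// path is an assumption.
import { complete } from "./src/llm/index.js";

const res = await complete("Summarize this repo in one line.", {
  provider: "openai", // omit to fall back to getConfig().llm.defaultProvider
  temperature: 0,
});
console.log(res.content);
```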
@@ -407,21 +572,28 @@ export async function parallel(workerFn, items, concurrency = 5) {

 // Create a bound LLM interface - for named-models tests, only return provider functions
 export function createLLM() {
-
-
-  // Build functions from registry
-  const providerFunctions = buildProviderFunctions(config.llm.models);
+  // Build functions from centralized registry
+  const providerFunctions = buildProviderFunctions(MODEL_CONFIG);

   return providerFunctions;
 }

+// Create named models API (explicit function for clarity)
+export function createNamedModelsAPI() {
+  return buildProviderFunctions(MODEL_CONFIG);
+}
+
 // Separate function for high-level LLM interface (used by llm.test.js)
 export function createHighLevelLLM(options = {}) {
-
-  const
-
-
-  const
+  // Skip config check in tests to avoid PO_ROOT requirement
+  const isTest =
+    process.env.NODE_ENV === "test" || process.env.VITEST === "true";
+  const config = isTest ? { llm: { defaultProvider: "openai" } } : getConfig();
+  const defaultProvider =
+    options.defaultProvider || (isTest ? "openai" : config.llm.defaultProvider);
+
+  // Build functions from centralized registry
+  const providerFunctions = buildProviderFunctions(MODEL_CONFIG);

   return {
     // High-level interface methods
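With both factories reading from the shared `MODEL_CONFIG`, the provider-grouped surface no longer depends on per-call configuration. A hypothetical look at the generated shape; the alias and resulting method name are invented, following the camelCase rule shown above:

```js
// Hypothetical: createLLM() returns { [provider]: { [functionName]: fn } },
// with names produced by aliasToFunctionName. Alias and method name invented.
import { createLLM } from "./src/llm/index.js";

const llm = createLLM();
console.log(Object.keys(llm)); // e.g. ["openai", "deepseek", "anthropic", ...]

// If MODEL_CONFIG contains the alias "openai:gpt-5-chat-latest":
const res = await llm.openai.gpt5ChatLatest({
  messages: [{ role: "user", content: "hi" }],
});
console.log(res.content);
```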