@ryanfw/prompt-orchestration-pipeline 0.14.2 → 0.15.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/core/orchestrator.js +0 -18
- package/src/core/pipeline-runner.js +0 -37
- package/src/core/status-writer.js +0 -18
- package/src/core/symlink-utils.js +0 -12
- package/src/core/task-runner.js +0 -23
- package/src/llm/index.js +36 -36
- package/src/providers/anthropic.js +16 -13
- package/src/providers/deepseek.js +4 -1
- package/src/providers/gemini.js +16 -15
- package/src/providers/openai.js +21 -25
- package/src/providers/zhipu.js +16 -13
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@ryanfw/prompt-orchestration-pipeline",
-  "version": "0.14.2",
+  "version": "0.15.1",
   "description": "A Prompt-orchestration pipeline (POP) is a framework for building, running, and experimenting with complex chains of LLM tasks.",
   "type": "module",
   "main": "src/ui/server.js",
package/src/core/orchestrator.js
CHANGED

@@ -123,10 +123,8 @@ export async function startOrchestrator(opts) {
     } catch {}

     // Move seed to current/{jobId}/seed.json
-    logger.log("Moving file", { from: filePath, to: dest });
     try {
       await moveFile(filePath, dest);
-      logger.log("Successfully moved file", { destination: dest });
     } catch (error) {
       logger.error("Failed to move file", {
         from: filePath,
@@ -173,12 +171,6 @@ export async function startOrchestrator(opts) {
       // Apply artifact initialization to the status
       const updatedStatus = applyArtifacts(status);
       await fs.writeFile(statusPath, JSON.stringify(updatedStatus, null, 2));
-
-      logger.log("Initialized status from upload artifacts", {
-        jobId,
-        pipeline: seed?.pipeline,
-        artifactsCount: updatedStatus.files?.artifacts?.length || 0,
-      });
     } catch (artifactError) {
       // Don't fail job startup if artifact initialization fails, just log
       logger.warn("Failed to initialize status from artifacts", {
@@ -233,7 +225,6 @@ export async function startOrchestrator(opts) {

   // Watch pending directory for seeds
   const watchPattern = path.join(dirs.pending, "*.json");
-  logger.log("Watching pattern", { pattern: watchPattern });
   const watcher = watcherFactory(watchPattern, {
     ignoreInitial: false,
     awaitWriteFinish: false, // Disable awaitWriteFinish for faster detection
@@ -243,7 +234,6 @@ export async function startOrchestrator(opts) {
   // Wait for watcher to be ready before resolving
   await new Promise((resolve, reject) => {
     watcher.on("ready", () => {
-      logger.log("Watcher is ready");
       resolve();
     });

@@ -254,7 +244,6 @@ export async function startOrchestrator(opts) {
   });

   watcher.on("add", (file) => {
-    logger.log("Detected file add", { file });
     // Return promise so tests awaiting the add handler block until processing completes
     return handleSeedAdd(file);
   });
@@ -330,13 +319,6 @@ function spawnRunner(
   const availablePipelines = Object.keys(configSnapshot?.pipelines ?? {});
   const pipelineSlug = seed?.pipeline;

-  logger.log("spawnRunner invoked", {
-    jobId,
-    pipelineSlug: pipelineSlug ?? null,
-    availablePipelines,
-    seedKeys: seed ? Object.keys(seed) : null,
-  });
-
   if (!availablePipelines.length) {
     logger.warn(
       "No pipelines registered in config() when spawnRunner invoked"
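The watcher hunks above change only logging; the watch options (ignoreInitial: false, awaitWriteFinish: false), the "ready" promise, and the "add" handler survive intact. For orientation, a minimal sketch of that seed-watching pattern — assuming the injected watcherFactory is chokidar-compatible with v3-style glob support, which this diff implies but does not show:

// Hypothetical sketch, not the package's orchestrator code.
import path from "node:path";
import chokidar from "chokidar";

export function watchSeeds(pendingDir, onSeed) {
  const watchPattern = path.join(pendingDir, "*.json");
  const watcher = chokidar.watch(watchPattern, {
    ignoreInitial: false, // also pick up seeds already sitting in pending/
    awaitWriteFinish: false, // favor fast detection over write-completion checks
  });

  // Resolve once the initial scan completes, mirroring the "ready" promise above.
  const ready = new Promise((resolve) => watcher.on("ready", resolve));

  // Returning the handler's promise lets callers and tests await processing,
  // as the comment kept in the "add" hunk notes.
  watcher.on("add", (file) => onSeed(file));

  return { watcher, ready };
}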
package/src/core/pipeline-runner.js
CHANGED

@@ -143,10 +143,6 @@ try {
       startFromTask &&
       taskNames.indexOf(taskName) < taskNames.indexOf(startFromTask)
     ) {
-      logger.log("Skipping task before restart point", {
-        taskName,
-        startFromTask,
-      });
       continue;
     }

@@ -155,7 +151,6 @@ try {
       const outputPath = path.join(workDir, "tasks", taskName, "output.json");
       const output = JSON.parse(await fs.readFile(outputPath, "utf8"));
       pipelineArtifacts[taskName] = output;
-      logger.log("Task already completed", { taskName });
     } catch {
       logger.warn("Failed to read completed task output", { taskName });
     }
@@ -190,7 +185,6 @@ try {
      }
    }

-    logger.log("Starting task", { taskName });
    await updateStatus(taskName, {
      state: TaskState.RUNNING,
      startedAt: now(),
@@ -265,13 +259,6 @@ try {
        process.exitCode = 1;
        process.exit(1);
      }
-
-      logger.log("Task symlinks repaired successfully", {
-        taskName,
-        taskDir,
-        repairDuration: repairResult.duration,
-        relocatedEntry: repairResult.relocatedEntry,
-      });
    } else {
      logger.debug("Task symlinks validation passed", {
        taskName,
@@ -295,7 +282,6 @@ try {
      statusPath: tasksStatusPath,
    });

-    logger.log("Running task", { taskName, modulePath: absoluteModulePath });
    const result = await runPipeline(relocatedEntry, ctx);

    if (!result.ok) {
@@ -363,13 +349,6 @@ try {
      process.exit(1);
    }

-    logger.log("Task completed successfully", {
-      taskName,
-      executionTimeMs:
-        result.logs?.reduce((total, log) => total + (log.ms || 0), 0) || 0,
-      refinementAttempts: result.refinementAttempts || 0,
-    });
-
    // The file I/O system automatically handles writing outputs and updating tasks-status.json
    // No need to manually write output.json or enumerate artifacts

@@ -396,7 +375,6 @@ try {

    // Check if this is a single task run and we've completed the target task
    if (runSingleTask && taskName === startFromTask) {
-      logger.log("Stopping after single task execution", { taskName });
      break;
    }
  } catch (err) {
@@ -415,19 +393,6 @@ try {
  await fs.mkdir(COMPLETE_DIR, { recursive: true });
  const dest = path.join(COMPLETE_DIR, jobId);

-  logger.log("Pipeline completed", {
-    jobId,
-    totalExecutionTime: Object.values(status.tasks).reduce(
-      (total, t) => total + (t.executionTimeMs || 0),
-      0
-    ),
-    totalRefinementAttempts: Object.values(status.tasks).reduce(
-      (total, t) => total + (t.refinementAttempts || 0),
-      0
-    ),
-    finalArtifacts: Object.keys(pipelineArtifacts),
-  });
-
  await fs.rename(workDir, dest);
  await appendLine(
    path.join(COMPLETE_DIR, "runs.jsonl"),
@@ -449,8 +414,6 @@ try {

    // Clean up task symlinks to avoid dangling links in archives
    await cleanupTaskSymlinks(dest);
-  } else {
-    logger.log("Single task run completed, job remains in current", { jobId });
  }
} catch (error) {
  throw error;
package/src/core/status-writer.js
CHANGED

@@ -164,13 +164,8 @@ export async function writeJobStatus(jobDir, updateFn) {

  const next = prev
    .then(async () => {
-      logger.group("Status Write Operation");
-      logger.log(`Updating status for job: ${jobId}`);
-      logger.log(`Status file path: ${statusPath}`);
-
      // Read existing status or create default
      const current = await readStatusFile(statusPath, jobId);
-      logger.log("Current status snapshot:", current);

      // Validate basic structure
      const validated = validateStatusSnapshot(current);
@@ -188,11 +183,9 @@ export async function writeJobStatus(jobDir, updateFn) {
      );

      snapshot.lastUpdated = new Date().toISOString();
-      logger.log("Status after update function:", snapshot);

      // Atomic write
      await atomicWrite(statusPath, snapshot);
-      logger.log("Status file written successfully");

      // Emit SSE event for tasks-status.json change using logger
      try {
@@ -202,7 +195,6 @@ export async function writeJobStatus(jobDir, updateFn) {
          jobId,
        };
        await logger.sse("state:change", eventData);
-        logger.log("SSE event broadcasted successfully");
      } catch (error) {
        // Don't fail the write if SSE emission fails
        logger.error("Failed to emit SSE event:", error);
@@ -218,10 +210,6 @@ export async function writeJobStatus(jobDir, updateFn) {
            reason: snapshot.lifecycleBlockReason,
          };
          await logger.sse("lifecycle_block", lifecycleEventData);
-          logger.log(
-            "lifecycle_block SSE event broadcasted successfully",
-            lifecycleEventData
-          );
        } catch (error) {
          // Don't fail the write if SSE emission fails
          logger.error("Failed to emit lifecycle_block SSE event:", error);
@@ -310,9 +298,6 @@ export async function updateTaskStatus(jobDir, taskId, taskUpdateFn) {

  const next = prev
    .then(async () => {
-      logger.group("Task Status Update Operation");
-      logger.log(`Updating task ${taskId} for job: ${jobId}`);
-
      const statusPath = path.join(jobDir, "tasks-status.json");

      // Read existing status or create default
@@ -336,7 +321,6 @@ export async function updateTaskStatus(jobDir, taskId, taskUpdateFn) {

      // Atomic write
      await atomicWrite(statusPath, validated);
-      logger.log("Task status file written successfully");

      // Emit task:updated SSE event after successful write
      try {
@@ -346,13 +330,11 @@ export async function updateTaskStatus(jobDir, taskId, taskUpdateFn) {
          task: validated.tasks[taskId],
        };
        await logger.sse("task:updated", eventData);
-        logger.log("task:updated SSE event broadcasted successfully");
      } catch (error) {
        // Don't fail the write if SSE emission fails
        logger.error("Failed to emit task:updated SSE event:", error);
      }

-      logger.groupEnd();
      resultSnapshot = validated;
    })
    .catch((e) => {
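Both writeJobStatus and updateTaskStatus above funnel work through `const next = prev.then(...)`: a per-job promise chain that serializes concurrent status writes before an atomic write and an SSE broadcast. A self-contained sketch of that serialization technique — the names and file layout here are illustrative, not the package's actual helpers:

// Hypothetical per-key write queue; not the package's status-writer.js.
// `queues` maps a job directory to the tail of its promise chain, so writes
// to the same job run one at a time while different jobs stay concurrent.
import fs from "node:fs/promises";
import path from "node:path";

const queues = new Map();

export function enqueueStatusWrite(jobDir, updateFn) {
  const prev = queues.get(jobDir) ?? Promise.resolve();
  const next = prev.then(async () => {
    const statusPath = path.join(jobDir, "tasks-status.json");
    const raw = await fs.readFile(statusPath, "utf8").catch(() => "{}");
    const current = JSON.parse(raw);
    const snapshot = updateFn(current) ?? current;
    snapshot.lastUpdated = new Date().toISOString();
    // Atomic write: write a temp file, then rename over the target.
    const tmp = statusPath + ".tmp";
    await fs.writeFile(tmp, JSON.stringify(snapshot, null, 2));
    await fs.rename(tmp, statusPath);
    return snapshot;
  });
  // Keep the chain alive even if this particular write fails.
  queues.set(jobDir, next.catch(() => {}));
  return next;
}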
package/src/core/symlink-utils.js
CHANGED

@@ -196,12 +196,6 @@ export async function repairTaskSymlinks(taskDir, poRoot, taskModulePath) {
  const startTime = Date.now();

  try {
-    logger.log("Repairing task symlinks", {
-      taskDir,
-      poRoot,
-      taskModulePath,
-    });
-
    // Use existing ensureTaskSymlinkBridge for repairs
    const relocatedEntry = await ensureTaskSymlinkBridge({
      taskDir,
@@ -211,12 +205,6 @@ export async function repairTaskSymlinks(taskDir, poRoot, taskModulePath) {

    const duration = Date.now() - startTime;

-    logger.log("Task symlinks repaired successfully", {
-      taskDir,
-      duration,
-      relocatedEntry,
-    });
-
    return {
      success: true,
      relocatedEntry,
package/src/core/task-runner.js
CHANGED

@@ -484,10 +484,6 @@ export async function runPipeline(modulePath, initialContext = {}) {

    // Skip stages when skipIf predicate returns true
    if (stageConfig.skipIf && stageConfig.skipIf(context.flags)) {
-      logger.log("Skipping stage", {
-        stage: stageName,
-        reason: "skipIf predicate returned true",
-      });
      context.logs.push({
        stage: stageName,
        action: "skipped",
@@ -499,7 +495,6 @@ export async function runPipeline(modulePath, initialContext = {}) {

    // Skip if handler is not available (not implemented)
    if (typeof stageHandler !== "function") {
-      logger.log("Stage not available, skipping", { stage: stageName });
      logs.push({
        stage: stageName,
        skipped: true,
@@ -526,11 +521,6 @@ export async function runPipeline(modulePath, initialContext = {}) {
    // Set current stage before execution
    context.currentStage = stageName;

-    logger.log("Starting stage execution", {
-      stage: stageName,
-      taskName: context.meta.taskName,
-    });
-
    // Write stage start status using writeJobStatus
    if (context.meta.workDir && context.meta.taskName) {
      try {
@@ -719,12 +709,6 @@ export async function runPipeline(modulePath, initialContext = {}) {
    );

    const ms = +(performance.now() - start).toFixed(2);
-    logger.log("Stage completed successfully", {
-      stage: stageName,
-      executionTimeMs: ms,
-      outputType: typeof stageResult.output,
-      flagKeys: Object.keys(stageResult.flags),
-    });
    logs.push({
      stage: stageName,
      ok: true,
@@ -819,13 +803,6 @@ export async function runPipeline(modulePath, initialContext = {}) {

  llmEvents.off("llm:request:complete", onLLMComplete);

-  logger.log("Pipeline completed successfully", {
-    taskName: context.meta.taskName,
-    totalStages: PIPELINE_STAGES.length,
-    executedStages: logs.filter((l) => l.ok).length,
-    llmMetricsCount: llmMetrics.length,
-  });
-
  // Write final status with currentStage: null to indicate completion
  if (context.meta.workDir && context.meta.taskName) {
    try {
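runPipeline above walks a fixed stage list, honoring per-stage skipIf predicates, skipping unimplemented handlers, and timing each stage with performance.now(); the deleted lines were purely progress logs around that loop. A stripped-down sketch of the loop's shape — the stage and context structure here is illustrative, not the package's actual PIPELINE_STAGES:

// Hypothetical stage loop in the shape of runPipeline; names are illustrative.
import { performance } from "node:perf_hooks";

export async function runStages(stages, handlers, context) {
  const logs = [];
  for (const { name, skipIf } of stages) {
    // Honor per-stage skip predicates driven by accumulated flags.
    if (skipIf && skipIf(context.flags)) {
      logs.push({ stage: name, action: "skipped" });
      continue;
    }
    // Tolerate stages whose handler is not implemented.
    const handler = handlers[name];
    if (typeof handler !== "function") {
      logs.push({ stage: name, skipped: true });
      continue;
    }
    context.currentStage = name;
    const start = performance.now();
    const result = await handler(context);
    const ms = +(performance.now() - start).toFixed(2);
    Object.assign(context.flags, result?.flags);
    logs.push({ stage: name, ok: true, ms });
  }
  context.currentStage = null; // final status marks completion this way
  return { ok: true, logs };
}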
package/src/llm/index.js
CHANGED

@@ -11,6 +11,9 @@ import {
   aliasToFunctionName,
 } from "../config/models.js";
 import fs from "node:fs";
+import { createLogger } from "../core/logger.js";
+
+const logger = createLogger("LLM");

 // Global mock provider instance (for demo/testing)
 let mockProviderInstance = null;
@@ -103,7 +106,7 @@ function shouldInferJsonFormat(messages) {

 // Core chat function - no metrics handling needed!
 export async function chat(options) {
-  console.log("[llm] chat() called with options:", {
+  logger.log("chat() called with options:", {
     provider: options.provider,
     model: options.model,
     messageCount: options.messages?.length || 0,
@@ -133,11 +136,11 @@ export async function chat(options) {

   const available = getAvailableProviders();

-  console.log("[llm] Available providers:", available);
-  console.log("[llm] Requested provider:", provider);
+  logger.log("Available providers:", available);
+  logger.log("Requested provider:", provider);

   if (!available[provider]) {
-    console.error("[llm] Provider not available:", provider);
+    logger.error("Provider not available:", provider);
     throw new Error(`Provider ${provider} not available. Check API keys.`);
   }

@@ -149,7 +152,7 @@ export async function chat(options) {
   const userMessages = messages.filter((m) => m.role === "user");
   const userMsg = userMessages.map((m) => m.content).join("\n");

-  console.log("[llm] Message analysis:", {
+  logger.log("Message analysis:", {
     hasSystemMessage: !!systemMsg,
     systemMessageLength: systemMsg.length,
     userMessageCount: userMessages.length,
@@ -163,10 +166,7 @@ export async function chat(options) {
     JSON.stringify({ messages, systemMsg, userMsg, provider, model }, null, 2)
   );

-  console.log(
-    "[llm] Emitting llm:request:start event for requestId:",
-    requestId
-  );
+  logger.log("Emitting llm:request:start event for requestId:", requestId);

   // Emit request start event
   llmEvents.emit("llm:request:start", {
@@ -178,12 +178,12 @@ export async function chat(options) {
   });

   try {
-    console.log("[llm] Starting provider call for:", provider);
+    logger.log("Starting provider call for:", provider);
     let response;
     let usage;

     if (provider === "mock") {
-      console.log("[llm] Using mock provider");
+      logger.log("Using mock provider");
       if (!mockProviderInstance) {
         throw new Error(
           "Mock provider not registered. Call registerMockProvider() first."
@@ -197,7 +197,7 @@ export async function chat(options) {
         maxTokens,
         ...rest,
       });
-      console.log("[llm] Mock provider returned result");
+      logger.log("Mock provider returned result");

       response = {
         content: result.content,
@@ -210,7 +210,7 @@ export async function chat(options) {
         totalTokens: result.usage.total_tokens,
       };
     } else if (provider === "openai") {
-      console.log("[llm] Using OpenAI provider");
+      logger.log("Using OpenAI provider");

       // Infer JSON format if not explicitly provided
       const effectiveResponseFormat =
@@ -229,7 +229,7 @@ export async function chat(options) {
         maxTokens,
         ...rest,
       };
-      console.log("[llm] OpenAI call parameters:", {
+      logger.log("OpenAI call parameters:", {
         model: openaiArgs.model,
         hasMessages: !!openaiArgs.messages,
         messageCount: openaiArgs.messages?.length,
@@ -244,9 +244,9 @@ export async function chat(options) {
         openaiArgs.presencePenalty = presencePenalty;
       if (stop !== undefined) openaiArgs.stop = stop;

-      console.log("[llm] Calling openaiChat()...");
+      logger.log("Calling openaiChat()...");
       const result = await openaiChat(openaiArgs);
-      console.log("[llm] openaiChat() returned:", {
+      logger.log("openaiChat() returned:", {
         hasResult: !!result,
         hasContent: !!result?.content,
         hasUsage: !!result?.usage,
@@ -277,7 +277,7 @@ export async function chat(options) {
        };
      }
    } else if (provider === "deepseek") {
-      console.log("[llm] Using DeepSeek provider");
+      logger.log("Using DeepSeek provider");

      // Infer JSON format if not explicitly provided
      const effectiveResponseFormat =
@@ -296,7 +296,7 @@ export async function chat(options) {
        maxTokens,
        ...rest,
      };
-      console.log("[llm] DeepSeek call parameters:", {
+      logger.log("DeepSeek call parameters:", {
        model: deepseekArgs.model,
        hasMessages: !!deepseekArgs.messages,
        messageCount: deepseekArgs.messages?.length,
@@ -312,9 +312,9 @@ export async function chat(options) {
        deepseekArgs.responseFormat = effectiveResponseFormat;
      }

-      console.log("[llm] Calling deepseekChat()...");
+      logger.log("Calling deepseekChat()...");
      const result = await deepseekChat(deepseekArgs);
-      console.log("[llm] deepseekChat() returned:", {
+      logger.log("deepseekChat() returned:", {
        hasResult: !!result,
        isStream: typeof result?.[Symbol.asyncIterator] !== "undefined",
        hasContent: !!result?.content,
@@ -350,7 +350,7 @@ export async function chat(options) {
        };
      }
    } else if (provider === "anthropic") {
-      console.log("[llm] Using Anthropic provider");
+      logger.log("Using Anthropic provider");
      const defaultAlias = DEFAULT_MODEL_BY_PROVIDER.anthropic;
      const defaultModelConfig = MODEL_CONFIG[defaultAlias];
      const defaultModel = defaultModelConfig?.model;
@@ -362,7 +362,7 @@ export async function chat(options) {
        maxTokens,
        ...rest,
      };
-      console.log("[llm] Anthropic call parameters:", {
+      logger.log("Anthropic call parameters:", {
        model: anthropicArgs.model,
        hasMessages: !!anthropicArgs.messages,
        messageCount: anthropicArgs.messages?.length,
@@ -373,9 +373,9 @@ export async function chat(options) {
        anthropicArgs.responseFormat = responseFormat;
      }

-      console.log("[llm] Calling anthropicChat()...");
+      logger.log("Calling anthropicChat()...");
      const result = await anthropicChat(anthropicArgs);
-      console.log("[llm] anthropicChat() returned:", {
+      logger.log("anthropicChat() returned:", {
        hasResult: !!result,
        hasContent: !!result?.content,
        hasUsage: !!result?.usage,
@@ -406,7 +406,7 @@ export async function chat(options) {
        };
      }
    } else if (provider === "gemini") {
-      console.log("[llm] Using Gemini provider");
+      logger.log("Using Gemini provider");

      // Infer JSON format if not explicitly provided
      const effectiveResponseFormat =
@@ -425,7 +425,7 @@ export async function chat(options) {
        maxTokens,
        ...rest,
      };
-      console.log("[llm] Gemini call parameters:", {
+      logger.log("Gemini call parameters:", {
        model: geminiArgs.model,
        hasMessages: !!geminiArgs.messages,
        messageCount: geminiArgs.messages?.length,
@@ -436,9 +436,9 @@ export async function chat(options) {
        geminiArgs.responseFormat = effectiveResponseFormat;
      }

-      console.log("[llm] Calling geminiChat()...");
+      logger.log("Calling geminiChat()...");
      const result = await geminiChat(geminiArgs);
-      console.log("[llm] geminiChat() returned:", {
+      logger.log("geminiChat() returned:", {
        hasResult: !!result,
        hasContent: !!result?.content,
        hasUsage: !!result?.usage,
@@ -469,7 +469,7 @@ export async function chat(options) {
        };
      }
    } else if (provider === "zhipu") {
-      console.log("[llm] Using Zhipu provider");
+      logger.log("Using Zhipu provider");
      const defaultAlias = DEFAULT_MODEL_BY_PROVIDER.zhipu;
      const defaultModelConfig = MODEL_CONFIG[defaultAlias];
      const defaultModel = defaultModelConfig?.model;
@@ -481,7 +481,7 @@ export async function chat(options) {
        maxTokens,
        ...rest,
      };
-      console.log("[llm] Zhipu call parameters:", {
+      logger.log("Zhipu call parameters:", {
        model: zhipuArgs.model,
        hasMessages: !!zhipuArgs.messages,
        messageCount: zhipuArgs.messages?.length,
@@ -492,9 +492,9 @@ export async function chat(options) {
        zhipuArgs.responseFormat = responseFormat;
      }

-      console.log("[llm] Calling zhipuChat()...");
+      logger.log("Calling zhipuChat()...");
      const result = await zhipuChat(zhipuArgs);
-      console.log("[llm] zhipuChat() returned:", {
+      logger.log("zhipuChat() returned:", {
        hasResult: !!result,
        hasContent: !!result?.content,
        hasUsage: !!result?.usage,
@@ -525,16 +525,16 @@ export async function chat(options) {
        };
      }
    } else {
-      console.error("[llm] Unknown provider:", provider);
+      logger.error("Unknown provider:", provider);
      throw new Error(`Provider ${provider} not yet implemented`);
    }

-    console.log("[llm] Processing response from provider:", provider);
+    logger.log("Processing response from provider:", provider);

    const duration = Date.now() - startTime;
    const cost = calculateCost(provider, model, usage);

-    console.log("[llm] Request completed:", {
+    logger.log("Request completed:", {
      duration: `${duration}ms`,
      cost,
      usage,
@@ -560,7 +560,7 @@ export async function chat(options) {
  } catch (error) {
    const duration = Date.now() - startTime;

-    console.error("[llm] Error in chat():", {
+    logger.error("Error in chat():", {
      error: error.message,
      name: error.name,
      stack: error.stack,
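Every provider below, like llm/index.js above, now pulls a named logger from ../core/logger.js instead of hand-prefixing console calls. That module is not included in this diff, so the following is only a plausible sketch of the createLogger surface the new calls rely on (the real one evidently also exposes debug, group/groupEnd, and an async sse broadcast used by status-writer.js):

// Hypothetical sketch — NOT the package's actual src/core/logger.js.
// It shows only the call surface the diff relies on: a name-prefixed
// wrapper around the console, so "[Gemini] ..."-style prefixes no longer
// have to live inside each message string.
export function createLogger(name) {
  const prefix = `[${name}]`;
  return {
    log: (...args) => console.log(prefix, ...args),
    warn: (...args) => console.warn(prefix, ...args),
    error: (...args) => console.error(prefix, ...args),
    debug: (...args) => console.debug(prefix, ...args),
  };
}

// Usage mirroring the hunks above:
//   const logger = createLogger("LLM");
//   logger.log("Requested provider:", provider);
//   // -> "[LLM] Requested provider: openai"

The net effect of the migration is that messages lose their hard-coded prefixes and gain a single place to control formatting, filtering, and transport.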
package/src/providers/anthropic.js
CHANGED

@@ -7,6 +7,9 @@ import {
  ensureJsonResponseFormat,
  ProviderJsonParseError,
} from "./base.js";
+import { createLogger } from "../core/logger.js";
+
+const logger = createLogger("Anthropic");

export async function anthropicChat({
  messages,
@@ -18,16 +21,16 @@ export async function anthropicChat({
  stop,
  maxRetries = 3,
}) {
-  console.log("\n[Anthropic] Starting anthropicChat call");
-  console.log("[Anthropic] Model:", model);
-  console.log("[Anthropic] Response format:", responseFormat);
+  logger.log("\nStarting anthropicChat call");
+  logger.log("Model:", model);
+  logger.log("Response format:", responseFormat);

  // Enforce JSON mode - reject calls without proper JSON responseFormat
  ensureJsonResponseFormat(responseFormat, "Anthropic");

  const { systemMsg, userMsg } = extractMessages(messages);
-  console.log("[Anthropic] System message length:", systemMsg.length);
-  console.log("[Anthropic] User message length:", userMsg.length);
+  logger.log("System message length:", systemMsg.length);
+  logger.log("User message length:", userMsg.length);

  // Build system guard for JSON enforcement
  let system = systemMsg;
@@ -43,7 +46,7 @@ export async function anthropicChat({
    }

    try {
-      console.log(`[Anthropic] Attempt ${attempt + 1}/${maxRetries + 1}`);
+      logger.log(`Attempt ${attempt + 1}/${maxRetries + 1}`);

      const requestBody = {
        model,
@@ -55,7 +58,7 @@ export async function anthropicChat({
        ...(stop !== undefined ? { stop_sequences: stop } : {}),
      };

-      console.log("[Anthropic] Calling Anthropic API...");
+      logger.log("Calling Anthropic API...");
      const response = await fetch("https://api.anthropic.com/v1/messages", {
        method: "POST",
        headers: {
@@ -74,7 +77,7 @@ export async function anthropicChat({
      }

      const data = await response.json();
-      console.log("[Anthropic] Response received from Anthropic API");
+      logger.log("Response received from Anthropic API");

      // Extract text from response.content blocks
      const blocks = Array.isArray(data?.content) ? data.content : [];
@@ -84,7 +87,7 @@ export async function anthropicChat({
        .join("");
      // Always strip markdown fences first to prevent parse failures
      const text = stripMarkdownFences(rawText);
-      console.log("[Anthropic] Response text length:", text.length);
+      logger.log("Response text length:", text.length);

      // Parse JSON - this is required for all calls
      const parsed = tryParseJSON(text);
@@ -106,7 +109,7 @@ export async function anthropicChat({
        ? { prompt_tokens, completion_tokens, total_tokens }
        : undefined;

-      console.log("[Anthropic] Returning response from Anthropic API");
+      logger.log("Returning response from Anthropic API");
      return {
        content: parsed,
        text,
@@ -116,13 +119,13 @@ export async function anthropicChat({
    } catch (error) {
      lastError = error;
      const msg = error?.error?.message || error?.message || "";
-      console.error("[Anthropic] Error occurred:", msg);
-      console.error("[Anthropic] Error status:", error?.status);
+      logger.error("Error occurred:", msg);
+      logger.error("Error status:", error?.status);

      if (error.status === 401) throw error;

      if (isRetryableError(error) && attempt < maxRetries) {
-        console.log("[Anthropic] Retrying due to retryable error");
+        logger.log("Retrying due to retryable error");
        continue;
      }

package/src/providers/deepseek.js
CHANGED

@@ -7,6 +7,9 @@ import {
  ensureJsonResponseFormat,
  ProviderJsonParseError,
} from "./base.js";
+import { createLogger } from "../core/logger.js";
+
+const logger = createLogger("DeepSeek");

export async function deepseekChat({
  messages,
@@ -163,7 +166,7 @@ async function* createStreamGenerator(stream) {
        }
      } catch (e) {
        // Skip malformed JSON
-        console.warn("[DeepSeek] Failed to parse stream chunk:", e);
+        logger.warn("Failed to parse stream chunk:", e);
      }
    }
  }
package/src/providers/gemini.js
CHANGED

@@ -7,6 +7,9 @@ import {
  ensureJsonResponseFormat,
  ProviderJsonParseError,
} from "./base.js";
+import { createLogger } from "../core/logger.js";
+
+const logger = createLogger("Gemini");

/**
 * Google Gemini provider implementation
@@ -120,15 +123,13 @@ export async function geminiChat(options) {
    }

    try {
-      console.log(
-        `[Gemini] Starting geminiChat call (attempt ${attempt + 1}/${maxRetries + 1})`
-      );
-      console.log(`[Gemini] Model: ${model}`);
-      console.log(`[Gemini] Response format:`, responseFormat);
-      console.log(
-        `[Gemini] System instruction length: ${systemInstruction.length}`
+      logger.log(
+        `Starting geminiChat call (attempt ${attempt + 1}/${maxRetries + 1})`
      );
-      console.log(`[Gemini] User message length: ${userMsg.length}`);
+      logger.log(`Model: ${model}`);
+      logger.log(`Response format:`, responseFormat);
+      logger.log(`System instruction length: ${systemInstruction.length}`);
+      logger.log(`User message length: ${userMsg.length}`);

      const response = await fetch(url, {
        method: "POST",
@@ -153,7 +154,7 @@ export async function geminiChat(options) {

      // Retry on retryable errors
      if (isRetryableError(error) && attempt < maxRetries) {
-        console.log(`[Gemini] Retryable error, retrying...`);
+        logger.log(`Retryable error, retrying...`);
        lastError = error;
        continue;
      }
@@ -162,8 +163,8 @@ export async function geminiChat(options) {
      }

      const data = await response.json();
-      console.log(
-        `[Gemini] Response received, candidates length: ${data.candidates?.length || 0}`
+      logger.log(
+        `Response received, candidates length: ${data.candidates?.length || 0}`
      );

      // Extract text from response
@@ -175,7 +176,7 @@ export async function geminiChat(options) {
      const rawText = candidate.content.parts[0].text;
      // Always strip markdown fences first to prevent parse failures
      const text = stripMarkdownFences(rawText);
-      console.log(`[Gemini] Text length: ${text.length}`);
+      logger.log(`Text length: ${text.length}`);

      // Parse JSON if required
      const parsed = tryParseJSON(text);
@@ -197,7 +198,7 @@ export async function geminiChat(options) {
        }
        : undefined;

-      console.log(`[Gemini] Usage:`, usage);
+      logger.log(`Usage:`, usage);

      return {
        content: parsed || text,
@@ -206,8 +207,8 @@ export async function geminiChat(options) {
        raw: data,
      };
    } catch (error) {
-      console.error(`[Gemini] Error occurred: ${error.message}`);
-      console.error(`[Gemini] Error status: ${error.status}`);
+      logger.error(`Error occurred: ${error.message}`);
+      logger.error(`Error status: ${error.status}`);

      lastError = error;

package/src/providers/openai.js
CHANGED

@@ -8,6 +8,9 @@ import {
  ensureJsonResponseFormat,
  ProviderJsonParseError,
} from "./base.js";
+import { createLogger } from "../core/logger.js";
+
+const logger = createLogger("OpenAI");

let client = null;

@@ -43,16 +46,16 @@ export async function openaiChat({
  maxRetries = 3,
  ...rest
}) {
-  console.log("\n[OpenAI] Starting openaiChat call");
-  console.log("[OpenAI] Model:", model);
-  console.log("[OpenAI] Response format:", responseFormat);
+  logger.log("\nStarting openaiChat call");
+  logger.log("Model:", model);
+  logger.log("Response format:", responseFormat);

  const openai = getClient();
  if (!openai) throw new Error("OpenAI API key not configured");

  const { systemMsg, userMsg } = extractMessages(messages);
-  console.log("[OpenAI] System message length:", systemMsg.length);
-  console.log("[OpenAI] User message length:", userMsg.length);
+  logger.log("System message length:", systemMsg.length);
+  logger.log("User message length:", userMsg.length);

  // Determine if JSON mode is requested (handle both object and string formats)
  const isJsonMode =
@@ -68,11 +71,11 @@ export async function openaiChat({
    const useResponsesAPI = /^gpt-5/i.test(model);

    try {
-      console.log(`[OpenAI] Attempt ${attempt + 1}/${maxRetries + 1}`);
+      logger.log(`Attempt ${attempt + 1}/${maxRetries + 1}`);

      // ---------- RESPONSES API path (GPT-5 models) ----------
      if (useResponsesAPI) {
-        console.log("[OpenAI] Using Responses API for GPT-5 model");
+        logger.log("Using Responses API for GPT-5 model");
        const responsesReq = {
          model,
          instructions: systemMsg,
@@ -100,12 +103,12 @@ export async function openaiChat({
          responsesReq.text = { format: { type: "json_object" } };
        }

-        console.log("[OpenAI] Calling responses.create...");
+        logger.log("Calling responses.create...");
        const resp = await openai.responses.create(responsesReq);
        const rawText = resp.output_text ?? "";
        // Always strip markdown fences first to prevent parse failures
        const text = stripMarkdownFences(rawText);
-        console.log("[OpenAI] Response received, text length:", text.length);
+        logger.log("Response received, text length:", text.length);

        // Approximate usage (tests don't assert exact values)
        const promptTokens = Math.ceil((systemMsg + userMsg).length / 4);
@@ -127,20 +130,16 @@ export async function openaiChat({
            "Failed to parse JSON response from Responses API"
          );
        }
-        console.log(
-          "[OpenAI] Returning response from Responses API (JSON mode)"
-        );
+        logger.log("Returning response from Responses API (JSON mode)");
        return { content: parsed, text, usage, raw: resp };
      }

-      console.log(
-        "[OpenAI] Returning response from Responses API (text mode)"
-      );
+      logger.log("Returning response from Responses API (text mode)");
      return { content: text, text, usage, raw: resp };
    }

    // ---------- CLASSIC CHAT COMPLETIONS path (non-GPT-5) ----------
-    console.log("[OpenAI] Using Classic Chat Completions API");
+    logger.log("Using Classic Chat Completions API");
    const classicReq = {
      model,
      messages,
@@ -163,15 +162,12 @@ export async function openaiChat({
      classicReq.response_format = { type: "json_object" };
    }

-    console.log("[OpenAI] Calling chat.completions.create...");
+    logger.log("Calling chat.completions.create...");
    const classicRes = await openai.chat.completions.create(classicReq);
    const rawClassicText = classicRes?.choices?.[0]?.message?.content ?? "";
    // Always strip markdown fences first to prevent parse failures
    const classicText = stripMarkdownFences(rawClassicText);
-    console.log(
-      "[OpenAI] Response received, text length:",
-      classicText.length
-    );
+    logger.log("Response received, text length:", classicText.length);

    // Parse JSON only in JSON mode; return raw string for text mode
    if (isJsonMode) {
@@ -201,16 +197,16 @@ export async function openaiChat({
    } catch (error) {
      lastError = error;
      const msg = error?.error?.message || error?.message || "";
-      console.error("[OpenAI] Error occurred:", msg);
-      console.error("[OpenAI] Error status:", error?.status);
+      logger.error("Error occurred:", msg);
+      logger.error("Error status:", error?.status);

      // Only fall back when RESPONSES path failed due to lack of support
      if (
        useResponsesAPI &&
        (/not supported/i.test(msg) || /unsupported/i.test(msg))
      ) {
-        console.log(
-          "[OpenAI] Falling back to Classic API due to unsupported Responses API"
+        logger.log(
+          "Falling back to Classic API due to unsupported Responses API"
        );
        const classicReq = {
          model,
package/src/providers/zhipu.js
CHANGED

@@ -7,6 +7,9 @@ import {
  ensureJsonResponseFormat,
  ProviderJsonParseError,
} from "./base.js";
+import { createLogger } from "../core/logger.js";
+
+const logger = createLogger("Zhipu");

export async function zhipuChat({
  messages,
@@ -18,9 +21,9 @@ export async function zhipuChat({
  stop,
  maxRetries = 3,
}) {
-  console.log("\n[Zhipu] Starting zhipuChat call");
-  console.log("[Zhipu] Model:", model);
-  console.log("[Zhipu] Response format:", responseFormat);
+  logger.log("\nStarting zhipuChat call");
+  logger.log("Model:", model);
+  logger.log("Response format:", responseFormat);

  // Enforce JSON mode - reject calls without proper JSON responseFormat
  ensureJsonResponseFormat(responseFormat, "Zhipu");
@@ -30,8 +33,8 @@ export async function zhipuChat({
  }

  const { systemMsg, userMsg } = extractMessages(messages);
-  console.log("[Zhipu] System message length:", systemMsg.length);
-  console.log("[Zhipu] User message length:", userMsg.length);
+  logger.log("System message length:", systemMsg.length);
+  logger.log("User message length:", userMsg.length);

  // Build system guard for JSON enforcement
  let system = systemMsg;
@@ -51,7 +54,7 @@ export async function zhipuChat({
    }

    try {
-      console.log(`[Zhipu] Attempt ${attempt + 1}/${maxRetries + 1}`);
+      logger.log(`Attempt ${attempt + 1}/${maxRetries + 1}`);

      const requestBody = {
        model,
@@ -65,7 +68,7 @@ export async function zhipuChat({
        ...(stop !== undefined ? { stop: stop } : {}),
      };

-      console.log("[Zhipu] Calling Zhipu API...");
+      logger.log("Calling Zhipu API...");
      const response = await fetch(
        "https://api.z.ai/api/paas/v4/chat/completions",
        {
@@ -102,13 +105,13 @@ export async function zhipuChat({
      }

      const data = await response.json();
-      console.log("[Zhipu] Response received from Zhipu API");
+      logger.log("Response received from Zhipu API");

      // Extract text from response
      const rawText = data?.choices?.[0]?.message?.content || "";
      // Always strip markdown fences first to prevent parse failures
      const text = stripMarkdownFences(rawText);
-      console.log("[Zhipu] Response text length:", text.length);
+      logger.log("Response text length:", text.length);

      // Parse JSON - this is required for all calls
      const parsed = tryParseJSON(text);
@@ -130,7 +133,7 @@ export async function zhipuChat({
        ? { prompt_tokens, completion_tokens, total_tokens }
        : undefined;

-      console.log("[Zhipu] Returning response from Zhipu API");
+      logger.log("Returning response from Zhipu API");
      return {
        content: parsed,
        text,
@@ -140,13 +143,13 @@ export async function zhipuChat({
    } catch (error) {
      lastError = error;
      const msg = error?.message || error?.toString() || "Unknown error";
-      console.error("[Zhipu] Error occurred:", msg);
-      console.error("[Zhipu] Error status:", error?.status);
+      logger.error("Error occurred:", msg);
+      logger.error("Error status:", error?.status);

      if (error.status === 401) throw error;

      if (isRetryableError(error) && attempt < maxRetries) {
-        console.log("[Zhipu] Retrying due to retryable error");
+        logger.log("Retrying due to retryable error");
        continue;
      }

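All four HTTP providers in this release share the same retry scaffolding around fetch: an attempt counter logged as `Attempt ${attempt + 1}/${maxRetries + 1}`, an immediate rethrow on 401, and a `continue` when isRetryableError approves another pass. A self-contained sketch of that loop shape — the predicate below is a stand-in, not the isRetryableError this package imports from ./base.js:

// Illustrative retry loop mirroring the provider hunks; Node 18+ for global fetch.
function isLikelyRetryable(error) {
  // Stand-in predicate: retry rate limits and transient server errors.
  return error?.status === 429 || (error?.status >= 500 && error?.status < 600);
}

export async function postWithRetries(url, body, maxRetries = 3) {
  let lastError;
  for (let attempt = 0; attempt <= maxRetries; attempt++) {
    try {
      const response = await fetch(url, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(body),
      });
      if (!response.ok) {
        const error = new Error(`HTTP ${response.status}`);
        error.status = response.status;
        throw error;
      }
      return await response.json();
    } catch (error) {
      lastError = error;
      if (error.status === 401) throw error; // bad credentials never recover
      if (isLikelyRetryable(error) && attempt < maxRetries) continue; // retry
      throw lastError;
    }
  }
  throw lastError;
}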