elasticdash-test 0.1.13 → 0.1.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +36 -5
- package/dist/cli.js +0 -0
- package/dist/dashboard-server.d.ts +9 -0
- package/dist/dashboard-server.d.ts.map +1 -1
- package/dist/dashboard-server.js +1984 -17
- package/dist/dashboard-server.js.map +1 -1
- package/dist/html/dashboard.html +161 -11
- package/dist/index.cjs +828 -108
- package/dist/index.d.ts +3 -2
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +2 -2
- package/dist/index.js.map +1 -1
- package/dist/interceptors/telemetry-push.d.ts +47 -0
- package/dist/interceptors/telemetry-push.d.ts.map +1 -1
- package/dist/interceptors/telemetry-push.js +139 -6
- package/dist/interceptors/telemetry-push.js.map +1 -1
- package/dist/interceptors/tool.d.ts.map +1 -1
- package/dist/interceptors/tool.js +2 -1
- package/dist/interceptors/tool.js.map +1 -1
- package/dist/interceptors/workflow-ai.d.ts.map +1 -1
- package/dist/interceptors/workflow-ai.js +28 -4
- package/dist/interceptors/workflow-ai.js.map +1 -1
- package/dist/internals/mock-resolver.d.ts +42 -5
- package/dist/internals/mock-resolver.d.ts.map +1 -1
- package/dist/internals/mock-resolver.js +124 -5
- package/dist/internals/mock-resolver.js.map +1 -1
- package/dist/workflow-runner-worker.js +8 -2
- package/dist/workflow-runner-worker.js.map +1 -1
- package/package.json +3 -2
- package/src/dashboard-server.ts +86 -17
- package/src/html/dashboard.html +161 -11
- package/src/index.ts +3 -2
- package/src/interceptors/telemetry-push.ts +158 -7
- package/src/interceptors/tool.ts +2 -1
- package/src/interceptors/workflow-ai.ts +30 -4
- package/src/internals/mock-resolver.ts +131 -5
- package/src/workflow-runner-worker.ts +23 -2
package/dist/index.cjs
CHANGED
|
@@ -47,9 +47,21 @@ var init_recorder = __esm({
|
|
|
47
47
|
events = [];
|
|
48
48
|
_counter = 0;
|
|
49
49
|
_sideEffectCounter = 0;
|
|
50
|
+
_pending = /* @__PURE__ */ new Set();
|
|
50
51
|
record(event) {
|
|
51
52
|
this.events.push(event);
|
|
52
53
|
}
|
|
54
|
+
/** Register an in-flight async recording promise so flush() can await it. */
|
|
55
|
+
trackAsync(promise) {
|
|
56
|
+
this._pending.add(promise);
|
|
57
|
+
promise.finally(() => {
|
|
58
|
+
this._pending.delete(promise);
|
|
59
|
+
});
|
|
60
|
+
}
|
|
61
|
+
/** Await all in-flight async recordings. No-op when none are pending. */
|
|
62
|
+
async flush() {
|
|
63
|
+
await Promise.allSettled([...this._pending]);
|
|
64
|
+
}
|
|
53
65
|
nextId() {
|
|
54
66
|
return ++this._counter;
|
|
55
67
|
}
|
|
@@ -173,7 +185,7 @@ function createTraceHandle() {
|
|
|
173
185
|
steps.push({
|
|
174
186
|
type: "llm",
|
|
175
187
|
timestamp: rawDateNow(),
|
|
176
|
-
durationMs: 0,
|
|
188
|
+
durationMs: step.durationMs ?? 0,
|
|
177
189
|
data: step
|
|
178
190
|
});
|
|
179
191
|
},
|
|
@@ -182,7 +194,7 @@ function createTraceHandle() {
|
|
|
182
194
|
steps.push({
|
|
183
195
|
type: "tool",
|
|
184
196
|
timestamp: rawDateNow(),
|
|
185
|
-
durationMs: 0,
|
|
197
|
+
durationMs: call.durationMs ?? 0,
|
|
186
198
|
data: call
|
|
187
199
|
});
|
|
188
200
|
},
|
|
@@ -227,7 +239,7 @@ __export(tracing_exports, {
|
|
|
227
239
|
function wrapperRecordingActive() {
|
|
228
240
|
return globalThis[TOOL_WRAPPER_ACTIVE_KEY] === true;
|
|
229
241
|
}
|
|
230
|
-
function recordToolCall(name, args, result) {
|
|
242
|
+
function recordToolCall(name, args, result, durationMs = 0) {
|
|
231
243
|
if (!globalThis.__ELASTICDASH_WORKER__) return;
|
|
232
244
|
try {
|
|
233
245
|
if (wrapperRecordingActive()) return;
|
|
@@ -235,7 +247,7 @@ function recordToolCall(name, args, result) {
|
|
|
235
247
|
if (!trace || typeof trace.recordToolCall !== "function") return;
|
|
236
248
|
const ctx = getCaptureContext();
|
|
237
249
|
if (!ctx) {
|
|
238
|
-
trace.recordToolCall({ name, args, result });
|
|
250
|
+
trace.recordToolCall({ name, args, result, durationMs });
|
|
239
251
|
return;
|
|
240
252
|
}
|
|
241
253
|
const { recorder, replay } = ctx;
|
|
@@ -255,9 +267,9 @@ function recordToolCall(name, args, result) {
|
|
|
255
267
|
input: args,
|
|
256
268
|
output,
|
|
257
269
|
timestamp: rawDateNow(),
|
|
258
|
-
durationMs
|
|
270
|
+
durationMs
|
|
259
271
|
});
|
|
260
|
-
trace.recordToolCall({ name, args, result: output, workflowEventId: id });
|
|
272
|
+
trace.recordToolCall({ name, args, result: output, workflowEventId: id, durationMs });
|
|
261
273
|
} catch {
|
|
262
274
|
}
|
|
263
275
|
}
|
|
@@ -290,28 +302,41 @@ __export(index_exports, {
|
|
|
290
302
|
fetchCapturedTrace: () => fetchCapturedTrace,
|
|
291
303
|
getCaptureContext: () => getCaptureContext,
|
|
292
304
|
getCurrentTrace: () => getCurrentTrace,
|
|
305
|
+
getHttpFrozenEvent: () => getHttpFrozenEvent,
|
|
306
|
+
getHttpPromptMock: () => getHttpPromptMock,
|
|
307
|
+
getHttpRunContext: () => getHttpRunContext,
|
|
293
308
|
getRegistry: () => getRegistry,
|
|
309
|
+
initHttpRunContext: () => initHttpRunContext,
|
|
294
310
|
installAIInterceptor: () => installAIInterceptor,
|
|
295
311
|
installDBAutoInterceptor: () => installDBAutoInterceptor,
|
|
296
312
|
interceptDateNow: () => interceptDateNow,
|
|
297
313
|
interceptFetch: () => interceptFetch,
|
|
298
314
|
interceptRandom: () => interceptRandom,
|
|
299
315
|
isWorker: () => isWorker,
|
|
316
|
+
pushTelemetryEvent: () => pushTelemetryEvent,
|
|
317
|
+
readVercelAIStream: () => readVercelAIStream,
|
|
300
318
|
recordToolCall: () => recordToolCall,
|
|
301
319
|
registerMatchers: () => registerMatchers,
|
|
302
320
|
reportResults: () => reportResults,
|
|
321
|
+
resolveAIMock: () => resolveAIMock,
|
|
322
|
+
resolveMock: () => resolveMock,
|
|
323
|
+
resolvePromptMock: () => resolvePromptMock,
|
|
303
324
|
resolveTaskInput: () => resolveTaskInput,
|
|
304
325
|
restoreDateNow: () => restoreDateNow,
|
|
305
326
|
restoreFetch: () => restoreFetch,
|
|
306
327
|
restoreRandom: () => restoreRandom,
|
|
307
328
|
runFiles: () => runFiles,
|
|
329
|
+
runInHttpContext: () => runInHttpContext,
|
|
330
|
+
runWithInitializedHttpContext: () => runWithInitializedHttpContext,
|
|
308
331
|
runWorkflow: () => runWorkflow,
|
|
309
332
|
safeRecordToolCall: () => safeRecordToolCall,
|
|
310
333
|
serializeAgentState: () => serializeAgentState,
|
|
311
334
|
setCaptureContext: () => setCaptureContext,
|
|
312
335
|
setCurrentTrace: () => setCurrentTrace,
|
|
336
|
+
setHttpRunContext: () => setHttpRunContext,
|
|
313
337
|
startLLMProxy: () => startLLMProxy,
|
|
314
338
|
startTraceSession: () => startTraceSession,
|
|
339
|
+
tryAutoInitHttpContext: () => tryAutoInitHttpContext,
|
|
315
340
|
uninstallAIInterceptor: () => uninstallAIInterceptor,
|
|
316
341
|
uninstallDBAutoInterceptor: () => uninstallDBAutoInterceptor,
|
|
317
342
|
wrapAI: () => wrapAI,
|
|
@@ -396,6 +421,7 @@ async function callProviderLLM(prompt, options = {}, systemPrompt = "You are an
|
|
|
396
421
|
const provider = options.provider ?? "openai";
|
|
397
422
|
const sdk = options.sdk;
|
|
398
423
|
const resolvedModel = options.model ?? defaultModels[provider];
|
|
424
|
+
const t0 = Date.now();
|
|
399
425
|
switch (provider) {
|
|
400
426
|
case "openai": {
|
|
401
427
|
if (sdk && sdk.chat?.completions?.create) {
|
|
@@ -408,7 +434,12 @@ async function callProviderLLM(prompt, options = {}, systemPrompt = "You are an
|
|
|
408
434
|
max_tokens: maxTokens,
|
|
409
435
|
temperature
|
|
410
436
|
});
|
|
411
|
-
|
|
437
|
+
const u2 = resp?.usage;
|
|
438
|
+
return {
|
|
439
|
+
content: resp?.choices?.[0]?.message?.content?.trim() ?? "",
|
|
440
|
+
durationMs: Date.now() - t0,
|
|
441
|
+
usage: u2 ? { inputTokens: u2.prompt_tokens ?? 0, outputTokens: u2.completion_tokens ?? 0, totalTokens: u2.total_tokens ?? 0 } : void 0
|
|
442
|
+
};
|
|
412
443
|
}
|
|
413
444
|
const apiKey = options.apiKey ?? process.env.OPENAI_API_KEY;
|
|
414
445
|
if (!apiKey) throw new Error("Provide apiKey or set OPENAI_API_KEY for OpenAI-compatible endpoint.");
|
|
@@ -433,7 +464,12 @@ async function callProviderLLM(prompt, options = {}, systemPrompt = "You are an
|
|
|
433
464
|
throw new Error(`OpenAI API error: ${response.status} ${response.statusText}`);
|
|
434
465
|
}
|
|
435
466
|
const data = await response.json();
|
|
436
|
-
|
|
467
|
+
const u = data?.usage;
|
|
468
|
+
return {
|
|
469
|
+
content: data.choices?.[0]?.message?.content?.trim() ?? "",
|
|
470
|
+
durationMs: Date.now() - t0,
|
|
471
|
+
usage: u ? { inputTokens: u.prompt_tokens ?? 0, outputTokens: u.completion_tokens ?? 0, totalTokens: u.total_tokens ?? 0 } : void 0
|
|
472
|
+
};
|
|
437
473
|
}
|
|
438
474
|
case "claude": {
|
|
439
475
|
if (sdk && sdk.messages?.create) {
|
|
@@ -445,7 +481,12 @@ async function callProviderLLM(prompt, options = {}, systemPrompt = "You are an
|
|
|
445
481
|
|
|
446
482
|
${prompt}` }]
|
|
447
483
|
});
|
|
448
|
-
|
|
484
|
+
const u2 = resp?.usage;
|
|
485
|
+
return {
|
|
486
|
+
content: resp?.content?.[0]?.text?.trim() ?? "",
|
|
487
|
+
durationMs: Date.now() - t0,
|
|
488
|
+
usage: u2 ? { inputTokens: u2.input_tokens ?? 0, outputTokens: u2.output_tokens ?? 0, totalTokens: (u2.input_tokens ?? 0) + (u2.output_tokens ?? 0) } : void 0
|
|
489
|
+
};
|
|
449
490
|
}
|
|
450
491
|
const apiKey = process.env.ANTHROPIC_API_KEY;
|
|
451
492
|
if (!apiKey) throw new Error("ANTHROPIC_API_KEY is not set in environment.");
|
|
@@ -469,7 +510,12 @@ ${prompt}` }]
|
|
|
469
510
|
throw new Error(`Claude API error: ${response.status} ${response.statusText}`);
|
|
470
511
|
}
|
|
471
512
|
const data = await response.json();
|
|
472
|
-
|
|
513
|
+
const u = data?.usage;
|
|
514
|
+
return {
|
|
515
|
+
content: data?.content?.[0]?.text?.trim() ?? "",
|
|
516
|
+
durationMs: Date.now() - t0,
|
|
517
|
+
usage: u ? { inputTokens: u.input_tokens ?? 0, outputTokens: u.output_tokens ?? 0, totalTokens: (u.input_tokens ?? 0) + (u.output_tokens ?? 0) } : void 0
|
|
518
|
+
};
|
|
473
519
|
}
|
|
474
520
|
case "gemini": {
|
|
475
521
|
if (sdk && sdk.models?.generateContent) {
|
|
@@ -480,7 +526,12 @@ ${prompt}` }]
|
|
|
480
526
|
${prompt}` }] }],
|
|
481
527
|
generationConfig: { temperature, maxOutputTokens: maxTokens }
|
|
482
528
|
});
|
|
483
|
-
|
|
529
|
+
const u2 = resp?.response?.usageMetadata;
|
|
530
|
+
return {
|
|
531
|
+
content: resp?.response?.candidates?.[0]?.content?.parts?.[0]?.text?.trim() ?? "",
|
|
532
|
+
durationMs: Date.now() - t0,
|
|
533
|
+
usage: u2 ? { inputTokens: u2.promptTokenCount ?? 0, outputTokens: u2.candidatesTokenCount ?? 0, totalTokens: u2.totalTokenCount ?? 0 } : void 0
|
|
534
|
+
};
|
|
484
535
|
}
|
|
485
536
|
const apiKey = process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY;
|
|
486
537
|
if (!apiKey) throw new Error("GEMINI_API_KEY (or GOOGLE_API_KEY) is not set in environment.");
|
|
@@ -501,7 +552,12 @@ ${prompt}` }] }],
|
|
|
501
552
|
throw new Error(`Gemini API error: ${response.status} ${response.statusText}`);
|
|
502
553
|
}
|
|
503
554
|
const data = await response.json();
|
|
504
|
-
|
|
555
|
+
const u = data?.usageMetadata;
|
|
556
|
+
return {
|
|
557
|
+
content: data?.candidates?.[0]?.content?.parts?.[0]?.text?.trim() ?? "",
|
|
558
|
+
durationMs: Date.now() - t0,
|
|
559
|
+
usage: u ? { inputTokens: u.promptTokenCount ?? 0, outputTokens: u.candidatesTokenCount ?? 0, totalTokens: u.totalTokenCount ?? 0 } : void 0
|
|
560
|
+
};
|
|
505
561
|
}
|
|
506
562
|
case "grok": {
|
|
507
563
|
if (sdk && sdk.chat?.completions?.create) {
|
|
@@ -514,7 +570,12 @@ ${prompt}` }] }],
|
|
|
514
570
|
max_tokens: maxTokens,
|
|
515
571
|
temperature
|
|
516
572
|
});
|
|
517
|
-
|
|
573
|
+
const u2 = resp?.usage;
|
|
574
|
+
return {
|
|
575
|
+
content: resp?.choices?.[0]?.message?.content?.trim() ?? "",
|
|
576
|
+
durationMs: Date.now() - t0,
|
|
577
|
+
usage: u2 ? { inputTokens: u2.prompt_tokens ?? 0, outputTokens: u2.completion_tokens ?? 0, totalTokens: u2.total_tokens ?? 0 } : void 0
|
|
578
|
+
};
|
|
518
579
|
}
|
|
519
580
|
const apiKey = process.env.GROK_API_KEY;
|
|
520
581
|
if (!apiKey) throw new Error("GROK_API_KEY is not set in environment.");
|
|
@@ -538,7 +599,12 @@ ${prompt}` }] }],
|
|
|
538
599
|
throw new Error(`Grok API error: ${response.status} ${response.statusText}`);
|
|
539
600
|
}
|
|
540
601
|
const data = await response.json();
|
|
541
|
-
|
|
602
|
+
const u = data?.usage;
|
|
603
|
+
return {
|
|
604
|
+
content: data.choices?.[0]?.message?.content?.trim() ?? "",
|
|
605
|
+
durationMs: Date.now() - t0,
|
|
606
|
+
usage: u ? { inputTokens: u.prompt_tokens ?? 0, outputTokens: u.completion_tokens ?? 0, totalTokens: u.total_tokens ?? 0 } : void 0
|
|
607
|
+
};
|
|
542
608
|
}
|
|
543
609
|
case "kimi": {
|
|
544
610
|
const apiKey = process.env.KIMI_API_KEY;
|
|
@@ -563,7 +629,12 @@ ${prompt}` }] }],
|
|
|
563
629
|
throw new Error(`Kimi API error: ${response.status} ${response.statusText}`);
|
|
564
630
|
}
|
|
565
631
|
const data = await response.json();
|
|
566
|
-
|
|
632
|
+
const u = data?.usage;
|
|
633
|
+
return {
|
|
634
|
+
content: data.choices?.[0]?.message?.content?.trim() ?? "",
|
|
635
|
+
durationMs: Date.now() - t0,
|
|
636
|
+
usage: u ? { inputTokens: u.prompt_tokens ?? 0, outputTokens: u.completion_tokens ?? 0, totalTokens: u.total_tokens ?? 0 } : void 0
|
|
637
|
+
};
|
|
567
638
|
}
|
|
568
639
|
default:
|
|
569
640
|
throw new Error(`Unsupported provider: ${provider}`);
|
|
@@ -581,7 +652,7 @@ ${expected}
|
|
|
581
652
|
|
|
582
653
|
Answer only "YES" or "NO".
|
|
583
654
|
`.trim();
|
|
584
|
-
const content = (await callProviderLLM(prompt, options, "You are an expert test judge.", 8, 0)).trim().toUpperCase();
|
|
655
|
+
const content = (await callProviderLLM(prompt, options, "You are an expert test judge.", 8, 0)).content.trim().toUpperCase();
|
|
585
656
|
return content.startsWith("YES");
|
|
586
657
|
}
|
|
587
658
|
function parseFirstNumber(text) {
|
|
@@ -774,13 +845,13 @@ Text:
|
|
|
774
845
|
${targetText}
|
|
775
846
|
`.trim();
|
|
776
847
|
try {
|
|
777
|
-
const raw = await callProviderLLM(
|
|
848
|
+
const raw = (await callProviderLLM(
|
|
778
849
|
evalPrompt,
|
|
779
850
|
{ provider: config.provider, model: config.model, sdk: config.sdk, apiKey: config.apiKey, baseURL: config.baseURL },
|
|
780
851
|
"You are an evaluation assistant. Return only a number between 0 and 1.",
|
|
781
852
|
16,
|
|
782
853
|
0
|
|
783
|
-
);
|
|
854
|
+
)).content;
|
|
784
855
|
const score = parseFirstNumber(raw);
|
|
785
856
|
if (score === null) {
|
|
786
857
|
return {
|
|
@@ -1391,9 +1462,51 @@ function formatError(error) {
|
|
|
1391
1462
|
init_context();
|
|
1392
1463
|
init_recorder();
|
|
1393
1464
|
init_side_effects();
|
|
1465
|
+
function extractUsage(provider, body) {
|
|
1466
|
+
if (provider === "openai" || provider === "grok" || provider === "kimi") {
|
|
1467
|
+
const u = body.usage;
|
|
1468
|
+
if (!u) return void 0;
|
|
1469
|
+
return { inputTokens: u.prompt_tokens, outputTokens: u.completion_tokens, totalTokens: u.total_tokens };
|
|
1470
|
+
}
|
|
1471
|
+
if (provider === "anthropic") {
|
|
1472
|
+
const u = body.usage;
|
|
1473
|
+
if (!u) return void 0;
|
|
1474
|
+
return { inputTokens: u.input_tokens, outputTokens: u.output_tokens, totalTokens: (u.input_tokens ?? 0) + (u.output_tokens ?? 0) };
|
|
1475
|
+
}
|
|
1476
|
+
if (provider === "gemini") {
|
|
1477
|
+
const u = body.usageMetadata;
|
|
1478
|
+
if (!u) return void 0;
|
|
1479
|
+
return { inputTokens: u.promptTokenCount, outputTokens: u.candidatesTokenCount, totalTokens: u.totalTokenCount };
|
|
1480
|
+
}
|
|
1481
|
+
return void 0;
|
|
1482
|
+
}
|
|
1483
|
+
function extractAssistantMessage(provider, body) {
|
|
1484
|
+
if (provider === "openai" || provider === "grok" || provider === "kimi") {
|
|
1485
|
+
const choices = body.choices;
|
|
1486
|
+
if (Array.isArray(choices) && choices.length > 0) {
|
|
1487
|
+
const msg = choices[0].message;
|
|
1488
|
+
if (msg && typeof msg === "object") return msg;
|
|
1489
|
+
}
|
|
1490
|
+
}
|
|
1491
|
+
if (provider === "anthropic") {
|
|
1492
|
+
const content = body.content;
|
|
1493
|
+
if (Array.isArray(content)) {
|
|
1494
|
+
return { role: "assistant", content };
|
|
1495
|
+
}
|
|
1496
|
+
}
|
|
1497
|
+
if (provider === "gemini") {
|
|
1498
|
+
const candidates = body.candidates;
|
|
1499
|
+
if (Array.isArray(candidates) && candidates.length > 0) {
|
|
1500
|
+
const content = candidates[0].content;
|
|
1501
|
+
if (content && typeof content === "object") return content;
|
|
1502
|
+
}
|
|
1503
|
+
}
|
|
1504
|
+
return null;
|
|
1505
|
+
}
|
|
1394
1506
|
var AI_PATTERNS2 = {
|
|
1395
1507
|
openai: /https?:\/\/api\.openai\.com\/v1\/((chat\/)?completions|embeddings)/,
|
|
1396
|
-
|
|
1508
|
+
anthropic: /https?:\/\/api\.anthropic\.com\/v1\/messages/,
|
|
1509
|
+
gemini: /https?:\/\/generativelanguage\.googleapis\.com\/.*\/models\/[^\/:]+:(generateContent|streamGenerateContent)/,
|
|
1397
1510
|
grok: /https?:\/\/api\.x\.ai\/v1\/(chat\/)?completions/,
|
|
1398
1511
|
kimi: /https?:\/\/api\.moonshot\.ai\/v1\/(chat\/)?completions/
|
|
1399
1512
|
};
|
|
@@ -1411,16 +1524,40 @@ function extractModel2(provider, body, url) {
|
|
|
1411
1524
|
return typeof body.model === "string" ? body.model : "unknown";
|
|
1412
1525
|
}
|
|
1413
1526
|
function extractPrompt2(provider, body) {
|
|
1414
|
-
if (provider === "openai" || provider === "grok" || provider === "kimi") {
|
|
1527
|
+
if (provider === "openai" || provider === "anthropic" || provider === "grok" || provider === "kimi") {
|
|
1528
|
+
let systemPrefix = "";
|
|
1529
|
+
if (provider === "anthropic") {
|
|
1530
|
+
if (typeof body.system === "string") {
|
|
1531
|
+
systemPrefix = `system: ${body.system}
|
|
1532
|
+
`;
|
|
1533
|
+
} else if (Array.isArray(body.system)) {
|
|
1534
|
+
systemPrefix = body.system.map((b) => {
|
|
1535
|
+
if (b && typeof b === "object") {
|
|
1536
|
+
return String(b.text ?? "");
|
|
1537
|
+
}
|
|
1538
|
+
return String(b);
|
|
1539
|
+
}).filter(Boolean).map((t) => `system: ${t}`).join("\n") + "\n";
|
|
1540
|
+
}
|
|
1541
|
+
}
|
|
1415
1542
|
const messages = body.messages;
|
|
1416
1543
|
if (Array.isArray(messages)) {
|
|
1417
|
-
|
|
1544
|
+
const msgText = messages.map((m) => {
|
|
1418
1545
|
if (m && typeof m === "object") {
|
|
1419
1546
|
const msg = m;
|
|
1420
|
-
|
|
1547
|
+
let content = msg.content;
|
|
1548
|
+
if (Array.isArray(content)) {
|
|
1549
|
+
content = content.map((b) => {
|
|
1550
|
+
if (b && typeof b === "object") {
|
|
1551
|
+
return String(b.text ?? "");
|
|
1552
|
+
}
|
|
1553
|
+
return String(b);
|
|
1554
|
+
}).filter(Boolean).join("");
|
|
1555
|
+
}
|
|
1556
|
+
return `${msg.role}: ${content}`;
|
|
1421
1557
|
}
|
|
1422
1558
|
return String(m);
|
|
1423
1559
|
}).join("\n");
|
|
1560
|
+
return systemPrefix + msgText;
|
|
1424
1561
|
}
|
|
1425
1562
|
if (typeof body.prompt === "string") return body.prompt;
|
|
1426
1563
|
if (typeof body.input === "string") return body.input;
|
|
@@ -1469,6 +1606,18 @@ function extractCompletion2(provider, responseBody) {
|
|
|
1469
1606
|
}
|
|
1470
1607
|
}
|
|
1471
1608
|
}
|
|
1609
|
+
if (provider === "anthropic") {
|
|
1610
|
+
const content = responseBody.content;
|
|
1611
|
+
if (Array.isArray(content)) {
|
|
1612
|
+
return content.map((block) => {
|
|
1613
|
+
if (block && typeof block === "object") {
|
|
1614
|
+
const b = block;
|
|
1615
|
+
if (b.type === "text" && typeof b.text === "string") return b.text;
|
|
1616
|
+
}
|
|
1617
|
+
return "";
|
|
1618
|
+
}).filter(Boolean).join("");
|
|
1619
|
+
}
|
|
1620
|
+
}
|
|
1472
1621
|
if (provider === "gemini") {
|
|
1473
1622
|
const candidates = responseBody.candidates;
|
|
1474
1623
|
if (Array.isArray(candidates) && candidates.length > 0) {
|
|
@@ -1524,6 +1673,21 @@ async function bufferSSEStream(provider, stream) {
|
|
|
1524
1673
|
} catch {
|
|
1525
1674
|
}
|
|
1526
1675
|
}
|
|
1676
|
+
} else if (provider === "anthropic") {
|
|
1677
|
+
for (const line of lines) {
|
|
1678
|
+
if (!line.startsWith("data: ")) continue;
|
|
1679
|
+
const data = line.slice(6).trim();
|
|
1680
|
+
try {
|
|
1681
|
+
const obj = JSON.parse(data);
|
|
1682
|
+
if (obj.type === "content_block_delta") {
|
|
1683
|
+
const delta = obj.delta;
|
|
1684
|
+
if (delta && delta.type === "text_delta" && typeof delta.text === "string") {
|
|
1685
|
+
completion += delta.text;
|
|
1686
|
+
}
|
|
1687
|
+
}
|
|
1688
|
+
} catch {
|
|
1689
|
+
}
|
|
1690
|
+
}
|
|
1527
1691
|
} else {
|
|
1528
1692
|
for (const line of lines) {
|
|
1529
1693
|
if (!line.startsWith("data: ")) continue;
|
|
@@ -1550,6 +1714,16 @@ function synthesizeCompletionJSON(provider, completion) {
|
|
|
1550
1714
|
candidates: [{ content: { parts: [{ text: completion }], role: "model" }, finishReason: "STOP" }]
|
|
1551
1715
|
};
|
|
1552
1716
|
}
|
|
1717
|
+
if (provider === "anthropic") {
|
|
1718
|
+
return {
|
|
1719
|
+
id: "replay",
|
|
1720
|
+
type: "message",
|
|
1721
|
+
role: "assistant",
|
|
1722
|
+
content: [{ type: "text", text: completion }],
|
|
1723
|
+
stop_reason: "end_turn",
|
|
1724
|
+
stop_sequence: null
|
|
1725
|
+
};
|
|
1726
|
+
}
|
|
1553
1727
|
return {
|
|
1554
1728
|
id: "replay",
|
|
1555
1729
|
object: "chat.completion",
|
|
@@ -1564,6 +1738,37 @@ function synthesizeSSEStream(provider, completion) {
|
|
|
1564
1738
|
const chunk = `[{"candidates":[{"content":{"parts":[{"text":${JSON.stringify(completion)}}],"role":"model"},"finishReason":"STOP"}]}]
|
|
1565
1739
|
`;
|
|
1566
1740
|
ctrl.enqueue(encoder.encode(chunk));
|
|
1741
|
+
} else if (provider === "anthropic") {
|
|
1742
|
+
const msgStart = `event: message_start
|
|
1743
|
+
data: ${JSON.stringify({ type: "message_start", message: { id: "replay", type: "message", role: "assistant", content: [], stop_reason: null, stop_sequence: null } })}
|
|
1744
|
+
|
|
1745
|
+
`;
|
|
1746
|
+
const blockStart = `event: content_block_start
|
|
1747
|
+
data: ${JSON.stringify({ type: "content_block_start", index: 0, content_block: { type: "text", text: "" } })}
|
|
1748
|
+
|
|
1749
|
+
`;
|
|
1750
|
+
const delta = `event: content_block_delta
|
|
1751
|
+
data: ${JSON.stringify({ type: "content_block_delta", index: 0, delta: { type: "text_delta", text: completion } })}
|
|
1752
|
+
|
|
1753
|
+
`;
|
|
1754
|
+
const blockStop = `event: content_block_stop
|
|
1755
|
+
data: ${JSON.stringify({ type: "content_block_stop", index: 0 })}
|
|
1756
|
+
|
|
1757
|
+
`;
|
|
1758
|
+
const msgDelta = `event: message_delta
|
|
1759
|
+
data: ${JSON.stringify({ type: "message_delta", delta: { stop_reason: "end_turn", stop_sequence: null } })}
|
|
1760
|
+
|
|
1761
|
+
`;
|
|
1762
|
+
const msgStop = `event: message_stop
|
|
1763
|
+
data: ${JSON.stringify({ type: "message_stop" })}
|
|
1764
|
+
|
|
1765
|
+
`;
|
|
1766
|
+
ctrl.enqueue(encoder.encode(msgStart));
|
|
1767
|
+
ctrl.enqueue(encoder.encode(blockStart));
|
|
1768
|
+
ctrl.enqueue(encoder.encode(delta));
|
|
1769
|
+
ctrl.enqueue(encoder.encode(blockStop));
|
|
1770
|
+
ctrl.enqueue(encoder.encode(msgDelta));
|
|
1771
|
+
ctrl.enqueue(encoder.encode(msgStop));
|
|
1567
1772
|
} else {
|
|
1568
1773
|
const frame1 = `data: ${JSON.stringify({ id: "replay", choices: [{ delta: { content: completion }, index: 0, finish_reason: null }] })}
|
|
1569
1774
|
|
|
@@ -1590,6 +1795,7 @@ function installAIInterceptor() {
|
|
|
1590
1795
|
let model = "unknown";
|
|
1591
1796
|
let prompt = "";
|
|
1592
1797
|
let isStreaming = false;
|
|
1798
|
+
let messages;
|
|
1593
1799
|
try {
|
|
1594
1800
|
const rawBody = init?.body;
|
|
1595
1801
|
if (rawBody && typeof rawBody === "string") {
|
|
@@ -1597,6 +1803,8 @@ function installAIInterceptor() {
|
|
|
1597
1803
|
model = extractModel2(provider, body, url);
|
|
1598
1804
|
prompt = extractPrompt2(provider, body);
|
|
1599
1805
|
isStreaming = body.stream === true;
|
|
1806
|
+
if (Array.isArray(body.messages)) messages = body.messages;
|
|
1807
|
+
else if (Array.isArray(body.contents)) messages = body.contents;
|
|
1600
1808
|
}
|
|
1601
1809
|
} catch {
|
|
1602
1810
|
}
|
|
@@ -1639,33 +1847,38 @@ function installAIInterceptor() {
|
|
|
1639
1847
|
if (isStreaming) {
|
|
1640
1848
|
if (response2.body) {
|
|
1641
1849
|
const [streamForCaller, streamForRecorder] = response2.body.tee();
|
|
1642
|
-
|
|
1643
|
-
|
|
1644
|
-
|
|
1645
|
-
|
|
1646
|
-
|
|
1647
|
-
|
|
1648
|
-
|
|
1649
|
-
|
|
1850
|
+
recorder.trackAsync(
|
|
1851
|
+
bufferSSEStream(provider, streamForRecorder).then((completion) => {
|
|
1852
|
+
const durationMs2 = rawDateNow() - start;
|
|
1853
|
+
traceAtCall.recordLLMStep({ model, provider, prompt, completion, workflowEventId: id, durationMs: durationMs2 });
|
|
1854
|
+
recorder.record({ id, type: "ai", name: model, input: { url, provider, model, prompt, messages }, output: { streamed: true, completion }, timestamp: start, durationMs: durationMs2 });
|
|
1855
|
+
}).catch(() => {
|
|
1856
|
+
const durationMs2 = rawDateNow() - start;
|
|
1857
|
+
traceAtCall.recordLLMStep({ model, provider, prompt, completion: "(streamed-error)", workflowEventId: id, durationMs: durationMs2 });
|
|
1858
|
+
recorder.record({ id, type: "ai", name: model, input: { url, provider, model, prompt, messages }, output: null, timestamp: start, durationMs: durationMs2 });
|
|
1859
|
+
})
|
|
1860
|
+
);
|
|
1650
1861
|
return new Response(streamForCaller, {
|
|
1651
1862
|
status: response2.status,
|
|
1652
1863
|
statusText: response2.statusText,
|
|
1653
1864
|
headers: response2.headers
|
|
1654
1865
|
});
|
|
1655
1866
|
} else {
|
|
1656
|
-
traceAtCall.recordLLMStep({ model, provider, prompt, completion: "(streamed)", workflowEventId: id });
|
|
1657
|
-
recorder.record({ id, type: "ai", name: model, input: { url, provider, model, prompt }, output: null, timestamp: start, durationMs });
|
|
1867
|
+
traceAtCall.recordLLMStep({ model, provider, prompt, completion: "(streamed)", workflowEventId: id, durationMs });
|
|
1868
|
+
recorder.record({ id, type: "ai", name: model, input: { url, provider, model, prompt, messages }, output: null, timestamp: start, durationMs });
|
|
1658
1869
|
}
|
|
1659
1870
|
} else {
|
|
1660
1871
|
try {
|
|
1661
1872
|
const cloned = response2.clone();
|
|
1662
1873
|
const responseBody = await cloned.json();
|
|
1663
1874
|
const completion = extractCompletion2(provider, responseBody);
|
|
1664
|
-
|
|
1665
|
-
|
|
1875
|
+
const usage = extractUsage(provider, responseBody);
|
|
1876
|
+
const assistantMessage = extractAssistantMessage(provider, responseBody);
|
|
1877
|
+
traceAtCall.recordLLMStep({ model, provider, prompt, completion, workflowEventId: id, durationMs });
|
|
1878
|
+
recorder.record({ id, type: "ai", name: model, input: { url, provider, model, prompt, messages }, output: assistantMessage ?? responseBody, timestamp: start, durationMs, usage });
|
|
1666
1879
|
} catch {
|
|
1667
|
-
traceAtCall.recordLLMStep({ model, provider, prompt, completion: "", workflowEventId: id });
|
|
1668
|
-
recorder.record({ id, type: "ai", name: model, input: { url, provider, model, prompt }, output: null, timestamp: start, durationMs });
|
|
1880
|
+
traceAtCall.recordLLMStep({ model, provider, prompt, completion: "", workflowEventId: id, durationMs });
|
|
1881
|
+
recorder.record({ id, type: "ai", name: model, input: { url, provider, model, prompt, messages }, output: null, timestamp: start, durationMs });
|
|
1669
1882
|
}
|
|
1670
1883
|
}
|
|
1671
1884
|
return response2;
|
|
@@ -1736,6 +1949,136 @@ var isWorker = () => {
|
|
|
1736
1949
|
return isWorkerContext();
|
|
1737
1950
|
};
|
|
1738
1951
|
|
|
1952
|
+
// src/internals/mock-resolver.ts
|
|
1953
|
+
function normaliseMockResult(value) {
|
|
1954
|
+
if (typeof value === "string") {
|
|
1955
|
+
try {
|
|
1956
|
+
return normaliseMockResult(JSON.parse(value));
|
|
1957
|
+
} catch {
|
|
1958
|
+
return value;
|
|
1959
|
+
}
|
|
1960
|
+
}
|
|
1961
|
+
if (Array.isArray(value)) {
|
|
1962
|
+
return value.map(normaliseMockResult);
|
|
1963
|
+
}
|
|
1964
|
+
if (value !== null && typeof value === "object") {
|
|
1965
|
+
const out = {};
|
|
1966
|
+
for (const [k, v] of Object.entries(value)) {
|
|
1967
|
+
out[k] = normaliseMockResult(v);
|
|
1968
|
+
}
|
|
1969
|
+
return out;
|
|
1970
|
+
}
|
|
1971
|
+
return value;
|
|
1972
|
+
}
|
|
1973
|
+
function resolveMock(toolName) {
|
|
1974
|
+
const g4 = globalThis;
|
|
1975
|
+
const mocks = g4["__ELASTICDASH_TOOL_MOCKS__"];
|
|
1976
|
+
if (!mocks) return { mocked: false };
|
|
1977
|
+
const entry = mocks[toolName];
|
|
1978
|
+
if (!entry || entry.mode === "live") return { mocked: false };
|
|
1979
|
+
if (!g4["__ELASTICDASH_TOOL_CALL_COUNTERS__"]) {
|
|
1980
|
+
g4["__ELASTICDASH_TOOL_CALL_COUNTERS__"] = {};
|
|
1981
|
+
}
|
|
1982
|
+
const counters = g4["__ELASTICDASH_TOOL_CALL_COUNTERS__"];
|
|
1983
|
+
counters[toolName] = (counters[toolName] ?? 0) + 1;
|
|
1984
|
+
const callNumber = counters[toolName];
|
|
1985
|
+
if (entry.mode === "mock-all") {
|
|
1986
|
+
const data = entry.mockData ?? {};
|
|
1987
|
+
const raw = data[callNumber] !== void 0 ? data[callNumber] : data[0];
|
|
1988
|
+
return { mocked: true, result: normaliseMockResult(raw) };
|
|
1989
|
+
}
|
|
1990
|
+
if (entry.mode === "mock-specific") {
|
|
1991
|
+
const indices = entry.callIndices ?? [];
|
|
1992
|
+
if (indices.includes(callNumber)) {
|
|
1993
|
+
const data = entry.mockData ?? {};
|
|
1994
|
+
return { mocked: true, result: normaliseMockResult(data[callNumber]) };
|
|
1995
|
+
}
|
|
1996
|
+
return { mocked: false };
|
|
1997
|
+
}
|
|
1998
|
+
return { mocked: false };
|
|
1999
|
+
}
|
|
2000
|
+
function extractSystemPrompt(input) {
|
|
2001
|
+
if (!input || typeof input !== "object") return void 0;
|
|
2002
|
+
const o = input;
|
|
2003
|
+
if (typeof o.system === "string") return o.system;
|
|
2004
|
+
if (typeof o.systemPrompt === "string" && o.systemPrompt.length > 0) return o.systemPrompt;
|
|
2005
|
+
const msgs = Array.isArray(o.messages) ? o.messages : Array.isArray(input) ? input : null;
|
|
2006
|
+
if (msgs) {
|
|
2007
|
+
for (const m of msgs) {
|
|
2008
|
+
if (m && typeof m === "object") {
|
|
2009
|
+
const msg = m;
|
|
2010
|
+
if (msg.role === "system" && typeof msg.content === "string") return msg.content;
|
|
2011
|
+
}
|
|
2012
|
+
}
|
|
2013
|
+
}
|
|
2014
|
+
return void 0;
|
|
2015
|
+
}
|
|
2016
|
+
function replaceSystemPrompt(input, newSystemPrompt) {
|
|
2017
|
+
if (!input || typeof input !== "object") return input;
|
|
2018
|
+
const o = input;
|
|
2019
|
+
if (typeof o.system === "string") return { ...o, system: newSystemPrompt };
|
|
2020
|
+
if (typeof o.systemPrompt === "string") return { ...o, systemPrompt: newSystemPrompt };
|
|
2021
|
+
if (Array.isArray(input)) {
|
|
2022
|
+
return input.map((m) => {
|
|
2023
|
+
if (m && typeof m === "object") {
|
|
2024
|
+
const msg = m;
|
|
2025
|
+
if (msg.role === "system" && typeof msg.content === "string") return { ...msg, content: newSystemPrompt };
|
|
2026
|
+
}
|
|
2027
|
+
return m;
|
|
2028
|
+
});
|
|
2029
|
+
}
|
|
2030
|
+
if (Array.isArray(o.messages)) {
|
|
2031
|
+
return {
|
|
2032
|
+
...o,
|
|
2033
|
+
messages: o.messages.map((m) => {
|
|
2034
|
+
if (m && typeof m === "object") {
|
|
2035
|
+
const msg = m;
|
|
2036
|
+
if (msg.role === "system" && typeof msg.content === "string") return { ...msg, content: newSystemPrompt };
|
|
2037
|
+
}
|
|
2038
|
+
return m;
|
|
2039
|
+
})
|
|
2040
|
+
};
|
|
2041
|
+
}
|
|
2042
|
+
return input;
|
|
2043
|
+
}
|
|
2044
|
+
// Looks up the globally registered prompt mocks and, when the input's current
// system prompt matches a registered key, returns the input rewritten with the
// mocked prompt. Returns undefined when nothing should be replaced.
function resolvePromptMock(input) {
  const registry = globalThis["__ELASTICDASH_PROMPT_MOCKS__"];
  const hasMocks = registry && Object.keys(registry).length > 0;
  if (!hasMocks) return void 0;
  const currentPrompt = extractSystemPrompt(input);
  if (currentPrompt === void 0) return void 0;
  const replacement = registry[currentPrompt];
  if (replacement === void 0) return void 0;
  return replaceSystemPrompt(input, replacement);
}
|
|
2054
|
+
// Decides whether the next call to `modelName` should be answered by a mock.
// Call counting is 1-based and tracked per model on globalThis so that
// "mock-specific" entries can target individual invocations.
// Note: the counter advances for every non-live entry, even when this call
// ultimately goes through live (mock-specific miss).
function resolveAIMock(modelName) {
  const globalObj = globalThis;
  const mockTable = globalObj["__ELASTICDASH_AI_MOCKS__"];
  if (!mockTable) return { mocked: false };
  const entry = mockTable[modelName];
  if (!entry || entry.mode === "live") return { mocked: false };
  let counters = globalObj["__ELASTICDASH_AI_CALL_COUNTERS__"];
  if (!counters) {
    counters = {};
    globalObj["__ELASTICDASH_AI_CALL_COUNTERS__"] = counters;
  }
  const callNumber = (counters[modelName] ?? 0) + 1;
  counters[modelName] = callNumber;
  if (entry.mode === "mock-all") {
    const data = entry.mockData ?? {};
    // Per-call mock data wins; index 0 acts as the catch-all default.
    const raw = data[callNumber] !== void 0 ? data[callNumber] : data[0];
    return { mocked: true, result: normaliseMockResult(raw) };
  }
  if (entry.mode === "mock-specific") {
    const targeted = (entry.callIndices ?? []).includes(callNumber);
    if (targeted) {
      const data = entry.mockData ?? {};
      return { mocked: true, result: normaliseMockResult(data[callNumber]) };
    }
  }
  return { mocked: false };
}
|
|
2081
|
+
|
|
1739
2082
|
// src/index.ts
|
|
1740
2083
|
init_recorder();
|
|
1741
2084
|
|
|
@@ -1788,6 +2131,132 @@ var ReplayController = class {
|
|
|
1788
2131
|
init_recorder();
|
|
1789
2132
|
init_context();
|
|
1790
2133
|
init_side_effects();
|
|
2134
|
+
|
|
2135
|
+
// src/interceptors/telemetry-push.ts
var import_node_async_hooks3 = require("node:async_hooks");
var import_node_crypto3 = require("node:crypto");
// The AsyncLocalStorage instance is stashed on globalThis under a well-known
// key — presumably so duplicate copies of this bundle loaded in one process
// (CJS + ESM, or multiple versions) all share the same store. TODO confirm.
var g3 = globalThis;
var HTTP_RUN_ALS_KEY = "__elasticdash_http_run_als__";
// Reuse an already-published store if present; otherwise create and publish one.
var httpRunAls = g3[HTTP_RUN_ALS_KEY] ?? new import_node_async_hooks3.AsyncLocalStorage();
if (!g3[HTTP_RUN_ALS_KEY]) g3[HTTP_RUN_ALS_KEY] = httpRunAls;
|
|
2142
|
+
// Builds an HTTP run context: a monotonically increasing event-id generator
// plus frozen events (indexed by id) and prompt mocks (keyed by original
// system prompt) converted into Maps for O(1) lookup.
function buildContext(runId, dashboardUrl, frozenEvents, promptMocksRecord = {}) {
  let eventCounter = 0;
  const frozenById = new Map();
  for (const event of frozenEvents) {
    frozenById.set(event.id, event);
  }
  return {
    runId,
    dashboardUrl,
    // ids are 1-based: the first call returns 1.
    nextId: () => ++eventCounter,
    frozenEvents: frozenById,
    promptMocks: new Map(Object.entries(promptMocksRecord))
  };
}
|
|
2149
|
+
// Enters a fresh HTTP run context (no frozen events, no prompt mocks) for the
// current async execution path. Uses enterWith (not run), so it applies to the
// remainder of the current async context and its descendants.
function setHttpRunContext(runId, dashboardUrl) {
  httpRunAls.enterWith(buildContext(runId, dashboardUrl, [], {}));
}
|
|
2152
|
+
// Like setHttpRunContext, but first fetches the run's configuration (frozen
// events + prompt mocks) from the dashboard server. Any network/parse failure
// is deliberately swallowed: the context is still entered, just without
// frozen events or mocks (best-effort behaviour).
async function initHttpRunContext(runId, dashboardUrl) {
  let frozenEvents = [];
  let promptMocks = {};
  try {
    const res = await fetch(`${dashboardUrl}/api/run-configs/${runId}`);
    if (res.ok) {
      const data = await res.json();
      frozenEvents = Array.isArray(data.frozenEvents) ? data.frozenEvents : [];
      // Accept only a plain-object record; arrays and primitives are ignored.
      promptMocks = data.promptMocks && typeof data.promptMocks === "object" && !Array.isArray(data.promptMocks) ? data.promptMocks : {};
    }
  } catch {
  }
  httpRunAls.enterWith(buildContext(runId, dashboardUrl, frozenEvents, promptMocks));
}
|
|
2166
|
+
// Returns the HTTP run context active on the current async chain, or
// undefined when none has been entered.
function getHttpRunContext() {
  return httpRunAls.getStore();
}
|
|
2169
|
+
// Looks up a frozen (pre-recorded) event by id in the current HTTP run
// context; undefined when there is no context or no such event.
function getHttpFrozenEvent(id) {
  return httpRunAls.getStore()?.frozenEvents.get(id);
}
|
|
2172
|
+
// HTTP-context twin of resolvePromptMock: if the input's system prompt exactly
// matches a key in the context's promptMocks map, returns the input with the
// prompt replaced; otherwise undefined (caller proceeds unmodified).
// NOTE(review): this function logs verbosely on every call (including the
// no-op paths), and the "available mock keys" dump appears both in the
// found and the not-found branches — looks like debugging leftovers; consider
// gating behind a debug flag.
function getHttpPromptMock(input) {
  const ctx = httpRunAls.getStore();
  if (!ctx || ctx.promptMocks.size === 0) {
    console.log(`[elasticdash] getHttpPromptMock: skip \u2014 promptMocks.size=${ctx?.promptMocks.size ?? "no ctx"}`);
    return void 0;
  }
  const systemPrompt = extractSystemPrompt(input);
  if (systemPrompt === void 0) {
    // Log the top-level keys to help diagnose unrecognised input shapes.
    const inputKeys = input && typeof input === "object" ? Object.keys(input).join(",") : typeof input;
    console.log(`[elasticdash] getHttpPromptMock: no system prompt found in input (keys: ${inputKeys})`);
    return void 0;
  }
  const newSystemPrompt = ctx.promptMocks.get(systemPrompt);
  console.log(`[elasticdash] getHttpPromptMock: extracted system prompt (len=${systemPrompt.length}, first50=${JSON.stringify(systemPrompt.slice(0, 50))}) \u2014 mock found=${newSystemPrompt !== void 0}`);
  if (newSystemPrompt !== void 0) {
    console.log(`[elasticdash] getHttpPromptMock: available mock keys=${JSON.stringify([...ctx.promptMocks.keys()].map((k) => k.slice(0, 50)))}`);
  }
  if (newSystemPrompt === void 0) {
    console.log(`[elasticdash] getHttpPromptMock: no mock for this prompt. Available mock keys (first 50 chars each): ${JSON.stringify([...ctx.promptMocks.keys()].map((k) => k.slice(0, 50)))}`);
    return void 0;
  }
  return replaceSystemPrompt(input, newSystemPrompt);
}
|
|
2195
|
+
// Fire-and-forget POST of a trace event to the dashboard for the current HTTP
// run. Intentionally not awaited so instrumentation never blocks the caller;
// both success and failure are only logged. Events are silently dropped when
// no HTTP run context is active.
function pushTelemetryEvent(event) {
  const ctx = httpRunAls.getStore();
  if (!ctx) {
    console.log(`[elasticdash] pushTelemetryEvent: no HTTP context, dropping event type=${event.type} name=${"name" in event ? event.name : "?"}`);
    return;
  }
  const { runId, dashboardUrl } = ctx;
  console.log(`[elasticdash] pushTelemetryEvent: posting event type=${event.type} name=${"name" in event ? event.name : "?"} runId=${runId} to ${dashboardUrl}`);
  fetch(`${dashboardUrl}/api/trace-events`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ runId, event })
  }).then((r) => {
    console.log(`[elasticdash] pushTelemetryEvent: response status=${r.status} for type=${event.type} name=${"name" in event ? event.name : "?"}`);
  }).catch((e) => {
    // Swallow network errors — telemetry must never crash the host app.
    console.log(`[elasticdash] pushTelemetryEvent: fetch failed: ${e instanceof Error ? e.message : String(e)}`);
  });
}
|
|
2213
|
+
// globalThis key caching the one-shot auto-init promise so concurrent callers
// share a single initialisation attempt (see tryAutoInitHttpContext).
var AUTO_INIT_KEY = "__elasticdash_auto_init_promise__";
// Runs `callback` inside a fresh HTTP run context (no frozen events, no
// prompt mocks) and returns its result.
function runInHttpContext(runId, dashboardUrl, callback) {
  return httpRunAls.run(buildContext(runId, dashboardUrl, [], {}), callback);
}
|
|
2217
|
+
// Fetches the run configuration from the dashboard, then runs `callback`
// inside an HTTP run context seeded with the fetched frozen events and prompt
// mocks. Fetch failures are swallowed (best-effort), falling back to an empty
// configuration. NOTE(review): the config-fetch logic duplicates
// initHttpRunContext — candidate for a shared helper.
async function runWithInitializedHttpContext(runId, dashboardUrl, callback) {
  let frozenEvents = [];
  let promptMocks = {};
  try {
    const res = await fetch(`${dashboardUrl}/api/run-configs/${runId}`);
    if (res.ok) {
      const data = await res.json();
      frozenEvents = Array.isArray(data.frozenEvents) ? data.frozenEvents : [];
      promptMocks = data.promptMocks && typeof data.promptMocks === "object" && !Array.isArray(data.promptMocks) ? data.promptMocks : {};
      const mockKeys = Object.keys(promptMocks);
      console.log(`[elasticdash] runWithInitializedHttpContext: fetched ${mockKeys.length} prompt mocks, ${frozenEvents.length} frozen events`);
      if (mockKeys.length > 0) {
        console.log(`[elasticdash] runWithInitializedHttpContext: mock keys (first 80 chars each): ${JSON.stringify(mockKeys.map((k) => k.slice(0, 80)))}`);
      }
    } else {
      console.log(`[elasticdash] runWithInitializedHttpContext: run-configs fetch returned ${res.status}`);
    }
  } catch {
  }
  return httpRunAls.run(buildContext(runId, dashboardUrl, frozenEvents, promptMocks), callback);
}
|
|
2238
|
+
// Lazily initialises an HTTP run context from environment variables when no
// context is active yet. Requires ELASTICDASH_SERVER; uses ELASTICDASH_RUN_ID
// when present (fetching that run's config), otherwise enters a fresh context
// under a random run id. The init promise is memoised on globalThis so
// concurrent callers perform at most one initialisation; all failures are
// swallowed (best-effort instrumentation).
async function tryAutoInitHttpContext() {
  if (getHttpRunContext()) return;
  const serverUrl = (typeof process !== "undefined" && process.env?.ELASTICDASH_SERVER) ?? "";
  if (!serverUrl) return;
  const g4 = globalThis;
  if (!g4[AUTO_INIT_KEY]) {
    g4[AUTO_INIT_KEY] = (async () => {
      try {
        const runId = (typeof process !== "undefined" && process.env?.ELASTICDASH_RUN_ID) ?? "";
        if (runId) {
          await initHttpRunContext(runId, serverUrl);
        } else {
          setHttpRunContext((0, import_node_crypto3.randomUUID)(), serverUrl);
        }
      } catch {
      }
    })();
  }
  await g4[AUTO_INIT_KEY];
}
|
|
2258
|
+
|
|
2259
|
+
// src/interceptors/tool.ts
|
|
1791
2260
|
var TOOL_WRAPPER_ACTIVE_KEY2 = "__elasticdash_tool_wrapper_active__";
|
|
1792
2261
|
function toTraceArgs(input) {
|
|
1793
2262
|
if (input && typeof input === "object" && !Array.isArray(input)) {
|
|
@@ -1851,8 +2320,55 @@ function wrapAsyncIterable(source, onComplete) {
|
|
|
1851
2320
|
}
|
|
1852
2321
|
function wrapTool(name, fn) {
|
|
1853
2322
|
return async (...args) => {
|
|
2323
|
+
await tryAutoInitHttpContext();
|
|
1854
2324
|
const ctx = getCaptureContext();
|
|
1855
|
-
|
|
2325
|
+
const httpCtx = getHttpRunContext();
|
|
2326
|
+
console.log(`[elasticdash] Tool called: ${name}`, { args });
|
|
2327
|
+
console.log(`[elasticdash] Current capture context:`, ctx ? { hasRecorder: !!ctx.recorder, hasReplay: !!ctx.replay } : null);
|
|
2328
|
+
console.log(`[elasticdash] Current HTTP context:`, httpCtx ? { hasHttpContext: !!httpCtx } : null);
|
|
2329
|
+
if (!ctx && !httpCtx) return fn(...args);
|
|
2330
|
+
if (!ctx) {
|
|
2331
|
+
const id2 = httpCtx.nextId();
|
|
2332
|
+
const input2 = args.length === 1 ? args[0] : args;
|
|
2333
|
+
const frozen = getHttpFrozenEvent(id2);
|
|
2334
|
+
if (frozen) {
|
|
2335
|
+
pushTelemetryEvent(frozen);
|
|
2336
|
+
if (frozen.streamed === true) {
|
|
2337
|
+
const raw = typeof frozen.streamRaw === "string" ? frozen.streamRaw : "";
|
|
2338
|
+
return reconstructStream(raw);
|
|
2339
|
+
}
|
|
2340
|
+
return frozen.output;
|
|
2341
|
+
}
|
|
2342
|
+
const start2 = rawDateNow();
|
|
2343
|
+
try {
|
|
2344
|
+
const output = await fn(...args);
|
|
2345
|
+
if (isReadableStream(output)) {
|
|
2346
|
+
const [streamForCaller, streamForRecorder] = output.tee();
|
|
2347
|
+
bufferReadableStream(streamForRecorder).then((rawText) => {
|
|
2348
|
+
const durationMs2 = rawDateNow() - start2;
|
|
2349
|
+
pushTelemetryEvent({ id: id2, type: "tool", name, input: input2, output: null, streamed: true, streamRaw: rawText, timestamp: start2, durationMs: durationMs2 });
|
|
2350
|
+
}).catch(() => {
|
|
2351
|
+
const durationMs2 = rawDateNow() - start2;
|
|
2352
|
+
pushTelemetryEvent({ id: id2, type: "tool", name, input: input2, output: null, streamed: true, streamRaw: "", timestamp: start2, durationMs: durationMs2 });
|
|
2353
|
+
});
|
|
2354
|
+
return streamForCaller;
|
|
2355
|
+
}
|
|
2356
|
+
if (isAsyncIterable(output)) {
|
|
2357
|
+
return wrapAsyncIterable(output, (chunks) => {
|
|
2358
|
+
const durationMs2 = rawDateNow() - start2;
|
|
2359
|
+
const rawText = chunks.map((c) => typeof c === "string" ? c : JSON.stringify(c)).join("");
|
|
2360
|
+
pushTelemetryEvent({ id: id2, type: "tool", name, input: input2, output: null, streamed: true, streamRaw: rawText, timestamp: start2, durationMs: durationMs2 });
|
|
2361
|
+
});
|
|
2362
|
+
}
|
|
2363
|
+
const durationMs = rawDateNow() - start2;
|
|
2364
|
+
pushTelemetryEvent({ id: id2, type: "tool", name, input: input2, output, timestamp: start2, durationMs });
|
|
2365
|
+
return output;
|
|
2366
|
+
} catch (e) {
|
|
2367
|
+
const durationMs = rawDateNow() - start2;
|
|
2368
|
+
pushTelemetryEvent({ id: id2, type: "tool", name, input: input2, output: { error: String(e) }, timestamp: start2, durationMs });
|
|
2369
|
+
throw e;
|
|
2370
|
+
}
|
|
2371
|
+
}
|
|
1856
2372
|
const trace = getCurrentTrace();
|
|
1857
2373
|
const { recorder, replay } = ctx;
|
|
1858
2374
|
const id = recorder.nextId();
|
|
@@ -1874,18 +2390,24 @@ function wrapTool(name, fn) {
|
|
|
1874
2390
|
}
|
|
1875
2391
|
return replayed;
|
|
1876
2392
|
}
|
|
1877
|
-
const
|
|
1878
|
-
const prev =
|
|
1879
|
-
|
|
2393
|
+
const g4 = globalThis;
|
|
2394
|
+
const prev = g4[TOOL_WRAPPER_ACTIVE_KEY2];
|
|
2395
|
+
g4[TOOL_WRAPPER_ACTIVE_KEY2] = true;
|
|
1880
2396
|
const start = rawDateNow();
|
|
1881
2397
|
try {
|
|
1882
2398
|
const output = await fn(...args);
|
|
1883
2399
|
if (isReadableStream(output)) {
|
|
1884
2400
|
const [streamForCaller, streamForRecorder] = output.tee();
|
|
1885
2401
|
bufferReadableStream(streamForRecorder).then((rawText) => {
|
|
1886
|
-
|
|
2402
|
+
const durationMs2 = rawDateNow() - start;
|
|
2403
|
+
const event = { id, type: "tool", name, input, output: null, streamed: true, streamRaw: rawText, timestamp: start, durationMs: durationMs2 };
|
|
2404
|
+
recorder.record(event);
|
|
2405
|
+
if (httpCtx) pushTelemetryEvent(event);
|
|
1887
2406
|
}).catch(() => {
|
|
1888
|
-
|
|
2407
|
+
const durationMs2 = rawDateNow() - start;
|
|
2408
|
+
const event = { id, type: "tool", name, input, output: null, streamed: true, streamRaw: "", timestamp: start, durationMs: durationMs2 };
|
|
2409
|
+
recorder.record(event);
|
|
2410
|
+
if (httpCtx) pushTelemetryEvent(event);
|
|
1889
2411
|
});
|
|
1890
2412
|
const result = streamForCaller;
|
|
1891
2413
|
if (trace && typeof trace.recordToolCall === "function") {
|
|
@@ -1895,71 +2417,179 @@ function wrapTool(name, fn) {
|
|
|
1895
2417
|
}
|
|
1896
2418
|
if (isAsyncIterable(output)) {
|
|
1897
2419
|
const wrapped = wrapAsyncIterable(output, (chunks) => {
|
|
2420
|
+
const durationMs2 = rawDateNow() - start;
|
|
1898
2421
|
const rawText = chunks.map((c) => typeof c === "string" ? c : JSON.stringify(c)).join("");
|
|
1899
|
-
|
|
2422
|
+
const event = { id, type: "tool", name, input, output: null, streamed: true, streamRaw: rawText, timestamp: start, durationMs: durationMs2 };
|
|
2423
|
+
recorder.record(event);
|
|
2424
|
+
if (httpCtx) pushTelemetryEvent(event);
|
|
1900
2425
|
});
|
|
1901
2426
|
if (trace && typeof trace.recordToolCall === "function") {
|
|
1902
2427
|
trace.recordToolCall({ name, args: toTraceArgs(input), result: wrapped, workflowEventId: id });
|
|
1903
2428
|
}
|
|
1904
2429
|
return wrapped;
|
|
1905
2430
|
}
|
|
1906
|
-
|
|
1907
|
-
|
|
1908
|
-
|
|
1909
|
-
|
|
1910
|
-
input,
|
|
1911
|
-
output,
|
|
1912
|
-
timestamp: start,
|
|
1913
|
-
durationMs: rawDateNow() - start
|
|
1914
|
-
});
|
|
2431
|
+
const durationMs = rawDateNow() - start;
|
|
2432
|
+
const successEvent = { id, type: "tool", name, input, output, timestamp: start, durationMs };
|
|
2433
|
+
recorder.record(successEvent);
|
|
2434
|
+
if (httpCtx) pushTelemetryEvent(successEvent);
|
|
1915
2435
|
if (trace && typeof trace.recordToolCall === "function") {
|
|
1916
|
-
trace.recordToolCall({ name, args: toTraceArgs(input), result: output, workflowEventId: id });
|
|
2436
|
+
trace.recordToolCall({ name, args: toTraceArgs(input), result: output, workflowEventId: id, durationMs });
|
|
1917
2437
|
}
|
|
1918
2438
|
return output;
|
|
1919
2439
|
} catch (e) {
|
|
1920
|
-
|
|
1921
|
-
|
|
1922
|
-
|
|
1923
|
-
|
|
1924
|
-
input,
|
|
1925
|
-
output: { error: String(e) },
|
|
1926
|
-
timestamp: start,
|
|
1927
|
-
durationMs: rawDateNow() - start
|
|
1928
|
-
});
|
|
2440
|
+
const durationMs = rawDateNow() - start;
|
|
2441
|
+
const errorEvent = { id, type: "tool", name, input, output: { error: String(e) }, timestamp: start, durationMs };
|
|
2442
|
+
recorder.record(errorEvent);
|
|
2443
|
+
if (httpCtx) pushTelemetryEvent(errorEvent);
|
|
1929
2444
|
if (trace && typeof trace.recordToolCall === "function") {
|
|
1930
|
-
trace.recordToolCall({ name, args: toTraceArgs(input), result: { error: String(e) }, workflowEventId: id });
|
|
2445
|
+
trace.recordToolCall({ name, args: toTraceArgs(input), result: { error: String(e) }, workflowEventId: id, durationMs });
|
|
1931
2446
|
}
|
|
1932
2447
|
throw e;
|
|
1933
2448
|
} finally {
|
|
1934
|
-
if (prev === void 0) delete
|
|
1935
|
-
else
|
|
2449
|
+
if (prev === void 0) delete g4[TOOL_WRAPPER_ACTIVE_KEY2];
|
|
2450
|
+
else g4[TOOL_WRAPPER_ACTIVE_KEY2] = prev;
|
|
1936
2451
|
}
|
|
1937
2452
|
};
|
|
1938
2453
|
}
|
|
1939
2454
|
|
|
1940
2455
|
// src/interceptors/workflow-ai.ts
|
|
1941
2456
|
init_recorder();
|
|
2457
|
+
init_side_effects();
|
|
2458
|
+
// Normalises token-usage info from several provider response shapes into a
// uniform { inputTokens, outputTokens, totalTokens } record.
// Recognised shapes:
//   - usage.input_tokens / output_tokens        (e.g. Anthropic-style)
//   - usage.prompt_tokens / completion_tokens   (e.g. OpenAI-style)
//   - usageMetadata.promptTokenCount / ...      (e.g. Gemini-style)
// Returns undefined when no recognisable usage block is present.
function extractUsage2(output) {
  if (!output || typeof output !== "object") return void 0;
  const o = output;
  if (o.usage && typeof o.usage === "object") {
    const u = o.usage;
    if (u.input_tokens != null || u.output_tokens != null) {
      return {
        inputTokens: u.input_tokens,
        outputTokens: u.output_tokens,
        totalTokens: (u.input_tokens ?? 0) + (u.output_tokens ?? 0)
      };
    }
    if (u.prompt_tokens != null || u.completion_tokens != null) {
      return {
        inputTokens: u.prompt_tokens,
        outputTokens: u.completion_tokens,
        // Fix: fall back to summing when total_tokens is absent, matching the
        // branch above (previously totalTokens could be undefined here).
        totalTokens: u.total_tokens ?? (u.prompt_tokens ?? 0) + (u.completion_tokens ?? 0)
      };
    }
  }
  if (o.usageMetadata && typeof o.usageMetadata === "object") {
    const u = o.usageMetadata;
    return {
      inputTokens: u.promptTokenCount,
      outputTokens: u.candidatesTokenCount,
      totalTokens: u.totalTokenCount
    };
  }
  return void 0;
}
|
|
1942
2488
|
// Wraps an AI model call so invocations are recorded/replayed when a test
// capture context is active, and/or pushed as live telemetry when an HTTP run
// context is active. The returned async function preserves callFn's
// signature and return value; with no context active it is a pass-through.
function wrapAI(modelName, callFn) {
  return async (...args) => {
    // Best-effort: lazily enter an HTTP context from env vars if configured.
    await tryAutoInitHttpContext();
    const ctx = getCaptureContext();
    const httpCtx = getHttpRunContext();
    if (!ctx && !httpCtx) return callFn(...args);
    const start = rawDateNow();
    if (ctx) {
      // --- capture/replay path (test runner) ---
      const { recorder, replay } = ctx;
      const id2 = recorder.nextId();
      // Replay a previously recorded result instead of hitting the model.
      if (replay.shouldReplay(id2)) {
        return replay.getRecordedResult(id2);
      }
      // Model-level mock: short-circuit with the configured result
      // (recorded with durationMs 0 since no real call was made).
      const aiMock = resolveAIMock(modelName);
      if (aiMock.mocked) {
        const input3 = args.length === 1 ? args[0] : args;
        const event = {
          id: id2,
          type: "ai",
          name: modelName,
          input: input3,
          output: aiMock.result,
          timestamp: start,
          durationMs: 0
        };
        recorder.record(event);
        if (httpCtx) pushTelemetryEvent(event);
        return aiMock.result;
      }
      // Prompt-level mock: possibly rewrite the system prompt before calling.
      // Note: when an args array is rewritten it collapses to a single arg.
      const rawInput = args.length === 1 ? args[0] : args;
      const modifiedInput = resolvePromptMock(rawInput);
      const effectiveArgs = modifiedInput !== void 0 ? [modifiedInput] : args;
      const input2 = modifiedInput !== void 0 ? modifiedInput : rawInput;
      try {
        const output = await callFn(...effectiveArgs);
        const durationMs = rawDateNow() - start;
        const usage = extractUsage2(output);
        const event = {
          id: id2,
          type: "ai",
          name: modelName,
          input: input2,
          output,
          timestamp: start,
          durationMs,
          ...usage ? { usage } : {}
        };
        recorder.record(event);
        if (httpCtx) pushTelemetryEvent(event);
        return output;
      } catch (e) {
        // Record the failure as an event, then rethrow unchanged.
        const durationMs = rawDateNow() - start;
        const event = {
          id: id2,
          type: "ai",
          name: modelName,
          input: input2,
          output: { error: String(e) },
          timestamp: start,
          durationMs
        };
        recorder.record(event);
        if (httpCtx) pushTelemetryEvent(event);
        throw e;
      }
    }
    // --- HTTP-only path (live telemetry, no recorder) ---
    const id = httpCtx.nextId();
    // A frozen event for this id means the dashboard pinned the output:
    // re-emit the event and return the frozen output without calling the model.
    const frozen = getHttpFrozenEvent(id);
    if (frozen) {
      pushTelemetryEvent(frozen);
      return frozen.output;
    }
    const rawHttpInput = args.length === 1 ? args[0] : args;
    const httpModifiedInput = getHttpPromptMock(rawHttpInput);
    const httpEffectiveArgs = httpModifiedInput !== void 0 ? [httpModifiedInput] : args;
    const input = httpModifiedInput !== void 0 ? httpModifiedInput : rawHttpInput;
    try {
      const output = await callFn(...httpEffectiveArgs);
      const durationMs = rawDateNow() - start;
      const usage = extractUsage2(output);
      const event = {
        id,
        type: "ai",
        name: modelName,
        input,
        output,
        timestamp: start,
        durationMs,
        ...usage ? { usage } : {}
      };
      pushTelemetryEvent(event);
      return output;
    } catch (e) {
      const durationMs = rawDateNow() - start;
      pushTelemetryEvent({
        id,
        type: "ai",
        name: modelName,
        input,
        output: { error: String(e) },
        timestamp: start,
        durationMs
      });
      throw e;
    }
  };
}
|
|
1965
2595
|
|
|
@@ -2204,7 +2834,80 @@ function pickReplayResponseHeaders(headers) {
|
|
|
2204
2834
|
}
|
|
2205
2835
|
// True when the response should be treated as a streamed body: either a
// known streaming content type, or the Vercel AI SDK data-stream marker.
function isStreamingContentType(headers) {
  const contentType = headers.get("content-type") ?? "";
  const streamingTypes = [
    "text/event-stream",
    "application/x-ndjson",
    "application/stream+json",
    "application/jsonl"
  ];
  if (streamingTypes.some((t) => contentType.includes(t))) return true;
  return headers.get("x-vercel-ai-data-stream") === "v1";
}
|
|
2839
|
+
// True when the response advertises the Vercel AI SDK data-stream protocol
// (x-vercel-ai-data-stream: v1).
function isVercelAIDataStream(headers) {
  return headers.get("x-vercel-ai-data-stream") === "v1";
}
|
|
2842
|
+
// Parses a buffered Vercel AI data-stream ("<prefix>:<json>" per line) into a
// summary object. Prefix 0 lines carry text deltas, prefix 2 lines carry data
// event arrays (only "result"/"plan" events are merged in), prefix 3 lines
// carry an error string. Malformed lines are skipped silently.
// Priority of the returned shape: error > accumulated text > plan > result.
function parseVercelAIDataStream(raw) {
  let text = "";
  let merged = {};
  let errorText = "";
  let sawError = false;
  for (const line of raw.split("\n")) {
    if (!line) continue;
    const sep = line.indexOf(":");
    if (sep === -1) continue;
    const tag = line.slice(0, sep);
    const body = line.slice(sep + 1);
    try {
      switch (tag) {
        case "0":
          text += JSON.parse(body);
          break;
        case "2":
          for (const event of JSON.parse(body)) {
            if (event.type === "result" || event.type === "plan") {
              merged = { ...merged, ...event };
            }
          }
          break;
        case "3":
          sawError = true;
          errorText = JSON.parse(body);
          break;
      }
    } catch {
      // Ignore lines whose payload is not valid JSON.
    }
  }
  if (sawError) {
    return { message: errorText, type: "error", error: errorText };
  }
  if (text) {
    return { message: text, type: "text", refinedQuery: merged.refinedQuery };
  }
  if (merged.type === "plan") {
    return {
      message: merged.message ?? "",
      type: "plan",
      sessionId: merged.sessionId,
      awaitingApproval: true,
      executionPlan: merged.executionPlan,
      refinedQuery: merged.refinedQuery
    };
  }
  return {
    message: merged.message ?? "",
    type: "result",
    refinedQuery: merged.refinedQuery,
    error: merged.error,
    planRejected: merged.planRejected
  };
}
|
|
2894
|
+
// Drains a Response body to completion, decoding it as UTF-8 text, then
// parses it as a Vercel AI data stream. The reader lock is always released,
// even if reading throws.
async function readVercelAIStream(response) {
  if (!response.body) {
    return { message: "No response body", type: "error", error: "No response body" };
  }
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffered = "";
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      buffered += decoder.decode(value, { stream: true });
    }
  } finally {
    reader.releaseLock();
  }
  return parseVercelAIDataStream(buffered);
}
|
|
2209
2912
|
async function bufferStream(stream) {
|
|
2210
2913
|
const decoder = new TextDecoder();
|
|
@@ -2302,31 +3005,34 @@ function interceptFetch() {
|
|
|
2302
3005
|
};
|
|
2303
3006
|
if (isStreamingContentType(res.headers) && res.body) {
|
|
2304
3007
|
const [streamForCaller, streamForRecorder] = res.body.tee();
|
|
2305
|
-
|
|
2306
|
-
|
|
2307
|
-
|
|
2308
|
-
|
|
2309
|
-
|
|
2310
|
-
|
|
2311
|
-
|
|
2312
|
-
|
|
2313
|
-
|
|
2314
|
-
|
|
2315
|
-
|
|
2316
|
-
|
|
2317
|
-
|
|
2318
|
-
|
|
2319
|
-
|
|
2320
|
-
|
|
2321
|
-
|
|
2322
|
-
|
|
2323
|
-
|
|
2324
|
-
|
|
2325
|
-
|
|
2326
|
-
|
|
2327
|
-
|
|
2328
|
-
|
|
2329
|
-
|
|
3008
|
+
const vercelAI = isVercelAIDataStream(res.headers);
|
|
3009
|
+
recorder.trackAsync(
|
|
3010
|
+
bufferStream(streamForRecorder).then((rawText) => {
|
|
3011
|
+
recorder.record({
|
|
3012
|
+
id,
|
|
3013
|
+
type: "http",
|
|
3014
|
+
name: "fetch",
|
|
3015
|
+
input: baseInput,
|
|
3016
|
+
output: vercelAI ? parseVercelAIDataStream(rawText) : null,
|
|
3017
|
+
streamed: true,
|
|
3018
|
+
streamRaw: rawText,
|
|
3019
|
+
timestamp: start,
|
|
3020
|
+
durationMs: Date.now() - start
|
|
3021
|
+
});
|
|
3022
|
+
}).catch(() => {
|
|
3023
|
+
recorder.record({
|
|
3024
|
+
id,
|
|
3025
|
+
type: "http",
|
|
3026
|
+
name: "fetch",
|
|
3027
|
+
input: baseInput,
|
|
3028
|
+
output: null,
|
|
3029
|
+
streamed: true,
|
|
3030
|
+
streamRaw: "",
|
|
3031
|
+
timestamp: start,
|
|
3032
|
+
durationMs: Date.now() - start
|
|
3033
|
+
});
|
|
3034
|
+
})
|
|
3035
|
+
);
|
|
2330
3036
|
return new Response(streamForCaller, {
|
|
2331
3037
|
status: res.status,
|
|
2332
3038
|
statusText: res.statusText,
|
|
@@ -2381,6 +3087,7 @@ async function runWorkflow(workflowFn, options = {}) {
|
|
|
2381
3087
|
}
|
|
2382
3088
|
try {
|
|
2383
3089
|
const result = await workflowFn();
|
|
3090
|
+
await recorder.flush();
|
|
2384
3091
|
return { result, trace: recorder.toTrace() };
|
|
2385
3092
|
} finally {
|
|
2386
3093
|
if (interceptHttp) restoreFetch();
|
|
@@ -2492,28 +3199,41 @@ function resolveRef(ref, previousOutputs) {
|
|
|
2492
3199
|
fetchCapturedTrace,
|
|
2493
3200
|
getCaptureContext,
|
|
2494
3201
|
getCurrentTrace,
|
|
3202
|
+
getHttpFrozenEvent,
|
|
3203
|
+
getHttpPromptMock,
|
|
3204
|
+
getHttpRunContext,
|
|
2495
3205
|
getRegistry,
|
|
3206
|
+
initHttpRunContext,
|
|
2496
3207
|
installAIInterceptor,
|
|
2497
3208
|
installDBAutoInterceptor,
|
|
2498
3209
|
interceptDateNow,
|
|
2499
3210
|
interceptFetch,
|
|
2500
3211
|
interceptRandom,
|
|
2501
3212
|
isWorker,
|
|
3213
|
+
pushTelemetryEvent,
|
|
3214
|
+
readVercelAIStream,
|
|
2502
3215
|
recordToolCall,
|
|
2503
3216
|
registerMatchers,
|
|
2504
3217
|
reportResults,
|
|
3218
|
+
resolveAIMock,
|
|
3219
|
+
resolveMock,
|
|
3220
|
+
resolvePromptMock,
|
|
2505
3221
|
resolveTaskInput,
|
|
2506
3222
|
restoreDateNow,
|
|
2507
3223
|
restoreFetch,
|
|
2508
3224
|
restoreRandom,
|
|
2509
3225
|
runFiles,
|
|
3226
|
+
runInHttpContext,
|
|
3227
|
+
runWithInitializedHttpContext,
|
|
2510
3228
|
runWorkflow,
|
|
2511
3229
|
safeRecordToolCall,
|
|
2512
3230
|
serializeAgentState,
|
|
2513
3231
|
setCaptureContext,
|
|
2514
3232
|
setCurrentTrace,
|
|
3233
|
+
setHttpRunContext,
|
|
2515
3234
|
startLLMProxy,
|
|
2516
3235
|
startTraceSession,
|
|
3236
|
+
tryAutoInitHttpContext,
|
|
2517
3237
|
uninstallAIInterceptor,
|
|
2518
3238
|
uninstallDBAutoInterceptor,
|
|
2519
3239
|
wrapAI,
|