@databuddy/sdk 2.3.29 → 2.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,708 +0,0 @@
/**
 * Best-effort USD cost estimation via the optional `tokenlens` package.
 * Resolves to an empty object whenever the package is missing or pricing
 * fails — cost data must never break call tracking.
 */
const computeCosts = async (modelId, provider, usage) => {
  try {
    const tokenlens = await import('tokenlens');
    const priced = await tokenlens.computeCostUSD({
      modelId,
      provider,
      usage: {
        input_tokens: usage.inputTokens,
        output_tokens: usage.outputTokens
      }
    });
    const { inputTokenCostUSD, outputTokenCostUSD, totalTokenCostUSD } = priced;
    return { inputTokenCostUSD, outputTokenCostUSD, totalTokenCostUSD };
  } catch {
    // Optional dependency unavailable or pricing threw: degrade silently.
    return {};
  }
};
/** Normalize a token count that may be a bare number or `{ total: number }`. */
const extractTokenCount = (value) => {
  if (typeof value === "number") {
    return value;
  }
  const hasNumericTotal =
    value !== null &&
    value !== undefined &&
    typeof value === "object" &&
    typeof value.total === "number";
  return hasNumericTotal ? value.total : undefined;
};
/**
 * Pull reasoning-token counts from a usage record: first the flat
 * `reasoningTokens` field, then the nested `outputTokens.reasoning` form.
 */
const extractReasoningTokens = (usage) => {
  if ("reasoningTokens" in usage && typeof usage.reasoningTokens === "number") {
    return usage.reasoningTokens;
  }
  const out = "outputTokens" in usage ? usage.outputTokens : undefined;
  if (out && typeof out === "object" && "reasoning" in out && typeof out.reasoning === "number") {
    return out.reasoning;
  }
  return undefined;
};
/**
 * Pull cache-read token counts from a usage record: first the flat
 * `cachedInputTokens` field, then the nested `inputTokens.cacheRead` form.
 */
const extractCacheReadTokens = (usage) => {
  if ("cachedInputTokens" in usage && typeof usage.cachedInputTokens === "number") {
    return usage.cachedInputTokens;
  }
  const inp = "inputTokens" in usage ? usage.inputTokens : undefined;
  if (inp && typeof inp === "object" && "cacheRead" in inp && typeof inp.cacheRead === "number") {
    return inp.cacheRead;
  }
  return undefined;
};
/** Read Anthropic's `cacheCreationInputTokens` out of provider metadata. */
const extractCacheCreationTokens = (providerMetadata) => {
  const tokens =
    providerMetadata && typeof providerMetadata === "object"
      ? providerMetadata.anthropic?.cacheCreationInputTokens
      : undefined;
  return typeof tokens === "number" ? tokens : undefined;
};
/**
 * Count a single web search when the wrapped usage carries a truthy
 * `search_context_size`; otherwise zero.
 */
const calculateWebSearchCount = (result) => {
  if (!result || typeof result !== "object") {
    return 0;
  }
  const usage = result.usage;
  const hasSearchContext =
    usage !== null &&
    usage !== undefined &&
    typeof usage === "object" &&
    "search_context_size" in usage &&
    Boolean(usage.search_context_size);
  return hasSearchContext ? 1 : 0;
};
/**
 * Prefer Anthropic's explicit `server_tool_use.web_search_requests` counter;
 * otherwise fall back to inferring a single search from the usage payload.
 */
const extractWebSearchCount = (providerMetadata, usage) => {
  const anthropic =
    providerMetadata && typeof providerMetadata === "object" && "anthropic" in providerMetadata
      ? providerMetadata.anthropic
      : undefined;
  if (anthropic && typeof anthropic === "object" && "server_tool_use" in anthropic) {
    const serverToolUse = anthropic.server_tool_use;
    const requests =
      serverToolUse && typeof serverToolUse === "object"
        ? serverToolUse.web_search_requests
        : undefined;
    if (typeof requests === "number") {
      return requests;
    }
  }
  return calculateWebSearchCount({ usage });
};
/**
 * Summarize tool activity in a result's content parts: counts of calls and
 * results, de-duplicated call names, and the tools the caller offered.
 */
const extractToolInfo = (content, params) => {
  const calls = [];
  const results = [];
  for (const part of content) {
    if (part.type === "tool-call") {
      calls.push(part);
    } else if (part.type === "tool-result") {
      results.push(part);
    }
  }
  const uniqueNames = new Set();
  for (const call of calls) {
    if (call.toolName) {
      uniqueNames.add(call.toolName);
    }
  }
  const availableTools = params?.tools?.map((t) => t.name) ?? [];
  return {
    toolCallCount: calls.length,
    toolResultCount: results.length,
    toolCallNames: [...uniqueNames],
    availableTools: availableTools.length > 0 ? availableTools : undefined
  };
};
/**
 * Copy Anthropic's `cacheCreationInputTokens` (when present, regardless of
 * type) into a partial usage record; empty object otherwise.
 */
const extractAdditionalTokenValues = (providerMetadata) => {
  const anthropic =
    providerMetadata && typeof providerMetadata === "object"
      ? providerMetadata.anthropic
      : undefined;
  if (anthropic && typeof anthropic === "object" && "cacheCreationInputTokens" in anthropic) {
    return { cacheCreationInputTokens: anthropic.cacheCreationInputTokens };
  }
  return {};
};
/**
 * Assemble the normalized usage record sent with each call: token counts,
 * Anthropic cache read/write extras, reasoning tokens, and web-search count.
 * `totalTokens` falls back to input + output when the provider omits it.
 */
const extractUsage = (usage, providerMetadata) => {
  const inputTokens = extractTokenCount(usage.inputTokens) ?? 0;
  const outputTokens = extractTokenCount(usage.outputTokens) ?? 0;
  return {
    inputTokens,
    outputTokens,
    totalTokens: usage.totalTokens ?? inputTokens + outputTokens,
    cachedInputTokens: extractCacheReadTokens(usage),
    cacheCreationInputTokens: extractCacheCreationTokens(providerMetadata),
    reasoningTokens: extractReasoningTokens(usage),
    webSearchCount: extractWebSearchCount(providerMetadata, usage)
  };
};
/**
 * For Anthropic models on spec version v3, subtract cache read/write tokens
 * from `inputTokens` (clamped at zero) and recompute `totalTokens`, mutating
 * the given usage record in place.
 */
const adjustAnthropicV3CacheTokens = (model, provider, usage) => {
  const isAnthropicV3 =
    model.specificationVersion === "v3" &&
    provider.toLowerCase().includes("anthropic");
  if (!isAnthropicV3) {
    return;
  }
  const cacheTokens =
    (usage.cachedInputTokens ?? 0) + (usage.cacheCreationInputTokens ?? 0);
  if (usage.inputTokens && cacheTokens > 0) {
    usage.inputTokens = Math.max(usage.inputTokens - cacheTokens, 0);
    usage.totalTokens = usage.inputTokens + usage.outputTokens;
  }
};
// Maximum characters retained in any single logged text field.
const MAX_TEXT_LENGTH = 1e5;
// Captures the media type from a `data:<type>[;...]` URL prefix.
const DATA_URL_REGEX = /^data:([^;,]+)/;
// Any whitespace character — used to rule out base64 payloads.
const WHITESPACE_REGEX = /\s/;
// Entire string drawn from the base64 alphabet (with `=` padding).
const BASE64_REGEX = /^[A-Za-z0-9+/=]+$/;
/**
 * Cap `text` at `maxLength` characters, appending a note that records how
 * many characters were dropped. Short strings pass through untouched.
 */
const truncate = (text, maxLength = MAX_TEXT_LENGTH) => {
  if (text.length > maxLength) {
    const dropped = text.length - maxLength;
    return `${text.slice(0, maxLength)}... [truncated ${dropped} chars]`;
  }
  return text;
};
/**
 * Replace data URLs and long bare-base64 strings with short placeholders so
 * binary payloads never reach the log transport; other strings pass through.
 */
const redactBase64DataUrl = (data) => {
  if (data.startsWith("data:")) {
    const mediaType = /^data:([^;,]+)/.exec(data)?.[1] ?? "unknown";
    return `[${mediaType} data URL redacted]`;
  }
  const looksLikeBase64 =
    data.length > 1e3 && !/\s/.test(data) && /^[A-Za-z0-9+/=]+$/.test(data);
  return looksLikeBase64
    ? `[base64 data redacted - ${data.length} chars]`
    : data;
};
/**
 * Flatten message content to a plain string: strings pass through, arrays
 * concatenate each part's string or `.text` field, anything else becomes "".
 */
const toContentString = (content) => {
  if (typeof content === "string") {
    return content;
  }
  if (!Array.isArray(content)) {
    return "";
  }
  const pieces = [];
  for (const part of content) {
    if (typeof part === "string") {
      pieces.push(part);
    } else if (part && typeof part === "object" && "text" in part) {
      pieces.push(part.text);
    } else {
      pieces.push("");
    }
  }
  return pieces.join("");
};
/**
 * Cheap unique-ish trace id: base36 timestamp plus a base36 random suffix.
 * Not cryptographically secure — only used to correlate log records.
 */
const generateTraceId = () => {
  const timePart = Date.now().toString(36);
  const randomPart = Math.random().toString(36).slice(2, 11);
  return `${timePart}-${randomPart}`;
};
/**
 * Map an AI SDK prompt to the transport's logged-message shape.
 *
 * Text is truncated, file/image payloads are redacted (data URLs and long
 * base64 strings never leave the process), and the oldest messages are
 * dropped until the serialized payload fits within `maxSize` bytes.
 *
 * @param prompt - Prompt messages (system/user/assistant/tool parts).
 * @param maxSize - Maximum serialized payload size in UTF-8 bytes.
 * @returns Mapped messages; on serialization failure (e.g. circular
 *   references) a single placeholder system message is returned instead.
 */
const mapPromptToMessages = (prompt, maxSize) => {
  const messages = prompt.map((message) => {
    // System content is always flattened to a single truncated string.
    if (message.role === "system") {
      return {
        role: "system",
        content: truncate(toContentString(message.content))
      };
    }
    // Multi-part content: map each part to its logged representation.
    if (Array.isArray(message.content)) {
      const content = message.content.map((c) => {
        if (c.type === "text") {
          return { type: "text", text: truncate(c.text) };
        }
        if (c.type === "file") {
          const data = c.data;
          // URLs pass through; string payloads are redacted if they look
          // like base64; anything else becomes a placeholder.
          const fileData = data instanceof URL ? data.toString() : typeof data === "string" ? redactBase64DataUrl(data) : "[binary file]";
          return { type: "file", file: fileData, mediaType: c.mediaType };
        }
        if (c.type === "image") {
          const data = c.image;
          const imageData = data instanceof URL ? data.toString() : typeof data === "string" ? redactBase64DataUrl(data) : "[binary image]";
          return {
            type: "image",
            image: imageData,
            // NOTE(review): images read `c.mimeType` while files read
            // `c.mediaType` — confirm both casings against the SDK part types.
            mediaType: c.mimeType ?? "image/unknown"
          };
        }
        if (c.type === "tool-call") {
          const input = c.input;
          return {
            type: "tool-call",
            id: c.toolCallId,
            function: {
              name: c.toolName,
              // Arguments may arrive as a raw string or a structured object.
              arguments: truncate(
                typeof input === "string" ? input : JSON.stringify(input ?? {})
              )
            }
          };
        }
        if (c.type === "tool-result") {
          return {
            type: "tool-result",
            toolCallId: c.toolCallId,
            toolName: c.toolName,
            output: c.output,
            // NOTE(review): prompt tool results are always logged with
            // isError: false — the part's own error state is not inspected.
            isError: false
          };
        }
        // Unknown part types degrade to an empty text part.
        return { type: "text", text: "" };
      });
      return { role: message.role, content };
    }
    // Plain (non-array) content for the remaining roles.
    return {
      role: message.role,
      content: truncate(toContentString(message.content))
    };
  });
  try {
    let serialized = JSON.stringify(messages);
    let removedCount = 0;
    const initialSize = messages.length;
    // Drop the oldest messages first until the payload fits; the counter
    // bounds iterations to the original message count.
    for (let i = 0; i < initialSize && Buffer.byteLength(serialized, "utf8") > maxSize; i++) {
      messages.shift();
      removedCount++;
      serialized = JSON.stringify(messages);
    }
    if (removedCount > 0) {
      // Leave a marker so consumers can tell that context was trimmed.
      messages.unshift({
        role: "system",
        content: `[${removedCount} message${removedCount === 1 ? "" : "s"} removed due to size limit]`
      });
    }
  } catch (error) {
    // JSON.stringify can throw (e.g. circular structures); fall back to a
    // placeholder rather than failing the tracked request.
    console.error("Error stringifying inputs", error);
    return [
      {
        role: "system",
        content: "An error occurred while processing your request. Please try again."
      }
    ];
  }
  return messages;
};
/**
 * Map a non-streaming result's content parts into a single logged assistant
 * message. Text/reasoning parts are truncated, tool-call arguments are
 * normalized to strings, file payloads are redacted, and unknown part types
 * are serialized as truncated JSON text.
 *
 * @param content - Result content parts from `doGenerate`.
 * @returns `[]` when there is no content; otherwise one assistant message
 *   whose content is a bare string when the sole part is text, or the full
 *   part array otherwise.
 */
const mapResultToMessages = (content) => {
  const mappedContent = content.map((item) => {
    if (item.type === "text") {
      return { type: "text", text: truncate(item.text ?? "") };
    }
    if (item.type === "reasoning") {
      return { type: "reasoning", text: truncate(item.text ?? "") };
    }
    if (item.type === "tool-call") {
      const toolItem = item;
      // Providers differ on the field name for call arguments.
      const rawArgs = toolItem.args ?? toolItem.arguments ?? toolItem.input;
      const argsValue = typeof rawArgs === "string" ? rawArgs : JSON.stringify(rawArgs ?? {});
      return {
        type: "tool-call",
        id: toolItem.toolCallId ?? "",
        function: {
          name: toolItem.toolName ?? "",
          arguments: truncate(argsValue)
        }
      };
    }
    if (item.type === "file") {
      let fileData;
      if (item.data instanceof URL) {
        fileData = item.data.toString();
      } else if (typeof item.data === "string") {
        fileData = redactBase64DataUrl(item.data);
        // Redaction left the string untouched but it is still large:
        // replace with a size placeholder so big payloads are never logged.
        if (fileData === item.data && item.data.length > 1e3) {
          fileData = `[${item.mediaType ?? "unknown"} file - ${item.data.length} bytes]`;
        }
      } else {
        fileData = "[binary file]";
      }
      return {
        type: "file",
        file: fileData,
        mediaType: item.mediaType ?? "application/octet-stream"
      };
    }
    if (item.type === "source") {
      return {
        type: "source",
        sourceType: item.sourceType ?? "unknown",
        id: item.id ?? "",
        url: item.url ?? "",
        title: item.title ?? ""
      };
    }
    // Unknown part type: keep a truncated JSON dump for debuggability.
    return { type: "text", text: truncate(JSON.stringify(item)) };
  });
  if (mappedContent.length === 0) {
    return [];
  }
  return [
    {
      role: "assistant",
      content: mappedContent.length === 1 && mappedContent[0].type === "text" ? mappedContent[0].text : mappedContent
    }
  ];
};
/**
 * Assemble the logged assistant message for a completed stream from the
 * accumulated text, reasoning, in-progress tool calls, and sources.
 * Returns `[]` when nothing was produced; a sole text part is unwrapped to
 * a bare string, matching the non-streaming output shape.
 */
const buildStreamOutput = (generatedText, reasoningText, toolCalls, sources = []) => {
  const content = [];
  if (reasoningText) {
    content.push({ type: "reasoning", text: truncate(reasoningText) });
  }
  if (generatedText) {
    content.push({ type: "text", text: truncate(generatedText) });
  }
  for (const { toolCallId, toolName, input } of toolCalls.values()) {
    content.push({
      type: "tool-call",
      id: toolCallId,
      function: { name: toolName, arguments: truncate(input) }
    });
  }
  for (const { sourceType, id, url, title } of sources) {
    content.push({ type: "source", sourceType, id, url, title });
  }
  if (content.length === 0) {
    return [];
  }
  const soleText = content.length === 1 && content[0].type === "text";
  return [{ role: "assistant", content: soleText ? content[0].text : content }];
};
/**
 * Build the default transport: POSTs each call record as JSON to `apiUrl`,
 * attaching a bearer token when `apiKey` is provided, and throws on any
 * non-2xx response.
 */
const createDefaultTransport = (apiUrl, apiKey) => {
  return async (call) => {
    const headers = { "Content-Type": "application/json" };
    if (apiKey) {
      headers.Authorization = `Bearer ${apiKey}`;
    }
    const response = await fetch(apiUrl, {
      method: "POST",
      headers,
      body: JSON.stringify(call)
    });
    if (response.ok) {
      return;
    }
    throw new Error(
      `Failed to send AI log: ${response.status} ${response.statusText}`
    );
  };
};
/**
 * Public transport factory that POSTs call records as JSON to `url`.
 *
 * This was a byte-for-byte duplicate of `createDefaultTransport`; it now
 * delegates so the fetch/error-handling logic lives in one place. Behavior
 * and signature are unchanged: returns an async (call) => void that throws
 * `Failed to send AI log: <status> <statusText>` on non-2xx responses.
 *
 * @param url - Endpoint to POST to.
 * @param apiKey - Optional bearer token added as an Authorization header.
 */
const httpTransport = (url, apiKey) => createDefaultTransport(url, apiKey);
// Upper bound (1 MiB) on serialized prompt content per call.
const MAX_CONTENT_SIZE = 1048576;
/** Reduce a provider id to its root segment, e.g. "OpenAI.chat" -> "openai". */
const extractProvider = (model) => {
  const [root] = model.provider.toLowerCase().split(".");
  return root;
};
/**
 * Build the call record logged when a tracked model invocation throws:
 * empty output/usage/cost/tools plus a normalized error payload. Non-Error
 * throwables are stringified under the name "UnknownError".
 */
const createErrorCall = (traceId, type, model, provider, input, durationMs, error) => {
  const isError = error instanceof Error;
  return {
    timestamp: new Date(),
    traceId,
    type,
    model,
    provider,
    input,
    output: [],
    usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
    cost: {},
    tools: { toolCallCount: 0, toolResultCount: 0, toolCallNames: [] },
    durationMs,
    error: {
      name: isError ? error.name : "UnknownError",
      message: isError ? error.message : String(error),
      stack: isError ? error.stack : undefined
    }
  };
};
/**
 * Fire-and-forget delivery of a call record.
 *
 * Delivery failures are logged, never propagated, so telemetry cannot break
 * the caller's request path. The success/error callback fires immediately
 * based on whether the call carries an error, not on delivery outcome.
 *
 * Fix: the previous `Promise.resolve(transport(call))` evaluated the
 * transport synchronously, so a transport that threw before returning a
 * promise escaped the `.catch` and propagated to the caller. Deferring the
 * invocation into the promise chain routes synchronous throws to `.catch`.
 */
const sendCall = (call, transport, onSuccess, onError) => {
  Promise.resolve()
    .then(() => transport(call))
    .catch((error) => {
      console.error("[databuddy] Failed to send AI log:", error);
    });
  if (call.error) {
    onError?.(call);
  } else {
    onSuccess?.(call);
  }
};
/**
 * Create a Databuddy LLM tracker.
 *
 * Options: `apiUrl`/`apiKey` (fall back to DATABUDDY_API_URL /
 * DATABUDDY_API_KEY env vars, then the hosted endpoint), a custom
 * `transport`, `computeCosts` (default true), `privacyMode` (default false —
 * when true, prompt/output content is not logged), `maxContentSize`, and
 * default `onSuccess`/`onError` callbacks.
 *
 * Returns `{ track, transport }` where `track(model, trackOptions)` wraps an
 * AI SDK language model via `Object.create`, overriding `doGenerate` and
 * `doStream` to record usage, cost, tools, and (unless privacyMode)
 * input/output content. Per-call `trackOptions` override the factory
 * defaults. Errors from the underlying model are logged and rethrown.
 */
const databuddyLLM = (options = {}) => {
  const {
    apiUrl,
    apiKey,
    transport: customTransport,
    computeCosts: defaultComputeCosts = true,
    privacyMode: defaultPrivacyMode = false,
    maxContentSize = MAX_CONTENT_SIZE,
    onSuccess: defaultOnSuccess,
    onError: defaultOnError
  } = options;
  // Custom transport wins; otherwise build the default HTTP transport from
  // explicit options, environment variables, or the hosted endpoint.
  const transport = customTransport ? customTransport : createDefaultTransport(
    apiUrl ?? process.env.DATABUDDY_API_URL ?? "https://basket.databuddy.cc/llm",
    apiKey ?? process.env.DATABUDDY_API_KEY
  );
  const track = (model, trackOptions = {}) => {
    // Per-call transport override falls back to the factory transport.
    const getEffectiveTransport = () => {
      if (trackOptions.transport) {
        return trackOptions.transport;
      }
      return transport;
    };
    // Prototype-chain wrapper: only doGenerate/doStream are shadowed; every
    // other property still resolves on the original model.
    return Object.create(model, {
      doGenerate: {
        value: async (params) => {
          const startTime = Date.now();
          const traceId = trackOptions.traceId ?? generateTraceId();
          const effectiveTransport = getEffectiveTransport();
          try {
            const result = await model.doGenerate(params);
            const durationMs = Date.now() - startTime;
            const tools = extractToolInfo(
              result.content,
              params
            );
            const provider = extractProvider(model);
            const usage = extractUsage(result.usage, result.providerMetadata);
            adjustAnthropicV3CacheTokens(model, provider, usage);
            // Only price calls that actually consumed tokens.
            const cost = (trackOptions.computeCosts ?? defaultComputeCosts) && (usage.inputTokens > 0 || usage.outputTokens > 0) ? await computeCosts(model.modelId, model.provider, {
              inputTokens: usage.inputTokens,
              outputTokens: usage.outputTokens
            }) : {};
            // privacyMode suppresses both prompt and output content.
            const input = trackOptions.privacyMode ?? defaultPrivacyMode ? [] : mapPromptToMessages(
              params.prompt,
              maxContentSize
            );
            const output = trackOptions.privacyMode ?? defaultPrivacyMode ? [] : mapResultToMessages(
              result.content
            );
            // finishReason can be a string or an object with a
            // `unified`/`type` field depending on the SDK version.
            const rawFinishReason = result.finishReason;
            let finishReason;
            if (typeof rawFinishReason === "string") {
              finishReason = rawFinishReason;
            } else if (rawFinishReason && typeof rawFinishReason === "object") {
              if ("unified" in rawFinishReason) {
                finishReason = rawFinishReason.unified;
              } else if ("type" in rawFinishReason) {
                finishReason = rawFinishReason.type;
              }
            }
            const call = {
              timestamp: /* @__PURE__ */ new Date(),
              traceId,
              type: "generate",
              // Prefer the provider-reported model id when available.
              model: result.response?.modelId ?? model.modelId,
              provider,
              finishReason,
              input,
              output,
              usage,
              cost,
              tools,
              durationMs,
              httpStatus: 200
            };
            sendCall(
              call,
              effectiveTransport,
              trackOptions.onSuccess ?? defaultOnSuccess,
              trackOptions.onError ?? defaultOnError
            );
            return result;
          } catch (error) {
            // Log the failed call, then rethrow so callers see the error.
            const durationMs = Date.now() - startTime;
            const input = trackOptions.privacyMode ?? defaultPrivacyMode ? [] : mapPromptToMessages(
              params.prompt,
              maxContentSize
            );
            const call = createErrorCall(
              traceId,
              "generate",
              model.modelId,
              extractProvider(model),
              input,
              durationMs,
              error
            );
            sendCall(
              call,
              effectiveTransport,
              trackOptions.onSuccess ?? defaultOnSuccess,
              trackOptions.onError ?? defaultOnError
            );
            throw error;
          }
        },
        writable: true,
        configurable: true,
        enumerable: false
      },
      doStream: {
        value: async (params) => {
          const startTime = Date.now();
          const traceId = trackOptions.traceId ?? generateTraceId();
          const effectiveTransport = getEffectiveTransport();
          try {
            const { stream, ...rest } = await model.doStream(params);
            // Accumulators shared between transform() and flush().
            let generatedText = "";
            let reasoningText = "";
            let finishReason;
            let providerMetadata;
            let usage = {};
            const toolCallsInProgress = /* @__PURE__ */ new Map();
            const sources = [];
            const transformStream = new TransformStream({
              // Inspect each chunk, accumulate state, then pass it through
              // unchanged to the consumer.
              transform(chunk, controller) {
                if (chunk.type === "text-delta") {
                  generatedText += chunk.delta;
                }
                if (chunk.type === "reasoning-delta") {
                  reasoningText += chunk.delta;
                }
                if (chunk.type === "tool-input-start") {
                  toolCallsInProgress.set(chunk.id, {
                    toolCallId: chunk.id,
                    toolName: chunk.toolName,
                    input: ""
                  });
                }
                if (chunk.type === "tool-input-delta") {
                  const toolCall = toolCallsInProgress.get(chunk.id);
                  if (toolCall) {
                    toolCall.input += chunk.delta;
                  }
                }
                // A complete tool-call chunk overwrites any partial entry.
                if (chunk.type === "tool-call") {
                  const input = chunk.input;
                  toolCallsInProgress.set(chunk.toolCallId, {
                    toolCallId: chunk.toolCallId,
                    toolName: chunk.toolName,
                    input: typeof input === "string" ? input : JSON.stringify(input ?? {})
                  });
                }
                if (chunk.type === "source") {
                  const sourceChunk = chunk;
                  sources.push({
                    sourceType: sourceChunk.sourceType ?? "unknown",
                    id: sourceChunk.id ?? "",
                    url: sourceChunk.url ?? "",
                    title: sourceChunk.title ?? ""
                  });
                }
                if (chunk.type === "finish") {
                  providerMetadata = chunk.providerMetadata;
                  const additionalTokenValues = extractAdditionalTokenValues(
                    providerMetadata
                  );
                  const chunkUsage = chunk.usage ?? {};
                  // NOTE(review): this key is `cacheReadInputTokens` but
                  // extractUsage (via extractCacheReadTokens) looks for
                  // `cachedInputTokens` — cache-read counts from the finish
                  // chunk may be dropped in the stream path; verify.
                  usage = {
                    inputTokens: extractTokenCount(chunk.usage?.inputTokens),
                    outputTokens: extractTokenCount(chunk.usage?.outputTokens),
                    reasoningTokens: extractReasoningTokens(chunkUsage),
                    cacheReadInputTokens: extractCacheReadTokens(chunkUsage),
                    ...additionalTokenValues
                  };
                  const rawFinishReason = chunk.finishReason;
                  if (typeof rawFinishReason === "string") {
                    finishReason = rawFinishReason;
                  } else if (rawFinishReason && typeof rawFinishReason === "object") {
                    if ("unified" in rawFinishReason) {
                      finishReason = rawFinishReason.unified;
                    } else if ("type" in rawFinishReason) {
                      finishReason = rawFinishReason.type;
                    }
                  }
                }
                controller.enqueue(chunk);
              },
              // Runs once the stream completes: assemble and send the record.
              flush: async () => {
                const durationMs = Date.now() - startTime;
                const webSearchCount = extractWebSearchCount(
                  providerMetadata,
                  usage
                );
                const finalUsageObj = {
                  ...usage,
                  webSearchCount
                };
                const finalUsage = extractUsage(
                  finalUsageObj,
                  providerMetadata
                );
                const provider = extractProvider(model);
                adjustAnthropicV3CacheTokens(model, provider, finalUsage);
                const output = trackOptions.privacyMode ?? defaultPrivacyMode ? [] : buildStreamOutput(
                  generatedText,
                  reasoningText,
                  toolCallsInProgress,
                  sources
                );
                // Tool results are not observable in the stream, hence 0.
                const tools = {
                  toolCallCount: toolCallsInProgress.size,
                  toolResultCount: 0,
                  toolCallNames: [
                    ...new Set(
                      [...toolCallsInProgress.values()].map((t) => t.toolName)
                    )
                  ],
                  availableTools: params.tools?.map((t) => t.name)
                };
                const cost = (trackOptions.computeCosts ?? defaultComputeCosts) && (finalUsage.inputTokens > 0 || finalUsage.outputTokens > 0) ? await computeCosts(model.modelId, model.provider, {
                  inputTokens: finalUsage.inputTokens,
                  outputTokens: finalUsage.outputTokens
                }) : {};
                const input = trackOptions.privacyMode ?? defaultPrivacyMode ? [] : mapPromptToMessages(
                  params.prompt,
                  maxContentSize
                );
                const call = {
                  timestamp: /* @__PURE__ */ new Date(),
                  traceId,
                  type: "stream",
                  model: model.modelId,
                  provider,
                  finishReason,
                  input,
                  output,
                  usage: finalUsage,
                  cost,
                  tools,
                  durationMs,
                  httpStatus: 200
                };
                sendCall(
                  call,
                  effectiveTransport,
                  trackOptions.onSuccess ?? defaultOnSuccess,
                  trackOptions.onError ?? defaultOnError
                );
              }
            });
            return { stream: stream.pipeThrough(transformStream), ...rest };
          } catch (error) {
            // doStream itself rejected (before any chunk flowed): log + rethrow.
            const durationMs = Date.now() - startTime;
            const input = trackOptions.privacyMode ?? defaultPrivacyMode ? [] : mapPromptToMessages(
              params.prompt,
              maxContentSize
            );
            const call = createErrorCall(
              traceId,
              "stream",
              model.modelId,
              extractProvider(model),
              input,
              durationMs,
              error
            );
            sendCall(
              call,
              effectiveTransport,
              trackOptions.onSuccess ?? defaultOnSuccess,
              trackOptions.onError ?? defaultOnError
            );
            throw error;
          }
        },
        writable: true,
        configurable: true,
        enumerable: false
      }
    });
  };
  return { track, transport };
};
708
- export { databuddyLLM, generateTraceId, httpTransport };