@brizz/sdk 0.1.2 → 0.1.3-rc.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +153 -966
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +1 -40
- package/dist/index.d.ts +1 -40
- package/dist/index.js +148 -963
- package/dist/index.js.map +1 -1
- package/dist/preload.cjs +145 -956
- package/dist/preload.cjs.map +1 -1
- package/dist/preload.js +143 -957
- package/dist/preload.js.map +1 -1
- package/package.json +29 -25
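
The substantive change visible in the preload.cjs diff below: the bundled VercelAIInstrumentation (which monkey-patched the AI SDK's generateText, streamText, embed, and embedMany exports and carried its own stream handler and telemetry recorder) is removed, and the span processors instead rewrite the Vercel AI SDK's own telemetry spans in their onEnd hooks. A minimal sketch of that renaming step, reusing the HANDLED_SPAN_NAMES mapping from the added code (the harness around it is illustrative only, not the shipped implementation):

// Illustrative sketch — mirrors HANDLED_SPAN_NAMES / transformAiSdkSpanName
// from the code added in this diff; not the shipped implementation.
const HANDLED_SPAN_NAMES = {
  "ai.generateText.doGenerate": "gen_ai.chat",
  "ai.streamText.doStream": "gen_ai.chat",
  "ai.streamText": "ai.streamText",
  "ai.toolCall": (span) => `${span.attributes["ai.toolCall.name"]}.tool`
};

function renameAiSdkSpan(span) {
  const mapped = HANDLED_SPAN_NAMES[span.name];
  if (mapped === undefined) return; // not an AI SDK span: leave it untouched
  span.name = typeof mapped === "function" ? mapped(span) : mapped;
}

// Example: a tool-call span emitted by the AI SDK gets a tool-specific name.
const span = { name: "ai.toolCall", attributes: { "ai.toolCall.name": "getWeather" } };
renameAiSdkSpan(span);
// span.name === "getWeather.tool"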
package/dist/preload.cjs
CHANGED
@@ -251,938 +251,6 @@ var import_instrumentation_pinecone = require("@traceloop/instrumentation-pineco
 var import_instrumentation_qdrant = require("@traceloop/instrumentation-qdrant");
 var import_instrumentation_together = require("@traceloop/instrumentation-together");
 var import_instrumentation_vertexai = require("@traceloop/instrumentation-vertexai");
-
-// src/internal/instrumentation/vercel-ai/instrumentation.ts
-var import_instrumentation = require("@opentelemetry/instrumentation");
-
-// src/internal/instrumentation/vercel-ai/patchers/base-patcher.ts
-var import_api2 = require("@opentelemetry/api");
-
-// src/internal/instrumentation/vercel-ai/semconv.ts
-var ATTR_GEN_AI_SYSTEM = "gen_ai.system";
-var ATTR_GEN_AI_OPERATION_NAME = "gen_ai.operation.name";
-var ATTR_GEN_AI_REQUEST_MODEL = "gen_ai.request.model";
-var ATTR_GEN_AI_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens";
-var ATTR_GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature";
-var ATTR_GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p";
-var ATTR_GEN_AI_REQUEST_TOP_K = "gen_ai.request.top_k";
-var ATTR_GEN_AI_REQUEST_STOP_SEQUENCES = "gen_ai.request.stop_sequences";
-var ATTR_GEN_AI_REQUEST_FREQUENCY_PENALTY = "gen_ai.request.frequency_penalty";
-var ATTR_GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty";
-var ATTR_GEN_AI_RESPONSE_ID = "gen_ai.response.id";
-var ATTR_GEN_AI_RESPONSE_MODEL = "gen_ai.response.model";
-var ATTR_GEN_AI_RESPONSE_FINISH_REASONS = "gen_ai.response.finish_reasons";
-var ATTR_GEN_AI_TOKEN_TYPE = "gen_ai.token.type";
-var ATTR_GEN_AI_PROMPT = "gen_ai.prompt";
-var ATTR_GEN_AI_COMPLETION = "gen_ai.completion";
-var ATTR_GEN_AI_OPENAI_API_BASE = "gen_ai.openai.api_base";
-var ATTR_EVENT_NAME = "event.name";
-var EVENT_GEN_AI_USER_MESSAGE = "gen_ai.user.message";
-var EVENT_GEN_AI_ASSISTANT_MESSAGE = "gen_ai.assistant.message";
-var EVENT_GEN_AI_SYSTEM_MESSAGE = "gen_ai.system.message";
-var EVENT_GEN_AI_TOOL_MESSAGE = "gen_ai.tool.message";
-var METRIC_GEN_AI_CLIENT_OPERATION_DURATION = "gen_ai.client.operation.duration";
-var METRIC_GEN_AI_CLIENT_TOKEN_USAGE = "gen_ai.client.token.usage";
-var OPERATION_NAME_CHAT = "chat";
-var OPERATION_NAME_EMBEDDINGS = "embeddings";
-var TOKEN_TYPE_INPUT = "input";
-var TOKEN_TYPE_OUTPUT = "output";
-var PROVIDER_OPENAI = "openai";
-var PROVIDER_ANTHROPIC = "anthropic";
-var PROVIDER_GOOGLE = "google";
-var PROVIDER_AMAZON = "amazon";
-var PROVIDER_AZURE = "azure";
-var PROVIDER_VERCEL = "vercel";
-var PROVIDER_UNKNOWN = "unknown";
-var SPAN_NAME_GEN_AI_CHAT = "gen_ai.chat";
-var SPAN_NAME_GEN_AI_EMBEDDINGS = "gen_ai.embeddings";
-
-// src/internal/instrumentation/vercel-ai/utils.ts
-function detectProvider(model) {
-  if (typeof model === "object" && model !== null) {
-    const modelObj = model;
-    if (modelObj.provider) {
-      return {
-        system: normalizeProviderName(modelObj.provider),
-        apiBase: extractApiBase(modelObj)
-      };
-    }
-    if (modelObj.modelId) {
-      return detectProviderFromModelId(modelObj.modelId);
-    }
-  }
-  if (typeof model === "string") {
-    return detectProviderFromModelId(model);
-  }
-  return { system: PROVIDER_UNKNOWN };
-}
-function detectProviderFromModelId(modelId) {
-  const lowerModel = modelId.toLowerCase();
-  if (lowerModel.startsWith("gpt-") || lowerModel.startsWith("text-davinci-") || lowerModel.startsWith("text-embedding-") || lowerModel.startsWith("dall-e") || lowerModel.startsWith("whisper-") || lowerModel.startsWith("tts-")) {
-    return { system: PROVIDER_OPENAI };
-  }
-  if (lowerModel.startsWith("claude-")) {
-    return { system: PROVIDER_ANTHROPIC };
-  }
-  if (lowerModel.startsWith("gemini-") || lowerModel.startsWith("palm-") || lowerModel.includes("bison") || lowerModel.includes("gecko")) {
-    return { system: PROVIDER_GOOGLE };
-  }
-  if (lowerModel.startsWith("amazon.") || lowerModel.startsWith("anthropic.claude-") || lowerModel.startsWith("ai21.") || lowerModel.startsWith("cohere.") || lowerModel.startsWith("meta.llama")) {
-    return { system: PROVIDER_AMAZON };
-  }
-  if (lowerModel.includes("azure") || lowerModel.includes(".openai.azure.com")) {
-    return { system: PROVIDER_AZURE };
-  }
-  const parts = modelId.split(/[-._/]/);
-  if (parts.length > 0 && parts[0]) {
-    return { system: normalizeProviderName(parts[0]) };
-  }
-  return { system: PROVIDER_UNKNOWN };
-}
-function normalizeProviderName(provider) {
-  const normalized = provider.toLowerCase().trim();
-  switch (normalized) {
-    case "openai":
-    case "open-ai":
-    case "open_ai": {
-      return PROVIDER_OPENAI;
-    }
-    case "anthropic":
-    case "claude": {
-      return PROVIDER_ANTHROPIC;
-    }
-    case "google":
-    case "vertex":
-    case "vertexai":
-    case "vertex-ai":
-    case "gemini": {
-      return PROVIDER_GOOGLE;
-    }
-    case "amazon":
-    case "aws":
-    case "bedrock":
-    case "amazon-bedrock": {
-      return PROVIDER_AMAZON;
-    }
-    case "azure":
-    case "azure-openai":
-    case "microsoft": {
-      return PROVIDER_AZURE;
-    }
-    case "vercel":
-    case "vercel-ai": {
-      return PROVIDER_VERCEL;
-    }
-    default: {
-      return normalized;
-    }
-  }
-}
-function extractApiBase(model) {
-  if (typeof model === "object" && model !== null) {
-    const anyModel = model;
-    return anyModel.apiBase || anyModel.baseURL || anyModel.endpoint || void 0;
-  }
-  return void 0;
-}
-function extractModelId(model) {
-  if (typeof model === "string") {
-    return model;
-  }
-  if (typeof model === "object" && model !== null) {
-    return model.modelId || "unknown";
-  }
-  return "unknown";
-}
-function messagesToAttributes(messages, prefix, captureContent) {
-  const attributes = {};
-  for (const [index, msg] of messages.entries()) {
-    const baseKey = `${prefix}.${index}`;
-    attributes[`${baseKey}.role`] = msg.role;
-    if (captureContent && msg.content) {
-      if (typeof msg.content === "string") {
-        attributes[`${baseKey}.content`] = msg.content;
-      } else if (Array.isArray(msg.content)) {
-        const textParts = msg.content.filter((part) => part.type === "text" && part.text).map((part) => part.text).join(" ");
-        if (textParts) {
-          attributes[`${baseKey}.content`] = textParts;
-        }
-      }
-    }
-    if (msg.toolInvocations && msg.toolInvocations.length > 0) {
-      attributes[`${baseKey}.tool_calls`] = msg.toolInvocations.length;
-    }
-  }
-  return attributes;
-}
-function promptToAttributes(prompt, captureContent) {
-  const attributes = {};
-  attributes[`${ATTR_GEN_AI_PROMPT}.0.role`] = "user";
-  if (captureContent) {
-    attributes[`${ATTR_GEN_AI_PROMPT}.0.content`] = prompt;
-  }
-  return attributes;
-}
-function completionToAttributes(text, finishReason, captureContent) {
-  const attributes = {};
-  attributes[`${ATTR_GEN_AI_COMPLETION}.0.role`] = "assistant";
-  if (captureContent) {
-    attributes[`${ATTR_GEN_AI_COMPLETION}.0.content`] = text;
-  }
-  if (finishReason) {
-    attributes[`${ATTR_GEN_AI_COMPLETION}.0.finish_reason`] = finishReason;
-  }
-  return attributes;
-}
-function tokenUsageToAttributes(usage) {
-  if (!usage) {
-    return {};
-  }
-  const attributes = {};
-  if (usage.inputTokens !== void 0) {
-    attributes["gen_ai.usage.prompt_tokens"] = usage.inputTokens;
-    attributes["gen_ai.usage.input_tokens"] = usage.inputTokens;
-    attributes["llm.usage.prompt_tokens"] = usage.inputTokens;
-  } else if (usage.promptTokens !== void 0) {
-    attributes["gen_ai.usage.prompt_tokens"] = usage.promptTokens;
-    attributes["gen_ai.usage.input_tokens"] = usage.promptTokens;
-    attributes["llm.usage.prompt_tokens"] = usage.promptTokens;
-  }
-  if (usage.outputTokens !== void 0) {
-    attributes["gen_ai.usage.completion_tokens"] = usage.outputTokens;
-    attributes["gen_ai.usage.output_tokens"] = usage.outputTokens;
-    attributes["llm.usage.completion_tokens"] = usage.outputTokens;
-  } else if (usage.completionTokens !== void 0) {
-    attributes["gen_ai.usage.completion_tokens"] = usage.completionTokens;
-    attributes["gen_ai.usage.output_tokens"] = usage.completionTokens;
-    attributes["llm.usage.completion_tokens"] = usage.completionTokens;
-  }
-  if (usage.totalTokens === void 0) {
-    const inputTokens = usage.inputTokens || usage.promptTokens;
-    const outputTokens = usage.outputTokens || usage.completionTokens;
-    if (inputTokens !== void 0 && outputTokens !== void 0) {
-      const totalTokens = inputTokens + outputTokens;
-      attributes["gen_ai.usage.total_tokens"] = totalTokens;
-      attributes["llm.usage.total_tokens"] = totalTokens;
-    }
-  } else {
-    attributes["gen_ai.usage.total_tokens"] = usage.totalTokens;
-    attributes["llm.usage.total_tokens"] = usage.totalTokens;
-  }
-  return attributes;
-}
-function shouldRecordError(error) {
-  if (error instanceof Error) {
-    const message = error.message.toLowerCase();
-    if (message.includes("abort") || message.includes("cancel")) {
-      return false;
-    }
-  }
-  return true;
-}
-function getEnvBool(name) {
-  const value = process.env[name];
-  if (value === void 0) {
-    return void 0;
-  }
-  return value.toLowerCase() === "true" || value === "1";
-}
-
-// src/internal/instrumentation/vercel-ai/patchers/base-patcher.ts
-var BasePatcher = class {
-  constructor(context8) {
-    this.context = context8;
-  }
-  createSpan(spanName, params, operationName, additionalAttributes) {
-    const provider = detectProvider(params.model);
-    const modelId = extractModelId(params.model);
-    const span = this.context.tracer.startSpan(spanName, {
-      kind: import_api2.SpanKind.CLIENT,
-      attributes: {
-        [ATTR_GEN_AI_SYSTEM]: provider.system,
-        [ATTR_GEN_AI_OPERATION_NAME]: operationName,
-        [ATTR_GEN_AI_REQUEST_MODEL]: modelId,
-        ...params.maxTokens && { [ATTR_GEN_AI_REQUEST_MAX_TOKENS]: params.maxTokens },
-        ...params.temperature !== void 0 && {
-          [ATTR_GEN_AI_REQUEST_TEMPERATURE]: params.temperature
-        },
-        ...params.topP !== void 0 && { [ATTR_GEN_AI_REQUEST_TOP_P]: params.topP },
-        ...params.topK !== void 0 && { [ATTR_GEN_AI_REQUEST_TOP_K]: params.topK },
-        ...params.frequencyPenalty !== void 0 && {
-          [ATTR_GEN_AI_REQUEST_FREQUENCY_PENALTY]: params.frequencyPenalty
-        },
-        ...params.presencePenalty !== void 0 && {
-          [ATTR_GEN_AI_REQUEST_PRESENCE_PENALTY]: params.presencePenalty
-        },
-        ...params.stopSequences && {
-          [ATTR_GEN_AI_REQUEST_STOP_SEQUENCES]: params.stopSequences
-        },
-        ...provider.apiBase && { [ATTR_GEN_AI_OPENAI_API_BASE]: provider.apiBase },
-        ...additionalAttributes
-      }
-    });
-    return { span, provider, modelId };
-  }
-  handleError(error, span) {
-    if (shouldRecordError(error)) {
-      span.recordException(error);
-      span.setStatus({ code: import_api2.SpanStatusCode.ERROR, message: error.message });
-    }
-  }
-  finalizeDuration(span, startTime, config, provider, modelId, operationName) {
-    if (config.enableMetrics) {
-      const duration = (globalThis.performance.now() - startTime) / 1e3;
-      this.context.recordDurationMetric(duration, provider.system, modelId, operationName);
-    }
-    span.end();
-  }
-};
-
-// src/internal/instrumentation/vercel-ai/patchers/generate-text-patcher.ts
-var import_api3 = require("@opentelemetry/api");
-var GenerateTextPatcher = class extends BasePatcher {
-  patch(original) {
-    return async (params) => {
-      const config = this.context.getConfig();
-      const startTime = globalThis.performance.now();
-      const { span, provider, modelId } = this.createSpan(
-        SPAN_NAME_GEN_AI_CHAT,
-        params,
-        OPERATION_NAME_CHAT
-      );
-      if (params.prompt) {
-        span.setAttributes(
-          promptToAttributes(params.prompt, config.captureMessageContent || false)
-        );
-      } else if (params.messages) {
-        span.setAttributes(
-          messagesToAttributes(
-            params.messages,
-            "gen_ai.prompt",
-            config.captureMessageContent || false
-          )
-        );
-        if (config.emitEvents) {
-          this.context.emitMessageEvents(params.messages, provider.system, span);
-        }
-      }
-      try {
-        const result = await import_api3.context.with(
-          import_api3.trace.setSpan(import_api3.context.active(), span),
-          () => original(params)
-        );
-        if (result.response) {
-          span.setAttributes({
-            ...result.response.id && { [ATTR_GEN_AI_RESPONSE_ID]: result.response.id },
-            ...result.response.model && { [ATTR_GEN_AI_RESPONSE_MODEL]: result.response.model }
-          });
-        }
-        if (result.finishReason) {
-          span.setAttribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, [result.finishReason]);
-        }
-        span.setAttributes(
-          completionToAttributes(
-            result.text,
-            result.finishReason,
-            config.captureMessageContent || false
-          )
-        );
-        const usage = result.usage || result.totalUsage || result.steps?.[0]?.usage;
-        if (usage) {
-          span.setAttributes(tokenUsageToAttributes(usage));
-          if (config.enableMetrics) {
-            this.context.recordTokenMetrics(usage, provider.system, modelId);
-          }
-        }
-        if (config.emitEvents) {
-          this.context.emitAssistantMessageEvent(result.text, provider.system, span);
-        }
-        span.setStatus({ code: import_api3.SpanStatusCode.OK });
-        return result;
-      } catch (error) {
-        this.handleError(error, span);
-        throw error;
-      } finally {
-        this.finalizeDuration(span, startTime, config, provider, modelId, OPERATION_NAME_CHAT);
-      }
-    };
-  }
-};
-
-// src/internal/instrumentation/vercel-ai/patchers/stream-text-patcher.ts
-var import_api4 = require("@opentelemetry/api");
-var StreamTextPatcher = class extends BasePatcher {
-  constructor(context8, streamHandler) {
-    super(context8);
-    this.streamHandler = streamHandler;
-  }
-  patch(original) {
-    return async (params) => {
-      const config = this.context.getConfig();
-      const startTime = globalThis.performance.now();
-      const { span, provider, modelId } = this.createSpan(
-        SPAN_NAME_GEN_AI_CHAT,
-        params,
-        OPERATION_NAME_CHAT,
-        { "gen_ai.streaming": true }
-      );
-      if (params.prompt) {
-        span.setAttributes(
-          promptToAttributes(params.prompt, config.captureMessageContent || false)
-        );
-      } else if (params.messages) {
-        span.setAttributes(
-          messagesToAttributes(
-            params.messages,
-            "gen_ai.prompt",
-            config.captureMessageContent || false
-          )
-        );
-        if (config.emitEvents) {
-          this.context.emitMessageEvents(params.messages, provider.system, span);
-        }
-      }
-      try {
-        const stream = await import_api4.context.with(
-          import_api4.trace.setSpan(import_api4.context.active(), span),
-          () => original(params)
-        );
-        return this.streamHandler.wrapStream(stream, span, config, provider, modelId, startTime);
-      } catch (error) {
-        this.handleError(error, span);
-        span.end();
-        throw error;
-      }
-    };
-  }
-};
-
-// src/internal/instrumentation/vercel-ai/patchers/embeddings-patcher.ts
-var import_api5 = require("@opentelemetry/api");
-var EmbeddingsPatcher = class extends BasePatcher {
-  patch(original, isMany = false) {
-    return async (params) => {
-      const config = this.context.getConfig();
-      const startTime = globalThis.performance.now();
-      const additionalAttributes = isMany ? { "gen_ai.embeddings.count": params.values ? params.values.length : 0 } : {};
-      const { span, provider, modelId } = this.createSpan(
-        SPAN_NAME_GEN_AI_EMBEDDINGS,
-        params,
-        OPERATION_NAME_EMBEDDINGS,
-        additionalAttributes
-      );
-      if (!isMany && config.captureMessageContent && params.value) {
-        span.setAttribute("gen_ai.prompt.0.content", params.value);
-      }
-      try {
-        const result = await import_api5.context.with(
-          import_api5.trace.setSpan(import_api5.context.active(), span),
-          () => original(params)
-        );
-        if (result.response) {
-          span.setAttributes({
-            ...result.response.id && { [ATTR_GEN_AI_RESPONSE_ID]: result.response.id },
-            ...result.response.model && { [ATTR_GEN_AI_RESPONSE_MODEL]: result.response.model }
-          });
-        }
-        if (isMany) {
-          if (result.embeddings && result.embeddings.length > 0 && result.embeddings[0]) {
-            span.setAttribute("gen_ai.response.embedding_dimensions", result.embeddings[0].length);
-          }
-        } else {
-          if (result.embedding) {
-            span.setAttribute("gen_ai.response.embedding_dimensions", result.embedding.length);
-          }
-        }
-        if (result.usage) {
-          span.setAttributes(tokenUsageToAttributes(result.usage));
-          if (config.enableMetrics) {
-            this.context.recordTokenMetrics(result.usage, provider.system, modelId);
-          }
-        }
-        span.setStatus({ code: import_api5.SpanStatusCode.OK });
-        return result;
-      } catch (error) {
-        this.handleError(error, span);
-        throw error;
-      } finally {
-        this.finalizeDuration(span, startTime, config, provider, modelId, OPERATION_NAME_EMBEDDINGS);
-      }
-    };
-  }
-};
-
-// src/internal/instrumentation/vercel-ai/stream-handler.ts
-var import_api6 = require("@opentelemetry/api");
-var StreamHandler = class {
-  constructor(context8) {
-    this.context = context8;
-  }
-  wrapStream(stream, span, config, provider, modelId, startTime) {
-    const self = this;
-    let fullText = "";
-    let finishReason;
-    let usage;
-    let response;
-    const wrappedStream = new Proxy(stream, {
-      get(target, prop) {
-        if (prop === Symbol.asyncIterator) {
-          return async function* () {
-            try {
-              for await (const chunk of target) {
-                if (chunk.type === "text-delta" && chunk.textDelta) {
-                  fullText += chunk.textDelta;
-                } else if (chunk.type === "finish") {
-                  finishReason = chunk.finishReason;
-                  usage = chunk.usage;
-                } else if (chunk.type === "response-metadata") {
-                  response = chunk.response;
-                }
-                yield chunk;
-              }
-            } finally {
-              self.finalizeStream(
-                span,
-                config,
-                provider,
-                modelId,
-                startTime,
-                fullText,
-                finishReason,
-                usage,
-                response
-              );
-            }
-          };
-        }
-        if (prop === "textStream" || prop === "fullStream") {
-          const originalStream = target[prop];
-          return {
-            [Symbol.asyncIterator]: async function* () {
-              try {
-                for await (const chunk of originalStream) {
-                  if (prop === "textStream") {
-                    fullText += chunk;
-                  }
-                  yield chunk;
-                }
-              } finally {
-                const streamUsage = await target.usage.catch(() => null);
-                if (streamUsage) {
-                  usage = streamUsage;
-                }
-                self.finalizeStream(
-                  span,
-                  config,
-                  provider,
-                  modelId,
-                  startTime,
-                  fullText,
-                  finishReason,
-                  usage,
-                  response
-                );
-              }
-            }
-          };
-        }
-        const value = target[prop];
-        if (typeof value === "function") {
-          return value.bind(target);
-        }
-        return value;
-      }
-    });
-    return wrappedStream;
-  }
-  finalizeStream(span, config, provider, modelId, startTime, fullText, finishReason, usage, response) {
-    if (response) {
-      span.setAttributes({
-        ...response.id && { [ATTR_GEN_AI_RESPONSE_ID]: response.id },
-        ...response.model && { [ATTR_GEN_AI_RESPONSE_MODEL]: response.model }
-      });
-    }
-    if (finishReason) {
-      span.setAttribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, [finishReason]);
-    }
-    if (fullText) {
-      span.setAttributes(
-        completionToAttributes(
-          fullText,
-          finishReason,
-          config.captureMessageContent || false
-        )
-      );
-    }
-    if (usage) {
-      span.setAttributes(tokenUsageToAttributes(usage));
-      if (config.enableMetrics) {
-        this.context.recordTokenMetrics(usage, provider.system, modelId);
-      }
-    }
-    if (config.enableMetrics) {
-      const duration = (performance.now() - startTime) / 1e3;
-      this.context.recordDurationMetric(duration, provider.system, modelId, OPERATION_NAME_CHAT);
-    }
-    span.setStatus({ code: import_api6.SpanStatusCode.OK });
-    span.end();
-  }
-};
-
-// src/internal/instrumentation/vercel-ai/telemetry-recorder.ts
-var import_api7 = require("@opentelemetry/api");
-var import_api_logs = require("@opentelemetry/api-logs");
-var TelemetryRecorder = class {
-  constructor(genaiClientOperationDuration, genaiClientTokenUsage, logger2) {
-    this.genaiClientOperationDuration = genaiClientOperationDuration;
-    this.genaiClientTokenUsage = genaiClientTokenUsage;
-    this.logger = logger2;
-  }
-  /**
-   * Record token usage metrics
-   */
-  recordTokenMetrics(usage, system, model) {
-    if (!this.genaiClientTokenUsage) {
-      return;
-    }
-    const commonAttrs = {
-      [ATTR_GEN_AI_SYSTEM]: system,
-      [ATTR_GEN_AI_REQUEST_MODEL]: model
-    };
-    const inputTokens = usage.inputTokens || usage.promptTokens;
-    const outputTokens = usage.outputTokens || usage.completionTokens;
-    if (inputTokens !== void 0) {
-      this.genaiClientTokenUsage.record(inputTokens, {
-        ...commonAttrs,
-        [ATTR_GEN_AI_TOKEN_TYPE]: TOKEN_TYPE_INPUT
-      });
-    }
-    if (outputTokens !== void 0) {
-      this.genaiClientTokenUsage.record(outputTokens, {
-        ...commonAttrs,
-        [ATTR_GEN_AI_TOKEN_TYPE]: TOKEN_TYPE_OUTPUT
-      });
-    }
-  }
-  /**
-   * Record operation duration metric
-   */
-  recordDurationMetric(duration, system, model, operation) {
-    if (!this.genaiClientOperationDuration) {
-      return;
-    }
-    this.genaiClientOperationDuration.record(duration, {
-      [ATTR_GEN_AI_SYSTEM]: system,
-      [ATTR_GEN_AI_REQUEST_MODEL]: model,
-      [ATTR_GEN_AI_OPERATION_NAME]: operation
-    });
-  }
-  /**
-   * Emit message events
-   */
-  emitMessageEvents(messages, system, span) {
-    if (!this.logger) {
-      return;
-    }
-    const ctx = import_api7.trace.setSpan(import_api7.context.active(), span);
-    for (const msg of messages) {
-      let eventName;
-      switch (msg.role) {
-        case "system": {
-          eventName = EVENT_GEN_AI_SYSTEM_MESSAGE;
-          break;
-        }
-        case "user": {
-          eventName = EVENT_GEN_AI_USER_MESSAGE;
-          break;
-        }
-        case "assistant": {
-          eventName = EVENT_GEN_AI_ASSISTANT_MESSAGE;
-          break;
-        }
-        case "tool":
-        case "function": {
-          eventName = EVENT_GEN_AI_TOOL_MESSAGE;
-          break;
-        }
-        default: {
-          continue;
-        }
-      }
-      this.logger.emit({
-        timestamp: Date.now(),
-        context: ctx,
-        severityNumber: import_api_logs.SeverityNumber.INFO,
-        attributes: {
-          [ATTR_EVENT_NAME]: eventName,
-          [ATTR_GEN_AI_SYSTEM]: system
-        },
-        body: {
-          role: msg.role,
-          content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content),
-          name: msg.name
-        }
-      });
-    }
-  }
-  /**
-   * Emit assistant message event
-   */
-  emitAssistantMessageEvent(text, system, span) {
-    if (!this.logger) {
-      return;
-    }
-    const ctx = import_api7.trace.setSpan(import_api7.context.active(), span);
-    this.logger.emit({
-      timestamp: Date.now(),
-      context: ctx,
-      severityNumber: import_api_logs.SeverityNumber.INFO,
-      attributes: {
-        [ATTR_EVENT_NAME]: EVENT_GEN_AI_ASSISTANT_MESSAGE,
-        [ATTR_GEN_AI_SYSTEM]: system
-      },
-      body: {
-        role: "assistant",
-        content: text
-      }
-    });
-  }
-};
-
-// src/internal/instrumentation/vercel-ai/instrumentation.ts
-var PACKAGE_NAME = "@brizz/vercel-ai-instrumentation";
-var PACKAGE_VERSION = "0.1.0";
-var VercelAIInstrumentation = class _VercelAIInstrumentation extends import_instrumentation.InstrumentationBase {
-  _genaiClientOperationDuration;
-  _genaiClientTokenUsage;
-  _telemetryRecorder;
-  _streamHandler;
-  _patchers = /* @__PURE__ */ new Map();
-  // Holds last patched namespace when available (reserved for future factory wrapping)
-  _vercelAiNamespace = null;
-  static _WRAPPED_SYMBOL = Symbol.for("brizz.vercel-ai.patched");
-  constructor(config = {}) {
-    super(PACKAGE_NAME, PACKAGE_VERSION, config);
-    const cfg = this.getConfig();
-    const envCC = getEnvBool("OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT");
-    if (envCC !== void 0) {
-      cfg.captureMessageContent = envCC;
-    }
-    this._initializeComponents();
-  }
-  setConfig(config = {}) {
-    const {
-      captureMessageContent = true,
-      enableMetrics = true,
-      emitEvents = true,
-      ...validConfig
-    } = config;
-    const fullConfig = {
-      ...validConfig,
-      captureMessageContent,
-      enableMetrics,
-      emitEvents
-    };
-    super.setConfig(fullConfig);
-  }
-  _initializeComponents() {
-    this._telemetryRecorder = new TelemetryRecorder(
-      this._genaiClientOperationDuration,
-      this._genaiClientTokenUsage,
-      this.logger
-    );
-    this._streamHandler = new StreamHandler({
-      recordTokenMetrics: this._telemetryRecorder.recordTokenMetrics.bind(this._telemetryRecorder),
-      recordDurationMetric: this._telemetryRecorder.recordDurationMetric.bind(
-        this._telemetryRecorder
-      )
-    });
-    const patcherContext = {
-      tracer: this.tracer,
-      getConfig: this.getConfig.bind(this),
-      recordTokenMetrics: this._telemetryRecorder.recordTokenMetrics.bind(this._telemetryRecorder),
-      recordDurationMetric: this._telemetryRecorder.recordDurationMetric.bind(
-        this._telemetryRecorder
-      ),
-      emitMessageEvents: this._telemetryRecorder.emitMessageEvents.bind(this._telemetryRecorder),
-      emitAssistantMessageEvent: this._telemetryRecorder.emitAssistantMessageEvent.bind(
-        this._telemetryRecorder
-      )
-    };
-    this._patchers.set("generateText", new GenerateTextPatcher(patcherContext));
-    this._patchers.set("streamText", new StreamTextPatcher(patcherContext, this._streamHandler));
-    this._patchers.set("embed", new EmbeddingsPatcher(patcherContext));
-    this._patchers.set("embedMany", new EmbeddingsPatcher(patcherContext));
-  }
-  init() {
-    return [
-      new import_instrumentation.InstrumentationNodeModuleDefinition(
-        "ai",
-        [">=4.0.0 <6"],
-        (moduleExports) => {
-          logger.info("Starting instrumentation of Vercel AI SDK module");
-          this._vercelAiNamespace = moduleExports;
-          const patched = this._patchModuleExports(moduleExports);
-          return patched ?? moduleExports;
-        },
-        (moduleExports) => {
-          logger.debug("Uninstrumenting @vercel/ai module");
-          return moduleExports;
-        }
-      )
-    ];
-  }
-  _updateMetricInstruments() {
-    const config = this.getConfig();
-    if (!config.enableMetrics) {
-      return;
-    }
-    this._genaiClientOperationDuration = this.meter.createHistogram(
-      METRIC_GEN_AI_CLIENT_OPERATION_DURATION,
-      {
-        description: "GenAI operation duration",
-        unit: "s",
-        advice: {
-          explicitBucketBoundaries: [
-            0.01,
-            0.02,
-            0.04,
-            0.08,
-            0.16,
-            0.32,
-            0.64,
-            1.28,
-            2.56,
-            5.12,
-            10.24,
-            20.48,
-            40.96,
-            81.92
-          ]
-        }
-      }
-    );
-    this._genaiClientTokenUsage = this.meter.createHistogram(METRIC_GEN_AI_CLIENT_TOKEN_USAGE, {
-      description: "Measures number of input and output tokens used",
-      unit: "{token}",
-      advice: {
-        explicitBucketBoundaries: [
-          1,
-          4,
-          16,
-          64,
-          256,
-          1024,
-          4096,
-          16384,
-          65536,
-          262144,
-          1048576,
-          4194304,
-          16777216,
-          67108864
-        ]
-      }
-    });
-    this._telemetryRecorder = new TelemetryRecorder(
-      this._genaiClientOperationDuration,
-      this._genaiClientTokenUsage,
-      this.logger
-    );
-  }
-  /**
-   * Patch known AI SDK functions in-place on the provided module exports object.
-   * This approach is compatible with both CJS and ESM module loaders.
-   */
-  _patchModuleExports(moduleExports) {
-    if (!moduleExports || typeof moduleExports !== "object") {
-      return null;
-    }
-    let inPlacePatched = true;
-    const wrapFunction = (name, isEmbedMany = false) => {
-      const current = moduleExports[name];
-      if (typeof current !== "function") {
-        return;
-      }
-      const currentFn = current;
-      if (currentFn[_VercelAIInstrumentation._WRAPPED_SYMBOL]) {
-        return;
-      }
-      const descriptor = Object.getOwnPropertyDescriptor(moduleExports, name);
-      if (descriptor && (!descriptor.writable || !descriptor.configurable) && !descriptor.set) {
-        inPlacePatched = false;
-        return;
-      }
-      const patcher = this._patchers.get(name);
-      if (!patcher) {
-        return;
-      }
-      const patched = isEmbedMany ? patcher.patch(currentFn, true) : patcher.patch(currentFn);
-      try {
-        Object.defineProperty(patched, _VercelAIInstrumentation._WRAPPED_SYMBOL, {
-          value: true,
-          enumerable: false,
-          configurable: false
-        });
-      } catch {
-      }
-      try {
-        moduleExports[name] = patched;
-      } catch {
-        inPlacePatched = false;
-      }
-    };
-    wrapFunction("generateText");
-    wrapFunction("streamText");
-    wrapFunction("embed");
-    wrapFunction("embedMany", true);
-    if (!inPlacePatched) {
-      const proxiedModule = new Proxy(moduleExports, {
-        get: (target, prop, receiver) => {
-          const originalValue = Reflect.get(target, prop, receiver);
-          if (typeof originalValue === "function" && typeof prop === "string" && this._patchers.has(prop)) {
-            const patcher = this._patchers.get(prop);
-            const isEmbedMany = prop === "embedMany";
-            const wrapped = isEmbedMany ? patcher.patch(originalValue, true) : patcher.patch(originalValue);
-            return wrapped;
-          }
-          return originalValue;
-        }
-      });
-      return proxiedModule;
-    }
-    return moduleExports;
-  }
-  /**
-   * Manual instrumentation hook for bundlers/Next.js. Applies in-place wrapping
-   * on the provided module namespace.
-   */
-  manuallyInstrument(module3) {
-    try {
-      const result = this._patchModuleExports(module3);
-      if (result !== null) {
-        logger.debug("Applied manual Vercel AI instrumentation");
-        this._vercelAiNamespace = result;
-        return result;
-      }
-      logger.warn("Manual Vercel AI instrumentation received invalid module");
-      return module3;
-    } catch (error) {
-      logger.error(`Failed manual Vercel AI instrumentation: ${String(error)}`);
-      return this._vercelAiNamespace || module3;
-    }
-  }
-  /**
-   * Wrap a created provider/client instance (factory return) when possible.
-   * Call this from wrappers that construct provider clients (e.g., OpenAI SDK).
-   */
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  wrapFactoryReturn(instance) {
-    return instance;
-  }
-};
-
-// src/internal/instrumentation/registry.ts
 var InstrumentationRegistry = class _InstrumentationRegistry {
   static instance;
   manualModules = null;
@@ -1258,8 +326,7 @@ var InstrumentationRegistry = class _InstrumentationRegistry {
       },
       { class: import_instrumentation_chromadb.ChromaDBInstrumentation, name: "ChromaDB", module: this.manualModules?.chromadb },
       { class: import_instrumentation_qdrant.QdrantInstrumentation, name: "Qdrant", module: this.manualModules?.qdrant },
-      { class: import_instrumentation_together.TogetherInstrumentation, name: "Together", module: this.manualModules?.together }
-      { class: VercelAIInstrumentation, name: "Vercel AI", module: this.manualModules?.vercelAI }
+      { class: import_instrumentation_together.TogetherInstrumentation, name: "Together", module: this.manualModules?.together }
    ];
    for (const config of instrumentationConfigs) {
      if (config.module) {
@@ -1278,13 +345,13 @@ var InstrumentationRegistry = class _InstrumentationRegistry {
 };

 // src/internal/log/logging.ts
-var
+var import_api_logs = require("@opentelemetry/api-logs");
 var import_exporter_logs_otlp_http = require("@opentelemetry/exporter-logs-otlp-http");
 var import_resources = require("@opentelemetry/resources");
 var import_sdk_logs2 = require("@opentelemetry/sdk-logs");

 // src/internal/log/processors/log-processor.ts
-var
+var import_api3 = require("@opentelemetry/api");
 var import_sdk_logs = require("@opentelemetry/sdk-logs");

 // src/internal/masking/patterns.ts
@@ -1910,10 +977,10 @@ function maskAttributes(attributes, rules, outputOriginalValue = false) {
 }

 // src/internal/semantic-conventions.ts
-var
+var import_api2 = require("@opentelemetry/api");
 var BRIZZ = "brizz";
 var PROPERTIES = "properties";
-var PROPERTIES_CONTEXT_KEY = (0,
+var PROPERTIES_CONTEXT_KEY = (0, import_api2.createContextKey)(PROPERTIES);

 // src/internal/log/processors/log-processor.ts
 var DEFAULT_LOG_MASKING_RULES = [
@@ -1934,7 +1001,7 @@ var BrizzSimpleLogRecordProcessor = class extends import_sdk_logs.SimpleLogRecor
     if (maskingConfig) {
       maskLog(logRecord, maskingConfig);
     }
-    const associationProperties =
+    const associationProperties = import_api3.context.active().getValue(PROPERTIES_CONTEXT_KEY);
     if (associationProperties) {
       for (const [key, value] of Object.entries(associationProperties)) {
         logRecord.setAttribute(`${BRIZZ}.${key}`, value);
@@ -1954,7 +1021,7 @@ var BrizzBatchLogRecordProcessor = class extends import_sdk_logs.BatchLogRecordP
     if (maskingConfig) {
       maskLog(logRecord, maskingConfig);
     }
-    const associationProperties =
+    const associationProperties = import_api3.context.active().getValue(PROPERTIES_CONTEXT_KEY);
     if (associationProperties) {
       for (const [key, value] of Object.entries(associationProperties)) {
         logRecord.setAttribute(`${BRIZZ}.${key}`, value);
@@ -2091,7 +1158,7 @@ var LoggingModule = class _LoggingModule {
   /**
    * Emit a custom event to the telemetry pipeline
    */
-  emitEvent(name, attributes, body, severityNumber =
+  emitEvent(name, attributes, body, severityNumber = import_api_logs.SeverityNumber.INFO) {
     logger.debug("Attempting to emit event", {
       name,
       hasAttributes: !!attributes,
@@ -2263,8 +1330,131 @@ function getMetricsReader() {
 var import_exporter_trace_otlp_http = require("@opentelemetry/exporter-trace-otlp-http");

 // src/internal/trace/processors/span-processor.ts
-var
+var import_api4 = require("@opentelemetry/api");
 var import_sdk_trace_base = require("@opentelemetry/sdk-trace-base");
+
+// src/internal/trace/transformations/vercel-ai.ts
+var import_ai_semantic_conventions = require("@traceloop/ai-semantic-conventions");
+var AI_GENERATE_TEXT_DO_GENERATE = "ai.generateText.doGenerate";
+var AI_STREAM_TEXT_DO_STREAM = "ai.streamText.doStream";
+var HANDLED_SPAN_NAMES = {
+  [AI_GENERATE_TEXT_DO_GENERATE]: "gen_ai.chat",
+  [AI_STREAM_TEXT_DO_STREAM]: "gen_ai.chat",
+  "ai.streamText": "ai.streamText",
+  "ai.toolCall": (span) => {
+    const toolName = span.attributes["ai.toolCall.name"];
+    return `${toolName}.tool`;
+  }
+};
+var AI_RESPONSE_TEXT = "ai.response.text";
+var AI_PROMPT_MESSAGES = "ai.prompt.messages";
+var AI_USAGE_PROMPT_TOKENS = "ai.usage.promptTokens";
+var AI_USAGE_COMPLETION_TOKENS = "ai.usage.completionTokens";
+var AI_MODEL_PROVIDER = "ai.model.provider";
+var transformAiSdkSpanName = (span) => {
+  if (span.name in HANDLED_SPAN_NAMES) {
+    if (typeof HANDLED_SPAN_NAMES[span.name] === "function") {
+      span.name = HANDLED_SPAN_NAMES[span.name](span);
+    } else {
+      span.name = HANDLED_SPAN_NAMES[span.name];
+    }
+  }
+};
+var transformResponseText = (attributes) => {
+  if (AI_RESPONSE_TEXT in attributes) {
+    attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_COMPLETIONS}.0.content`] = attributes[AI_RESPONSE_TEXT];
+    attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_COMPLETIONS}.0.role`] = "assistant";
+    delete attributes[AI_RESPONSE_TEXT];
+  }
+};
+var transformPromptMessages = (attributes) => {
+  if (AI_PROMPT_MESSAGES in attributes) {
+    try {
+      const messages = JSON.parse(attributes[AI_PROMPT_MESSAGES]);
+      messages.forEach((msg, index) => {
+        logger.debug("Transforming prompt message", { msg, type: typeof msg.content });
+        if (typeof msg.content === "string") {
+          attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_PROMPTS}.${index}.content`] = msg.content;
+        } else {
+          if (Array.isArray(msg.content) && msg.content.length > 0) {
+            const lastContent = msg.content[msg.content.length - 1];
+            if (lastContent.text) {
+              attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_PROMPTS}.${index}.content`] = lastContent.text;
+            }
+          } else {
+            attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_PROMPTS}.${index}.content`] = JSON.stringify(
+              msg.content
+            );
+          }
+        }
+        attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_PROMPTS}.${index}.role`] = msg.role;
+      });
+      delete attributes[AI_PROMPT_MESSAGES];
+    } catch {
+    }
+  }
+};
+var transformPromptTokens = (attributes) => {
+  if (AI_USAGE_PROMPT_TOKENS in attributes) {
+    attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_USAGE_PROMPT_TOKENS}`] = attributes[AI_USAGE_PROMPT_TOKENS];
+    delete attributes[AI_USAGE_PROMPT_TOKENS];
+  }
+};
+var transformCompletionTokens = (attributes) => {
+  if (AI_USAGE_COMPLETION_TOKENS in attributes) {
+    attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_USAGE_COMPLETION_TOKENS}`] = attributes[AI_USAGE_COMPLETION_TOKENS];
+    delete attributes[AI_USAGE_COMPLETION_TOKENS];
+  }
+};
+var calculateTotalTokens = (attributes) => {
+  const promptTokens = attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_USAGE_PROMPT_TOKENS}`];
+  const completionTokens = attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_USAGE_COMPLETION_TOKENS}`];
+  if (promptTokens && completionTokens) {
+    attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_USAGE_TOTAL_TOKENS}`] = Number(promptTokens) + Number(completionTokens);
+  }
+};
+var transformVendor = (attributes) => {
+  if (AI_MODEL_PROVIDER in attributes) {
+    const vendor = attributes[AI_MODEL_PROVIDER];
+    if (vendor && vendor.startsWith("openai")) {
+      attributes[import_ai_semantic_conventions.SpanAttributes.LLM_SYSTEM] = "OpenAI";
+    } else {
+      attributes[import_ai_semantic_conventions.SpanAttributes.LLM_SYSTEM] = vendor;
+    }
+    delete attributes[AI_MODEL_PROVIDER];
+  }
+};
+var transformAiSdkAttributes = (attributes) => {
+  transformResponseText(attributes);
+  transformPromptMessages(attributes);
+  transformPromptTokens(attributes);
+  transformCompletionTokens(attributes);
+  calculateTotalTokens(attributes);
+  transformVendor(attributes);
+};
+var shouldHandleSpan = (span) => {
+  return span.name in HANDLED_SPAN_NAMES;
+};
+var transformAiSdkSpan = (span) => {
+  for (const key in span.attributes) {
+    if (Number.isNaN(span.attributes[key])) {
+      span.attributes[key] = 0;
+    }
+  }
+  logger.debug("Transforming AI SDK span", {
+    spanName: span.name,
+    spanContext: span.spanContext(),
+    attributes: span.attributes
+  });
+  if (!shouldHandleSpan(span)) {
+    logger.debug("Skipping span transformation", { spanName: span.name });
+    return;
+  }
+  transformAiSdkSpanName(span);
+  transformAiSdkAttributes(span.attributes);
+};
+
+// src/internal/trace/processors/span-processor.ts
 var DEFAULT_MASKING_RULES = [
   {
     mode: "partial",
@@ -2275,16 +1465,6 @@ var DEFAULT_MASKING_RULES = [
     mode: "partial",
     attributePattern: "gen_ai.completion",
     patterns: DEFAULT_PII_PATTERNS
-  },
-  {
-    mode: "partial",
-    attributePattern: "traceloop.entity.input",
-    patterns: DEFAULT_PII_PATTERNS
-  },
-  {
-    mode: "partial",
-    attributePattern: "traceloop.entity.output",
-    patterns: DEFAULT_PII_PATTERNS
   }
 ];
 var BrizzSimpleSpanProcessor = class extends import_sdk_trace_base.SimpleSpanProcessor {
@@ -2309,7 +1489,7 @@ var BrizzSimpleSpanProcessor = class extends import_sdk_trace_base.SimpleSpanPro
     if (maskingConfig) {
       maskSpan(span, maskingConfig);
     }
-    const associationProperties =
+    const associationProperties = import_api4.context.active().getValue(PROPERTIES_CONTEXT_KEY);
     if (associationProperties) {
       for (const [key, value] of Object.entries(associationProperties)) {
         span.setAttribute(`${BRIZZ}.${key}`, value);
@@ -2317,6 +1497,10 @@ var BrizzSimpleSpanProcessor = class extends import_sdk_trace_base.SimpleSpanPro
     }
     super.onStart(span, parentContext);
   }
+  onEnd(span) {
+    transformAiSdkSpan(span);
+    super.onEnd(span);
+  }
 };
 var BrizzBatchSpanProcessor = class extends import_sdk_trace_base.BatchSpanProcessor {
   config;
@@ -2329,7 +1513,7 @@ var BrizzBatchSpanProcessor = class extends import_sdk_trace_base.BatchSpanProce
     if (maskingConfig) {
      maskSpan(span, maskingConfig);
     }
-    const associationProperties =
+    const associationProperties = import_api4.context.active().getValue(PROPERTIES_CONTEXT_KEY);
     if (associationProperties) {
       for (const [key, value] of Object.entries(associationProperties)) {
         span.setAttribute(`${BRIZZ}.${key}`, value);
@@ -2337,6 +1521,10 @@ var BrizzBatchSpanProcessor = class extends import_sdk_trace_base.BatchSpanProce
     }
     super.onStart(span, parentContext);
   }
+  onEnd(span) {
+    transformAiSdkSpan(span);
+    super.onEnd(span);
+  }
 };
 function maskSpan(span, config) {
   if (!span.attributes || Object.keys(span.attributes).length === 0) {
@@ -2424,8 +1612,9 @@ var TracingModule = class _TracingModule {
       disableBatch: config.disableBatch,
       hasMasking: !!config.masking?.spanMasking
     });
-
+    const spanProcessor = config.disableBatch ? new BrizzSimpleSpanProcessor(this.spanExporter, config) : new BrizzBatchSpanProcessor(this.spanExporter, config);
     logger.debug("Span processor initialized successfully");
+    this.spanProcessor = spanProcessor;
   }
   /**
    * Get the span exporter
@@ -2464,7 +1653,7 @@ function getSpanProcessor() {
 }

 // src/internal/trace/session.ts
-var
+var import_api5 = require("@opentelemetry/api");

 // src/internal/sdk.ts
 var _Brizz = class __Brizz {