braintrust 0.4.8 → 0.4.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1504,7 +1504,7 @@ var init_util = __esm({
1504
1504
 
1505
1505
  // src/generated_types.ts
1506
1506
  import { z as z6 } from "zod/v3";
1507
- var AclObjectType, Permission, Acl, AISecret, ResponseFormatJsonSchema, ResponseFormatNullish, AnyModelParams, ApiKey, AsyncScoringState, AsyncScoringControl, BraintrustAttachmentReference, ExternalAttachmentReference, AttachmentReference, UploadStatus, AttachmentStatus, BraintrustModelParams, CallEvent, ChatCompletionContentPartTextWithTitle, ChatCompletionContentPartImageWithTitle, ChatCompletionContentPart, ChatCompletionContentPartText, ChatCompletionMessageToolCall, ChatCompletionMessageReasoning, ChatCompletionMessageParam, ChatCompletionOpenAIMessageParam, ChatCompletionTool, CodeBundle, Dataset, ObjectReferenceNullish, DatasetEvent, EnvVar, RepoInfo, Experiment, SpanType, SpanAttributes, ExperimentEvent, ExtendedSavedFunctionId, PromptBlockDataNullish, ModelParams, PromptOptionsNullish, PromptParserNullish, SavedFunctionId, PromptDataNullish, FunctionTypeEnumNullish, FunctionIdRef, PromptBlockData, GraphNode, GraphEdge, GraphData, FunctionData, Function, FunctionFormat, PromptData, FunctionTypeEnum, FunctionId, FunctionObjectType, FunctionOutputType, GitMetadataSettings, Group, IfExists, InvokeParent, StreamingMode, InvokeFunction, MessageRole, ObjectReference, OnlineScoreConfig, Organization, ProjectSettings, Project, RetentionObjectType, ProjectAutomation, ProjectLogsEvent, ProjectScoreType, ProjectScoreCategory, ProjectScoreCategories, ProjectScoreConfig, ProjectScore, ProjectTag, Prompt, PromptOptions, PromptSessionEvent, ResponseFormat, Role, RunEval, ServiceToken, SpanIFrame, SSEConsoleEventData, SSEProgressEventData, ToolFunctionDefinition, User, ViewDataSearch, ViewData, ViewOptions, View;
1507
+ var AclObjectType, Permission, Acl, AISecret, ResponseFormatJsonSchema, ResponseFormatNullish, AnyModelParams, ApiKey, AsyncScoringState, AsyncScoringControl, BraintrustAttachmentReference, ExternalAttachmentReference, AttachmentReference, UploadStatus, AttachmentStatus, BraintrustModelParams, CallEvent, ChatCompletionContentPartTextWithTitle, ChatCompletionContentPartImageWithTitle, ChatCompletionContentPartFileFile, ChatCompletionContentPartFileWithTitle, ChatCompletionContentPart, ChatCompletionContentPartText, ChatCompletionMessageToolCall, ChatCompletionMessageReasoning, ChatCompletionMessageParam, ChatCompletionOpenAIMessageParam, ChatCompletionTool, CodeBundle, Dataset, ObjectReferenceNullish, DatasetEvent, EnvVar, RepoInfo, Experiment, SpanType, SpanAttributes, ExperimentEvent, ExtendedSavedFunctionId, PromptBlockDataNullish, ModelParams, PromptOptionsNullish, PromptParserNullish, SavedFunctionId, PromptDataNullish, FunctionTypeEnumNullish, FunctionIdRef, PromptBlockData, GraphNode, GraphEdge, GraphData, FunctionData, Function, FunctionFormat, PromptData, FunctionTypeEnum, FunctionId, FunctionObjectType, FunctionOutputType, GitMetadataSettings, Group, IfExists, InvokeParent, StreamingMode, InvokeFunction, MessageRole, ObjectReference, OnlineScoreConfig, Organization, ProjectSettings, Project, RetentionObjectType, ProjectAutomation, ProjectLogsEvent, ProjectScoreType, ProjectScoreCategory, ProjectScoreCategories, ProjectScoreConfig, ProjectScore, ProjectTag, Prompt, PromptOptions, PromptSessionEvent, ResponseFormat, Role, RunEval, ServiceToken, SpanIFrame, SSEConsoleEventData, SSEProgressEventData, ToolFunctionDefinition, User, ViewDataSearch, ViewData, ViewOptions, View;
1508
1508
  var init_generated_types = __esm({
1509
1509
  "src/generated_types.ts"() {
1510
1510
  "use strict";
@@ -1595,10 +1595,12 @@ var init_generated_types = __esm({
1595
1595
  ]).optional(),
1596
1596
  n: z6.number().optional(),
1597
1597
  stop: z6.array(z6.string()).optional(),
1598
- reasoning_effort: z6.enum(["minimal", "low", "medium", "high"]).optional(),
1598
+ reasoning_effort: z6.enum(["none", "minimal", "low", "medium", "high"]).optional(),
1599
1599
  verbosity: z6.enum(["low", "medium", "high"]).optional(),
1600
1600
  top_k: z6.number().optional(),
1601
1601
  stop_sequences: z6.array(z6.string()).optional(),
1602
+ reasoning_enabled: z6.boolean().optional(),
1603
+ reasoning_budget: z6.number().optional(),
1602
1604
  max_tokens_to_sample: z6.number().optional(),
1603
1605
  maxOutputTokens: z6.number().optional(),
1604
1606
  topP: z6.number().optional(),
@@ -1654,7 +1656,11 @@ var init_generated_types = __esm({
1654
1656
  upload_status: UploadStatus,
1655
1657
  error_message: z6.string().optional()
1656
1658
  });
1657
- BraintrustModelParams = z6.object({ use_cache: z6.boolean() }).partial();
1659
+ BraintrustModelParams = z6.object({
1660
+ use_cache: z6.boolean(),
1661
+ reasoning_enabled: z6.boolean(),
1662
+ reasoning_budget: z6.number()
1663
+ }).partial();
1658
1664
  CallEvent = z6.union([
1659
1665
  z6.object({
1660
1666
  id: z6.string().optional(),
@@ -1709,9 +1715,15 @@ var init_generated_types = __esm({
1709
1715
  }),
1710
1716
  type: z6.literal("image_url")
1711
1717
  });
1718
+ ChatCompletionContentPartFileFile = z6.object({ file_data: z6.string(), filename: z6.string(), file_id: z6.string() }).partial();
1719
+ ChatCompletionContentPartFileWithTitle = z6.object({
1720
+ file: ChatCompletionContentPartFileFile,
1721
+ type: z6.literal("file")
1722
+ });
1712
1723
  ChatCompletionContentPart = z6.union([
1713
1724
  ChatCompletionContentPartTextWithTitle,
1714
- ChatCompletionContentPartImageWithTitle
1725
+ ChatCompletionContentPartImageWithTitle,
1726
+ ChatCompletionContentPartFileWithTitle
1715
1727
  ]);
1716
1728
  ChatCompletionContentPartText = z6.object({
1717
1729
  text: z6.string().default(""),
@@ -1808,7 +1820,7 @@ var init_generated_types = __esm({
1808
1820
  });
1809
1821
  CodeBundle = z6.object({
1810
1822
  runtime_context: z6.object({
1811
- runtime: z6.enum(["node", "python"]),
1823
+ runtime: z6.enum(["node", "python", "browser"]),
1812
1824
  version: z6.string()
1813
1825
  }),
1814
1826
  location: z6.union([
@@ -1869,7 +1881,9 @@ var init_generated_types = __esm({
1869
1881
  span_id: z6.string(),
1870
1882
  root_span_id: z6.string(),
1871
1883
  is_root: z6.union([z6.boolean(), z6.null()]).optional(),
1872
- origin: ObjectReferenceNullish.optional()
1884
+ origin: ObjectReferenceNullish.optional(),
1885
+ comments: z6.union([z6.array(z6.unknown()), z6.null()]).optional(),
1886
+ audit_data: z6.union([z6.array(z6.unknown()), z6.null()]).optional()
1873
1887
  });
1874
1888
  EnvVar = z6.object({
1875
1889
  id: z6.string().uuid(),
@@ -1949,7 +1963,9 @@ var init_generated_types = __esm({
1949
1963
  root_span_id: z6.string(),
1950
1964
  span_attributes: SpanAttributes.optional(),
1951
1965
  is_root: z6.union([z6.boolean(), z6.null()]).optional(),
1952
- origin: ObjectReferenceNullish.optional()
1966
+ origin: ObjectReferenceNullish.optional(),
1967
+ comments: z6.union([z6.array(z6.unknown()), z6.null()]).optional(),
1968
+ audit_data: z6.union([z6.array(z6.unknown()), z6.null()]).optional()
1953
1969
  });
1954
1970
  ExtendedSavedFunctionId = z6.union([
1955
1971
  z6.object({ type: z6.literal("function"), id: z6.string() }),
@@ -1961,17 +1977,19 @@ var init_generated_types = __esm({
1961
1977
  })
1962
1978
  ]);
1963
1979
  PromptBlockDataNullish = z6.union([
1964
- z6.object({ type: z6.literal("completion"), content: z6.string() }),
1965
1980
  z6.object({
1966
1981
  type: z6.literal("chat"),
1967
1982
  messages: z6.array(ChatCompletionMessageParam),
1968
1983
  tools: z6.string().optional()
1969
1984
  }),
1985
+ z6.object({ type: z6.literal("completion"), content: z6.string() }),
1970
1986
  z6.null()
1971
1987
  ]);
1972
1988
  ModelParams = z6.union([
1973
1989
  z6.object({
1974
1990
  use_cache: z6.boolean(),
1991
+ reasoning_enabled: z6.boolean(),
1992
+ reasoning_budget: z6.number(),
1975
1993
  temperature: z6.number(),
1976
1994
  top_p: z6.number(),
1977
1995
  max_tokens: z6.number(),
@@ -1995,11 +2013,13 @@ var init_generated_types = __esm({
1995
2013
  ]),
1996
2014
  n: z6.number(),
1997
2015
  stop: z6.array(z6.string()),
1998
- reasoning_effort: z6.enum(["minimal", "low", "medium", "high"]),
2016
+ reasoning_effort: z6.enum(["none", "minimal", "low", "medium", "high"]),
1999
2017
  verbosity: z6.enum(["low", "medium", "high"])
2000
2018
  }).partial().passthrough(),
2001
2019
  z6.object({
2002
2020
  use_cache: z6.boolean().optional(),
2021
+ reasoning_enabled: z6.boolean().optional(),
2022
+ reasoning_budget: z6.number().optional(),
2003
2023
  max_tokens: z6.number(),
2004
2024
  temperature: z6.number(),
2005
2025
  top_p: z6.number().optional(),
@@ -2009,6 +2029,8 @@ var init_generated_types = __esm({
2009
2029
  }).passthrough(),
2010
2030
  z6.object({
2011
2031
  use_cache: z6.boolean(),
2032
+ reasoning_enabled: z6.boolean(),
2033
+ reasoning_budget: z6.number(),
2012
2034
  temperature: z6.number(),
2013
2035
  maxOutputTokens: z6.number(),
2014
2036
  topP: z6.number(),
@@ -2016,10 +2038,16 @@ var init_generated_types = __esm({
2016
2038
  }).partial().passthrough(),
2017
2039
  z6.object({
2018
2040
  use_cache: z6.boolean(),
2041
+ reasoning_enabled: z6.boolean(),
2042
+ reasoning_budget: z6.number(),
2019
2043
  temperature: z6.number(),
2020
2044
  topK: z6.number()
2021
2045
  }).partial().passthrough(),
2022
- z6.object({ use_cache: z6.boolean() }).partial().passthrough()
2046
+ z6.object({
2047
+ use_cache: z6.boolean(),
2048
+ reasoning_enabled: z6.boolean(),
2049
+ reasoning_budget: z6.number()
2050
+ }).partial().passthrough()
2023
2051
  ]);
2024
2052
  PromptOptionsNullish = z6.union([
2025
2053
  z6.object({ model: z6.string(), params: ModelParams, position: z6.string() }).partial(),
@@ -2055,17 +2083,17 @@ var init_generated_types = __esm({
2055
2083
  z6.null()
2056
2084
  ]);
2057
2085
  FunctionTypeEnumNullish = z6.union([
2058
- z6.enum(["llm", "scorer", "task", "tool"]),
2086
+ z6.enum(["llm", "scorer", "task", "tool", "custom_view"]),
2059
2087
  z6.null()
2060
2088
  ]);
2061
2089
  FunctionIdRef = z6.object({}).partial().passthrough();
2062
2090
  PromptBlockData = z6.union([
2063
- z6.object({ type: z6.literal("completion"), content: z6.string() }),
2064
2091
  z6.object({
2065
2092
  type: z6.literal("chat"),
2066
2093
  messages: z6.array(ChatCompletionMessageParam),
2067
2094
  tools: z6.string().optional()
2068
- })
2095
+ }),
2096
+ z6.object({ type: z6.literal("completion"), content: z6.string() })
2069
2097
  ]);
2070
2098
  GraphNode = z6.union([
2071
2099
  z6.object({
@@ -2133,7 +2161,7 @@ var init_generated_types = __esm({
2133
2161
  z6.object({
2134
2162
  type: z6.literal("inline"),
2135
2163
  runtime_context: z6.object({
2136
- runtime: z6.enum(["node", "python"]),
2164
+ runtime: z6.enum(["node", "python", "browser"]),
2137
2165
  version: z6.string()
2138
2166
  }),
2139
2167
  code: z6.string()
@@ -2192,7 +2220,13 @@ var init_generated_types = __esm({
2192
2220
  z6.null()
2193
2221
  ])
2194
2222
  }).partial();
2195
- FunctionTypeEnum = z6.enum(["llm", "scorer", "task", "tool"]);
2223
+ FunctionTypeEnum = z6.enum([
2224
+ "llm",
2225
+ "scorer",
2226
+ "task",
2227
+ "tool",
2228
+ "custom_view"
2229
+ ]);
2196
2230
  FunctionId = z6.union([
2197
2231
  z6.object({ function_id: z6.string(), version: z6.string().optional() }),
2198
2232
  z6.object({
@@ -2208,7 +2242,7 @@ var init_generated_types = __esm({
2208
2242
  }),
2209
2243
  z6.object({
2210
2244
  inline_context: z6.object({
2211
- runtime: z6.enum(["node", "python"]),
2245
+ runtime: z6.enum(["node", "python", "browser"]),
2212
2246
  version: z6.string()
2213
2247
  }),
2214
2248
  code: z6.string(),
@@ -2231,7 +2265,8 @@ var init_generated_types = __esm({
2231
2265
  "tool",
2232
2266
  "scorer",
2233
2267
  "task",
2234
- "agent"
2268
+ "agent",
2269
+ "custom_view"
2235
2270
  ]);
2236
2271
  FunctionOutputType = z6.enum(["completion", "score", "any"]);
2237
2272
  GitMetadataSettings = z6.object({
@@ -2359,7 +2394,8 @@ var init_generated_types = __esm({
2359
2394
  })
2360
2395
  ),
2361
2396
  z6.null()
2362
- ])
2397
+ ]),
2398
+ disable_realtime_queries: z6.union([z6.boolean(), z6.null()])
2363
2399
  }).partial(),
2364
2400
  z6.null()
2365
2401
  ]);
@@ -2447,7 +2483,10 @@ var init_generated_types = __esm({
2447
2483
  root_span_id: z6.string(),
2448
2484
  is_root: z6.union([z6.boolean(), z6.null()]).optional(),
2449
2485
  span_attributes: SpanAttributes.optional(),
2450
- origin: ObjectReferenceNullish.optional()
2486
+ origin: ObjectReferenceNullish.optional(),
2487
+ comments: z6.union([z6.array(z6.unknown()), z6.null()]).optional(),
2488
+ audit_data: z6.union([z6.array(z6.unknown()), z6.null()]).optional(),
2489
+ _async_scoring_state: z6.unknown().optional()
2451
2490
  });
2452
2491
  ProjectScoreType = z6.enum([
2453
2492
  "slider",
@@ -2662,7 +2701,7 @@ var init_generated_types = __esm({
2662
2701
  z6.null()
2663
2702
  ]);
2664
2703
  ViewData = z6.union([
2665
- z6.object({ search: ViewDataSearch }).partial(),
2704
+ z6.object({ search: ViewDataSearch, custom_charts: z6.unknown() }).partial(),
2666
2705
  z6.null()
2667
2706
  ]);
2668
2707
  ViewOptions = z6.union([
@@ -2728,7 +2767,8 @@ var init_generated_types = __esm({
2728
2767
  z6.string(),
2729
2768
  z6.object({ from: z6.string(), to: z6.string() }),
2730
2769
  z6.null()
2731
- ])
2770
+ ]),
2771
+ queryShape: z6.union([z6.enum(["traces", "spans"]), z6.null()])
2732
2772
  }).partial(),
2733
2773
  z6.null()
2734
2774
  ]);
@@ -2749,7 +2789,8 @@ var init_generated_types = __esm({
2749
2789
  "scorers",
2750
2790
  "logs",
2751
2791
  "agents",
2752
- "monitor"
2792
+ "monitor",
2793
+ "for_review"
2753
2794
  ]),
2754
2795
  name: z6.string(),
2755
2796
  created: z6.union([z6.string(), z6.null()]).optional(),
@@ -2933,7 +2974,7 @@ var init_stream = __esm({
2933
2974
  /**
2934
2975
  * Get the underlying ReadableStream.
2935
2976
  *
2936
- * @returns The underlying ReadableStream<BraintrustStreamChunk>.
2977
+ * @returns The underlying `ReadableStream<BraintrustStreamChunk>`.
2937
2978
  */
2938
2979
  toReadableStream() {
2939
2980
  return this.stream;
@@ -6771,6 +6812,19 @@ function renderMessage(render, message) {
6771
6812
  url: render(c.image_url.url)
6772
6813
  }
6773
6814
  };
6815
+ case "file":
6816
+ return {
6817
+ ...c,
6818
+ file: {
6819
+ file_data: render(c.file.file_data || ""),
6820
+ ...c.file.file_id && {
6821
+ file_id: render(c.file.file_id)
6822
+ },
6823
+ ...c.file.filename && {
6824
+ filename: render(c.file.filename)
6825
+ }
6826
+ }
6827
+ };
6774
6828
  default:
6775
6829
  const _exhaustiveCheck = c;
6776
6830
  return _exhaustiveCheck;
@@ -8529,6 +8583,7 @@ Error: ${errorText}`;
8529
8583
  },
8530
8584
  use_columnstore: false,
8531
8585
  brainstore_realtime: true,
8586
+ query_source: `js_sdk_object_fetcher_${this.objectType}`,
8532
8587
  ...this.pinnedVersion !== void 0 ? {
8533
8588
  version: this.pinnedVersion
8534
8589
  } : {}
@@ -12815,6 +12870,11 @@ function proxyCreate(target, hooks) {
12815
12870
  } else {
12816
12871
  const event = hooks.resultToEventFunc(result);
12817
12872
  const span = timedSpan.span;
12873
+ const ttft = getCurrentUnixTimestamp() - timedSpan.start;
12874
+ if (!event.metrics) {
12875
+ event.metrics = {};
12876
+ }
12877
+ event.metrics.time_to_first_token = ttft;
12818
12878
  span.log(event);
12819
12879
  span.end();
12820
12880
  return result;
@@ -13298,194 +13358,9 @@ var WrapperStream = class {
13298
13358
  }
13299
13359
  };
13300
13360
 
13301
- // src/wrappers/ai-sdk-5/middleware.ts
13302
- init_util();
13303
- init_logger();
13304
-
13305
- // src/wrappers/anthropic-tokens-util.ts
13306
- function finalizeAnthropicTokens(metrics2) {
13307
- const prompt_tokens = (metrics2.prompt_tokens || 0) + (metrics2.prompt_cached_tokens || 0) + (metrics2.prompt_cache_creation_tokens || 0);
13308
- return {
13309
- ...metrics2,
13310
- prompt_tokens,
13311
- tokens: prompt_tokens + (metrics2.completion_tokens || 0)
13312
- };
13313
- }
13314
- function extractAnthropicCacheTokens(cacheReadTokens = 0, cacheCreationTokens = 0) {
13315
- const cacheTokens = {};
13316
- if (cacheReadTokens > 0) {
13317
- cacheTokens.prompt_cached_tokens = cacheReadTokens;
13318
- }
13319
- if (cacheCreationTokens > 0) {
13320
- cacheTokens.prompt_cache_creation_tokens = cacheCreationTokens;
13321
- }
13322
- return cacheTokens;
13323
- }
13324
-
13325
- // src/wrappers/ai-sdk-shared/utils.ts
13361
+ // src/wrappers/ai-sdk/ai-sdk.ts
13326
13362
  init_logger();
13327
- function detectProviderFromResult(result) {
13328
- if (!result?.providerMetadata) {
13329
- return void 0;
13330
- }
13331
- const keys = Object.keys(result.providerMetadata);
13332
- return keys?.at(0);
13333
- }
13334
- function extractModelFromResult(result) {
13335
- if (result?.response?.modelId) {
13336
- return result.response.modelId;
13337
- }
13338
- if (result?.request?.body?.model) {
13339
- return result.request.body.model;
13340
- }
13341
- return void 0;
13342
- }
13343
- function extractModelFromWrapGenerateCallback(model) {
13344
- return model?.modelId;
13345
- }
13346
- function camelToSnake(str) {
13347
- return str.replace(/[A-Z]/g, (letter) => `_${letter.toLowerCase()}`);
13348
- }
13349
- function extractModelParameters(params, excludeKeys) {
13350
- const modelParams = {};
13351
- for (const [key, value] of Object.entries(params)) {
13352
- if (value !== void 0 && !excludeKeys.has(key)) {
13353
- const snakeKey = camelToSnake(key);
13354
- modelParams[snakeKey] = value;
13355
- }
13356
- }
13357
- return modelParams;
13358
- }
13359
- function getNumberProperty(obj, key) {
13360
- if (!obj || typeof obj !== "object" || !(key in obj)) {
13361
- return void 0;
13362
- }
13363
- const value = Reflect.get(obj, key);
13364
- return typeof value === "number" ? value : void 0;
13365
- }
13366
- function normalizeUsageMetrics(usage, provider, providerMetadata) {
13367
- const metrics2 = {};
13368
- const inputTokens = getNumberProperty(usage, "inputTokens");
13369
- if (inputTokens !== void 0) {
13370
- metrics2.prompt_tokens = inputTokens;
13371
- }
13372
- const outputTokens = getNumberProperty(usage, "outputTokens");
13373
- if (outputTokens !== void 0) {
13374
- metrics2.completion_tokens = outputTokens;
13375
- }
13376
- const totalTokens = getNumberProperty(usage, "totalTokens");
13377
- if (totalTokens !== void 0) {
13378
- metrics2.tokens = totalTokens;
13379
- }
13380
- const reasoningTokens = getNumberProperty(usage, "reasoningTokens");
13381
- if (reasoningTokens !== void 0) {
13382
- metrics2.completion_reasoning_tokens = reasoningTokens;
13383
- }
13384
- const cachedInputTokens = getNumberProperty(usage, "cachedInputTokens");
13385
- if (cachedInputTokens !== void 0) {
13386
- metrics2.prompt_cached_tokens = cachedInputTokens;
13387
- }
13388
- if (provider === "anthropic") {
13389
- const anthropicMetadata = providerMetadata?.anthropic;
13390
- if (anthropicMetadata) {
13391
- const cacheReadTokens = getNumberProperty(anthropicMetadata.usage, "cache_read_input_tokens") || 0;
13392
- const cacheCreationTokens = getNumberProperty(
13393
- anthropicMetadata.usage,
13394
- "cache_creation_input_tokens"
13395
- ) || 0;
13396
- const cacheTokens = extractAnthropicCacheTokens(
13397
- cacheReadTokens,
13398
- cacheCreationTokens
13399
- );
13400
- Object.assign(metrics2, cacheTokens);
13401
- Object.assign(metrics2, finalizeAnthropicTokens(metrics2));
13402
- }
13403
- }
13404
- return metrics2;
13405
- }
13406
- function normalizeFinishReason(reason) {
13407
- if (typeof reason !== "string") return void 0;
13408
- return reason.replace(/-/g, "_");
13409
- }
13410
- function extractToolCallsFromSteps(steps) {
13411
- const toolCalls = [];
13412
- if (!Array.isArray(steps)) return toolCalls;
13413
- let idx = 0;
13414
- for (const step of steps) {
13415
- const blocks = step?.content;
13416
- if (!Array.isArray(blocks)) continue;
13417
- for (const block of blocks) {
13418
- if (block && typeof block === "object" && block.type === "tool-call") {
13419
- toolCalls.push({
13420
- id: block.toolCallId,
13421
- type: "function",
13422
- index: idx++,
13423
- function: {
13424
- name: block.toolName,
13425
- arguments: typeof block.input === "string" ? block.input : JSON.stringify(block.input ?? {})
13426
- }
13427
- });
13428
- }
13429
- }
13430
- }
13431
- return toolCalls;
13432
- }
13433
- function buildAssistantOutputWithToolCalls(result, toolCalls) {
13434
- return [
13435
- {
13436
- index: 0,
13437
- logprobs: null,
13438
- finish_reason: normalizeFinishReason(result?.finishReason) ?? (toolCalls.length ? "tool_calls" : void 0),
13439
- message: {
13440
- role: "assistant",
13441
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
13442
- }
13443
- }
13444
- ];
13445
- }
13446
- function extractToolCallsFromBlocks(blocks) {
13447
- if (!Array.isArray(blocks)) return [];
13448
- return extractToolCallsFromSteps([{ content: blocks }]);
13449
- }
13450
- function wrapTools(tools) {
13451
- if (!tools) return tools;
13452
- const inferName = (tool, fallback2) => tool && (tool.name || tool.toolName || tool.id) || fallback2;
13453
- if (Array.isArray(tools)) {
13454
- const arr = tools;
13455
- const out = arr.map((tool, idx) => {
13456
- if (tool != null && typeof tool === "object" && "execute" in tool && typeof tool.execute === "function") {
13457
- const name = inferName(tool, `tool[${idx}]`);
13458
- return {
13459
- ...tool,
13460
- execute: wrapTraced(tool.execute.bind(tool), {
13461
- name,
13462
- type: "tool"
13463
- })
13464
- };
13465
- }
13466
- return tool;
13467
- });
13468
- return out;
13469
- }
13470
- const wrappedTools = {};
13471
- for (const [key, tool] of Object.entries(tools)) {
13472
- if (tool != null && typeof tool === "object" && "execute" in tool && typeof tool.execute === "function") {
13473
- wrappedTools[key] = {
13474
- ...tool,
13475
- execute: wrapTraced(tool.execute.bind(tool), {
13476
- name: key,
13477
- type: "tool"
13478
- })
13479
- };
13480
- } else {
13481
- wrappedTools[key] = tool;
13482
- }
13483
- }
13484
- return wrappedTools;
13485
- }
13486
- function extractInput(params) {
13487
- return params?.prompt ?? params?.messages ?? params?.system;
13488
- }
13363
+ init_util();
13489
13364
 
13490
13365
  // src/wrappers/attachment-utils.ts
13491
13366
  init_logger();
@@ -13550,7 +13425,7 @@ function processInputAttachments(input) {
13550
13425
  return input;
13551
13426
  }
13552
13427
  let attachmentIndex = 0;
13553
- const processContentPart = (part) => {
13428
+ const processContentPart2 = (part) => {
13554
13429
  if (!part || typeof part !== "object") {
13555
13430
  return part;
13556
13431
  }
@@ -13598,940 +13473,1437 @@ function processInputAttachments(input) {
13598
13473
  }
13599
13474
  return part;
13600
13475
  };
13601
- const processMessage = (message) => {
13476
+ const processMessage2 = (message) => {
13602
13477
  if (!message || typeof message !== "object") {
13603
13478
  return message;
13604
13479
  }
13605
13480
  if (Array.isArray(message.content)) {
13606
13481
  return {
13607
13482
  ...message,
13608
- content: message.content.map(processContentPart)
13483
+ content: message.content.map(processContentPart2)
13609
13484
  };
13610
13485
  }
13611
13486
  return message;
13612
13487
  };
13613
13488
  if (Array.isArray(input)) {
13614
- return input.map(processMessage);
13489
+ return input.map(processMessage2);
13615
13490
  } else if (typeof input === "object" && input.content) {
13616
- return processMessage(input);
13491
+ return processMessage2(input);
13617
13492
  }
13618
13493
  return input;
13619
13494
  }
13620
13495
 
13621
- // src/wrappers/ai-sdk-5/middleware.ts
13622
- var V2_EXCLUDE_KEYS = /* @__PURE__ */ new Set([
13623
- "prompt",
13624
- // Already captured as input
13625
- "system",
13626
- // Already captured as input
13627
- "messages",
13628
- // Already captured as input
13629
- "model",
13630
- // Already captured in metadata.model
13631
- "providerOptions"
13632
- // Internal AI SDK configuration
13633
- ]);
13634
- function BraintrustMiddleware(config = {}) {
13635
- return {
13636
- wrapGenerate: async ({
13637
- doGenerate,
13638
- params,
13639
- model: modelFromWrapGenerate
13640
- }) => {
13641
- const rawInput = extractInput(params);
13642
- const processedInput = processInputAttachments(rawInput);
13643
- const spanArgs = {
13644
- name: config.spanInfo?.name || "ai-sdk.doGenerate",
13496
+ // src/wrappers/ai-sdk/ai-sdk.ts
13497
+ import { zodToJsonSchema } from "zod-to-json-schema";
13498
+ var DENY_OUTPUT_PATHS = [
13499
+ // v3
13500
+ "roundtrips[].request.body",
13501
+ "roundtrips[].response.headers",
13502
+ "rawResponse.headers",
13503
+ "responseMessages",
13504
+ // v5
13505
+ "request.body",
13506
+ "response.body",
13507
+ "response.headers",
13508
+ "steps[].request.body",
13509
+ "steps[].response.body",
13510
+ "steps[].response.headers"
13511
+ ];
13512
+ function wrapAISDK(aiSDK, options = {}) {
13513
+ return new Proxy(aiSDK, {
13514
+ get(target, prop, receiver) {
13515
+ const original = Reflect.get(target, prop, receiver);
13516
+ switch (prop) {
13517
+ case "generateText":
13518
+ return wrapGenerateText(original, options);
13519
+ case "streamText":
13520
+ return wrapStreamText(original, options);
13521
+ case "generateObject":
13522
+ return wrapGenerateObject(original, options);
13523
+ case "streamObject":
13524
+ return wrapStreamObject(original, options);
13525
+ }
13526
+ return original;
13527
+ }
13528
+ });
13529
+ }
13530
+ var wrapGenerateText = (generateText, options = {}) => {
13531
+ return async function wrappedGenerateText(params) {
13532
+ return traced(
13533
+ async (span) => {
13534
+ const result = await generateText({
13535
+ ...params,
13536
+ tools: wrapTools(params.tools)
13537
+ });
13538
+ span.log({
13539
+ output: await processOutput(result, options.denyOutputPaths),
13540
+ metrics: extractTokenMetrics(result)
13541
+ });
13542
+ return result;
13543
+ },
13544
+ {
13545
+ name: "generateText",
13645
13546
  spanAttributes: {
13646
- type: "llm" /* LLM */,
13647
- ...config.spanInfo?.spanAttributes || {}
13547
+ type: "llm" /* LLM */
13648
13548
  },
13649
13549
  event: {
13650
- input: processedInput,
13550
+ input: processInputAttachments2(params),
13651
13551
  metadata: {
13652
- ...extractModelParameters(params, V2_EXCLUDE_KEYS),
13653
- ...config.spanInfo?.metadata || {}
13552
+ model: serializeModel(params.model),
13553
+ braintrust: {
13554
+ integration_name: "ai-sdk",
13555
+ sdk_language: "typescript"
13556
+ }
13654
13557
  }
13655
13558
  }
13656
- };
13657
- const span = startSpan(spanArgs);
13658
- try {
13659
- const result = await doGenerate();
13660
- const metadata = {};
13661
- const provider = detectProviderFromResult(result);
13662
- if (provider !== void 0) {
13663
- metadata.provider = provider;
13664
- }
13665
- if (result.finishReason !== void 0) {
13666
- metadata.finish_reason = result.finishReason;
13667
- }
13668
- const model = extractModelFromResult(result);
13669
- if (model !== void 0) {
13670
- metadata.model = model;
13671
- } else if (modelFromWrapGenerate) {
13672
- const modelId = extractModelFromWrapGenerateCallback(
13673
- modelFromWrapGenerate
13674
- );
13675
- if (modelId) {
13676
- metadata.model = modelId;
13677
- }
13678
- }
13679
- let toolCalls = extractToolCallsFromSteps(result?.steps);
13680
- if (!toolCalls || toolCalls.length === 0) {
13681
- toolCalls = extractToolCallsFromBlocks(result?.content);
13682
- }
13683
- span.log({
13684
- output: toolCalls.length > 0 ? buildAssistantOutputWithToolCalls(result, toolCalls) : result?.content,
13685
- metadata,
13686
- metrics: normalizeUsageMetrics(
13687
- result.usage,
13688
- provider,
13689
- result.providerMetadata
13690
- )
13559
+ }
13560
+ );
13561
+ };
13562
+ };
13563
+ var wrapGenerateObject = (generateObject, options = {}) => {
13564
+ return async function wrappedGenerateObject(params) {
13565
+ return traced(
13566
+ async (span) => {
13567
+ const result = await generateObject({
13568
+ ...params,
13569
+ tools: wrapTools(params.tools)
13691
13570
  });
13692
- return result;
13693
- } catch (error2) {
13694
13571
  span.log({
13695
- error: error2 instanceof Error ? error2.message : String(error2)
13572
+ output: processOutput(result, options.denyOutputPaths),
13573
+ metrics: extractTokenMetrics(result)
13696
13574
  });
13697
- throw error2;
13698
- } finally {
13699
- span.end();
13700
- }
13701
- },
13702
- wrapStream: async ({ doStream, params }) => {
13703
- const rawInput = extractInput(params);
13704
- const processedInput = processInputAttachments(rawInput);
13705
- const spanArgs = {
13706
- name: config.spanInfo?.name || "ai-sdk.doStream",
13575
+ return result;
13576
+ },
13577
+ {
13578
+ name: "generateObject",
13707
13579
  spanAttributes: {
13708
- type: "llm" /* LLM */,
13709
- ...config.spanInfo?.spanAttributes || {}
13580
+ type: "llm" /* LLM */
13710
13581
  },
13711
13582
  event: {
13712
- input: processedInput,
13583
+ input: processInputAttachments2(params),
13713
13584
  metadata: {
13714
- ...extractModelParameters(params, V2_EXCLUDE_KEYS),
13715
- ...config.spanInfo?.metadata || {}
13585
+ model: serializeModel(params.model),
13586
+ braintrust: {
13587
+ integration_name: "ai-sdk",
13588
+ sdk_language: "typescript"
13589
+ }
13716
13590
  }
13717
13591
  }
13718
- };
13719
- const span = startSpan(spanArgs);
13720
- try {
13721
- const { stream, ...rest } = await doStream();
13722
- const textChunks = [];
13723
- const toolBlocks = [];
13724
- let finalUsage = {};
13725
- let finalFinishReason = void 0;
13726
- let providerMetadata = {};
13727
- const transformStream = new TransformStream({
13728
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
13729
- transform(chunk, controller) {
13730
- try {
13731
- if (chunk.type === "text-delta" && chunk.delta) {
13732
- textChunks.push(chunk.delta);
13733
- }
13734
- if (chunk.type === "tool-call" || chunk.type === "tool-result") {
13735
- toolBlocks.push(chunk);
13736
- }
13737
- if (chunk.type === "finish") {
13738
- finalFinishReason = chunk.finishReason;
13739
- finalUsage = chunk.usage || {};
13740
- providerMetadata = chunk.providerMetadata || {};
13741
- }
13742
- controller.enqueue(chunk);
13743
- } catch (error2) {
13592
+ }
13593
+ );
13594
+ };
13595
+ };
13596
+ var wrapStreamText = (streamText, options = {}) => {
13597
+ return function wrappedStreamText(params) {
13598
+ const span = startSpan({
13599
+ name: "streamText",
13600
+ spanAttributes: {
13601
+ type: "llm" /* LLM */
13602
+ },
13603
+ event: {
13604
+ input: processInputAttachments2(params),
13605
+ metadata: {
13606
+ model: serializeModel(params.model),
13607
+ braintrust: {
13608
+ integration_name: "ai-sdk",
13609
+ sdk_language: "typescript"
13610
+ }
13611
+ }
13612
+ }
13613
+ });
13614
+ try {
13615
+ const startTime = Date.now();
13616
+ let receivedFirst = false;
13617
+ const result = withCurrent(
13618
+ span,
13619
+ () => streamText({
13620
+ ...params,
13621
+ tools: wrapTools(params.tools),
13622
+ onChunk: (chunk) => {
13623
+ if (!receivedFirst) {
13624
+ receivedFirst = true;
13744
13625
  span.log({
13745
- error: error2 instanceof Error ? error2.message : String(error2)
13746
- });
13747
- span.end();
13748
- controller.error(error2);
13749
- }
13750
- },
13751
- flush() {
13752
- try {
13753
- const generatedText = textChunks.join("");
13754
- let output = generatedText ? [{ type: "text", text: generatedText }] : [];
13755
- const resultForDetection = {
13756
- providerMetadata,
13757
- response: rest.response,
13758
- ...rest,
13759
- finishReason: finalFinishReason
13760
- };
13761
- const metadata = {};
13762
- const provider = detectProviderFromResult(resultForDetection);
13763
- if (provider !== void 0) {
13764
- metadata.provider = provider;
13765
- }
13766
- if (finalFinishReason !== void 0) {
13767
- metadata.finish_reason = finalFinishReason;
13768
- }
13769
- const model = extractModelFromResult(resultForDetection);
13770
- if (model !== void 0) {
13771
- metadata.model = model;
13772
- }
13773
- if (toolBlocks.length > 0) {
13774
- const toolCalls = extractToolCallsFromSteps([
13775
- { content: toolBlocks }
13776
- ]);
13777
- if (toolCalls.length > 0) {
13778
- output = buildAssistantOutputWithToolCalls(
13779
- resultForDetection,
13780
- toolCalls
13781
- );
13626
+ metrics: {
13627
+ time_to_first_token: (Date.now() - startTime) / 1e3
13782
13628
  }
13783
- }
13784
- span.log({
13785
- output,
13786
- metadata,
13787
- metrics: normalizeUsageMetrics(
13788
- finalUsage,
13789
- provider,
13790
- providerMetadata
13791
- )
13792
- });
13793
- span.end();
13794
- } catch (error2) {
13795
- span.log({
13796
- error: error2 instanceof Error ? error2.message : String(error2)
13797
13629
  });
13798
- span.end();
13799
- throw error2;
13800
13630
  }
13631
+ params.onChunk?.(chunk);
13632
+ },
13633
+ onFinish: async (event) => {
13634
+ params.onFinish?.(event);
13635
+ span.log({
13636
+ output: await processOutput(event, options.denyOutputPaths),
13637
+ metrics: extractTokenMetrics(event)
13638
+ });
13639
+ span.end();
13640
+ },
13641
+ onError: async (err) => {
13642
+ params.onError?.(err);
13643
+ span.log({
13644
+ error: serializeError(err)
13645
+ });
13646
+ span.end();
13801
13647
  }
13648
+ })
13649
+ );
13650
+ const trackFirstToken = () => {
13651
+ if (!receivedFirst) {
13652
+ receivedFirst = true;
13653
+ span.log({
13654
+ metrics: {
13655
+ time_to_first_token: (Date.now() - startTime) / 1e3
13656
+ }
13657
+ });
13658
+ }
13659
+ };
13660
+ if (result && result.baseStream) {
13661
+ const [stream1, stream2] = result.baseStream.tee();
13662
+ result.baseStream = stream2;
13663
+ stream1.pipeThrough(
13664
+ new TransformStream({
13665
+ transform(chunk, controller) {
13666
+ trackFirstToken();
13667
+ controller.enqueue(chunk);
13668
+ }
13669
+ })
13670
+ ).pipeTo(
13671
+ new WritableStream({
13672
+ write() {
13673
+ }
13674
+ })
13675
+ ).catch(() => {
13802
13676
  });
13803
- return {
13804
- stream: stream.pipeThrough(transformStream),
13805
- ...rest
13806
- };
13807
- } catch (error2) {
13808
- span.log({
13809
- error: error2 instanceof Error ? error2.message : String(error2)
13810
- });
13811
- span.end();
13812
- throw error2;
13813
13677
  }
13678
+ return result;
13679
+ } catch (error2) {
13680
+ span.log({
13681
+ error: serializeError(error2)
13682
+ });
13683
+ span.end();
13684
+ throw error2;
13814
13685
  }
13815
13686
  };
13816
- }
13817
-
13818
- // src/wrappers/ai-sdk-4/ai-sdk.ts
13819
- init_logger();
13820
- init_util2();
13821
- function wrapAISDKModel(model) {
13822
- const m = model;
13823
- if (m?.specificationVersion === "v1" && typeof m?.provider === "string" && typeof m?.modelId === "string") {
13824
- return new BraintrustLanguageModelWrapper(m);
13825
- } else {
13826
- console.warn("Unsupported AI SDK model. Not wrapping.");
13827
- return model;
13828
- }
13829
- }
13830
- var BraintrustLanguageModelWrapper = class {
13831
- constructor(model) {
13832
- this.model = model;
13833
- if (typeof this.model.supportsUrl === "function") {
13834
- this.supportsUrl = (url) => this.model.supportsUrl(url);
13835
- }
13836
- }
13837
- supportsUrl;
13838
- get specificationVersion() {
13839
- return this.model.specificationVersion;
13840
- }
13841
- get provider() {
13842
- return this.model.provider;
13843
- }
13844
- get modelId() {
13845
- return this.model.modelId;
13846
- }
13847
- get defaultObjectGenerationMode() {
13848
- return this.model.defaultObjectGenerationMode;
13849
- }
13850
- get supportsImageUrls() {
13851
- return this.model.supportsImageUrls;
13852
- }
13853
- get supportsStructuredOutputs() {
13854
- return this.model.supportsStructuredOutputs;
13855
- }
13856
- // For the first cut, do not support custom span_info arguments. We can
13857
- // propagate those via async local storage
13858
- async doGenerate(options) {
13687
+ };
13688
+ var wrapStreamObject = (streamObject, options = {}) => {
13689
+ return function wrappedStreamObject(params) {
13859
13690
  const span = startSpan({
13860
- name: "Chat Completion",
13691
+ name: "streamObject",
13861
13692
  spanAttributes: {
13862
- type: "llm"
13693
+ type: "llm" /* LLM */
13694
+ },
13695
+ event: {
13696
+ input: processInputAttachments2(params),
13697
+ metadata: {
13698
+ model: serializeModel(params.model),
13699
+ braintrust: {
13700
+ integration_name: "ai-sdk",
13701
+ sdk_language: "typescript"
13702
+ }
13703
+ }
13863
13704
  }
13864
13705
  });
13865
- const { prompt, mode, ...rest } = options;
13866
- const startTime = getCurrentUnixTimestamp();
13867
13706
  try {
13868
- const ret = await this.model.doGenerate(options);
13869
- span.log({
13870
- input: postProcessPrompt(prompt),
13871
- metadata: {
13872
- model: this.modelId,
13873
- ...rest,
13874
- ..."tools" in mode && mode.tools ? { tools: convertTools(mode.tools) } : "tool" in mode && mode.tool ? { tools: convertTools([mode.tool]) } : {}
13875
- },
13876
- output: postProcessOutput(ret.text, ret.toolCalls, ret.finishReason),
13877
- metrics: {
13878
- time_to_first_token: getCurrentUnixTimestamp() - startTime,
13879
- tokens: !isEmpty(ret.usage) ? ret.usage.promptTokens + ret.usage.completionTokens : void 0,
13880
- prompt_tokens: ret.usage?.promptTokens,
13881
- completion_tokens: ret.usage?.completionTokens,
13882
- cached: parseCachedHeader(
13883
- ret.rawResponse?.headers?.[X_CACHED_HEADER] ?? ret.rawResponse?.headers?.[LEGACY_CACHED_HEADER]
13884
- )
13707
+ const startTime = Date.now();
13708
+ let receivedFirst = false;
13709
+ const result = withCurrent(
13710
+ span,
13711
+ () => streamObject({
13712
+ ...params,
13713
+ tools: wrapTools(params.tools),
13714
+ onChunk: (chunk) => {
13715
+ if (!receivedFirst) {
13716
+ receivedFirst = true;
13717
+ span.log({
13718
+ metrics: {
13719
+ time_to_first_token: (Date.now() - startTime) / 1e3
13720
+ }
13721
+ });
13722
+ }
13723
+ params.onChunk?.(chunk);
13724
+ },
13725
+ onFinish: async (event) => {
13726
+ params.onFinish?.(event);
13727
+ span.log({
13728
+ output: await processOutput(event, options.denyOutputPaths),
13729
+ metrics: extractTokenMetrics(event)
13730
+ });
13731
+ span.end();
13732
+ },
13733
+ onError: async (err) => {
13734
+ params.onError?.(err);
13735
+ span.log({
13736
+ error: serializeError(err)
13737
+ });
13738
+ span.end();
13739
+ }
13740
+ })
13741
+ );
13742
+ const trackFirstToken = () => {
13743
+ if (!receivedFirst) {
13744
+ receivedFirst = true;
13745
+ span.log({
13746
+ metrics: {
13747
+ time_to_first_token: (Date.now() - startTime) / 1e3
13748
+ }
13749
+ });
13885
13750
  }
13751
+ };
13752
+ if (result && result.baseStream) {
13753
+ const [stream1, stream2] = result.baseStream.tee();
13754
+ result.baseStream = stream2;
13755
+ stream1.pipeThrough(
13756
+ new TransformStream({
13757
+ transform(chunk, controller) {
13758
+ trackFirstToken();
13759
+ controller.enqueue(chunk);
13760
+ }
13761
+ })
13762
+ ).pipeTo(
13763
+ new WritableStream({
13764
+ write() {
13765
+ }
13766
+ })
13767
+ ).catch(() => {
13768
+ });
13769
+ }
13770
+ return result;
13771
+ } catch (error2) {
13772
+ span.log({
13773
+ error: serializeError(error2)
13886
13774
  });
13887
- return ret;
13888
- } finally {
13889
13775
  span.end();
13776
+ throw error2;
13890
13777
  }
13891
- }
13892
- async doStream(options) {
13893
- const { prompt, mode, ...rest } = options;
13894
- const startTime = getCurrentUnixTimestamp();
13895
- const span = startSpan({
13896
- name: "Chat Completion",
13897
- spanAttributes: {
13898
- type: "llm"
13899
- }
13778
+ };
13779
+ };
13780
+ var wrapTools = (tools) => {
13781
+ if (!tools) return tools;
13782
+ const inferName = (tool, fallback2) => tool && (tool.name || tool.toolName || tool.id) || fallback2;
13783
+ if (Array.isArray(tools)) {
13784
+ return tools.map((tool, idx) => {
13785
+ const name = inferName(tool, `tool[${idx}]`);
13786
+ return wrapToolExecute(tool, name);
13900
13787
  });
13901
- span.log({
13902
- input: postProcessPrompt(prompt),
13903
- metadata: {
13904
- model: this.modelId,
13905
- ...rest,
13906
- ..."tools" in mode && mode.tools ? { tools: convertTools(mode.tools) } : "tool" in mode && mode.tool ? { tools: convertTools([mode.tool]) } : {}
13788
+ }
13789
+ const wrappedTools = {};
13790
+ for (const [key, tool] of Object.entries(tools)) {
13791
+ wrappedTools[key] = wrapToolExecute(tool, key);
13792
+ }
13793
+ return wrappedTools;
13794
+ };
13795
+ var wrapToolExecute = (tool, name) => {
13796
+ if (tool != null && typeof tool === "object" && "execute" in tool && typeof tool.execute === "function") {
13797
+ const originalExecute = tool.execute;
13798
+ return new Proxy(tool, {
13799
+ get(target, prop) {
13800
+ if (prop === "execute") {
13801
+ return (...args) => traced(
13802
+ async (span) => {
13803
+ span.log({ input: args.length === 1 ? args[0] : args });
13804
+ const result = await originalExecute.apply(target, args);
13805
+ span.log({ output: result });
13806
+ return result;
13807
+ },
13808
+ {
13809
+ name,
13810
+ spanAttributes: {
13811
+ type: "tool" /* TOOL */
13812
+ }
13813
+ }
13814
+ );
13815
+ }
13816
+ return target[prop];
13817
+ },
13818
+ // Implement additional traps for full transparency
13819
+ has(target, prop) {
13820
+ return prop in target;
13821
+ },
13822
+ ownKeys(target) {
13823
+ return Reflect.ownKeys(target);
13824
+ },
13825
+ getOwnPropertyDescriptor(target, prop) {
13826
+ return Object.getOwnPropertyDescriptor(target, prop);
13827
+ },
13828
+ set(target, prop, value) {
13829
+ target[prop] = value;
13830
+ return true;
13831
+ },
13832
+ deleteProperty(target, prop) {
13833
+ delete target[prop];
13834
+ return true;
13835
+ },
13836
+ defineProperty(target, prop, descriptor) {
13837
+ Object.defineProperty(target, prop, descriptor);
13838
+ return true;
13839
+ },
13840
+ getPrototypeOf(target) {
13841
+ return Object.getPrototypeOf(target);
13842
+ },
13843
+ setPrototypeOf(target, proto) {
13844
+ Object.setPrototypeOf(target, proto);
13845
+ return true;
13846
+ },
13847
+ isExtensible(target) {
13848
+ return Object.isExtensible(target);
13849
+ },
13850
+ preventExtensions(target) {
13851
+ Object.preventExtensions(target);
13852
+ return true;
13907
13853
  }
13908
13854
  });
13909
- let ended = false;
13910
- const end = () => {
13911
- if (!ended) {
13912
- span.end();
13913
- ended = true;
13914
- }
13915
- };
13855
+ }
13856
+ return tool;
13857
+ };
13858
+ var serializeError = (error2) => {
13859
+ if (error2 instanceof Error) {
13860
+ return error2;
13861
+ }
13862
+ if (typeof error2 === "object" && error2 !== null) {
13916
13863
  try {
13917
- const ret = await this.model.doStream(options);
13918
- let time_to_first_token = void 0;
13919
- let usage = void 0;
13920
- let fullText = void 0;
13921
- const toolCalls = {};
13922
- let finishReason = void 0;
13923
- return {
13924
- ...ret,
13925
- stream: ret.stream.pipeThrough(
13926
- new TransformStream({
13927
- transform(chunk, controller) {
13928
- if (time_to_first_token === void 0) {
13929
- time_to_first_token = getCurrentUnixTimestamp() - startTime;
13930
- span.log({ metrics: { time_to_first_token } });
13931
- }
13932
- switch (chunk.type) {
13933
- case "text-delta":
13934
- if (fullText === void 0) {
13935
- fullText = "";
13936
- }
13937
- fullText += chunk.textDelta;
13938
- break;
13939
- case "tool-call":
13940
- toolCalls[chunk.toolCallId] = {
13941
- toolCallType: chunk.toolCallType,
13942
- toolCallId: chunk.toolCallId,
13943
- toolName: chunk.toolName,
13944
- args: chunk.args
13945
- };
13946
- break;
13947
- case "tool-call-delta":
13948
- if (toolCalls[chunk.toolCallId] === void 0) {
13949
- toolCalls[chunk.toolCallId] = {
13950
- toolCallType: chunk.toolCallType,
13951
- toolCallId: chunk.toolCallId,
13952
- toolName: chunk.toolName,
13953
- args: ""
13954
- };
13955
- }
13956
- toolCalls[chunk.toolCallId].args += chunk.argsTextDelta;
13957
- break;
13958
- case "finish":
13959
- usage = chunk.usage;
13960
- finishReason = chunk.finishReason;
13961
- break;
13962
- }
13963
- controller.enqueue(chunk);
13964
- },
13965
- async flush(controller) {
13966
- span.log({
13967
- output: postProcessOutput(
13968
- fullText,
13969
- Object.keys(toolCalls).length > 0 ? Object.values(toolCalls) : void 0,
13970
- finishReason
13971
- ),
13972
- metrics: {
13973
- time_to_first_token,
13974
- tokens: !isEmpty(usage) ? usage.promptTokens + usage.completionTokens : void 0,
13975
- prompt_tokens: usage?.promptTokens,
13976
- completion_tokens: usage?.completionTokens,
13977
- cached: parseCachedHeader(
13978
- ret.rawResponse?.headers?.[X_CACHED_HEADER] ?? ret.rawResponse?.headers?.[LEGACY_CACHED_HEADER]
13979
- )
13980
- }
13981
- });
13982
- end();
13983
- controller.terminate();
13984
- }
13985
- })
13986
- )
13987
- };
13988
- } finally {
13989
- end();
13864
+ return JSON.stringify(error2);
13865
+ } catch {
13990
13866
  }
13991
13867
  }
13868
+ return String(error2);
13992
13869
  };
13993
- function convertTools(tools) {
13994
- return tools.map((tool) => {
13995
- const { type: _, ...rest } = tool;
13870
+ var serializeModel = (model) => {
13871
+ return typeof model === "string" ? model : model?.modelId;
13872
+ };
13873
+ var isZodSchema = (value) => {
13874
+ return value != null && typeof value === "object" && "_def" in value && typeof value._def === "object";
13875
+ };
13876
+ var serializeZodSchema = (schema) => {
13877
+ try {
13878
+ return zodToJsonSchema(schema);
13879
+ } catch {
13996
13880
  return {
13997
- type: tool.type,
13998
- function: rest
13881
+ type: "object",
13882
+ description: "Zod schema (conversion failed)"
13999
13883
  };
14000
- });
14001
- }
14002
- function postProcessPrompt(prompt) {
14003
- return prompt.flatMap((message) => {
14004
- switch (message.role) {
14005
- case "system":
14006
- return [
14007
- {
14008
- role: "system",
14009
- content: message.content
14010
- }
14011
- ];
14012
- case "assistant":
14013
- const textPart = message.content.find(
14014
- (part) => part.type === "text"
14015
- );
14016
- const toolCallParts = (
14017
- // eslint-disable-next-line @typescript-eslint/consistent-type-assertions
14018
- message.content.filter(
14019
- (part) => part.type === "tool-call"
14020
- )
14021
- );
14022
- return [
14023
- {
14024
- role: "assistant",
14025
- content: textPart?.text,
14026
- ...toolCallParts.length > 0 ? {
14027
- tool_calls: toolCallParts.map((part) => ({
14028
- id: part.toolCallId,
14029
- function: {
14030
- name: part.toolName,
14031
- arguments: JSON.stringify(part.args)
14032
- },
14033
- type: "function"
14034
- }))
14035
- } : {}
14036
- }
14037
- ];
14038
- case "user":
14039
- return [
14040
- {
14041
- role: "user",
14042
- content: message.content.map((part) => {
14043
- switch (part.type) {
14044
- case "text":
14045
- return {
14046
- type: "text",
14047
- text: part.text,
14048
- ...part.providerMetadata ? { providerMetadata: part.providerMetadata } : {}
14049
- };
14050
- case "image":
14051
- return {
14052
- type: "image_url",
14053
- image_url: {
14054
- url: part.image.toString(),
14055
- ...part.providerMetadata ? { providerMetadata: part.providerMetadata } : {}
14056
- }
14057
- };
14058
- default:
14059
- return part;
14060
- }
14061
- })
14062
- }
14063
- ];
14064
- case "tool":
14065
- return message.content.map((part) => ({
14066
- role: "tool",
14067
- tool_call_id: part.toolCallId,
14068
- content: JSON.stringify(part.result)
14069
- }));
13884
+ }
13885
+ };
13886
+ var processTools = (tools) => {
13887
+ if (!tools || typeof tools !== "object") return tools;
13888
+ if (Array.isArray(tools)) {
13889
+ return tools.map(processTool);
13890
+ }
13891
+ const processed = {};
13892
+ for (const [key, tool] of Object.entries(tools)) {
13893
+ processed[key] = processTool(tool);
13894
+ }
13895
+ return processed;
13896
+ };
13897
+ var processTool = (tool) => {
13898
+ if (!tool || typeof tool !== "object") return tool;
13899
+ const processed = { ...tool };
13900
+ if (isZodSchema(processed.inputSchema)) {
13901
+ processed.inputSchema = serializeZodSchema(processed.inputSchema);
13902
+ }
13903
+ if (isZodSchema(processed.parameters)) {
13904
+ processed.parameters = serializeZodSchema(processed.parameters);
13905
+ }
13906
+ if ("execute" in processed) {
13907
+ processed.execute = "[Function]";
13908
+ }
13909
+ if ("render" in processed) {
13910
+ processed.render = "[Function]";
13911
+ }
13912
+ return processed;
13913
+ };
13914
+ var processInputAttachments2 = (input) => {
13915
+ if (!input) return input;
13916
+ const processed = { ...input };
13917
+ if (input.messages && Array.isArray(input.messages)) {
13918
+ processed.messages = input.messages.map(processMessage);
13919
+ }
13920
+ if (input.prompt && typeof input.prompt === "object" && !Array.isArray(input.prompt)) {
13921
+ processed.prompt = processPromptContent(input.prompt);
13922
+ }
13923
+ if (input.tools) {
13924
+ processed.tools = processTools(input.tools);
13925
+ }
13926
+ return processed;
13927
+ };
13928
+ var processMessage = (message) => {
13929
+ if (!message || typeof message !== "object") return message;
13930
+ if (Array.isArray(message.content)) {
13931
+ return {
13932
+ ...message,
13933
+ content: message.content.map(processContentPart)
13934
+ };
13935
+ }
13936
+ if (typeof message.content === "object" && message.content !== null) {
13937
+ return {
13938
+ ...message,
13939
+ content: processContentPart(message.content)
13940
+ };
13941
+ }
13942
+ return message;
13943
+ };
13944
+ var processPromptContent = (prompt) => {
13945
+ if (Array.isArray(prompt)) {
13946
+ return prompt.map(processContentPart);
13947
+ }
13948
+ if (prompt.content) {
13949
+ if (Array.isArray(prompt.content)) {
13950
+ return {
13951
+ ...prompt,
13952
+ content: prompt.content.map(processContentPart)
13953
+ };
13954
+ } else if (typeof prompt.content === "object") {
13955
+ return {
13956
+ ...prompt,
13957
+ content: processContentPart(prompt.content)
13958
+ };
14070
13959
  }
14071
- });
14072
- }
14073
- function postProcessOutput(text, toolCalls, finishReason) {
14074
- return [
14075
- {
14076
- index: 0,
14077
- message: {
14078
- role: "assistant",
14079
- content: text ?? "",
14080
- ...toolCalls && toolCalls.length > 0 ? {
14081
- tool_calls: toolCalls.map((toolCall) => ({
14082
- id: toolCall.toolCallId,
14083
- function: {
14084
- name: toolCall.toolName,
14085
- arguments: toolCall.args
14086
- },
14087
- type: "function"
14088
- }))
14089
- } : {}
14090
- },
14091
- finish_reason: finishReason
13960
+ }
13961
+ return prompt;
13962
+ };
13963
+ var processContentPart = (part) => {
13964
+ if (!part || typeof part !== "object") return part;
13965
+ try {
13966
+ if (part.type === "image" && part.image) {
13967
+ const imageAttachment = convertImageToAttachment(
13968
+ part.image,
13969
+ part.mimeType || part.mediaType
13970
+ );
13971
+ if (imageAttachment) {
13972
+ return {
13973
+ ...part,
13974
+ image: imageAttachment
13975
+ };
13976
+ }
13977
+ }
13978
+ if (part.type === "file" && part.data && (part.mimeType || part.mediaType)) {
13979
+ const fileAttachment = convertDataToAttachment(
13980
+ part.data,
13981
+ part.mimeType || part.mediaType,
13982
+ part.name || part.filename
13983
+ );
13984
+ if (fileAttachment) {
13985
+ return {
13986
+ ...part,
13987
+ data: fileAttachment
13988
+ };
13989
+ }
13990
+ }
13991
+ if (part.type === "image_url" && part.image_url) {
13992
+ if (typeof part.image_url === "object" && part.image_url.url) {
13993
+ const imageAttachment = convertImageToAttachment(part.image_url.url);
13994
+ if (imageAttachment) {
13995
+ return {
13996
+ ...part,
13997
+ image_url: {
13998
+ ...part.image_url,
13999
+ url: imageAttachment
14000
+ }
14001
+ };
14002
+ }
14003
+ }
14004
+ }
14005
+ } catch (error2) {
14006
+ console.warn("Error processing content part:", error2);
14007
+ }
14008
+ return part;
14009
+ };
14010
+ var convertImageToAttachment = (image, explicitMimeType) => {
14011
+ try {
14012
+ if (typeof image === "string" && image.startsWith("data:")) {
14013
+ const [mimeTypeSection, base64Data] = image.split(",");
14014
+ const mimeType = mimeTypeSection.match(/data:(.*?);/)?.[1];
14015
+ if (mimeType && base64Data) {
14016
+ const blob = convertDataToBlob(base64Data, mimeType);
14017
+ if (blob) {
14018
+ return new Attachment({
14019
+ data: blob,
14020
+ filename: `image.${getExtensionFromMediaType(mimeType)}`,
14021
+ contentType: mimeType
14022
+ });
14023
+ }
14024
+ }
14092
14025
  }
14026
+ if (explicitMimeType) {
14027
+ if (image instanceof Uint8Array) {
14028
+ return new Attachment({
14029
+ data: new Blob([image], { type: explicitMimeType }),
14030
+ filename: `image.${getExtensionFromMediaType(explicitMimeType)}`,
14031
+ contentType: explicitMimeType
14032
+ });
14033
+ }
14034
+ if (typeof Buffer !== "undefined" && Buffer.isBuffer(image)) {
14035
+ return new Attachment({
14036
+ data: new Blob([image], { type: explicitMimeType }),
14037
+ filename: `image.${getExtensionFromMediaType(explicitMimeType)}`,
14038
+ contentType: explicitMimeType
14039
+ });
14040
+ }
14041
+ }
14042
+ if (image instanceof Blob && image.type) {
14043
+ return new Attachment({
14044
+ data: image,
14045
+ filename: `image.${getExtensionFromMediaType(image.type)}`,
14046
+ contentType: image.type
14047
+ });
14048
+ }
14049
+ if (image instanceof Attachment) {
14050
+ return image;
14051
+ }
14052
+ } catch (error2) {
14053
+ console.warn("Error converting image to attachment:", error2);
14054
+ }
14055
+ return null;
14056
+ };
14057
+ var convertDataToAttachment = (data, mimeType, filename) => {
14058
+ if (!mimeType) return null;
14059
+ try {
14060
+ let blob = null;
14061
+ if (typeof data === "string" && data.startsWith("data:")) {
14062
+ const [, base64Data] = data.split(",");
14063
+ if (base64Data) {
14064
+ blob = convertDataToBlob(base64Data, mimeType);
14065
+ }
14066
+ } else if (typeof data === "string" && data.length > 0) {
14067
+ blob = convertDataToBlob(data, mimeType);
14068
+ } else if (data instanceof Uint8Array) {
14069
+ blob = new Blob([data], { type: mimeType });
14070
+ } else if (typeof Buffer !== "undefined" && Buffer.isBuffer(data)) {
14071
+ blob = new Blob([data], { type: mimeType });
14072
+ } else if (data instanceof Blob) {
14073
+ blob = data;
14074
+ }
14075
+ if (blob) {
14076
+ return new Attachment({
14077
+ data: blob,
14078
+ filename: filename || `file.${getExtensionFromMediaType(mimeType)}`,
14079
+ contentType: mimeType
14080
+ });
14081
+ }
14082
+ } catch (error2) {
14083
+ console.warn("Error converting data to attachment:", error2);
14084
+ }
14085
+ return null;
14086
+ };
14087
+ var extractGetterValues = (obj) => {
14088
+ const getterValues = {};
14089
+ const getterNames = [
14090
+ "text",
14091
+ "finishReason",
14092
+ "usage",
14093
+ "toolCalls",
14094
+ "toolResults",
14095
+ "warnings",
14096
+ "experimental_providerMetadata",
14097
+ "rawResponse",
14098
+ "response"
14093
14099
  ];
14094
- }
14095
-
14096
- // src/wrappers/ai-sdk-5/ai-sdk.ts
14097
- init_logger();
14098
- var V3_EXCLUDE_KEYS = /* @__PURE__ */ new Set([
14099
- "prompt",
14100
- // Already captured as input
14101
- "system",
14102
- // Already captured as input
14103
- "messages",
14104
- // Already captured as input
14105
- "model",
14106
- // Already captured in metadata.model
14107
- "providerOptions",
14108
- // Internal AI SDK configuration
14109
- "tools",
14110
- // Already captured in metadata.tools
14111
- "span_info"
14112
- // Extracted separately for prompt linking
14113
- ]);
14114
- function extractSpanInfo(params) {
14115
- const { span_info } = params;
14116
- return { spanInfo: span_info };
14117
- }
14118
- function processFilesAsAttachments(files) {
14119
- if (!files || !Array.isArray(files) || files.length === 0) {
14120
- return void 0;
14100
+ for (const name of getterNames) {
14101
+ try {
14102
+ if (obj && name in obj && typeof obj[name] !== "function") {
14103
+ getterValues[name] = obj[name];
14104
+ }
14105
+ } catch {
14106
+ }
14107
+ }
14108
+ return getterValues;
14109
+ };
14110
+ var processOutput = async (output, denyOutputPaths) => {
14111
+ const getterValues = extractGetterValues(output);
14112
+ const processed = await processOutputAttachments(output);
14113
+ const merged = { ...processed, ...getterValues };
14114
+ return omit(merged, denyOutputPaths ?? DENY_OUTPUT_PATHS);
14115
+ };
14116
+ var processOutputAttachments = async (output) => {
14117
+ try {
14118
+ return await doProcessOutputAttachments(output);
14119
+ } catch (error2) {
14120
+ console.error("Error processing output attachments:", error2);
14121
+ return output;
14122
+ }
14123
+ };
14124
+ var doProcessOutputAttachments = async (output) => {
14125
+ if (!output || !("files" in output)) {
14126
+ return output;
14127
+ }
14128
+ if (output.files && typeof output.files.then === "function") {
14129
+ return {
14130
+ ...output,
14131
+ files: output.files.then(async (files) => {
14132
+ if (!files || !Array.isArray(files) || files.length === 0) {
14133
+ return files;
14134
+ }
14135
+ return files.map(convertFileToAttachment);
14136
+ })
14137
+ };
14138
+ } else if (output.files && Array.isArray(output.files) && output.files.length > 0) {
14139
+ return {
14140
+ ...output,
14141
+ files: output.files.map(convertFileToAttachment)
14142
+ };
14121
14143
  }
14122
- return files.map((file, index) => {
14144
+ return output;
14145
+ };
14146
+ var convertFileToAttachment = (file, index) => {
14147
+ try {
14123
14148
  const mediaType = file.mediaType || "application/octet-stream";
14124
14149
  const filename = `generated_file_${index}.${getExtensionFromMediaType(mediaType)}`;
14125
- const blob = convertDataToBlob(file.data, mediaType);
14150
+ let blob = null;
14151
+ if (file.base64) {
14152
+ blob = convertDataToBlob(file.base64, mediaType);
14153
+ } else if (file.uint8Array) {
14154
+ blob = new Blob([file.uint8Array], { type: mediaType });
14155
+ }
14126
14156
  if (!blob) {
14127
- return null;
14157
+ console.warn(`Failed to convert file at index ${index} to Blob`);
14158
+ return file;
14128
14159
  }
14129
14160
  return new Attachment({
14130
14161
  data: blob,
14131
14162
  filename,
14132
14163
  contentType: mediaType
14133
14164
  });
14134
- }).filter((attachment) => attachment !== null);
14165
+ } catch (error2) {
14166
+ console.warn(`Error processing file at index ${index}:`, error2);
14167
+ return file;
14168
+ }
14169
+ };
14170
+ function extractTokenMetrics(result) {
14171
+ const metrics2 = {};
14172
+ const usage = result?.usage;
14173
+ if (!usage) {
14174
+ return metrics2;
14175
+ }
14176
+ if (usage.inputTokens !== void 0) {
14177
+ metrics2.prompt_tokens = usage.inputTokens;
14178
+ } else if (usage.promptTokens !== void 0) {
14179
+ metrics2.prompt_tokens = usage.promptTokens;
14180
+ } else if (usage.prompt_tokens !== void 0) {
14181
+ metrics2.prompt_tokens = usage.prompt_tokens;
14182
+ }
14183
+ if (usage.outputTokens !== void 0) {
14184
+ metrics2.completion_tokens = usage.outputTokens;
14185
+ } else if (usage.completionTokens !== void 0) {
14186
+ metrics2.completion_tokens = usage.completionTokens;
14187
+ } else if (usage.completion_tokens !== void 0) {
14188
+ metrics2.completion_tokens = usage.completion_tokens;
14189
+ }
14190
+ if (usage.totalTokens !== void 0) {
14191
+ metrics2.tokens = usage.totalTokens;
14192
+ } else if (usage.tokens !== void 0) {
14193
+ metrics2.tokens = usage.tokens;
14194
+ } else if (usage.total_tokens !== void 0) {
14195
+ metrics2.tokens = usage.total_tokens;
14196
+ }
14197
+ if (usage.cachedInputTokens !== void 0 || usage.promptCachedTokens !== void 0 || usage.prompt_cached_tokens !== void 0) {
14198
+ metrics2.prompt_cached_tokens = usage.cachedInputTokens || usage.promptCachedTokens || usage.prompt_cached_tokens;
14199
+ }
14200
+ if (usage.promptCacheCreationTokens !== void 0 || usage.prompt_cache_creation_tokens !== void 0) {
14201
+ metrics2.prompt_cache_creation_tokens = usage.promptCacheCreationTokens || usage.prompt_cache_creation_tokens;
14202
+ }
14203
+ if (usage.promptReasoningTokens !== void 0 || usage.prompt_reasoning_tokens !== void 0) {
14204
+ metrics2.prompt_reasoning_tokens = usage.promptReasoningTokens || usage.prompt_reasoning_tokens;
14205
+ }
14206
+ if (usage.completionCachedTokens !== void 0 || usage.completion_cached_tokens !== void 0) {
14207
+ metrics2.completion_cached_tokens = usage.completionCachedTokens || usage.completion_cached_tokens;
14208
+ }
14209
+ if (usage.reasoningTokens !== void 0 || usage.completionReasoningTokens !== void 0 || usage.completion_reasoning_tokens !== void 0 || usage.reasoning_tokens !== void 0 || usage.thinkingTokens !== void 0 || usage.thinking_tokens !== void 0) {
14210
+ const reasoningTokenCount = usage.reasoningTokens || usage.completionReasoningTokens || usage.completion_reasoning_tokens || usage.reasoning_tokens || usage.thinkingTokens || usage.thinking_tokens;
14211
+ metrics2.completion_reasoning_tokens = reasoningTokenCount;
14212
+ metrics2.reasoning_tokens = reasoningTokenCount;
14213
+ }
14214
+ if (usage.completionAudioTokens !== void 0 || usage.completion_audio_tokens !== void 0) {
14215
+ metrics2.completion_audio_tokens = usage.completionAudioTokens || usage.completion_audio_tokens;
14216
+ }
14217
+ return metrics2;
14135
14218
  }
14136
- function wrapAISDK(ai) {
14137
- const {
14138
- wrapLanguageModel,
14139
- generateText,
14140
- streamText,
14141
- generateObject,
14142
- streamObject
14143
- } = ai;
14144
- const wrappedGenerateText = (params) => {
14145
- const { spanInfo } = extractSpanInfo(params);
14146
- return traced(
14147
- async (span) => {
14148
- const wrappedModel = wrapLanguageModel({
14149
- model: params.model,
14150
- middleware: BraintrustMiddleware({ spanInfo })
14151
- });
14152
- const result = await generateText({
14153
- ...params,
14154
- tools: params.tools ? wrapTools(params.tools) : void 0,
14155
- model: wrappedModel
14156
- });
14157
- const provider = detectProviderFromResult(result);
14158
- const model = extractModelFromResult(result);
14159
- const finishReason = normalizeFinishReason(result?.finishReason);
14160
- const input = processInputAttachments(extractInput(params));
14161
- const outputAttachments = processFilesAsAttachments(result.files);
14162
- const output = outputAttachments ? { text: result.text || result.content, files: outputAttachments } : result.text || result.content;
14163
- span.log({
14164
- input,
14165
- output,
14166
- metadata: {
14167
- ...extractModelParameters(params, V3_EXCLUDE_KEYS),
14168
- ...provider ? { provider } : {},
14169
- ...model ? { model } : {},
14170
- ...finishReason ? { finish_reason: finishReason } : {}
14171
- }
14172
- });
14173
- return result;
14174
- },
14175
- {
14176
- name: "ai-sdk.generateText"
14177
- }
14178
- );
14179
- };
14180
- const wrappedGenerateObject = (params) => {
14181
- const { spanInfo } = extractSpanInfo(params);
14182
- return traced(
14183
- async (span) => {
14184
- const wrappedModel = wrapLanguageModel({
14185
- model: params.model,
14186
- middleware: BraintrustMiddleware({ spanInfo })
14187
- });
14188
- const result = await generateObject({
14189
- ...params,
14190
- tools: params.tools ? wrapTools(params.tools) : void 0,
14191
- model: wrappedModel
14192
- });
14193
- const provider = detectProviderFromResult(result);
14194
- const model = extractModelFromResult(result);
14195
- const finishReason = normalizeFinishReason(result.finishReason);
14196
- const input = processInputAttachments(extractInput(params));
14197
- const outputAttachments = processFilesAsAttachments(result.files);
14198
- const output = outputAttachments ? { object: result.object, files: outputAttachments } : result.object;
14199
- span.log({
14200
- input,
14201
- output,
14202
- metadata: {
14203
- ...extractModelParameters(params, V3_EXCLUDE_KEYS),
14204
- ...provider ? { provider } : {},
14205
- ...model ? { model } : {},
14206
- ...finishReason ? { finish_reason: finishReason } : {}
14207
- }
14208
- });
14209
- return result;
14210
- },
14211
- {
14212
- name: "ai-sdk.generateObject"
14213
- }
14214
- );
14215
- };
14216
- const wrappedStreamText = (params) => {
14217
- const { spanInfo } = extractSpanInfo(params);
14218
- const input = processInputAttachments(extractInput(params));
14219
- const span = startSpan({
14220
- name: "ai-sdk.streamText",
14221
- event: {
14222
- input,
14223
- metadata: extractModelParameters(params, V3_EXCLUDE_KEYS)
14219
+ var deepCopy = (obj) => {
14220
+ return JSON.parse(JSON.stringify(obj));
14221
+ };
14222
+ var parsePath = (path3) => {
14223
+ const keys = [];
14224
+ let current = "";
14225
+ for (let i = 0; i < path3.length; i++) {
14226
+ const char = path3[i];
14227
+ if (char === ".") {
14228
+ if (current) {
14229
+ keys.push(current);
14230
+ current = "";
14231
+ }
14232
+ } else if (char === "[") {
14233
+ if (current) {
14234
+ keys.push(current);
14235
+ current = "";
14236
+ }
14237
+ let bracketContent = "";
14238
+ i++;
14239
+ while (i < path3.length && path3[i] !== "]") {
14240
+ bracketContent += path3[i];
14241
+ i++;
14242
+ }
14243
+ if (bracketContent === "") {
14244
+ keys.push("[]");
14245
+ } else {
14246
+ const index = parseInt(bracketContent, 10);
14247
+ keys.push(isNaN(index) ? bracketContent : index);
14224
14248
  }
14225
- });
14226
- const userOnFinish = params.onFinish;
14227
- const userOnError = params.onError;
14228
- const userOnChunk = params.onChunk;
14229
- try {
14230
- const wrappedModel = wrapLanguageModel({
14231
- model: params.model,
14232
- middleware: BraintrustMiddleware({ spanInfo })
14249
+ } else {
14250
+ current += char;
14251
+ }
14252
+ }
14253
+ if (current) {
14254
+ keys.push(current);
14255
+ }
14256
+ return keys;
14257
+ };
14258
+ var omitAtPath = (obj, keys) => {
14259
+ if (keys.length === 0) return;
14260
+ const firstKey = keys[0];
14261
+ const remainingKeys = keys.slice(1);
14262
+ if (firstKey === "[]") {
14263
+ if (Array.isArray(obj)) {
14264
+ obj.forEach((item) => {
14265
+ if (remainingKeys.length > 0) {
14266
+ omitAtPath(item, remainingKeys);
14267
+ }
14233
14268
  });
14234
- const startTime = Date.now();
14235
- let receivedFirst = false;
14236
- const result = withCurrent(
14237
- span,
14238
- () => streamText({
14239
- ...params,
14240
- tools: params.tools ? wrapTools(params.tools) : void 0,
14241
- model: wrappedModel,
14242
- onChunk: (chunk) => {
14243
- if (!receivedFirst) {
14244
- receivedFirst = true;
14245
- span.log({
14246
- metrics: {
14247
- time_to_first_token: (Date.now() - startTime) / 1e3
14248
- }
14249
- });
14250
- }
14251
- if (typeof userOnChunk === "function") {
14252
- userOnChunk(chunk);
14253
- }
14254
- },
14255
- onFinish: async (event) => {
14256
- if (typeof userOnFinish === "function") {
14257
- await userOnFinish(event);
14258
- }
14259
- const provider = detectProviderFromResult(event);
14260
- const model = extractModelFromResult(event);
14261
- const finishReason = normalizeFinishReason(event?.finishReason);
14262
- const outputAttachments = processFilesAsAttachments(event.files);
14263
- const output = outputAttachments ? {
14264
- text: event?.text || event?.content,
14265
- files: outputAttachments
14266
- } : event?.text || event?.content;
14267
- span.log({
14268
- output,
14269
- metadata: {
14270
- ...extractModelParameters(params, V3_EXCLUDE_KEYS),
14271
- ...provider ? { provider } : {},
14272
- ...model ? { model } : {},
14273
- ...finishReason ? { finish_reason: finishReason } : {}
14274
- }
14275
- });
14276
- span.end();
14277
- },
14278
- onError: async (err) => {
14279
- if (typeof userOnError === "function") {
14280
- await userOnError(err);
14281
- }
14282
- span.log({
14283
- error: err instanceof Error ? err.message : String(err)
14284
- });
14285
- span.end();
14286
- }
14287
- })
14288
- );
14289
- return result;
14290
- } catch (error2) {
14269
+ }
14270
+ } else if (remainingKeys.length === 0) {
14271
+ if (obj && typeof obj === "object" && firstKey in obj) {
14272
+ obj[firstKey] = "<omitted>";
14273
+ }
14274
+ } else {
14275
+ if (obj && typeof obj === "object" && firstKey in obj) {
14276
+ omitAtPath(obj[firstKey], remainingKeys);
14277
+ }
14278
+ }
14279
+ };
14280
+ var omit = (obj, paths) => {
14281
+ const result = deepCopy(obj);
14282
+ for (const path3 of paths) {
14283
+ const keys = parsePath(path3);
14284
+ omitAtPath(result, keys);
14285
+ }
14286
+ return result;
14287
+ };
14288
+
14289
+ // src/wrappers/ai-sdk/deprecated/wrapAISDKModel.ts
14290
+ init_logger();
14291
+ init_util2();
14292
+ function wrapAISDKModel(model) {
14293
+ const m = model;
14294
+ if (m?.specificationVersion === "v1" && typeof m?.provider === "string" && typeof m?.modelId === "string") {
14295
+ return new BraintrustLanguageModelWrapper(m);
14296
+ } else {
14297
+ console.warn("Unsupported AI SDK model. Not wrapping.");
14298
+ return model;
14299
+ }
14300
+ }
14301
+ var BraintrustLanguageModelWrapper = class {
14302
+ constructor(model) {
14303
+ this.model = model;
14304
+ if (typeof this.model.supportsUrl === "function") {
14305
+ this.supportsUrl = (url) => this.model.supportsUrl(url);
14306
+ }
14307
+ }
14308
+ supportsUrl;
14309
+ get specificationVersion() {
14310
+ return this.model.specificationVersion;
14311
+ }
14312
+ get provider() {
14313
+ return this.model.provider;
14314
+ }
14315
+ get modelId() {
14316
+ return this.model.modelId;
14317
+ }
14318
+ get defaultObjectGenerationMode() {
14319
+ return this.model.defaultObjectGenerationMode;
14320
+ }
14321
+ get supportsImageUrls() {
14322
+ return this.model.supportsImageUrls;
14323
+ }
14324
+ get supportsStructuredOutputs() {
14325
+ return this.model.supportsStructuredOutputs;
14326
+ }
14327
+ // For the first cut, do not support custom span_info arguments. We can
14328
+ // propagate those via async local storage
14329
+ async doGenerate(options) {
14330
+ const span = startSpan({
14331
+ name: "Chat Completion",
14332
+ spanAttributes: {
14333
+ type: "llm"
14334
+ }
14335
+ });
14336
+ const { prompt, mode, ...rest } = options;
14337
+ const startTime = getCurrentUnixTimestamp();
14338
+ try {
14339
+ const ret = await this.model.doGenerate(options);
14291
14340
  span.log({
14292
- error: error2 instanceof Error ? error2.message : String(error2)
14341
+ input: postProcessPrompt(prompt),
14342
+ metadata: {
14343
+ model: this.modelId,
14344
+ ...rest,
14345
+ ..."tools" in mode && mode.tools ? { tools: convertTools(mode.tools) } : "tool" in mode && mode.tool ? { tools: convertTools([mode.tool]) } : {}
14346
+ },
14347
+ output: postProcessOutput(ret.text, ret.toolCalls, ret.finishReason),
14348
+ metrics: {
14349
+ time_to_first_token: getCurrentUnixTimestamp() - startTime,
14350
+ tokens: !isEmpty(ret.usage) ? ret.usage.promptTokens + ret.usage.completionTokens : void 0,
14351
+ prompt_tokens: ret.usage?.promptTokens,
14352
+ completion_tokens: ret.usage?.completionTokens,
14353
+ cached: parseCachedHeader(
14354
+ ret.rawResponse?.headers?.[X_CACHED_HEADER] ?? ret.rawResponse?.headers?.[LEGACY_CACHED_HEADER]
14355
+ )
14356
+ }
14293
14357
  });
14358
+ return ret;
14359
+ } finally {
14294
14360
  span.end();
14295
- throw error2;
14296
14361
  }
14297
- };
14298
- const wrappedStreamObject = (params) => {
14299
- const { spanInfo } = extractSpanInfo(params);
14300
- const input = processInputAttachments(extractInput(params));
14362
+ }
14363
+ async doStream(options) {
14364
+ const { prompt, mode, ...rest } = options;
14365
+ const startTime = getCurrentUnixTimestamp();
14301
14366
  const span = startSpan({
14302
- name: "ai-sdk.streamObject",
14303
- event: {
14304
- input,
14305
- metadata: extractModelParameters(params, V3_EXCLUDE_KEYS)
14367
+ name: "Chat Completion",
14368
+ spanAttributes: {
14369
+ type: "llm"
14306
14370
  }
14307
14371
  });
14308
- const userOnFinish = params.onFinish;
14309
- const userOnError = params.onError;
14372
+ span.log({
14373
+ input: postProcessPrompt(prompt),
14374
+ metadata: {
14375
+ model: this.modelId,
14376
+ ...rest,
14377
+ ..."tools" in mode && mode.tools ? { tools: convertTools(mode.tools) } : "tool" in mode && mode.tool ? { tools: convertTools([mode.tool]) } : {}
14378
+ }
14379
+ });
14380
+ let ended = false;
14381
+ const end = () => {
14382
+ if (!ended) {
14383
+ span.end();
14384
+ ended = true;
14385
+ }
14386
+ };
14310
14387
  try {
14311
- const wrappedModel = wrapLanguageModel({
14312
- model: params.model,
14313
- middleware: BraintrustMiddleware({ spanInfo })
14314
- });
14315
- const result = withCurrent(
14316
- span,
14317
- () => streamObject({
14318
- ...params,
14319
- tools: params.tools ? wrapTools(params.tools) : void 0,
14320
- model: wrappedModel,
14321
- onFinish: async (event) => {
14322
- if (typeof userOnFinish === "function") {
14323
- await userOnFinish(event);
14324
- }
14325
- const provider = detectProviderFromResult(event);
14326
- const model = extractModelFromResult(event);
14327
- const finishReason = normalizeFinishReason(event?.finishReason);
14328
- const outputAttachments = processFilesAsAttachments(event.files);
14329
- const output = outputAttachments ? { object: event?.object, files: outputAttachments } : event?.object;
14330
- span.log({
14331
- output,
14332
- metadata: {
14333
- ...extractModelParameters(params, V3_EXCLUDE_KEYS),
14334
- ...provider ? { provider } : {},
14335
- ...model ? { model } : {},
14336
- ...finishReason ? { finish_reason: finishReason } : {}
14388
+ const ret = await this.model.doStream(options);
14389
+ let time_to_first_token = void 0;
14390
+ let usage = void 0;
14391
+ let fullText = void 0;
14392
+ const toolCalls = {};
14393
+ let finishReason = void 0;
14394
+ return {
14395
+ ...ret,
14396
+ stream: ret.stream.pipeThrough(
14397
+ new TransformStream({
14398
+ transform(chunk, controller) {
14399
+ if (time_to_first_token === void 0) {
14400
+ time_to_first_token = getCurrentUnixTimestamp() - startTime;
14401
+ span.log({ metrics: { time_to_first_token } });
14337
14402
  }
14338
- });
14339
- span.end();
14340
- },
14341
- onError: async (err) => {
14342
- if (typeof userOnError === "function") {
14343
- await userOnError(err);
14344
- }
14345
- span.log({
14346
- error: err instanceof Error ? err.message : String(err)
14347
- });
14348
- span.end();
14349
- }
14350
- })
14351
- );
14352
- const startTime = Date.now();
14353
- let receivedFirst = false;
14354
- const trackFirstAccess = () => {
14355
- if (!receivedFirst) {
14356
- receivedFirst = true;
14357
- span.log({
14358
- metrics: {
14359
- time_to_first_token: (Date.now() - startTime) / 1e3
14403
+ switch (chunk.type) {
14404
+ case "text-delta":
14405
+ if (fullText === void 0) {
14406
+ fullText = "";
14407
+ }
14408
+ fullText += chunk.textDelta;
14409
+ break;
14410
+ case "tool-call":
14411
+ toolCalls[chunk.toolCallId] = {
14412
+ toolCallType: chunk.toolCallType,
14413
+ toolCallId: chunk.toolCallId,
14414
+ toolName: chunk.toolName,
14415
+ args: chunk.args
14416
+ };
14417
+ break;
14418
+ case "tool-call-delta":
14419
+ if (toolCalls[chunk.toolCallId] === void 0) {
14420
+ toolCalls[chunk.toolCallId] = {
14421
+ toolCallType: chunk.toolCallType,
14422
+ toolCallId: chunk.toolCallId,
14423
+ toolName: chunk.toolName,
14424
+ args: ""
14425
+ };
14426
+ }
14427
+ toolCalls[chunk.toolCallId].args += chunk.argsTextDelta;
14428
+ break;
14429
+ case "finish":
14430
+ usage = chunk.usage;
14431
+ finishReason = chunk.finishReason;
14432
+ break;
14433
+ }
14434
+ controller.enqueue(chunk);
14435
+ },
14436
+ async flush(controller) {
14437
+ span.log({
14438
+ output: postProcessOutput(
14439
+ fullText,
14440
+ Object.keys(toolCalls).length > 0 ? Object.values(toolCalls) : void 0,
14441
+ finishReason
14442
+ ),
14443
+ metrics: {
14444
+ time_to_first_token,
14445
+ tokens: !isEmpty(usage) ? usage.promptTokens + usage.completionTokens : void 0,
14446
+ prompt_tokens: usage?.promptTokens,
14447
+ completion_tokens: usage?.completionTokens,
14448
+ cached: parseCachedHeader(
14449
+ ret.rawResponse?.headers?.[X_CACHED_HEADER] ?? ret.rawResponse?.headers?.[LEGACY_CACHED_HEADER]
14450
+ )
14451
+ }
14452
+ });
14453
+ end();
14454
+ controller.terminate();
14360
14455
  }
14361
- });
14362
- }
14456
+ })
14457
+ )
14363
14458
  };
14364
- const [stream1, stream2] = result.baseStream.tee();
14365
- result.baseStream = stream2;
14366
- stream1.pipeThrough(
14367
- new TransformStream({
14368
- transform(chunk, controller) {
14369
- trackFirstAccess();
14370
- controller.enqueue(chunk);
14459
+ } finally {
14460
+ end();
14461
+ }
14462
+ }
14463
+ };
14464
+ function convertTools(tools) {
14465
+ return tools.map((tool) => {
14466
+ const { type: _, ...rest } = tool;
14467
+ return {
14468
+ type: tool.type,
14469
+ function: rest
14470
+ };
14471
+ });
14472
+ }
14473
+ function postProcessPrompt(prompt) {
14474
+ return prompt.flatMap((message) => {
14475
+ switch (message.role) {
14476
+ case "system":
14477
+ return [
14478
+ {
14479
+ role: "system",
14480
+ content: message.content
14371
14481
  }
14372
- })
14373
- ).pipeTo(
14374
- new WritableStream({
14375
- write() {
14482
+ ];
14483
+ case "assistant":
14484
+ const textPart = message.content.find(
14485
+ (part) => part.type === "text"
14486
+ );
14487
+ const toolCallParts = message.content.filter(
14488
+ (part) => part.type === "tool-call"
14489
+ );
14490
+ return [
14491
+ {
14492
+ role: "assistant",
14493
+ content: textPart?.text,
14494
+ ...toolCallParts.length > 0 ? {
14495
+ tool_calls: toolCallParts.map((part) => ({
14496
+ id: part.toolCallId,
14497
+ function: {
14498
+ name: part.toolName,
14499
+ arguments: JSON.stringify(part.args)
14500
+ },
14501
+ type: "function"
14502
+ }))
14503
+ } : {}
14376
14504
  }
14377
- })
14378
- ).catch(() => {
14379
- });
14380
- return result;
14381
- } catch (error2) {
14382
- span.log({
14383
- error: error2 instanceof Error ? error2.message : String(error2)
14384
- });
14385
- span.end();
14386
- throw error2;
14505
+ ];
14506
+ case "user":
14507
+ return [
14508
+ {
14509
+ role: "user",
14510
+ content: message.content.map((part) => {
14511
+ switch (part.type) {
14512
+ case "text":
14513
+ return {
14514
+ type: "text",
14515
+ text: part.text,
14516
+ ...part.providerMetadata ? { providerMetadata: part.providerMetadata } : {}
14517
+ };
14518
+ case "image":
14519
+ return {
14520
+ type: "image_url",
14521
+ image_url: {
14522
+ url: part.image.toString(),
14523
+ ...part.providerMetadata ? { providerMetadata: part.providerMetadata } : {}
14524
+ }
14525
+ };
14526
+ default:
14527
+ return part;
14528
+ }
14529
+ })
14530
+ }
14531
+ ];
14532
+ case "tool":
14533
+ return message.content.map((part) => ({
14534
+ role: "tool",
14535
+ tool_call_id: part.toolCallId,
14536
+ content: JSON.stringify(part.result)
14537
+ }));
14387
14538
  }
14388
- };
14539
+ });
14540
+ }
14541
+ function postProcessOutput(text, toolCalls, finishReason) {
14542
+ return [
14543
+ {
14544
+ index: 0,
14545
+ message: {
14546
+ role: "assistant",
14547
+ content: text ?? "",
14548
+ ...toolCalls && toolCalls.length > 0 ? {
14549
+ tool_calls: toolCalls.map((toolCall) => ({
14550
+ id: toolCall.toolCallId,
14551
+ function: {
14552
+ name: toolCall.toolName,
14553
+ arguments: toolCall.args
14554
+ },
14555
+ type: "function"
14556
+ }))
14557
+ } : {}
14558
+ },
14559
+ finish_reason: finishReason
14560
+ }
14561
+ ];
14562
+ }
14563
+
14564
+ // src/wrappers/ai-sdk/deprecated/BraintrustMiddleware.ts
14565
+ init_util();
14566
+ init_logger();
14567
+
14568
+ // src/wrappers/anthropic-tokens-util.ts
14569
+ function finalizeAnthropicTokens(metrics2) {
14570
+ const prompt_tokens = (metrics2.prompt_tokens || 0) + (metrics2.prompt_cached_tokens || 0) + (metrics2.prompt_cache_creation_tokens || 0);
14389
14571
  return {
14390
- generateText: wrappedGenerateText,
14391
- generateObject: wrappedGenerateObject,
14392
- streamText: wrappedStreamText,
14393
- streamObject: wrappedStreamObject
14572
+ ...metrics2,
14573
+ prompt_tokens,
14574
+ tokens: prompt_tokens + (metrics2.completion_tokens || 0)
14394
14575
  };
14395
14576
  }
14577
+ function extractAnthropicCacheTokens(cacheReadTokens = 0, cacheCreationTokens = 0) {
14578
+ const cacheTokens = {};
14579
+ if (cacheReadTokens > 0) {
14580
+ cacheTokens.prompt_cached_tokens = cacheReadTokens;
14581
+ }
14582
+ if (cacheCreationTokens > 0) {
14583
+ cacheTokens.prompt_cache_creation_tokens = cacheCreationTokens;
14584
+ }
14585
+ return cacheTokens;
14586
+ }
14396
14587
 
14397
- // src/wrappers/mastra/mastra.ts
14398
- init_logger();
14399
- var aiSDKFormatWarning = false;
14400
- function wrapMastraAgent(agent, options) {
14401
- const prefix = options?.name ?? options?.span_name ?? agent.name ?? "Agent";
14402
- if (!hasAllMethods(agent)) {
14403
- return agent;
14588
+ // src/wrappers/ai-sdk/deprecated/BraintrustMiddleware.ts
14589
+ function detectProviderFromResult(result) {
14590
+ if (!result?.providerMetadata) {
14591
+ return void 0;
14404
14592
  }
14405
- if (agent.tools) {
14406
- agent.__setTools(wrapTools(agent.tools));
14593
+ const keys = Object.keys(result.providerMetadata);
14594
+ return keys?.at(0);
14595
+ }
14596
+ function extractModelFromResult(result) {
14597
+ if (result?.response?.modelId) {
14598
+ return result.response.modelId;
14407
14599
  }
14408
- return new Proxy(agent, {
14409
- get(target, prop, receiver) {
14410
- const value = Reflect.get(target, prop, receiver);
14411
- if (prop === "generate" && typeof value === "function") {
14412
- return wrapGenerate(value, target, prefix);
14413
- }
14414
- if (prop === "stream" && typeof value === "function") {
14415
- return wrapStream(value, target, prefix);
14600
+ if (result?.request?.body?.model) {
14601
+ return result.request.body.model;
14602
+ }
14603
+ return void 0;
14604
+ }
14605
+ function extractModelFromWrapGenerateCallback(model) {
14606
+ return model?.modelId;
14607
+ }
14608
+ function camelToSnake(str) {
14609
+ return str.replace(/[A-Z]/g, (letter) => `_${letter.toLowerCase()}`);
14610
+ }
14611
+ function extractModelParameters(params, excludeKeys) {
14612
+ const modelParams = {};
14613
+ for (const [key, value] of Object.entries(params)) {
14614
+ if (value !== void 0 && !excludeKeys.has(key)) {
14615
+ const snakeKey = camelToSnake(key);
14616
+ modelParams[snakeKey] = value;
14617
+ }
14618
+ }
14619
+ return modelParams;
14620
+ }
14621
+ function getNumberProperty(obj, key) {
14622
+ if (!obj || typeof obj !== "object" || !(key in obj)) {
14623
+ return void 0;
14624
+ }
14625
+ const value = Reflect.get(obj, key);
14626
+ return typeof value === "number" ? value : void 0;
14627
+ }
14628
+ function normalizeUsageMetrics(usage, provider, providerMetadata) {
14629
+ const metrics2 = {};
14630
+ const inputTokens = getNumberProperty(usage, "inputTokens");
14631
+ if (inputTokens !== void 0) {
14632
+ metrics2.prompt_tokens = inputTokens;
14633
+ }
14634
+ const outputTokens = getNumberProperty(usage, "outputTokens");
14635
+ if (outputTokens !== void 0) {
14636
+ metrics2.completion_tokens = outputTokens;
14637
+ }
14638
+ const totalTokens = getNumberProperty(usage, "totalTokens");
14639
+ if (totalTokens !== void 0) {
14640
+ metrics2.tokens = totalTokens;
14641
+ }
14642
+ const reasoningTokens = getNumberProperty(usage, "reasoningTokens");
14643
+ if (reasoningTokens !== void 0) {
14644
+ metrics2.completion_reasoning_tokens = reasoningTokens;
14645
+ }
14646
+ const cachedInputTokens = getNumberProperty(usage, "cachedInputTokens");
14647
+ if (cachedInputTokens !== void 0) {
14648
+ metrics2.prompt_cached_tokens = cachedInputTokens;
14649
+ }
14650
+ if (provider === "anthropic") {
14651
+ const anthropicMetadata = providerMetadata?.anthropic;
14652
+ if (anthropicMetadata) {
14653
+ const cacheReadTokens = getNumberProperty(anthropicMetadata.usage, "cache_read_input_tokens") || 0;
14654
+ const cacheCreationTokens = getNumberProperty(
14655
+ anthropicMetadata.usage,
14656
+ "cache_creation_input_tokens"
14657
+ ) || 0;
14658
+ const cacheTokens = extractAnthropicCacheTokens(
14659
+ cacheReadTokens,
14660
+ cacheCreationTokens
14661
+ );
14662
+ Object.assign(metrics2, cacheTokens);
14663
+ Object.assign(metrics2, finalizeAnthropicTokens(metrics2));
14664
+ }
14665
+ }
14666
+ return metrics2;
14667
+ }
14668
+ function normalizeFinishReason(reason) {
14669
+ if (typeof reason !== "string") return void 0;
14670
+ return reason.replace(/-/g, "_");
14671
+ }
14672
+ function buildAssistantOutputWithToolCalls(result, toolCalls) {
14673
+ return [
14674
+ {
14675
+ index: 0,
14676
+ logprobs: null,
14677
+ finish_reason: normalizeFinishReason(result?.finishReason) ?? (toolCalls.length ? "tool_calls" : void 0),
14678
+ message: {
14679
+ role: "assistant",
14680
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
14416
14681
  }
14417
- if (typeof value === "function") {
14418
- return value.bind(target);
14682
+ }
14683
+ ];
14684
+ }
14685
+ function extractToolCallsFromSteps(steps) {
14686
+ const toolCalls = [];
14687
+ if (!Array.isArray(steps)) return toolCalls;
14688
+ let idx = 0;
14689
+ for (const step of steps) {
14690
+ const blocks = step?.content;
14691
+ if (!Array.isArray(blocks)) continue;
14692
+ for (const block of blocks) {
14693
+ if (block && typeof block === "object" && block.type === "tool-call") {
14694
+ toolCalls.push({
14695
+ id: block.toolCallId,
14696
+ type: "function",
14697
+ index: idx++,
14698
+ function: {
14699
+ name: block.toolName,
14700
+ arguments: typeof block.input === "string" ? block.input : JSON.stringify(block.input ?? {})
14701
+ }
14702
+ });
14419
14703
  }
14420
- return value;
14421
14704
  }
14422
- });
14705
+ }
14706
+ return toolCalls;
14423
14707
  }
14424
- function hasAllMethods(a) {
14425
- return typeof a.generate === "function" && typeof a.stream === "function";
14708
+ function extractToolCallsFromBlocks(blocks) {
14709
+ if (!Array.isArray(blocks)) return [];
14710
+ return extractToolCallsFromSteps([{ content: blocks }]);
14426
14711
  }
14427
- function wrapGenerate(original, target, prefix) {
14428
- return function(...args) {
14429
- const input = args[0];
14430
- return traced(
14431
- async (span) => {
14432
- const result = await original.apply(target, args);
14712
+ function extractInput(params) {
14713
+ return params?.prompt ?? params?.messages ?? params?.system;
14714
+ }
14715
+ var V2_EXCLUDE_KEYS = /* @__PURE__ */ new Set([
14716
+ "prompt",
14717
+ // Already captured as input
14718
+ "system",
14719
+ // Already captured as input
14720
+ "messages",
14721
+ // Already captured as input
14722
+ "model",
14723
+ // Already captured in metadata.model
14724
+ "providerOptions"
14725
+ // Internal AI SDK configuration
14726
+ ]);
14727
+ function BraintrustMiddleware(config = {}) {
14728
+ return {
14729
+ wrapGenerate: async ({
14730
+ doGenerate,
14731
+ params,
14732
+ model: modelFromWrapGenerate
14733
+ }) => {
14734
+ const rawInput = extractInput(params);
14735
+ const processedInput = processInputAttachments(rawInput);
14736
+ const spanArgs = {
14737
+ name: config.spanInfo?.name || "ai-sdk.doGenerate",
14738
+ spanAttributes: {
14739
+ type: "llm" /* LLM */,
14740
+ ...config.spanInfo?.spanAttributes || {}
14741
+ },
14742
+ event: {
14743
+ input: processedInput,
14744
+ metadata: {
14745
+ ...extractModelParameters(params, V2_EXCLUDE_KEYS),
14746
+ ...config.spanInfo?.metadata || {}
14747
+ }
14748
+ }
14749
+ };
14750
+ const span = startSpan(spanArgs);
14751
+ try {
14752
+ const result = await doGenerate();
14753
+ const metadata = {};
14433
14754
  const provider = detectProviderFromResult(result);
14755
+ if (provider !== void 0) {
14756
+ metadata.provider = provider;
14757
+ }
14758
+ if (result.finishReason !== void 0) {
14759
+ metadata.finish_reason = result.finishReason;
14760
+ }
14434
14761
  const model = extractModelFromResult(result);
14435
- const finishReason = normalizeFinishReason(result?.finishReason);
14436
- const metrics2 = result?.usage ? normalizeUsageMetrics(
14437
- result.usage,
14438
- provider,
14439
- result.providerMetadata
14440
- ) : {};
14762
+ if (model !== void 0) {
14763
+ metadata.model = model;
14764
+ } else if (modelFromWrapGenerate) {
14765
+ const modelId = extractModelFromWrapGenerateCallback(
14766
+ modelFromWrapGenerate
14767
+ );
14768
+ if (modelId) {
14769
+ metadata.model = modelId;
14770
+ }
14771
+ }
14772
+ let toolCalls = extractToolCallsFromSteps(result?.steps);
14773
+ if (!toolCalls || toolCalls.length === 0) {
14774
+ toolCalls = extractToolCallsFromBlocks(result?.content);
14775
+ }
14441
14776
  span.log({
14442
- input,
14443
- output: result,
14444
- metadata: {
14445
- agent_name: target.name ?? prefix,
14446
- ...provider ? { provider } : {},
14447
- ...model ? { model } : {},
14448
- ...finishReason ? { finish_reason: finishReason } : {}
14449
- },
14450
- metrics: metrics2
14777
+ output: toolCalls.length > 0 ? buildAssistantOutputWithToolCalls(result, toolCalls) : result?.content,
14778
+ metadata,
14779
+ metrics: normalizeUsageMetrics(
14780
+ result.usage,
14781
+ provider,
14782
+ result.providerMetadata
14783
+ )
14451
14784
  });
14452
14785
  return result;
14453
- },
14454
- {
14455
- name: `${prefix}.generate`
14456
- }
14457
- );
14458
- };
14459
- }
14460
- function wrapStream(original, target, prefix) {
14461
- return function(...args) {
14462
- const input = args[0];
14463
- const span = startSpan({
14464
- name: `${prefix}.stream`,
14465
- event: {
14466
- input,
14467
- metadata: {
14468
- agent_name: target.name ?? prefix
14469
- }
14470
- }
14471
- });
14472
- const baseOpts = typeof args[1] === "object" && args[1] !== null ? args[1] : {};
14473
- if (baseOpts.format && baseOpts.format !== "aisdk" && !aiSDKFormatWarning) {
14474
- aiSDKFormatWarning = true;
14475
- console.warn(
14476
- `Braintrust Mastra wrapper: For best compatibility, use { format: 'aisdk' } (AI SDK v5) instead of format: '${baseOpts.format}'. See https://mastra.ai/en/docs/frameworks/agentic-uis/ai-sdk for more details.`
14477
- );
14478
- }
14479
- const wrappedOpts = {
14480
- ...baseOpts,
14481
- format: baseOpts.format || "aisdk"
14482
- // Default to AI SDK v5 format if not specified
14483
- };
14484
- const userOnChunk = baseOpts?.onChunk;
14485
- const userOnFinish = baseOpts?.onFinish;
14486
- const userOnError = baseOpts?.onError;
14487
- const startTime = Date.now();
14488
- let receivedFirst = false;
14489
- wrappedOpts.onChunk = (chunk) => {
14490
- try {
14491
- userOnChunk?.(chunk);
14492
- } finally {
14493
- if (!receivedFirst) {
14494
- receivedFirst = true;
14495
- span.log({
14496
- metrics: { time_to_first_token: (Date.now() - startTime) / 1e3 }
14497
- });
14498
- }
14499
- }
14500
- };
14501
- wrappedOpts.onFinish = async (event) => {
14502
- try {
14503
- await userOnFinish?.(event);
14504
- } finally {
14505
- const e = event;
14506
- const provider = detectProviderFromResult(e);
14507
- const model = extractModelFromResult(e);
14508
- const finishReason = normalizeFinishReason(e?.finishReason);
14509
- const metrics2 = e?.usage ? normalizeUsageMetrics(e.usage, provider, e.providerMetadata) : {};
14786
+ } catch (error2) {
14510
14787
  span.log({
14511
- output: e.text ?? e.content ?? e,
14512
- metadata: {
14513
- agent_name: target.name ?? prefix,
14514
- ...provider ? { provider } : {},
14515
- ...model ? { model } : {},
14516
- ...finishReason ? { finish_reason: finishReason } : {}
14517
- },
14518
- metrics: metrics2
14788
+ error: error2 instanceof Error ? error2.message : String(error2)
14519
14789
  });
14790
+ throw error2;
14791
+ } finally {
14520
14792
  span.end();
14521
14793
  }
14522
- };
14523
- wrappedOpts.onError = async (err) => {
14794
+ },
14795
+ wrapStream: async ({ doStream, params }) => {
14796
+ const rawInput = extractInput(params);
14797
+ const processedInput = processInputAttachments(rawInput);
14798
+ const spanArgs = {
14799
+ name: config.spanInfo?.name || "ai-sdk.doStream",
14800
+ spanAttributes: {
14801
+ type: "llm" /* LLM */,
14802
+ ...config.spanInfo?.spanAttributes || {}
14803
+ },
14804
+ event: {
14805
+ input: processedInput,
14806
+ metadata: {
14807
+ ...extractModelParameters(params, V2_EXCLUDE_KEYS),
14808
+ ...config.spanInfo?.metadata || {}
14809
+ }
14810
+ }
14811
+ };
14812
+ const span = startSpan(spanArgs);
14524
14813
  try {
14525
- await userOnError?.(err);
14526
- } finally {
14527
- logError(span, err);
14814
+ const { stream, ...rest } = await doStream();
14815
+ const textChunks = [];
14816
+ const toolBlocks = [];
14817
+ let finalUsage = {};
14818
+ let finalFinishReason = void 0;
14819
+ let providerMetadata = {};
14820
+ const transformStream = new TransformStream({
14821
+ transform(chunk, controller) {
14822
+ try {
14823
+ if (chunk.type === "text-delta" && chunk.delta) {
14824
+ textChunks.push(chunk.delta);
14825
+ }
14826
+ if (chunk.type === "tool-call" || chunk.type === "tool-result") {
14827
+ toolBlocks.push(chunk);
14828
+ }
14829
+ if (chunk.type === "finish") {
14830
+ finalFinishReason = chunk.finishReason;
14831
+ finalUsage = chunk.usage || {};
14832
+ providerMetadata = chunk.providerMetadata || {};
14833
+ }
14834
+ controller.enqueue(chunk);
14835
+ } catch (error2) {
14836
+ span.log({
14837
+ error: error2 instanceof Error ? error2.message : String(error2)
14838
+ });
14839
+ span.end();
14840
+ controller.error(error2);
14841
+ }
14842
+ },
14843
+ flush() {
14844
+ try {
14845
+ const generatedText = textChunks.join("");
14846
+ let output = generatedText ? [{ type: "text", text: generatedText }] : [];
14847
+ const resultForDetection = {
14848
+ providerMetadata,
14849
+ response: rest.response,
14850
+ ...rest,
14851
+ finishReason: finalFinishReason
14852
+ };
14853
+ const metadata = {};
14854
+ const provider = detectProviderFromResult(resultForDetection);
14855
+ if (provider !== void 0) {
14856
+ metadata.provider = provider;
14857
+ }
14858
+ if (finalFinishReason !== void 0) {
14859
+ metadata.finish_reason = finalFinishReason;
14860
+ }
14861
+ const model = extractModelFromResult(resultForDetection);
14862
+ if (model !== void 0) {
14863
+ metadata.model = model;
14864
+ }
14865
+ if (toolBlocks.length > 0) {
14866
+ const toolCalls = extractToolCallsFromSteps([
14867
+ { content: toolBlocks }
14868
+ ]);
14869
+ if (toolCalls.length > 0) {
14870
+ output = buildAssistantOutputWithToolCalls(
14871
+ resultForDetection,
14872
+ toolCalls
14873
+ );
14874
+ }
14875
+ }
14876
+ span.log({
14877
+ output,
14878
+ metadata,
14879
+ metrics: normalizeUsageMetrics(
14880
+ finalUsage,
14881
+ provider,
14882
+ providerMetadata
14883
+ )
14884
+ });
14885
+ span.end();
14886
+ } catch (error2) {
14887
+ span.log({
14888
+ error: error2 instanceof Error ? error2.message : String(error2)
14889
+ });
14890
+ span.end();
14891
+ throw error2;
14892
+ }
14893
+ }
14894
+ });
14895
+ return {
14896
+ stream: stream.pipeThrough(transformStream),
14897
+ ...rest
14898
+ };
14899
+ } catch (error2) {
14900
+ span.log({
14901
+ error: error2 instanceof Error ? error2.message : String(error2)
14902
+ });
14528
14903
  span.end();
14904
+ throw error2;
14529
14905
  }
14530
- };
14531
- return withCurrent(
14532
- span,
14533
- () => original.apply(target, [args[0], wrappedOpts, ...args.slice(2)])
14534
- );
14906
+ }
14535
14907
  };
14536
14908
  }
14537
14909
 
@@ -14610,10 +14982,14 @@ function createProxy(create) {
14610
14982
  const apiPromise = Reflect.apply(target, thisArg, argArray);
14611
14983
  const onThen = function(msgOrStream) {
14612
14984
  if (!args["stream"]) {
14985
+ const ttft = getCurrentUnixTimestamp() - sspan.startTime;
14613
14986
  const event = parseEventFromMessage(msgOrStream);
14614
14987
  span.log({
14615
14988
  ...event,
14616
- metrics: event.metrics ? finalizeAnthropicTokens(event.metrics) : void 0
14989
+ metrics: event.metrics ? finalizeAnthropicTokens({
14990
+ ...event.metrics,
14991
+ time_to_first_token: ttft
14992
+ }) : { time_to_first_token: ttft }
14617
14993
  });
14618
14994
  span.end();
14619
14995
  return msgOrStream;
@@ -14829,6 +15205,11 @@ function coalesceInput(messages, system) {
14829
15205
  return input;
14830
15206
  }
14831
15207
 
15208
// src/wrappers/mastra.ts
// Stub wrapper for Mastra agents: currently a no-op that returns the agent
// unchanged. `_options` is accepted for forward/interface compatibility but
// is unused here — presumably tracing instrumentation will be added later
// (NOTE(review): confirm intent against src/wrappers/mastra.ts).
function wrapMastraAgent(agent, _options) {
  return agent;
}
15212
+
14832
15213
  // src/wrappers/claude-agent-sdk/claude-agent-sdk.ts
14833
15214
  init_logger();
14834
15215
  init_util2();
@@ -14892,7 +15273,7 @@ function wrapClaudeAgentQuery(queryFn, defaultThis) {
14892
15273
  }
14893
15274
  const lastMessage = currentMessages[currentMessages.length - 1];
14894
15275
  if (lastMessage?.message?.usage) {
14895
- const outputTokens = getNumberProperty(lastMessage.message.usage, "output_tokens") || 0;
15276
+ const outputTokens = getNumberProperty2(lastMessage.message.usage, "output_tokens") || 0;
14896
15277
  accumulatedOutputTokens += outputTokens;
14897
15278
  }
14898
15279
  currentMessages.length = 0;
@@ -15007,16 +15388,16 @@ function _extractUsageFromMessage(message) {
15007
15388
  if (!usage || typeof usage !== "object") {
15008
15389
  return metrics2;
15009
15390
  }
15010
- const inputTokens = getNumberProperty(usage, "input_tokens");
15391
+ const inputTokens = getNumberProperty2(usage, "input_tokens");
15011
15392
  if (inputTokens !== void 0) {
15012
15393
  metrics2.prompt_tokens = inputTokens;
15013
15394
  }
15014
- const outputTokens = getNumberProperty(usage, "output_tokens");
15395
+ const outputTokens = getNumberProperty2(usage, "output_tokens");
15015
15396
  if (outputTokens !== void 0) {
15016
15397
  metrics2.completion_tokens = outputTokens;
15017
15398
  }
15018
- const cacheReadTokens = getNumberProperty(usage, "cache_read_input_tokens") || 0;
15019
- const cacheCreationTokens = getNumberProperty(usage, "cache_creation_input_tokens") || 0;
15399
+ const cacheReadTokens = getNumberProperty2(usage, "cache_read_input_tokens") || 0;
15400
+ const cacheCreationTokens = getNumberProperty2(usage, "cache_creation_input_tokens") || 0;
15020
15401
  if (cacheReadTokens > 0 || cacheCreationTokens > 0) {
15021
15402
  const cacheTokens = extractAnthropicCacheTokens(
15022
15403
  cacheReadTokens,
@@ -15107,6 +15488,13 @@ function wrapClaudeAgentSDK(sdk) {
15107
15488
  }
15108
15489
  });
15109
15490
  }
15491
// Safely read a numeric property from an arbitrary value.
// Returns the number stored at `key`, or undefined when `obj` is not a
// non-null object, does not contain `key`, or holds a non-number there.
function getNumberProperty2(obj, key) {
  if (obj && typeof obj === "object" && key in obj) {
    const candidate = Reflect.get(obj, key);
    if (typeof candidate === "number") {
      return candidate;
    }
  }
  return undefined;
}
15110
15498
 
15111
15499
  // src/wrappers/google-genai.ts
15112
15500
  init_logger();