openlit 1.10.0 → 1.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (195)
  1. package/README.md +35 -1
  2. package/dist/config.d.ts +12 -4
  3. package/dist/config.js +7 -17
  4. package/dist/config.js.map +1 -1
  5. package/dist/evals/llm/anthropic.js +10 -6
  6. package/dist/evals/llm/anthropic.js.map +1 -1
  7. package/dist/evals/llm/openai.js +9 -5
  8. package/dist/evals/llm/openai.js.map +1 -1
  9. package/dist/features/__tests__/rule-engine.test.d.ts +1 -0
  10. package/dist/features/__tests__/rule-engine.test.js +146 -0
  11. package/dist/features/__tests__/rule-engine.test.js.map +1 -0
  12. package/dist/features/base.d.ts +2 -0
  13. package/dist/features/base.js +2 -0
  14. package/dist/features/base.js.map +1 -1
  15. package/dist/features/rule-engine.d.ts +6 -0
  16. package/dist/features/rule-engine.js +60 -0
  17. package/dist/features/rule-engine.js.map +1 -0
  18. package/dist/features/vault.js +1 -1
  19. package/dist/features/vault.js.map +1 -1
  20. package/dist/helpers.d.ts +93 -1
  21. package/dist/helpers.js +270 -8
  22. package/dist/helpers.js.map +1 -1
  23. package/dist/index.d.ts +6 -5
  24. package/dist/index.js +95 -50
  25. package/dist/index.js.map +1 -1
  26. package/dist/instrumentation/__tests__/anthropic-wrapper.test.js +215 -27
  27. package/dist/instrumentation/__tests__/anthropic-wrapper.test.js.map +1 -1
  28. package/dist/instrumentation/__tests__/base-wrapper.test.js +19 -23
  29. package/dist/instrumentation/__tests__/base-wrapper.test.js.map +1 -1
  30. package/dist/instrumentation/__tests__/bedrock-trace-comparison.test.d.ts +1 -0
  31. package/dist/instrumentation/__tests__/bedrock-trace-comparison.test.js +422 -0
  32. package/dist/instrumentation/__tests__/bedrock-trace-comparison.test.js.map +1 -0
  33. package/dist/instrumentation/__tests__/chroma-trace-comparison.test.js +1 -1
  34. package/dist/instrumentation/__tests__/chroma-trace-comparison.test.js.map +1 -1
  35. package/dist/instrumentation/__tests__/cohere-wrapper.test.js +150 -25
  36. package/dist/instrumentation/__tests__/cohere-wrapper.test.js.map +1 -1
  37. package/dist/instrumentation/__tests__/google-ai-trace-comparison.test.js +152 -33
  38. package/dist/instrumentation/__tests__/google-ai-trace-comparison.test.js.map +1 -1
  39. package/dist/instrumentation/__tests__/groq-trace-comparison.test.js +391 -45
  40. package/dist/instrumentation/__tests__/groq-trace-comparison.test.js.map +1 -1
  41. package/dist/instrumentation/__tests__/huggingface-trace-comparison.test.d.ts +2 -2
  42. package/dist/instrumentation/__tests__/huggingface-trace-comparison.test.js +323 -31
  43. package/dist/instrumentation/__tests__/huggingface-trace-comparison.test.js.map +1 -1
  44. package/dist/instrumentation/__tests__/langchain-wrapper.test.d.ts +1 -0
  45. package/dist/instrumentation/__tests__/langchain-wrapper.test.js +282 -0
  46. package/dist/instrumentation/__tests__/langchain-wrapper.test.js.map +1 -0
  47. package/dist/instrumentation/__tests__/milvus-trace-comparison.test.js +1 -1
  48. package/dist/instrumentation/__tests__/milvus-trace-comparison.test.js.map +1 -1
  49. package/dist/instrumentation/__tests__/mistral-trace-comparison.test.d.ts +0 -3
  50. package/dist/instrumentation/__tests__/mistral-trace-comparison.test.js +275 -68
  51. package/dist/instrumentation/__tests__/mistral-trace-comparison.test.js.map +1 -1
  52. package/dist/instrumentation/__tests__/openai-wrapper.test.js +7 -9
  53. package/dist/instrumentation/__tests__/openai-wrapper.test.js.map +1 -1
  54. package/dist/instrumentation/__tests__/qdrant-trace-comparison.test.js +1 -1
  55. package/dist/instrumentation/__tests__/qdrant-trace-comparison.test.js.map +1 -1
  56. package/dist/instrumentation/__tests__/replicate-trace-comparison.test.d.ts +2 -1
  57. package/dist/instrumentation/__tests__/replicate-trace-comparison.test.js +209 -21
  58. package/dist/instrumentation/__tests__/replicate-trace-comparison.test.js.map +1 -1
  59. package/dist/instrumentation/__tests__/together-trace-comparison.test.js +231 -51
  60. package/dist/instrumentation/__tests__/together-trace-comparison.test.js.map +1 -1
  61. package/dist/instrumentation/__tests__/vercel-ai-trace-comparison.test.d.ts +8 -0
  62. package/dist/instrumentation/__tests__/vercel-ai-trace-comparison.test.js +446 -0
  63. package/dist/instrumentation/__tests__/vercel-ai-trace-comparison.test.js.map +1 -0
  64. package/dist/instrumentation/anthropic/index.d.ts +2 -3
  65. package/dist/instrumentation/anthropic/index.js.map +1 -1
  66. package/dist/instrumentation/anthropic/wrapper.d.ts +1 -3
  67. package/dist/instrumentation/anthropic/wrapper.js +211 -91
  68. package/dist/instrumentation/anthropic/wrapper.js.map +1 -1
  69. package/dist/instrumentation/azure-ai-inference/index.d.ts +11 -0
  70. package/dist/instrumentation/azure-ai-inference/index.js +76 -0
  71. package/dist/instrumentation/azure-ai-inference/index.js.map +1 -0
  72. package/dist/instrumentation/azure-ai-inference/wrapper.d.ts +42 -0
  73. package/dist/instrumentation/azure-ai-inference/wrapper.js +515 -0
  74. package/dist/instrumentation/azure-ai-inference/wrapper.js.map +1 -0
  75. package/dist/instrumentation/base-wrapper.d.ts +2 -1
  76. package/dist/instrumentation/base-wrapper.js +35 -23
  77. package/dist/instrumentation/base-wrapper.js.map +1 -1
  78. package/dist/instrumentation/bedrock/wrapper.d.ts +21 -3
  79. package/dist/instrumentation/bedrock/wrapper.js +318 -265
  80. package/dist/instrumentation/bedrock/wrapper.js.map +1 -1
  81. package/dist/instrumentation/chroma/wrapper.js +1 -1
  82. package/dist/instrumentation/chroma/wrapper.js.map +1 -1
  83. package/dist/instrumentation/claude-agent-sdk/index.d.ts +23 -0
  84. package/dist/instrumentation/claude-agent-sdk/index.js +83 -0
  85. package/dist/instrumentation/claude-agent-sdk/index.js.map +1 -0
  86. package/dist/instrumentation/claude-agent-sdk/wrapper.d.ts +13 -0
  87. package/dist/instrumentation/claude-agent-sdk/wrapper.js +1031 -0
  88. package/dist/instrumentation/claude-agent-sdk/wrapper.js.map +1 -0
  89. package/dist/instrumentation/cohere/index.d.ts +2 -3
  90. package/dist/instrumentation/cohere/index.js.map +1 -1
  91. package/dist/instrumentation/cohere/wrapper.d.ts +1 -1
  92. package/dist/instrumentation/cohere/wrapper.js +215 -56
  93. package/dist/instrumentation/cohere/wrapper.js.map +1 -1
  94. package/dist/instrumentation/google-adk/index.d.ts +57 -0
  95. package/dist/instrumentation/google-adk/index.js +371 -0
  96. package/dist/instrumentation/google-adk/index.js.map +1 -0
  97. package/dist/instrumentation/google-adk/utils.d.ts +45 -0
  98. package/dist/instrumentation/google-adk/utils.js +663 -0
  99. package/dist/instrumentation/google-adk/utils.js.map +1 -0
  100. package/dist/instrumentation/google-adk/wrapper.d.ts +11 -0
  101. package/dist/instrumentation/google-adk/wrapper.js +391 -0
  102. package/dist/instrumentation/google-adk/wrapper.js.map +1 -0
  103. package/dist/instrumentation/google-ai/wrapper.d.ts +7 -4
  104. package/dist/instrumentation/google-ai/wrapper.js +197 -61
  105. package/dist/instrumentation/google-ai/wrapper.js.map +1 -1
  106. package/dist/instrumentation/groq/wrapper.js +137 -65
  107. package/dist/instrumentation/groq/wrapper.js.map +1 -1
  108. package/dist/instrumentation/huggingface/wrapper.js +241 -39
  109. package/dist/instrumentation/huggingface/wrapper.js.map +1 -1
  110. package/dist/instrumentation/index.d.ts +2 -2
  111. package/dist/instrumentation/index.js +64 -6
  112. package/dist/instrumentation/index.js.map +1 -1
  113. package/dist/instrumentation/langchain/index.d.ts +0 -7
  114. package/dist/instrumentation/langchain/index.js +2 -20
  115. package/dist/instrumentation/langchain/index.js.map +1 -1
  116. package/dist/instrumentation/langchain/wrapper.d.ts +35 -0
  117. package/dist/instrumentation/langchain/wrapper.js +1098 -184
  118. package/dist/instrumentation/langchain/wrapper.js.map +1 -1
  119. package/dist/instrumentation/langgraph/index.d.ts +12 -0
  120. package/dist/instrumentation/langgraph/index.js +99 -0
  121. package/dist/instrumentation/langgraph/index.js.map +1 -0
  122. package/dist/instrumentation/langgraph/wrapper.d.ts +20 -0
  123. package/dist/instrumentation/langgraph/wrapper.js +619 -0
  124. package/dist/instrumentation/langgraph/wrapper.js.map +1 -0
  125. package/dist/instrumentation/llamaindex/index.d.ts +31 -6
  126. package/dist/instrumentation/llamaindex/index.js +180 -61
  127. package/dist/instrumentation/llamaindex/index.js.map +1 -1
  128. package/dist/instrumentation/llamaindex/wrapper.d.ts +15 -3
  129. package/dist/instrumentation/llamaindex/wrapper.js +670 -179
  130. package/dist/instrumentation/llamaindex/wrapper.js.map +1 -1
  131. package/dist/instrumentation/milvus/wrapper.js +1 -1
  132. package/dist/instrumentation/milvus/wrapper.js.map +1 -1
  133. package/dist/instrumentation/mistral/wrapper.js +154 -79
  134. package/dist/instrumentation/mistral/wrapper.js.map +1 -1
  135. package/dist/instrumentation/ollama/index.js +33 -4
  136. package/dist/instrumentation/ollama/index.js.map +1 -1
  137. package/dist/instrumentation/ollama/wrapper.d.ts +28 -2
  138. package/dist/instrumentation/ollama/wrapper.js +432 -48
  139. package/dist/instrumentation/ollama/wrapper.js.map +1 -1
  140. package/dist/instrumentation/openai/index.d.ts +2 -3
  141. package/dist/instrumentation/openai/index.js.map +1 -1
  142. package/dist/instrumentation/openai/wrapper.js +293 -194
  143. package/dist/instrumentation/openai/wrapper.js.map +1 -1
  144. package/dist/instrumentation/openai-agents/index.d.ts +20 -0
  145. package/dist/instrumentation/openai-agents/index.js +174 -0
  146. package/dist/instrumentation/openai-agents/index.js.map +1 -0
  147. package/dist/instrumentation/openai-agents/processor.d.ts +35 -0
  148. package/dist/instrumentation/openai-agents/processor.js +249 -0
  149. package/dist/instrumentation/openai-agents/processor.js.map +1 -0
  150. package/dist/instrumentation/openai-agents/utils.d.ts +20 -0
  151. package/dist/instrumentation/openai-agents/utils.js +624 -0
  152. package/dist/instrumentation/openai-agents/utils.js.map +1 -0
  153. package/dist/instrumentation/pinecone/wrapper.js +2 -2
  154. package/dist/instrumentation/pinecone/wrapper.js.map +1 -1
  155. package/dist/instrumentation/qdrant/wrapper.js +1 -1
  156. package/dist/instrumentation/qdrant/wrapper.js.map +1 -1
  157. package/dist/instrumentation/replicate/wrapper.js +103 -21
  158. package/dist/instrumentation/replicate/wrapper.js.map +1 -1
  159. package/dist/instrumentation/strands/index.d.ts +21 -0
  160. package/dist/instrumentation/strands/index.js +83 -0
  161. package/dist/instrumentation/strands/index.js.map +1 -0
  162. package/dist/instrumentation/strands/processor.d.ts +45 -0
  163. package/dist/instrumentation/strands/processor.js +545 -0
  164. package/dist/instrumentation/strands/processor.js.map +1 -0
  165. package/dist/instrumentation/strands/utils.d.ts +24 -0
  166. package/dist/instrumentation/strands/utils.js +360 -0
  167. package/dist/instrumentation/strands/utils.js.map +1 -0
  168. package/dist/instrumentation/together/wrapper.js +125 -51
  169. package/dist/instrumentation/together/wrapper.js.map +1 -1
  170. package/dist/instrumentation/vercel-ai/wrapper.d.ts +28 -2
  171. package/dist/instrumentation/vercel-ai/wrapper.js +314 -164
  172. package/dist/instrumentation/vercel-ai/wrapper.js.map +1 -1
  173. package/dist/llm/anthropic.js +10 -6
  174. package/dist/llm/anthropic.js.map +1 -1
  175. package/dist/llm/openai.js +9 -5
  176. package/dist/llm/openai.js.map +1 -1
  177. package/dist/otel/__tests__/metrics.test.js +16 -27
  178. package/dist/otel/__tests__/metrics.test.js.map +1 -1
  179. package/dist/otel/events.d.ts +11 -0
  180. package/dist/otel/events.js +74 -0
  181. package/dist/otel/events.js.map +1 -0
  182. package/dist/otel/metrics.d.ts +5 -6
  183. package/dist/otel/metrics.js +66 -48
  184. package/dist/otel/metrics.js.map +1 -1
  185. package/dist/otel/tracing.d.ts +6 -2
  186. package/dist/otel/tracing.js +71 -24
  187. package/dist/otel/tracing.js.map +1 -1
  188. package/dist/otel/utils.d.ts +11 -0
  189. package/dist/otel/utils.js +34 -0
  190. package/dist/otel/utils.js.map +1 -0
  191. package/dist/semantic-convention.d.ts +44 -5
  192. package/dist/semantic-convention.js +51 -8
  193. package/dist/semantic-convention.js.map +1 -1
  194. package/dist/types.d.ts +74 -22
  195. package/package.json +41 -9
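
The diff below is for package/dist/instrumentation/vercel-ai/wrapper.js (item 171 above, +314 -164). Its most visible change is span naming: instead of naming spans after the patched endpoint (ai.generateText), the wrapper now follows the OpenTelemetry GenAI convention of "{operation} {model}". A minimal sketch of that convention; the model id here is illustrative, not from the diff:

// Sketch of the new span-name convention (illustrative values).
const params = { model: { modelId: 'gpt-4o-mini' } };  // first argument to generateText/streamText
const modelId = params.model?.modelId || 'unknown';    // fallback used by the wrapper
const spanName = `chat ${modelId}`;                    // GEN_AI_OPERATION_TYPE_CHAT + model => "chat gpt-4o-mini"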
@@ -8,106 +8,71 @@ const config_1 = __importDefault(require("../../config"));
  const helpers_1 = __importDefault(require("../../helpers"));
  const semantic_convention_1 = __importDefault(require("../../semantic-convention"));
  const base_wrapper_1 = __importDefault(require("../base-wrapper"));
+ function spanCreationAttrs(operationName, requestModel) {
+ return {
+ [semantic_convention_1.default.GEN_AI_OPERATION]: operationName,
+ [semantic_convention_1.default.GEN_AI_PROVIDER_NAME_OTEL]: VercelAIWrapper.aiSystem,
+ [semantic_convention_1.default.GEN_AI_REQUEST_MODEL]: requestModel,
+ [semantic_convention_1.default.SERVER_ADDRESS]: VercelAIWrapper.serverAddress,
+ [semantic_convention_1.default.SERVER_PORT]: VercelAIWrapper.serverPort,
+ };
+ }
  class VercelAIWrapper extends base_wrapper_1.default {
- static _getProviderFromModel(model) {
- if (!model)
- return VercelAIWrapper.aiSystem;
- const provider = model.provider || '';
- if (provider.startsWith('openai'))
- return semantic_convention_1.default.GEN_AI_SYSTEM_OPENAI;
- if (provider.startsWith('anthropic'))
- return semantic_convention_1.default.GEN_AI_SYSTEM_ANTHROPIC;
- if (provider.startsWith('google'))
- return semantic_convention_1.default.GEN_AI_SYSTEM_VERTEXAI;
- if (provider.startsWith('mistral'))
- return semantic_convention_1.default.GEN_AI_SYSTEM_MISTRAL;
- if (provider.startsWith('cohere'))
- return semantic_convention_1.default.GEN_AI_SYSTEM_COHERE;
- if (provider.startsWith('amazon') || provider.startsWith('aws'))
- return semantic_convention_1.default.GEN_AI_SYSTEM_AWS_BEDROCK;
- return VercelAIWrapper.aiSystem;
- }
  static _patchGenerateText(tracer) {
- const genAIEndpoint = 'ai.generateText';
+ const genAIEndpoint = 'vercel_ai.generateText';
  return (originalMethod) => {
  return async function (...args) {
- const span = tracer.startSpan(genAIEndpoint, { kind: api_1.SpanKind.CLIENT });
- return api_1.context.with(api_1.trace.setSpan(api_1.context.active(), span), async () => {
- let metricParams;
- try {
- const response = await originalMethod.apply(this, args);
- const params = args[0] || {};
- const model = params.model;
- const modelId = model?.modelId || 'unknown';
- const aiSystem = VercelAIWrapper._getProviderFromModel(model);
- const pricingInfo = await config_1.default.updatePricingJson(config_1.default.pricing_json);
- const cost = helpers_1.default.getChatModelCost(modelId, pricingInfo, response.usage?.promptTokens || 0, response.usage?.completionTokens || 0);
- VercelAIWrapper.setBaseSpanAttributes(span, { genAIEndpoint, model: modelId, cost, aiSystem });
- span.setAttribute(semantic_convention_1.default.GEN_AI_OPERATION, semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT);
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_IS_STREAM, false);
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_MAX_TOKENS, params.maxTokens || -1);
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_TEMPERATURE, params.temperature ?? 1);
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_TOP_P, params.topP ?? 1);
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_MODEL, modelId);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_INPUT_TOKENS, response.usage?.promptTokens || 0);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_OUTPUT_TOKENS, response.usage?.completionTokens || 0);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_TOTAL_TOKENS, response.usage?.totalTokens || 0);
- span.setAttribute(semantic_convention_1.default.GEN_AI_CLIENT_TOKEN_USAGE, response.usage?.totalTokens || 0);
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_FINISH_REASON, [response.finishReason || 'stop']);
- span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_TYPE, semantic_convention_1.default.GEN_AI_OUTPUT_TYPE_TEXT);
- if (response.toolCalls?.length > 0) {
- const toolNames = response.toolCalls.map((t) => t.toolName || '').filter(Boolean);
- const toolArgs = response.toolCalls.map((t) => JSON.stringify(t.args || {}));
- if (toolNames.length > 0)
- span.setAttribute(semantic_convention_1.default.GEN_AI_TOOL_NAME, toolNames.join(', '));
- if (toolArgs.length > 0)
- span.setAttribute(semantic_convention_1.default.GEN_AI_TOOL_CALL_ARGUMENTS, toolArgs);
- }
- if (config_1.default.traceContent) {
- const messages = params.messages || (params.prompt ? [{ role: 'user', content: params.prompt }] : []);
- span.setAttribute(semantic_convention_1.default.GEN_AI_INPUT_MESSAGES, helpers_1.default.buildInputMessages(messages));
- span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_MESSAGES, helpers_1.default.buildOutputMessages(response.text || '', response.finishReason || 'stop', response.toolCalls));
- }
- metricParams = { genAIEndpoint, model: modelId, cost, aiSystem };
- return response;
- }
- catch (e) {
- helpers_1.default.handleException(span, e);
- throw e;
- }
- finally {
- span.end();
- if (metricParams)
- base_wrapper_1.default.recordMetrics(span, metricParams);
- }
+ const params = args[0] || {};
+ const modelId = params.model?.modelId || 'unknown';
+ const spanName = `${semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT} ${modelId}`;
+ const span = tracer.startSpan(spanName, {
+ kind: api_1.SpanKind.CLIENT,
+ attributes: spanCreationAttrs(semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT, modelId),
+ });
+ return api_1.context
+ .with(api_1.trace.setSpan(api_1.context.active(), span), async () => {
+ return originalMethod.apply(this, args);
+ })
+ .then((response) => {
+ return VercelAIWrapper._chatComplete({
+ args,
+ genAIEndpoint,
+ response,
+ span,
+ outputType: semantic_convention_1.default.GEN_AI_OUTPUT_TYPE_TEXT,
+ });
+ })
+ .catch((e) => {
+ helpers_1.default.handleException(span, e);
+ base_wrapper_1.default.recordMetrics(span, {
+ genAIEndpoint,
+ model: modelId,
+ aiSystem: VercelAIWrapper.aiSystem,
+ serverAddress: VercelAIWrapper.serverAddress,
+ serverPort: VercelAIWrapper.serverPort,
+ errorType: e?.constructor?.name || '_OTHER',
+ });
+ span.end();
+ throw e;
  });
  };
  };
  }
  static _patchStreamText(tracer) {
- const genAIEndpoint = 'ai.streamText';
+ const genAIEndpoint = 'vercel_ai.streamText';
  return (originalMethod) => {
  return async function (...args) {
- const span = tracer.startSpan(genAIEndpoint, { kind: api_1.SpanKind.CLIENT });
+ const params = args[0] || {};
+ const modelId = params.model?.modelId || 'unknown';
+ const spanName = `${semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT} ${modelId}`;
+ const span = tracer.startSpan(spanName, {
+ kind: api_1.SpanKind.CLIENT,
+ attributes: spanCreationAttrs(semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT, modelId),
+ });
  const startTime = Date.now();
  const chunkTimestamps = [];
  try {
- const response = await originalMethod.apply(this, args);
- const params = args[0] || {};
- const model = params.model;
- const modelId = model?.modelId || 'unknown';
- const aiSystem = VercelAIWrapper._getProviderFromModel(model);
- // Set request attributes immediately
- span.setAttribute(semantic_convention_1.default.GEN_AI_OPERATION, semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT);
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_IS_STREAM, true);
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_MAX_TOKENS, params.maxTokens || -1);
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_TEMPERATURE, params.temperature ?? 1);
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_TOP_P, params.topP ?? 1);
- if (config_1.default.traceContent) {
- const messages = params.messages || (params.prompt ? [{ role: 'user', content: params.prompt }] : []);
- span.setAttribute(semantic_convention_1.default.GEN_AI_INPUT_MESSAGES, helpers_1.default.buildInputMessages(messages));
- }
- // Intercept textStream to capture per-chunk timestamps for TTFT/TBT
+ const response = await api_1.context.with(api_1.trace.setSpan(api_1.context.active(), span), async () => originalMethod.apply(this, args));
  try {
  const originalTextStream = response.textStream;
  if (originalTextStream && typeof originalTextStream.getReader === 'function') {
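
The hunk above replaces the old try/catch/finally body of _patchGenerateText with a promise chain: the span starts with its creation-time attributes, the original method runs inside the span's context, and success and failure are handled in .then()/.catch(). A reduced sketch of that wrap pattern, assuming only @opentelemetry/api; onSuccess/onError are placeholder hooks standing in for _chatComplete() and the error-metrics branch:

// Reduced sketch of the wrap pattern used by the new _patchGenerateText (placeholder hooks).
const { context, trace, SpanKind } = require('@opentelemetry/api');

function wrapWithSpan(tracer, spanName, attributes, originalMethod, onSuccess, onError) {
  return async function (...args) {
    // Attributes known before the call (operation, model, server) go on the span at creation.
    const span = tracer.startSpan(spanName, { kind: SpanKind.CLIENT, attributes });
    return context
      .with(trace.setSpan(context.active(), span), async () => originalMethod.apply(this, args))
      .then((response) => {
        onSuccess(span, response); // real wrapper: _chatComplete() records attributes/metrics and ends the span
        return response;
      })
      .catch((e) => {
        onError(span, e);          // real wrapper: handleException() plus error metrics
        span.end();
        throw e;                   // rethrow so the caller still sees the failure
      });
  };
}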
@@ -135,11 +100,11 @@ class VercelAIWrapper extends base_wrapper_1.default {
  }
  }
  catch (_) {
- // Stream interception failed; TTFT/TBT won't be captured from textStream
+ // Stream interception is best-effort; TTFT/TBT won't be captured
  }
- // Observe stream completion via usage promise
  Promise.resolve(response.usage)
  .then(async (usage) => {
+ let metricParams;
  try {
  const ttft = chunkTimestamps.length > 0 ? (chunkTimestamps[0] - startTime) / 1000 : 0;
  let tbt = 0;
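
For streaming, the per-chunk timestamps drive two latency metrics: TTFT (time to first token) is the gap from call start to the first chunk, and TBT (time between tokens) is the mean inter-chunk gap. A worked example of the computation above, with illustrative timestamps in milliseconds:

// Worked example of the TTFT/TBT math (timestamps are illustrative).
const startTime = 1000;
const chunkTimestamps = [1200, 1350, 1500];
const ttft = chunkTimestamps.length > 0 ? (chunkTimestamps[0] - startTime) / 1000 : 0; // 0.2 s
const timeDiffs = chunkTimestamps.slice(1).map((t, i) => t - chunkTimestamps[i]);      // [150, 150]
const tbt = timeDiffs.reduce((a, b) => a + b, 0) / timeDiffs.length / 1000;            // 0.15 s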
@@ -147,42 +112,65 @@ class VercelAIWrapper extends base_wrapper_1.default {
  const timeDiffs = chunkTimestamps.slice(1).map((t, i) => t - chunkTimestamps[i]);
  tbt = timeDiffs.reduce((a, b) => a + b, 0) / timeDiffs.length / 1000;
  }
- const pricingInfo = await config_1.default.updatePricingJson(config_1.default.pricing_json);
- const cost = helpers_1.default.getChatModelCost(modelId, pricingInfo, usage?.promptTokens || 0, usage?.completionTokens || 0);
- VercelAIWrapper.setBaseSpanAttributes(span, { genAIEndpoint, model: modelId, cost, aiSystem });
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_MODEL, modelId);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_INPUT_TOKENS, usage?.promptTokens || 0);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_OUTPUT_TOKENS, usage?.completionTokens || 0);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_TOTAL_TOKENS, usage?.totalTokens || 0);
- span.setAttribute(semantic_convention_1.default.GEN_AI_CLIENT_TOKEN_USAGE, usage?.totalTokens || 0);
- span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_TYPE, semantic_convention_1.default.GEN_AI_OUTPUT_TYPE_TEXT);
- if (ttft > 0)
- span.setAttribute(semantic_convention_1.default.GEN_AI_SERVER_TTFT, ttft);
- if (tbt > 0)
- span.setAttribute(semantic_convention_1.default.GEN_AI_SERVER_TBT, tbt);
  const finishReason = await Promise.resolve(response.finishReason).catch(() => 'stop');
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_FINISH_REASON, [finishReason || 'stop']);
- if (config_1.default.traceContent) {
- const text = await Promise.resolve(response.text).catch(() => '');
- span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_MESSAGES, helpers_1.default.buildOutputMessages(text || '', finishReason || 'stop'));
- }
- base_wrapper_1.default.recordMetrics(span, { genAIEndpoint, model: modelId, cost, aiSystem });
+ const text = await Promise.resolve(response.text).catch(() => '');
+ const toolCalls = await Promise.resolve(response.toolCalls).catch(() => undefined);
+ const responseDetails = await Promise.resolve(response.response).catch(() => undefined);
+ const result = {
+ usage: {
+ promptTokens: usage?.promptTokens || 0,
+ completionTokens: usage?.completionTokens || 0,
+ },
+ finishReason: finishReason || 'stop',
+ text: text || '',
+ toolCalls,
+ response: responseDetails,
+ };
+ metricParams = await VercelAIWrapper._chatCommonSetter({
+ args,
+ genAIEndpoint,
+ result,
+ span,
+ isStream: true,
+ outputType: semantic_convention_1.default.GEN_AI_OUTPUT_TYPE_TEXT,
+ ttft,
+ tbt,
+ });
  }
  catch (e) {
  helpers_1.default.handleException(span, e);
  }
  finally {
  span.end();
+ if (metricParams) {
+ base_wrapper_1.default.recordMetrics(span, metricParams);
+ }
  }
  })
  .catch((e) => {
  helpers_1.default.handleException(span, e);
+ base_wrapper_1.default.recordMetrics(span, {
+ genAIEndpoint,
+ model: modelId,
+ aiSystem: VercelAIWrapper.aiSystem,
+ serverAddress: VercelAIWrapper.serverAddress,
+ serverPort: VercelAIWrapper.serverPort,
+ errorType: e?.constructor?.name || '_OTHER',
+ });
  span.end();
  });
  return response;
  }
  catch (e) {
  helpers_1.default.handleException(span, e);
+ base_wrapper_1.default.recordMetrics(span, {
+ genAIEndpoint,
+ model: modelId,
+ aiSystem: VercelAIWrapper.aiSystem,
+ serverAddress: VercelAIWrapper.serverAddress,
+ serverPort: VercelAIWrapper.serverPort,
+ errorType: e?.constructor?.name || '_OTHER',
+ });
  span.end();
  throw e;
  }
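
Every error path in the rewrite now records metrics before the span ends, tagged with an error type derived from the exception's class name ('_OTHER' is the fallback when none is available). A sketch of that attribute set; the aiSystem string is an assumption based on the old static ('vercel_ai'), while the server values mirror the statics set at the bottom of the file:

// Sketch of the error-path metric attributes recorded in each .catch() above.
function errorMetricParams(e, genAIEndpoint, modelId) {
  return {
    genAIEndpoint,                               // e.g. 'vercel_ai.streamText'
    model: modelId,
    aiSystem: 'vercel_ai',                       // VercelAIWrapper.aiSystem (GEN_AI_SYSTEM_VERCEL_AI; value assumed)
    serverAddress: 'vercel.ai',                  // VercelAIWrapper.serverAddress
    serverPort: 443,                             // VercelAIWrapper.serverPort
    errorType: e?.constructor?.name || '_OTHER', // e.g. 'TypeError'
  };
}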
@@ -190,83 +178,103 @@ class VercelAIWrapper extends base_wrapper_1.default {
  };
  }
  static _patchGenerateObject(tracer) {
- const genAIEndpoint = 'ai.generateObject';
+ const genAIEndpoint = 'vercel_ai.generateObject';
  return (originalMethod) => {
  return async function (...args) {
- const span = tracer.startSpan(genAIEndpoint, { kind: api_1.SpanKind.CLIENT });
- return api_1.context.with(api_1.trace.setSpan(api_1.context.active(), span), async () => {
- let metricParams;
- try {
- const response = await originalMethod.apply(this, args);
- const params = args[0] || {};
- const model = params.model;
- const modelId = model?.modelId || 'unknown';
- const aiSystem = VercelAIWrapper._getProviderFromModel(model);
- const pricingInfo = await config_1.default.updatePricingJson(config_1.default.pricing_json);
- const cost = helpers_1.default.getChatModelCost(modelId, pricingInfo, response.usage?.promptTokens || 0, response.usage?.completionTokens || 0);
- VercelAIWrapper.setBaseSpanAttributes(span, { genAIEndpoint, model: modelId, cost, aiSystem });
- span.setAttribute(semantic_convention_1.default.GEN_AI_OPERATION, semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT);
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_IS_STREAM, false);
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_MAX_TOKENS, params.maxTokens || -1);
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_TEMPERATURE, params.temperature ?? 1);
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_MODEL, modelId);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_INPUT_TOKENS, response.usage?.promptTokens || 0);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_OUTPUT_TOKENS, response.usage?.completionTokens || 0);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_TOTAL_TOKENS, response.usage?.totalTokens || 0);
- span.setAttribute(semantic_convention_1.default.GEN_AI_CLIENT_TOKEN_USAGE, response.usage?.totalTokens || 0);
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_FINISH_REASON, [response.finishReason || 'stop']);
- span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_TYPE, semantic_convention_1.default.GEN_AI_OUTPUT_TYPE_JSON);
- if (config_1.default.traceContent) {
- const messages = params.messages || (params.prompt ? [{ role: 'user', content: params.prompt }] : []);
- span.setAttribute(semantic_convention_1.default.GEN_AI_INPUT_MESSAGES, helpers_1.default.buildInputMessages(messages));
- span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_MESSAGES, helpers_1.default.buildOutputMessages(JSON.stringify(response.object || {}), response.finishReason || 'stop'));
- }
- metricParams = { genAIEndpoint, model: modelId, cost, aiSystem };
- return response;
- }
- catch (e) {
- helpers_1.default.handleException(span, e);
- throw e;
- }
- finally {
- span.end();
- if (metricParams)
- base_wrapper_1.default.recordMetrics(span, metricParams);
- }
+ const params = args[0] || {};
+ const modelId = params.model?.modelId || 'unknown';
+ const spanName = `${semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT} ${modelId}`;
+ const span = tracer.startSpan(spanName, {
+ kind: api_1.SpanKind.CLIENT,
+ attributes: spanCreationAttrs(semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT, modelId),
+ });
+ return api_1.context
+ .with(api_1.trace.setSpan(api_1.context.active(), span), async () => {
+ return originalMethod.apply(this, args);
+ })
+ .then((response) => {
+ const result = {
+ ...response,
+ text: JSON.stringify(response.object || {}),
+ };
+ return VercelAIWrapper._chatComplete({
+ args,
+ genAIEndpoint,
+ response,
+ span,
+ outputType: semantic_convention_1.default.GEN_AI_OUTPUT_TYPE_JSON,
+ resultOverride: result,
+ });
+ })
+ .catch((e) => {
+ helpers_1.default.handleException(span, e);
+ base_wrapper_1.default.recordMetrics(span, {
+ genAIEndpoint,
+ model: modelId,
+ aiSystem: VercelAIWrapper.aiSystem,
+ serverAddress: VercelAIWrapper.serverAddress,
+ serverPort: VercelAIWrapper.serverPort,
+ errorType: e?.constructor?.name || '_OTHER',
+ });
+ span.end();
+ throw e;
  });
  };
  };
  }
  static _patchEmbed(tracer) {
- const genAIEndpoint = 'ai.embed';
+ const genAIEndpoint = 'vercel_ai.embed';
  return (originalMethod) => {
  return async function (...args) {
- const span = tracer.startSpan(genAIEndpoint, { kind: api_1.SpanKind.CLIENT });
+ const params = args[0] || {};
+ const modelId = params.model?.modelId || 'unknown';
+ const spanName = `${semantic_convention_1.default.GEN_AI_OPERATION_TYPE_EMBEDDING} ${modelId}`;
+ const span = tracer.startSpan(spanName, {
+ kind: api_1.SpanKind.CLIENT,
+ attributes: spanCreationAttrs(semantic_convention_1.default.GEN_AI_OPERATION_TYPE_EMBEDDING, modelId),
+ });
  return api_1.context.with(api_1.trace.setSpan(api_1.context.active(), span), async () => {
+ const captureContent = config_1.default.captureMessageContent;
  let metricParams;
  try {
  const response = await originalMethod.apply(this, args);
- const params = args[0] || {};
- const model = params.model;
- const modelId = model?.modelId || 'unknown';
- const aiSystem = VercelAIWrapper._getProviderFromModel(model);
- const pricingInfo = await config_1.default.updatePricingJson(config_1.default.pricing_json);
- const cost = helpers_1.default.getEmbedModelCost(modelId, pricingInfo, response.usage?.tokens || 0);
- VercelAIWrapper.setBaseSpanAttributes(span, { genAIEndpoint, model: modelId, cost, aiSystem });
- span.setAttribute(semantic_convention_1.default.GEN_AI_OPERATION, semantic_convention_1.default.GEN_AI_OPERATION_TYPE_EMBEDDING);
+ const pricingInfo = config_1.default.pricingInfo || {};
+ const inputTokens = response.usage?.tokens || 0;
+ const cost = helpers_1.default.getEmbedModelCost(modelId, pricingInfo, inputTokens);
+ VercelAIWrapper.setBaseSpanAttributes(span, {
+ genAIEndpoint,
+ model: modelId,
+ cost,
+ aiSystem: VercelAIWrapper.aiSystem,
+ serverAddress: VercelAIWrapper.serverAddress,
+ serverPort: VercelAIWrapper.serverPort,
+ });
  span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_IS_STREAM, false);
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_MODEL, modelId);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_INPUT_TOKENS, response.usage?.tokens || 0);
- span.setAttribute(semantic_convention_1.default.GEN_AI_CLIENT_TOKEN_USAGE, response.usage?.tokens || 0);
- if (config_1.default.traceContent && params.value !== undefined) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_INPUT_TOKENS, inputTokens);
+ if (captureContent && params.value !== undefined) {
  const inputStr = typeof params.value === 'string' ? params.value : JSON.stringify(params.value);
  span.setAttribute(semantic_convention_1.default.GEN_AI_INPUT_MESSAGES, inputStr);
  }
- metricParams = { genAIEndpoint, model: modelId, cost, aiSystem };
+ metricParams = {
+ genAIEndpoint,
+ model: modelId,
+ cost,
+ aiSystem: VercelAIWrapper.aiSystem,
+ serverAddress: VercelAIWrapper.serverAddress,
+ serverPort: VercelAIWrapper.serverPort,
+ };
  return response;
  }
  catch (e) {
  helpers_1.default.handleException(span, e);
+ metricParams = {
+ genAIEndpoint,
+ model: modelId,
+ aiSystem: VercelAIWrapper.aiSystem,
+ serverAddress: VercelAIWrapper.serverAddress,
+ serverPort: VercelAIWrapper.serverPort,
+ errorType: e?.constructor?.name || '_OTHER',
+ };
  throw e;
  }
  finally {
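
Note how _patchGenerateObject above reuses the shared chat completion path: the structured result is serialized to text before being handed to _chatComplete, and the span's output type is marked JSON rather than text. A small illustration with made-up values:

// How a structured result is adapted for the shared chat path (values illustrative).
const response = { object: { city: 'Paris' }, usage: { promptTokens: 12, completionTokens: 9 } };
const result = { ...response, text: JSON.stringify(response.object || {}) };
// result.text === '{"city":"Paris"}'; the span gets GEN_AI_OUTPUT_TYPE_JSON instead of _TEXT.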
@@ -278,7 +286,149 @@ class VercelAIWrapper extends base_wrapper_1.default {
  };
  };
  }
+ static async _chatComplete({ args, genAIEndpoint, response, span, outputType, resultOverride, }) {
+ let metricParams;
+ try {
+ metricParams = await VercelAIWrapper._chatCommonSetter({
+ args,
+ genAIEndpoint,
+ result: resultOverride || response,
+ span,
+ isStream: false,
+ outputType,
+ });
+ return response;
+ }
+ catch (e) {
+ helpers_1.default.handleException(span, e);
+ throw e;
+ }
+ finally {
+ span.end();
+ if (metricParams) {
+ base_wrapper_1.default.recordMetrics(span, metricParams);
+ }
+ }
+ }
+ static async _chatCommonSetter({ args, genAIEndpoint, result, span, isStream, outputType, ttft = 0, tbt = 0, }) {
+ const captureContent = config_1.default.captureMessageContent;
+ const params = args[0] || {};
+ const modelId = params.model?.modelId || 'unknown';
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_TEMPERATURE, params.temperature ?? 1);
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_TOP_P, params.topP ?? 1);
+ if (params.maxTokens != null) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_MAX_TOKENS, params.maxTokens);
+ }
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_IS_STREAM, isStream);
+ if (params.seed != null) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_SEED, Number(params.seed));
+ }
+ if (params.frequencyPenalty) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_FREQUENCY_PENALTY, params.frequencyPenalty);
+ }
+ if (params.presencePenalty) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_PRESENCE_PENALTY, params.presencePenalty);
+ }
+ if (params.stopSequences) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_STOP_SEQUENCES, Array.isArray(params.stopSequences) ? params.stopSequences : [params.stopSequences]);
+ }
+ if (params.topK != null) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_TOP_K, params.topK);
+ }
+ const messages = params.messages || (params.prompt ? [{ role: 'user', content: params.prompt }] : []);
+ if (captureContent) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_INPUT_MESSAGES, helpers_1.default.buildInputMessages(messages, params.system));
+ }
+ const responseId = result.response?.id;
+ const responseModel = result.response?.modelId || modelId;
+ const inputTokens = result.usage?.promptTokens || 0;
+ const outputTokens = result.usage?.completionTokens || 0;
+ const pricingInfo = config_1.default.pricingInfo || {};
+ const cost = helpers_1.default.getChatModelCost(modelId, pricingInfo, inputTokens, outputTokens);
+ VercelAIWrapper.setBaseSpanAttributes(span, {
+ genAIEndpoint,
+ model: modelId,
+ cost,
+ aiSystem: VercelAIWrapper.aiSystem,
+ serverAddress: VercelAIWrapper.serverAddress,
+ serverPort: VercelAIWrapper.serverPort,
+ });
+ span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_MODEL, responseModel);
+ if (responseId) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_ID, responseId);
+ }
+ span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_INPUT_TOKENS, inputTokens);
+ span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_OUTPUT_TOKENS, outputTokens);
+ const finishReason = result.finishReason || 'stop';
+ span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_FINISH_REASON, [finishReason]);
+ span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_TYPE, outputType);
+ if (ttft > 0) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_SERVER_TTFT, ttft);
+ }
+ if (tbt > 0) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_SERVER_TBT, tbt);
+ }
+ if (result.toolCalls?.length > 0) {
+ const toolNames = result.toolCalls.map((t) => t.toolName || '').filter(Boolean);
+ const toolIds = result.toolCalls.map((t) => t.toolCallId || '').filter(Boolean);
+ const toolArgs = result.toolCalls.map((t) => JSON.stringify(t.args || {})).filter(Boolean);
+ if (toolNames.length > 0) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_TOOL_NAME, toolNames.join(', '));
+ }
+ if (toolIds.length > 0) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_TOOL_CALL_ID, toolIds.join(', '));
+ }
+ if (toolArgs.length > 0) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_TOOL_ARGS, toolArgs.join(', '));
+ }
+ }
+ const normalizedToolCalls = result.toolCalls?.map((t) => ({
+ id: t.toolCallId || '',
+ name: t.toolName || '',
+ arguments: t.args || {},
+ }));
+ let inputMessagesJson;
+ let outputMessagesJson;
+ if (captureContent) {
+ outputMessagesJson = helpers_1.default.buildOutputMessages(result.text || '', finishReason, normalizedToolCalls);
+ span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_MESSAGES, outputMessagesJson);
+ inputMessagesJson = helpers_1.default.buildInputMessages(messages, params.system);
+ }
+ if (!config_1.default.disableEvents) {
+ const eventAttrs = {
+ [semantic_convention_1.default.GEN_AI_OPERATION]: semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT,
+ [semantic_convention_1.default.GEN_AI_REQUEST_MODEL]: modelId,
+ [semantic_convention_1.default.GEN_AI_RESPONSE_MODEL]: responseModel,
+ [semantic_convention_1.default.SERVER_ADDRESS]: VercelAIWrapper.serverAddress,
+ [semantic_convention_1.default.SERVER_PORT]: VercelAIWrapper.serverPort,
+ [semantic_convention_1.default.GEN_AI_RESPONSE_FINISH_REASON]: [finishReason],
+ [semantic_convention_1.default.GEN_AI_OUTPUT_TYPE]: outputType,
+ [semantic_convention_1.default.GEN_AI_USAGE_INPUT_TOKENS]: inputTokens,
+ [semantic_convention_1.default.GEN_AI_USAGE_OUTPUT_TOKENS]: outputTokens,
+ };
+ if (responseId) {
+ eventAttrs[semantic_convention_1.default.GEN_AI_RESPONSE_ID] = responseId;
+ }
+ if (captureContent) {
+ if (inputMessagesJson)
+ eventAttrs[semantic_convention_1.default.GEN_AI_INPUT_MESSAGES] = inputMessagesJson;
+ if (outputMessagesJson)
+ eventAttrs[semantic_convention_1.default.GEN_AI_OUTPUT_MESSAGES] = outputMessagesJson;
+ }
+ helpers_1.default.emitInferenceEvent(span, eventAttrs);
+ }
+ return {
+ genAIEndpoint,
+ model: modelId,
+ cost,
+ aiSystem: VercelAIWrapper.aiSystem,
+ serverAddress: VercelAIWrapper.serverAddress,
+ serverPort: VercelAIWrapper.serverPort,
+ };
+ }
  }
- VercelAIWrapper.aiSystem = 'vercel_ai';
+ VercelAIWrapper.aiSystem = semantic_convention_1.default.GEN_AI_SYSTEM_VERCEL_AI;
+ VercelAIWrapper.serverAddress = 'vercel.ai';
+ VercelAIWrapper.serverPort = 443;
  exports.default = VercelAIWrapper;
  //# sourceMappingURL=wrapper.js.map
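
One detail of _chatCommonSetter worth calling out: the Vercel AI SDK's tool-call shape ({ toolCallId, toolName, args }) is normalized to the { id, name, arguments } shape that helpers.buildOutputMessages expects. A small illustration of that mapping, with made-up input:

// Tool-call normalization as done in _chatCommonSetter (input values illustrative).
const toolCalls = [{ toolCallId: 'call_1', toolName: 'getWeather', args: { city: 'Paris' } }];
const normalized = toolCalls.map((t) => ({
  id: t.toolCallId || '',
  name: t.toolName || '',
  arguments: t.args || {},
}));
// => [{ id: 'call_1', name: 'getWeather', arguments: { city: 'Paris' } }]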