@databuddy/sdk 2.3.24 → 2.3.25

This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
@@ -1,5 +1,58 @@
-import { LanguageModelV2 } from '@ai-sdk/provider';
+import { LanguageModelV2, LanguageModelV3 } from '@ai-sdk/provider';
 
+/**
+ * Type guards and union types for Vercel AI SDK V2/V3 support
+ *
+ * Adapted from PostHog's AI SDK implementation:
+ * https://github.com/PostHog/posthog-js/tree/main/packages/ai
+ */
+
+type LanguageModel = LanguageModelV2 | LanguageModelV3;
+
+/**
+ * Content types for input/output arrays
+ */
+type MessageContent = {
+    type: "text";
+    text: string;
+} | {
+    type: "reasoning";
+    text: string;
+} | {
+    type: "tool-call";
+    id: string;
+    function: {
+        name: string;
+        arguments: string;
+    };
+} | {
+    type: "tool-result";
+    toolCallId: string;
+    toolName: string;
+    output: unknown;
+    isError?: boolean;
+} | {
+    type: "file";
+    file: string;
+    mediaType: string;
+} | {
+    type: "image";
+    image: string;
+    mediaType: string;
+} | {
+    type: "source";
+    sourceType: string;
+    id: string;
+    url: string;
+    title: string;
+};
+/**
+ * Message format for input/output
+ */
+interface AIMessage {
+    role: string;
+    content: string | MessageContent[];
+}
 /**
  * Token usage from AI model calls
  */
@@ -8,6 +61,9 @@ interface TokenUsage {
     outputTokens: number;
     totalTokens: number;
     cachedInputTokens?: number;
+    cacheCreationInputTokens?: number;
+    reasoningTokens?: number;
+    webSearchCount?: number;
 }
 /**
  * Cost breakdown from TokenLens
@@ -24,6 +80,7 @@ interface ToolCallInfo {
     toolCallCount: number;
     toolResultCount: number;
     toolCallNames: string[];
+    availableTools?: string[];
 }
 /**
  * Error information for failed AI calls
@@ -38,15 +95,20 @@ interface AIError {
  */
 interface AICall {
     timestamp: Date;
+    traceId: string;
     type: "generate" | "stream";
     model: string;
     provider: string;
     finishReason?: string;
+    input: AIMessage[];
+    output: AIMessage[];
     usage: TokenUsage;
     cost: TokenCost;
     tools: ToolCallInfo;
     error?: AIError;
     durationMs: number;
+    httpStatus?: number;
+    params?: Record<string, unknown>;
 }
 /**
  * Transport function for sending log entries
@@ -81,6 +143,16 @@ interface DatabuddyLLMOptions {
      * @default true
      */
     computeCosts?: boolean;
+    /**
+     * Privacy mode - when true, input/output content is not captured
+     * @default false
+     */
+    privacyMode?: boolean;
+    /**
+     * Maximum size for input/output content in bytes
+     * @default 1048576 (1MB)
+     */
+    maxContentSize?: number;
     /**
      * Called on successful AI calls
      */
@@ -99,11 +171,24 @@ interface TrackOptions {
      * If not provided, uses the transport from DatabuddyLLM instance
      */
     transport?: Transport;
+    /**
+     * Trace ID to link related calls together
+     */
+    traceId?: string;
+    /**
+     * Client ID for this specific call (overrides instance-level clientId)
+     */
+    clientId?: string;
     /**
      * Whether to compute costs using TokenLens
      * @default true
      */
     computeCosts?: boolean;
+    /**
+     * Privacy mode - when true, input/output content is not captured
+     * @default false
+     */
+    privacyMode?: boolean;
     /**
      * Called on successful AI calls
      */
@@ -115,48 +200,40 @@ interface TrackOptions {
 }
 
 /**
- * Create a Databuddy LLM tracking instance
+ * Create an HTTP transport that sends logs to an API endpoint
  *
  * @example
  * ```ts
- * import { databuddyLLM } from "@databuddy/sdk/ai/vercel";
- *
- * // Use default endpoint (basket.databuddy.cc/llm)
- * const { track } = databuddyLLM({
- *   apiKey: "your-api-key",
- * });
- *
- * // Or override with custom endpoint
- * const { track } = databuddyLLM({
- *   apiUrl: "https://custom.example.com/llm",
- *   apiKey: "your-api-key",
- * });
- *
- * // Track a model
- * const model = track(openai("gpt-4"));
+ * import { databuddyLLM, httpTransport } from "@databuddy/sdk/ai/vercel";
  *
- * // Or with custom transport
  * const { track } = databuddyLLM({
- *   transport: async (call) => console.log(call),
+ *   transport: httpTransport("https://api.example.com/ai-logs", "client-id", "api-key"),
 * });
 * ```
 */
-declare const databuddyLLM: (options?: DatabuddyLLMOptions) => {
-    track: (model: LanguageModelV2, trackOptions?: TrackOptions) => LanguageModelV2;
-};
+declare const httpTransport: (url: string, clientId?: string, apiKey?: string) => Transport;
+
 /**
- * Create an HTTP transport that sends logs to an API endpoint
+ * Vercel AI SDK middleware for Databuddy
+ *
+ * Inspired by and adapted from PostHog's AI SDK implementation:
+ * https://github.com/PostHog/posthog-js/tree/main/packages/ai
+ */
+
+/**
+ * Create a Databuddy LLM tracking instance
  *
  * @example
  * ```ts
- * import { databuddyLLM, httpTransport } from "@databuddy/sdk/ai/vercel";
+ * import { databuddyLLM } from "@databuddy/sdk/ai/vercel";
  *
- * const { track } = databuddyLLM({
- *   transport: httpTransport("https://api.example.com/ai-logs", "client-id", "api-key"),
- * });
+ * const { track } = databuddyLLM({ apiKey: "your-api-key" });
+ * const model = track(openai("gpt-4"));
 * ```
 */
-declare const httpTransport: (url: string, clientId?: string, apiKey?: string) => Transport;
+declare const databuddyLLM: (options?: DatabuddyLLMOptions) => {
+    track: <T extends LanguageModel>(model: T, trackOptions?: TrackOptions) => T;
+};
 
 export { databuddyLLM, httpTransport };
 export type { AICall, AIError, DatabuddyLLMOptions, TokenCost, TokenUsage, ToolCallInfo, TrackOptions, Transport };
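
The headline change in these declarations: `track` is now generic over the `LanguageModelV2 | LanguageModelV3` union rather than pinned to `LanguageModelV2`, and calls gain a `traceId`, captured `input`/`output` messages, and the new `privacyMode`/`maxContentSize` knobs. A minimal sketch of the updated surface, pieced together from the declarations above (the `@ai-sdk/openai` import and model id are illustrative, not taken from this diff):

```ts
import { openai } from "@ai-sdk/openai"; // illustrative provider; any V2/V3 model should pass through
import { generateText } from "ai";
import { databuddyLLM } from "@databuddy/sdk/ai/vercel";

const { track } = databuddyLLM({
  apiKey: "your-api-key",
  privacyMode: true,          // new: input/output content is not captured
  maxContentSize: 512 * 1024, // new: cap captured content below the 1MB default
});

// track() now returns the same model type it was given (V2 or V3).
const model = track(openai("gpt-4"), {
  traceId: "checkout-flow",   // new: link related calls together
  onSuccess: (call) => console.log(call.traceId, call.usage.totalTokens),
});

const { text } = await generateText({ model, prompt: "Hello!" });
```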
@@ -1,19 +1,3 @@
-import { wrapLanguageModel } from 'ai';
-
-const extractToolInfo = (content) => {
-  const toolCalls = content.filter((part) => part.type === "tool-call");
-  const toolResults = content.filter((part) => part.type === "tool-result");
-  const toolCallNames = [
-    ...new Set(
-      toolCalls.map((c) => c.toolName).filter((name) => Boolean(name))
-    )
-  ];
-  return {
-    toolCallCount: toolCalls.length,
-    toolResultCount: toolResults.length,
-    toolCallNames
-  };
-};
 const computeCosts = async (modelId, provider, usage) => {
   try {
     const { computeCostUSD } = await import('tokenlens');
@@ -34,6 +18,337 @@ const computeCosts = async (modelId, provider, usage) => {
     return {};
   }
 };
+
+const extractTokenCount = (value) => {
+  if (typeof value === "number") {
+    return value;
+  }
+  if (value && typeof value === "object" && "total" in value && typeof value.total === "number") {
+    return value.total;
+  }
+  return void 0;
+};
+const extractReasoningTokens = (usage) => {
+  if ("reasoningTokens" in usage && typeof usage.reasoningTokens === "number") {
+    return usage.reasoningTokens;
+  }
+  if ("outputTokens" in usage && usage.outputTokens && typeof usage.outputTokens === "object" && "reasoning" in usage.outputTokens && typeof usage.outputTokens.reasoning === "number") {
+    return usage.outputTokens.reasoning;
+  }
+  return void 0;
+};
+const extractCacheReadTokens = (usage) => {
+  if ("cachedInputTokens" in usage && typeof usage.cachedInputTokens === "number") {
+    return usage.cachedInputTokens;
+  }
+  if ("inputTokens" in usage && usage.inputTokens && typeof usage.inputTokens === "object" && "cacheRead" in usage.inputTokens && typeof usage.inputTokens.cacheRead === "number") {
+    return usage.inputTokens.cacheRead;
+  }
+  return void 0;
+};
+const extractCacheCreationTokens = (providerMetadata) => {
+  if (providerMetadata && typeof providerMetadata === "object" && "anthropic" in providerMetadata && providerMetadata.anthropic && typeof providerMetadata.anthropic === "object" && "cacheCreationInputTokens" in providerMetadata.anthropic && typeof providerMetadata.anthropic.cacheCreationInputTokens === "number") {
+    return providerMetadata.anthropic.cacheCreationInputTokens;
+  }
+  return void 0;
+};
+const calculateWebSearchCount = (result) => {
+  if (!result || typeof result !== "object") {
+    return 0;
+  }
+  if (result.usage && typeof result.usage === "object" && result.usage !== null && "search_context_size" in result.usage && result.usage.search_context_size) {
+    return 1;
+  }
+  return 0;
+};
+const extractWebSearchCount = (providerMetadata, usage) => {
+  if (providerMetadata && typeof providerMetadata === "object" && "anthropic" in providerMetadata && providerMetadata.anthropic && typeof providerMetadata.anthropic === "object" && "server_tool_use" in providerMetadata.anthropic) {
+    const serverToolUse = providerMetadata.anthropic.server_tool_use;
+    if (serverToolUse && typeof serverToolUse === "object" && "web_search_requests" in serverToolUse && typeof serverToolUse.web_search_requests === "number") {
+      return serverToolUse.web_search_requests;
+    }
+  }
+  return calculateWebSearchCount({ usage });
+};
+const extractToolInfo = (content, params) => {
+  const toolCalls = content.filter((part) => part.type === "tool-call");
+  const toolResults = content.filter((part) => part.type === "tool-result");
+  const toolCallNames = [
+    ...new Set(
+      toolCalls.map((c) => c.toolName).filter((name) => Boolean(name))
+    )
+  ];
+  const availableTools = params?.tools?.map((t) => t.name) ?? [];
+  return {
+    toolCallCount: toolCalls.length,
+    toolResultCount: toolResults.length,
+    toolCallNames,
+    availableTools: availableTools.length > 0 ? availableTools : void 0
+  };
+};
+const extractAdditionalTokenValues = (providerMetadata) => {
+  if (providerMetadata && typeof providerMetadata === "object" && "anthropic" in providerMetadata && providerMetadata.anthropic && typeof providerMetadata.anthropic === "object" && "cacheCreationInputTokens" in providerMetadata.anthropic) {
+    return {
+      cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens
+    };
+  }
+  return {};
+};
+const extractUsage = (usage, providerMetadata) => {
+  const usageObj = usage;
+  const inputTokens = extractTokenCount(usage.inputTokens) ?? 0;
+  const outputTokens = extractTokenCount(usage.outputTokens) ?? 0;
+  const totalTokens = usage.totalTokens ?? inputTokens + outputTokens;
+  const cachedInputTokens = extractCacheReadTokens(usageObj);
+  const cacheCreationInputTokens = extractCacheCreationTokens(providerMetadata);
+  const reasoningTokens = extractReasoningTokens(usageObj);
+  const webSearchCount = extractWebSearchCount(providerMetadata, usage);
+  return {
+    inputTokens,
+    outputTokens,
+    totalTokens,
+    cachedInputTokens,
+    cacheCreationInputTokens,
+    reasoningTokens,
+    webSearchCount
+  };
+};
+const adjustAnthropicV3CacheTokens = (model, provider, usage) => {
+  if (model.specificationVersion === "v3" && provider.toLowerCase().includes("anthropic")) {
+    const cacheReadTokens = usage.cachedInputTokens ?? 0;
+    const cacheWriteTokens = usage.cacheCreationInputTokens ?? 0;
+    const cacheTokens = cacheReadTokens + cacheWriteTokens;
+    if (usage.inputTokens && cacheTokens > 0) {
+      usage.inputTokens = Math.max(usage.inputTokens - cacheTokens, 0);
+      usage.totalTokens = usage.inputTokens + usage.outputTokens;
+    }
+  }
+};
+
+const MAX_TEXT_LENGTH = 1e5;
+const DATA_URL_REGEX = /^data:([^;,]+)/;
+const WHITESPACE_REGEX = /\s/;
+const BASE64_REGEX = /^[A-Za-z0-9+/=]+$/;
+const truncate = (text, maxLength = MAX_TEXT_LENGTH) => {
+  if (text.length <= maxLength) {
+    return text;
+  }
+  return `${text.slice(0, maxLength)}... [truncated ${text.length - maxLength} chars]`;
+};
+const redactBase64DataUrl = (data) => {
+  if (data.startsWith("data:")) {
+    const match = data.match(DATA_URL_REGEX);
+    const mediaType = match?.[1] ?? "unknown";
+    return `[${mediaType} data URL redacted]`;
+  }
+  if (data.length > 1e3 && !WHITESPACE_REGEX.test(data) && BASE64_REGEX.test(data)) {
+    return `[base64 data redacted - ${data.length} chars]`;
+  }
+  return data;
+};
+const toContentString = (content) => {
+  if (typeof content === "string") {
+    return content;
+  }
+  if (Array.isArray(content)) {
+    return content.map((c) => {
+      if (typeof c === "string") {
+        return c;
+      }
+      if (c && typeof c === "object" && "text" in c) {
+        return c.text;
+      }
+      return "";
+    }).join("");
+  }
+  return "";
+};
+const generateTraceId = () => {
+  return `${Date.now().toString(36)}-${Math.random().toString(36).slice(2, 11)}`;
+};
+
+const mapPromptToMessages = (prompt, maxSize) => {
+  const messages = prompt.map((message) => {
+    if (message.role === "system") {
+      return {
+        role: "system",
+        content: truncate(toContentString(message.content))
+      };
+    }
+    if (Array.isArray(message.content)) {
+      const content = message.content.map((c) => {
+        if (c.type === "text") {
+          return { type: "text", text: truncate(c.text) };
+        }
+        if (c.type === "file") {
+          const data = c.data;
+          const fileData = data instanceof URL ? data.toString() : typeof data === "string" ? redactBase64DataUrl(data) : "[binary file]";
+          return { type: "file", file: fileData, mediaType: c.mediaType };
+        }
+        if (c.type === "image") {
+          const data = c.image;
+          const imageData = data instanceof URL ? data.toString() : typeof data === "string" ? redactBase64DataUrl(data) : "[binary image]";
+          return {
+            type: "image",
+            image: imageData,
+            mediaType: c.mimeType ?? "image/unknown"
+          };
+        }
+        if (c.type === "tool-call") {
+          const input = c.input;
+          return {
+            type: "tool-call",
+            id: c.toolCallId,
+            function: {
+              name: c.toolName,
+              arguments: truncate(
+                typeof input === "string" ? input : JSON.stringify(input ?? {})
+              )
+            }
+          };
+        }
+        if (c.type === "tool-result") {
+          return {
+            type: "tool-result",
+            toolCallId: c.toolCallId,
+            toolName: c.toolName,
+            output: c.output,
+            isError: false
+          };
+        }
+        return { type: "text", text: "" };
+      });
+      return { role: message.role, content };
+    }
+    return {
+      role: message.role,
+      content: truncate(toContentString(message.content))
+    };
+  });
+  try {
+    let serialized = JSON.stringify(messages);
+    let removedCount = 0;
+    const initialSize = messages.length;
+    for (let i = 0; i < initialSize && Buffer.byteLength(serialized, "utf8") > maxSize; i++) {
+      messages.shift();
+      removedCount++;
+      serialized = JSON.stringify(messages);
+    }
+    if (removedCount > 0) {
+      messages.unshift({
+        role: "system",
+        content: `[${removedCount} message${removedCount === 1 ? "" : "s"} removed due to size limit]`
+      });
+    }
+  } catch (error) {
+    console.error("Error stringifying inputs", error);
+    return [
+      {
+        role: "system",
+        content: "An error occurred while processing your request. Please try again."
+      }
+    ];
+  }
+  return messages;
+};
+const mapResultToMessages = (content) => {
+  const mappedContent = content.map((item) => {
+    if (item.type === "text") {
+      return { type: "text", text: truncate(item.text ?? "") };
+    }
+    if (item.type === "reasoning") {
+      return { type: "reasoning", text: truncate(item.text ?? "") };
+    }
+    if (item.type === "tool-call") {
+      const toolItem = item;
+      const rawArgs = toolItem.args ?? toolItem.arguments ?? toolItem.input;
+      const argsValue = typeof rawArgs === "string" ? rawArgs : JSON.stringify(rawArgs ?? {});
+      return {
+        type: "tool-call",
+        id: toolItem.toolCallId ?? "",
+        function: {
+          name: toolItem.toolName ?? "",
+          arguments: truncate(argsValue)
+        }
+      };
+    }
+    if (item.type === "file") {
+      let fileData;
+      if (item.data instanceof URL) {
+        fileData = item.data.toString();
+      } else if (typeof item.data === "string") {
+        fileData = redactBase64DataUrl(item.data);
+        if (fileData === item.data && item.data.length > 1e3) {
+          fileData = `[${item.mediaType ?? "unknown"} file - ${item.data.length} bytes]`;
+        }
+      } else {
+        fileData = "[binary file]";
+      }
+      return {
+        type: "file",
+        file: fileData,
+        mediaType: item.mediaType ?? "application/octet-stream"
+      };
+    }
+    if (item.type === "source") {
+      return {
+        type: "source",
+        sourceType: item.sourceType ?? "unknown",
+        id: item.id ?? "",
+        url: item.url ?? "",
+        title: item.title ?? ""
+      };
+    }
+    return { type: "text", text: truncate(JSON.stringify(item)) };
+  });
+  if (mappedContent.length === 0) {
+    return [];
+  }
+  return [
+    {
+      role: "assistant",
+      content: mappedContent.length === 1 && mappedContent[0].type === "text" ? mappedContent[0].text : mappedContent
+    }
+  ];
+};
+const buildStreamOutput = (generatedText, reasoningText, toolCalls, sources = []) => {
+  const outputContent = [];
+  if (reasoningText) {
+    outputContent.push({ type: "reasoning", text: truncate(reasoningText) });
+  }
+  if (generatedText) {
+    outputContent.push({ type: "text", text: truncate(generatedText) });
+  }
+  for (const toolCall of toolCalls.values()) {
+    outputContent.push({
+      type: "tool-call",
+      id: toolCall.toolCallId,
+      function: {
+        name: toolCall.toolName,
+        arguments: truncate(toolCall.input)
+      }
+    });
+  }
+  for (const source of sources) {
+    outputContent.push({
+      type: "source",
+      sourceType: source.sourceType,
+      id: source.id,
+      url: source.url,
+      title: source.title
+    });
+  }
+  if (outputContent.length === 0) {
+    return [];
+  }
+  return [
+    {
+      role: "assistant",
+      content: outputContent.length === 1 && outputContent[0].type === "text" ? outputContent[0].text : outputContent
+    }
+  ];
+};
+
 const createDefaultTransport = (apiUrl, clientId, apiKey) => {
   return async (call) => {
     const headers = {
@@ -57,160 +372,6 @@ const createDefaultTransport = (apiUrl, clientId, apiKey) => {
     }
   };
 };
-const createMiddleware = (transport, options = {}) => {
-  const { computeCosts: shouldComputeCosts = true } = options;
-  return {
-    wrapGenerate: async ({ doGenerate, model }) => {
-      const startTime = Date.now();
-      try {
-        const result = await doGenerate();
-        const durationMs = Date.now() - startTime;
-        const tools = extractToolInfo(
-          result.content
-        );
-        const inputTokens = result.usage.inputTokens ?? 0;
-        const outputTokens = result.usage.outputTokens ?? 0;
-        const totalTokens = result.usage.totalTokens ?? inputTokens + outputTokens;
-        const cachedInputTokens = result.usage.cachedInputTokens;
-        const cost = shouldComputeCosts && (inputTokens > 0 || outputTokens > 0) ? await computeCosts(model.modelId, model.provider, {
-          inputTokens,
-          outputTokens
-        }) : {};
-        const call = {
-          timestamp: /* @__PURE__ */ new Date(),
-          type: "generate",
-          model: model.modelId,
-          provider: model.provider,
-          finishReason: result.finishReason,
-          usage: {
-            inputTokens,
-            outputTokens,
-            totalTokens,
-            cachedInputTokens
-          },
-          cost,
-          tools,
-          durationMs
-        };
-        const effectiveTransport = options.transport ?? transport;
-        const transportResult = effectiveTransport(call);
-        if (transportResult instanceof Promise) {
-          transportResult.catch(() => {
-          });
-        }
-        options.onSuccess?.(call);
-        return result;
-      } catch (error) {
-        const durationMs = Date.now() - startTime;
-        const call = {
-          timestamp: /* @__PURE__ */ new Date(),
-          type: "generate",
-          model: model.modelId,
-          provider: model.provider,
-          usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
-          cost: {},
-          tools: { toolCallCount: 0, toolResultCount: 0, toolCallNames: [] },
-          durationMs,
-          error: {
-            name: error instanceof Error ? error.name : "UnknownError",
-            message: error instanceof Error ? error.message : String(error),
-            stack: error instanceof Error ? error.stack : void 0
-          }
-        };
-        const effectiveTransport = options.transport ?? transport;
-        const transportResult = effectiveTransport(call);
-        if (transportResult instanceof Promise) {
-          transportResult.catch(() => {
-          });
-        }
-        options.onError?.(call);
-        throw error;
-      }
-    },
-    wrapStream: async ({ doStream, model }) => {
-      const startTime = Date.now();
-      try {
-        const { stream, ...rest } = await doStream();
-        const durationMs = Date.now() - startTime;
-        const call = {
-          timestamp: /* @__PURE__ */ new Date(),
-          type: "stream",
-          model: model.modelId,
-          provider: model.provider,
-          usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
-          cost: {},
-          tools: { toolCallCount: 0, toolResultCount: 0, toolCallNames: [] },
-          durationMs
-        };
-        const effectiveTransport = options.transport ?? transport;
-        const transportResult = effectiveTransport(call);
-        if (transportResult instanceof Promise) {
-          transportResult.catch(() => {
-          });
-        }
-        options.onSuccess?.(call);
-        return { stream, ...rest };
-      } catch (error) {
-        const durationMs = Date.now() - startTime;
-        const call = {
-          timestamp: /* @__PURE__ */ new Date(),
-          type: "stream",
-          model: model.modelId,
-          provider: model.provider,
-          usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
-          cost: {},
-          tools: { toolCallCount: 0, toolResultCount: 0, toolCallNames: [] },
-          durationMs,
-          error: {
-            name: error instanceof Error ? error.name : "UnknownError",
-            message: error instanceof Error ? error.message : String(error),
-            stack: error instanceof Error ? error.stack : void 0
-          }
-        };
-        const effectiveTransport = options.transport ?? transport;
-        const transportResult = effectiveTransport(call);
-        if (transportResult instanceof Promise) {
-          transportResult.catch(() => {
-          });
-        }
-        options.onError?.(call);
-        throw error;
-      }
-    }
-  };
-};
-const databuddyLLM = (options = {}) => {
-  const {
-    apiUrl,
-    apiKey,
-    clientId,
-    transport: customTransport,
-    computeCosts: defaultComputeCosts = true,
-    onSuccess: defaultOnSuccess,
-    onError: defaultOnError
-  } = options;
-  let transport;
-  if (customTransport) {
-    transport = customTransport;
-  } else {
-    const endpoint = apiUrl ?? process.env.DATABUDDY_API_URL ?? "https://basket.databuddy.cc/llm";
-    const client = clientId ?? process.env.DATABUDDY_CLIENT_ID;
-    const key = apiKey ?? process.env.DATABUDDY_API_KEY;
-    transport = createDefaultTransport(endpoint, client, key);
-  }
-  const track = (model, trackOptions = {}) => {
-    return wrapLanguageModel({
-      model,
-      middleware: createMiddleware(transport, {
-        computeCosts: trackOptions.computeCosts ?? defaultComputeCosts,
-        onSuccess: trackOptions.onSuccess ?? defaultOnSuccess,
-        onError: trackOptions.onError ?? defaultOnError,
-        transport: trackOptions.transport
-      })
-    });
-  };
-  return { track };
-};
 const httpTransport = (url, clientId, apiKey) => {
   return async (call) => {
     const headers = {
@@ -235,4 +396,328 @@ const httpTransport = (url, clientId, apiKey) => {
   };
 };
 
+const MAX_CONTENT_SIZE = 1048576;
+const extractProvider = (model) => {
+  return model.provider.toLowerCase().split(".")[0];
+};
+const createErrorCall = (traceId, type, model, provider, input, durationMs, error) => {
+  return {
+    timestamp: /* @__PURE__ */ new Date(),
+    traceId,
+    type,
+    model,
+    provider,
+    input,
+    output: [],
+    usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
+    cost: {},
+    tools: { toolCallCount: 0, toolResultCount: 0, toolCallNames: [] },
+    durationMs,
+    error: {
+      name: error instanceof Error ? error.name : "UnknownError",
+      message: error instanceof Error ? error.message : String(error),
+      stack: error instanceof Error ? error.stack : void 0
+    }
+  };
+};
+const sendCall = (call, transport, onSuccess, onError) => {
+  Promise.resolve(transport(call)).catch((error) => {
+    console.error("[databuddy] Failed to send AI log:", error);
+  });
+  call.error ? onError?.(call) : onSuccess?.(call);
+};
+const databuddyLLM = (options = {}) => {
+  const {
+    apiUrl,
+    apiKey,
+    clientId,
+    transport: customTransport,
+    computeCosts: defaultComputeCosts = true,
+    privacyMode: defaultPrivacyMode = false,
+    maxContentSize = MAX_CONTENT_SIZE,
+    onSuccess: defaultOnSuccess,
+    onError: defaultOnError
+  } = options;
+  const transport = customTransport ? customTransport : createDefaultTransport(
+    apiUrl ?? process.env.DATABUDDY_API_URL ?? "https://basket.databuddy.cc/llm",
+    clientId ?? process.env.DATABUDDY_CLIENT_ID,
+    apiKey ?? process.env.DATABUDDY_API_KEY
+  );
+  const track = (model, trackOptions = {}) => {
+    const getEffectiveTransport = () => {
+      if (trackOptions.transport) {
+        return trackOptions.transport;
+      }
+      if (trackOptions.clientId && trackOptions.clientId !== clientId) {
+        return createDefaultTransport(
+          apiUrl ?? process.env.DATABUDDY_API_URL ?? "https://basket.databuddy.cc/llm",
+          trackOptions.clientId,
+          apiKey ?? process.env.DATABUDDY_API_KEY
+        );
+      }
+      return transport;
+    };
+    return Object.create(model, {
+      doGenerate: {
+        value: async (params) => {
+          const startTime = Date.now();
+          const traceId = trackOptions.traceId ?? generateTraceId();
+          const effectiveTransport = getEffectiveTransport();
+          try {
+            const result = await model.doGenerate(params);
+            const durationMs = Date.now() - startTime;
+            const tools = extractToolInfo(
+              result.content,
+              params
+            );
+            const provider = extractProvider(model);
+            const usage = extractUsage(result.usage, result.providerMetadata);
+            adjustAnthropicV3CacheTokens(model, provider, usage);
+            const cost = (trackOptions.computeCosts ?? defaultComputeCosts) && (usage.inputTokens > 0 || usage.outputTokens > 0) ? await computeCosts(model.modelId, model.provider, {
+              inputTokens: usage.inputTokens,
+              outputTokens: usage.outputTokens
+            }) : {};
+            const input = trackOptions.privacyMode ?? defaultPrivacyMode ? [] : mapPromptToMessages(
+              params.prompt,
+              maxContentSize
+            );
+            const output = trackOptions.privacyMode ?? defaultPrivacyMode ? [] : mapResultToMessages(
+              result.content
+            );
+            const rawFinishReason = result.finishReason;
+            let finishReason;
+            if (typeof rawFinishReason === "string") {
+              finishReason = rawFinishReason;
+            } else if (rawFinishReason && typeof rawFinishReason === "object") {
+              if ("unified" in rawFinishReason) {
+                finishReason = rawFinishReason.unified;
+              } else if ("type" in rawFinishReason) {
+                finishReason = rawFinishReason.type;
+              }
+            }
+            const call = {
+              timestamp: /* @__PURE__ */ new Date(),
+              traceId,
+              type: "generate",
+              model: result.response?.modelId ?? model.modelId,
+              provider,
+              finishReason,
+              input,
+              output,
+              usage,
+              cost,
+              tools,
+              durationMs,
+              httpStatus: 200
+            };
+            sendCall(
+              call,
+              effectiveTransport,
+              trackOptions.onSuccess ?? defaultOnSuccess,
+              trackOptions.onError ?? defaultOnError
+            );
+            return result;
+          } catch (error) {
+            const durationMs = Date.now() - startTime;
+            const input = trackOptions.privacyMode ?? defaultPrivacyMode ? [] : mapPromptToMessages(
+              params.prompt,
+              maxContentSize
+            );
+            const call = createErrorCall(
+              traceId,
+              "generate",
+              model.modelId,
+              extractProvider(model),
+              input,
+              durationMs,
+              error
+            );
+            sendCall(
+              call,
+              effectiveTransport,
+              trackOptions.onSuccess ?? defaultOnSuccess,
+              trackOptions.onError ?? defaultOnError
+            );
+            throw error;
+          }
+        },
+        writable: true,
+        configurable: true,
+        enumerable: false
+      },
+      doStream: {
+        value: async (params) => {
+          const startTime = Date.now();
+          const traceId = trackOptions.traceId ?? generateTraceId();
+          const effectiveTransport = getEffectiveTransport();
+          try {
+            const { stream, ...rest } = await model.doStream(params);
+            let generatedText = "";
+            let reasoningText = "";
+            let finishReason;
+            let providerMetadata;
+            let usage = {};
+            const toolCallsInProgress = /* @__PURE__ */ new Map();
+            const sources = [];
+            const transformStream = new TransformStream({
+              transform(chunk, controller) {
+                if (chunk.type === "text-delta") {
+                  generatedText += chunk.delta;
+                }
+                if (chunk.type === "reasoning-delta") {
+                  reasoningText += chunk.delta;
+                }
+                if (chunk.type === "tool-input-start") {
+                  toolCallsInProgress.set(chunk.id, {
+                    toolCallId: chunk.id,
+                    toolName: chunk.toolName,
+                    input: ""
+                  });
+                }
+                if (chunk.type === "tool-input-delta") {
+                  const toolCall = toolCallsInProgress.get(chunk.id);
+                  if (toolCall) {
+                    toolCall.input += chunk.delta;
+                  }
+                }
+                if (chunk.type === "tool-call") {
+                  const input = chunk.input;
+                  toolCallsInProgress.set(chunk.toolCallId, {
+                    toolCallId: chunk.toolCallId,
+                    toolName: chunk.toolName,
+                    input: typeof input === "string" ? input : JSON.stringify(input ?? {})
+                  });
+                }
+                if (chunk.type === "source") {
+                  const sourceChunk = chunk;
+                  sources.push({
+                    sourceType: sourceChunk.sourceType ?? "unknown",
+                    id: sourceChunk.id ?? "",
+                    url: sourceChunk.url ?? "",
+                    title: sourceChunk.title ?? ""
+                  });
+                }
+                if (chunk.type === "finish") {
+                  providerMetadata = chunk.providerMetadata;
+                  const additionalTokenValues = extractAdditionalTokenValues(
+                    providerMetadata
+                  );
+                  const chunkUsage = chunk.usage ?? {};
+                  usage = {
+                    inputTokens: extractTokenCount(chunk.usage?.inputTokens),
+                    outputTokens: extractTokenCount(chunk.usage?.outputTokens),
+                    reasoningTokens: extractReasoningTokens(chunkUsage),
+                    cacheReadInputTokens: extractCacheReadTokens(chunkUsage),
+                    ...additionalTokenValues
+                  };
+                  const rawFinishReason = chunk.finishReason;
+                  if (typeof rawFinishReason === "string") {
+                    finishReason = rawFinishReason;
+                  } else if (rawFinishReason && typeof rawFinishReason === "object") {
+                    if ("unified" in rawFinishReason) {
+                      finishReason = rawFinishReason.unified;
+                    } else if ("type" in rawFinishReason) {
+                      finishReason = rawFinishReason.type;
+                    }
+                  }
+                }
+                controller.enqueue(chunk);
+              },
+              flush: async () => {
+                const durationMs = Date.now() - startTime;
+                const webSearchCount = extractWebSearchCount(
+                  providerMetadata,
+                  usage
+                );
+                const finalUsageObj = {
+                  ...usage,
+                  webSearchCount
+                };
+                const finalUsage = extractUsage(
+                  finalUsageObj,
+                  providerMetadata
+                );
+                const provider = extractProvider(model);
+                adjustAnthropicV3CacheTokens(model, provider, finalUsage);
+                const output = trackOptions.privacyMode ?? defaultPrivacyMode ? [] : buildStreamOutput(
+                  generatedText,
+                  reasoningText,
+                  toolCallsInProgress,
+                  sources
+                );
+                const tools = {
+                  toolCallCount: toolCallsInProgress.size,
+                  toolResultCount: 0,
+                  toolCallNames: [
+                    ...new Set(
+                      [...toolCallsInProgress.values()].map((t) => t.toolName)
+                    )
+                  ],
+                  availableTools: params.tools?.map((t) => t.name)
+                };
+                const cost = (trackOptions.computeCosts ?? defaultComputeCosts) && (finalUsage.inputTokens > 0 || finalUsage.outputTokens > 0) ? await computeCosts(model.modelId, model.provider, {
+                  inputTokens: finalUsage.inputTokens,
+                  outputTokens: finalUsage.outputTokens
+                }) : {};
+                const input = trackOptions.privacyMode ?? defaultPrivacyMode ? [] : mapPromptToMessages(
+                  params.prompt,
+                  maxContentSize
+                );
+                const call = {
+                  timestamp: /* @__PURE__ */ new Date(),
+                  traceId,
+                  type: "stream",
+                  model: model.modelId,
+                  provider,
+                  finishReason,
+                  input,
+                  output,
+                  usage: finalUsage,
+                  cost,
+                  tools,
+                  durationMs,
+                  httpStatus: 200
+                };
+                sendCall(
+                  call,
+                  effectiveTransport,
+                  trackOptions.onSuccess ?? defaultOnSuccess,
+                  trackOptions.onError ?? defaultOnError
+                );
+              }
+            });
+            return { stream: stream.pipeThrough(transformStream), ...rest };
+          } catch (error) {
+            const durationMs = Date.now() - startTime;
+            const input = trackOptions.privacyMode ?? defaultPrivacyMode ? [] : mapPromptToMessages(
+              params.prompt,
+              maxContentSize
+            );
+            const call = createErrorCall(
+              traceId,
+              "stream",
+              model.modelId,
+              extractProvider(model),
+              input,
+              durationMs,
+              error
+            );
+            sendCall(
+              call,
+              effectiveTransport,
+              trackOptions.onSuccess ?? defaultOnSuccess,
+              trackOptions.onError ?? defaultOnError
+            );
+            throw error;
+          }
+        },
+        writable: true,
+        configurable: true,
+        enumerable: false
+      }
+    });
+  };
+  return { track };
+};
+
 export { databuddyLLM, httpTransport };
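
Transports stay pluggable: anything matching the exported `Transport` shape receives the enriched `AICall` records, which now carry `traceId`, `input`/`output`, `httpStatus`, and the extra token counters. A hedged sketch of a custom transport that forwards aggregates only, assuming the `(call) => void | Promise<void>` shape implied by the old `async (call) => console.log(call)` example:

```ts
import { databuddyLLM, type AICall, type Transport } from "@databuddy/sdk/ai/vercel";

// Hypothetical sink that logs aggregate metrics and never message content.
const metricsOnlyTransport: Transport = async (call: AICall) => {
  console.log({
    traceId: call.traceId,                       // new in this release
    model: call.model,
    provider: call.provider,
    totalTokens: call.usage.totalTokens,
    reasoningTokens: call.usage.reasoningTokens, // new optional counter
    cost: call.cost,                             // TokenLens breakdown, when computed
    durationMs: call.durationMs,
  });
};

const { track } = databuddyLLM({
  transport: metricsOnlyTransport,
  privacyMode: true, // belt and suspenders: input/output are not even built
});
```

Note the delivery semantics in `sendCall` above: the transport promise is fire-and-forget and a rejection is logged rather than rethrown, so a flaky sink cannot fail the underlying model call.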
@@ -1,4 +1,4 @@
-export { c as createScript, i as isScriptInjected } from '../shared/@databuddy/sdk.z1buNzG1.mjs';
+export { c as createScript, i as isScriptInjected } from '../shared/@databuddy/sdk.d6Qh5SSI.mjs';
 
 function detectClientId(providedClientId) {
   if (providedClientId) {
@@ -2,7 +2,7 @@
 
 import { detectClientId } from '../core/index.mjs';
 export { clear, flush, getAnonymousId, getSessionId, getTracker, getTrackingIds, getTrackingParams, isTrackerAvailable, track, trackError } from '../core/index.mjs';
-import { i as isScriptInjected, c as createScript } from '../shared/@databuddy/sdk.z1buNzG1.mjs';
+import { i as isScriptInjected, c as createScript } from '../shared/@databuddy/sdk.d6Qh5SSI.mjs';
 import React, { useRef, useMemo, useEffect, useSyncExternalStore, createContext, useContext } from 'react';
 import { B as BrowserFlagStorage, C as CoreFlagsManager } from '../shared/@databuddy/sdk.DE24-JrU.mjs';
 import { l as logger } from '../shared/@databuddy/sdk.CALvx07o.mjs';
@@ -1,4 +1,4 @@
-const version = "2.3.23";
+const version = "2.3.3";
 
 const INJECTED_SCRIPT_ATTRIBUTE = "data-databuddy-injected";
 function isScriptInjected() {
@@ -1,5 +1,5 @@
 import { defineComponent, ref, onMounted, onUnmounted, watch, reactive, watchEffect, computed } from 'vue';
-import { i as isScriptInjected, c as createScript } from '../shared/@databuddy/sdk.z1buNzG1.mjs';
+import { i as isScriptInjected, c as createScript } from '../shared/@databuddy/sdk.d6Qh5SSI.mjs';
 import { B as BrowserFlagStorage, C as CoreFlagsManager } from '../shared/@databuddy/sdk.DE24-JrU.mjs';
 import '../shared/@databuddy/sdk.CALvx07o.mjs';
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@databuddy/sdk",
-  "version": "2.3.24",
+  "version": "2.3.25",
   "description": "Official Databuddy Analytics SDK",
   "main": "./dist/core/index.mjs",
   "types": "./dist/core/index.d.ts",
@@ -14,7 +14,7 @@
     "vscode:prepublish": "bun run build"
   },
   "devDependencies": {
-    "@ai-sdk/provider": "^2.0.0",
+    "@ai-sdk/provider": "^3.0.2",
     "@types/node": "^20.0.0",
    "@vitejs/plugin-react": "^5.0.0",
     "ai": "^5.0.51",