@brizz/sdk 0.1.3-rc.1 → 0.1.4

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
package/README.md CHANGED
@@ -50,6 +50,7 @@ pnpm add @brizz/sdk
 ## Quick Start
 
 First, set up your environment variables:
+
 ```bash
 BRIZZ_API_KEY=your-api-key
 BRIZZ_BASE_URL=https://telemetry.brizz.dev # Optional
@@ -60,6 +61,7 @@ BRIZZ_LOG_LEVEL=info # Optional: debug, info, warn, error
 ### CommonJS Projects
 
 **Option 1: Preload Only (Zero Config)**
+
 ```bash
 node --require @brizz/sdk/preload your-app.js
 ```
@@ -73,10 +75,12 @@ const { openai } = require('@ai-sdk/openai');
 generateText({
   model: openai('gpt-3.5-turbo'),
   prompt: 'Hello, world!',
-}).then(result => console.log(result.text));
+  experimental_telemetry: { isEnabled: true },
+}).then((result) => console.log(result.text));
 ```
 
 **Option 2: Preload + Initialize (Custom Config)**
+
 ```bash
 node --require @brizz/sdk/preload your-app.js
 ```
@@ -96,6 +100,7 @@ const { generateText } = require('ai');
 ```
 
 **Option 3: Manual Import + Initialize**
+
 ```javascript
 // Must be first import
 const { Brizz } = require('@brizz/sdk');
@@ -112,9 +117,11 @@ const { openai } = require('@ai-sdk/openai');
 
 ### ESM Projects
 
-> ⚠️ **ESM Requirement**: ESM projects **must** use the `--import @brizz/sdk/loader` flag for instrumentation to work. Manual import without the loader will not instrument AI libraries.
+> ⚠️ **ESM Requirement**: ESM projects **must** use the `--import @brizz/sdk/loader` flag for
+> instrumentation to work. Manual import without the loader will not instrument AI libraries.
 
 **Loader + Initialize (Required for ESM)**
+
 ```bash
 node --import @brizz/sdk/loader your-app.mjs
 ```
@@ -136,6 +143,7 @@ import { openai } from '@ai-sdk/openai';
 const result = await generateText({
   model: openai('gpt-3.5-turbo'),
   prompt: 'Hello, world!',
+  experimental_telemetry: { isEnabled: true },
 });
 ```
 
@@ -145,10 +153,12 @@ const result = await generateText({
 ## Module System Support
 
 ### CommonJS
+
 - **Preload**: `node --require @brizz/sdk/preload app.js` ⭐ (with optional `Brizz.initialize()`)
 - **Manual**: Require `@brizz/sdk` first, then `Brizz.initialize()`, then AI libraries
 
 ### ESM (ES Modules)
+
 - **Loader**: `node --import @brizz/sdk/loader app.mjs` + `Brizz.initialize()` ⭐
 - **Manual**: Import `@brizz/sdk` first, then `Brizz.initialize()`, then AI libraries
 
@@ -174,6 +184,7 @@ The SDK automatically instruments:
 - **OpenAI** - `openai` package
 - **Anthropic** - `@anthropic-ai/sdk` package
 - **Vercel AI SDK** - `ai` package (generateText, streamText, etc.)
+  - Requires `experimental_telemetry: { isEnabled: true }` in function calls
 - **LangChain** - `langchain` and `@langchain/*` packages
 - **LlamaIndex** - `llamaindex` package
 - **AWS Bedrock** - `@aws-sdk/client-bedrock-runtime`
@@ -181,6 +192,36 @@ The SDK automatically instruments:
 - **Vector Databases** - Pinecone, Qdrant, ChromaDB
 - **HTTP/Fetch** - Automatic network request tracing
 
+### Vercel AI SDK Integration
+
+For Vercel AI SDK instrumentation, you need to enable telemetry in your function calls:
+
+```typescript
+import { generateText, streamText } from 'ai';
+import { openai } from '@ai-sdk/openai';
+
+// For generateText
+const result = await generateText({
+  model: openai('gpt-4'),
+  prompt: 'Hello, world!',
+  experimental_telemetry: { isEnabled: true }, // Required for instrumentation
+});
+
+// For streamText
+const stream = streamText({
+  model: openai('gpt-4'),
+  messages: [{ role: 'user', content: 'Hello!' }],
+  experimental_telemetry: { isEnabled: true }, // Required for instrumentation
+});
+```
+
+This enables automatic tracing of:
+
+- Model calls and responses
+- Token usage and costs
+- Tool calls and executions
+- Streaming data
+
 ## PII Protection & Data Masking
 
 Automatically protects sensitive data in traces and logs:
@@ -218,24 +259,81 @@ API keys, crypto addresses, IPs, and more.
 
 Group related operations under a session context:
 
+### Function Wrapper Pattern
+
 ```typescript
-import { WithSessionId, emitEvent } from '@brizz/sdk';
+import { withSessionId, emitEvent } from '@brizz/sdk';
 
 async function processUserWorkflow(userId: string) {
-  // All traces within this function will include session-123
+  // All traces within this function will include the session ID
   const result = await generateText({
     model: openai('gpt-4'),
     messages: [{ role: 'user', content: 'Hello' }],
+    experimental_telemetry: { isEnabled: true },
   });
 
-  emitEvent('workflow.completed', { userId, result: result.text });
   return result;
 }
 
-// Wrap function with session context
-await WithSessionId('session-123', processUserWorkflow, null, 'user-456');
+// Create a wrapped function that always executes with session context
+const sessionedWorkflow = withSessionId('session-123', processUserWorkflow);
+
+// Call multiple times, each with the same session context
+await sessionedWorkflow('user-456');
+await sessionedWorkflow('user-789');
+```
+
+### Immediate Execution Pattern
+
+```typescript
+import { callWithSessionId } from '@brizz/sdk';
+
+// Execute function immediately with session context
+await callWithSessionId('session-123', processUserWorkflow, null, 'user-456');
+```
+
+### Handling Method Context
+
+When wrapping methods that use `this`, you have several options:
+
+#### Option 1: Arrow Function (Recommended)
+
+```typescript
+class ChatService {
+  async processMessage(userId: string, message: string) {
+    // This method uses 'this' context
+    return `Processed by ${this.serviceName}: ${message}`;
+  }
+}
+
+const service = new ChatService();
+// Wrap with arrow function to preserve 'this' context
+const sessionedProcess = withSessionId('session-123', (userId: string, message: string) =>
+  service.processMessage(userId, message),
+);
+```
+
+#### Option 2: Using bind()
+
+```typescript
+// Pre-bind the method to preserve 'this' context
+const sessionedProcess = withSessionId('session-123', service.processMessage.bind(service));
 ```
 
+#### Option 3: Explicit thisArg Parameter
+
+```typescript
+// Pass 'this' context explicitly as third parameter
+const sessionedProcess = withSessionId(
+  'session-123',
+  service.processMessage,
+  service, // explicit 'this' context
+);
+```
+
+**Note:** The arrow function approach (Option 1) is recommended as it's more explicit, avoids lint
+warnings, and is less prone to `this` binding issues.
+
 ## Custom Events & Logging
 
 Emit custom events and structured logs:
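
Editor's note: this hunk documents a breaking rename of the session helper, splitting it in two — `withSessionId` now returns a wrapped function, while `callWithSessionId` keeps the old invoke-immediately shape. A minimal migration sketch, assuming the signatures shown in this diff (`processUserWorkflow` stands in for the README's example function):

```typescript
import { withSessionId, callWithSessionId } from '@brizz/sdk';

declare function processUserWorkflow(userId: string): Promise<unknown>;

// 0.1.3-rc.1: WithSessionId('session-123', processUserWorkflow, null, 'user-456')
// invoked the function immediately.

// 0.1.4, one-shot equivalent of the old call:
await callWithSessionId('session-123', processUserWorkflow, null, 'user-456');

// 0.1.4, reusable wrapper: build once, call many times.
const sessioned = withSessionId('session-123', processUserWorkflow);
await sessioned('user-456');
await sessioned('user-789');
```
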
@@ -380,6 +478,7 @@ Check out the [examples](./examples/) directory for complete working examples:
 - For ESM, use loader + `Brizz.initialize()`
 - For CommonJS, use preload (with optional `Brizz.initialize()`)
 - Check that `BRIZZ_API_KEY` is set
+- For Vercel AI SDK: Add `experimental_telemetry: { isEnabled: true }` to function calls
 
 **CJS/ESM compatibility issues**
 
package/dist/index.cjs CHANGED
@@ -34,7 +34,7 @@ __export(src_exports, {
   DEFAULT_PII_PATTERNS: () => DEFAULT_PII_PATTERNS,
   LogLevel: () => LogLevel,
   SeverityNumber: () => import_api_logs2.SeverityNumber,
-  WithSessionId: () => WithSessionId,
+  callWithSessionId: () => callWithSessionId,
   detectRuntime: () => detectRuntime,
   emitEvent: () => emitEvent,
   getLogLevel: () => getLogLevel,
@@ -46,7 +46,8 @@ __export(src_exports, {
   logger: () => logger,
   maskAttributes: () => maskAttributes,
   maskValue: () => maskValue,
-  setLogLevel: () => setLogLevel
+  setLogLevel: () => setLogLevel,
+  withSessionId: () => withSessionId
 });
 module.exports = __toCommonJS(src_exports);
 
@@ -1469,7 +1470,7 @@ var HANDLED_SPAN_NAMES = {
   "ai.streamText": "ai.streamText",
   "ai.toolCall": (span) => {
     const toolName = span.attributes["ai.toolCall.name"];
-    return `${toolName}.tool`;
+    return `${String(toolName ?? "unknown")}.tool`;
   }
 };
 var AI_RESPONSE_TEXT = "ai.response.text";
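
Editor's note: the `String(toolName ?? "unknown")` guard matters when a tool-call span lacks the `ai.toolCall.name` attribute; the old template literal silently produced the span name `undefined.tool`. An illustrative sketch (not SDK API):

```typescript
// Illustrative only: reproduces the span-name fallback behavior.
const attributes: Record<string, unknown> = {}; // span without 'ai.toolCall.name'
const toolName = attributes['ai.toolCall.name']; // undefined

const before = `${toolName}.tool`; // "undefined.tool" (0.1.3-rc.1 behavior)
const after = `${String(toolName ?? 'unknown')}.tool`; // "unknown.tool" (0.1.4 behavior)
console.log(before, after);
```
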
@@ -1479,10 +1480,11 @@ var AI_USAGE_COMPLETION_TOKENS = "ai.usage.completionTokens";
 var AI_MODEL_PROVIDER = "ai.model.provider";
 var transformAiSdkSpanName = (span) => {
   if (span.name in HANDLED_SPAN_NAMES) {
-    if (typeof HANDLED_SPAN_NAMES[span.name] === "function") {
-      span.name = HANDLED_SPAN_NAMES[span.name](span);
-    } else {
-      span.name = HANDLED_SPAN_NAMES[span.name];
+    const handler = HANDLED_SPAN_NAMES[span.name];
+    if (typeof handler === "function") {
+      span.name = handler(span);
+    } else if (handler) {
+      span.name = handler;
     }
   }
 };
@@ -1497,26 +1499,30 @@ var transformPromptMessages = (attributes) => {
   if (AI_PROMPT_MESSAGES in attributes) {
     try {
       const messages = JSON.parse(attributes[AI_PROMPT_MESSAGES]);
-      messages.forEach((msg, index) => {
-        logger.debug("Transforming prompt message", { msg, type: typeof msg.content });
-        if (typeof msg.content === "string") {
-          attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_PROMPTS}.${index}.content`] = msg.content;
+      for (const [index, msg] of messages.entries()) {
+        const message = msg;
+        logger.debug("Transforming prompt message", { msg: message, type: typeof message.content });
+        if (typeof message.content === "string") {
+          attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_PROMPTS}.${index}.content`] = message.content;
         } else {
-          if (Array.isArray(msg.content) && msg.content.length > 0) {
-            const lastContent = msg.content[msg.content.length - 1];
-            if (lastContent.text) {
+          if (Array.isArray(message.content) && message.content.length > 0) {
+            const lastContent = message.content.at(-1);
+            if (lastContent?.text) {
               attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_PROMPTS}.${index}.content`] = lastContent.text;
             }
           } else {
             attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_PROMPTS}.${index}.content`] = JSON.stringify(
-              msg.content
+              message.content
            );
          }
        }
-        attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_PROMPTS}.${index}.role`] = msg.role;
-      });
+        attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_PROMPTS}.${index}.role`] = message.role;
+      }
       delete attributes[AI_PROMPT_MESSAGES];
-    } catch {
+    } catch (error) {
+      logger.debug("Skipping prompt messages transformation because of JSON parsing error", {
+        e: error
+      });
     }
   }
 };
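
Editor's note: to see what the refactored loop above does end to end, here is a simplified standalone sketch. It flattens the JSON-encoded `ai.prompt.messages` span attribute into per-message `content`/`role` attributes. The `gen_ai.prompt` prefix is an assumption standing in for `SpanAttributes.LLM_PROMPTS`:

```typescript
type Attributes = Record<string, unknown>;

const AI_PROMPT_MESSAGES = 'ai.prompt.messages';
const LLM_PROMPTS = 'gen_ai.prompt'; // assumption: stand-in for SpanAttributes.LLM_PROMPTS

function flattenPromptMessages(attributes: Attributes): void {
  if (!(AI_PROMPT_MESSAGES in attributes)) return;
  try {
    const messages: Array<{ role: string; content: unknown }> = JSON.parse(
      String(attributes[AI_PROMPT_MESSAGES]),
    );
    for (const [index, message] of messages.entries()) {
      if (typeof message.content === 'string') {
        attributes[`${LLM_PROMPTS}.${index}.content`] = message.content;
      } else if (Array.isArray(message.content) && message.content.length > 0) {
        // Multi-part content: keep only the text of the last part.
        const last = message.content.at(-1);
        if (last?.text) attributes[`${LLM_PROMPTS}.${index}.content`] = last.text;
      } else {
        attributes[`${LLM_PROMPTS}.${index}.content`] = JSON.stringify(message.content);
      }
      attributes[`${LLM_PROMPTS}.${index}.role`] = message.role;
    }
    delete attributes[AI_PROMPT_MESSAGES];
  } catch {
    // Malformed JSON: leave the original attribute in place (the SDK logs and skips).
  }
}

// Example: { "ai.prompt.messages": '[{"role":"user","content":"Hi"}]' }
// becomes  { "gen_ai.prompt.0.content": "Hi", "gen_ai.prompt.0.role": "user" }
```
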
@@ -1542,11 +1548,7 @@ var calculateTotalTokens = (attributes) => {
 var transformVendor = (attributes) => {
   if (AI_MODEL_PROVIDER in attributes) {
     const vendor = attributes[AI_MODEL_PROVIDER];
-    if (vendor && vendor.startsWith("openai")) {
-      attributes[import_ai_semantic_conventions.SpanAttributes.LLM_SYSTEM] = "OpenAI";
-    } else {
-      attributes[import_ai_semantic_conventions.SpanAttributes.LLM_SYSTEM] = vendor;
-    }
+    attributes[import_ai_semantic_conventions.SpanAttributes.LLM_SYSTEM] = vendor && vendor.startsWith("openai") ? "OpenAI" : vendor;
     delete attributes[AI_MODEL_PROVIDER];
   }
 };
@@ -1562,6 +1564,10 @@ var shouldHandleSpan = (span) => {
   return span.name in HANDLED_SPAN_NAMES;
 };
 var transformAiSdkSpan = (span) => {
+  if (!shouldHandleSpan(span)) {
+    logger.debug("Skipping span transformation", { spanName: span.name });
+    return;
+  }
   for (const key in span.attributes) {
     if (Number.isNaN(span.attributes[key])) {
       span.attributes[key] = 0;
@@ -1572,10 +1578,6 @@ var transformAiSdkSpan = (span) => {
     spanContext: span.spanContext(),
     attributes: span.attributes
   });
-  if (!shouldHandleSpan(span)) {
-    logger.debug("Skipping span transformation", { spanName: span.name });
-    return;
-  }
   transformAiSdkSpanName(span);
   transformAiSdkAttributes(span.attributes);
 };
@@ -1790,7 +1792,15 @@ function withProperties(properties, fn, thisArg, ...args) {
   const newContext = import_api5.context.active().setValue(PROPERTIES_CONTEXT_KEY, properties);
   return import_api5.context.with(newContext, fn, thisArg, ...args);
 }
-function WithSessionId(sessionId, fn, thisArg, ...args) {
+function withSessionId(sessionId, fn, thisArg) {
+  return function wrapped(...args) {
+    const base = import_api5.context.active();
+    const prev = base.getValue(PROPERTIES_CONTEXT_KEY);
+    const next = base.setValue(PROPERTIES_CONTEXT_KEY, prev ? { ...prev, [SESSION_ID]: sessionId } : { [SESSION_ID]: sessionId });
+    return import_api5.context.with(next, fn, thisArg ?? this, ...args);
+  };
+}
+function callWithSessionId(sessionId, fn, thisArg, ...args) {
   return withProperties({ [SESSION_ID]: sessionId }, fn, thisArg, ...args);
 }
 
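Editor's note: two behavioral details of the new `withSessionId` are visible above — it returns a wrapper rather than invoking `fn`, and it merges the session ID into any properties already present on the active context instead of replacing them. A small usage sketch under those assumptions:

```typescript
import { withSessionId } from '@brizz/sdk';

async function step(label: string) {
  // Spans created here carry the session ID from the nearest enclosing wrapper.
  console.log('running', label);
}

const outer = withSessionId('session-outer', async () => {
  await step('outer'); // traced under session-outer

  // Re-wrapping inside the outer context overrides only the session ID;
  // other context properties set by enclosing scopes are preserved.
  const inner = withSessionId('session-inner', () => step('inner'));
  await inner(); // traced under session-inner
});

await outer();
```
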
@@ -2034,7 +2044,7 @@ var init_exports = {};
   DEFAULT_PII_PATTERNS,
   LogLevel,
   SeverityNumber,
-  WithSessionId,
+  callWithSessionId,
   detectRuntime,
   emitEvent,
   getLogLevel,
@@ -2046,6 +2056,7 @@ var init_exports = {};
   logger,
   maskAttributes,
   maskValue,
-  setLogLevel
+  setLogLevel,
+  withSessionId
 });
 //# sourceMappingURL=index.cjs.map