@posthog/ai 6.1.0 → 6.1.2

This diff shows the content of publicly available package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
@@ -230,6 +230,129 @@ const sendEventToPosthog = async ({
  }
  };
 
+ // Type guards for safer type checking
+
+ const isString = value => {
+ return typeof value === 'string';
+ };
+ const isObject = value => {
+ return value !== null && typeof value === 'object' && !Array.isArray(value);
+ };
+
+ const REDACTED_IMAGE_PLACEHOLDER = '[base64 image redacted]';
+
+ // ============================================
+ // Base64 Detection Helpers
+ // ============================================
+
+ const isBase64DataUrl = str => {
+ return /^data:([^;]+);base64,/.test(str);
+ };
+ const isValidUrl = str => {
+ try {
+ new URL(str);
+ return true;
+ } catch {
+ // Not an absolute URL, check if it's a relative URL or path
+ return str.startsWith('/') || str.startsWith('./') || str.startsWith('../');
+ }
+ };
+ const isRawBase64 = str => {
+ // Skip if it's a valid URL or path
+ if (isValidUrl(str)) {
+ return false;
+ }
+
+ // Check if it's a valid base64 string
+ // Base64 images are typically at least a few hundred chars, but we'll be conservative
+ return str.length > 20 && /^[A-Za-z0-9+/]+=*$/.test(str);
+ };
+ function redactBase64DataUrl(str) {
+ if (!isString(str)) return str;
+
+ // Check for data URL format
+ if (isBase64DataUrl(str)) {
+ return REDACTED_IMAGE_PLACEHOLDER;
+ }
+
+ // Check for raw base64 (Vercel sends raw base64 for inline images)
+ if (isRawBase64(str)) {
+ return REDACTED_IMAGE_PLACEHOLDER;
+ }
+ return str;
+ }
+
+ // ============================================
+ // Common Message Processing
+ // ============================================
+
+ const processMessages = (messages, transformContent) => {
+ if (!messages) return messages;
+ const processContent = content => {
+ if (typeof content === 'string') return content;
+ if (!content) return content;
+ if (Array.isArray(content)) {
+ return content.map(transformContent);
+ }
+
+ // Handle single object content
+ return transformContent(content);
+ };
+ const processMessage = msg => {
+ if (!isObject(msg) || !('content' in msg)) return msg;
+ return {
+ ...msg,
+ content: processContent(msg.content)
+ };
+ };
+
+ // Handle both arrays and single messages
+ if (Array.isArray(messages)) {
+ return messages.map(processMessage);
+ }
+ return processMessage(messages);
+ };
+
+ // ============================================
+ // Provider-Specific Image Sanitizers
+ // ============================================
+
+ const sanitizeOpenAIImage = item => {
+ if (!isObject(item)) return item;
+
+ // Handle image_url format
+ if (item.type === 'image_url' && 'image_url' in item && isObject(item.image_url) && 'url' in item.image_url) {
+ return {
+ ...item,
+ image_url: {
+ ...item.image_url,
+ url: redactBase64DataUrl(item.image_url.url)
+ }
+ };
+ }
+ return item;
+ };
+ const sanitizeOpenAIResponseImage = item => {
+ if (!isObject(item)) return item;
+
+ // Handle input_image format
+ if (item.type === 'input_image' && 'image_url' in item) {
+ return {
+ ...item,
+ image_url: redactBase64DataUrl(item.image_url)
+ };
+ }
+ return item;
+ };
+
+ // Export individual sanitizers for tree-shaking
+ const sanitizeOpenAI = data => {
+ return processMessages(data, sanitizeOpenAIImage);
+ };
+ const sanitizeOpenAIResponse = data => {
+ return processMessages(data, sanitizeOpenAIResponseImage);
+ };
+
  const Chat = openai.OpenAI.Chat;
  const Completions = Chat.Completions;
  const Responses = openai.OpenAI.Responses;
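Together these helpers strip inline image payloads from captured messages before they are sent to PostHog. A minimal sketch of the intended behavior, using a hypothetical OpenAI chat message (not taken from the package's tests):

// Hypothetical chat message with one inline base64 image and one remote image.
const messages = [{
  role: 'user',
  content: [
    { type: 'text', text: 'What is in this picture?' },
    { type: 'image_url', image_url: { url: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUg==' } },
    { type: 'image_url', image_url: { url: 'https://example.com/cat.png' } }
  ]
}];

const sanitized = sanitizeOpenAI(messages);
// The data: URL is replaced with '[base64 image redacted]', while the
// https:// URL is left untouched because isValidUrl() accepts it.

Plain string content and non-image items pass through processMessages unchanged.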
@@ -284,14 +407,56 @@ class WrappedCompletions extends Completions {
  const [stream1, stream2] = value.tee();
  (async () => {
  try {
+ const contentBlocks = [];
  let accumulatedContent = '';
  let usage = {
  inputTokens: 0,
  outputTokens: 0
  };
+
+ // Map to track in-progress tool calls
+ const toolCallsInProgress = new Map();
  for await (const chunk of stream1) {
- const delta = chunk?.choices?.[0]?.delta?.content ?? '';
- accumulatedContent += delta;
+ const choice = chunk?.choices?.[0];
+
+ // Handle text content
+ const deltaContent = choice?.delta?.content;
+ if (deltaContent) {
+ accumulatedContent += deltaContent;
+ }
+
+ // Handle tool calls
+ const deltaToolCalls = choice?.delta?.tool_calls;
+ if (deltaToolCalls && Array.isArray(deltaToolCalls)) {
+ for (const toolCall of deltaToolCalls) {
+ const index = toolCall.index;
+ if (index !== undefined) {
+ if (!toolCallsInProgress.has(index)) {
+ // New tool call
+ toolCallsInProgress.set(index, {
+ id: toolCall.id || '',
+ name: toolCall.function?.name || '',
+ arguments: ''
+ });
+ }
+ const inProgressCall = toolCallsInProgress.get(index);
+ if (inProgressCall) {
+ // Update tool call data
+ if (toolCall.id) {
+ inProgressCall.id = toolCall.id;
+ }
+ if (toolCall.function?.name) {
+ inProgressCall.name = toolCall.function.name;
+ }
+ if (toolCall.function?.arguments) {
+ inProgressCall.arguments += toolCall.function.arguments;
+ }
+ }
+ }
+ }
+ }
+
+ // Handle usage information
  if (chunk.usage) {
  usage = {
  inputTokens: chunk.usage.prompt_tokens ?? 0,
@@ -301,6 +466,40 @@ class WrappedCompletions extends Completions {
  };
  }
  }
+
+ // Build final content blocks
+ if (accumulatedContent) {
+ contentBlocks.push({
+ type: 'text',
+ text: accumulatedContent
+ });
+ }
+
+ // Add completed tool calls to content blocks
+ for (const toolCall of toolCallsInProgress.values()) {
+ if (toolCall.name) {
+ contentBlocks.push({
+ type: 'function',
+ id: toolCall.id,
+ function: {
+ name: toolCall.name,
+ arguments: toolCall.arguments
+ }
+ });
+ }
+ }
+
+ // Format output to match non-streaming version
+ const formattedOutput = contentBlocks.length > 0 ? [{
+ role: 'assistant',
+ content: contentBlocks
+ }] : [{
+ role: 'assistant',
+ content: [{
+ type: 'text',
+ text: ''
+ }]
+ }];
  const latency = (Date.now() - startTime) / 1000;
  const availableTools = extractAvailableToolCalls('openai', openAIParams);
  await sendEventToPosthog({
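Tool-call deltas arrive fragmented across chunks, and the Map keyed by index stitches each call back together before it is pushed into contentBlocks. A small self-contained sketch of that accumulation, with the delta fragments invented for illustration:

// Hypothetical tool_call delta fragments for a single call at index 0.
const deltas = [
  { index: 0, id: 'call_abc', function: { name: 'get_weather', arguments: '' } },
  { index: 0, function: { arguments: '{"city":' } },
  { index: 0, function: { arguments: '"Paris"}' } }
];

const inProgress = new Map();
for (const d of deltas) {
  if (!inProgress.has(d.index)) {
    inProgress.set(d.index, { id: d.id || '', name: d.function?.name || '', arguments: '' });
  }
  const call = inProgress.get(d.index);
  if (d.id) call.id = d.id;
  if (d.function?.name) call.name = d.function.name;
  if (d.function?.arguments) call.arguments += d.function.arguments;
}

// inProgress.get(0) -> { id: 'call_abc', name: 'get_weather', arguments: '{"city":"Paris"}' },
// which formattedOutput would report as a single { type: 'function', ... } content block.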
@@ -309,11 +508,8 @@ class WrappedCompletions extends Completions {
  traceId,
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.messages,
- output: [{
- content: accumulatedContent,
- role: 'assistant'
- }],
+ input: sanitizeOpenAI(openAIParams.messages),
+ output: formattedOutput,
  latency,
  baseURL: this.baseURL ?? '',
  params: body,
@@ -323,18 +519,19 @@ class WrappedCompletions extends Completions {
  captureImmediate: posthogCaptureImmediate
  });
  } catch (error) {
+ const httpStatus = error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
  traceId,
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.messages,
+ input: sanitizeOpenAI(openAIParams.messages),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
  params: body,
- httpStatus: error?.status ? error.status : 500,
+ httpStatus,
  usage: {
  inputTokens: 0,
  outputTokens: 0
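The extracted httpStatus narrows the error to an object before reading status, rather than relying on a truthiness check. A quick sketch of how the expression behaves (error shapes invented for illustration):

const statusOf = error =>
  error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;

statusOf({ status: 404 });       // 404
statusOf({ status: undefined }); // 500 - nullish status falls back
statusOf(new Error('boom'));     // 500 - no status property
statusOf('timeout');             // 500 - non-object errors are handled without throwing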
@@ -362,7 +559,7 @@ class WrappedCompletions extends Completions {
  traceId,
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.messages,
+ input: sanitizeOpenAI(openAIParams.messages),
  output: formatResponseOpenAI(result),
  latency,
  baseURL: this.baseURL ?? '',
@@ -380,18 +577,19 @@ class WrappedCompletions extends Completions {
  }
  return result;
  }, async error => {
+ const httpStatus = error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
  traceId,
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.messages,
+ input: sanitizeOpenAI(openAIParams.messages),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
  params: body,
- httpStatus: error?.status ? error.status : 500,
+ httpStatus,
  usage: {
  inputTokens: 0,
  outputTokens: 0
@@ -466,7 +664,7 @@ class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: finalContent,
  latency,
  baseURL: this.baseURL ?? '',
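The Responses API represents images as input_image items whose image_url is a plain string, so sanitizeOpenAIResponse rewrites that field directly. A hypothetical input (not taken from the package) and its sanitized result:

const input = [{
  role: 'user',
  content: [
    { type: 'input_text', text: 'Describe this image' },
    { type: 'input_image', image_url: 'data:image/jpeg;base64,/9j/4AAQSkZJRg==' }
  ]
}];

const sanitized = sanitizeOpenAIResponse(input);
// The input_image item's image_url becomes '[base64 image redacted]';
// the input_text item passes through unchanged.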
@@ -477,6 +675,7 @@ class WrappedResponses extends Responses {
  captureImmediate: posthogCaptureImmediate
  });
  } catch (error) {
+ const httpStatus = error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
@@ -484,12 +683,12 @@ class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
  params: body,
- httpStatus: error?.status ? error.status : 500,
+ httpStatus,
  usage: {
  inputTokens: 0,
  outputTokens: 0
@@ -516,7 +715,7 @@ class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: formatResponseOpenAI({
  output: result.output
  }),
@@ -536,6 +735,7 @@ class WrappedResponses extends Responses {
  }
  return result;
  }, async error => {
+ const httpStatus = error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
@@ -543,12 +743,12 @@ class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
  params: body,
- httpStatus: error?.status ? error.status : 500,
+ httpStatus,
  usage: {
  inputTokens: 0,
  outputTokens: 0
@@ -592,7 +792,7 @@ class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: result.output,
  latency,
  baseURL: this.baseURL ?? '',
@@ -608,6 +808,7 @@ class WrappedResponses extends Responses {
  });
  return result;
  }, async error => {
+ const httpStatus = error && typeof error === 'object' && 'status' in error ? error.status ?? 500 : 500;
  await sendEventToPosthog({
  client: this.phClient,
  distinctId: posthogDistinctId,
@@ -615,12 +816,12 @@ class WrappedResponses extends Responses {
  //@ts-expect-error
  model: openAIParams.model,
  provider: 'openai',
- input: openAIParams.input,
+ input: sanitizeOpenAIResponse(openAIParams.input),
  output: [],
  latency: 0,
  baseURL: this.baseURL ?? '',
  params: body,
- httpStatus: error?.status ? error.status : 500,
+ httpStatus,
  usage: {
  inputTokens: 0,
  outputTokens: 0