@posthog/ai 4.0.0 → 4.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/index.esm.js CHANGED
@@ -1,9 +1,13 @@
  import OpenAIOrignal, { AzureOpenAI } from 'openai';
  import * as uuid from 'uuid';
  import { v4 } from 'uuid';
+ import { Buffer } from 'buffer';
  import { experimental_wrapLanguageModel } from 'ai';
  import AnthropicOriginal from '@anthropic-ai/sdk';

+ // limit large outputs by truncating to 200kb (approx 200k bytes)
+ const MAX_OUTPUT_SIZE = 200000;
+ const STRING_FORMAT = 'utf8';
  const getModelParams = params => {
    if (!params) {
      return {};
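Note: the new MAX_OUTPUT_SIZE cap is measured in UTF-8 bytes (via Buffer), not in JavaScript string characters, so multi-byte text reaches the limit sooner than its .length suggests. A minimal sketch of the difference, using an invented sample string:

    import { Buffer } from 'buffer';

    const sample = 'é'.repeat(150000);              // 150,000 characters
    console.log(sample.length);                     // 150000
    console.log(Buffer.byteLength(sample, 'utf8')); // 300000, already past MAX_OUTPUT_SIZE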
@@ -59,6 +63,33 @@ const mergeSystemPrompt = (params, provider) => {
  const withPrivacyMode = (client, privacyMode, input) => {
    return client.privacy_mode || privacyMode ? null : input;
  };
+ const truncate = str => {
+   try {
+     const buffer = Buffer.from(str, STRING_FORMAT);
+     if (buffer.length <= MAX_OUTPUT_SIZE) {
+       return str;
+     }
+     const truncatedBuffer = buffer.slice(0, MAX_OUTPUT_SIZE);
+     return `${truncatedBuffer.toString(STRING_FORMAT)}... [truncated]`;
+   } catch (error) {
+     console.error('Error truncating, likely not a string');
+     return str;
+   }
+ };
+ function sanitizeValues(obj) {
+   if (obj === undefined || obj === null) {
+     return obj;
+   }
+   const jsonSafe = JSON.parse(JSON.stringify(obj));
+   if (typeof jsonSafe === 'string') {
+     return Buffer.from(jsonSafe, STRING_FORMAT).toString(STRING_FORMAT);
+   } else if (Array.isArray(jsonSafe)) {
+     return jsonSafe.map(sanitizeValues);
+   } else if (jsonSafe && typeof jsonSafe === 'object') {
+     return Object.fromEntries(Object.entries(jsonSafe).map(([k, v]) => [k, sanitizeValues(v)]));
+   }
+   return jsonSafe;
+ }
  const sendEventToPosthog = ({
    client,
    distinctId,
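The two helpers added here carry most of this release: truncate caps a string at MAX_OUTPUT_SIZE bytes and appends '... [truncated]', while sanitizeValues walks any JSON-serializable value and round-trips every string through a UTF-8 Buffer so that invalid sequences (for example lone surrogates) are replaced rather than sent to capture as-is. A rough usage sketch with made-up inputs:

    truncate('a'.repeat(250000));
    // => 200,000 bytes of 'a' followed by '... [truncated]'

    sanitizeValues({ role: 'user', content: 'ok\ud800' });
    // => { role: 'user', content: 'ok\ufffd' } (lone surrogate replaced with U+FFFD)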
@@ -77,11 +108,15 @@ const sendEventToPosthog = ({
    tools
  }) => {
    if (client.capture) {
+     // sanitize input and output for UTF-8 validity
+     const safeInput = sanitizeValues(input);
+     const safeOutput = sanitizeValues(output);
+     const safeError = sanitizeValues(error);
      let errorData = {};
      if (isError) {
        errorData = {
          $ai_is_error: true,
-         $ai_error: error
+         $ai_error: safeError
        };
      }
      let costOverrideData = {};
@@ -112,8 +147,8 @@ const sendEventToPosthog = ({
      $ai_provider: params.posthogProviderOverride ?? provider,
      $ai_model: params.posthogModelOverride ?? model,
      $ai_model_parameters: getModelParams(params),
-     $ai_input: withPrivacyMode(client, params.posthogPrivacyMode ?? false, input),
-     $ai_output_choices: withPrivacyMode(client, params.posthogPrivacyMode ?? false, output),
+     $ai_input: withPrivacyMode(client, params.posthogPrivacyMode ?? false, safeInput),
+     $ai_output_choices: withPrivacyMode(client, params.posthogPrivacyMode ?? false, safeOutput),
      $ai_http_status: httpStatus,
      $ai_input_tokens: usage.inputTokens ?? 0,
      $ai_output_tokens: usage.outputTokens ?? 0,
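$ai_input and $ai_output_choices now receive the sanitized values, and the privacy-mode check still applies on top of that, so a privacy-mode client withholds content either way. Roughly:

    withPrivacyMode(client, true, safeInput);   // => null, content withheld
    withPrivacyMode(client, false, safeInput);  // => the sanitized input, sent as $ai_input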
@@ -469,14 +504,26 @@ const mapVercelParams = params => {
    };
  };
  const mapVercelPrompt = prompt => {
-   return prompt.map(p => {
+   // normalize single inputs into an array of messages
+   let promptsArray;
+   if (typeof prompt === 'string') {
+     promptsArray = [{
+       role: 'user',
+       content: prompt
+     }];
+   } else if (!Array.isArray(prompt)) {
+     promptsArray = [prompt];
+   } else {
+     promptsArray = prompt;
+   }
+   return promptsArray.map(p => {
      let content = {};
      if (Array.isArray(p.content)) {
        content = p.content.map(c => {
          if (c.type === 'text') {
            return {
              type: 'text',
-             content: c.text
+             content: truncate(c.text)
            };
          } else if (c.type === 'image') {
            return {
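mapVercelPrompt previously assumed prompt was already an array of messages, so a bare string or a single message object would throw on .map. The normalization above folds all three shapes into one array before mapping, and text parts are truncated on the way through. For example, with a hypothetical string prompt:

    mapVercelPrompt('What is PostHog?');
    // => roughly [{ role: 'user', content: { type: 'text', text: 'What is PostHog?' } }]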
@@ -522,7 +569,7 @@ const mapVercelPrompt = prompt => {
    } else {
      content = {
        type: 'text',
-       text: p.content
+       text: truncate(p.content)
      };
    }
    return {
@@ -532,46 +579,62 @@ const mapVercelPrompt = prompt => {
    });
  };
  const mapVercelOutput = result => {
+   // normalize string results to object
+   const normalizedResult = typeof result === 'string' ? {
+     text: result
+   } : result;
    const output = {
-     ...(result.text ? {
-       text: result.text
+     ...(normalizedResult.text ? {
+       text: normalizedResult.text
      } : {}),
-     ...(result.object ? {
-       object: result.object
+     ...(normalizedResult.object ? {
+       object: normalizedResult.object
      } : {}),
-     ...(result.reasoning ? {
-       reasoning: result.reasoning
+     ...(normalizedResult.reasoning ? {
+       reasoning: normalizedResult.reasoning
      } : {}),
-     ...(result.response ? {
-       response: result.response
+     ...(normalizedResult.response ? {
+       response: normalizedResult.response
      } : {}),
-     ...(result.finishReason ? {
-       finishReason: result.finishReason
+     ...(normalizedResult.finishReason ? {
+       finishReason: normalizedResult.finishReason
      } : {}),
-     ...(result.usage ? {
-       usage: result.usage
+     ...(normalizedResult.usage ? {
+       usage: normalizedResult.usage
      } : {}),
-     ...(result.warnings ? {
-       warnings: result.warnings
+     ...(normalizedResult.warnings ? {
+       warnings: normalizedResult.warnings
      } : {}),
-     ...(result.providerMetadata ? {
-       toolCalls: result.providerMetadata
+     ...(normalizedResult.providerMetadata ? {
+       toolCalls: normalizedResult.providerMetadata
+     } : {}),
+     ...(normalizedResult.files ? {
+       files: normalizedResult.files.map(file => ({
+         name: file.name,
+         size: file.size,
+         type: file.type
+       }))
      } : {})
    };
-   // if text and no object or reasoning, return text
    if (output.text && !output.object && !output.reasoning) {
      return [{
-       content: output.text,
+       content: truncate(output.text),
        role: 'assistant'
      }];
    }
-   return [{
-     content: JSON.stringify(output),
-     role: 'assistant'
-   }];
+   // otherwise stringify and truncate
+   try {
+     const jsonOutput = JSON.stringify(output);
+     return [{
+       content: truncate(jsonOutput),
+       role: 'assistant'
+     }];
+   } catch (error) {
+     console.error('Error stringifying output');
+     return [];
+   }
  };
  const extractProvider = model => {
-   // vercel provider is in the format of provider.endpoint
    const provider = model.provider.toLowerCase();
    const providerName = provider.split('.')[0];
    return providerName;
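mapVercelOutput now accepts a plain string result, records file metadata only (name, size, type, never file contents), and truncates whatever it returns; a failing JSON.stringify (for example on a circular structure) now yields an empty array instead of throwing. A sketch of the two main paths, with invented results:

    mapVercelOutput('Hello!');
    // => [{ content: 'Hello!', role: 'assistant' }]

    mapVercelOutput({ object: { answer: 42 }, finishReason: 'stop' });
    // => [{ content: '{"object":{"answer":42},"finishReason":"stop"}', role: 'assistant' }]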
@@ -649,7 +712,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
          outputTokens: 0
        },
        isError: true,
-       error: JSON.stringify(error)
+       error: truncate(JSON.stringify(error))
      });
      throw error;
    }
@@ -741,7 +804,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
          outputTokens: 0
        },
        isError: true,
-       error: JSON.stringify(error)
+       error: truncate(JSON.stringify(error))
      });
      throw error;
    }
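Both error paths in the instrumentation middleware (generate and stream) now cap the serialized error the same way as model output, so a provider error that embeds a large response body cannot inflate the capture payload. Roughly, with a hypothetical error:

    const err = new Error('model overloaded');
    err.responseBody = 'x'.repeat(500000);
    truncate(JSON.stringify(err));
    // => 200,000 bytes of JSON followed by '... [truncated]'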