@posthog/ai 7.2.2 → 7.3.1
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- package/dist/anthropic/index.cjs +1 -1
- package/dist/anthropic/index.cjs.map +1 -1
- package/dist/anthropic/index.mjs +1 -1
- package/dist/anthropic/index.mjs.map +1 -1
- package/dist/gemini/index.cjs +1 -1
- package/dist/gemini/index.cjs.map +1 -1
- package/dist/gemini/index.d.ts +2 -7
- package/dist/gemini/index.mjs +1 -1
- package/dist/gemini/index.mjs.map +1 -1
- package/dist/index.cjs +167 -141
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts +9 -9
- package/dist/index.mjs +167 -141
- package/dist/index.mjs.map +1 -1
- package/dist/langchain/index.cjs +1 -1
- package/dist/langchain/index.cjs.map +1 -1
- package/dist/langchain/index.mjs +1 -1
- package/dist/langchain/index.mjs.map +1 -1
- package/dist/openai/index.cjs +21 -14
- package/dist/openai/index.cjs.map +1 -1
- package/dist/openai/index.mjs +21 -14
- package/dist/openai/index.mjs.map +1 -1
- package/dist/vercel/index.cjs +131 -113
- package/dist/vercel/index.cjs.map +1 -1
- package/dist/vercel/index.d.ts +7 -2
- package/dist/vercel/index.mjs +131 -113
- package/dist/vercel/index.mjs.map +1 -1
- package/package.json +11 -6
package/dist/vercel/index.mjs
CHANGED
@@ -1,8 +1,7 @@
-import { wrapLanguageModel } from 'ai';
 import { v4 } from 'uuid';
 import { Buffer } from 'buffer';
 
-var version = "7.2.2";
+var version = "7.3.1";
 
 // Type guards for safer type checking
 
@@ -582,68 +581,126 @@ const extractProvider = model => {
   const providerName = provider.split('.')[0];
   return providerName;
 };
-
-
-
-
-
-
+
+// Extract web search count from provider metadata (works for both V2 and V3)
+const extractWebSearchCount = (providerMetadata, usage) => {
+  // Try Anthropic-specific extraction
+  if (providerMetadata && typeof providerMetadata === 'object' && 'anthropic' in providerMetadata && providerMetadata.anthropic && typeof providerMetadata.anthropic === 'object' && 'server_tool_use' in providerMetadata.anthropic) {
+    const serverToolUse = providerMetadata.anthropic.server_tool_use;
+    if (serverToolUse && typeof serverToolUse === 'object' && 'web_search_requests' in serverToolUse && typeof serverToolUse.web_search_requests === 'number') {
+      return serverToolUse.web_search_requests;
+    }
+  }
+
+  // Fall back to generic calculation
+  return calculateWebSearchCount({
+    usage,
+    providerMetadata
+  });
+};
+
+// Extract additional token values from provider metadata
+const extractAdditionalTokenValues = providerMetadata => {
+  if (providerMetadata && typeof providerMetadata === 'object' && 'anthropic' in providerMetadata && providerMetadata.anthropic && typeof providerMetadata.anthropic === 'object' && 'cacheCreationInputTokens' in providerMetadata.anthropic) {
+    return {
+      cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens
+    };
+  }
+  return {};
+};
+
+// Helper to extract numeric token value from V2 (number) or V3 (object with .total) usage formats
+const extractTokenCount = value => {
+  if (typeof value === 'number') {
+    return value;
+  }
+  if (value && typeof value === 'object' && 'total' in value && typeof value.total === 'number') {
+    return value.total;
+  }
+  return undefined;
+};
+
+// Helper to extract reasoning tokens from V2 (usage.reasoningTokens) or V3 (usage.outputTokens.reasoning)
+const extractReasoningTokens = usage => {
+  // V2 style: top-level reasoningTokens
+  if ('reasoningTokens' in usage) {
+    return usage.reasoningTokens;
+  }
+  // V3 style: nested in outputTokens.reasoning
+  if ('outputTokens' in usage && usage.outputTokens && typeof usage.outputTokens === 'object' && 'reasoning' in usage.outputTokens) {
+    return usage.outputTokens.reasoning;
+  }
+  return undefined;
+};
+
+// Helper to extract cached input tokens from V2 (usage.cachedInputTokens) or V3 (usage.inputTokens.cacheRead)
+const extractCacheReadTokens = usage => {
+  // V2 style: top-level cachedInputTokens
+  if ('cachedInputTokens' in usage) {
+    return usage.cachedInputTokens;
+  }
+  // V3 style: nested in inputTokens.cacheRead
+  if ('inputTokens' in usage && usage.inputTokens && typeof usage.inputTokens === 'object' && 'cacheRead' in usage.inputTokens) {
+    return usage.inputTokens.cacheRead;
+  }
+  return undefined;
+};
+
+/**
+ * Wraps a Vercel AI SDK language model (V2 or V3) with PostHog tracing.
+ * Automatically detects the model version and applies appropriate instrumentation.
+ */
+const wrapVercelLanguageModel = (model, phClient, options) => {
+  const traceId = options.posthogTraceId ?? v4();
+  const mergedOptions = {
+    ...options,
+    posthogTraceId: traceId,
+    posthogDistinctId: options.posthogDistinctId,
+    posthogProperties: {
+      ...options.posthogProperties,
+      $ai_framework: 'vercel',
+      $ai_framework_version: model.specificationVersion === 'v3' ? '6' : '5'
+    }
+  };
+
+  // Create wrapped model that preserves the original type
+  const wrappedModel = {
+    ...model,
+    doGenerate: async params => {
       const startTime = Date.now();
       const mergedParams = {
-        ...
-        ...mapVercelParams(params)
-        posthogProperties: {
-          ...options.posthogProperties,
-          $ai_framework: 'vercel'
-        }
+        ...mergedOptions,
+        ...mapVercelParams(params)
       };
       const availableTools = extractAvailableToolCalls('vercel', params);
       try {
-        const result = await doGenerate();
-        const modelId =
-        const provider =
+        const result = await model.doGenerate(params);
+        const modelId = mergedOptions.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId);
+        const provider = mergedOptions.posthogProviderOverride ?? extractProvider(model);
         const baseURL = ''; // cannot currently get baseURL from vercel
         const content = mapVercelOutput(result.content);
         const latency = (Date.now() - startTime) / 1000;
         const providerMetadata = result.providerMetadata;
-        const additionalTokenValues =
-
-          cacheCreationInputTokens: providerMetadata.anthropic.cacheCreationInputTokens
-        } : {})
-        };
+        const additionalTokenValues = extractAdditionalTokenValues(providerMetadata);
+        const webSearchCount = extractWebSearchCount(providerMetadata, result.usage);
 
-        //
-
-        if (providerMetadata?.anthropic && typeof providerMetadata.anthropic === 'object' && 'server_tool_use' in providerMetadata.anthropic) {
-          // Anthropic-specific extraction
-          const serverToolUse = providerMetadata.anthropic.server_tool_use;
-          if (serverToolUse && typeof serverToolUse === 'object' && 'web_search_requests' in serverToolUse && typeof serverToolUse.web_search_requests === 'number') {
-            webSearchCount = serverToolUse.web_search_requests;
-          }
-        } else {
-          // For other providers through Vercel, pass available metadata to helper
-          // Note: Vercel abstracts provider responses, so we may not have access to
-          // raw citations/annotations unless Vercel exposes them in usage/metadata
-          webSearchCount = calculateWebSearchCount({
-            usage: result.usage,
-            providerMetadata: providerMetadata
-          });
-        }
+        // V2 usage has simple numbers, V3 has objects with .total - normalize both
+        const usageObj = result.usage;
         const usage = {
-          inputTokens: result.usage.inputTokens,
-          outputTokens: result.usage.outputTokens,
-          reasoningTokens:
-          cacheReadInputTokens:
+          inputTokens: extractTokenCount(result.usage.inputTokens),
+          outputTokens: extractTokenCount(result.usage.outputTokens),
+          reasoningTokens: extractReasoningTokens(usageObj),
+          cacheReadInputTokens: extractCacheReadTokens(usageObj),
           webSearchCount,
           ...additionalTokenValues
         };
         await sendEventToPosthog({
           client: phClient,
-          distinctId:
-          traceId:
+          distinctId: mergedOptions.posthogDistinctId,
+          traceId: mergedOptions.posthogTraceId ?? v4(),
           model: modelId,
           provider: provider,
-          input:
+          input: mergedOptions.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
           output: content,
           latency,
           baseURL,
@@ -651,18 +708,18 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
           httpStatus: 200,
           usage,
           tools: availableTools,
-          captureImmediate:
+          captureImmediate: mergedOptions.posthogCaptureImmediate
         });
         return result;
       } catch (error) {
         const modelId = model.modelId;
         await sendEventToPosthog({
           client: phClient,
-          distinctId:
-          traceId:
+          distinctId: mergedOptions.posthogDistinctId,
+          traceId: mergedOptions.posthogTraceId ?? v4(),
           model: modelId,
           provider: model.provider,
-          input:
+          input: mergedOptions.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
           output: [],
           latency: 0,
           baseURL: '',
@@ -675,30 +732,23 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
           isError: true,
           error: truncate(JSON.stringify(error)),
           tools: availableTools,
-          captureImmediate:
+          captureImmediate: mergedOptions.posthogCaptureImmediate
         });
         throw error;
       }
     },
-
-      doStream,
-      params
-    }) => {
+    doStream: async params => {
       const startTime = Date.now();
       let generatedText = '';
       let reasoningText = '';
       let usage = {};
       let providerMetadata = undefined;
       const mergedParams = {
-        ...
-        ...mapVercelParams(params)
-        posthogProperties: {
-          ...options.posthogProperties,
-          $ai_framework: 'vercel'
-        }
+        ...mergedOptions,
+        ...mapVercelParams(params)
       };
-      const modelId =
-      const provider =
+      const modelId = mergedOptions.posthogModelOverride ?? model.modelId;
+      const provider = mergedOptions.posthogProviderOverride ?? extractProvider(model);
       const availableTools = extractAvailableToolCalls('vercel', params);
       const baseURL = ''; // cannot currently get baseURL from vercel
 
@@ -708,15 +758,15 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
        const {
           stream,
           ...rest
-        } = await doStream();
+        } = await model.doStream(params);
         const transformStream = new TransformStream({
           transform(chunk, controller) {
-            // Handle
+            // Handle streaming patterns - compatible with both V2 and V3
            if (chunk.type === 'text-delta') {
              generatedText += chunk.delta;
            }
            if (chunk.type === 'reasoning-delta') {
-              reasoningText += chunk.delta;
+              reasoningText += chunk.delta;
            }
 
            // Handle tool call chunks
@@ -737,7 +787,6 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
            }
            if (chunk.type === 'tool-input-end') {
              // Tool call is complete, keep it in the map for final processing
-              // Nothing specific to do here, the tool call is already complete
            }
            if (chunk.type === 'tool-call') {
              // Direct tool call chunk (complete tool call)
@@ -749,14 +798,13 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
            }
            if (chunk.type === 'finish') {
              providerMetadata = chunk.providerMetadata;
-              const additionalTokenValues = providerMetadata
-
-              } : {};
+              const additionalTokenValues = extractAdditionalTokenValues(providerMetadata);
+              const chunkUsage = chunk.usage || {};
              usage = {
-                inputTokens: chunk.usage?.inputTokens,
-                outputTokens: chunk.usage?.outputTokens,
-                reasoningTokens:
-                cacheReadInputTokens:
+                inputTokens: extractTokenCount(chunk.usage?.inputTokens),
+                outputTokens: extractTokenCount(chunk.usage?.outputTokens),
+                reasoningTokens: extractReasoningTokens(chunkUsage),
+                cacheReadInputTokens: extractCacheReadTokens(chunkUsage),
                ...additionalTokenValues
              };
            }
@@ -798,24 +846,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
              role: 'assistant',
              content: content.length === 1 && content[0].type === 'text' ? content[0].text : content
            }] : [];
-
-            // Calculate web search count based on provider
-            let webSearchCount = 0;
-            if (providerMetadata && typeof providerMetadata === 'object' && 'anthropic' in providerMetadata && providerMetadata.anthropic && typeof providerMetadata.anthropic === 'object' && 'server_tool_use' in providerMetadata.anthropic) {
-              // Anthropic-specific extraction
-              const serverToolUse = providerMetadata.anthropic.server_tool_use;
-              if (serverToolUse && typeof serverToolUse === 'object' && 'web_search_requests' in serverToolUse && typeof serverToolUse.web_search_requests === 'number') {
-                webSearchCount = serverToolUse.web_search_requests;
-              }
-            } else {
-              // For other providers through Vercel, pass available metadata to helper
-              // Note: Vercel abstracts provider responses, so we may not have access to
-              // raw citations/annotations unless Vercel exposes them in usage/metadata
-              webSearchCount = calculateWebSearchCount({
-                usage: usage,
-                providerMetadata: providerMetadata
-              });
-            }
+            const webSearchCount = extractWebSearchCount(providerMetadata, usage);
 
            // Update usage with web search count
            const finalUsage = {
@@ -824,11 +855,11 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
            };
            await sendEventToPosthog({
              client: phClient,
-              distinctId:
-              traceId:
+              distinctId: mergedOptions.posthogDistinctId,
+              traceId: mergedOptions.posthogTraceId ?? v4(),
              model: modelId,
              provider: provider,
-              input:
+              input: mergedOptions.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
              output: output,
              latency,
              baseURL,
@@ -836,7 +867,7 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
              httpStatus: 200,
              usage: finalUsage,
              tools: availableTools,
-              captureImmediate:
+              captureImmediate: mergedOptions.posthogCaptureImmediate
            });
          }
        });
@@ -847,11 +878,11 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
      } catch (error) {
        await sendEventToPosthog({
          client: phClient,
-          distinctId:
-          traceId:
+          distinctId: mergedOptions.posthogDistinctId,
+          traceId: mergedOptions.posthogTraceId ?? v4(),
          model: modelId,
          provider: provider,
-          input:
+          input: mergedOptions.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
          output: [],
          latency: 0,
          baseURL: '',
@@ -864,25 +895,12 @@ const createInstrumentationMiddleware = (phClient, model, options) => {
          isError: true,
          error: truncate(JSON.stringify(error)),
          tools: availableTools,
-          captureImmediate:
+          captureImmediate: mergedOptions.posthogCaptureImmediate
        });
        throw error;
      }
    }
  };
-  return middleware;
-};
-const wrapVercelLanguageModel = (model, phClient, options) => {
-  const traceId = options.posthogTraceId ?? v4();
-  const middleware = createInstrumentationMiddleware(phClient, model, {
-    ...options,
-    posthogTraceId: traceId,
-    posthogDistinctId: options.posthogDistinctId
-  });
-  const wrappedModel = wrapLanguageModel({
-    model,
-    middleware
-  });
  return wrappedModel;
 };
 
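For context on the diff above: 7.3.1 drops the createInstrumentationMiddleware / wrapLanguageModel('ai') approach and instead returns a plain { ...model, doGenerate, doStream } wrapper that normalizes V2 and V3 usage shapes (extractTokenCount, extractReasoningTokens, extractCacheReadTokens) and stamps $ai_framework: 'vercel' plus $ai_framework_version ('6' when model.specificationVersion is 'v3', otherwise '5'). Below is a minimal caller-side sketch, assuming wrapVercelLanguageModel is exposed from the package's vercel entry point and that a posthog-node client and an @ai-sdk/openai model are used. The posthog* option names come from the wrapper code above; the import paths, model choice, and client setup are illustrative assumptions, not confirmed by this diff.

```ts
// Sketch only: entry-point path, model setup, and client config are assumptions.
// The option names mirror the fields read by the wrapper in the diff above.
import { PostHog } from 'posthog-node';
import { openai } from '@ai-sdk/openai'; // any V2/V3 Vercel AI SDK model, used here for illustration
import { generateText } from 'ai';
import { wrapVercelLanguageModel } from '@posthog/ai/vercel'; // assumed export location

const phClient = new PostHog('<project-api-key>', { host: 'https://us.i.posthog.com' });

const model = wrapVercelLanguageModel(openai('gpt-4o-mini'), phClient, {
  posthogDistinctId: 'user-123',         // read as mergedOptions.posthogDistinctId
  posthogPrivacyMode: false,             // when true, input is captured as ''
  posthogCaptureImmediate: true,         // forwarded to sendEventToPosthog as captureImmediate
  posthogProperties: { team: 'growth' }, // merged with $ai_framework / $ai_framework_version
});

const { text } = await generateText({ model, prompt: 'Say hello' });
console.log(text);

await phClient.shutdown();
```

Because the wrapper spreads the original model, specificationVersion, provider, and modelId are preserved, which is what the doGenerate and doStream overrides rely on when they fall back to model.modelId and extractProvider(model) for the captured event.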