@posthog/ai 3.3.1 → 3.3.2
This diff shows the content of publicly available package versions as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the registry.
- package/CHANGELOG.md +4 -0
- package/lib/index.cjs.js +36 -8
- package/lib/index.cjs.js.map +1 -1
- package/lib/index.esm.js +36 -8
- package/lib/index.esm.js.map +1 -1
- package/package.json +1 -1
- package/src/langchain/callbacks.ts +39 -7
- package/src/utils.ts +1 -1
- package/src/vercel/middleware.ts +4 -4
package/src/langchain/callbacks.ts
CHANGED
```diff
@@ -82,7 +82,6 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
     parentRunId?: string,
     tags?: string[],
     metadata?: Record<string, unknown>,
-
     runType?: string,
     runName?: string
   ): void {
```
```diff
@@ -432,10 +431,18 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
       eventProperties['$ai_is_error'] = true
     } else {
       // Handle token usage
-      const [inputTokens, outputTokens] = this.parseUsage(output)
+      const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output)
       eventProperties['$ai_input_tokens'] = inputTokens
       eventProperties['$ai_output_tokens'] = outputTokens
 
+      // Add additional token data to properties
+      if (additionalTokenData.cacheReadInputTokens) {
+        eventProperties['$ai_cache_read_tokens'] = additionalTokenData.cacheReadInputTokens
+      }
+      if (additionalTokenData.reasoningTokens) {
+        eventProperties['$ai_reasoning_tokens'] = additionalTokenData.reasoningTokens
+      }
+
       // Handle generations/completions
       let completions
       if (output.generations && Array.isArray(output.generations)) {
```
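With this change, `parseUsage` returns a third element and the handler copies any cache-read or reasoning counts onto the event. A hedged sketch of the resulting properties, assuming `parseUsage` returned `[1200, 350, { cacheReadInputTokens: 1024, reasoningTokens: 128 }]` (values are illustrative, not from the source):

```ts
// Illustrative only: the shape of eventProperties after the new branches run.
const eventProperties: Record<string, unknown> = {
  $ai_input_tokens: 1200,
  $ai_output_tokens: 350,
  $ai_cache_read_tokens: 1024, // set only when cacheReadInputTokens is truthy
  $ai_reasoning_tokens: 128, // set only when reasoningTokens is truthy
}
```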
```diff
@@ -471,14 +478,17 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
     }
   }
 
-  private _getLangchainRunName(serialized: any, ...args: any
+  private _getLangchainRunName(serialized: any, ...args: any): string | undefined {
     if (args && args.length > 0) {
       for (const arg of args) {
         if (arg && typeof arg === 'object' && 'name' in arg) {
           return arg.name
+        } else if (arg && typeof arg === 'object' && 'runName' in arg) {
+          return arg.runName
         }
       }
     }
+
     if (serialized && serialized.name) {
       return serialized.name
     }
```
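The new `runName` branch lets the handler pick up the run name that LangChain JS forwards through `RunnableConfig`. A minimal usage sketch, assuming the handler is exported from the package root and its constructor takes a `client` option (both are assumptions; check the package README for the exact import path and options):

```ts
import { PostHog } from 'posthog-node'
import { LangChainCallbackHandler } from '@posthog/ai' // import path assumed
import { ChatOpenAI } from '@langchain/openai'

const phClient = new PostHog('phc_your_project_key', { host: 'https://us.i.posthog.com' })
const callbackHandler = new LangChainCallbackHandler({ client: phClient }) // constructor options assumed

const model = new ChatOpenAI({ model: 'gpt-4o-mini' })
// runName is part of LangChain's public RunnableConfig; the new branch resolves it.
const result = await model.invoke('What is PostHog?', {
  runName: 'qa-call',
  callbacks: [callbackHandler],
})
```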
```diff
@@ -520,7 +530,7 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
     return messageDict
   }
 
-  private _parseUsageModel(usage: any): [number, number] {
+  private _parseUsageModel(usage: any): [number, number, Record<string, any>] {
     const conversionList: Array<[string, 'input' | 'output']> = [
       ['promptTokens', 'input'],
       ['completionTokens', 'output'],
```
```diff
@@ -548,11 +558,32 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
       { input: 0, output: 0 }
     )
 
-    return [parsedUsage.input, parsedUsage.output]
+    // Extract additional token details like cached tokens and reasoning tokens
+    const additionalTokenData: Record<string, any> = {}
+
+    // Check for cached tokens in various formats
+    if (usage.prompt_tokens_details?.cached_tokens != null) {
+      additionalTokenData.cacheReadInputTokens = usage.prompt_tokens_details.cached_tokens
+    } else if (usage.input_token_details?.cache_read != null) {
+      additionalTokenData.cacheReadInputTokens = usage.input_token_details.cache_read
+    } else if (usage.cachedPromptTokens != null) {
+      additionalTokenData.cacheReadInputTokens = usage.cachedPromptTokens
+    }
+
+    // Check for reasoning tokens in various formats
+    if (usage.completion_tokens_details?.reasoning_tokens != null) {
+      additionalTokenData.reasoningTokens = usage.completion_tokens_details.reasoning_tokens
+    } else if (usage.output_token_details?.reasoning != null) {
+      additionalTokenData.reasoningTokens = usage.output_token_details.reasoning
+    } else if (usage.reasoningTokens != null) {
+      additionalTokenData.reasoningTokens = usage.reasoningTokens
+    }
+
+    return [parsedUsage.input, parsedUsage.output, additionalTokenData]
   }
 
-  private parseUsage(response: LLMResult): [number, number] {
-    let llmUsage: [number, number] = [0, 0]
+  private parseUsage(response: LLMResult): [number, number, Record<string, any>] {
+    let llmUsage: [number, number, Record<string, any>] = [0, 0, {}]
     const llmUsageKeys = ['token_usage', 'usage', 'tokenUsage']
 
     if (response.llmOutput != null) {
```
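The three fallbacks in each chain appear to correspond to OpenAI's raw snake_case usage, LangChain's normalized token-detail fields, and camelCase provider variants. Hedged, illustrative samples of objects each branch would match (field names come from the diff; values are invented):

```ts
// OpenAI-style snake_case usage (first branch of each chain)
const openAIUsage = {
  prompt_tokens: 1200,
  completion_tokens: 350,
  prompt_tokens_details: { cached_tokens: 1024 },
  completion_tokens_details: { reasoning_tokens: 128 },
}

// LangChain-style token details (second branch)
const langchainUsage = {
  input_tokens: 1200,
  output_tokens: 350,
  input_token_details: { cache_read: 1024 },
  output_token_details: { reasoning: 128 },
}

// camelCase provider usage (third branch)
const camelCaseUsage = {
  promptTokens: 1200,
  completionTokens: 350,
  cachedPromptTokens: 1024,
  reasoningTokens: 128,
}
```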
```diff
@@ -566,6 +597,7 @@ export class LangChainCallbackHandler extends BaseCallbackHandler {
     if (llmUsage[0] === 0 && llmUsage[1] === 0 && response.generations) {
       for (const generation of response.generations) {
         for (const genChunk of generation) {
+          // Check other paths for usage information
          if (genChunk.generationInfo?.usage_metadata) {
            llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata)
            return llmUsage
```
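`parseUsage` still reads usage from `llmOutput` under the keys `token_usage`, `usage`, or `tokenUsage` first, and only falls back to per-chunk `generationInfo.usage_metadata` when both counts come back zero. An illustrative `LLMResult` fragment that the primary path would consume (shape inferred from the key list; values invented):

```ts
// The llmOutput path: any of the three keys feeds _parseUsageModel.
const response = {
  llmOutput: {
    tokenUsage: {
      promptTokens: 1200,
      completionTokens: 350,
      cachedPromptTokens: 1024, // now surfaced as cacheReadInputTokens
    },
  },
  generations: [],
}
```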
package/src/utils.ts
CHANGED

```diff
@@ -167,7 +167,7 @@ export const sendEventToPosthog = ({
     }
   }
 
-
+  const additionalTokenValues = {
    ...(usage.reasoningTokens ? { $ai_reasoning_tokens: usage.reasoningTokens } : {}),
    ...(usage.cacheReadInputTokens ? { $ai_cache_read_input_tokens: usage.cacheReadInputTokens } : {}),
    ...(usage.cacheCreationInputTokens ? { $ai_cache_creation_input_tokens: usage.cacheCreationInputTokens } : {}),
```
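The conditional spreads keep zero or missing counts off the event entirely rather than sending explicit zeros. The same idiom in isolation, with an invented `usage` object:

```ts
const usage = { reasoningTokens: 128, cacheReadInputTokens: 0 }
const additionalTokenValues = {
  ...(usage.reasoningTokens ? { $ai_reasoning_tokens: usage.reasoningTokens } : {}),
  ...(usage.cacheReadInputTokens ? { $ai_cache_read_input_tokens: usage.cacheReadInputTokens } : {}),
}
// Result: { $ai_reasoning_tokens: 128 }; the zero count is dropped.
```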
package/src/vercel/middleware.ts
CHANGED

```diff
@@ -113,7 +113,7 @@ const mapVercelPrompt = (prompt: LanguageModelV1Prompt): PostHogInput[] => {
 }
 
 const mapVercelOutput = (result: any): PostHogInput[] => {
-
+  const output = {
    ...(result.text ? { text: result.text } : {}),
    ...(result.object ? { object: result.object } : {}),
    ...(result.reasoning ? { reasoning: result.reasoning } : {}),
```

```diff
@@ -156,10 +156,10 @@ export const createInstrumentationMiddleware = (
           options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId)
         const provider = options.posthogProviderOverride ?? extractProvider(model)
         const baseURL = '' // cannot currently get baseURL from vercel
-
+        const content = mapVercelOutput(result)
         // let tools = result.toolCalls
-
-
+        const providerMetadata = result.providerMetadata
+        const additionalTokenValues = {
          ...(providerMetadata?.openai?.reasoningTokens
            ? { reasoningTokens: providerMetadata.openai.reasoningTokens }
            : {}),
```
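On the Vercel AI SDK side, reasoning-token counts ride on the generation result's provider metadata rather than on the usage object. A hedged sketch of the result fields the middleware now inspects (shape per the diff; values invented):

```ts
// Fields of a Vercel AI SDK generate result that the middleware reads.
const result = {
  text: 'Hello!',
  providerMetadata: {
    openai: { reasoningTokens: 128 }, // mapped into additionalTokenValues
  },
}
```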