@posthog/ai 5.2.2 → 5.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/LICENSE +245 -0
  2. package/{lib → dist}/anthropic/index.cjs +7 -12
  3. package/{lib → dist}/anthropic/index.cjs.map +1 -1
  4. package/{lib → dist}/anthropic/index.mjs +4 -5
  5. package/{lib → dist}/anthropic/index.mjs.map +1 -1
  6. package/{lib → dist}/gemini/index.cjs +1 -1
  7. package/{lib → dist}/gemini/index.cjs.map +1 -1
  8. package/{lib → dist}/gemini/index.mjs.map +1 -1
  9. package/{lib → dist}/index.cjs +547 -479
  10. package/dist/index.cjs.map +1 -0
  11. package/{lib → dist}/index.mjs +530 -456
  12. package/dist/index.mjs.map +1 -0
  13. package/{lib → dist}/langchain/index.cjs +150 -110
  14. package/dist/langchain/index.cjs.map +1 -0
  15. package/{lib → dist}/langchain/index.mjs +147 -104
  16. package/dist/langchain/index.mjs.map +1 -0
  17. package/{lib → dist}/openai/index.cjs +7 -1
  18. package/dist/openai/index.cjs.map +1 -0
  19. package/{lib → dist}/openai/index.mjs +6 -0
  20. package/dist/openai/index.mjs.map +1 -0
  21. package/{lib → dist}/vercel/index.cjs +0 -2
  22. package/{lib → dist}/vercel/index.cjs.map +1 -1
  23. package/{lib → dist}/vercel/index.mjs.map +1 -1
  24. package/package.json +42 -33
  25. package/CHANGELOG.md +0 -89
  26. package/index.ts +0 -1
  27. package/lib/index.cjs.map +0 -1
  28. package/lib/index.mjs.map +0 -1
  29. package/lib/langchain/index.cjs.map +0 -1
  30. package/lib/langchain/index.mjs.map +0 -1
  31. package/lib/openai/index.cjs.map +0 -1
  32. package/lib/openai/index.mjs.map +0 -1
  33. package/src/anthropic/index.ts +0 -211
  34. package/src/gemini/index.ts +0 -254
  35. package/src/index.ts +0 -13
  36. package/src/langchain/callbacks.ts +0 -640
  37. package/src/langchain/index.ts +0 -1
  38. package/src/openai/azure.ts +0 -481
  39. package/src/openai/index.ts +0 -498
  40. package/src/utils.ts +0 -287
  41. package/src/vercel/index.ts +0 -1
  42. package/src/vercel/middleware.ts +0 -393
  43. package/tests/callbacks.test.ts +0 -48
  44. package/tests/gemini.test.ts +0 -344
  45. package/tests/openai.test.ts +0 -403
  46. package/tsconfig.json +0 -10
  47. /package/{lib → dist}/anthropic/index.d.ts +0 -0
  48. /package/{lib → dist}/gemini/index.d.ts +0 -0
  49. /package/{lib → dist}/gemini/index.mjs +0 -0
  50. /package/{lib → dist}/index.d.ts +0 -0
  51. /package/{lib → dist}/langchain/index.d.ts +0 -0
  52. /package/{lib → dist}/openai/index.d.ts +0 -0
  53. /package/{lib → dist}/vercel/index.d.ts +0 -0
  54. /package/{lib → dist}/vercel/index.mjs +0 -0
@@ -1,640 +0,0 @@
1
- import { PostHog } from 'posthog-node'
2
- import { withPrivacyMode, getModelParams } from '../utils'
3
- import { BaseCallbackHandler } from '@langchain/core/callbacks/base'
4
- import type { Serialized } from '@langchain/core/load/serializable'
5
- import type { ChainValues } from '@langchain/core/utils/types'
6
- import type { LLMResult } from '@langchain/core/outputs'
7
- import type { AgentAction, AgentFinish } from '@langchain/core/agents'
8
- import type { DocumentInterface } from '@langchain/core/documents'
9
- import { ToolCall } from '@langchain/core/messages/tool'
10
- import { BaseMessage } from '@langchain/core/messages'
11
-
12
- interface SpanMetadata {
13
- /** Name of the trace/span (e.g. chain name) */
14
- name: string
15
- /** Timestamp (in ms) when the run started */
16
- startTime: number
17
- /** Timestamp (in ms) when the run ended (if already finished) */
18
- endTime?: number
19
- /** The input state */
20
- input?: any
21
- }
22
-
23
- interface GenerationMetadata extends SpanMetadata {
24
- /** Provider used (e.g. openai, anthropic) */
25
- provider?: string
26
- /** Model name used in the generation */
27
- model?: string
28
- /** The model parameters (temperature, max_tokens, etc.) */
29
- modelParams?: Record<string, any>
30
- /** The base URL—for example, the API base used */
31
- baseUrl?: string
32
- /** The tools used in the generation */
33
- tools?: Record<string, any>
34
- }
35
-
36
- /** A run may either be a Span or a Generation */
37
- type RunMetadata = SpanMetadata | GenerationMetadata
38
-
39
- /** Storage for run metadata */
40
- type RunMetadataStorage = { [runId: string]: RunMetadata }
41
-
42
- export class LangChainCallbackHandler extends BaseCallbackHandler {
43
- public name = 'PosthogCallbackHandler'
44
- private client: PostHog
45
- private distinctId?: string | number
46
- private traceId?: string | number
47
- private properties: Record<string, any>
48
- private privacyMode: boolean
49
- private groups: Record<string, any>
50
- private debug: boolean
51
-
52
- private runs: RunMetadataStorage = {}
53
- private parentTree: { [runId: string]: string } = {}
54
-
55
- constructor(options: {
56
- client: PostHog
57
- distinctId?: string | number
58
- traceId?: string | number
59
- properties?: Record<string, any>
60
- privacyMode?: boolean
61
- groups?: Record<string, any>
62
- debug?: boolean
63
- }) {
64
- if (!options.client) {
65
- throw new Error('PostHog client is required')
66
- }
67
- super()
68
- this.client = options.client
69
- this.distinctId = options.distinctId
70
- this.traceId = options.traceId
71
- this.properties = options.properties || {}
72
- this.privacyMode = options.privacyMode || false
73
- this.groups = options.groups || {}
74
- this.debug = options.debug || false
75
- }
76
-
77
- // ===== CALLBACK METHODS =====
78
-
79
- public handleChainStart(
80
- chain: Serialized,
81
- inputs: ChainValues,
82
- runId: string,
83
- parentRunId?: string,
84
- tags?: string[],
85
- metadata?: Record<string, unknown>,
86
- runType?: string,
87
- runName?: string
88
- ): void {
89
- this._logDebugEvent('on_chain_start', runId, parentRunId, { inputs, tags })
90
- this._setParentOfRun(runId, parentRunId)
91
- this._setTraceOrSpanMetadata(chain, inputs, runId, parentRunId, metadata, tags, runName)
92
- }
93
-
94
- public handleChainEnd(
95
- outputs: ChainValues,
96
- runId: string,
97
- parentRunId?: string,
98
- tags?: string[],
99
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
100
- kwargs?: { inputs?: Record<string, unknown> }
101
- ): void {
102
- this._logDebugEvent('on_chain_end', runId, parentRunId, { outputs, tags })
103
- this._popRunAndCaptureTraceOrSpan(runId, parentRunId, outputs)
104
- }
105
-
106
- public handleChainError(
107
- error: Error,
108
- runId: string,
109
- parentRunId?: string,
110
- tags?: string[],
111
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
112
- kwargs?: { inputs?: Record<string, unknown> }
113
- ): void {
114
- this._logDebugEvent('on_chain_error', runId, parentRunId, { error, tags })
115
- this._popRunAndCaptureTraceOrSpan(runId, parentRunId, error)
116
- }
117
-
118
- public handleChatModelStart(
119
- serialized: Serialized,
120
- messages: BaseMessage[][],
121
- runId: string,
122
- parentRunId?: string,
123
- extraParams?: Record<string, unknown>,
124
- tags?: string[],
125
- metadata?: Record<string, unknown>,
126
- runName?: string
127
- ): void {
128
- this._logDebugEvent('on_chat_model_start', runId, parentRunId, { messages, tags })
129
- this._setParentOfRun(runId, parentRunId)
130
- // Flatten the two-dimensional messages and convert each message to a plain object
131
- const input = messages.flat().map((m) => this._convertMessageToDict(m))
132
- this._setLLMMetadata(serialized, runId, input, metadata, extraParams, runName)
133
- }
134
-
135
- public handleLLMStart(
136
- serialized: Serialized,
137
- prompts: string[],
138
- runId: string,
139
- parentRunId?: string,
140
- extraParams?: Record<string, unknown>,
141
- tags?: string[],
142
- metadata?: Record<string, unknown>,
143
- runName?: string
144
- ): void {
145
- this._logDebugEvent('on_llm_start', runId, parentRunId, { prompts, tags })
146
- this._setParentOfRun(runId, parentRunId)
147
- this._setLLMMetadata(serialized, runId, prompts, metadata, extraParams, runName)
148
- }
149
-
150
- public handleLLMEnd(
151
- output: LLMResult,
152
- runId: string,
153
- parentRunId?: string,
154
- tags?: string[],
155
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
156
- extraParams?: Record<string, unknown>
157
- ): void {
158
- this._logDebugEvent('on_llm_end', runId, parentRunId, { output, tags })
159
- this._popRunAndCaptureGeneration(runId, parentRunId, output)
160
- }
161
-
162
- public handleLLMError(
163
- err: Error,
164
- runId: string,
165
- parentRunId?: string,
166
- tags?: string[],
167
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
168
- extraParams?: Record<string, unknown>
169
- ): void {
170
- this._logDebugEvent('on_llm_error', runId, parentRunId, { err, tags })
171
- this._popRunAndCaptureGeneration(runId, parentRunId, err)
172
- }
173
-
174
- public handleToolStart(
175
- tool: Serialized,
176
- input: string,
177
- runId: string,
178
- parentRunId?: string,
179
- tags?: string[],
180
- metadata?: Record<string, unknown>,
181
- runName?: string
182
- ): void {
183
- this._logDebugEvent('on_tool_start', runId, parentRunId, { input, tags })
184
- this._setParentOfRun(runId, parentRunId)
185
- this._setTraceOrSpanMetadata(tool, input, runId, parentRunId, metadata, tags, runName)
186
- }
187
-
188
- public handleToolEnd(output: any, runId: string, parentRunId?: string, tags?: string[]): void {
189
- this._logDebugEvent('on_tool_end', runId, parentRunId, { output, tags })
190
- this._popRunAndCaptureTraceOrSpan(runId, parentRunId, output)
191
- }
192
-
193
- public handleToolError(err: Error, runId: string, parentRunId?: string, tags?: string[]): void {
194
- this._logDebugEvent('on_tool_error', runId, parentRunId, { err, tags })
195
- this._popRunAndCaptureTraceOrSpan(runId, parentRunId, err)
196
- }
197
-
198
- public handleRetrieverStart(
199
- retriever: Serialized,
200
- query: string,
201
- runId: string,
202
- parentRunId?: string,
203
- tags?: string[],
204
- metadata?: Record<string, unknown>,
205
- name?: string
206
- ): void {
207
- this._logDebugEvent('on_retriever_start', runId, parentRunId, { query, tags })
208
- this._setParentOfRun(runId, parentRunId)
209
- this._setTraceOrSpanMetadata(retriever, query, runId, parentRunId, metadata, tags, name)
210
- }
211
-
212
- public handleRetrieverEnd(
213
- documents: DocumentInterface[],
214
- runId: string,
215
- parentRunId?: string,
216
- tags?: string[]
217
- ): void {
218
- this._logDebugEvent('on_retriever_end', runId, parentRunId, { documents, tags })
219
- this._popRunAndCaptureTraceOrSpan(runId, parentRunId, documents)
220
- }
221
-
222
- public handleRetrieverError(err: Error, runId: string, parentRunId?: string, tags?: string[]): void {
223
- this._logDebugEvent('on_retriever_error', runId, parentRunId, { err, tags })
224
- this._popRunAndCaptureTraceOrSpan(runId, parentRunId, err)
225
- }
226
-
227
- public handleAgentAction(action: AgentAction, runId: string, parentRunId?: string, tags?: string[]): void {
228
- this._logDebugEvent('on_agent_action', runId, parentRunId, { action, tags })
229
- this._setParentOfRun(runId, parentRunId)
230
- this._setTraceOrSpanMetadata(null, action, runId, parentRunId)
231
- }
232
-
233
- public handleAgentEnd(action: AgentFinish, runId: string, parentRunId?: string, tags?: string[]): void {
234
- this._logDebugEvent('on_agent_finish', runId, parentRunId, { action, tags })
235
- this._popRunAndCaptureTraceOrSpan(runId, parentRunId, action)
236
- }
237
-
238
- // ===== PRIVATE HELPERS =====
239
-
240
- private _setParentOfRun(runId: string, parentRunId?: string): void {
241
- if (parentRunId) {
242
- this.parentTree[runId] = parentRunId
243
- }
244
- }
245
-
246
- private _popParentOfRun(runId: string): void {
247
- delete this.parentTree[runId]
248
- }
249
-
250
- private _findRootRun(runId: string): string {
251
- let id = runId
252
- while (this.parentTree[id]) {
253
- id = this.parentTree[id]
254
- }
255
- return id
256
- }
257
-
258
- private _setTraceOrSpanMetadata(
259
- serialized: any,
260
- input: any,
261
- runId: string,
262
- parentRunId?: string,
263
- ...args: any[]
264
- ): void {
265
- // Use default names if not provided: if this is a top-level run, we mark it as a trace, otherwise as a span.
266
- const defaultName = parentRunId ? 'span' : 'trace'
267
- const runName = this._getLangchainRunName(serialized, ...args) || defaultName
268
- this.runs[runId] = {
269
- name: runName,
270
- input,
271
- startTime: Date.now(),
272
- } as SpanMetadata
273
- }
274
-
275
- private _setLLMMetadata(
276
- serialized: Serialized | null,
277
- runId: string,
278
- messages: any,
279
- metadata?: any,
280
- extraParams?: any,
281
- runName?: string
282
- ): void {
283
- const runNameFound = this._getLangchainRunName(serialized, { extraParams, runName }) || 'generation'
284
- const generation: GenerationMetadata = {
285
- name: runNameFound,
286
- input: messages,
287
- startTime: Date.now(),
288
- }
289
- if (extraParams) {
290
- generation.modelParams = getModelParams(extraParams.invocation_params)
291
- }
292
- if (metadata) {
293
- if (metadata.ls_model_name) {
294
- generation.model = metadata.ls_model_name
295
- }
296
- if (metadata.ls_provider) {
297
- generation.provider = metadata.ls_provider
298
- }
299
- }
300
- if (serialized && 'kwargs' in serialized && serialized.kwargs.openai_api_base) {
301
- generation.baseUrl = serialized.kwargs.openai_api_base
302
- }
303
- this.runs[runId] = generation
304
- }
305
-
306
- private _popRunMetadata(runId: string): RunMetadata | undefined {
307
- const endTime = Date.now()
308
- const run = this.runs[runId]
309
- if (!run) {
310
- console.warn(`No run metadata found for run ${runId}`)
311
- return undefined
312
- }
313
- run.endTime = endTime
314
- delete this.runs[runId]
315
- return run
316
- }
317
-
318
- private _getTraceId(runId: string): string {
319
- return this.traceId ? String(this.traceId) : this._findRootRun(runId)
320
- }
321
-
322
- private _getParentRunId(traceId: string, runId: string, parentRunId?: string): string | undefined {
323
- // Replace the parent-run if not found in our stored parent tree.
324
- if (parentRunId && !this.parentTree[parentRunId]) {
325
- return traceId
326
- }
327
- return parentRunId
328
- }
329
-
330
- private _popRunAndCaptureTraceOrSpan(
331
- runId: string,
332
- parentRunId: string | undefined,
333
- outputs: ChainValues | DocumentInterface[] | AgentFinish | Error | any
334
- ): void {
335
- const traceId = this._getTraceId(runId)
336
- this._popParentOfRun(runId)
337
- const run = this._popRunMetadata(runId)
338
- if (!run) {
339
- return
340
- }
341
- if ('modelParams' in run) {
342
- console.warn(`Run ${runId} is a generation, but attempted to be captured as a trace/span.`)
343
- return
344
- }
345
- const actualParentRunId = this._getParentRunId(traceId, runId, parentRunId)
346
- this._captureTraceOrSpan(traceId, runId, run as SpanMetadata, outputs, actualParentRunId)
347
- }
348
-
349
- private _captureTraceOrSpan(
350
- traceId: string,
351
- runId: string,
352
- run: SpanMetadata,
353
- outputs: ChainValues | DocumentInterface[] | AgentFinish | Error | any,
354
- parentRunId?: string
355
- ): void {
356
- const eventName = parentRunId ? '$ai_span' : '$ai_trace'
357
- const latency = run.endTime ? (run.endTime - run.startTime) / 1000 : 0
358
- const eventProperties: Record<string, any> = {
359
- $ai_trace_id: traceId,
360
- $ai_input_state: withPrivacyMode(this.client, this.privacyMode, run.input),
361
- $ai_latency: latency,
362
- $ai_span_name: run.name,
363
- $ai_span_id: runId,
364
- }
365
- if (parentRunId) {
366
- eventProperties['$ai_parent_id'] = parentRunId
367
- }
368
-
369
- Object.assign(eventProperties, this.properties)
370
- if (!this.distinctId) {
371
- eventProperties['$process_person_profile'] = false
372
- }
373
- if (outputs instanceof Error) {
374
- eventProperties['$ai_error'] = outputs.toString()
375
- eventProperties['$ai_is_error'] = true
376
- } else if (outputs !== undefined) {
377
- eventProperties['$ai_output_state'] = withPrivacyMode(this.client, this.privacyMode, outputs)
378
- }
379
- this.client.capture({
380
- distinctId: this.distinctId ? this.distinctId.toString() : runId,
381
- event: eventName,
382
- properties: eventProperties,
383
- groups: this.groups,
384
- })
385
- }
386
-
387
- private _popRunAndCaptureGeneration(
388
- runId: string,
389
- parentRunId: string | undefined,
390
- response: LLMResult | Error
391
- ): void {
392
- const traceId = this._getTraceId(runId)
393
- this._popParentOfRun(runId)
394
- const run = this._popRunMetadata(runId)
395
- if (!run || typeof run !== 'object' || !('modelParams' in run)) {
396
- console.warn(`Run ${runId} is not a generation, but attempted to be captured as such.`)
397
- return
398
- }
399
- const actualParentRunId = this._getParentRunId(traceId, runId, parentRunId)
400
- this._captureGeneration(traceId, runId, run as GenerationMetadata, response, actualParentRunId)
401
- }
402
-
403
- private _captureGeneration(
404
- traceId: string,
405
- runId: string,
406
- run: GenerationMetadata,
407
- output: LLMResult | Error,
408
- parentRunId?: string
409
- ): void {
410
- const latency = run.endTime ? (run.endTime - run.startTime) / 1000 : 0
411
- const eventProperties: Record<string, any> = {
412
- $ai_trace_id: traceId,
413
- $ai_span_id: runId,
414
- $ai_span_name: run.name,
415
- $ai_parent_id: parentRunId,
416
- $ai_provider: run.provider,
417
- $ai_model: run.model,
418
- $ai_model_parameters: run.modelParams,
419
- $ai_input: withPrivacyMode(this.client, this.privacyMode, run.input),
420
- $ai_http_status: 200,
421
- $ai_latency: latency,
422
- $ai_base_url: run.baseUrl,
423
- }
424
-
425
- if (run.tools) {
426
- eventProperties['$ai_tools'] = withPrivacyMode(this.client, this.privacyMode, run.tools)
427
- }
428
-
429
- if (output instanceof Error) {
430
- eventProperties['$ai_http_status'] = (output as any).status || 500
431
- eventProperties['$ai_error'] = output.toString()
432
- eventProperties['$ai_is_error'] = true
433
- } else {
434
- // Handle token usage
435
- const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output)
436
- eventProperties['$ai_input_tokens'] = inputTokens
437
- eventProperties['$ai_output_tokens'] = outputTokens
438
-
439
- // Add additional token data to properties
440
- if (additionalTokenData.cacheReadInputTokens) {
441
- eventProperties['$ai_cache_read_tokens'] = additionalTokenData.cacheReadInputTokens
442
- }
443
- if (additionalTokenData.reasoningTokens) {
444
- eventProperties['$ai_reasoning_tokens'] = additionalTokenData.reasoningTokens
445
- }
446
-
447
- // Handle generations/completions
448
- let completions
449
- if (output.generations && Array.isArray(output.generations)) {
450
- const lastGeneration = output.generations[output.generations.length - 1]
451
- if (Array.isArray(lastGeneration)) {
452
- completions = lastGeneration.map((gen) => {
453
- return { role: 'assistant', content: gen.text }
454
- })
455
- }
456
- }
457
-
458
- if (completions) {
459
- eventProperties['$ai_output_choices'] = withPrivacyMode(this.client, this.privacyMode, completions)
460
- }
461
- }
462
-
463
- Object.assign(eventProperties, this.properties)
464
- if (!this.distinctId) {
465
- eventProperties['$process_person_profile'] = false
466
- }
467
-
468
- this.client.capture({
469
- distinctId: this.distinctId ? this.distinctId.toString() : traceId,
470
- event: '$ai_generation',
471
- properties: eventProperties,
472
- groups: this.groups,
473
- })
474
- }
475
-
476
- private _logDebugEvent(eventName: string, runId: string, parentRunId: string | undefined, extra: any): void {
477
- if (this.debug) {
478
- console.log(`Event: ${eventName}, runId: ${runId}, parentRunId: ${parentRunId}, extra:`, extra)
479
- }
480
- }
481
-
482
- private _getLangchainRunName(serialized: any, ...args: any): string | undefined {
483
- if (args && args.length > 0) {
484
- for (const arg of args) {
485
- if (arg && typeof arg === 'object' && 'name' in arg) {
486
- return arg.name
487
- } else if (arg && typeof arg === 'object' && 'runName' in arg) {
488
- return arg.runName
489
- }
490
- }
491
- }
492
-
493
- if (serialized && serialized.name) {
494
- return serialized.name
495
- }
496
- if (serialized && serialized.id) {
497
- return Array.isArray(serialized.id) ? serialized.id[serialized.id.length - 1] : serialized.id
498
- }
499
- return undefined
500
- }
501
-
502
- private _convertLcToolCallsToOai(toolCalls: ToolCall[]): Record<string, any>[] {
503
- return toolCalls.map((toolCall: ToolCall) => ({
504
- type: 'function',
505
- id: toolCall.id,
506
- function: {
507
- name: toolCall.name,
508
- arguments: JSON.stringify(toolCall.args),
509
- },
510
- }))
511
- }
512
-
513
- private _convertMessageToDict(message: any): Record<string, any> {
514
- let messageDict: Record<string, any> = {}
515
-
516
- const messageType: string = message.getType()
517
-
518
- switch (messageType) {
519
- case 'human':
520
- messageDict = { role: 'user', content: message.content }
521
- break
522
- case 'ai':
523
- messageDict = { role: 'assistant', content: message.content }
524
-
525
- if (message.tool_calls) {
526
- messageDict.tool_calls = this._convertLcToolCallsToOai(message.tool_calls)
527
- }
528
-
529
- break
530
- case 'system':
531
- messageDict = { role: 'system', content: message.content }
532
- break
533
- case 'tool':
534
- messageDict = { role: 'tool', content: message.content }
535
- break
536
- case 'function':
537
- messageDict = { role: 'function', content: message.content }
538
- break
539
- default:
540
- messageDict = { role: messageType, content: String(message.content) }
541
- break
542
- }
543
-
544
- if (message.additional_kwargs) {
545
- messageDict = { ...messageDict, ...message.additional_kwargs }
546
- }
547
-
548
- return messageDict
549
- }
550
-
551
- private _parseUsageModel(usage: any): [number, number, Record<string, any>] {
552
- const conversionList: Array<[string, 'input' | 'output']> = [
553
- ['promptTokens', 'input'],
554
- ['completionTokens', 'output'],
555
- ['input_tokens', 'input'],
556
- ['output_tokens', 'output'],
557
- ['prompt_token_count', 'input'],
558
- ['candidates_token_count', 'output'],
559
- ['inputTokenCount', 'input'],
560
- ['outputTokenCount', 'output'],
561
- ['input_token_count', 'input'],
562
- ['generated_token_count', 'output'],
563
- ]
564
-
565
- const parsedUsage = conversionList.reduce(
566
- (acc: { input: number; output: number }, [modelKey, typeKey]) => {
567
- const value = usage[modelKey]
568
- if (value != null) {
569
- const finalCount = Array.isArray(value)
570
- ? value.reduce((sum: number, tokenCount: number) => sum + tokenCount, 0)
571
- : value
572
- acc[typeKey] = finalCount
573
- }
574
- return acc
575
- },
576
- { input: 0, output: 0 }
577
- )
578
-
579
- // Extract additional token details like cached tokens and reasoning tokens
580
- const additionalTokenData: Record<string, any> = {}
581
-
582
- // Check for cached tokens in various formats
583
- if (usage.prompt_tokens_details?.cached_tokens != null) {
584
- additionalTokenData.cacheReadInputTokens = usage.prompt_tokens_details.cached_tokens
585
- } else if (usage.input_token_details?.cache_read != null) {
586
- additionalTokenData.cacheReadInputTokens = usage.input_token_details.cache_read
587
- } else if (usage.cachedPromptTokens != null) {
588
- additionalTokenData.cacheReadInputTokens = usage.cachedPromptTokens
589
- }
590
-
591
- // Check for reasoning tokens in various formats
592
- if (usage.completion_tokens_details?.reasoning_tokens != null) {
593
- additionalTokenData.reasoningTokens = usage.completion_tokens_details.reasoning_tokens
594
- } else if (usage.output_token_details?.reasoning != null) {
595
- additionalTokenData.reasoningTokens = usage.output_token_details.reasoning
596
- } else if (usage.reasoningTokens != null) {
597
- additionalTokenData.reasoningTokens = usage.reasoningTokens
598
- }
599
-
600
- return [parsedUsage.input, parsedUsage.output, additionalTokenData]
601
- }
602
-
603
- private parseUsage(response: LLMResult): [number, number, Record<string, any>] {
604
- let llmUsage: [number, number, Record<string, any>] = [0, 0, {}]
605
- const llmUsageKeys = ['token_usage', 'usage', 'tokenUsage']
606
-
607
- if (response.llmOutput != null) {
608
- const key = llmUsageKeys.find((k) => response.llmOutput?.[k] != null)
609
- if (key) {
610
- llmUsage = this._parseUsageModel(response.llmOutput[key])
611
- }
612
- }
613
-
614
- // If top-level usage info was not found, try checking the generations.
615
- if (llmUsage[0] === 0 && llmUsage[1] === 0 && response.generations) {
616
- for (const generation of response.generations) {
617
- for (const genChunk of generation) {
618
- // Check other paths for usage information
619
- if (genChunk.generationInfo?.usage_metadata) {
620
- llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata)
621
- return llmUsage
622
- }
623
-
624
- const messageChunk = genChunk.generationInfo ?? {}
625
- const responseMetadata = messageChunk.response_metadata ?? {}
626
- const chunkUsage =
627
- responseMetadata['usage'] ??
628
- responseMetadata['amazon-bedrock-invocationMetrics'] ??
629
- messageChunk.usage_metadata
630
- if (chunkUsage) {
631
- llmUsage = this._parseUsageModel(chunkUsage)
632
- return llmUsage
633
- }
634
- }
635
- }
636
- }
637
-
638
- return llmUsage
639
- }
640
- }
@@ -1 +0,0 @@
1
- export * from './callbacks'