@posthog/ai 5.2.1 → 5.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/LICENSE +245 -0
  2. package/{lib → dist}/anthropic/index.cjs +7 -12
  3. package/{lib → dist}/anthropic/index.cjs.map +1 -1
  4. package/{lib → dist}/anthropic/index.mjs +4 -5
  5. package/{lib → dist}/anthropic/index.mjs.map +1 -1
  6. package/{lib → dist}/gemini/index.cjs +1 -1
  7. package/{lib → dist}/gemini/index.cjs.map +1 -1
  8. package/{lib → dist}/gemini/index.mjs.map +1 -1
  9. package/{lib → dist}/index.cjs +563 -482
  10. package/dist/index.cjs.map +1 -0
  11. package/{lib → dist}/index.d.ts +2 -1
  12. package/{lib → dist}/index.mjs +546 -459
  13. package/dist/index.mjs.map +1 -0
  14. package/{lib → dist}/langchain/index.cjs +166 -114
  15. package/dist/langchain/index.cjs.map +1 -0
  16. package/{lib → dist}/langchain/index.d.ts +2 -1
  17. package/{lib → dist}/langchain/index.mjs +163 -108
  18. package/dist/langchain/index.mjs.map +1 -0
  19. package/{lib → dist}/openai/index.cjs +7 -1
  20. package/dist/openai/index.cjs.map +1 -0
  21. package/{lib → dist}/openai/index.mjs +6 -0
  22. package/dist/openai/index.mjs.map +1 -0
  23. package/{lib → dist}/vercel/index.cjs +0 -2
  24. package/{lib → dist}/vercel/index.cjs.map +1 -1
  25. package/{lib → dist}/vercel/index.mjs.map +1 -1
  26. package/package.json +42 -33
  27. package/CHANGELOG.md +0 -85
  28. package/index.ts +0 -1
  29. package/lib/index.cjs.map +0 -1
  30. package/lib/index.mjs.map +0 -1
  31. package/lib/langchain/index.cjs.map +0 -1
  32. package/lib/langchain/index.mjs.map +0 -1
  33. package/lib/openai/index.cjs.map +0 -1
  34. package/lib/openai/index.mjs.map +0 -1
  35. package/src/anthropic/index.ts +0 -211
  36. package/src/gemini/index.ts +0 -254
  37. package/src/index.ts +0 -13
  38. package/src/langchain/callbacks.ts +0 -622
  39. package/src/langchain/index.ts +0 -1
  40. package/src/openai/azure.ts +0 -481
  41. package/src/openai/index.ts +0 -498
  42. package/src/utils.ts +0 -287
  43. package/src/vercel/index.ts +0 -1
  44. package/src/vercel/middleware.ts +0 -393
  45. package/tests/gemini.test.ts +0 -344
  46. package/tests/openai.test.ts +0 -403
  47. package/tsconfig.json +0 -10
  48. /package/{lib → dist}/anthropic/index.d.ts +0 -0
  49. /package/{lib → dist}/gemini/index.d.ts +0 -0
  50. /package/{lib → dist}/gemini/index.mjs +0 -0
  51. /package/{lib → dist}/openai/index.d.ts +0 -0
  52. /package/{lib → dist}/vercel/index.d.ts +0 -0
  53. /package/{lib → dist}/vercel/index.mjs +0 -0
@@ -1,622 +0,0 @@
1
- import { PostHog } from 'posthog-node'
2
- import { withPrivacyMode, getModelParams } from '../utils'
3
- import { BaseCallbackHandler } from '@langchain/core/callbacks/base'
4
- import type { Serialized } from '@langchain/core/load/serializable'
5
- import type { ChainValues } from '@langchain/core/utils/types'
6
- import type { BaseMessage } from '@langchain/core/messages'
7
- import type { LLMResult } from '@langchain/core/outputs'
8
- import type { AgentAction, AgentFinish } from '@langchain/core/agents'
9
- import type { DocumentInterface } from '@langchain/core/documents'
10
-
11
- interface SpanMetadata {
12
- /** Name of the trace/span (e.g. chain name) */
13
- name: string
14
- /** Timestamp (in ms) when the run started */
15
- startTime: number
16
- /** Timestamp (in ms) when the run ended (if already finished) */
17
- endTime?: number
18
- /** The input state */
19
- input?: any
20
- }
21
-
22
- interface GenerationMetadata extends SpanMetadata {
23
- /** Provider used (e.g. openai, anthropic) */
24
- provider?: string
25
- /** Model name used in the generation */
26
- model?: string
27
- /** The model parameters (temperature, max_tokens, etc.) */
28
- modelParams?: Record<string, any>
29
- /** The base URL—for example, the API base used */
30
- baseUrl?: string
31
- /** The tools used in the generation */
32
- tools?: Record<string, any>
33
- }
34
-
35
- /** A run may either be a Span or a Generation */
36
- type RunMetadata = SpanMetadata | GenerationMetadata
37
-
38
- /** Storage for run metadata */
39
- type RunMetadataStorage = { [runId: string]: RunMetadata }
40
-
41
- export class LangChainCallbackHandler extends BaseCallbackHandler {
42
- public name = 'PosthogCallbackHandler'
43
- private client: PostHog
44
- private distinctId?: string | number
45
- private traceId?: string | number
46
- private properties: Record<string, any>
47
- private privacyMode: boolean
48
- private groups: Record<string, any>
49
- private debug: boolean
50
-
51
- private runs: RunMetadataStorage = {}
52
- private parentTree: { [runId: string]: string } = {}
53
-
54
- constructor(options: {
55
- client: PostHog
56
- distinctId?: string | number
57
- traceId?: string | number
58
- properties?: Record<string, any>
59
- privacyMode?: boolean
60
- groups?: Record<string, any>
61
- debug?: boolean
62
- }) {
63
- if (!options.client) {
64
- throw new Error('PostHog client is required')
65
- }
66
- super()
67
- this.client = options.client
68
- this.distinctId = options.distinctId
69
- this.traceId = options.traceId
70
- this.properties = options.properties || {}
71
- this.privacyMode = options.privacyMode || false
72
- this.groups = options.groups || {}
73
- this.debug = options.debug || false
74
- }
75
-
76
- // ===== CALLBACK METHODS =====
77
-
78
- public handleChainStart(
79
- chain: Serialized,
80
- inputs: ChainValues,
81
- runId: string,
82
- parentRunId?: string,
83
- tags?: string[],
84
- metadata?: Record<string, unknown>,
85
- runType?: string,
86
- runName?: string
87
- ): void {
88
- this._logDebugEvent('on_chain_start', runId, parentRunId, { inputs, tags })
89
- this._setParentOfRun(runId, parentRunId)
90
- this._setTraceOrSpanMetadata(chain, inputs, runId, parentRunId, metadata, tags, runName)
91
- }
92
-
93
- public handleChainEnd(
94
- outputs: ChainValues,
95
- runId: string,
96
- parentRunId?: string,
97
- tags?: string[],
98
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
99
- kwargs?: { inputs?: Record<string, unknown> }
100
- ): void {
101
- this._logDebugEvent('on_chain_end', runId, parentRunId, { outputs, tags })
102
- this._popRunAndCaptureTraceOrSpan(runId, parentRunId, outputs)
103
- }
104
-
105
- public handleChainError(
106
- error: Error,
107
- runId: string,
108
- parentRunId?: string,
109
- tags?: string[],
110
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
111
- kwargs?: { inputs?: Record<string, unknown> }
112
- ): void {
113
- this._logDebugEvent('on_chain_error', runId, parentRunId, { error, tags })
114
- this._popRunAndCaptureTraceOrSpan(runId, parentRunId, error)
115
- }
116
-
117
- public handleChatModelStart(
118
- serialized: Serialized,
119
- messages: BaseMessage[][],
120
- runId: string,
121
- parentRunId?: string,
122
- extraParams?: Record<string, unknown>,
123
- tags?: string[],
124
- metadata?: Record<string, unknown>,
125
- runName?: string
126
- ): void {
127
- this._logDebugEvent('on_chat_model_start', runId, parentRunId, { messages, tags })
128
- this._setParentOfRun(runId, parentRunId)
129
- // Flatten the two-dimensional messages and convert each message to a plain object
130
- const input = messages.flat().map((m) => this._convertMessageToDict(m))
131
- this._setLLMMetadata(serialized, runId, input, metadata, extraParams, runName)
132
- }
133
-
134
- public handleLLMStart(
135
- serialized: Serialized,
136
- prompts: string[],
137
- runId: string,
138
- parentRunId?: string,
139
- extraParams?: Record<string, unknown>,
140
- tags?: string[],
141
- metadata?: Record<string, unknown>,
142
- runName?: string
143
- ): void {
144
- this._logDebugEvent('on_llm_start', runId, parentRunId, { prompts, tags })
145
- this._setParentOfRun(runId, parentRunId)
146
- this._setLLMMetadata(serialized, runId, prompts, metadata, extraParams, runName)
147
- }
148
-
149
- public handleLLMEnd(
150
- output: LLMResult,
151
- runId: string,
152
- parentRunId?: string,
153
- tags?: string[],
154
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
155
- extraParams?: Record<string, unknown>
156
- ): void {
157
- this._logDebugEvent('on_llm_end', runId, parentRunId, { output, tags })
158
- this._popRunAndCaptureGeneration(runId, parentRunId, output)
159
- }
160
-
161
- public handleLLMError(
162
- err: Error,
163
- runId: string,
164
- parentRunId?: string,
165
- tags?: string[],
166
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
167
- extraParams?: Record<string, unknown>
168
- ): void {
169
- this._logDebugEvent('on_llm_error', runId, parentRunId, { err, tags })
170
- this._popRunAndCaptureGeneration(runId, parentRunId, err)
171
- }
172
-
173
- public handleToolStart(
174
- tool: Serialized,
175
- input: string,
176
- runId: string,
177
- parentRunId?: string,
178
- tags?: string[],
179
- metadata?: Record<string, unknown>,
180
- runName?: string
181
- ): void {
182
- this._logDebugEvent('on_tool_start', runId, parentRunId, { input, tags })
183
- this._setParentOfRun(runId, parentRunId)
184
- this._setTraceOrSpanMetadata(tool, input, runId, parentRunId, metadata, tags, runName)
185
- }
186
-
187
- public handleToolEnd(output: any, runId: string, parentRunId?: string, tags?: string[]): void {
188
- this._logDebugEvent('on_tool_end', runId, parentRunId, { output, tags })
189
- this._popRunAndCaptureTraceOrSpan(runId, parentRunId, output)
190
- }
191
-
192
- public handleToolError(err: Error, runId: string, parentRunId?: string, tags?: string[]): void {
193
- this._logDebugEvent('on_tool_error', runId, parentRunId, { err, tags })
194
- this._popRunAndCaptureTraceOrSpan(runId, parentRunId, err)
195
- }
196
-
197
- public handleRetrieverStart(
198
- retriever: Serialized,
199
- query: string,
200
- runId: string,
201
- parentRunId?: string,
202
- tags?: string[],
203
- metadata?: Record<string, unknown>,
204
- name?: string
205
- ): void {
206
- this._logDebugEvent('on_retriever_start', runId, parentRunId, { query, tags })
207
- this._setParentOfRun(runId, parentRunId)
208
- this._setTraceOrSpanMetadata(retriever, query, runId, parentRunId, metadata, tags, name)
209
- }
210
-
211
- public handleRetrieverEnd(
212
- documents: DocumentInterface[],
213
- runId: string,
214
- parentRunId?: string,
215
- tags?: string[]
216
- ): void {
217
- this._logDebugEvent('on_retriever_end', runId, parentRunId, { documents, tags })
218
- this._popRunAndCaptureTraceOrSpan(runId, parentRunId, documents)
219
- }
220
-
221
- public handleRetrieverError(err: Error, runId: string, parentRunId?: string, tags?: string[]): void {
222
- this._logDebugEvent('on_retriever_error', runId, parentRunId, { err, tags })
223
- this._popRunAndCaptureTraceOrSpan(runId, parentRunId, err)
224
- }
225
-
226
- public handleAgentAction(action: AgentAction, runId: string, parentRunId?: string, tags?: string[]): void {
227
- this._logDebugEvent('on_agent_action', runId, parentRunId, { action, tags })
228
- this._setParentOfRun(runId, parentRunId)
229
- this._setTraceOrSpanMetadata(null, action, runId, parentRunId)
230
- }
231
-
232
- public handleAgentEnd(action: AgentFinish, runId: string, parentRunId?: string, tags?: string[]): void {
233
- this._logDebugEvent('on_agent_finish', runId, parentRunId, { action, tags })
234
- this._popRunAndCaptureTraceOrSpan(runId, parentRunId, action)
235
- }
236
-
237
- // ===== PRIVATE HELPERS =====
238
-
239
- private _setParentOfRun(runId: string, parentRunId?: string): void {
240
- if (parentRunId) {
241
- this.parentTree[runId] = parentRunId
242
- }
243
- }
244
-
245
- private _popParentOfRun(runId: string): void {
246
- delete this.parentTree[runId]
247
- }
248
-
249
- private _findRootRun(runId: string): string {
250
- let id = runId
251
- while (this.parentTree[id]) {
252
- id = this.parentTree[id]
253
- }
254
- return id
255
- }
256
-
257
- private _setTraceOrSpanMetadata(
258
- serialized: any,
259
- input: any,
260
- runId: string,
261
- parentRunId?: string,
262
- ...args: any[]
263
- ): void {
264
- // Use default names if not provided: if this is a top-level run, we mark it as a trace, otherwise as a span.
265
- const defaultName = parentRunId ? 'span' : 'trace'
266
- const runName = this._getLangchainRunName(serialized, ...args) || defaultName
267
- this.runs[runId] = {
268
- name: runName,
269
- input,
270
- startTime: Date.now(),
271
- } as SpanMetadata
272
- }
273
-
274
- private _setLLMMetadata(
275
- serialized: Serialized | null,
276
- runId: string,
277
- messages: any,
278
- metadata?: any,
279
- extraParams?: any,
280
- runName?: string
281
- ): void {
282
- const runNameFound = this._getLangchainRunName(serialized, { extraParams, runName }) || 'generation'
283
- const generation: GenerationMetadata = {
284
- name: runNameFound,
285
- input: messages,
286
- startTime: Date.now(),
287
- }
288
- if (extraParams) {
289
- generation.modelParams = getModelParams(extraParams.invocation_params)
290
- }
291
- if (metadata) {
292
- if (metadata.ls_model_name) {
293
- generation.model = metadata.ls_model_name
294
- }
295
- if (metadata.ls_provider) {
296
- generation.provider = metadata.ls_provider
297
- }
298
- }
299
- if (serialized && 'kwargs' in serialized && serialized.kwargs.openai_api_base) {
300
- generation.baseUrl = serialized.kwargs.openai_api_base
301
- }
302
- this.runs[runId] = generation
303
- }
304
-
305
- private _popRunMetadata(runId: string): RunMetadata | undefined {
306
- const endTime = Date.now()
307
- const run = this.runs[runId]
308
- if (!run) {
309
- console.warn(`No run metadata found for run ${runId}`)
310
- return undefined
311
- }
312
- run.endTime = endTime
313
- delete this.runs[runId]
314
- return run
315
- }
316
-
317
- private _getTraceId(runId: string): string {
318
- return this.traceId ? String(this.traceId) : this._findRootRun(runId)
319
- }
320
-
321
- private _getParentRunId(traceId: string, runId: string, parentRunId?: string): string | undefined {
322
- // Replace the parent-run if not found in our stored parent tree.
323
- if (parentRunId && !this.parentTree[parentRunId]) {
324
- return traceId
325
- }
326
- return parentRunId
327
- }
328
-
329
- private _popRunAndCaptureTraceOrSpan(
330
- runId: string,
331
- parentRunId: string | undefined,
332
- outputs: ChainValues | DocumentInterface[] | AgentFinish | Error | any
333
- ): void {
334
- const traceId = this._getTraceId(runId)
335
- this._popParentOfRun(runId)
336
- const run = this._popRunMetadata(runId)
337
- if (!run) {
338
- return
339
- }
340
- if ('modelParams' in run) {
341
- console.warn(`Run ${runId} is a generation, but attempted to be captured as a trace/span.`)
342
- return
343
- }
344
- const actualParentRunId = this._getParentRunId(traceId, runId, parentRunId)
345
- this._captureTraceOrSpan(traceId, runId, run as SpanMetadata, outputs, actualParentRunId)
346
- }
347
-
348
- private _captureTraceOrSpan(
349
- traceId: string,
350
- runId: string,
351
- run: SpanMetadata,
352
- outputs: ChainValues | DocumentInterface[] | AgentFinish | Error | any,
353
- parentRunId?: string
354
- ): void {
355
- const eventName = parentRunId ? '$ai_span' : '$ai_trace'
356
- const latency = run.endTime ? (run.endTime - run.startTime) / 1000 : 0
357
- const eventProperties: Record<string, any> = {
358
- $ai_trace_id: traceId,
359
- $ai_input_state: withPrivacyMode(this.client, this.privacyMode, run.input),
360
- $ai_latency: latency,
361
- $ai_span_name: run.name,
362
- $ai_span_id: runId,
363
- }
364
- if (parentRunId) {
365
- eventProperties['$ai_parent_id'] = parentRunId
366
- }
367
-
368
- Object.assign(eventProperties, this.properties)
369
- if (!this.distinctId) {
370
- eventProperties['$process_person_profile'] = false
371
- }
372
- if (outputs instanceof Error) {
373
- eventProperties['$ai_error'] = outputs.toString()
374
- eventProperties['$ai_is_error'] = true
375
- } else if (outputs !== undefined) {
376
- eventProperties['$ai_output_state'] = withPrivacyMode(this.client, this.privacyMode, outputs)
377
- }
378
- this.client.capture({
379
- distinctId: this.distinctId ? this.distinctId.toString() : runId,
380
- event: eventName,
381
- properties: eventProperties,
382
- groups: this.groups,
383
- })
384
- }
385
-
386
- private _popRunAndCaptureGeneration(
387
- runId: string,
388
- parentRunId: string | undefined,
389
- response: LLMResult | Error
390
- ): void {
391
- const traceId = this._getTraceId(runId)
392
- this._popParentOfRun(runId)
393
- const run = this._popRunMetadata(runId)
394
- if (!run || typeof run !== 'object' || !('modelParams' in run)) {
395
- console.warn(`Run ${runId} is not a generation, but attempted to be captured as such.`)
396
- return
397
- }
398
- const actualParentRunId = this._getParentRunId(traceId, runId, parentRunId)
399
- this._captureGeneration(traceId, runId, run as GenerationMetadata, response, actualParentRunId)
400
- }
401
-
402
- private _captureGeneration(
403
- traceId: string,
404
- runId: string,
405
- run: GenerationMetadata,
406
- output: LLMResult | Error,
407
- parentRunId?: string
408
- ): void {
409
- const latency = run.endTime ? (run.endTime - run.startTime) / 1000 : 0
410
- const eventProperties: Record<string, any> = {
411
- $ai_trace_id: traceId,
412
- $ai_span_id: runId,
413
- $ai_span_name: run.name,
414
- $ai_parent_id: parentRunId,
415
- $ai_provider: run.provider,
416
- $ai_model: run.model,
417
- $ai_model_parameters: run.modelParams,
418
- $ai_input: withPrivacyMode(this.client, this.privacyMode, run.input),
419
- $ai_http_status: 200,
420
- $ai_latency: latency,
421
- $ai_base_url: run.baseUrl,
422
- }
423
-
424
- if (run.tools) {
425
- eventProperties['$ai_tools'] = withPrivacyMode(this.client, this.privacyMode, run.tools)
426
- }
427
-
428
- if (output instanceof Error) {
429
- eventProperties['$ai_http_status'] = (output as any).status || 500
430
- eventProperties['$ai_error'] = output.toString()
431
- eventProperties['$ai_is_error'] = true
432
- } else {
433
- // Handle token usage
434
- const [inputTokens, outputTokens, additionalTokenData] = this.parseUsage(output)
435
- eventProperties['$ai_input_tokens'] = inputTokens
436
- eventProperties['$ai_output_tokens'] = outputTokens
437
-
438
- // Add additional token data to properties
439
- if (additionalTokenData.cacheReadInputTokens) {
440
- eventProperties['$ai_cache_read_tokens'] = additionalTokenData.cacheReadInputTokens
441
- }
442
- if (additionalTokenData.reasoningTokens) {
443
- eventProperties['$ai_reasoning_tokens'] = additionalTokenData.reasoningTokens
444
- }
445
-
446
- // Handle generations/completions
447
- let completions
448
- if (output.generations && Array.isArray(output.generations)) {
449
- const lastGeneration = output.generations[output.generations.length - 1]
450
- if (Array.isArray(lastGeneration)) {
451
- completions = lastGeneration.map((gen) => {
452
- return { role: 'assistant', content: gen.text }
453
- })
454
- }
455
- }
456
-
457
- if (completions) {
458
- eventProperties['$ai_output_choices'] = withPrivacyMode(this.client, this.privacyMode, completions)
459
- }
460
- }
461
-
462
- Object.assign(eventProperties, this.properties)
463
- if (!this.distinctId) {
464
- eventProperties['$process_person_profile'] = false
465
- }
466
-
467
- this.client.capture({
468
- distinctId: this.distinctId ? this.distinctId.toString() : traceId,
469
- event: '$ai_generation',
470
- properties: eventProperties,
471
- groups: this.groups,
472
- })
473
- }
474
-
475
- private _logDebugEvent(eventName: string, runId: string, parentRunId: string | undefined, extra: any): void {
476
- if (this.debug) {
477
- console.log(`Event: ${eventName}, runId: ${runId}, parentRunId: ${parentRunId}, extra:`, extra)
478
- }
479
- }
480
-
481
- private _getLangchainRunName(serialized: any, ...args: any): string | undefined {
482
- if (args && args.length > 0) {
483
- for (const arg of args) {
484
- if (arg && typeof arg === 'object' && 'name' in arg) {
485
- return arg.name
486
- } else if (arg && typeof arg === 'object' && 'runName' in arg) {
487
- return arg.runName
488
- }
489
- }
490
- }
491
-
492
- if (serialized && serialized.name) {
493
- return serialized.name
494
- }
495
- if (serialized && serialized.id) {
496
- return Array.isArray(serialized.id) ? serialized.id[serialized.id.length - 1] : serialized.id
497
- }
498
- return undefined
499
- }
500
-
501
- private _convertMessageToDict(message: any): Record<string, any> {
502
- let messageDict: Record<string, any> = {}
503
-
504
- // Check the _getType() method or type property instead of instanceof
505
- const messageType = message._getType?.() || message.type
506
-
507
- switch (messageType) {
508
- case 'human':
509
- messageDict = { role: 'user', content: message.content }
510
- break
511
- case 'ai':
512
- messageDict = { role: 'assistant', content: message.content }
513
- break
514
- case 'system':
515
- messageDict = { role: 'system', content: message.content }
516
- break
517
- case 'tool':
518
- messageDict = { role: 'tool', content: message.content }
519
- break
520
- case 'function':
521
- messageDict = { role: 'function', content: message.content }
522
- break
523
- default:
524
- messageDict = { role: messageType || 'unknown', content: String(message.content) }
525
- }
526
-
527
- if (message.additional_kwargs) {
528
- messageDict = { ...messageDict, ...message.additional_kwargs }
529
- }
530
- return messageDict
531
- }
532
-
533
- private _parseUsageModel(usage: any): [number, number, Record<string, any>] {
534
- const conversionList: Array<[string, 'input' | 'output']> = [
535
- ['promptTokens', 'input'],
536
- ['completionTokens', 'output'],
537
- ['input_tokens', 'input'],
538
- ['output_tokens', 'output'],
539
- ['prompt_token_count', 'input'],
540
- ['candidates_token_count', 'output'],
541
- ['inputTokenCount', 'input'],
542
- ['outputTokenCount', 'output'],
543
- ['input_token_count', 'input'],
544
- ['generated_token_count', 'output'],
545
- ]
546
-
547
- const parsedUsage = conversionList.reduce(
548
- (acc: { input: number; output: number }, [modelKey, typeKey]) => {
549
- const value = usage[modelKey]
550
- if (value != null) {
551
- const finalCount = Array.isArray(value)
552
- ? value.reduce((sum: number, tokenCount: number) => sum + tokenCount, 0)
553
- : value
554
- acc[typeKey] = finalCount
555
- }
556
- return acc
557
- },
558
- { input: 0, output: 0 }
559
- )
560
-
561
- // Extract additional token details like cached tokens and reasoning tokens
562
- const additionalTokenData: Record<string, any> = {}
563
-
564
- // Check for cached tokens in various formats
565
- if (usage.prompt_tokens_details?.cached_tokens != null) {
566
- additionalTokenData.cacheReadInputTokens = usage.prompt_tokens_details.cached_tokens
567
- } else if (usage.input_token_details?.cache_read != null) {
568
- additionalTokenData.cacheReadInputTokens = usage.input_token_details.cache_read
569
- } else if (usage.cachedPromptTokens != null) {
570
- additionalTokenData.cacheReadInputTokens = usage.cachedPromptTokens
571
- }
572
-
573
- // Check for reasoning tokens in various formats
574
- if (usage.completion_tokens_details?.reasoning_tokens != null) {
575
- additionalTokenData.reasoningTokens = usage.completion_tokens_details.reasoning_tokens
576
- } else if (usage.output_token_details?.reasoning != null) {
577
- additionalTokenData.reasoningTokens = usage.output_token_details.reasoning
578
- } else if (usage.reasoningTokens != null) {
579
- additionalTokenData.reasoningTokens = usage.reasoningTokens
580
- }
581
-
582
- return [parsedUsage.input, parsedUsage.output, additionalTokenData]
583
- }
584
-
585
- private parseUsage(response: LLMResult): [number, number, Record<string, any>] {
586
- let llmUsage: [number, number, Record<string, any>] = [0, 0, {}]
587
- const llmUsageKeys = ['token_usage', 'usage', 'tokenUsage']
588
-
589
- if (response.llmOutput != null) {
590
- const key = llmUsageKeys.find((k) => response.llmOutput?.[k] != null)
591
- if (key) {
592
- llmUsage = this._parseUsageModel(response.llmOutput[key])
593
- }
594
- }
595
-
596
- // If top-level usage info was not found, try checking the generations.
597
- if (llmUsage[0] === 0 && llmUsage[1] === 0 && response.generations) {
598
- for (const generation of response.generations) {
599
- for (const genChunk of generation) {
600
- // Check other paths for usage information
601
- if (genChunk.generationInfo?.usage_metadata) {
602
- llmUsage = this._parseUsageModel(genChunk.generationInfo.usage_metadata)
603
- return llmUsage
604
- }
605
-
606
- const messageChunk = genChunk.generationInfo ?? {}
607
- const responseMetadata = messageChunk.response_metadata ?? {}
608
- const chunkUsage =
609
- responseMetadata['usage'] ??
610
- responseMetadata['amazon-bedrock-invocationMetrics'] ??
611
- messageChunk.usage_metadata
612
- if (chunkUsage) {
613
- llmUsage = this._parseUsageModel(chunkUsage)
614
- return llmUsage
615
- }
616
- }
617
- }
618
- }
619
-
620
- return llmUsage
621
- }
622
- }
@@ -1 +0,0 @@
1
- export * from './callbacks'