openlayer 0.16.1 → 0.16.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134) hide show
  1. package/CHANGELOG.md +29 -0
  2. package/_shims/bun-runtime.js +1 -2
  3. package/_shims/bun-runtime.js.map +1 -1
  4. package/_shims/node-runtime.js +18 -9
  5. package/_shims/node-runtime.js.map +1 -1
  6. package/_shims/node-runtime.mjs.map +1 -1
  7. package/_shims/registry.js +2 -2
  8. package/_shims/registry.js.map +1 -1
  9. package/_shims/registry.mjs.map +1 -1
  10. package/_shims/web-runtime.js +1 -2
  11. package/_shims/web-runtime.js.map +1 -1
  12. package/_shims/web-runtime.mjs.map +1 -1
  13. package/core.d.ts +2 -4
  14. package/core.d.ts.map +1 -1
  15. package/core.js +6 -6
  16. package/core.js.map +1 -1
  17. package/core.mjs +1 -1
  18. package/core.mjs.map +1 -1
  19. package/error.js.map +1 -1
  20. package/error.mjs.map +1 -1
  21. package/index.js +17 -7
  22. package/index.js.map +1 -1
  23. package/index.mjs.map +1 -1
  24. package/internal/qs/stringify.js +1 -2
  25. package/internal/qs/stringify.js.map +1 -1
  26. package/internal/qs/stringify.mjs.map +1 -1
  27. package/internal/qs/utils.js +9 -9
  28. package/internal/qs/utils.js.map +1 -1
  29. package/internal/qs/utils.mjs.map +1 -1
  30. package/lib/core/cli.d.ts.map +1 -1
  31. package/lib/core/cli.js +17 -8
  32. package/lib/core/cli.js.map +1 -1
  33. package/lib/core/cli.mjs +0 -1
  34. package/lib/core/cli.mjs.map +1 -1
  35. package/lib/core/openai-monitor.d.ts.map +1 -1
  36. package/lib/core/openai-monitor.js +1 -1
  37. package/lib/core/openai-monitor.js.map +1 -1
  38. package/lib/core/openai-monitor.mjs +1 -1
  39. package/lib/core/openai-monitor.mjs.map +1 -1
  40. package/lib/integrations/bedrockAgentTracer.js +49 -50
  41. package/lib/integrations/bedrockAgentTracer.js.map +1 -1
  42. package/lib/integrations/bedrockAgentTracer.mjs +48 -48
  43. package/lib/integrations/bedrockAgentTracer.mjs.map +1 -1
  44. package/lib/integrations/index.d.ts +5 -0
  45. package/lib/integrations/index.d.ts.map +1 -0
  46. package/lib/integrations/index.js +21 -0
  47. package/lib/integrations/index.js.map +1 -0
  48. package/lib/integrations/index.mjs +5 -0
  49. package/lib/integrations/index.mjs.map +1 -0
  50. package/lib/integrations/langchainCallback.d.ts +83 -27
  51. package/lib/integrations/langchainCallback.d.ts.map +1 -1
  52. package/lib/integrations/langchainCallback.js +597 -88
  53. package/lib/integrations/langchainCallback.js.map +1 -1
  54. package/lib/integrations/langchainCallback.mjs +599 -87
  55. package/lib/integrations/langchainCallback.mjs.map +1 -1
  56. package/lib/integrations/openAiTracer.js +1 -2
  57. package/lib/integrations/openAiTracer.js.map +1 -1
  58. package/lib/integrations/openAiTracer.mjs.map +1 -1
  59. package/lib/integrations/tracedTool.d.ts +66 -0
  60. package/lib/integrations/tracedTool.d.ts.map +1 -0
  61. package/lib/integrations/tracedTool.js +186 -0
  62. package/lib/integrations/tracedTool.js.map +1 -0
  63. package/lib/integrations/tracedTool.mjs +149 -0
  64. package/lib/integrations/tracedTool.mjs.map +1 -0
  65. package/lib/tracing/steps.d.ts +120 -1
  66. package/lib/tracing/steps.d.ts.map +1 -1
  67. package/lib/tracing/steps.js +128 -4
  68. package/lib/tracing/steps.js.map +1 -1
  69. package/lib/tracing/steps.mjs +119 -1
  70. package/lib/tracing/steps.mjs.map +1 -1
  71. package/lib/tracing/tracer.d.ts +69 -1
  72. package/lib/tracing/tracer.d.ts.map +1 -1
  73. package/lib/tracing/tracer.js +92 -18
  74. package/lib/tracing/tracer.js.map +1 -1
  75. package/lib/tracing/tracer.mjs +78 -10
  76. package/lib/tracing/tracer.mjs.map +1 -1
  77. package/package.json +3 -1
  78. package/resources/commits/commits.js +17 -7
  79. package/resources/commits/commits.js.map +1 -1
  80. package/resources/commits/test-results.js.map +1 -1
  81. package/resources/commits/test-results.mjs.map +1 -1
  82. package/resources/inference-pipelines/inference-pipelines.js +17 -7
  83. package/resources/inference-pipelines/inference-pipelines.js.map +1 -1
  84. package/resources/inference-pipelines/inference-pipelines.mjs.map +1 -1
  85. package/resources/inference-pipelines/test-results.js.map +1 -1
  86. package/resources/inference-pipelines/test-results.mjs.map +1 -1
  87. package/resources/projects/commits.js.map +1 -1
  88. package/resources/projects/commits.mjs.map +1 -1
  89. package/resources/projects/inference-pipelines.js.map +1 -1
  90. package/resources/projects/inference-pipelines.mjs.map +1 -1
  91. package/resources/projects/projects.js +17 -7
  92. package/resources/projects/projects.js.map +1 -1
  93. package/resources/projects/projects.mjs.map +1 -1
  94. package/resources/projects/tests.js.map +1 -1
  95. package/resources/projects/tests.mjs.map +1 -1
  96. package/resources/storage/storage.js +17 -7
  97. package/resources/storage/storage.js.map +1 -1
  98. package/shims/node.d.ts +0 -4
  99. package/shims/node.d.ts.map +1 -1
  100. package/shims/node.js +0 -25
  101. package/shims/node.js.map +1 -1
  102. package/shims/node.mjs +0 -1
  103. package/shims/node.mjs.map +1 -1
  104. package/shims/web.js +0 -25
  105. package/shims/web.js.map +1 -1
  106. package/shims/web.mjs +0 -1
  107. package/shims/web.mjs.map +1 -1
  108. package/src/core.ts +1 -1
  109. package/src/lib/core/cli.ts +0 -1
  110. package/src/lib/core/openai-monitor.ts +1 -1
  111. package/src/lib/integrations/index.ts +4 -0
  112. package/src/lib/integrations/langchainCallback.ts +802 -88
  113. package/src/lib/integrations/tracedTool.ts +175 -0
  114. package/src/lib/tracing/index.d.ts +10 -0
  115. package/src/lib/tracing/steps.ts +218 -5
  116. package/src/lib/tracing/tracer.ts +153 -9
  117. package/src/uploads.ts +3 -2
  118. package/src/version.ts +1 -1
  119. package/uploads.d.ts +2 -2
  120. package/uploads.d.ts.map +1 -1
  121. package/uploads.js +3 -4
  122. package/uploads.js.map +1 -1
  123. package/uploads.mjs +1 -2
  124. package/uploads.mjs.map +1 -1
  125. package/version.d.ts +1 -1
  126. package/version.js +1 -1
  127. package/version.mjs +1 -1
  128. package/lib/tracing/enums.d.ts +0 -5
  129. package/lib/tracing/enums.d.ts.map +0 -1
  130. package/lib/tracing/enums.js +0 -9
  131. package/lib/tracing/enums.js.map +0 -1
  132. package/lib/tracing/enums.mjs +0 -6
  133. package/lib/tracing/enums.mjs.map +0 -1
  134. package/src/lib/tracing/enums.ts +0 -4
@@ -1,144 +1,858 @@
1
+ import type { AgentAction, AgentFinish } from '@langchain/core/agents';
1
2
  import { BaseCallbackHandler } from '@langchain/core/callbacks/base';
2
- import { LLMResult } from '@langchain/core/dist/outputs';
3
+ import type { Document } from '@langchain/core/documents';
3
4
  import type { Serialized } from '@langchain/core/load/serializable';
4
- import { AIMessage, BaseMessage, SystemMessage } from '@langchain/core/messages';
5
- import performanceNow from 'performance-now';
6
- import { addChatCompletionStepToTrace } from '../tracing/tracer';
5
+ import {
6
+ AIMessage,
7
+ AIMessageChunk,
8
+ BaseMessage,
9
+ ChatMessage,
10
+ FunctionMessage,
11
+ HumanMessage,
12
+ SystemMessage,
13
+ ToolMessage,
14
+ type UsageMetadata,
15
+ type BaseMessageFields,
16
+ type MessageContent,
17
+ } from '@langchain/core/messages';
18
+ import type { Generation, LLMResult } from '@langchain/core/outputs';
19
+ import type { ChainValues } from '@langchain/core/utils/types';
20
+ import {
21
+ addChatCompletionStepToTrace,
22
+ addChainStepToTrace,
23
+ addAgentStepToTrace,
24
+ addToolStepToTrace,
25
+ addRetrieverStepToTrace,
26
+ } from '../tracing/tracer';
7
27
 
8
28
  const LANGCHAIN_TO_OPENLAYER_PROVIDER_MAP: Record<string, string> = {
9
29
  openai: 'OpenAI',
10
30
  'openai-chat': 'OpenAI',
11
31
  'chat-ollama': 'Ollama',
12
32
  vertexai: 'Google',
33
+ anthropic: 'Anthropic',
34
+ 'azure-openai': 'Azure OpenAI',
35
+ cohere: 'Cohere',
36
+ huggingface: 'Hugging Face',
13
37
  };
38
+
14
39
  const PROVIDER_TO_STEP_NAME: Record<string, string> = {
15
40
  OpenAI: 'OpenAI Chat Completion',
16
41
  Ollama: 'Ollama Chat Completion',
17
42
  Google: 'Google Vertex AI Chat Completion',
43
+ Anthropic: 'Anthropic Chat Completion',
44
+ 'Azure OpenAI': 'Azure OpenAI Chat Completion',
45
+ Cohere: 'Cohere Chat Completion',
46
+ 'Hugging Face': 'Hugging Face Chat Completion',
47
+ };
48
+
49
+ const LANGSMITH_HIDDEN_TAG = 'langsmith:hidden';
50
+
51
+ type OpenlayerPrompt = {
52
+ name: string;
53
+ version: number;
54
+ isFallback: boolean;
55
+ };
56
+
57
+ export type LlmMessage = {
58
+ role: string;
59
+ content: BaseMessageFields['content'];
60
+ additional_kwargs?: BaseMessageFields['additional_kwargs'];
61
+ };
62
+
63
+ export type AnonymousLlmMessage = {
64
+ content: BaseMessageFields['content'];
65
+ additional_kwargs?: BaseMessageFields['additional_kwargs'];
66
+ };
67
+
68
+ type ConstructorParams = {
69
+ userId?: string | undefined;
70
+ sessionId?: string | undefined;
71
+ tags?: string[] | undefined;
72
+ version?: string | undefined;
73
+ traceMetadata?: Record<string, unknown> | undefined;
18
74
  };
19
75
 
76
+ /**
77
+ * Comprehensive LangChain callback handler for Openlayer tracing.
78
+ *
79
+ * Supports all LangChain components:
80
+ * - LLMs and Chat Models (with streaming support)
81
+ * - Chains
82
+ * - Agents and Tools
83
+ * - Retrievers
84
+ * - Error handling and hierarchical tracking
85
+ *
86
+ * @example
87
+ * ```typescript
88
+ * import { OpenlayerHandler } from 'openlayer/lib/integrations/langchainCallback';
89
+ *
90
+ * const handler = new OpenlayerHandler({
91
+ * userId: 'user-123',
92
+ * sessionId: 'session-456',
93
+ * tags: ['production'],
94
+ * version: '1.0.0'
95
+ * });
96
+ *
97
+ * const model = new ChatOpenAI({
98
+ * callbacks: [handler]
99
+ * });
100
+ * ```
101
+ */
20
102
  export class OpenlayerHandler extends BaseCallbackHandler {
21
103
  name = 'OpenlayerHandler';
22
- startTime: number | null = null;
23
- endTime: number | null = null;
24
- prompt: Array<{ role: string; content: string }> | null = null;
25
- latency: number = 0;
26
- provider: string | undefined;
27
- model: string | null = null;
28
- modelParameters: Record<string, any> | null = null;
29
- promptTokens: number | null = 0;
30
- completionTokens: number | null = 0;
31
- totalTokens: number | null = 0;
32
- output: string = '';
33
- metadata: Record<string, any>;
34
-
35
- constructor(kwargs: Record<string, any> = {}) {
104
+
105
+ private userId?: string | undefined;
106
+ private version?: string | undefined;
107
+ private sessionId?: string | undefined;
108
+ private tags: string[];
109
+ private traceMetadata?: Record<string, unknown> | undefined;
110
+
111
+ private completionStartTimes: Record<string, Date> = {};
112
+ private promptToParentRunMap: Map<string, OpenlayerPrompt> = new Map();
113
+ private runMap: Map<string, { step: any; endStep: () => void }> = new Map();
114
+
115
+ constructor(params?: ConstructorParams) {
36
116
  super();
37
- this.metadata = kwargs;
117
+
118
+ this.sessionId = params?.sessionId;
119
+ this.userId = params?.userId;
120
+ this.tags = params?.tags ?? [];
121
+ this.traceMetadata = params?.traceMetadata;
122
+ this.version = params?.version;
38
123
  }
124
+
125
+ // ============================================================================
126
+ // LLM Handlers
127
+ // ============================================================================
128
+
129
+ override async handleLLMNewToken(
130
+ token: string,
131
+ _idx: any,
132
+ runId: string,
133
+ _parentRunId?: string,
134
+ _tags?: string[],
135
+ _fields?: any,
136
+ ): Promise<void> {
137
+ try {
138
+ // Track first token timing for streaming
139
+ if (runId && !(runId in this.completionStartTimes)) {
140
+ console.debug(`LLM first streaming token: ${runId}`);
141
+ this.completionStartTimes[runId] = new Date();
142
+ }
143
+ } catch (e) {
144
+ console.debug(e instanceof Error ? e.message : String(e));
145
+ }
146
+ }
147
+
148
+ override async handleLLMStart(
149
+ llm: Serialized,
150
+ prompts: string[],
151
+ runId: string,
152
+ parentRunId?: string,
153
+ extraParams?: Record<string, unknown>,
154
+ tags?: string[],
155
+ metadata?: Record<string, unknown>,
156
+ name?: string,
157
+ ): Promise<void> {
158
+ try {
159
+ console.debug(`LLM start with ID: ${runId}`);
160
+
161
+ this.handleGenerationStart(llm, prompts, runId, parentRunId, extraParams, tags, metadata, name);
162
+ } catch (e) {
163
+ console.debug(e instanceof Error ? e.message : String(e));
164
+ }
165
+ }
166
+
39
167
  override async handleChatModelStart(
40
168
  llm: Serialized,
41
169
  messages: BaseMessage[][],
42
170
  runId: string,
43
- parentRunId?: string | undefined,
44
- extraParams?: Record<string, unknown> | undefined,
45
- tags?: string[] | undefined,
46
- metadata?: Record<string, unknown> | undefined,
171
+ parentRunId?: string,
172
+ extraParams?: Record<string, unknown>,
173
+ tags?: string[],
174
+ metadata?: Record<string, unknown>,
47
175
  name?: string,
48
176
  ): Promise<void> {
49
- this.initializeRun(extraParams || {}, metadata || {});
50
- this.prompt = this.langchainMassagesToPrompt(messages);
51
- this.startTime = performanceNow();
52
- }
177
+ try {
178
+ console.debug(`Chat model start with ID: ${runId}`);
53
179
 
54
- private initializeRun(extraParams: Record<string, any>, metadata: Record<string, unknown>): void {
55
- this.modelParameters = extraParams['invocation_params'] || {};
180
+ const prompts = messages.flatMap((message) => message.map((m) => this.extractChatMessageContent(m)));
56
181
 
57
- const provider = metadata?.['ls_provider'] as string;
58
- if (provider && LANGCHAIN_TO_OPENLAYER_PROVIDER_MAP[provider]) {
59
- this.provider = LANGCHAIN_TO_OPENLAYER_PROVIDER_MAP[provider];
182
+ this.handleGenerationStart(llm, prompts, runId, parentRunId, extraParams, tags, metadata, name);
183
+ } catch (e) {
184
+ console.debug(e instanceof Error ? e.message : String(e));
60
185
  }
61
- this.model = (this.modelParameters?.['model'] as string) || (metadata['ls_model_name'] as string) || null;
62
- this.output = '';
63
186
  }
64
187
 
65
- private langchainMassagesToPrompt(messages: BaseMessage[][]): Array<{ role: string; content: string }> {
66
- let prompt: Array<{ role: string; content: string }> = [];
67
- for (const message of messages) {
68
- for (const m of message) {
69
- if (m instanceof AIMessage) {
70
- prompt.push({ role: 'assistant', content: m.content as string });
71
- } else if (m instanceof SystemMessage) {
72
- prompt.push({ role: 'system', content: m.content as string });
73
- } else {
74
- prompt.push({ role: 'user', content: m.content as string });
188
+ override async handleLLMEnd(output: LLMResult, runId: string, parentRunId?: string): Promise<void> {
189
+ try {
190
+ console.debug(`LLM end with ID: ${runId}`);
191
+
192
+ if (output.generations.length === 0) {
193
+ console.debug('No generations found in LLM output');
194
+ return;
195
+ }
196
+
197
+ const lastGeneration = output.generations[output.generations.length - 1];
198
+ if (!lastGeneration || lastGeneration.length === 0) {
199
+ console.debug('No responses found in last generation');
200
+ return;
201
+ }
202
+
203
+ const lastResponse = lastGeneration[lastGeneration.length - 1];
204
+
205
+ const llmUsage =
206
+ (lastResponse ? this.extractUsageMetadata(lastResponse) : undefined) ||
207
+ output.llmOutput?.['tokenUsage'];
208
+ const modelName = lastResponse ? this.extractModelNameFromMetadata(lastResponse) : undefined;
209
+
210
+ const usageDetails: Record<string, any> = {
211
+ input: llmUsage?.input_tokens ?? llmUsage?.promptTokens,
212
+ output: llmUsage?.output_tokens ?? llmUsage?.completionTokens,
213
+ total: llmUsage?.total_tokens ?? llmUsage?.totalTokens,
214
+ };
215
+
216
+ // Handle detailed token usage if available
217
+ if (llmUsage && 'input_token_details' in llmUsage) {
218
+ for (const [key, val] of Object.entries(llmUsage['input_token_details'] ?? {})) {
219
+ usageDetails[`input_${key}`] = val;
220
+ if ('input' in usageDetails && typeof val === 'number') {
221
+ usageDetails['input'] = Math.max(0, usageDetails['input'] - val);
222
+ }
75
223
  }
76
224
  }
225
+
226
+ if (llmUsage && 'output_token_details' in llmUsage) {
227
+ for (const [key, val] of Object.entries(llmUsage['output_token_details'] ?? {})) {
228
+ usageDetails[`output_${key}`] = val;
229
+ if ('output' in usageDetails && typeof val === 'number') {
230
+ usageDetails['output'] = Math.max(0, usageDetails['output'] - val);
231
+ }
232
+ }
233
+ }
234
+
235
+ // Extract clean output for dashboard display
236
+ const extractedOutput =
237
+ lastResponse ?
238
+ 'message' in lastResponse && lastResponse['message'] instanceof BaseMessage ?
239
+ lastResponse['message'].content // Just the content, not the full message object
240
+ : lastResponse.text || ''
241
+ : '';
242
+
243
+ // Extract raw output (complete response object for debugging/analysis)
244
+ const rawOutput =
245
+ lastResponse ?
246
+ JSON.stringify(
247
+ {
248
+ generation: lastResponse,
249
+ llmOutput: output.llmOutput,
250
+ fullResponse: output,
251
+ },
252
+ null,
253
+ 2,
254
+ )
255
+ : null;
256
+
257
+ this.handleStepEnd({
258
+ runId,
259
+ output: extractedOutput,
260
+ rawOutput,
261
+ ...(modelName && { modelName }),
262
+ usageDetails,
263
+ ...(runId in this.completionStartTimes && { completionStartTime: this.completionStartTimes[runId] }),
264
+ });
265
+
266
+ if (runId in this.completionStartTimes) {
267
+ delete this.completionStartTimes[runId];
268
+ }
269
+ } catch (e) {
270
+ console.debug(e instanceof Error ? e.message : String(e));
77
271
  }
78
- return prompt;
79
272
  }
80
273
 
81
- override async handleLLMStart(
274
+ override async handleLLMError(err: any, runId: string, parentRunId?: string): Promise<void> {
275
+ try {
276
+ console.debug(`LLM error ${err} with ID: ${runId}`);
277
+
278
+ const azureRefusalError = this.parseAzureRefusalError(err);
279
+
280
+ this.handleStepEnd({
281
+ runId,
282
+ error: err.toString() + azureRefusalError,
283
+ });
284
+ } catch (e) {
285
+ console.debug(e instanceof Error ? e.message : String(e));
286
+ }
287
+ }
288
+
289
+ // ============================================================================
290
+ // Chain Handlers
291
+ // ============================================================================
292
+
293
+ override async handleChainStart(
294
+ chain: Serialized,
295
+ inputs: ChainValues,
296
+ runId: string,
297
+ parentRunId?: string,
298
+ tags?: string[],
299
+ metadata?: Record<string, unknown>,
300
+ runType?: string,
301
+ name?: string,
302
+ ): Promise<void> {
303
+ try {
304
+ console.debug(`Chain start with ID: ${runId}`);
305
+
306
+ const runName = name ?? chain.id.at(-1)?.toString() ?? 'Langchain Chain';
307
+
308
+ this.registerOpenlayerPrompt(parentRunId, metadata);
309
+
310
+ // Process inputs to handle different formats
311
+ let finalInput: string | ChainValues = inputs;
312
+ if (
313
+ typeof inputs === 'object' &&
314
+ 'input' in inputs &&
315
+ Array.isArray(inputs['input']) &&
316
+ inputs['input'].every((m) => m instanceof BaseMessage)
317
+ ) {
318
+ finalInput = inputs['input'].map((m) => this.extractChatMessageContent(m));
319
+ } else if (typeof inputs === 'object' && 'content' in inputs && typeof inputs['content'] === 'string') {
320
+ finalInput = inputs['content'];
321
+ }
322
+
323
+ const { step, endStep } = addChainStepToTrace({
324
+ name: runName,
325
+ inputs: finalInput,
326
+ metadata: this.joinTagsAndMetaData(tags, metadata) || {},
327
+ });
328
+
329
+ this.runMap.set(runId, { step, endStep });
330
+ } catch (e) {
331
+ console.debug(e instanceof Error ? e.message : String(e));
332
+ }
333
+ }
334
+
335
+ override async handleChainEnd(outputs: ChainValues, runId: string, parentRunId?: string): Promise<void> {
336
+ try {
337
+ console.debug(`Chain end with ID: ${runId}`);
338
+
339
+ let finalOutput: ChainValues | string = outputs;
340
+ if (typeof outputs === 'object' && 'output' in outputs && typeof outputs['output'] === 'string') {
341
+ finalOutput = outputs['output'];
342
+ }
343
+
344
+ this.handleStepEnd({
345
+ runId,
346
+ output: finalOutput,
347
+ });
348
+
349
+ this.deregisterOpenlayerPrompt(runId);
350
+ } catch (e) {
351
+ console.debug(e instanceof Error ? e.message : String(e));
352
+ }
353
+ }
354
+
355
+ override async handleChainError(err: any, runId: string, parentRunId?: string): Promise<void> {
356
+ try {
357
+ console.debug(`Chain error: ${err} with ID: ${runId}`);
358
+
359
+ const azureRefusalError = this.parseAzureRefusalError(err);
360
+
361
+ this.handleStepEnd({
362
+ runId,
363
+ error: err.toString() + azureRefusalError,
364
+ });
365
+ } catch (e) {
366
+ console.debug(e instanceof Error ? e.message : String(e));
367
+ }
368
+ }
369
+
370
+ // ============================================================================
371
+ // Agent Handlers
372
+ // ============================================================================
373
+
374
+ override async handleAgentAction(action: AgentAction, runId: string, parentRunId?: string): Promise<void> {
375
+ try {
376
+ console.debug(`Agent action ${action.tool} with ID: ${runId}`);
377
+
378
+ const { step, endStep } = addAgentStepToTrace({
379
+ name: action.tool,
380
+ inputs: action,
381
+ tool: action.tool,
382
+ action: action,
383
+ });
384
+
385
+ this.runMap.set(runId, { step, endStep });
386
+ } catch (e) {
387
+ console.debug(e instanceof Error ? e.message : String(e));
388
+ }
389
+ }
390
+
391
+ override async handleAgentEnd(action: AgentFinish, runId: string, parentRunId?: string): Promise<void> {
392
+ try {
393
+ console.debug(`Agent finish with ID: ${runId}`);
394
+
395
+ this.handleStepEnd({
396
+ runId,
397
+ output: action,
398
+ });
399
+ } catch (e) {
400
+ console.debug(e instanceof Error ? e.message : String(e));
401
+ }
402
+ }
403
+
404
+ // ============================================================================
405
+ // Tool Handlers
406
+ // ============================================================================
407
+
408
+ override async handleToolStart(
409
+ tool: Serialized,
410
+ input: string,
411
+ runId: string,
412
+ parentRunId?: string,
413
+ tags?: string[],
414
+ metadata?: Record<string, unknown>,
415
+ name?: string,
416
+ ): Promise<void> {
417
+ try {
418
+ console.debug(`Tool start with ID: ${runId}`);
419
+
420
+ const { step, endStep } = addToolStepToTrace({
421
+ name: name ?? tool.id.at(-1)?.toString() ?? 'Tool execution',
422
+ inputs: input,
423
+ metadata: this.joinTagsAndMetaData(tags, metadata) || {},
424
+ });
425
+
426
+ this.runMap.set(runId, { step, endStep });
427
+ } catch (e) {
428
+ console.debug(e instanceof Error ? e.message : String(e));
429
+ }
430
+ }
431
+
432
+ override async handleToolEnd(output: string, runId: string, parentRunId?: string): Promise<void> {
433
+ try {
434
+ console.debug(`Tool end with ID: ${runId}`);
435
+
436
+ this.handleStepEnd({
437
+ runId,
438
+ output: output,
439
+ });
440
+ } catch (e) {
441
+ console.debug(e instanceof Error ? e.message : String(e));
442
+ }
443
+ }
444
+
445
+ override async handleToolError(err: any, runId: string, parentRunId?: string): Promise<void> {
446
+ try {
447
+ console.debug(`Tool error ${err} with ID: ${runId}`);
448
+
449
+ this.handleStepEnd({
450
+ runId,
451
+ error: err.toString(),
452
+ });
453
+ } catch (e) {
454
+ console.debug(e instanceof Error ? e.message : String(e));
455
+ }
456
+ }
457
+
458
+ // ============================================================================
459
+ // Retriever Handlers
460
+ // ============================================================================
461
+
462
+ override async handleRetrieverStart(
463
+ retriever: Serialized,
464
+ query: string,
465
+ runId: string,
466
+ parentRunId?: string,
467
+ tags?: string[],
468
+ metadata?: Record<string, unknown>,
469
+ name?: string,
470
+ ): Promise<void> {
471
+ try {
472
+ console.debug(`Retriever start with ID: ${runId}`);
473
+
474
+ const { step, endStep } = addRetrieverStepToTrace({
475
+ name: name ?? retriever.id.at(-1)?.toString() ?? 'Retriever',
476
+ inputs: query,
477
+ metadata: this.joinTagsAndMetaData(tags, metadata) || {},
478
+ });
479
+
480
+ this.runMap.set(runId, { step, endStep });
481
+ } catch (e) {
482
+ console.debug(e instanceof Error ? e.message : String(e));
483
+ }
484
+ }
485
+
486
+ override async handleRetrieverEnd(
487
+ documents: Document<Record<string, any>>[],
488
+ runId: string,
489
+ parentRunId?: string,
490
+ ): Promise<void> {
491
+ try {
492
+ console.debug(`Retriever end with ID: ${runId}`);
493
+
494
+ this.handleStepEnd({
495
+ runId,
496
+ output: documents,
497
+ });
498
+ } catch (e) {
499
+ console.debug(e instanceof Error ? e.message : String(e));
500
+ }
501
+ }
502
+
503
+ override async handleRetrieverError(err: any, runId: string, parentRunId?: string): Promise<void> {
504
+ try {
505
+ console.debug(`Retriever error: ${err} with ID: ${runId}`);
506
+
507
+ this.handleStepEnd({
508
+ runId,
509
+ error: err.toString(),
510
+ });
511
+ } catch (e) {
512
+ console.debug(e instanceof Error ? e.message : String(e));
513
+ }
514
+ }
515
+
516
+ // ============================================================================
517
+ // Private Helper Methods
518
+ // ============================================================================
519
+
520
+ private async handleGenerationStart(
82
521
  llm: Serialized,
83
- prompts: string[],
522
+ messages: (LlmMessage | MessageContent | AnonymousLlmMessage)[],
84
523
  runId: string,
85
524
  parentRunId?: string,
86
525
  extraParams?: Record<string, unknown>,
87
526
  tags?: string[],
88
527
  metadata?: Record<string, unknown>,
89
- runName?: string,
90
- ) {
91
- this.initializeRun(extraParams || {}, metadata || {});
92
- this.prompt = prompts.map((p) => ({ role: 'user', content: p }));
93
- this.startTime = performanceNow();
528
+ name?: string,
529
+ ): Promise<void> {
530
+ console.debug(`Generation start with ID: ${runId} and parentRunId ${parentRunId}`);
531
+
532
+ const runName =
533
+ name ?? llm.id?.at?.(-1)?.toString() ?? llm.id?.slice?.(-1)?.[0]?.toString() ?? 'Langchain Generation';
534
+
535
+ // Extract comprehensive model parameters
536
+ const modelParameters: Record<string, any> = {};
537
+ const invocationParams = extraParams?.['invocation_params'];
538
+
539
+ // Standard parameters
540
+ const standardParams = {
541
+ temperature: (invocationParams as any)?.['temperature'],
542
+ max_tokens: (invocationParams as any)?.['max_tokens'],
543
+ top_p: (invocationParams as any)?.['top_p'],
544
+ top_k: (invocationParams as any)?.['top_k'],
545
+ frequency_penalty: (invocationParams as any)?.['frequency_penalty'],
546
+ presence_penalty: (invocationParams as any)?.['presence_penalty'],
547
+ request_timeout: (invocationParams as any)?.['request_timeout'],
548
+ stop: (invocationParams as any)?.['stop'],
549
+ seed: (invocationParams as any)?.['seed'],
550
+ response_format: (invocationParams as any)?.['response_format'],
551
+ tools: (invocationParams as any)?.['tools'],
552
+ tool_choice: (invocationParams as any)?.['tool_choice'],
553
+ };
554
+
555
+ for (const [key, value] of Object.entries(standardParams)) {
556
+ if (value !== undefined && value !== null) {
557
+ modelParameters[key] = value;
558
+ }
559
+ }
560
+
561
+ // Add any additional parameters that weren't in the standard list
562
+ if (invocationParams && typeof invocationParams === 'object') {
563
+ for (const [key, value] of Object.entries(invocationParams)) {
564
+ if (!(key in standardParams) && value !== undefined && value !== null) {
565
+ modelParameters[key] = value;
566
+ }
567
+ }
568
+ }
569
+
570
+ // Extract model name
571
+ interface InvocationParams {
572
+ _type?: string;
573
+ model?: string;
574
+ model_name?: string;
575
+ repo_id?: string;
576
+ }
577
+
578
+ let extractedModelName: string | undefined;
579
+ if (extraParams) {
580
+ const invocationParamsModelName = (extraParams['invocation_params'] as InvocationParams)?.model;
581
+ const metadataModelName =
582
+ metadata && 'ls_model_name' in metadata ? (metadata['ls_model_name'] as string) : undefined;
583
+
584
+ extractedModelName = invocationParamsModelName ?? metadataModelName;
585
+ }
586
+
587
+ // Extract provider with multiple fallbacks
588
+ let provider = metadata?.['ls_provider'] as string;
589
+
590
+ // Fallback provider detection if not in metadata
591
+ if (!provider) {
592
+ // Try to detect from model name
593
+ if (extractedModelName) {
594
+ if (extractedModelName.includes('gpt') || extractedModelName.includes('openai')) {
595
+ provider = 'openai';
596
+ } else if (extractedModelName.includes('claude')) {
597
+ provider = 'anthropic';
598
+ } else if (extractedModelName.includes('gemini') || extractedModelName.includes('google')) {
599
+ provider = 'google';
600
+ } else if (extractedModelName.includes('llama') || extractedModelName.includes('meta')) {
601
+ provider = 'meta';
602
+ }
603
+ }
604
+
605
+ // Try to detect from LLM class name
606
+ if (!provider && llm.id && Array.isArray(llm.id)) {
607
+ const className = llm.id[0]?.toLowerCase() || '';
608
+ if (className.includes('openai') || className.includes('chatgpt')) {
609
+ provider = 'openai';
610
+ } else if (className.includes('anthropic') || className.includes('claude')) {
611
+ provider = 'anthropic';
612
+ } else if (className.includes('google') || className.includes('gemini')) {
613
+ provider = 'google';
614
+ }
615
+ }
616
+ }
617
+
618
+ const mappedProvider =
619
+ provider && LANGCHAIN_TO_OPENLAYER_PROVIDER_MAP[provider] ?
620
+ LANGCHAIN_TO_OPENLAYER_PROVIDER_MAP[provider]
621
+ : 'Unknown';
622
+
623
+ // Get registered prompt if available
624
+ const registeredPrompt = this.promptToParentRunMap.get(parentRunId ?? 'root');
625
+ if (registeredPrompt && parentRunId) {
626
+ this.deregisterOpenlayerPrompt(parentRunId);
627
+ }
628
+
629
+ // Create step but don't end it yet - we'll update it in handleLLMEnd
630
+ const stepName =
631
+ mappedProvider && PROVIDER_TO_STEP_NAME[mappedProvider] ?
632
+ PROVIDER_TO_STEP_NAME[mappedProvider]
633
+ : runName;
634
+
635
+ // For generations, we need to track the start time and other data to use in handleLLMEnd
636
+ const startTime = performance.now();
637
+
638
+ // Enhanced metadata collection
639
+ const enhancedMetadata = this.joinTagsAndMetaData(tags, metadata, {
640
+ // LangChain specific metadata
641
+ langchain_provider: provider,
642
+ langchain_model: extractedModelName,
643
+ langchain_run_id: runId,
644
+ langchain_parent_run_id: parentRunId,
645
+
646
+ // Invocation details
647
+ invocation_params: invocationParams,
648
+
649
+ // Timing
650
+ start_time: new Date().toISOString(),
651
+
652
+ // LLM info
653
+ llm_class:
654
+ llm.id ?
655
+ Array.isArray(llm.id) ?
656
+ llm.id.join('.')
657
+ : llm.id
658
+ : 'unknown',
659
+
660
+ // Additional context
661
+ ...(Object.keys(modelParameters).length > 0 && { model_parameters: modelParameters }),
662
+ ...(extraParams && { extra_params: extraParams }),
663
+ });
664
+
665
+ this.runMap.set(runId, {
666
+ step: {
667
+ name: stepName,
668
+ inputs: { prompt: messages },
669
+ startTime,
670
+ provider: mappedProvider,
671
+ model: extractedModelName,
672
+ modelParameters,
673
+ metadata: enhancedMetadata,
674
+ prompt: registeredPrompt,
675
+ },
676
+ endStep: () => {}, // Will be replaced in handleLLMEnd
677
+ });
678
+ }
679
+
680
+ private handleStepEnd(params: {
681
+ runId: string;
682
+ output?: any;
683
+ rawOutput?: string | null;
684
+ error?: string;
685
+ modelName?: string | undefined;
686
+ usageDetails?: Record<string, any>;
687
+ completionStartTime?: Date | undefined;
688
+ }): void {
689
+ const { runId, output, rawOutput, error, modelName, usageDetails, completionStartTime } = params;
690
+
691
+ const runData = this.runMap.get(runId);
692
+ if (!runData) {
693
+ console.warn('Step not found in runMap. Skipping operation');
694
+ return;
695
+ }
696
+
697
+ const { step } = runData;
698
+
699
+ // Handle LLM/Generation steps specially
700
+ if (step.provider) {
701
+ const endTime = performance.now();
702
+ const latency = endTime - step.startTime;
703
+
704
+ addChatCompletionStepToTrace({
705
+ name: step.name || 'Unknown Generation',
706
+ inputs: step.inputs || {},
707
+ output: output || '',
708
+ latency,
709
+ tokens: usageDetails?.['total'] || null,
710
+ promptTokens: usageDetails?.['input'] || null,
711
+ completionTokens: usageDetails?.['output'] || null,
712
+ model: modelName || step.model || null,
713
+ modelParameters: step.modelParameters || null,
714
+ metadata:
715
+ error ?
716
+ { ...step.metadata, error, rawOutput: rawOutput || null }
717
+ : { rawOutput: rawOutput || null, ...step.metadata },
718
+ provider: step.provider || 'Unknown',
719
+ });
720
+ } else {
721
+ // For other step types, update and end the existing step
722
+ if (step.log) {
723
+ step.log({
724
+ output: output,
725
+ metadata: error ? { ...step.metadata, error } : step.metadata,
726
+ });
727
+ }
728
+ if (runData.endStep) {
729
+ runData.endStep();
730
+ }
731
+ }
732
+
733
+ this.runMap.delete(runId);
94
734
  }
95
735
 
96
- override async handleLLMEnd(output: LLMResult, runId: string, parentRunId?: string, tags?: string[]) {
97
- this.endTime = performanceNow();
98
- this.latency = this.endTime - this.startTime!;
99
- this.extractTokenInformation(output);
100
- this.extractOutput(output);
101
- this.addToTrace();
736
+ private registerOpenlayerPrompt(parentRunId?: string, metadata?: Record<string, unknown>): void {
737
+ if (metadata && 'openlayerPrompt' in metadata && parentRunId) {
738
+ this.promptToParentRunMap.set(parentRunId, metadata['openlayerPrompt'] as OpenlayerPrompt);
739
+ }
102
740
  }
103
741
 
104
- private extractTokenInformation(output: LLMResult) {
105
- if (this.provider === 'OpenAI') {
106
- this.openaiTokenInformation(output);
742
  /** Drops any prompt previously registered under the given run id. */
  private deregisterOpenlayerPrompt(runId: string): void {
    this.promptToParentRunMap.delete(runId);
  }
745
+
746
+ private parseAzureRefusalError(err: any): string {
747
+ let azureRefusalError = '';
748
+ if (typeof err == 'object' && 'error' in err) {
749
+ try {
750
+ azureRefusalError = '\n\nError details:\n' + JSON.stringify(err['error'], null, 2);
751
+ } catch {}
107
752
  }
753
+ return azureRefusalError;
108
754
  }
109
755
 
110
- private openaiTokenInformation(output: LLMResult) {
111
- if (output.llmOutput && 'tokenUsage' in output.llmOutput) {
112
- this.promptTokens = output.llmOutput?.['tokenUsage']?.promptTokens ?? 0;
113
- this.completionTokens = output.llmOutput?.['tokenUsage']?.completionTokens ?? 0;
114
- this.totalTokens = output.llmOutput?.['tokenUsage']?.totalTokens ?? 0;
756
+ private joinTagsAndMetaData(
757
+ tags?: string[],
758
+ metadata1?: Record<string, unknown>,
759
+ metadata2?: Record<string, unknown>,
760
+ ): Record<string, unknown> | undefined {
761
+ const finalDict: Record<string, unknown> = {};
762
+ if (tags && tags.length > 0) {
763
+ finalDict['tags'] = tags;
115
764
  }
765
+ if (metadata1) {
766
+ Object.assign(finalDict, metadata1);
767
+ }
768
+ if (metadata2) {
769
+ Object.assign(finalDict, metadata2);
770
+ }
771
+ return this.stripOpenlayerKeysFromMetadata(finalDict);
116
772
  }
117
773
 
118
- private extractOutput(output: LLMResult) {
119
- const lastResponse = output?.generations?.at(-1)?.at(-1) ?? undefined;
120
- this.output += lastResponse?.text ?? '';
774
+ private stripOpenlayerKeysFromMetadata(
775
+ metadata?: Record<string, unknown>,
776
+ ): Record<string, unknown> | undefined {
777
+ if (!metadata) {
778
+ return;
779
+ }
780
+
781
+ const openlayerKeys = ['openlayerPrompt', 'openlayerUserId', 'openlayerSessionId'];
782
+
783
+ return Object.fromEntries(Object.entries(metadata).filter(([key, _]) => !openlayerKeys.includes(key)));
121
784
  }
122
785
 
123
- private addToTrace() {
124
- let name = 'Chat Completion Model';
125
- if (this.provider && this.provider in PROVIDER_TO_STEP_NAME) {
126
- name = PROVIDER_TO_STEP_NAME[this.provider] ?? 'Chat Completion Model';
786
+ private extractUsageMetadata(generation: Generation): UsageMetadata | undefined {
787
+ try {
788
+ const usageMetadata =
789
+ (
790
+ 'message' in generation &&
791
+ (generation['message'] instanceof AIMessage || generation['message'] instanceof AIMessageChunk)
792
+ ) ?
793
+ generation['message'].usage_metadata
794
+ : undefined;
795
+
796
+ return usageMetadata;
797
+ } catch (err) {
798
+ console.debug(`Error extracting usage metadata: ${err}`);
799
+ return;
127
800
  }
128
- addChatCompletionStepToTrace({
129
- name: name,
130
- inputs: { prompt: this.prompt },
131
- output: this.output,
132
- latency: this.latency,
133
- tokens: this.totalTokens,
134
- promptTokens: this.promptTokens,
135
- completionTokens: this.completionTokens,
136
- model: this.model,
137
- modelParameters: this.modelParameters,
138
- metadata: this.metadata,
139
- provider: this.provider ?? '',
140
- startTime: this.startTime,
141
- endTime: this.endTime,
142
- });
801
+ }
802
+
803
+ private extractModelNameFromMetadata(generation: any): string | undefined {
804
+ try {
805
+ return (
806
+ 'message' in generation &&
807
+ (generation['message'] instanceof AIMessage || generation['message'] instanceof AIMessageChunk)
808
+ ) ?
809
+ generation['message'].response_metadata?.['model_name']
810
+ : undefined;
811
+ } catch {
812
+ return undefined;
813
+ }
814
+ }
815
+
816
  /**
   * Converts a LangChain message into the role/content shape Openlayer
   * expects. Branch order matters: more specific message classes are
   * checked before their bases (e.g. ChatMessage before AIMessage) —
   * NOTE(review): preserves the original ordering; confirm against the
   * LangChain message class hierarchy before reordering.
   */
  private extractChatMessageContent(message: BaseMessage): LlmMessage | AnonymousLlmMessage | MessageContent {
    let response: any = undefined;

    if (message instanceof HumanMessage) {
      response = { content: message.content, role: 'user' };
    } else if (message instanceof ChatMessage) {
      // ChatMessage carries an explicit, caller-supplied role.
      response = { content: message.content, role: message.role };
    } else if (message instanceof AIMessage) {
      response = { content: message.content, role: 'assistant' };

      // Surface tool calls only when the assistant actually made some.
      if ('tool_calls' in message && (message.tool_calls?.length ?? 0) > 0) {
        response['tool_calls'] = message['tool_calls'];
      }
    } else if (message instanceof SystemMessage) {
      response = { content: message.content, role: 'system' };
    } else if (message instanceof FunctionMessage) {
      // Function/tool results use the message name as the role.
      response = {
        content: message.content,
        additional_kwargs: message.additional_kwargs,
        role: message.name,
      };
    } else if (message instanceof ToolMessage) {
      response = {
        content: message.content,
        additional_kwargs: message.additional_kwargs,
        role: message.name,
      };
    } else if (!message.name) {
      // Unknown message type without a name: emit content only.
      response = { content: message.content };
    } else {
      response = {
        role: message.name,
        content: message.content,
      };
    }

    // Attach additional_kwargs whenever they include function/tool calls,
    // regardless of which branch produced the response.
    if (message.additional_kwargs?.function_call || message.additional_kwargs?.tool_calls) {
      return { ...response, additional_kwargs: message.additional_kwargs };
    }

    return response;
  }
144
858
  }