@traccia2/sdk 0.0.7 → 0.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/dist/exporter/http-exporter.d.ts +8 -2
  2. package/dist/exporter/http-exporter.d.ts.map +1 -1
  3. package/dist/exporter/http-exporter.js +51 -12
  4. package/dist/exporter/http-exporter.js.map +1 -1
  5. package/dist/integrations/index.d.ts +1 -1
  6. package/dist/integrations/index.js +3 -3
  7. package/dist/integrations/langchain-callback.d.ts +57 -91
  8. package/dist/integrations/langchain-callback.d.ts.map +1 -1
  9. package/dist/integrations/langchain-callback.js +464 -316
  10. package/dist/integrations/langchain-callback.js.map +1 -1
  11. package/dist/integrations/langchain-callback.old.d.ts +96 -0
  12. package/dist/integrations/langchain-callback.old.d.ts.map +1 -0
  13. package/dist/integrations/langchain-callback.old.js +371 -0
  14. package/dist/integrations/langchain-callback.old.js.map +1 -0
  15. package/dist/tracer/provider.d.ts +3 -3
  16. package/dist/tracer/provider.d.ts.map +1 -1
  17. package/dist/tracer/provider.js +9 -0
  18. package/dist/tracer/provider.js.map +1 -1
  19. package/dist/types.d.ts +4 -0
  20. package/dist/types.d.ts.map +1 -1
  21. package/package.json +1 -1
  22. package/src/__tests__/integrations-langchain.test.ts +354 -340
  23. package/src/exporter/http-exporter.ts +57 -13
  24. package/src/integrations/index.ts +1 -1
  25. package/src/integrations/langchain-callback.old.ts +438 -0
  26. package/src/integrations/langchain-callback.ts +723 -351
  27. package/src/tracer/provider.ts +9 -4
  28. package/src/types.ts +4 -0
  29. package/dist/integrations/langchain-callback.new.d.ts +0 -62
  30. package/dist/integrations/langchain-callback.new.d.ts.map +0 -1
  31. package/dist/integrations/langchain-callback.new.js +0 -519
  32. package/dist/integrations/langchain-callback.new.js.map +0 -1
  33. package/src/integrations/langchain-callback.new.ts +0 -810
@@ -4,7 +4,7 @@
4
4
 
5
5
  import * as https from 'https';
6
6
  import * as http from 'http';
7
- import { ISpan, ISpanExporter } from '../types';
7
+ import { ISpan, ISpanExporter, Resource } from '../types';
8
8
 
9
9
  export const DEFAULT_ENDPOINT = 'https://api.dashboard.com/api/v1/traces';
10
10
 
@@ -20,6 +20,7 @@ export interface HttpExporterOptions {
20
20
  maxRetries?: number;
21
21
  backoffBase?: number;
22
22
  backoffJitter?: number;
23
+ resource?: Resource;
23
24
  }
24
25
 
25
26
  /**
@@ -32,6 +33,7 @@ export class HttpExporter implements ISpanExporter {
32
33
  private maxRetries: number;
33
34
  private backoffBase: number;
34
35
  private backoffJitter: number;
36
+ private resource: Resource;
35
37
 
36
38
  constructor(options: HttpExporterOptions = {}) {
37
39
  this.endpoint = options.endpoint || DEFAULT_ENDPOINT;
@@ -40,6 +42,12 @@ export class HttpExporter implements ISpanExporter {
40
42
  this.maxRetries = options.maxRetries || 5;
41
43
  this.backoffBase = options.backoffBase || 1;
42
44
  this.backoffJitter = options.backoffJitter || 0.5;
45
+ this.resource = options.resource || {
46
+ 'sdk.name': 'traccia-sdk-ts',
47
+ 'sdk.version': '1.0.0',
48
+ 'service.name': 'unknown-service',
49
+ 'service.version': 'unknown',
50
+ };
43
51
  }
44
52
 
45
53
  /**
@@ -87,26 +95,62 @@ export class HttpExporter implements ISpanExporter {
87
95
  }
88
96
 
89
97
  /**
90
- * Serialize spans to JSON bytes.
98
+ * Serialize spans to JSON bytes in OpenTelemetry-like format.
91
99
  */
92
100
  private serializeSpans(spans: ISpan[]): string {
93
- const serialized = spans.map((span) => ({
101
+ // Group all spans under a single scope for now
102
+ const scope = {
103
+ name: this.resource['sdk.name'] || 'agent-tracing-sdk',
104
+ version: this.resource['sdk.version'] || '0.1.0',
105
+ };
106
+
107
+ const otelSpans = spans.map((span) => ({
94
108
  traceId: span.context.traceId,
95
109
  spanId: span.context.spanId,
96
- parentSpanId: span.parentSpanId,
110
+ parentSpanId: span.parentSpanId ?? null,
97
111
  name: span.name,
98
- startTimeNs: span.startTimeNs,
99
- endTimeNs: span.endTimeNs,
100
- durationNs: span.durationNs,
112
+ startTimeUnixNano: span.startTimeNs,
113
+ endTimeUnixNano: span.endTimeNs,
101
114
  attributes: span.attributes,
102
- events: span.events,
103
- status: span.status,
104
- statusDescription: span.statusDescription,
105
- traceFlags: span.context.traceFlags,
106
- traceState: span.context.traceState,
115
+ events: span.events || [],
116
+ status: typeof span.status === 'object' && 'code' in span.status ? span.status : { code: this.statusToString(span.status), message: span.statusDescription || '' },
107
117
  }));
108
118
 
109
- return JSON.stringify(serialized);
119
+ const payload = {
120
+ resource: {
121
+ 'service.name': this.resource['service.name'],
122
+ 'service.version': this.resource['service.version'],
123
+ ...this.resource,
124
+ },
125
+ items: [
126
+ {
127
+ scopeSpans: [
128
+ {
129
+ scope,
130
+ spans: otelSpans,
131
+ },
132
+ ],
133
+ },
134
+ ],
135
+ };
136
+
137
+ return JSON.stringify(payload);
138
+ }
139
+
140
+ /**
141
+ * Convert numeric status to string code for OTLP compatibility.
142
+ */
143
+ private statusToString(status: any): string {
144
+ switch (status) {
145
+ case 0:
146
+ return 'UNSET';
147
+ case 1:
148
+ return 'OK';
149
+ case 2:
150
+ return 'ERROR';
151
+ default:
152
+ return 'UNSET';
153
+ }
110
154
  }
111
155
 
112
156
  /**
@@ -7,7 +7,7 @@
7
7
 
8
8
  // LangChain integrations
9
9
  export { TracciaCallbackHandler } from './langchain-callback';
10
- export { TracciaCallbackHandlerNew } from './langchain-callback.new';
10
+ export { TracciaCallbackHandlerOld } from './langchain-callback.old';
11
11
  export {
12
12
  getTraciaHandler,
13
13
  withTracing,
@@ -0,0 +1,438 @@
1
+ /**
2
+ * LangChain callback handler for automatic tracing.
3
+ * Integrates with LangChain's callback system to automatically instrument
4
+ * LLM calls, chains, agents, and tools.
5
+ */
6
+
7
+ import { ISpan } from '../types';
8
+ import { getTracer } from '../auto';
9
+ import { BaseCallbackHandler } from '@langchain/core/callbacks/base';
10
+
11
+ /**
12
+ * LangChain Callback Handler for Traccia SDK.
13
+ * Automatically traces LLM calls, chains, agents, and tools.
14
+ *
15
+ * Extends LangChain's BaseCallbackHandler for proper interface compliance.
16
+ * Compatible with LangChain 0.0.x, 0.1.x, 0.2.x, and 1.x versions.
17
+ *
18
+ * @example
19
+ * import { ChatOpenAI } from '@langchain/openai';
20
+ * import { TracciaCallbackHandler } from '@traccia/sdk/integrations';
21
+ *
22
+ * const handler = new TracciaCallbackHandler();
23
+ * const model = new ChatOpenAI({ callbacks: [handler] });
24
+ *
25
+ * const response = await model.invoke({ input: 'Hello!' });
26
+ * // Automatically traced with spans for LLM calls, tokens, latency, etc.
27
+ */
28
+ export class TracciaCallbackHandlerOld extends BaseCallbackHandler {
29
+ name = 'TracciaCallbackHandlerOld';
30
+ private tracer = getTracer('langchain');
31
+ private spanStack: Map<string, ISpan> = new Map();
32
+ private streamingStartTimes: Record<string, Date> = {};
33
+
34
+ /**
35
+ * Extract model name from LLM instance, checking multiple property locations.
36
+ * Different LLM implementations store the model name in different properties.
37
+ */
38
+ private extractModelName(llm: any): string {
39
+ // Check common model name properties
40
+ if (llm.modelName) return llm.modelName; // ChatOpenAI, ChatAnthropic, etc.
41
+ if (llm.model) return llm.model; // Ollama, etc.
42
+ if (llm.name && !llm.name.startsWith('langchain')) return llm.name; // Generic name
43
+ if (llm._modelType) return llm._modelType; // Fallback to type
44
+ if (llm.client?.model) return llm.client.model; // Nested model property
45
+ return 'unknown';
46
+ }
47
+
48
+ /**
49
+ * Handle LLM start - called when an LLM begins execution.
50
+ */
51
+ public async handleLLMStart(
52
+ llm: any,
53
+ prompts: string[],
54
+ runId: string,
55
+ _parentRunId?: string
56
+ ): Promise<void> {
57
+ const modelName = this.extractModelName(llm);
58
+
59
+ const attributes: Record<string, any> = {
60
+ type: 'llm',
61
+ model: modelName,
62
+ prompt_count: prompts.length,
63
+ first_prompt_length: prompts[0]?.length || 0,
64
+ };
65
+
66
+ // Capture temperature if available
67
+ if (llm.temperature !== undefined) {
68
+ attributes.temperature = llm.temperature;
69
+ }
70
+
71
+ // Capture max tokens if available
72
+ if (llm.maxTokens !== undefined) {
73
+ attributes.max_tokens = llm.maxTokens;
74
+ }
75
+ if (llm.max_tokens !== undefined) {
76
+ attributes.max_tokens = llm.max_tokens;
77
+ }
78
+
79
+ // Capture top_p if available
80
+ if (llm.topP !== undefined) {
81
+ attributes.top_p = llm.topP;
82
+ }
83
+
84
+ // Capture top_k if available
85
+ if (llm.topK !== undefined) {
86
+ attributes.top_k = llm.topK;
87
+ }
88
+
89
+ // Capture base URL for local models (Ollama, etc.)
90
+ if (llm.baseUrl) {
91
+ attributes.base_url = llm.baseUrl;
92
+ }
93
+
94
+ const span = this.tracer.startSpan('llm', { attributes });
95
+ this.spanStack.set(runId, span);
96
+ }
97
+
98
+ /**
99
+ * Handle LLM end - called when an LLM finishes execution.
100
+ */
101
+ public async handleLLMEnd(output: any, runId: string): Promise<void> {
102
+ const span = this.spanStack.get(runId);
103
+ if (span) {
104
+ try {
105
+ // Try multiple ways to get token usage
106
+ // OpenAI format and new @langchain/core format
107
+ const tokenUsage =
108
+ output?.llmOutput?.token_usage ||
109
+ output?.llmOutput?.tokenUsage ||
110
+ output?.token_usage ||
111
+ output?.metadata?.token_usage;
112
+
113
+ if (tokenUsage) {
114
+ // Handle standard token counts
115
+ const promptTokens = tokenUsage.prompt_tokens ?? tokenUsage.promptTokens;
116
+ const completionTokens = tokenUsage.completion_tokens ?? tokenUsage.completionTokens;
117
+ const totalTokens = tokenUsage.total_tokens ?? tokenUsage.totalTokens;
118
+
119
+ if (promptTokens !== undefined) {
120
+ span.setAttribute('llm.tokens.prompt', promptTokens);
121
+ }
122
+ if (completionTokens !== undefined) {
123
+ span.setAttribute('llm.tokens.completion', completionTokens);
124
+ }
125
+ if (totalTokens !== undefined) {
126
+ span.setAttribute('llm.tokens.total', totalTokens);
127
+ }
128
+
129
+ // Handle detailed token breakdown for models like GPT-4o vision
130
+ // input_token_details contains breakdown of prompt token usage
131
+ if (tokenUsage.input_token_details && typeof tokenUsage.input_token_details === 'object') {
132
+ for (const [key, value] of Object.entries(tokenUsage.input_token_details)) {
133
+ if (typeof value === 'number') {
134
+ span.setAttribute(`llm.tokens.input_${key}`, value);
135
+ }
136
+ }
137
+ }
138
+
139
+ // output_token_details contains breakdown of completion token usage
140
+ if (tokenUsage.output_token_details && typeof tokenUsage.output_token_details === 'object') {
141
+ for (const [key, value] of Object.entries(tokenUsage.output_token_details)) {
142
+ if (typeof value === 'number') {
143
+ span.setAttribute(`llm.tokens.output_${key}`, value);
144
+ }
145
+ }
146
+ }
147
+ }
148
+
149
+ // Capture output text length
150
+ if (output?.text) {
151
+ span.setAttribute('output_length', output.text.length);
152
+ } else if (output?.generations && Array.isArray(output.generations)) {
153
+ const firstGeneration = output.generations[0];
154
+ if (firstGeneration?.[0]?.text) {
155
+ span.setAttribute('output_length', firstGeneration[0].text.length);
156
+ }
157
+ } else if (output?.message?.content) {
158
+ // Ollama format (uses message.content)
159
+ const content = output.message.content;
160
+ const contentStr = typeof content === 'string' ? content : JSON.stringify(content);
161
+ span.setAttribute('output_length', contentStr.length);
162
+ } else if (typeof output === 'string') {
163
+ // Direct string output
164
+ span.setAttribute('output_length', output.length);
165
+ }
166
+
167
+ // Capture finish reason if available
168
+ if (output?.llmOutput?.finish_reason) {
169
+ span.setAttribute('finish_reason', output.llmOutput.finish_reason);
170
+ }
171
+ } catch (error) {
172
+ // Silently fail on attribute setting
173
+ }
174
+
175
+ span.end();
176
+ this.spanStack.delete(runId);
177
+ }
178
+ }
179
+
180
+ /**
181
+ * Handle LLM error.
182
+ */
183
+ public async handleLLMError(error: Error, runId: string): Promise<void> {
184
+ const span = this.spanStack.get(runId);
185
+ if (span) {
186
+ span.recordException(error, { source: 'langchain-llm' });
187
+ span.end();
188
+ this.spanStack.delete(runId);
189
+ }
190
+ }
191
+
192
+ /**
193
+ * Handle LLM new token - called when a new token is generated during streaming.
194
+ * Tracks first token latency and token count for streaming scenarios.
195
+ */
196
+ public async handleLLMNewToken(
197
+ _token: string,
198
+ _idx?: any,
199
+ runId?: string
200
+ ): Promise<void> {
201
+ if (runId && !(runId in this.streamingStartTimes)) {
202
+ // Record the time of the first streaming token
203
+ this.streamingStartTimes[runId] = new Date();
204
+ const span = this.spanStack.get(runId);
205
+ if (span) {
206
+ try {
207
+ span.setAttribute('stream.first_token_generated', true);
208
+ } catch (error) {
209
+ // Silently fail
210
+ }
211
+ }
212
+ }
213
+ }
214
+
215
+ /**
216
+ * Handle chain start - called when a chain begins execution.
217
+ */
218
+ public async handleChainStart(
219
+ chain: any,
220
+ inputs: any,
221
+ runId: string,
222
+ _parentRunId?: string
223
+ ): Promise<void> {
224
+ const chainName = chain.name || chain._chainType || 'chain';
225
+
226
+ const attributes: Record<string, any> = {
227
+ type: 'chain',
228
+ chain_name: chainName,
229
+ chain_type: chain._chainType,
230
+ input_keys: Object.keys(inputs || {}).join(','),
231
+ input_count: Object.keys(inputs || {}).length,
232
+ };
233
+
234
+ // Capture total input length
235
+ try {
236
+ const inputStr = JSON.stringify(inputs);
237
+ attributes.input_length = inputStr.length;
238
+ } catch (error) {
239
+ // Silently fail
240
+ }
241
+
242
+ const span = this.tracer.startSpan(`chain:${chainName}`, { attributes });
243
+ this.spanStack.set(runId, span);
244
+ }
245
+
246
+ /**
247
+ * Handle chain end - called when a chain finishes execution.
248
+ */
249
+ public async handleChainEnd(output: any, runId: string): Promise<void> {
250
+ const span = this.spanStack.get(runId);
251
+ if (span) {
252
+ try {
253
+ if (output) {
254
+ const outputStr = typeof output === 'string' ? output : JSON.stringify(output);
255
+ span.setAttribute('output_length', outputStr.length);
256
+ }
257
+ } catch (error) {
258
+ // Silently fail
259
+ }
260
+
261
+ span.end();
262
+ this.spanStack.delete(runId);
263
+ }
264
+ }
265
+
266
+ /**
267
+ * Handle chain error.
268
+ */
269
+ public async handleChainError(error: Error, runId: string): Promise<void> {
270
+ const span = this.spanStack.get(runId);
271
+ if (span) {
272
+ span.recordException(error, { source: 'langchain-chain' });
273
+ span.end();
274
+ this.spanStack.delete(runId);
275
+ }
276
+ }
277
+
278
+ /**
279
+ * Handle tool start - called when a tool is invoked.
280
+ */
281
+ public async handleToolStart(
282
+ tool: any,
283
+ input: string,
284
+ runId: string,
285
+ _parentRunId?: string
286
+ ): Promise<void> {
287
+ const toolName = tool.name || 'unknown-tool';
288
+
289
+ const attributes: Record<string, any> = {
290
+ type: 'tool',
291
+ tool_name: toolName,
292
+ tool_description: tool.description || '',
293
+ input_length: typeof input === 'string' ? input.length : (typeof input === 'object' ? JSON.stringify(input).length : 0),
294
+ };
295
+
296
+ // Try to capture structured input
297
+ try {
298
+ if (typeof input === 'object') {
299
+ attributes.input_keys = Object.keys(input).join(',');
300
+ }
301
+ } catch (error) {
302
+ // Silently fail
303
+ }
304
+
305
+ const span = this.tracer.startSpan(`tool:${toolName}`, { attributes });
306
+ this.spanStack.set(runId, span);
307
+ }
308
+
309
+ /**
310
+ * Handle tool end - called when a tool finishes execution.
311
+ */
312
+ public async handleToolEnd(output: string, runId: string): Promise<void> {
313
+ const span = this.spanStack.get(runId);
314
+ if (span) {
315
+ try {
316
+ span.setAttribute('output_length', output?.length || 0);
317
+ } catch (error) {
318
+ // Silently fail
319
+ }
320
+
321
+ span.end();
322
+ this.spanStack.delete(runId);
323
+ }
324
+ }
325
+
326
+ /**
327
+ * Handle tool error.
328
+ */
329
+ public async handleToolError(error: Error, runId: string): Promise<void> {
330
+ const span = this.spanStack.get(runId);
331
+ if (span) {
332
+ span.recordException(error, { source: 'langchain-tool' });
333
+ span.end();
334
+ this.spanStack.delete(runId);
335
+ }
336
+ }
337
+
338
+ /**
339
+ * Handle agent action.
340
+ */
341
+ public async handleAgentAction(action: any, runId: string): Promise<void> {
342
+ const span = this.spanStack.get(runId);
343
+ if (span) {
344
+ try {
345
+ span.setAttribute('agent_action', action.tool);
346
+ } catch (error) {
347
+ // Silently fail
348
+ }
349
+ }
350
+ }
351
+
352
+ /**
353
+ * Handle agent finish.
354
+ */
355
+ public async handleAgentFinish(finish: any, runId: string): Promise<void> {
356
+ const span = this.spanStack.get(runId);
357
+ if (span) {
358
+ try {
359
+ span.setAttribute('agent_finish_output', JSON.stringify(finish.returnValues));
360
+ } catch (error) {
361
+ // Silently fail
362
+ }
363
+
364
+ span.end();
365
+ this.spanStack.delete(runId);
366
+ }
367
+ }
368
+
369
+ // LangChain uses 'on*' prefix for callback methods
370
+ // Provide aliases for compatibility
371
+ public async onLLMStart(
372
+ llm: any,
373
+ prompts: string[],
374
+ runId: string,
375
+ parentRunId?: string
376
+ ): Promise<void> {
377
+ return this.handleLLMStart(llm, prompts, runId, parentRunId);
378
+ }
379
+
380
+ public async onLLMEnd(output: any, runId: string): Promise<void> {
381
+ return this.handleLLMEnd(output, runId);
382
+ }
383
+
384
+ public async onLLMError(error: Error, runId: string): Promise<void> {
385
+ return this.handleLLMError(error, runId);
386
+ }
387
+
388
+ public async onLLMNewToken(
389
+ _token: string,
390
+ idx?: any,
391
+ runId?: string
392
+ ): Promise<void> {
393
+ return this.handleLLMNewToken(_token, idx, runId);
394
+ }
395
+
396
+ public async onChainStart(
397
+ chain: any,
398
+ inputs: any,
399
+ runId: string,
400
+ parentRunId?: string
401
+ ): Promise<void> {
402
+ return this.handleChainStart(chain, inputs, runId, parentRunId);
403
+ }
404
+
405
+ public async onChainEnd(output: any, runId: string): Promise<void> {
406
+ return this.handleChainEnd(output, runId);
407
+ }
408
+
409
+ public async onChainError(error: Error, runId: string): Promise<void> {
410
+ return this.handleChainError(error, runId);
411
+ }
412
+
413
+ public async onToolStart(
414
+ tool: any,
415
+ input: string,
416
+ runId: string,
417
+ parentRunId?: string
418
+ ): Promise<void> {
419
+ return this.handleToolStart(tool, input, runId, parentRunId);
420
+ }
421
+
422
+ public async onToolEnd(output: string, runId: string): Promise<void> {
423
+ return this.handleToolEnd(output, runId);
424
+ }
425
+
426
+ public async onToolError(error: Error, runId: string): Promise<void> {
427
+ return this.handleToolError(error, runId);
428
+ }
429
+
430
+ public async onAgentAction(action: any, runId: string): Promise<void> {
431
+ return this.handleAgentAction(action, runId);
432
+ }
433
+
434
+ public async onAgentFinish(finish: any, runId: string): Promise<void> {
435
+ return this.handleAgentFinish(finish, runId);
436
+ }
437
+ }
438
+