tracia 0.2.0 → 0.2.3

package/dist/index.d.mts CHANGED
@@ -1,5 +1,7 @@
 interface TraciaOptions {
     apiKey: string;
+    /** Called when background trace creation fails */
+    onTraceError?: (error: Error, traceId: string) => void;
 }
 interface RunVariables {
     [key: string]: string;
@@ -33,7 +35,16 @@ declare enum TraciaErrorCode {
     INVALID_REQUEST = "INVALID_REQUEST",
     NETWORK_ERROR = "NETWORK_ERROR",
     TIMEOUT = "TIMEOUT",
-    UNKNOWN = "UNKNOWN"
+    ABORTED = "ABORTED",
+    UNKNOWN = "UNKNOWN",
+    MISSING_PROVIDER_SDK = "MISSING_PROVIDER_SDK",
+    MISSING_PROVIDER_API_KEY = "MISSING_PROVIDER_API_KEY",
+    UNSUPPORTED_MODEL = "UNSUPPORTED_MODEL"
+}
+declare enum LLMProvider {
+    OPENAI = "openai",
+    ANTHROPIC = "anthropic",
+    GOOGLE = "google"
 }
 interface ApiSuccessResponse {
     text: string;
@@ -43,7 +54,7 @@ interface ApiSuccessResponse {
     usage: TokenUsage;
     cost: number;
 }
-type MessageRole = 'system' | 'user' | 'assistant';
+type MessageRole = 'system' | 'user' | 'assistant' | 'tool';
 interface PromptMessage {
     id: string;
     role: MessageRole;
@@ -151,6 +162,344 @@ interface EvaluateResult {
     note: string | null;
     createdAt: string;
 }
+interface ToolDefinition {
+    name: string;
+    description: string;
+    parameters: ToolParameters;
+}
+interface ToolParameters {
+    type: 'object';
+    properties: Record<string, JsonSchemaProperty>;
+    required?: string[];
+}
+interface JsonSchemaProperty {
+    type: 'string' | 'number' | 'integer' | 'boolean' | 'array' | 'object';
+    description?: string;
+    enum?: (string | number)[];
+    items?: JsonSchemaProperty;
+    properties?: Record<string, JsonSchemaProperty>;
+    required?: string[];
+}
+/**
+ * Tool call returned in results - user-friendly format.
+ */
+interface ToolCall {
+    id: string;
+    name: string;
+    arguments: Record<string, unknown>;
+}
+type ToolChoice = 'auto' | 'none' | 'required' | {
+    tool: string;
+};
+type FinishReason = 'stop' | 'tool_calls' | 'max_tokens';
+/**
+ * Text content part for messages.
+ */
+interface TextPart {
+    type: 'text';
+    text: string;
+}
+/**
+ * Tool call part in assistant messages.
+ */
+interface ToolCallPart {
+    type: 'tool_call';
+    id: string;
+    name: string;
+    arguments: Record<string, unknown>;
+}
+type ContentPart = TextPart | ToolCallPart;
+/**
+ * Message format for LLM conversations.
+ *
+ * @example System message
+ * ```typescript
+ * { role: 'system', content: 'You are a helpful assistant.' }
+ * ```
+ *
+ * @example User message
+ * ```typescript
+ * { role: 'user', content: 'What is the weather?' }
+ * ```
+ *
+ * @example Assistant message with tool calls
+ * ```typescript
+ * {
+ *   role: 'assistant',
+ *   content: [
+ *     { type: 'text', text: 'Let me check the weather.' },
+ *     { type: 'tool_call', id: 'call_123', name: 'get_weather', arguments: { location: 'Paris' } }
+ *   ]
+ * }
+ * ```
+ *
+ * @example Tool result message (simple format)
+ * ```typescript
+ * { role: 'tool', toolCallId: 'call_123', toolName: 'get_weather', content: '{"temp": 22, "unit": "celsius"}' }
+ * ```
+ */
+interface LocalPromptMessage {
+    role: MessageRole;
+    content: string | ContentPart[];
+    /** Required when role is 'tool' - the ID of the tool call this is responding to */
+    toolCallId?: string;
+    /** Required when role is 'tool' - the name of the tool that was called */
+    toolName?: string;
+}
+interface RunLocalInput {
+    messages: LocalPromptMessage[];
+    model: string;
+    /** Enable streaming. When true, returns LocalStream. When false/undefined, returns Promise<RunLocalResult>. */
+    stream?: boolean;
+    /** Explicitly specify the provider. Use for new/custom models not in the built-in list. */
+    provider?: LLMProvider;
+    temperature?: number;
+    maxOutputTokens?: number;
+    topP?: number;
+    stopSequences?: string[];
+    /** Timeout in milliseconds for the LLM call */
+    timeoutMs?: number;
+    /** Provider-specific options passed directly to the SDK */
+    customOptions?: Record<string, unknown>;
+    variables?: Record<string, string>;
+    providerApiKey?: string;
+    tags?: string[];
+    userId?: string;
+    sessionId?: string;
+    sendTrace?: boolean;
+    /** Custom trace ID. Must match format: tr_ + 16 hex characters */
+    traceId?: string;
+    /** Tool definitions for function calling */
+    tools?: ToolDefinition[];
+    /** Control which tools the model can use */
+    toolChoice?: ToolChoice;
+    /** AbortSignal to cancel the request (only used when stream: true) */
+    signal?: AbortSignal;
+}
+interface RunLocalResult {
+    text: string;
+    traceId: string;
+    latencyMs: number;
+    usage: TokenUsage;
+    cost: number | null;
+    provider: LLMProvider;
+    model: string;
+    /** Tool calls made by the model, empty array if none */
+    toolCalls: ToolCall[];
+    /** Reason the model stopped generating */
+    finishReason: FinishReason;
+    /** Full assistant message for round-tripping in multi-turn conversations */
+    message: LocalPromptMessage;
+}
+interface CreateTracePayload {
+    traceId: string;
+    model: string;
+    provider: LLMProvider;
+    input: {
+        messages: LocalPromptMessage[];
+    };
+    variables: Record<string, string> | null;
+    output: string | null;
+    status: TraceStatus;
+    error: string | null;
+    latencyMs: number;
+    inputTokens: number;
+    outputTokens: number;
+    totalTokens: number;
+    tags?: string[];
+    userId?: string;
+    sessionId?: string;
+    temperature?: number;
+    maxOutputTokens?: number;
+    topP?: number;
+    tools?: ToolDefinition[];
+    toolCalls?: ToolCall[];
+}
+interface CreateTraceResult {
+    traceId: string;
+    cost: number | null;
+}
+/**
+ * Final result returned after a stream completes.
+ * Includes all fields from RunLocalResult plus abort status.
+ */
+interface StreamResult extends RunLocalResult {
+    /** Whether the stream was aborted before completion */
+    aborted: boolean;
+}
+/**
+ * A streaming response from runLocal({ stream: true }).
+ *
+ * @example
+ * ```typescript
+ * const stream = tracia.runLocal({
+ *   model: 'gpt-4o',
+ *   messages: [{ role: 'user', content: 'Write a haiku' }],
+ *   stream: true,
+ * })
+ *
+ * // traceId is available immediately
+ * console.log('Trace:', stream.traceId)
+ *
+ * // Iterate over text chunks as they arrive
+ * for await (const chunk of stream) {
+ *   process.stdout.write(chunk)
+ * }
+ *
+ * // Get final result with usage stats after iteration completes
+ * const result = await stream.result
+ * console.log(result.usage)
+ * ```
+ *
+ * @remarks
+ * - You must iterate over the stream for the result promise to resolve
+ * - Calling abort() will stop the stream and resolve result with aborted: true
+ * - The stream can only be iterated once
+ */
+interface LocalStream {
+    /** Trace ID for this request, available immediately */
+    readonly traceId: string;
+    /** Async iterator yielding text chunks */
+    [Symbol.asyncIterator](): AsyncIterator<string>;
+    /**
+     * Promise that resolves to the final result after stream completes.
+     * Only resolves after the stream has been fully iterated or aborted.
+     */
+    readonly result: Promise<StreamResult>;
+    /** Abort the stream. The result promise will resolve with aborted: true */
+    abort(): void;
+}
+/**
+ * Input item for the Responses API.
+ * Can be a message (developer/user) or a function call output.
+ */
+type ResponsesInputItem = {
+    role: 'developer' | 'user';
+    content: string;
+} | {
+    type: 'function_call_output';
+    call_id: string;
+    output: string;
+} | ResponsesOutputItem;
+/**
+ * Output item from a Responses API call.
+ * These can be added back to input for multi-turn conversations.
+ */
+interface ResponsesOutputItem {
+    type: 'message' | 'function_call' | 'reasoning';
+    [key: string]: unknown;
+}
+/**
+ * Event yielded during Responses API streaming.
+ */
+type ResponsesEvent = {
+    type: 'text_delta';
+    data: string;
+} | {
+    type: 'text';
+    data: string;
+} | {
+    type: 'reasoning';
+    content: string;
+} | {
+    type: 'tool_call';
+    id: string;
+    callId: string;
+    name: string;
+    arguments: Record<string, unknown>;
+} | {
+    type: 'done';
+    usage: TokenUsage;
+};
+/**
+ * Input options for runResponses().
+ */
+interface RunResponsesInput {
+    /** Model to use (e.g., 'gpt-4o', 'o1', 'o3-mini') */
+    model: string;
+    /** Input items for the conversation */
+    input: ResponsesInputItem[];
+    /** Enable streaming. When true, returns ResponsesStream. When false/undefined, returns Promise<RunResponsesResult>. */
+    stream?: boolean;
+    /** Tool definitions for function calling */
+    tools?: ToolDefinition[];
+    /** Maximum output tokens */
+    maxOutputTokens?: number;
+    /** Provider API key override */
+    providerApiKey?: string;
+    /** AbortSignal to cancel the request (only used when stream: true) */
+    signal?: AbortSignal;
+    /** Timeout in milliseconds */
+    timeoutMs?: number;
+    /** Whether to send trace to Tracia (default: true) */
+    sendTrace?: boolean;
+    /** Custom trace ID */
+    traceId?: string;
+    /** Tags for the trace */
+    tags?: string[];
+    /** User ID for the trace */
+    userId?: string;
+    /** Session ID for the trace */
+    sessionId?: string;
+}
+/**
+ * Final result from a Responses API call.
+ */
+interface RunResponsesResult {
+    /** Final text output */
+    text: string;
+    /** Trace ID for this request */
+    traceId: string;
+    /** Latency in milliseconds */
+    latencyMs: number;
+    /** Token usage */
+    usage: TokenUsage;
+    /** Output items that can be added back to input for multi-turn */
+    outputItems: ResponsesOutputItem[];
+    /** Tool calls made by the model */
+    toolCalls: Array<{
+        id: string;
+        callId: string;
+        name: string;
+        arguments: Record<string, unknown>;
+    }>;
+    /** Whether the stream was aborted */
+    aborted: boolean;
+}
+/**
+ * A streaming response from runResponses({ stream: true }).
+ *
+ * @example
+ * ```typescript
+ * const stream = tracia.runResponses({
+ *   model: 'o3-mini',
+ *   input: [
+ *     { role: 'developer', content: 'You are a helpful assistant.' },
+ *     { role: 'user', content: 'What is 2+2?' },
+ *   ],
+ *   stream: true,
+ * })
+ *
+ * for await (const event of stream) {
+ *   if (event.type === 'text_delta') process.stdout.write(event.data)
+ *   if (event.type === 'reasoning') console.log('Thinking:', event.content)
+ *   if (event.type === 'tool_call') console.log('Tool:', event.name)
+ * }
+ *
+ * const result = await stream.result
+ * console.log('Output items:', result.outputItems)
+ * ```
+ */
+interface ResponsesStream {
+    /** Trace ID for this request, available immediately */
+    readonly traceId: string;
+    /** Async iterator yielding events */
+    [Symbol.asyncIterator](): AsyncIterator<ResponsesEvent>;
+    /** Promise that resolves to the final result after stream completes */
+    readonly result: Promise<RunResponsesResult>;
+    /** Abort the stream */
+    abort(): void;
+}
 
 interface HttpClientOptions {
     apiKey: string;
@@ -178,9 +527,15 @@ declare class Prompts {
     run(slug: string, variables?: RunVariables, options?: RunOptions): Promise<RunResult>;
 }
 
+/** @internal Symbol for setting pending traces map - not part of public API */
+declare const INTERNAL_SET_PENDING_TRACES: unique symbol;
 declare class Traces {
     private readonly client;
+    private pendingTraces;
     constructor(client: HttpClient);
+    /** @internal */
+    [INTERNAL_SET_PENDING_TRACES](map: Map<string, Promise<void>>): void;
+    create(payload: CreateTracePayload): Promise<CreateTraceResult>;
     get(traceId: string): Promise<Trace>;
     list(options?: ListTracesOptions): Promise<ListTracesResult>;
     evaluate(traceId: string, options: EvaluateOptions): Promise<EvaluateResult>;
@@ -198,9 +553,108 @@ declare const Eval: {
 };
 declare class Tracia {
     private readonly client;
+    private readonly pendingTraces;
+    private readonly onTraceError?;
     readonly prompts: Prompts;
     readonly traces: Traces;
     constructor(options: TraciaOptions);
+    /**
+     * Execute an LLM call locally using the Vercel AI SDK.
+     *
+     * @example Non-streaming (default)
+     * ```typescript
+     * const result = await tracia.runLocal({
+     *   model: 'gpt-4o',
+     *   messages: [{ role: 'user', content: 'Hello' }],
+     * })
+     * console.log(result.text)
+     * ```
+     *
+     * @example Streaming
+     * ```typescript
+     * const stream = tracia.runLocal({
+     *   model: 'gpt-4o',
+     *   messages: [{ role: 'user', content: 'Write a poem' }],
+     *   stream: true,
+     * })
+     *
+     * for await (const chunk of stream) {
+     *   process.stdout.write(chunk)
+     * }
+     *
+     * const result = await stream.result
+     * console.log('Tokens used:', result.usage.totalTokens)
+     * ```
+     */
+    runLocal(input: RunLocalInput & {
+        stream: true;
+    }): LocalStream;
+    runLocal(input: RunLocalInput & {
+        stream?: false;
+    }): Promise<RunLocalResult>;
+    private runLocalNonStreaming;
+    private runLocalStreaming;
+    /**
+     * Execute an LLM call using OpenAI's Responses API.
+     *
+     * The Responses API is OpenAI-specific and supports:
+     * - Reasoning models (o1, o3-mini) with reasoning summaries
+     * - Multi-turn conversations with output items
+     * - Different input format (developer/user roles, function_call_output)
+     *
+     * @example Non-streaming (default)
+     * ```typescript
+     * const result = await tracia.runResponses({
+     *   model: 'o3-mini',
+     *   input: [
+     *     { role: 'developer', content: 'You are helpful.' },
+     *     { role: 'user', content: 'What is 2+2?' },
+     *   ],
+     * })
+     * console.log(result.text)
+     * ```
+     *
+     * @example Streaming
+     * ```typescript
+     * const stream = tracia.runResponses({
+     *   model: 'o3-mini',
+     *   input: [
+     *     { role: 'developer', content: 'You are helpful.' },
+     *     { role: 'user', content: 'What is 2+2?' },
+     *   ],
+     *   stream: true,
+     * })
+     *
+     * for await (const event of stream) {
+     *   if (event.type === 'text_delta') process.stdout.write(event.data)
+     *   if (event.type === 'reasoning') console.log('Reasoning:', event.content)
+     *   if (event.type === 'tool_call') console.log('Tool:', event.name, event.arguments)
+     * }
+     *
+     * const result = await stream.result
+     * console.log('Output items:', result.outputItems)
+     * ```
+     */
+    runResponses(input: RunResponsesInput & {
+        stream: true;
+    }): ResponsesStream;
+    runResponses(input: RunResponsesInput & {
+        stream?: false;
+    }): Promise<RunResponsesResult>;
+    private runResponsesNonStreaming;
+    private runResponsesStreaming;
+    private validateResponsesInput;
+    private createResponsesStream;
+    private createLocalStream;
+    private combineAbortSignals;
+    flush(): Promise<void>;
+    private validateRunLocalInput;
+    private scheduleTraceCreation;
+    private createTraceWithRetry;
+    private delay;
+    private interpolateMessages;
+    private buildAssistantMessage;
+    private getProviderApiKey;
 }
 
-export { type CreatePromptOptions, Eval, type EvaluateOptions, type EvaluateResult, type ListTracesOptions, type ListTracesResult, type MessageRole, type Prompt, type PromptListItem, type PromptMessage, type RunOptions, type RunResult, type RunVariables, type TokenUsage, type Trace, type TraceListItem, type TraceStatus, Tracia, TraciaError, TraciaErrorCode, type TraciaOptions, type UpdatePromptOptions };
+export { type ContentPart, type CreatePromptOptions, type CreateTracePayload, type CreateTraceResult, Eval, type EvaluateOptions, type EvaluateResult, type FinishReason, type JsonSchemaProperty, LLMProvider, type ListTracesOptions, type ListTracesResult, type LocalPromptMessage, type LocalStream, type MessageRole, type Prompt, type PromptListItem, type PromptMessage, type ResponsesEvent, type ResponsesInputItem, type ResponsesOutputItem, type ResponsesStream, type RunLocalInput, type RunLocalResult, type RunOptions, type RunResponsesInput, type RunResponsesResult, type RunResult, type RunVariables, type StreamResult, type TextPart, type TokenUsage, type ToolCall, type ToolCallPart, type ToolChoice, type ToolDefinition, type ToolParameters, type Trace, type TraceListItem, type TraceStatus, Tracia, TraciaError, TraciaErrorCode, type TraciaOptions, type UpdatePromptOptions };
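Taken together, the new declarations above (the `onTraceError` option, tool-calling fields on `runLocal`, and `flush()`) suggest a usage flow like the following. This is a minimal sketch derived only from the types in this diff; the weather tool, environment variable name, and log messages are illustrative placeholders, not part of the package.

```typescript
// Sketch based on the 0.2.3 declarations; placeholder tool and env var names.
import { Tracia, type ToolDefinition } from 'tracia';

const tracia = new Tracia({
  apiKey: process.env.TRACIA_API_KEY ?? '',
  // New option: surface background trace-creation failures instead of losing them.
  onTraceError: (error, traceId) => {
    console.error(`trace ${traceId} failed to record:`, error.message);
  },
});

// Hypothetical tool using the new ToolDefinition / ToolParameters shapes.
const getWeather: ToolDefinition = {
  name: 'get_weather',
  description: 'Look up the current weather for a city',
  parameters: {
    type: 'object',
    properties: { location: { type: 'string', description: 'City name' } },
    required: ['location'],
  },
};

async function main() {
  const result = await tracia.runLocal({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'What is the weather in Paris?' }],
    tools: [getWeather],
    toolChoice: 'auto',
  });

  if (result.finishReason === 'tool_calls') {
    // result.message can be round-tripped into the next turn alongside a
    // 'tool' role message carrying toolCallId/toolName (see LocalPromptMessage).
    console.log('Model requested:', result.toolCalls.map((c) => c.name));
  } else {
    console.log(result.text);
  }

  // Also new: wait for any pending background trace writes before exiting.
  await tracia.flush();
}

main().catch(console.error);
```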