tracia 0.2.0 → 0.2.3

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as published.
package/dist/index.mjs CHANGED
@@ -20,12 +20,22 @@ var TraciaErrorCode = /* @__PURE__ */ ((TraciaErrorCode2) => {
  TraciaErrorCode2["INVALID_REQUEST"] = "INVALID_REQUEST";
  TraciaErrorCode2["NETWORK_ERROR"] = "NETWORK_ERROR";
  TraciaErrorCode2["TIMEOUT"] = "TIMEOUT";
+ TraciaErrorCode2["ABORTED"] = "ABORTED";
  TraciaErrorCode2["UNKNOWN"] = "UNKNOWN";
+ TraciaErrorCode2["MISSING_PROVIDER_SDK"] = "MISSING_PROVIDER_SDK";
+ TraciaErrorCode2["MISSING_PROVIDER_API_KEY"] = "MISSING_PROVIDER_API_KEY";
+ TraciaErrorCode2["UNSUPPORTED_MODEL"] = "UNSUPPORTED_MODEL";
  return TraciaErrorCode2;
  })(TraciaErrorCode || {});
+ var LLMProvider = /* @__PURE__ */ ((LLMProvider2) => {
+ LLMProvider2["OPENAI"] = "openai";
+ LLMProvider2["ANTHROPIC"] = "anthropic";
+ LLMProvider2["GOOGLE"] = "google";
+ return LLMProvider2;
+ })(LLMProvider || {});

  // src/client.ts
- var SDK_VERSION = "0.2.0";
+ var SDK_VERSION = "0.2.3";
  var DEFAULT_TIMEOUT_MS = 12e4;
  function mapApiErrorCodeToTraciaErrorCode(apiCode) {
  const codeMap = {
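The practical effect of this hunk for consumers is that provider-setup failures become distinguishable from generic errors: ABORTED, MISSING_PROVIDER_SDK, MISSING_PROVIDER_API_KEY, and UNSUPPORTED_MODEL are new in 0.2.3. A minimal TypeScript handling sketch follows; it is illustrative only, and it assumes TraciaError exposes its code as a "code" property (the constructor calls in this diff pass the code as the first argument, but the class body itself is not shown here).

import { TraciaError, TraciaErrorCode } from "tracia";

// Hypothetical helper (not part of the package): map the new 0.2.3 error
// codes to actionable hints. The `code` property is an assumption.
function hintFor(err: unknown): string {
  if (!(err instanceof TraciaError)) return String(err);
  const code = (err as TraciaError & { code?: string }).code;
  switch (code) {
    case TraciaErrorCode.MISSING_PROVIDER_SDK:
      return "Install the missing peer dependency (ai / @ai-sdk/*).";
    case TraciaErrorCode.MISSING_PROVIDER_API_KEY:
      return "Set the provider's env var or pass providerApiKey.";
    case TraciaErrorCode.UNSUPPORTED_MODEL:
      return "Model is not in the registry; pass 'provider' explicitly.";
    case TraciaErrorCode.ABORTED:
      return "The stream was cancelled via an AbortSignal or abort().";
    default:
      return err.message;
  }
}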
@@ -174,10 +184,510 @@ var Prompts = class {
  }
  };

+ // src/models.ts
+ var MODEL_TO_PROVIDER = {
+ // OpenAI
+ "chatgpt-4o-latest": "openai" /* OPENAI */,
+ "gpt-3.5-turbo": "openai" /* OPENAI */,
+ "gpt-3.5-turbo-0125": "openai" /* OPENAI */,
+ "gpt-3.5-turbo-1106": "openai" /* OPENAI */,
+ "gpt-3.5-turbo-16k": "openai" /* OPENAI */,
+ "gpt-4": "openai" /* OPENAI */,
+ "gpt-4-0125-preview": "openai" /* OPENAI */,
+ "gpt-4-1106-preview": "openai" /* OPENAI */,
+ "gpt-4-turbo": "openai" /* OPENAI */,
+ "gpt-4-turbo-2024-04-09": "openai" /* OPENAI */,
+ "gpt-4-turbo-preview": "openai" /* OPENAI */,
+ "gpt-4.1": "openai" /* OPENAI */,
+ "gpt-4.1-2025-04-14": "openai" /* OPENAI */,
+ "gpt-4.1-mini": "openai" /* OPENAI */,
+ "gpt-4.1-mini-2025-04-14": "openai" /* OPENAI */,
+ "gpt-4.1-nano": "openai" /* OPENAI */,
+ "gpt-4.1-nano-2025-04-14": "openai" /* OPENAI */,
+ "gpt-4o": "openai" /* OPENAI */,
+ "gpt-4o-2024-05-13": "openai" /* OPENAI */,
+ "gpt-4o-2024-08-06": "openai" /* OPENAI */,
+ "gpt-4o-2024-11-20": "openai" /* OPENAI */,
+ "gpt-4o-mini": "openai" /* OPENAI */,
+ "gpt-4o-mini-2024-07-18": "openai" /* OPENAI */,
+ "gpt-4o-mini-search-preview": "openai" /* OPENAI */,
+ "gpt-4o-mini-search-preview-2025-03-11": "openai" /* OPENAI */,
+ "gpt-4o-search-preview": "openai" /* OPENAI */,
+ "gpt-4o-search-preview-2025-03-11": "openai" /* OPENAI */,
+ "gpt-5": "openai" /* OPENAI */,
+ "gpt-5.1": "openai" /* OPENAI */,
+ "gpt-5.1-2025-11-13": "openai" /* OPENAI */,
+ "gpt-5.1-chat-latest": "openai" /* OPENAI */,
+ "gpt-5.2": "openai" /* OPENAI */,
+ "gpt-5.2-2025-12-11": "openai" /* OPENAI */,
+ "gpt-5.2-chat-latest": "openai" /* OPENAI */,
+ "gpt-5.2-pro": "openai" /* OPENAI */,
+ "gpt-5.2-pro-2025-12-11": "openai" /* OPENAI */,
+ "gpt-5-pro": "openai" /* OPENAI */,
+ "gpt-5-pro-2025-10-06": "openai" /* OPENAI */,
+ "gpt-5-2025-08-07": "openai" /* OPENAI */,
+ "gpt-5-chat-latest": "openai" /* OPENAI */,
+ "gpt-5-codex": "openai" /* OPENAI */,
+ "gpt-5.1-codex": "openai" /* OPENAI */,
+ "gpt-5.1-codex-max": "openai" /* OPENAI */,
+ "gpt-5.1-codex-mini": "openai" /* OPENAI */,
+ "gpt-5-mini": "openai" /* OPENAI */,
+ "gpt-5-mini-2025-08-07": "openai" /* OPENAI */,
+ "gpt-5-nano": "openai" /* OPENAI */,
+ "gpt-5-nano-2025-08-07": "openai" /* OPENAI */,
+ "o1": "openai" /* OPENAI */,
+ "o1-2024-12-17": "openai" /* OPENAI */,
+ "o1-pro": "openai" /* OPENAI */,
+ "o1-pro-2025-03-19": "openai" /* OPENAI */,
+ "o3": "openai" /* OPENAI */,
+ "o3-2025-04-16": "openai" /* OPENAI */,
+ "o3-mini": "openai" /* OPENAI */,
+ "o3-mini-2025-01-31": "openai" /* OPENAI */,
+ "o4-mini": "openai" /* OPENAI */,
+ "o4-mini-2025-04-16": "openai" /* OPENAI */,
+ // Anthropic
+ "claude-haiku-4-5-20251001": "anthropic" /* ANTHROPIC */,
+ "claude-haiku-4-5": "anthropic" /* ANTHROPIC */,
+ "claude-3-7-sonnet-20250219": "anthropic" /* ANTHROPIC */,
+ "claude-3-haiku-20240307": "anthropic" /* ANTHROPIC */,
+ "claude-3-opus-20240229": "anthropic" /* ANTHROPIC */,
+ "claude-4-opus-20250514": "anthropic" /* ANTHROPIC */,
+ "claude-4-sonnet-20250514": "anthropic" /* ANTHROPIC */,
+ "claude-sonnet-4-5": "anthropic" /* ANTHROPIC */,
+ "claude-sonnet-4-5-20250929": "anthropic" /* ANTHROPIC */,
+ "claude-opus-4-1": "anthropic" /* ANTHROPIC */,
+ "claude-opus-4-1-20250805": "anthropic" /* ANTHROPIC */,
+ "claude-opus-4-20250514": "anthropic" /* ANTHROPIC */,
+ "claude-opus-4-5-20251101": "anthropic" /* ANTHROPIC */,
+ "claude-opus-4-5": "anthropic" /* ANTHROPIC */,
+ "claude-sonnet-4-20250514": "anthropic" /* ANTHROPIC */,
+ // Google
+ "gemini-2.0-flash": "google" /* GOOGLE */,
+ "gemini-2.0-flash-001": "google" /* GOOGLE */,
+ "gemini-2.0-flash-exp": "google" /* GOOGLE */,
+ "gemini-2.0-flash-lite": "google" /* GOOGLE */,
+ "gemini-2.0-flash-lite-001": "google" /* GOOGLE */,
+ "gemini-2.5-flash": "google" /* GOOGLE */,
+ "gemini-2.5-flash-lite": "google" /* GOOGLE */,
+ "gemini-2.5-flash-lite-preview-09-2025": "google" /* GOOGLE */,
+ "gemini-2.5-flash-preview-09-2025": "google" /* GOOGLE */,
+ "gemini-2.5-pro": "google" /* GOOGLE */,
+ "gemini-3-pro-preview": "google" /* GOOGLE */,
+ "gemini-3-flash-preview": "google" /* GOOGLE */
+ };
+ function getProviderForModel(modelId) {
+ return MODEL_TO_PROVIDER[modelId];
+ }
+
+ // src/providers/ai-sdk.ts
+ var aiSdk = null;
+ var openaiProvider = null;
+ var anthropicProvider = null;
+ var googleProvider = null;
+ async function loadAISdk() {
+ if (aiSdk) return aiSdk;
+ try {
+ aiSdk = await import("ai");
+ return aiSdk;
+ } catch {
+ throw new TraciaError(
+ "MISSING_PROVIDER_SDK" /* MISSING_PROVIDER_SDK */,
+ "Vercel AI SDK not installed. Run: npm install ai"
+ );
+ }
+ }
+ async function loadOpenAIProvider() {
+ if (openaiProvider) return openaiProvider;
+ try {
+ openaiProvider = await import("@ai-sdk/openai");
+ return openaiProvider;
+ } catch {
+ throw new TraciaError(
+ "MISSING_PROVIDER_SDK" /* MISSING_PROVIDER_SDK */,
+ "OpenAI provider not installed. Run: npm install @ai-sdk/openai"
+ );
+ }
+ }
+ async function loadAnthropicProvider() {
+ if (anthropicProvider) return anthropicProvider;
+ try {
+ anthropicProvider = await import("@ai-sdk/anthropic");
+ return anthropicProvider;
+ } catch {
+ throw new TraciaError(
+ "MISSING_PROVIDER_SDK" /* MISSING_PROVIDER_SDK */,
+ "Anthropic provider not installed. Run: npm install @ai-sdk/anthropic"
+ );
+ }
+ }
+ async function loadGoogleProvider() {
+ if (googleProvider) return googleProvider;
+ try {
+ googleProvider = await import("@ai-sdk/google");
+ return googleProvider;
+ } catch {
+ throw new TraciaError(
+ "MISSING_PROVIDER_SDK" /* MISSING_PROVIDER_SDK */,
+ "Google provider not installed. Run: npm install @ai-sdk/google"
+ );
+ }
+ }
+ function combineAbortSignals(userSignal, timeoutMs) {
+ if (!timeoutMs && !userSignal) return void 0;
+ if (timeoutMs && !userSignal) return AbortSignal.timeout(timeoutMs);
+ if (!timeoutMs && userSignal) return userSignal;
+ const timeoutSignal = AbortSignal.timeout(timeoutMs);
+ const controller = new AbortController();
+ const cleanup = () => {
+ userSignal.removeEventListener("abort", onAbort);
+ timeoutSignal.removeEventListener("abort", onAbort);
+ };
+ const onAbort = () => {
+ cleanup();
+ controller.abort();
+ };
+ userSignal.addEventListener("abort", onAbort, { once: true });
+ timeoutSignal.addEventListener("abort", onAbort, { once: true });
+ return controller.signal;
+ }
+ function sanitizeErrorMessage(message) {
+ return message.replace(/\b(sk-|tr_|key-|api[_-]?key[=:\s]+)[a-zA-Z0-9_-]{10,}\b/gi, "[REDACTED]").replace(/Bearer\s+[a-zA-Z0-9_.-]+/gi, "Bearer [REDACTED]").replace(/Basic\s+[a-zA-Z0-9+/=]{20,}/gi, "Basic [REDACTED]").replace(/(authorization[=:\s]+)[^\s,}]+/gi, "$1[REDACTED]");
+ }
+ function resolveProvider(model, explicitProvider) {
+ if (explicitProvider) return explicitProvider;
+ const fromRegistry = getProviderForModel(model);
+ if (fromRegistry) return fromRegistry;
+ if (model.startsWith("gpt-") || model.startsWith("o1") || model.startsWith("o3") || model.startsWith("o4")) {
+ return "openai" /* OPENAI */;
+ }
+ if (model.startsWith("claude-")) {
+ return "anthropic" /* ANTHROPIC */;
+ }
+ if (model.startsWith("gemini-")) {
+ return "google" /* GOOGLE */;
+ }
+ throw new TraciaError(
+ "UNSUPPORTED_MODEL" /* UNSUPPORTED_MODEL */,
+ `Cannot determine provider for model: ${model}. Specify provider explicitly.`
+ );
+ }
+ async function getLanguageModel(provider, model, apiKey) {
+ switch (provider) {
+ case "openai" /* OPENAI */: {
+ const { createOpenAI } = await loadOpenAIProvider();
+ const openai = createOpenAI({ apiKey });
+ return openai(model);
+ }
+ case "anthropic" /* ANTHROPIC */: {
+ const { createAnthropic } = await loadAnthropicProvider();
+ const anthropic = createAnthropic({ apiKey });
+ return anthropic(model);
+ }
+ case "google" /* GOOGLE */: {
+ const { createGoogleGenerativeAI } = await loadGoogleProvider();
+ const google = createGoogleGenerativeAI({ apiKey });
+ return google(model);
+ }
+ default:
+ throw new TraciaError(
+ "UNSUPPORTED_MODEL" /* UNSUPPORTED_MODEL */,
+ `Unsupported provider: ${provider}`
+ );
+ }
+ }
+ function convertMessages(messages) {
+ return messages.map((msg) => {
+ if (msg.role === "tool") {
+ return {
+ role: "tool",
+ content: [{
+ type: "tool-result",
+ toolCallId: msg.toolCallId,
+ toolName: msg.toolName ?? msg.toolCallId,
+ // Use toolName, fallback to toolCallId
+ output: { type: "text", value: msg.content }
+ }]
+ };
+ }
+ if (msg.role === "assistant" && Array.isArray(msg.content)) {
+ if (msg.content.length === 0) {
+ return { role: "assistant", content: "" };
+ }
+ const convertedContent = msg.content.map((part) => {
+ if (part.type === "tool_call") {
+ const toolCall = part;
+ return {
+ type: "tool-call",
+ toolCallId: toolCall.id,
+ toolName: toolCall.name,
+ input: toolCall.arguments
+ };
+ }
+ return part;
+ });
+ return {
+ role: "assistant",
+ content: convertedContent
+ };
+ }
+ return {
+ role: msg.role,
+ content: typeof msg.content === "string" ? msg.content : msg.content.map((b) => b.type === "text" ? b.text : "").join("")
+ };
+ });
+ }
+ async function convertTools(tools) {
+ if (!tools || tools.length === 0) return void 0;
+ const { tool, jsonSchema } = await loadAISdk();
+ const result = {};
+ for (const toolDef of tools) {
+ result[toolDef.name] = tool({
+ description: toolDef.description,
+ inputSchema: jsonSchema(toolDef.parameters),
+ execute: async (args) => args
+ // No-op execute function
+ });
+ }
+ return result;
+ }
+ function convertToolChoice(toolChoice) {
+ if (!toolChoice) return void 0;
+ if (toolChoice === "auto") return "auto";
+ if (toolChoice === "none") return "none";
+ if (toolChoice === "required") return "required";
+ return { type: "tool", toolName: toolChoice.tool };
+ }
+ function parseFinishReason(reason) {
+ if (reason === "tool-calls") return "tool_calls";
+ if (reason === "length") return "max_tokens";
+ return "stop";
+ }
+ function extractToolCalls(toolCalls) {
+ if (!toolCalls) return [];
+ return toolCalls.filter((tc) => tc.toolCallId && tc.toolName).map((tc) => ({
+ id: tc.toolCallId,
+ name: tc.toolName,
+ arguments: tc.input ?? {}
+ }));
+ }
+ async function complete(options) {
+ const { generateText } = await loadAISdk();
+ const provider = resolveProvider(options.model, options.provider);
+ const model = await getLanguageModel(provider, options.model, options.apiKey);
+ const convertedMessages = convertMessages(options.messages);
+ const convertedTools = await convertTools(options.tools);
+ const convertedToolChoice = convertToolChoice(options.toolChoice);
+ try {
+ const result = await generateText({
+ model,
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ messages: convertedMessages,
+ temperature: options.temperature,
+ maxOutputTokens: options.maxOutputTokens,
+ topP: options.topP,
+ stopSequences: options.stopSequences,
+ tools: convertedTools,
+ toolChoice: convertedToolChoice,
+ abortSignal: options.timeoutMs ? AbortSignal.timeout(options.timeoutMs) : void 0
+ });
+ const toolCalls = extractToolCalls(result.toolCalls);
+ return {
+ text: result.text,
+ inputTokens: result.usage?.inputTokens ?? 0,
+ outputTokens: result.usage?.outputTokens ?? 0,
+ totalTokens: result.usage?.totalTokens ?? 0,
+ toolCalls,
+ finishReason: parseFinishReason(result.finishReason),
+ provider
+ };
+ } catch (error) {
+ if (error instanceof TraciaError) throw error;
+ const rawMessage = error instanceof Error ? error.message : String(error);
+ throw new TraciaError(
+ "PROVIDER_ERROR" /* PROVIDER_ERROR */,
+ `${provider} error: ${sanitizeErrorMessage(rawMessage)}`
+ );
+ }
+ }
+ function stream(options) {
+ const provider = resolveProvider(options.model, options.provider);
+ let resolveResult;
+ let rejectResult;
+ const resultPromise = new Promise((resolve, reject) => {
+ resolveResult = resolve;
+ rejectResult = reject;
+ });
+ async function* generateChunks() {
+ try {
+ const { streamText } = await loadAISdk();
+ const model = await getLanguageModel(provider, options.model, options.apiKey);
+ const convertedMessages = convertMessages(options.messages);
+ const convertedTools = await convertTools(options.tools);
+ const convertedToolChoice = convertToolChoice(options.toolChoice);
+ const abortSignal = combineAbortSignals(options.signal, options.timeoutMs);
+ const result = streamText({
+ model,
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ messages: convertedMessages,
+ temperature: options.temperature,
+ maxOutputTokens: options.maxOutputTokens,
+ topP: options.topP,
+ stopSequences: options.stopSequences,
+ tools: convertedTools,
+ toolChoice: convertedToolChoice,
+ abortSignal
+ });
+ for await (const chunk of result.textStream) {
+ yield chunk;
+ }
+ const [text, usageData, toolCallsData, finishReasonData] = await Promise.all([
+ result.text,
+ result.usage,
+ result.toolCalls,
+ result.finishReason
+ ]);
+ const toolCalls = extractToolCalls(toolCallsData);
+ resolveResult({
+ text,
+ inputTokens: usageData?.inputTokens ?? 0,
+ outputTokens: usageData?.outputTokens ?? 0,
+ totalTokens: usageData?.totalTokens ?? 0,
+ toolCalls,
+ finishReason: parseFinishReason(finishReasonData),
+ provider
+ });
+ } catch (error) {
+ if (error instanceof Error && error.name === "AbortError") {
+ const traciaError2 = new TraciaError("ABORTED" /* ABORTED */, "Stream aborted");
+ rejectResult(traciaError2);
+ throw traciaError2;
+ }
+ const rawMessage = error instanceof Error ? error.message : String(error);
+ const traciaError = error instanceof TraciaError ? error : new TraciaError(
+ "PROVIDER_ERROR" /* PROVIDER_ERROR */,
+ `${provider} error: ${sanitizeErrorMessage(rawMessage)}`
+ );
+ rejectResult(traciaError);
+ throw traciaError;
+ }
+ }
+ return {
+ chunks: generateChunks(),
+ result: resultPromise
+ };
+ }
+ function responsesStream(options) {
+ let resolveResult;
+ let rejectResult;
+ const resultPromise = new Promise((resolve, reject) => {
+ resolveResult = resolve;
+ rejectResult = reject;
+ });
+ async function* generateEvents() {
+ let fullText = "";
+ let usage = { inputTokens: 0, outputTokens: 0, totalTokens: 0 };
+ const outputItems = [];
+ const toolCalls = [];
+ let aborted = false;
+ try {
+ const { createOpenAI } = await loadOpenAIProvider();
+ const openai = createOpenAI({ apiKey: options.apiKey });
+ const model = openai.responses(options.model);
+ const { streamText } = await loadAISdk();
+ const convertedTools = options.tools ? await convertTools(options.tools) : void 0;
+ const abortSignal = combineAbortSignals(options.signal, options.timeoutMs);
+ const result = streamText({
+ model,
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ messages: options.input,
+ maxOutputTokens: options.maxOutputTokens,
+ tools: convertedTools,
+ abortSignal
+ });
+ for await (const chunk of result.textStream) {
+ fullText += chunk;
+ yield { type: "text_delta", data: chunk };
+ }
+ const [usageData, toolCallsData] = await Promise.all([
+ result.usage,
+ result.toolCalls
+ ]);
+ usage = {
+ inputTokens: usageData?.inputTokens ?? 0,
+ outputTokens: usageData?.outputTokens ?? 0,
+ totalTokens: usageData?.totalTokens ?? 0
+ };
+ if (toolCallsData) {
+ for (const tc of toolCallsData) {
+ if (!tc.toolCallId || !tc.toolName) continue;
+ const toolCall = {
+ id: tc.toolCallId,
+ callId: tc.toolCallId,
+ name: tc.toolName,
+ arguments: tc.input ?? {}
+ };
+ toolCalls.push(toolCall);
+ yield {
+ type: "tool_call",
+ id: toolCall.id,
+ callId: toolCall.callId,
+ name: toolCall.name,
+ arguments: toolCall.arguments
+ };
+ }
+ }
+ if (fullText) {
+ yield { type: "text", data: fullText };
+ outputItems.push({ type: "message", content: fullText });
+ }
+ yield { type: "done", usage };
+ resolveResult({
+ text: fullText,
+ usage,
+ outputItems,
+ toolCalls,
+ aborted
+ });
+ } catch (error) {
+ if (error instanceof Error && error.name === "AbortError") {
+ aborted = true;
+ resolveResult({
+ text: fullText,
+ usage,
+ outputItems,
+ toolCalls,
+ aborted
+ });
+ return;
+ }
+ const rawMessage = error instanceof Error ? error.message : String(error);
+ const traciaError = new TraciaError(
+ "PROVIDER_ERROR" /* PROVIDER_ERROR */,
+ `OpenAI Responses API error: ${sanitizeErrorMessage(rawMessage)}`
+ );
+ rejectResult(traciaError);
+ throw traciaError;
+ }
+ }
+ return {
+ events: generateEvents(),
+ result: resultPromise
+ };
+ }
+
  // src/traces.ts
+ var INTERNAL_SET_PENDING_TRACES = /* @__PURE__ */ Symbol("setPendingTracesMap");
  var Traces = class {
  constructor(client) {
  this.client = client;
+ this.pendingTraces = null;
+ }
+ /** @internal */
+ [INTERNAL_SET_PENDING_TRACES](map) {
+ this.pendingTraces = map;
+ }
+ async create(payload) {
+ return this.client.post("/api/v1/traces", payload);
  }
  async get(traceId) {
  return this.client.get(`/api/v1/traces/${encodeURIComponent(traceId)}`);
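A note on the combineAbortSignals helper added in this hunk: it races a caller-supplied signal against AbortSignal.timeout and aborts a fresh controller when either fires; on Node 20.3+ the built-in AbortSignal.any offers equivalent semantics. The standalone TypeScript sketch below reproduces only the merge behavior (it omits the diff's cross-listener cleanup) and is runnable on Node 17.3+; it is not part of the package.

// Whichever fires first (user abort or timeout) aborts the combined signal.
const user = new AbortController();
const timeout = AbortSignal.timeout(5_000);
const combined = new AbortController();

for (const src of [user.signal, timeout]) {
  src.addEventListener("abort", () => combined.abort(), { once: true });
}

combined.signal.addEventListener("abort", () => {
  console.log("combined signal aborted");
});

user.abort(); // user wins the race here; otherwise the timeout fires at 5s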
@@ -216,6 +726,12 @@ var Traces = class {
  return this.client.get(path);
  }
  async evaluate(traceId, options) {
+ if (this.pendingTraces) {
+ const pendingTrace = this.pendingTraces.get(traceId);
+ if (pendingTrace) {
+ await pendingTrace;
+ }
+ }
  if (typeof options.value !== "number") {
  throw new TraciaError(
  "INVALID_REQUEST" /* INVALID_REQUEST */,
@@ -236,27 +752,653 @@ var Traces = class {
  }
  };

+ // src/utils.ts
+ import crypto from "crypto";
+ var TRACE_ID_REGEX = /^tr_[a-f0-9]{16}$/i;
+ function generateTraceId() {
+ const randomPart = crypto.randomBytes(8).toString("hex");
+ return `tr_${randomPart}`;
+ }
+ function isValidTraceIdFormat(traceId) {
+ return TRACE_ID_REGEX.test(traceId);
+ }
+
  // src/index.ts
  var Eval = {
  POSITIVE: 1,
  NEGATIVE: 0
  };
  var DEFAULT_BASE_URL = "https://app.tracia.io";
+ var MAX_PENDING_TRACES = 1e3;
+ var TRACE_RETRY_ATTEMPTS = 2;
+ var TRACE_RETRY_DELAY_MS = 500;
+ var TRACE_STATUS_SUCCESS = "SUCCESS";
+ var TRACE_STATUS_ERROR = "ERROR";
+ var ENV_VAR_MAP = {
+ ["openai" /* OPENAI */]: "OPENAI_API_KEY",
+ ["anthropic" /* ANTHROPIC */]: "ANTHROPIC_API_KEY",
+ ["google" /* GOOGLE */]: "GOOGLE_API_KEY"
+ };
+ function convertResponsesItemToMessage(item) {
+ if ("role" in item && (item.role === "developer" || item.role === "user")) {
+ const messageItem = item;
+ return {
+ role: messageItem.role === "developer" ? "system" : "user",
+ content: messageItem.content
+ };
+ }
+ if ("type" in item && item.type === "function_call_output") {
+ const outputItem = item;
+ return {
+ role: "tool",
+ toolCallId: outputItem.call_id,
+ content: outputItem.output
+ };
+ }
+ if ("type" in item) {
+ return {
+ role: "assistant",
+ content: JSON.stringify(item)
+ };
+ }
+ return {
+ role: "user",
+ content: JSON.stringify(item)
+ };
+ }
  var Tracia = class {
  constructor(options) {
+ this.pendingTraces = /* @__PURE__ */ new Map();
  if (!options.apiKey) {
- throw new Error("apiKey is required");
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ "apiKey is required"
+ );
  }
  this.client = new HttpClient({
  apiKey: options.apiKey,
  baseUrl: DEFAULT_BASE_URL
  });
+ this.onTraceError = options.onTraceError;
  this.prompts = new Prompts(this.client);
  this.traces = new Traces(this.client);
+ this.traces[INTERNAL_SET_PENDING_TRACES](this.pendingTraces);
+ }
+ runLocal(input) {
+ if (input.stream === true) {
+ return this.runLocalStreaming(input);
+ }
+ return this.runLocalNonStreaming(input);
+ }
+ async runLocalNonStreaming(input) {
+ this.validateRunLocalInput(input);
+ let traceId = "";
+ if (input.sendTrace !== false) {
+ if (input.traceId && !isValidTraceIdFormat(input.traceId)) {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ `Invalid trace ID format. Must match: tr_ + 16 hex characters (e.g., tr_1234567890abcdef)`
+ );
+ }
+ traceId = input.traceId || generateTraceId();
+ }
+ const interpolatedMessages = this.interpolateMessages(input.messages, input.variables);
+ const provider = resolveProvider(input.model, input.provider);
+ const apiKey = this.getProviderApiKey(provider, input.providerApiKey);
+ const startTime = Date.now();
+ let completionResult = null;
+ let errorMessage = null;
+ try {
+ completionResult = await complete({
+ model: input.model,
+ messages: interpolatedMessages,
+ apiKey,
+ provider: input.provider,
+ temperature: input.temperature,
+ maxOutputTokens: input.maxOutputTokens,
+ topP: input.topP,
+ stopSequences: input.stopSequences,
+ tools: input.tools,
+ toolChoice: input.toolChoice,
+ timeoutMs: input.timeoutMs
+ });
+ } catch (error) {
+ if (error instanceof TraciaError) {
+ errorMessage = error.message;
+ } else {
+ errorMessage = error instanceof Error ? error.message : String(error);
+ }
+ }
+ const latencyMs = Date.now() - startTime;
+ if (traceId) {
+ this.scheduleTraceCreation(traceId, {
+ traceId,
+ model: input.model,
+ provider: completionResult?.provider ?? provider,
+ input: { messages: interpolatedMessages },
+ variables: input.variables ?? null,
+ output: completionResult?.text ?? null,
+ status: errorMessage ? TRACE_STATUS_ERROR : TRACE_STATUS_SUCCESS,
+ error: errorMessage,
+ latencyMs,
+ inputTokens: completionResult?.inputTokens ?? 0,
+ outputTokens: completionResult?.outputTokens ?? 0,
+ totalTokens: completionResult?.totalTokens ?? 0,
+ tags: input.tags,
+ userId: input.userId,
+ sessionId: input.sessionId,
+ temperature: input.temperature,
+ maxOutputTokens: input.maxOutputTokens,
+ topP: input.topP,
+ tools: input.tools,
+ toolCalls: completionResult?.toolCalls
+ });
+ }
+ if (errorMessage) {
+ throw new TraciaError("PROVIDER_ERROR" /* PROVIDER_ERROR */, errorMessage);
+ }
+ const toolCalls = completionResult.toolCalls;
+ const finishReason = completionResult.finishReason;
+ const message = this.buildAssistantMessage(completionResult.text, toolCalls);
+ return {
+ text: completionResult.text,
+ traceId,
+ latencyMs,
+ usage: {
+ inputTokens: completionResult.inputTokens,
+ outputTokens: completionResult.outputTokens,
+ totalTokens: completionResult.totalTokens
+ },
+ cost: null,
+ provider: completionResult.provider,
+ model: input.model,
+ toolCalls,
+ finishReason,
+ message
+ };
+ }
+ runLocalStreaming(input) {
+ this.validateRunLocalInput(input);
+ let traceId = "";
+ if (input.sendTrace !== false) {
+ if (input.traceId && !isValidTraceIdFormat(input.traceId)) {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ `Invalid trace ID format. Must match: tr_ + 16 hex characters (e.g., tr_1234567890abcdef)`
+ );
+ }
+ traceId = input.traceId || generateTraceId();
+ }
+ const interpolatedMessages = this.interpolateMessages(input.messages, input.variables);
+ const provider = resolveProvider(input.model, input.provider);
+ const apiKey = this.getProviderApiKey(provider, input.providerApiKey);
+ const abortController = new AbortController();
+ const combinedSignal = input.signal ? this.combineAbortSignals(input.signal, abortController.signal) : abortController.signal;
+ return this.createLocalStream(
+ input,
+ interpolatedMessages,
+ provider,
+ apiKey,
+ traceId,
+ combinedSignal,
+ abortController
+ );
+ }
+ runResponses(input) {
+ if (input.stream === true) {
+ return this.runResponsesStreaming(input);
+ }
+ return this.runResponsesNonStreaming(input);
+ }
+ async runResponsesNonStreaming(input) {
+ const stream2 = this.runResponsesStreaming(input);
+ for await (const _event of stream2) {
+ }
+ return stream2.result;
+ }
+ runResponsesStreaming(input) {
+ this.validateResponsesInput(input);
+ let traceId = "";
+ if (input.sendTrace !== false) {
+ if (input.traceId && !isValidTraceIdFormat(input.traceId)) {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ `Invalid trace ID format. Must match: tr_ + 16 hex characters (e.g., tr_1234567890abcdef)`
+ );
+ }
+ traceId = input.traceId || generateTraceId();
+ }
+ const apiKey = this.getProviderApiKey("openai" /* OPENAI */, input.providerApiKey);
+ const abortController = new AbortController();
+ const combinedSignal = input.signal ? this.combineAbortSignals(input.signal, abortController.signal) : abortController.signal;
+ return this.createResponsesStream(
+ input,
+ apiKey,
+ traceId,
+ combinedSignal,
+ abortController
+ );
+ }
+ validateResponsesInput(input) {
+ if (!input.model || input.model.trim() === "") {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ "model is required and cannot be empty"
+ );
+ }
+ if (!input.input || input.input.length === 0) {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ "input array is required and cannot be empty"
+ );
+ }
+ }
+ createResponsesStream(input, apiKey, traceId, signal, abortController) {
+ const startTime = Date.now();
+ let aborted = false;
+ let resolveResult;
+ let rejectResult;
+ const resultPromise = new Promise((resolve, reject) => {
+ resolveResult = resolve;
+ rejectResult = reject;
+ });
+ const providerStream = responsesStream({
+ model: input.model,
+ input: input.input,
+ apiKey,
+ tools: input.tools,
+ maxOutputTokens: input.maxOutputTokens,
+ timeoutMs: input.timeoutMs,
+ signal
+ });
+ let collectedText = "";
+ const scheduleTrace = this.scheduleTraceCreation.bind(this);
+ async function* wrappedEvents() {
+ try {
+ for await (const event of providerStream.events) {
+ if (event.type === "text_delta") {
+ collectedText += event.data;
+ }
+ yield event;
+ }
+ const providerResult = await providerStream.result;
+ const latencyMs = Date.now() - startTime;
+ if (traceId) {
+ scheduleTrace(traceId, {
+ traceId,
+ model: input.model,
+ provider: "openai" /* OPENAI */,
+ input: { messages: input.input.map((item) => convertResponsesItemToMessage(item)) },
+ variables: null,
+ output: providerResult.text,
+ status: providerResult.aborted ? TRACE_STATUS_ERROR : TRACE_STATUS_SUCCESS,
+ error: providerResult.aborted ? "Stream aborted" : null,
+ latencyMs,
+ inputTokens: providerResult.usage.inputTokens,
+ outputTokens: providerResult.usage.outputTokens,
+ totalTokens: providerResult.usage.totalTokens,
+ tags: input.tags,
+ userId: input.userId,
+ sessionId: input.sessionId,
+ tools: input.tools,
+ toolCalls: providerResult.toolCalls.map((tc) => ({
+ id: tc.id,
+ name: tc.name,
+ arguments: tc.arguments
+ }))
+ });
+ }
+ resolveResult({
+ text: providerResult.text,
+ traceId,
+ latencyMs,
+ usage: providerResult.usage,
+ outputItems: providerResult.outputItems,
+ toolCalls: providerResult.toolCalls,
+ aborted: providerResult.aborted
+ });
+ } catch (error) {
+ const latencyMs = Date.now() - startTime;
+ const isAborted = aborted || signal.aborted;
+ const errorMessage = isAborted ? "Stream aborted" : error instanceof Error ? error.message : String(error);
+ if (traceId) {
+ scheduleTrace(traceId, {
+ traceId,
+ model: input.model,
+ provider: "openai" /* OPENAI */,
+ input: { messages: input.input.map((item) => convertResponsesItemToMessage(item)) },
+ variables: null,
+ output: collectedText || null,
+ status: TRACE_STATUS_ERROR,
+ error: errorMessage,
+ latencyMs,
+ inputTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0,
+ tags: input.tags,
+ userId: input.userId,
+ sessionId: input.sessionId,
+ tools: input.tools
+ });
+ }
+ if (isAborted) {
+ resolveResult({
+ text: collectedText,
+ traceId,
+ latencyMs,
+ usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
+ outputItems: [],
+ toolCalls: [],
+ aborted: true
+ });
+ } else {
+ const traciaError = error instanceof TraciaError ? error : new TraciaError("PROVIDER_ERROR" /* PROVIDER_ERROR */, errorMessage);
+ rejectResult(traciaError);
+ throw traciaError;
+ }
+ }
+ }
+ const asyncIterator = wrappedEvents();
+ return {
+ traceId,
+ [Symbol.asyncIterator]() {
+ return asyncIterator;
+ },
+ result: resultPromise,
+ abort() {
+ aborted = true;
+ abortController.abort();
+ }
+ };
+ }
+ createLocalStream(input, interpolatedMessages, provider, apiKey, traceId, signal, abortController) {
+ const startTime = Date.now();
+ let aborted = false;
+ let resolveResult;
+ let rejectResult;
+ const resultPromise = new Promise((resolve, reject) => {
+ resolveResult = resolve;
+ rejectResult = reject;
+ });
+ const providerStream = stream({
+ model: input.model,
+ messages: interpolatedMessages,
+ apiKey,
+ provider: input.provider,
+ temperature: input.temperature,
+ maxOutputTokens: input.maxOutputTokens,
+ topP: input.topP,
+ stopSequences: input.stopSequences,
+ tools: input.tools,
+ toolChoice: input.toolChoice,
+ timeoutMs: input.timeoutMs,
+ signal
+ });
+ let collectedText = "";
+ const scheduleTrace = this.scheduleTraceCreation.bind(this);
+ const buildAssistantMessage = this.buildAssistantMessage.bind(this);
+ async function* wrappedChunks() {
+ try {
+ for await (const chunk of providerStream.chunks) {
+ collectedText += chunk;
+ yield chunk;
+ }
+ const completionResult = await providerStream.result;
+ const latencyMs = Date.now() - startTime;
+ if (traceId) {
+ scheduleTrace(traceId, {
+ traceId,
+ model: input.model,
+ provider: completionResult.provider,
+ input: { messages: interpolatedMessages },
+ variables: input.variables ?? null,
+ output: completionResult.text,
+ status: TRACE_STATUS_SUCCESS,
+ error: null,
+ latencyMs,
+ inputTokens: completionResult.inputTokens,
+ outputTokens: completionResult.outputTokens,
+ totalTokens: completionResult.totalTokens,
+ tags: input.tags,
+ userId: input.userId,
+ sessionId: input.sessionId,
+ temperature: input.temperature,
+ maxOutputTokens: input.maxOutputTokens,
+ topP: input.topP,
+ tools: input.tools,
+ toolCalls: completionResult.toolCalls
+ });
+ }
+ const toolCalls = completionResult.toolCalls;
+ const finishReason = completionResult.finishReason;
+ const message = buildAssistantMessage(completionResult.text, toolCalls);
+ resolveResult({
+ text: completionResult.text,
+ traceId,
+ latencyMs,
+ usage: {
+ inputTokens: completionResult.inputTokens,
+ outputTokens: completionResult.outputTokens,
+ totalTokens: completionResult.totalTokens
+ },
+ cost: null,
+ provider: completionResult.provider,
+ model: input.model,
+ aborted: false,
+ toolCalls,
+ finishReason,
+ message
+ });
+ } catch (error) {
+ const latencyMs = Date.now() - startTime;
+ const isAborted = aborted || signal.aborted;
+ const errorMessage = isAborted ? "Stream aborted" : error instanceof Error ? error.message : String(error);
+ if (traceId) {
+ scheduleTrace(traceId, {
+ traceId,
+ model: input.model,
+ provider,
+ input: { messages: interpolatedMessages },
+ variables: input.variables ?? null,
+ output: collectedText || null,
+ status: TRACE_STATUS_ERROR,
+ error: errorMessage,
+ latencyMs,
+ inputTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0,
+ tags: input.tags,
+ userId: input.userId,
+ sessionId: input.sessionId,
+ temperature: input.temperature,
+ maxOutputTokens: input.maxOutputTokens,
+ topP: input.topP
+ });
+ }
+ if (isAborted) {
+ const abortedMessage = buildAssistantMessage(collectedText, []);
+ resolveResult({
+ text: collectedText,
+ traceId,
+ latencyMs,
+ usage: {
+ inputTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0
+ },
+ cost: null,
+ provider,
+ model: input.model,
+ aborted: true,
+ toolCalls: [],
+ finishReason: "stop",
+ message: abortedMessage
+ });
+ } else {
+ const traciaError = error instanceof TraciaError ? error : new TraciaError("PROVIDER_ERROR" /* PROVIDER_ERROR */, errorMessage);
+ rejectResult(traciaError);
+ throw traciaError;
+ }
+ }
+ }
+ const asyncIterator = wrappedChunks();
+ return {
+ traceId,
+ [Symbol.asyncIterator]() {
+ return asyncIterator;
+ },
+ result: resultPromise,
+ abort() {
+ aborted = true;
+ abortController.abort();
+ }
+ };
+ }
+ combineAbortSignals(signal1, signal2) {
+ const controller = new AbortController();
+ if (signal1.aborted || signal2.aborted) {
+ controller.abort();
+ return controller.signal;
+ }
+ const onAbort = () => {
+ signal1.removeEventListener("abort", onAbort);
+ signal2.removeEventListener("abort", onAbort);
+ controller.abort();
+ };
+ signal1.addEventListener("abort", onAbort, { once: true });
+ signal2.addEventListener("abort", onAbort, { once: true });
+ return controller.signal;
+ }
+ async flush() {
+ await Promise.all(this.pendingTraces.values());
+ }
+ validateRunLocalInput(input) {
+ if (!input.model || input.model.trim() === "") {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ "model is required and cannot be empty"
+ );
+ }
+ if (!input.messages || input.messages.length === 0) {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ "messages array is required and cannot be empty"
+ );
+ }
+ for (const message of input.messages) {
+ if (message.role === "tool") {
+ if (!message.toolCallId) {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ `Tool messages must include toolCallId. Example: { role: "tool", toolCallId: "call_123", content: '{"result": "data"}' }`
+ );
+ }
+ if (typeof message.content !== "string") {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ `Tool message content must be a string (the tool result). Example: { role: "tool", toolCallId: "call_123", content: '{"result": "data"}' }`
+ );
+ }
+ }
+ }
+ }
+ scheduleTraceCreation(traceId, payload) {
+ if (this.pendingTraces.size >= MAX_PENDING_TRACES) {
+ const oldestTraceId = this.pendingTraces.keys().next().value;
+ if (oldestTraceId) {
+ this.pendingTraces.delete(oldestTraceId);
+ }
+ }
+ const tracePromise = this.createTraceWithRetry(traceId, payload);
+ this.pendingTraces.set(traceId, tracePromise);
+ tracePromise.finally(() => this.pendingTraces.delete(traceId));
+ }
+ async createTraceWithRetry(traceId, payload) {
+ let lastError = null;
+ for (let attempt = 0; attempt <= TRACE_RETRY_ATTEMPTS; attempt++) {
+ try {
+ await this.traces.create(payload);
+ return;
+ } catch (error) {
+ lastError = error instanceof Error ? error : new Error(String(error));
+ if (attempt < TRACE_RETRY_ATTEMPTS) {
+ await this.delay(TRACE_RETRY_DELAY_MS * (attempt + 1));
+ }
+ }
+ }
+ if (this.onTraceError && lastError) {
+ this.onTraceError(lastError, traceId);
+ }
+ }
+ delay(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+ }
+ interpolateMessages(messages, variables) {
+ if (!variables) return messages;
+ return messages.map((message) => {
+ if (typeof message.content === "string") {
+ return {
+ ...message,
+ content: message.content.replace(
+ /\{\{(\w+)\}\}/g,
+ (match, key) => variables[key] ?? match
+ )
+ };
+ }
+ if (message.role === "tool") {
+ return message;
+ }
+ return {
+ ...message,
+ content: message.content.map((block) => {
+ if (block.type === "text") {
+ return {
+ ...block,
+ text: block.text.replace(
+ /\{\{(\w+)\}\}/g,
+ (match, key) => variables[key] ?? match
+ )
+ };
+ }
+ return block;
+ })
+ };
+ });
+ }
+ buildAssistantMessage(text, toolCalls) {
+ if (toolCalls.length === 0) {
+ return { role: "assistant", content: text };
+ }
+ const contentParts = [];
+ if (text) {
+ contentParts.push({ type: "text", text });
+ }
+ for (const toolCall of toolCalls) {
+ contentParts.push({
+ type: "tool_call",
+ id: toolCall.id,
+ name: toolCall.name,
+ arguments: toolCall.arguments
+ });
+ }
+ return { role: "assistant", content: contentParts };
+ }
+ getProviderApiKey(provider, override) {
+ if (override) return override;
+ const envVar = ENV_VAR_MAP[provider];
+ const key = process.env[envVar];
+ if (!key) {
+ throw new TraciaError(
+ "MISSING_PROVIDER_API_KEY" /* MISSING_PROVIDER_API_KEY */,
+ `Missing API key for ${provider}. Set the ${envVar} environment variable or provide providerApiKey in options.`
+ );
+ }
+ return key;
  }
  };
  export {
  Eval,
+ LLMProvider,
  Tracia,
  TraciaError,
  TraciaErrorCode
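
Taken together, the new surface in 0.2.3 supports streaming local runs with variable interpolation, cooperative cancellation, and background trace upload. The end-to-end TypeScript sketch below is assembled from this diff, not from official docs; it assumes ANTHROPIC_API_KEY is set and the "ai" and "@ai-sdk/anthropic" peer dependencies are installed, and its types are illustrative.

import { Tracia } from "tracia";

async function main(): Promise<void> {
  const tracia = new Tracia({
    apiKey: process.env.TRACIA_API_KEY!,
    // Called after the built-in retries (2 attempts, backoff) are exhausted.
    onTraceError: (err, traceId) => console.error("trace upload failed:", traceId, err),
  });

  const run = tracia.runLocal({
    model: "claude-sonnet-4-5",
    stream: true,
    messages: [{ role: "user", content: "Summarize {{topic}} in one sentence." }],
    variables: { topic: "abort signals" }, // interpolated into {{topic}}
  });

  const timer = setTimeout(() => run.abort(), 10_000); // cancel slow runs

  for await (const chunk of run) {
    process.stdout.write(chunk);
  }
  clearTimeout(timer);

  // On abort the stream ends normally and result resolves with aborted: true.
  const result = await run.result;
  console.log("\naborted:", result.aborted, "tokens:", result.usage.totalTokens);

  await tracia.flush(); // drain background trace uploads before exit
}

main();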