tracia 0.1.1 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their public registries.
package/dist/index.mjs CHANGED
@@ -20,13 +20,23 @@ var TraciaErrorCode = /* @__PURE__ */ ((TraciaErrorCode2) => {
  TraciaErrorCode2["INVALID_REQUEST"] = "INVALID_REQUEST";
  TraciaErrorCode2["NETWORK_ERROR"] = "NETWORK_ERROR";
  TraciaErrorCode2["TIMEOUT"] = "TIMEOUT";
+ TraciaErrorCode2["ABORTED"] = "ABORTED";
  TraciaErrorCode2["UNKNOWN"] = "UNKNOWN";
+ TraciaErrorCode2["MISSING_PROVIDER_SDK"] = "MISSING_PROVIDER_SDK";
+ TraciaErrorCode2["MISSING_PROVIDER_API_KEY"] = "MISSING_PROVIDER_API_KEY";
+ TraciaErrorCode2["UNSUPPORTED_MODEL"] = "UNSUPPORTED_MODEL";
  return TraciaErrorCode2;
  })(TraciaErrorCode || {});
+ var LLMProvider = /* @__PURE__ */ ((LLMProvider2) => {
+ LLMProvider2["OPENAI"] = "openai";
+ LLMProvider2["ANTHROPIC"] = "anthropic";
+ LLMProvider2["GOOGLE"] = "google";
+ return LLMProvider2;
+ })(LLMProvider || {});

  // src/client.ts
- var SDK_VERSION = "0.1.1";
- var DEFAULT_TIMEOUT_MS = 3e4;
+ var SDK_VERSION = "0.2.2";
+ var DEFAULT_TIMEOUT_MS = 12e4;
  function mapApiErrorCodeToTraciaErrorCode(apiCode) {
  const codeMap = {
  UNAUTHORIZED: "UNAUTHORIZED" /* UNAUTHORIZED */,
@@ -174,10 +184,511 @@ var Prompts = class {
  }
  };

+ // src/models.ts
+ var MODEL_TO_PROVIDER = {
+ // OpenAI
+ "chatgpt-4o-latest": "openai" /* OPENAI */,
+ "gpt-3.5-turbo": "openai" /* OPENAI */,
+ "gpt-3.5-turbo-0125": "openai" /* OPENAI */,
+ "gpt-3.5-turbo-1106": "openai" /* OPENAI */,
+ "gpt-3.5-turbo-16k": "openai" /* OPENAI */,
+ "gpt-4": "openai" /* OPENAI */,
+ "gpt-4-0125-preview": "openai" /* OPENAI */,
+ "gpt-4-1106-preview": "openai" /* OPENAI */,
+ "gpt-4-turbo": "openai" /* OPENAI */,
+ "gpt-4-turbo-2024-04-09": "openai" /* OPENAI */,
+ "gpt-4-turbo-preview": "openai" /* OPENAI */,
+ "gpt-4.1": "openai" /* OPENAI */,
+ "gpt-4.1-2025-04-14": "openai" /* OPENAI */,
+ "gpt-4.1-mini": "openai" /* OPENAI */,
+ "gpt-4.1-mini-2025-04-14": "openai" /* OPENAI */,
+ "gpt-4.1-nano": "openai" /* OPENAI */,
+ "gpt-4.1-nano-2025-04-14": "openai" /* OPENAI */,
+ "gpt-4o": "openai" /* OPENAI */,
+ "gpt-4o-2024-05-13": "openai" /* OPENAI */,
+ "gpt-4o-2024-08-06": "openai" /* OPENAI */,
+ "gpt-4o-2024-11-20": "openai" /* OPENAI */,
+ "gpt-4o-mini": "openai" /* OPENAI */,
+ "gpt-4o-mini-2024-07-18": "openai" /* OPENAI */,
+ "gpt-4o-mini-search-preview": "openai" /* OPENAI */,
+ "gpt-4o-mini-search-preview-2025-03-11": "openai" /* OPENAI */,
+ "gpt-4o-search-preview": "openai" /* OPENAI */,
+ "gpt-4o-search-preview-2025-03-11": "openai" /* OPENAI */,
+ "gpt-5": "openai" /* OPENAI */,
+ "gpt-5.1": "openai" /* OPENAI */,
+ "gpt-5.1-2025-11-13": "openai" /* OPENAI */,
+ "gpt-5.1-chat-latest": "openai" /* OPENAI */,
+ "gpt-5.2": "openai" /* OPENAI */,
+ "gpt-5.2-2025-12-11": "openai" /* OPENAI */,
+ "gpt-5.2-chat-latest": "openai" /* OPENAI */,
+ "gpt-5.2-pro": "openai" /* OPENAI */,
+ "gpt-5.2-pro-2025-12-11": "openai" /* OPENAI */,
+ "gpt-5-pro": "openai" /* OPENAI */,
+ "gpt-5-pro-2025-10-06": "openai" /* OPENAI */,
+ "gpt-5-2025-08-07": "openai" /* OPENAI */,
+ "gpt-5-chat-latest": "openai" /* OPENAI */,
+ "gpt-5-codex": "openai" /* OPENAI */,
+ "gpt-5.1-codex": "openai" /* OPENAI */,
+ "gpt-5.1-codex-max": "openai" /* OPENAI */,
+ "gpt-5.1-codex-mini": "openai" /* OPENAI */,
+ "gpt-5-mini": "openai" /* OPENAI */,
+ "gpt-5-mini-2025-08-07": "openai" /* OPENAI */,
+ "gpt-5-nano": "openai" /* OPENAI */,
+ "gpt-5-nano-2025-08-07": "openai" /* OPENAI */,
+ "o1": "openai" /* OPENAI */,
+ "o1-2024-12-17": "openai" /* OPENAI */,
+ "o1-pro": "openai" /* OPENAI */,
+ "o1-pro-2025-03-19": "openai" /* OPENAI */,
+ "o3": "openai" /* OPENAI */,
+ "o3-2025-04-16": "openai" /* OPENAI */,
+ "o3-mini": "openai" /* OPENAI */,
+ "o3-mini-2025-01-31": "openai" /* OPENAI */,
+ "o4-mini": "openai" /* OPENAI */,
+ "o4-mini-2025-04-16": "openai" /* OPENAI */,
+ // Anthropic
+ "claude-haiku-4-5-20251001": "anthropic" /* ANTHROPIC */,
+ "claude-haiku-4-5": "anthropic" /* ANTHROPIC */,
+ "claude-3-7-sonnet-20250219": "anthropic" /* ANTHROPIC */,
+ "claude-3-haiku-20240307": "anthropic" /* ANTHROPIC */,
+ "claude-3-opus-20240229": "anthropic" /* ANTHROPIC */,
+ "claude-4-opus-20250514": "anthropic" /* ANTHROPIC */,
+ "claude-4-sonnet-20250514": "anthropic" /* ANTHROPIC */,
+ "claude-sonnet-4-5": "anthropic" /* ANTHROPIC */,
+ "claude-sonnet-4-5-20250929": "anthropic" /* ANTHROPIC */,
+ "claude-opus-4-1": "anthropic" /* ANTHROPIC */,
+ "claude-opus-4-1-20250805": "anthropic" /* ANTHROPIC */,
+ "claude-opus-4-20250514": "anthropic" /* ANTHROPIC */,
+ "claude-opus-4-5-20251101": "anthropic" /* ANTHROPIC */,
+ "claude-opus-4-5": "anthropic" /* ANTHROPIC */,
+ "claude-sonnet-4-20250514": "anthropic" /* ANTHROPIC */,
+ // Google
+ "gemini-2.0-flash": "google" /* GOOGLE */,
+ "gemini-2.0-flash-001": "google" /* GOOGLE */,
+ "gemini-2.0-flash-exp": "google" /* GOOGLE */,
+ "gemini-2.0-flash-lite": "google" /* GOOGLE */,
+ "gemini-2.0-flash-lite-001": "google" /* GOOGLE */,
+ "gemini-2.5-flash": "google" /* GOOGLE */,
+ "gemini-2.5-flash-lite": "google" /* GOOGLE */,
+ "gemini-2.5-flash-lite-preview-09-2025": "google" /* GOOGLE */,
+ "gemini-2.5-flash-preview-09-2025": "google" /* GOOGLE */,
+ "gemini-2.5-pro": "google" /* GOOGLE */,
+ "gemini-3-pro-preview": "google" /* GOOGLE */,
+ "gemini-3-flash-preview": "google" /* GOOGLE */
+ };
+ function getProviderForModel(modelId) {
+ return MODEL_TO_PROVIDER[modelId];
+ }
+
+ // src/providers/ai-sdk.ts
+ var aiSdk = null;
+ var openaiProvider = null;
+ var anthropicProvider = null;
+ var googleProvider = null;
+ async function loadAISdk() {
+ if (aiSdk) return aiSdk;
+ try {
+ aiSdk = await import("ai");
+ return aiSdk;
+ } catch {
+ throw new TraciaError(
+ "MISSING_PROVIDER_SDK" /* MISSING_PROVIDER_SDK */,
+ "Vercel AI SDK not installed. Run: npm install ai"
+ );
+ }
+ }
+ async function loadOpenAIProvider() {
+ if (openaiProvider) return openaiProvider;
+ try {
+ openaiProvider = await import("@ai-sdk/openai");
+ return openaiProvider;
+ } catch {
+ throw new TraciaError(
+ "MISSING_PROVIDER_SDK" /* MISSING_PROVIDER_SDK */,
+ "OpenAI provider not installed. Run: npm install @ai-sdk/openai"
+ );
+ }
+ }
+ async function loadAnthropicProvider() {
+ if (anthropicProvider) return anthropicProvider;
+ try {
+ anthropicProvider = await import("@ai-sdk/anthropic");
+ return anthropicProvider;
+ } catch {
+ throw new TraciaError(
+ "MISSING_PROVIDER_SDK" /* MISSING_PROVIDER_SDK */,
+ "Anthropic provider not installed. Run: npm install @ai-sdk/anthropic"
+ );
+ }
+ }
+ async function loadGoogleProvider() {
+ if (googleProvider) return googleProvider;
+ try {
+ googleProvider = await import("@ai-sdk/google");
+ return googleProvider;
+ } catch {
+ throw new TraciaError(
+ "MISSING_PROVIDER_SDK" /* MISSING_PROVIDER_SDK */,
+ "Google provider not installed. Run: npm install @ai-sdk/google"
+ );
+ }
+ }
+ function combineAbortSignals(userSignal, timeoutMs) {
+ if (!timeoutMs && !userSignal) return void 0;
+ if (timeoutMs && !userSignal) return AbortSignal.timeout(timeoutMs);
+ if (!timeoutMs && userSignal) return userSignal;
+ const timeoutSignal = AbortSignal.timeout(timeoutMs);
+ const controller = new AbortController();
+ const cleanup = () => {
+ userSignal.removeEventListener("abort", onAbort);
+ timeoutSignal.removeEventListener("abort", onAbort);
+ };
+ const onAbort = () => {
+ cleanup();
+ controller.abort();
+ };
+ userSignal.addEventListener("abort", onAbort, { once: true });
+ timeoutSignal.addEventListener("abort", onAbort, { once: true });
+ return controller.signal;
+ }
+ function sanitizeErrorMessage(message) {
+ return message.replace(/\b(sk-|tr_|key-|api[_-]?key[=:\s]+)[a-zA-Z0-9_-]{10,}\b/gi, "[REDACTED]").replace(/Bearer\s+[a-zA-Z0-9_.-]+/gi, "Bearer [REDACTED]").replace(/Basic\s+[a-zA-Z0-9+/=]{20,}/gi, "Basic [REDACTED]").replace(/(authorization[=:\s]+)[^\s,}]+/gi, "$1[REDACTED]");
+ }
+ function resolveProvider(model, explicitProvider) {
+ if (explicitProvider) return explicitProvider;
+ const fromRegistry = getProviderForModel(model);
+ if (fromRegistry) return fromRegistry;
+ if (model.startsWith("gpt-") || model.startsWith("o1") || model.startsWith("o3") || model.startsWith("o4")) {
+ return "openai" /* OPENAI */;
+ }
+ if (model.startsWith("claude-")) {
+ return "anthropic" /* ANTHROPIC */;
+ }
+ if (model.startsWith("gemini-")) {
+ return "google" /* GOOGLE */;
+ }
+ throw new TraciaError(
+ "UNSUPPORTED_MODEL" /* UNSUPPORTED_MODEL */,
+ `Cannot determine provider for model: ${model}. Specify provider explicitly.`
+ );
+ }
+ async function getLanguageModel(provider, model, apiKey) {
+ switch (provider) {
+ case "openai" /* OPENAI */: {
+ const { createOpenAI } = await loadOpenAIProvider();
+ const openai = createOpenAI({ apiKey });
+ return openai(model);
+ }
+ case "anthropic" /* ANTHROPIC */: {
+ const { createAnthropic } = await loadAnthropicProvider();
+ const anthropic = createAnthropic({ apiKey });
+ return anthropic(model);
+ }
+ case "google" /* GOOGLE */: {
+ const { createGoogleGenerativeAI } = await loadGoogleProvider();
+ const google = createGoogleGenerativeAI({ apiKey });
+ return google(model);
+ }
+ default:
+ throw new TraciaError(
+ "UNSUPPORTED_MODEL" /* UNSUPPORTED_MODEL */,
+ `Unsupported provider: ${provider}`
+ );
+ }
+ }
+ function convertMessages(messages) {
+ return messages.map((msg) => {
+ if (msg.role === "tool") {
+ return {
+ role: "tool",
+ content: [{
+ type: "tool-result",
+ toolCallId: msg.toolCallId,
+ toolName: msg.toolCallId,
+ // AI SDK requires toolName, use toolCallId as fallback
+ input: {},
+ output: msg.content
+ }]
+ };
+ }
+ if (msg.role === "assistant" && Array.isArray(msg.content)) {
+ if (msg.content.length === 0) {
+ return { role: "assistant", content: "" };
+ }
+ const convertedContent = msg.content.map((part) => {
+ if (part.type === "tool_call") {
+ const toolCall = part;
+ return {
+ type: "tool-call",
+ toolCallId: toolCall.id,
+ toolName: toolCall.name,
+ input: toolCall.arguments
+ };
+ }
+ return part;
+ });
+ return {
+ role: "assistant",
+ content: convertedContent
+ };
+ }
+ return {
+ role: msg.role,
+ content: typeof msg.content === "string" ? msg.content : msg.content.map((b) => b.type === "text" ? b.text : "").join("")
+ };
+ });
+ }
+ async function convertTools(tools) {
+ if (!tools || tools.length === 0) return void 0;
+ const { tool, jsonSchema } = await loadAISdk();
+ const result = {};
+ for (const toolDef of tools) {
+ result[toolDef.name] = tool({
+ description: toolDef.description,
+ inputSchema: jsonSchema(toolDef.parameters),
+ execute: async (args) => args
+ // No-op execute function
+ });
+ }
+ return result;
+ }
+ function convertToolChoice(toolChoice) {
+ if (!toolChoice) return void 0;
+ if (toolChoice === "auto") return "auto";
+ if (toolChoice === "none") return "none";
+ if (toolChoice === "required") return "required";
+ return { type: "tool", toolName: toolChoice.tool };
+ }
+ function parseFinishReason(reason) {
+ if (reason === "tool-calls") return "tool_calls";
+ if (reason === "length") return "max_tokens";
+ return "stop";
+ }
+ function extractToolCalls(toolCalls) {
+ if (!toolCalls) return [];
+ return toolCalls.filter((tc) => tc.toolCallId && tc.toolName).map((tc) => ({
+ id: tc.toolCallId,
+ name: tc.toolName,
+ arguments: tc.input ?? {}
+ }));
+ }
+ async function complete(options) {
+ const { generateText } = await loadAISdk();
+ const provider = resolveProvider(options.model, options.provider);
+ const model = await getLanguageModel(provider, options.model, options.apiKey);
+ const convertedMessages = convertMessages(options.messages);
+ const convertedTools = await convertTools(options.tools);
+ const convertedToolChoice = convertToolChoice(options.toolChoice);
+ try {
+ const result = await generateText({
+ model,
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ messages: convertedMessages,
+ temperature: options.temperature,
+ maxOutputTokens: options.maxOutputTokens,
+ topP: options.topP,
+ stopSequences: options.stopSequences,
+ tools: convertedTools,
+ toolChoice: convertedToolChoice,
+ abortSignal: options.timeoutMs ? AbortSignal.timeout(options.timeoutMs) : void 0
+ });
+ const toolCalls = extractToolCalls(result.toolCalls);
+ return {
+ text: result.text,
+ inputTokens: result.usage?.inputTokens ?? 0,
+ outputTokens: result.usage?.outputTokens ?? 0,
+ totalTokens: result.usage?.totalTokens ?? 0,
+ toolCalls,
+ finishReason: parseFinishReason(result.finishReason),
+ provider
+ };
+ } catch (error) {
+ if (error instanceof TraciaError) throw error;
+ const rawMessage = error instanceof Error ? error.message : String(error);
+ throw new TraciaError(
+ "PROVIDER_ERROR" /* PROVIDER_ERROR */,
+ `${provider} error: ${sanitizeErrorMessage(rawMessage)}`
+ );
+ }
+ }
+ function stream(options) {
+ const provider = resolveProvider(options.model, options.provider);
+ let resolveResult;
+ let rejectResult;
+ const resultPromise = new Promise((resolve, reject) => {
+ resolveResult = resolve;
+ rejectResult = reject;
+ });
+ async function* generateChunks() {
+ try {
+ const { streamText } = await loadAISdk();
+ const model = await getLanguageModel(provider, options.model, options.apiKey);
+ const convertedMessages = convertMessages(options.messages);
+ const convertedTools = await convertTools(options.tools);
+ const convertedToolChoice = convertToolChoice(options.toolChoice);
+ const abortSignal = combineAbortSignals(options.signal, options.timeoutMs);
+ const result = streamText({
+ model,
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ messages: convertedMessages,
+ temperature: options.temperature,
+ maxOutputTokens: options.maxOutputTokens,
+ topP: options.topP,
+ stopSequences: options.stopSequences,
+ tools: convertedTools,
+ toolChoice: convertedToolChoice,
+ abortSignal
+ });
+ for await (const chunk of result.textStream) {
+ yield chunk;
+ }
+ const [text, usageData, toolCallsData, finishReasonData] = await Promise.all([
+ result.text,
+ result.usage,
+ result.toolCalls,
+ result.finishReason
+ ]);
+ const toolCalls = extractToolCalls(toolCallsData);
+ resolveResult({
+ text,
+ inputTokens: usageData?.inputTokens ?? 0,
+ outputTokens: usageData?.outputTokens ?? 0,
+ totalTokens: usageData?.totalTokens ?? 0,
+ toolCalls,
+ finishReason: parseFinishReason(finishReasonData),
+ provider
+ });
+ } catch (error) {
+ if (error instanceof Error && error.name === "AbortError") {
+ const traciaError2 = new TraciaError("ABORTED" /* ABORTED */, "Stream aborted");
+ rejectResult(traciaError2);
+ throw traciaError2;
+ }
+ const rawMessage = error instanceof Error ? error.message : String(error);
+ const traciaError = error instanceof TraciaError ? error : new TraciaError(
+ "PROVIDER_ERROR" /* PROVIDER_ERROR */,
+ `${provider} error: ${sanitizeErrorMessage(rawMessage)}`
+ );
+ rejectResult(traciaError);
+ throw traciaError;
+ }
+ }
+ return {
+ chunks: generateChunks(),
+ result: resultPromise
+ };
+ }
+ function responsesStream(options) {
+ let resolveResult;
+ let rejectResult;
+ const resultPromise = new Promise((resolve, reject) => {
+ resolveResult = resolve;
+ rejectResult = reject;
+ });
+ async function* generateEvents() {
+ let fullText = "";
+ let usage = { inputTokens: 0, outputTokens: 0, totalTokens: 0 };
+ const outputItems = [];
+ const toolCalls = [];
+ let aborted = false;
+ try {
+ const { createOpenAI } = await loadOpenAIProvider();
+ const openai = createOpenAI({ apiKey: options.apiKey });
+ const model = openai.responses(options.model);
+ const { streamText } = await loadAISdk();
+ const convertedTools = options.tools ? await convertTools(options.tools) : void 0;
+ const abortSignal = combineAbortSignals(options.signal, options.timeoutMs);
+ const result = streamText({
+ model,
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ messages: options.input,
+ maxOutputTokens: options.maxOutputTokens,
+ tools: convertedTools,
+ abortSignal
+ });
+ for await (const chunk of result.textStream) {
+ fullText += chunk;
+ yield { type: "text_delta", data: chunk };
+ }
+ const [usageData, toolCallsData] = await Promise.all([
+ result.usage,
+ result.toolCalls
+ ]);
+ usage = {
+ inputTokens: usageData?.inputTokens ?? 0,
+ outputTokens: usageData?.outputTokens ?? 0,
+ totalTokens: usageData?.totalTokens ?? 0
+ };
+ if (toolCallsData) {
+ for (const tc of toolCallsData) {
+ if (!tc.toolCallId || !tc.toolName) continue;
+ const toolCall = {
+ id: tc.toolCallId,
+ callId: tc.toolCallId,
+ name: tc.toolName,
+ arguments: tc.input ?? {}
+ };
+ toolCalls.push(toolCall);
+ yield {
+ type: "tool_call",
+ id: toolCall.id,
+ callId: toolCall.callId,
+ name: toolCall.name,
+ arguments: toolCall.arguments
+ };
+ }
+ }
+ if (fullText) {
+ yield { type: "text", data: fullText };
+ outputItems.push({ type: "message", content: fullText });
+ }
+ yield { type: "done", usage };
+ resolveResult({
+ text: fullText,
+ usage,
+ outputItems,
+ toolCalls,
+ aborted
+ });
+ } catch (error) {
+ if (error instanceof Error && error.name === "AbortError") {
+ aborted = true;
+ resolveResult({
+ text: fullText,
+ usage,
+ outputItems,
+ toolCalls,
+ aborted
+ });
+ return;
+ }
+ const rawMessage = error instanceof Error ? error.message : String(error);
+ const traciaError = new TraciaError(
+ "PROVIDER_ERROR" /* PROVIDER_ERROR */,
+ `OpenAI Responses API error: ${sanitizeErrorMessage(rawMessage)}`
+ );
+ rejectResult(traciaError);
+ throw traciaError;
+ }
+ }
+ return {
+ events: generateEvents(),
+ result: resultPromise
+ };
+ }
+
  // src/traces.ts
+ var INTERNAL_SET_PENDING_TRACES = /* @__PURE__ */ Symbol("setPendingTracesMap");
  var Traces = class {
  constructor(client) {
  this.client = client;
+ this.pendingTraces = null;
+ }
+ /** @internal */
+ [INTERNAL_SET_PENDING_TRACES](map) {
+ this.pendingTraces = map;
+ }
+ async create(payload) {
+ return this.client.post("/api/v1/traces", payload);
  }
  async get(traceId) {
  return this.client.get(`/api/v1/traces/${encodeURIComponent(traceId)}`);
@@ -215,24 +726,680 @@ var Traces = class {
  const path = query ? `/api/v1/traces?${query}` : "/api/v1/traces";
  return this.client.get(path);
  }
+ async evaluate(traceId, options) {
+ if (this.pendingTraces) {
+ const pendingTrace = this.pendingTraces.get(traceId);
+ if (pendingTrace) {
+ await pendingTrace;
+ }
+ }
+ if (typeof options.value !== "number") {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ `Invalid evaluation value. Must be a number.`
+ );
+ }
+ const body = {
+ evaluatorKey: options.evaluator,
+ value: options.value
+ };
+ if (options.note !== void 0) {
+ body.note = options.note;
+ }
+ return this.client.post(
+ `/api/v1/traces/${encodeURIComponent(traceId)}/evaluations`,
+ body
+ );
+ }
  };

+ // src/utils.ts
+ import crypto from "crypto";
+ var TRACE_ID_REGEX = /^tr_[a-f0-9]{16}$/i;
+ function generateTraceId() {
+ const randomPart = crypto.randomBytes(8).toString("hex");
+ return `tr_${randomPart}`;
+ }
+ function isValidTraceIdFormat(traceId) {
+ return TRACE_ID_REGEX.test(traceId);
+ }
+
  // src/index.ts
+ var Eval = {
+ POSITIVE: 1,
+ NEGATIVE: 0
+ };
  var DEFAULT_BASE_URL = "https://app.tracia.io";
+ var MAX_PENDING_TRACES = 1e3;
+ var TRACE_RETRY_ATTEMPTS = 2;
+ var TRACE_RETRY_DELAY_MS = 500;
+ var TRACE_STATUS_SUCCESS = "SUCCESS";
+ var TRACE_STATUS_ERROR = "ERROR";
+ var ENV_VAR_MAP = {
+ ["openai" /* OPENAI */]: "OPENAI_API_KEY",
+ ["anthropic" /* ANTHROPIC */]: "ANTHROPIC_API_KEY",
+ ["google" /* GOOGLE */]: "GOOGLE_API_KEY"
+ };
+ function convertResponsesItemToMessage(item) {
+ if ("role" in item && (item.role === "developer" || item.role === "user")) {
+ const messageItem = item;
+ return {
+ role: messageItem.role === "developer" ? "system" : "user",
+ content: messageItem.content
+ };
+ }
+ if ("type" in item && item.type === "function_call_output") {
+ const outputItem = item;
+ return {
+ role: "tool",
+ toolCallId: outputItem.call_id,
+ content: outputItem.output
+ };
+ }
+ if ("type" in item) {
+ return {
+ role: "assistant",
+ content: JSON.stringify(item)
+ };
+ }
+ return {
+ role: "user",
+ content: JSON.stringify(item)
+ };
+ }
  var Tracia = class {
  constructor(options) {
+ this.pendingTraces = /* @__PURE__ */ new Map();
  if (!options.apiKey) {
- throw new Error("apiKey is required");
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ "apiKey is required"
+ );
  }
  this.client = new HttpClient({
  apiKey: options.apiKey,
  baseUrl: DEFAULT_BASE_URL
  });
+ this.onTraceError = options.onTraceError;
  this.prompts = new Prompts(this.client);
  this.traces = new Traces(this.client);
+ this.traces[INTERNAL_SET_PENDING_TRACES](this.pendingTraces);
+ }
+ runLocal(input) {
+ if (input.stream === true) {
+ return this.runLocalStreaming(input);
+ }
+ return this.runLocalNonStreaming(input);
+ }
+ async runLocalNonStreaming(input) {
+ this.validateRunLocalInput(input);
+ let traceId = "";
+ if (input.sendTrace !== false) {
+ if (input.traceId && !isValidTraceIdFormat(input.traceId)) {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ `Invalid trace ID format. Must match: tr_ + 16 hex characters (e.g., tr_1234567890abcdef)`
+ );
+ }
+ traceId = input.traceId || generateTraceId();
+ }
+ const interpolatedMessages = this.interpolateMessages(input.messages, input.variables);
+ const provider = resolveProvider(input.model, input.provider);
+ const apiKey = this.getProviderApiKey(provider, input.providerApiKey);
+ const startTime = Date.now();
+ let completionResult = null;
+ let errorMessage = null;
+ try {
+ completionResult = await complete({
+ model: input.model,
+ messages: interpolatedMessages,
+ apiKey,
+ provider: input.provider,
+ temperature: input.temperature,
+ maxOutputTokens: input.maxOutputTokens,
+ topP: input.topP,
+ stopSequences: input.stopSequences,
+ tools: input.tools,
+ toolChoice: input.toolChoice,
+ timeoutMs: input.timeoutMs
+ });
+ } catch (error) {
+ if (error instanceof TraciaError) {
+ errorMessage = error.message;
+ } else {
+ errorMessage = error instanceof Error ? error.message : String(error);
+ }
+ }
+ const latencyMs = Date.now() - startTime;
+ if (traceId) {
+ this.scheduleTraceCreation(traceId, {
+ traceId,
+ model: input.model,
+ provider: completionResult?.provider ?? provider,
+ input: { messages: interpolatedMessages },
+ variables: input.variables ?? null,
+ output: completionResult?.text ?? null,
+ status: errorMessage ? TRACE_STATUS_ERROR : TRACE_STATUS_SUCCESS,
+ error: errorMessage,
+ latencyMs,
+ inputTokens: completionResult?.inputTokens ?? 0,
+ outputTokens: completionResult?.outputTokens ?? 0,
+ totalTokens: completionResult?.totalTokens ?? 0,
+ tags: input.tags,
+ userId: input.userId,
+ sessionId: input.sessionId,
+ temperature: input.temperature,
+ maxOutputTokens: input.maxOutputTokens,
+ topP: input.topP,
+ tools: input.tools,
+ toolCalls: completionResult?.toolCalls
+ });
+ }
+ if (errorMessage) {
+ throw new TraciaError("PROVIDER_ERROR" /* PROVIDER_ERROR */, errorMessage);
+ }
+ const toolCalls = completionResult.toolCalls;
+ const finishReason = completionResult.finishReason;
+ const message = this.buildAssistantMessage(completionResult.text, toolCalls);
+ return {
+ text: completionResult.text,
+ traceId,
+ latencyMs,
+ usage: {
+ inputTokens: completionResult.inputTokens,
+ outputTokens: completionResult.outputTokens,
+ totalTokens: completionResult.totalTokens
+ },
+ cost: null,
+ provider: completionResult.provider,
+ model: input.model,
+ toolCalls,
+ finishReason,
+ message
+ };
+ }
+ runLocalStreaming(input) {
+ this.validateRunLocalInput(input);
+ let traceId = "";
+ if (input.sendTrace !== false) {
+ if (input.traceId && !isValidTraceIdFormat(input.traceId)) {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ `Invalid trace ID format. Must match: tr_ + 16 hex characters (e.g., tr_1234567890abcdef)`
+ );
+ }
+ traceId = input.traceId || generateTraceId();
+ }
+ const interpolatedMessages = this.interpolateMessages(input.messages, input.variables);
+ const provider = resolveProvider(input.model, input.provider);
+ const apiKey = this.getProviderApiKey(provider, input.providerApiKey);
+ const abortController = new AbortController();
+ const combinedSignal = input.signal ? this.combineAbortSignals(input.signal, abortController.signal) : abortController.signal;
+ return this.createLocalStream(
+ input,
+ interpolatedMessages,
+ provider,
+ apiKey,
+ traceId,
+ combinedSignal,
+ abortController
+ );
+ }
+ runResponses(input) {
+ if (input.stream === true) {
+ return this.runResponsesStreaming(input);
+ }
+ return this.runResponsesNonStreaming(input);
+ }
+ async runResponsesNonStreaming(input) {
+ const stream2 = this.runResponsesStreaming(input);
+ for await (const _event of stream2) {
+ }
+ return stream2.result;
+ }
+ runResponsesStreaming(input) {
+ this.validateResponsesInput(input);
+ let traceId = "";
+ if (input.sendTrace !== false) {
+ if (input.traceId && !isValidTraceIdFormat(input.traceId)) {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ `Invalid trace ID format. Must match: tr_ + 16 hex characters (e.g., tr_1234567890abcdef)`
+ );
+ }
+ traceId = input.traceId || generateTraceId();
+ }
+ const apiKey = this.getProviderApiKey("openai" /* OPENAI */, input.providerApiKey);
+ const abortController = new AbortController();
+ const combinedSignal = input.signal ? this.combineAbortSignals(input.signal, abortController.signal) : abortController.signal;
+ return this.createResponsesStream(
+ input,
+ apiKey,
+ traceId,
+ combinedSignal,
+ abortController
+ );
+ }
+ validateResponsesInput(input) {
+ if (!input.model || input.model.trim() === "") {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ "model is required and cannot be empty"
+ );
+ }
+ if (!input.input || input.input.length === 0) {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ "input array is required and cannot be empty"
+ );
+ }
+ }
+ createResponsesStream(input, apiKey, traceId, signal, abortController) {
+ const startTime = Date.now();
+ let aborted = false;
+ let resolveResult;
+ let rejectResult;
+ const resultPromise = new Promise((resolve, reject) => {
+ resolveResult = resolve;
+ rejectResult = reject;
+ });
+ const providerStream = responsesStream({
+ model: input.model,
+ input: input.input,
+ apiKey,
+ tools: input.tools,
+ maxOutputTokens: input.maxOutputTokens,
+ timeoutMs: input.timeoutMs,
+ signal
+ });
+ let collectedText = "";
+ const scheduleTrace = this.scheduleTraceCreation.bind(this);
+ async function* wrappedEvents() {
+ try {
+ for await (const event of providerStream.events) {
+ if (event.type === "text_delta") {
+ collectedText += event.data;
+ }
+ yield event;
+ }
+ const providerResult = await providerStream.result;
+ const latencyMs = Date.now() - startTime;
+ if (traceId) {
+ scheduleTrace(traceId, {
+ traceId,
+ model: input.model,
+ provider: "openai" /* OPENAI */,
+ input: { messages: input.input.map((item) => convertResponsesItemToMessage(item)) },
+ variables: null,
+ output: providerResult.text,
+ status: providerResult.aborted ? TRACE_STATUS_ERROR : TRACE_STATUS_SUCCESS,
+ error: providerResult.aborted ? "Stream aborted" : null,
+ latencyMs,
+ inputTokens: providerResult.usage.inputTokens,
+ outputTokens: providerResult.usage.outputTokens,
+ totalTokens: providerResult.usage.totalTokens,
+ tags: input.tags,
+ userId: input.userId,
+ sessionId: input.sessionId,
+ tools: input.tools,
+ toolCalls: providerResult.toolCalls.map((tc) => ({
+ id: tc.id,
+ name: tc.name,
+ arguments: tc.arguments
+ }))
+ });
+ }
+ resolveResult({
+ text: providerResult.text,
+ traceId,
+ latencyMs,
+ usage: providerResult.usage,
+ outputItems: providerResult.outputItems,
+ toolCalls: providerResult.toolCalls,
+ aborted: providerResult.aborted
+ });
+ } catch (error) {
+ const latencyMs = Date.now() - startTime;
+ const isAborted = aborted || signal.aborted;
+ const errorMessage = isAborted ? "Stream aborted" : error instanceof Error ? error.message : String(error);
+ if (traceId) {
+ scheduleTrace(traceId, {
+ traceId,
+ model: input.model,
+ provider: "openai" /* OPENAI */,
+ input: { messages: input.input.map((item) => convertResponsesItemToMessage(item)) },
+ variables: null,
+ output: collectedText || null,
+ status: TRACE_STATUS_ERROR,
+ error: errorMessage,
+ latencyMs,
+ inputTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0,
+ tags: input.tags,
+ userId: input.userId,
+ sessionId: input.sessionId,
+ tools: input.tools
+ });
+ }
+ if (isAborted) {
+ resolveResult({
+ text: collectedText,
+ traceId,
+ latencyMs,
+ usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
+ outputItems: [],
+ toolCalls: [],
+ aborted: true
+ });
+ } else {
+ const traciaError = error instanceof TraciaError ? error : new TraciaError("PROVIDER_ERROR" /* PROVIDER_ERROR */, errorMessage);
+ rejectResult(traciaError);
+ throw traciaError;
+ }
+ }
+ }
+ const asyncIterator = wrappedEvents();
+ return {
+ traceId,
+ [Symbol.asyncIterator]() {
+ return asyncIterator;
+ },
+ result: resultPromise,
+ abort() {
+ aborted = true;
+ abortController.abort();
+ }
+ };
+ }
+ createLocalStream(input, interpolatedMessages, provider, apiKey, traceId, signal, abortController) {
+ const startTime = Date.now();
+ let aborted = false;
+ let resolveResult;
+ let rejectResult;
+ const resultPromise = new Promise((resolve, reject) => {
+ resolveResult = resolve;
+ rejectResult = reject;
+ });
+ const providerStream = stream({
+ model: input.model,
+ messages: interpolatedMessages,
+ apiKey,
+ provider: input.provider,
+ temperature: input.temperature,
+ maxOutputTokens: input.maxOutputTokens,
+ topP: input.topP,
+ stopSequences: input.stopSequences,
+ tools: input.tools,
+ toolChoice: input.toolChoice,
+ timeoutMs: input.timeoutMs,
+ signal
+ });
+ let collectedText = "";
+ const scheduleTrace = this.scheduleTraceCreation.bind(this);
+ const buildAssistantMessage = this.buildAssistantMessage.bind(this);
+ async function* wrappedChunks() {
+ try {
+ for await (const chunk of providerStream.chunks) {
+ collectedText += chunk;
+ yield chunk;
+ }
+ const completionResult = await providerStream.result;
+ const latencyMs = Date.now() - startTime;
+ if (traceId) {
+ scheduleTrace(traceId, {
+ traceId,
+ model: input.model,
+ provider: completionResult.provider,
+ input: { messages: interpolatedMessages },
+ variables: input.variables ?? null,
+ output: completionResult.text,
+ status: TRACE_STATUS_SUCCESS,
+ error: null,
+ latencyMs,
+ inputTokens: completionResult.inputTokens,
+ outputTokens: completionResult.outputTokens,
+ totalTokens: completionResult.totalTokens,
+ tags: input.tags,
+ userId: input.userId,
+ sessionId: input.sessionId,
+ temperature: input.temperature,
+ maxOutputTokens: input.maxOutputTokens,
+ topP: input.topP,
+ tools: input.tools,
+ toolCalls: completionResult.toolCalls
+ });
+ }
+ const toolCalls = completionResult.toolCalls;
+ const finishReason = completionResult.finishReason;
+ const message = buildAssistantMessage(completionResult.text, toolCalls);
+ resolveResult({
+ text: completionResult.text,
+ traceId,
+ latencyMs,
+ usage: {
+ inputTokens: completionResult.inputTokens,
+ outputTokens: completionResult.outputTokens,
+ totalTokens: completionResult.totalTokens
+ },
+ cost: null,
+ provider: completionResult.provider,
+ model: input.model,
+ aborted: false,
+ toolCalls,
+ finishReason,
+ message
+ });
+ } catch (error) {
+ const latencyMs = Date.now() - startTime;
+ const isAborted = aborted || signal.aborted;
+ const errorMessage = isAborted ? "Stream aborted" : error instanceof Error ? error.message : String(error);
+ if (traceId) {
+ scheduleTrace(traceId, {
+ traceId,
+ model: input.model,
+ provider,
+ input: { messages: interpolatedMessages },
+ variables: input.variables ?? null,
+ output: collectedText || null,
+ status: TRACE_STATUS_ERROR,
+ error: errorMessage,
+ latencyMs,
+ inputTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0,
+ tags: input.tags,
+ userId: input.userId,
+ sessionId: input.sessionId,
+ temperature: input.temperature,
+ maxOutputTokens: input.maxOutputTokens,
+ topP: input.topP
+ });
+ }
+ if (isAborted) {
+ const abortedMessage = buildAssistantMessage(collectedText, []);
+ resolveResult({
+ text: collectedText,
+ traceId,
+ latencyMs,
+ usage: {
+ inputTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0
+ },
+ cost: null,
+ provider,
+ model: input.model,
+ aborted: true,
+ toolCalls: [],
+ finishReason: "stop",
+ message: abortedMessage
+ });
+ } else {
+ const traciaError = error instanceof TraciaError ? error : new TraciaError("PROVIDER_ERROR" /* PROVIDER_ERROR */, errorMessage);
+ rejectResult(traciaError);
+ throw traciaError;
+ }
+ }
+ }
+ const asyncIterator = wrappedChunks();
+ return {
+ traceId,
+ [Symbol.asyncIterator]() {
+ return asyncIterator;
+ },
+ result: resultPromise,
+ abort() {
+ aborted = true;
+ abortController.abort();
+ }
+ };
+ }
+ combineAbortSignals(signal1, signal2) {
+ const controller = new AbortController();
+ if (signal1.aborted || signal2.aborted) {
+ controller.abort();
+ return controller.signal;
+ }
+ const onAbort = () => {
+ signal1.removeEventListener("abort", onAbort);
+ signal2.removeEventListener("abort", onAbort);
+ controller.abort();
+ };
+ signal1.addEventListener("abort", onAbort, { once: true });
+ signal2.addEventListener("abort", onAbort, { once: true });
+ return controller.signal;
+ }
+ async flush() {
+ await Promise.all(this.pendingTraces.values());
+ }
+ validateRunLocalInput(input) {
+ if (!input.model || input.model.trim() === "") {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ "model is required and cannot be empty"
+ );
+ }
+ if (!input.messages || input.messages.length === 0) {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ "messages array is required and cannot be empty"
+ );
+ }
+ for (const message of input.messages) {
+ if (message.role === "tool") {
+ if (!message.toolCallId) {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ `Tool messages must include toolCallId. Example: { role: "tool", toolCallId: "call_123", content: '{"result": "data"}' }`
+ );
+ }
+ if (typeof message.content !== "string") {
+ throw new TraciaError(
+ "INVALID_REQUEST" /* INVALID_REQUEST */,
+ `Tool message content must be a string (the tool result). Example: { role: "tool", toolCallId: "call_123", content: '{"result": "data"}' }`
+ );
+ }
+ }
+ }
+ }
+ scheduleTraceCreation(traceId, payload) {
+ if (this.pendingTraces.size >= MAX_PENDING_TRACES) {
+ const oldestTraceId = this.pendingTraces.keys().next().value;
+ if (oldestTraceId) {
+ this.pendingTraces.delete(oldestTraceId);
+ }
+ }
+ const tracePromise = this.createTraceWithRetry(traceId, payload);
+ this.pendingTraces.set(traceId, tracePromise);
+ tracePromise.finally(() => this.pendingTraces.delete(traceId));
+ }
+ async createTraceWithRetry(traceId, payload) {
+ let lastError = null;
+ for (let attempt = 0; attempt <= TRACE_RETRY_ATTEMPTS; attempt++) {
+ try {
+ await this.traces.create(payload);
+ return;
+ } catch (error) {
+ lastError = error instanceof Error ? error : new Error(String(error));
+ if (attempt < TRACE_RETRY_ATTEMPTS) {
+ await this.delay(TRACE_RETRY_DELAY_MS * (attempt + 1));
+ }
+ }
+ }
+ if (this.onTraceError && lastError) {
+ this.onTraceError(lastError, traceId);
+ }
+ }
+ delay(ms) {
+ return new Promise((resolve) => setTimeout(resolve, ms));
+ }
+ interpolateMessages(messages, variables) {
+ if (!variables) return messages;
+ return messages.map((message) => {
+ if (typeof message.content === "string") {
+ return {
+ ...message,
+ content: message.content.replace(
+ /\{\{(\w+)\}\}/g,
+ (match, key) => variables[key] ?? match
+ )
+ };
+ }
+ if (message.role === "tool") {
+ return message;
+ }
+ return {
+ ...message,
+ content: message.content.map((block) => {
+ if (block.type === "text") {
+ return {
+ ...block,
+ text: block.text.replace(
+ /\{\{(\w+)\}\}/g,
+ (match, key) => variables[key] ?? match
+ )
+ };
+ }
+ return block;
+ })
+ };
+ });
+ }
+ buildAssistantMessage(text, toolCalls) {
+ if (toolCalls.length === 0) {
+ return { role: "assistant", content: text };
+ }
+ const contentParts = [];
+ if (text) {
+ contentParts.push({ type: "text", text });
+ }
+ for (const toolCall of toolCalls) {
+ contentParts.push({
+ type: "tool_call",
+ id: toolCall.id,
+ name: toolCall.name,
+ arguments: toolCall.arguments
+ });
+ }
+ return { role: "assistant", content: contentParts };
+ }
+ getProviderApiKey(provider, override) {
+ if (override) return override;
+ const envVar = ENV_VAR_MAP[provider];
+ const key = process.env[envVar];
+ if (!key) {
+ throw new TraciaError(
+ "MISSING_PROVIDER_API_KEY" /* MISSING_PROVIDER_API_KEY */,
+ `Missing API key for ${provider}. Set the ${envVar} environment variable or provide providerApiKey in options.`
+ );
+ }
+ return key;
  }
  };
  export {
+ Eval,
+ LLMProvider,
  Tracia,
  TraciaError,
  TraciaErrorCode
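
For orientation, the sketch below strings together the main additions visible in this diff (runLocal, streaming runs, traces.evaluate, Eval, flush, onTraceError). It is assembled from the bundled output only; the published type definitions are not part of this diff, so the option shapes and the "thumbs" evaluator key shown here are assumptions. Per the bundle's own error messages, local runs also require the optional peer packages (ai plus the matching @ai-sdk/* provider) and a provider key such as OPENAI_API_KEY, unless providerApiKey is passed explicitly.

  import { Tracia, Eval } from "tracia";

  // Hypothetical wiring based on the bundle above, not an official example.
  const tracia = new Tracia({
    apiKey: process.env.TRACIA_API_KEY,
    // Called after trace-upload retries are exhausted (createTraceWithRetry).
    onTraceError: (error, traceId) => console.warn(`trace ${traceId} failed`, error),
  });

  // Non-streaming local run: {{name}} is filled from `variables`, and a trace
  // is created in the background via scheduleTraceCreation.
  const result = await tracia.runLocal({
    model: "gpt-4o-mini",
    messages: [{ role: "user", content: "Say hello to {{name}}" }],
    variables: { name: "Ada" },
  });

  // Record feedback on the trace; evaluate() awaits the pending upload first.
  await tracia.traces.evaluate(result.traceId, {
    evaluator: "thumbs", // evaluator key is hypothetical
    value: Eval.POSITIVE,
  });

  // Streaming variant: runLocal({ stream: true }) returns an async iterable
  // with a `result` promise and an `abort()` method.
  const run = tracia.runLocal({
    model: "gpt-4o-mini",
    messages: [{ role: "user", content: "Stream a haiku" }],
    stream: true,
  });
  for await (const chunk of run) process.stdout.write(chunk);
  const final = await run.result;
  console.log("\n", final.usage, final.traceId);

  // Flush pending trace uploads before the process exits.
  await tracia.flush();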