@superatomai/sdk-node 0.0.39 → 0.0.40

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1254,13 +1254,16 @@ var init_prompt_loader = __esm({
1254
1254
  const contextMarker = "---\n\n## CONTEXT";
1255
1255
  if (template.system.includes(contextMarker)) {
1256
1256
  const [staticPart, contextPart] = template.system.split(contextMarker);
1257
- logger.debug(`\u2713 Prompt caching enabled for '${promptName}' (static: ${staticPart.length} chars, context: ${contextPart.length} chars)`);
1257
+ const processedStatic = this.replaceVariables(staticPart, variables);
1258
1258
  const processedContext = this.replaceVariables(contextMarker + contextPart, variables);
1259
+ const staticLength = processedStatic.length;
1260
+ const contextLength = processedContext.length;
1261
+ logger.debug(`\u2713 Prompt caching enabled for '${promptName}' (cached: ${staticLength} chars, dynamic: ${contextLength} chars)`);
1259
1262
  return {
1260
1263
  system: [
1261
1264
  {
1262
1265
  type: "text",
1263
- text: staticPart.trim(),
1266
+ text: processedStatic.trim(),
1264
1267
  cache_control: { type: "ephemeral" }
1265
1268
  },
1266
1269
  {
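
For orientation, a minimal sketch (not part of the published bundle) of the prompt-caching pattern this hunk corrects: the static portion of the system prompt is now passed through replaceVariables before being cached, instead of being cached raw. The second system block is assumed here to carry the processed context without a cache marker; the truncated hunk does not show it.

  // Sketch only - illustrates the corrected ordering, not the exact bundled code.
  function buildCachedSystem(template, variables, replaceVariables) {
    const contextMarker = "---\n\n## CONTEXT";
    const [staticPart, contextPart] = template.split(contextMarker);
    const processedStatic = replaceVariables(staticPart, variables);   // new: substitute before caching
    const processedContext = replaceVariables(contextMarker + contextPart, variables);
    return [
      { type: "text", text: processedStatic.trim(), cache_control: { type: "ephemeral" } }, // cached block
      { type: "text", text: processedContext.trim() }                                       // dynamic block (assumed)
    ];
  }
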
@@ -3378,6 +3381,448 @@ import Groq from "groq-sdk";
3378
3381
  import { GoogleGenerativeAI, SchemaType } from "@google/generative-ai";
3379
3382
  import OpenAI from "openai";
3380
3383
  import { jsonrepair } from "jsonrepair";
3384
+
3385
+ // src/utils/llm-usage-logger.ts
3386
+ import fs5 from "fs";
3387
+ import path4 from "path";
3388
+ var PRICING = {
3389
+ // Anthropic
3390
+ "claude-3-5-sonnet-20241022": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
3391
+ "claude-sonnet-4-5": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
3392
+ "claude-sonnet-4-5-20250929": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
3393
+ "claude-3-5-haiku-20241022": { input: 0.8, output: 4, cacheRead: 0.08, cacheWrite: 1 },
3394
+ "claude-haiku-4-5-20251001": { input: 0.8, output: 4, cacheRead: 0.08, cacheWrite: 1 },
3395
+ "claude-3-opus-20240229": { input: 15, output: 75, cacheRead: 1.5, cacheWrite: 18.75 },
3396
+ "claude-3-sonnet-20240229": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
3397
+ "claude-3-haiku-20240307": { input: 0.25, output: 1.25, cacheRead: 0.03, cacheWrite: 0.3 },
3398
+ // OpenAI
3399
+ "gpt-4o": { input: 2.5, output: 10 },
3400
+ "gpt-4o-mini": { input: 0.15, output: 0.6 },
3401
+ "gpt-4-turbo": { input: 10, output: 30 },
3402
+ "gpt-4": { input: 30, output: 60 },
3403
+ "gpt-3.5-turbo": { input: 0.5, output: 1.5 },
3404
+ // Gemini
3405
+ "gemini-1.5-pro": { input: 1.25, output: 5 },
3406
+ "gemini-1.5-flash": { input: 0.075, output: 0.3 },
3407
+ "gemini-2.0-flash-exp": { input: 0.1, output: 0.4 },
3408
+ // Groq (very cheap)
3409
+ "llama-3.3-70b-versatile": { input: 0.59, output: 0.79 },
3410
+ "llama-3.1-70b-versatile": { input: 0.59, output: 0.79 },
3411
+ "llama-3.1-8b-instant": { input: 0.05, output: 0.08 },
3412
+ "mixtral-8x7b-32768": { input: 0.24, output: 0.24 }
3413
+ };
3414
+ var DEFAULT_PRICING = { input: 3, output: 15 };
3415
+ var LLMUsageLogger = class {
3416
+ constructor() {
3417
+ this.logStream = null;
3418
+ this.sessionStats = {
3419
+ totalCalls: 0,
3420
+ totalInputTokens: 0,
3421
+ totalOutputTokens: 0,
3422
+ totalCacheReadTokens: 0,
3423
+ totalCacheWriteTokens: 0,
3424
+ totalCostUSD: 0,
3425
+ totalDurationMs: 0
3426
+ };
3427
+ this.logPath = process.env.LLM_USAGE_LOG_PATH || path4.join(process.cwd(), "llm-usage-logs");
3428
+ this.enabled = process.env.LLM_USAGE_LOGGING !== "false";
3429
+ if (this.enabled) {
3430
+ this.initLogStream();
3431
+ }
3432
+ }
3433
+ initLogStream() {
3434
+ try {
3435
+ const dir = path4.dirname(this.logPath);
3436
+ if (!fs5.existsSync(dir)) {
3437
+ fs5.mkdirSync(dir, { recursive: true });
3438
+ }
3439
+ this.logStream = fs5.createWriteStream(this.logPath, { flags: "a" });
3440
+ if (!fs5.existsSync(this.logPath) || fs5.statSync(this.logPath).size === 0) {
3441
+ this.writeHeader();
3442
+ }
3443
+ } catch (error) {
3444
+ console.error("[LLM-Usage-Logger] Failed to initialize log stream:", error);
3445
+ this.enabled = false;
3446
+ }
3447
+ }
3448
+ writeHeader() {
3449
+ const header = `
3450
+ ================================================================================
3451
+ LLM USAGE LOG - Session Started: ${(/* @__PURE__ */ new Date()).toISOString()}
3452
+ ================================================================================
3453
+ Format: [TIMESTAMP] [REQUEST_ID] [PROVIDER/MODEL] [METHOD]
3454
+ Tokens: IN=input OUT=output CACHE_R=cache_read CACHE_W=cache_write TOTAL=total
3455
+ Cost: $X.XXXXXX | Time: Xms
3456
+ ================================================================================
3457
+
3458
+ `;
3459
+ this.logStream?.write(header);
3460
+ }
3461
+ /**
3462
+ * Calculate cost based on token usage and model
3463
+ */
3464
+ calculateCost(model, inputTokens, outputTokens, cacheReadTokens = 0, cacheWriteTokens = 0) {
3465
+ let pricing = PRICING[model];
3466
+ if (!pricing) {
3467
+ const modelLower = model.toLowerCase();
3468
+ for (const [key, value] of Object.entries(PRICING)) {
3469
+ if (modelLower.includes(key.toLowerCase()) || key.toLowerCase().includes(modelLower)) {
3470
+ pricing = value;
3471
+ break;
3472
+ }
3473
+ }
3474
+ }
3475
+ pricing = pricing || DEFAULT_PRICING;
3476
+ const inputCost = inputTokens / 1e6 * pricing.input;
3477
+ const outputCost = outputTokens / 1e6 * pricing.output;
3478
+ const cacheReadCost = cacheReadTokens / 1e6 * (pricing.cacheRead || pricing.input * 0.1);
3479
+ const cacheWriteCost = cacheWriteTokens / 1e6 * (pricing.cacheWrite || pricing.input * 1.25);
3480
+ return inputCost + outputCost + cacheReadCost + cacheWriteCost;
3481
+ }
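
As a worked example of the pricing arithmetic above (illustrative only; prices are USD per million tokens, and unrecognised models fall back to substring matching and then DEFAULT_PRICING):

  // One claude-sonnet-4-5 call: 2,000 fresh input tokens, 800 output tokens,
  // 50,000 tokens read from cache, nothing newly cached.
  //   input       2000 / 1e6 * 3.00  = $0.006
  //   output       800 / 1e6 * 15.00 = $0.012
  //   cache read 50000 / 1e6 * 0.30  = $0.015
  //   total                          = $0.033
  llmUsageLogger.calculateCost("claude-sonnet-4-5", 2000, 800, 50000, 0); // 0.033
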
3482
+ /**
3483
+ * Log an LLM API call
3484
+ */
3485
+ log(entry) {
3486
+ if (!this.enabled) return;
3487
+ this.sessionStats.totalCalls++;
3488
+ this.sessionStats.totalInputTokens += entry.inputTokens;
3489
+ this.sessionStats.totalOutputTokens += entry.outputTokens;
3490
+ this.sessionStats.totalCacheReadTokens += entry.cacheReadTokens || 0;
3491
+ this.sessionStats.totalCacheWriteTokens += entry.cacheWriteTokens || 0;
3492
+ this.sessionStats.totalCostUSD += entry.costUSD;
3493
+ this.sessionStats.totalDurationMs += entry.durationMs;
3494
+ const cacheInfo = entry.cacheReadTokens || entry.cacheWriteTokens ? ` CACHE_R=${entry.cacheReadTokens || 0} CACHE_W=${entry.cacheWriteTokens || 0}` : "";
3495
+ const toolInfo = entry.toolCalls ? ` | Tools: ${entry.toolCalls}` : "";
3496
+ const errorInfo = entry.error ? ` | ERROR: ${entry.error}` : "";
3497
+ const status = entry.success ? "\u2713" : "\u2717";
3498
+ let cacheStatus = "";
3499
+ if (entry.cacheReadTokens && entry.cacheReadTokens > 0) {
3500
+ const savedCost = entry.cacheReadTokens / 1e6 * 2.7;
3501
+ cacheStatus = ` \u26A1 CACHE HIT! Saved ~$${savedCost.toFixed(4)}`;
3502
+ } else if (entry.cacheWriteTokens && entry.cacheWriteTokens > 0) {
3503
+ cacheStatus = " \u{1F4DD} Cache created (next request will be cheaper)";
3504
+ }
3505
+ const logLine = `[${entry.timestamp}] [${entry.requestId}] ${status} ${entry.provider}/${entry.model} [${entry.method}]
3506
+ Tokens: IN=${entry.inputTokens} OUT=${entry.outputTokens}${cacheInfo} TOTAL=${entry.totalTokens}
3507
+ Cost: $${entry.costUSD.toFixed(6)} | Time: ${entry.durationMs}ms${toolInfo}${errorInfo}${cacheStatus}
3508
+ `;
3509
+ this.logStream?.write(logLine);
3510
+ if (entry.cacheReadTokens && entry.cacheReadTokens > 0) {
3511
+ console.log(`[LLM] \u26A1 CACHE HIT: ${entry.cacheReadTokens.toLocaleString()} tokens read from cache (${entry.method})`);
3512
+ } else if (entry.cacheWriteTokens && entry.cacheWriteTokens > 0) {
3513
+ console.log(`[LLM] \u{1F4DD} CACHE WRITE: ${entry.cacheWriteTokens.toLocaleString()} tokens cached for future requests (${entry.method})`);
3514
+ }
3515
+ if (process.env.SUPERATOM_LOG_LEVEL === "verbose") {
3516
+ console.log("\n[LLM-Usage]", logLine);
3517
+ }
3518
+ }
3519
+ /**
3520
+ * Log session summary (call at end of request)
3521
+ */
3522
+ logSessionSummary(requestContext) {
3523
+ if (!this.enabled || this.sessionStats.totalCalls === 0) return;
3524
+ const cacheReadSavings = this.sessionStats.totalCacheReadTokens / 1e6 * 2.7;
3525
+ const hasCaching = this.sessionStats.totalCacheReadTokens > 0 || this.sessionStats.totalCacheWriteTokens > 0;
3526
+ let cacheSection = "";
3527
+ if (hasCaching) {
3528
+ cacheSection = `
3529
+ Cache Statistics:
3530
+ Cache Read Tokens: ${this.sessionStats.totalCacheReadTokens.toLocaleString()}${this.sessionStats.totalCacheReadTokens > 0 ? " \u26A1" : ""}
3531
+ Cache Write Tokens: ${this.sessionStats.totalCacheWriteTokens.toLocaleString()}${this.sessionStats.totalCacheWriteTokens > 0 ? " \u{1F4DD}" : ""}
3532
+ Estimated Savings: $${cacheReadSavings.toFixed(4)}`;
3533
+ }
3534
+ const summary = `
3535
+ --------------------------------------------------------------------------------
3536
+ SESSION SUMMARY${requestContext ? ` (${requestContext})` : ""}
3537
+ --------------------------------------------------------------------------------
3538
+ Total LLM Calls: ${this.sessionStats.totalCalls}
3539
+ Total Input Tokens: ${this.sessionStats.totalInputTokens.toLocaleString()}
3540
+ Total Output Tokens: ${this.sessionStats.totalOutputTokens.toLocaleString()}
3541
+ Total Tokens: ${(this.sessionStats.totalInputTokens + this.sessionStats.totalOutputTokens).toLocaleString()}
3542
+ Total Cost: $${this.sessionStats.totalCostUSD.toFixed(6)}
3543
+ Total Time: ${this.sessionStats.totalDurationMs}ms (${(this.sessionStats.totalDurationMs / 1e3).toFixed(2)}s)
3544
+ Avg Cost/Call: $${(this.sessionStats.totalCostUSD / this.sessionStats.totalCalls).toFixed(6)}
3545
+ Avg Time/Call: ${Math.round(this.sessionStats.totalDurationMs / this.sessionStats.totalCalls)}ms${cacheSection}
3546
+ --------------------------------------------------------------------------------
3547
+
3548
+ `;
3549
+ this.logStream?.write(summary);
3550
+ console.log("\n[LLM-Usage] Session Summary:");
3551
+ console.log(` Calls: ${this.sessionStats.totalCalls} | Tokens: ${(this.sessionStats.totalInputTokens + this.sessionStats.totalOutputTokens).toLocaleString()} | Cost: $${this.sessionStats.totalCostUSD.toFixed(4)} | Time: ${(this.sessionStats.totalDurationMs / 1e3).toFixed(2)}s`);
3552
+ if (hasCaching) {
3553
+ console.log(` Cache: ${this.sessionStats.totalCacheReadTokens.toLocaleString()} read, ${this.sessionStats.totalCacheWriteTokens.toLocaleString()} written | Savings: ~$${cacheReadSavings.toFixed(4)}`);
3554
+ }
3555
+ }
3556
+ /**
3557
+ * Reset session stats (call at start of new user request)
3558
+ */
3559
+ resetSession() {
3560
+ this.sessionStats = {
3561
+ totalCalls: 0,
3562
+ totalInputTokens: 0,
3563
+ totalOutputTokens: 0,
3564
+ totalCacheReadTokens: 0,
3565
+ totalCacheWriteTokens: 0,
3566
+ totalCostUSD: 0,
3567
+ totalDurationMs: 0
3568
+ };
3569
+ }
3570
+ /**
3571
+ * Reset the log file for a new request (clears previous logs)
3572
+ * Call this at the start of each USER_PROMPT_REQ
3573
+ */
3574
+ resetLogFile(requestContext) {
3575
+ if (!this.enabled) return;
3576
+ try {
3577
+ if (this.logStream) {
3578
+ this.logStream.end();
3579
+ this.logStream = null;
3580
+ }
3581
+ this.logStream = fs5.createWriteStream(this.logPath, { flags: "w" });
3582
+ const header = `
3583
+ ================================================================================
3584
+ LLM USAGE LOG - Request Started: ${(/* @__PURE__ */ new Date()).toISOString()}
3585
+ ${requestContext ? `Context: ${requestContext}` : ""}
3586
+ ================================================================================
3587
+ Format: [TIMESTAMP] [REQUEST_ID] [PROVIDER/MODEL] [METHOD]
3588
+ Tokens: IN=input OUT=output CACHE_R=cache_read CACHE_W=cache_write TOTAL=total
3589
+ Cost: $X.XXXXXX | Time: Xms
3590
+ ================================================================================
3591
+
3592
+ `;
3593
+ this.logStream.write(header);
3594
+ this.resetSession();
3595
+ console.log(`[LLM-Usage] Log file reset for new request: ${this.logPath}`);
3596
+ } catch (error) {
3597
+ console.error("[LLM-Usage-Logger] Failed to reset log file:", error);
3598
+ }
3599
+ }
3600
+ /**
3601
+ * Get current session stats
3602
+ */
3603
+ getSessionStats() {
3604
+ return { ...this.sessionStats };
3605
+ }
3606
+ /**
3607
+ * Generate a unique request ID
3608
+ */
3609
+ generateRequestId() {
3610
+ return `req-${Date.now()}-${Math.random().toString(36).substring(2, 8)}`;
3611
+ }
3612
+ };
3613
+ var llmUsageLogger = new LLMUsageLogger();
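
The singleton above is created at module load, so the environment switches it reads must be set before the SDK is imported. A sketch of those switches, taken from the constructor and log() above:

  // LLM_USAGE_LOG_PATH   - log file location (default: <cwd>/llm-usage-logs)
  // LLM_USAGE_LOGGING    - set to "false" to disable usage logging entirely
  // SUPERATOM_LOG_LEVEL  - set to "verbose" to echo every entry to the console
  process.env.LLM_USAGE_LOGGING = "false"; // must run before the SDK module is imported
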
3614
+
3615
+ // src/utils/user-prompt-error-logger.ts
3616
+ import fs6 from "fs";
3617
+ import path5 from "path";
3618
+ var UserPromptErrorLogger = class {
3619
+ constructor() {
3620
+ this.logStream = null;
3621
+ this.hasErrors = false;
3622
+ this.logPath = process.env.USER_PROMPT_ERROR_LOG_PATH || path5.join(process.cwd(), "user-prompt-req-errors");
3623
+ this.enabled = process.env.USER_PROMPT_ERROR_LOGGING !== "false";
3624
+ }
3625
+ /**
3626
+ * Reset the error log file for a new request
3627
+ */
3628
+ resetLogFile(requestContext) {
3629
+ if (!this.enabled) return;
3630
+ try {
3631
+ if (this.logStream) {
3632
+ this.logStream.end();
3633
+ this.logStream = null;
3634
+ }
3635
+ const dir = path5.dirname(this.logPath);
3636
+ if (dir !== "." && !fs6.existsSync(dir)) {
3637
+ fs6.mkdirSync(dir, { recursive: true });
3638
+ }
3639
+ this.logStream = fs6.createWriteStream(this.logPath, { flags: "w" });
3640
+ this.hasErrors = false;
3641
+ const header = `================================================================================
3642
+ USER PROMPT REQUEST ERROR LOG
3643
+ Request Started: ${(/* @__PURE__ */ new Date()).toISOString()}
3644
+ ${requestContext ? `Context: ${requestContext}` : ""}
3645
+ ================================================================================
3646
+
3647
+ `;
3648
+ this.logStream.write(header);
3649
+ } catch (error) {
3650
+ console.error("[UserPromptErrorLogger] Failed to reset log file:", error);
3651
+ }
3652
+ }
3653
+ /**
3654
+ * Log a JSON parse error with the raw string that failed
3655
+ */
3656
+ logJsonParseError(context, rawString, error) {
3657
+ if (!this.enabled) return;
3658
+ this.hasErrors = true;
3659
+ const entry = `
3660
+ --------------------------------------------------------------------------------
3661
+ [${(/* @__PURE__ */ new Date()).toISOString()}] JSON PARSE ERROR
3662
+ --------------------------------------------------------------------------------
3663
+ Context: ${context}
3664
+ Error: ${error.message}
3665
+
3666
+ Raw String (${rawString.length} chars):
3667
+ --------------------------------------------------------------------------------
3668
+ ${rawString}
3669
+ --------------------------------------------------------------------------------
3670
+
3671
+ Stack Trace:
3672
+ ${error.stack || "No stack trace available"}
3673
+
3674
+ `;
3675
+ this.write(entry);
3676
+ console.error(`[UserPromptError] JSON Parse Error in ${context}: ${error.message}`);
3677
+ }
3678
+ /**
3679
+ * Log a general error with full details
3680
+ */
3681
+ logError(context, error, additionalData) {
3682
+ if (!this.enabled) return;
3683
+ this.hasErrors = true;
3684
+ const errorMessage = error instanceof Error ? error.message : error;
3685
+ const errorStack = error instanceof Error ? error.stack : void 0;
3686
+ let entry = `
3687
+ --------------------------------------------------------------------------------
3688
+ [${(/* @__PURE__ */ new Date()).toISOString()}] ERROR
3689
+ --------------------------------------------------------------------------------
3690
+ Context: ${context}
3691
+ Error: ${errorMessage}
3692
+ `;
3693
+ if (additionalData) {
3694
+ entry += `
3695
+ Additional Data:
3696
+ ${JSON.stringify(additionalData, null, 2)}
3697
+ `;
3698
+ }
3699
+ if (errorStack) {
3700
+ entry += `
3701
+ Stack Trace:
3702
+ ${errorStack}
3703
+ `;
3704
+ }
3705
+ entry += `--------------------------------------------------------------------------------
3706
+
3707
+ `;
3708
+ this.write(entry);
3709
+ console.error(`[UserPromptError] ${context}: ${errorMessage}`);
3710
+ }
3711
+ /**
3712
+ * Log a SQL query error with the full query
3713
+ */
3714
+ logSqlError(query, error, params) {
3715
+ if (!this.enabled) return;
3716
+ this.hasErrors = true;
3717
+ const errorMessage = error instanceof Error ? error.message : error;
3718
+ const entry = `
3719
+ --------------------------------------------------------------------------------
3720
+ [${(/* @__PURE__ */ new Date()).toISOString()}] SQL QUERY ERROR
3721
+ --------------------------------------------------------------------------------
3722
+ Error: ${errorMessage}
3723
+
3724
+ Query (${query.length} chars):
3725
+ --------------------------------------------------------------------------------
3726
+ ${query}
3727
+ --------------------------------------------------------------------------------
3728
+ ${params ? `
3729
+ Parameters: ${JSON.stringify(params)}` : ""}
3730
+
3731
+ `;
3732
+ this.write(entry);
3733
+ console.error(`[UserPromptError] SQL Error: ${errorMessage}`);
3734
+ }
3735
+ /**
3736
+ * Log an LLM API error
3737
+ */
3738
+ logLlmError(provider, model, method, error, requestData) {
3739
+ if (!this.enabled) return;
3740
+ this.hasErrors = true;
3741
+ const errorMessage = error instanceof Error ? error.message : error;
3742
+ const errorStack = error instanceof Error ? error.stack : void 0;
3743
+ let entry = `
3744
+ --------------------------------------------------------------------------------
3745
+ [${(/* @__PURE__ */ new Date()).toISOString()}] LLM API ERROR
3746
+ --------------------------------------------------------------------------------
3747
+ Provider: ${provider}
3748
+ Model: ${model}
3749
+ Method: ${method}
3750
+ Error: ${errorMessage}
3751
+ `;
3752
+ if (requestData) {
3753
+ const dataStr = JSON.stringify(requestData, null, 2);
3754
+ const truncated = dataStr.length > 5e3 ? dataStr.substring(0, 5e3) + "\n... [truncated]" : dataStr;
3755
+ entry += `
3756
+ Request Data:
3757
+ ${truncated}
3758
+ `;
3759
+ }
3760
+ if (errorStack) {
3761
+ entry += `
3762
+ Stack Trace:
3763
+ ${errorStack}
3764
+ `;
3765
+ }
3766
+ entry += `--------------------------------------------------------------------------------
3767
+
3768
+ `;
3769
+ this.write(entry);
3770
+ console.error(`[UserPromptError] LLM Error (${provider}/${model}): ${errorMessage}`);
3771
+ }
3772
+ /**
3773
+ * Log tool execution error
3774
+ */
3775
+ logToolError(toolName, toolInput, error) {
3776
+ if (!this.enabled) return;
3777
+ this.hasErrors = true;
3778
+ const errorMessage = error instanceof Error ? error.message : error;
3779
+ const errorStack = error instanceof Error ? error.stack : void 0;
3780
+ const entry = `
3781
+ --------------------------------------------------------------------------------
3782
+ [${(/* @__PURE__ */ new Date()).toISOString()}] TOOL EXECUTION ERROR
3783
+ --------------------------------------------------------------------------------
3784
+ Tool: ${toolName}
3785
+ Error: ${errorMessage}
3786
+
3787
+ Tool Input:
3788
+ ${JSON.stringify(toolInput, null, 2)}
3789
+ ${errorStack ? `
3790
+ Stack Trace:
3791
+ ${errorStack}` : ""}
3792
+ --------------------------------------------------------------------------------
3793
+
3794
+ `;
3795
+ this.write(entry);
3796
+ console.error(`[UserPromptError] Tool Error (${toolName}): ${errorMessage}`);
3797
+ }
3798
+ /**
3799
+ * Write final summary if there were errors
3800
+ */
3801
+ writeSummary() {
3802
+ if (!this.enabled || !this.hasErrors) return;
3803
+ const summary = `
3804
+ ================================================================================
3805
+ REQUEST COMPLETED WITH ERRORS
3806
+ Time: ${(/* @__PURE__ */ new Date()).toISOString()}
3807
+ ================================================================================
3808
+ `;
3809
+ this.write(summary);
3810
+ }
3811
+ /**
3812
+ * Check if any errors were logged
3813
+ */
3814
+ hadErrors() {
3815
+ return this.hasErrors;
3816
+ }
3817
+ write(content) {
3818
+ if (this.logStream) {
3819
+ this.logStream.write(content);
3820
+ }
3821
+ }
3822
+ };
3823
+ var userPromptErrorLogger = new UserPromptErrorLogger();
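
A sketch of the intended request lifecycle for this error logger, inferred from the call sites added further down in this diff (the prompt text and handler name here are illustrative):

  userPromptErrorLogger.resetLogFile("User Prompt: show revenue by region"); // fresh log per request
  try {
    // ... LLM calls, SQL queries and tool executions report failures via
    // logLlmError / logSqlError / logToolError / logJsonParseError ...
  } catch (err) {
    userPromptErrorLogger.logError("handleUserRequest", err, { userPrompt: "..." });
  } finally {
    userPromptErrorLogger.writeSummary(); // only writes if at least one error was logged
  }
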
3824
+
3825
+ // src/llm.ts
3381
3826
  var LLM = class {
3382
3827
  /* Get a complete text response from an LLM (Anthropic or Groq) */
3383
3828
  static async text(messages, options = {}) {
@@ -3520,68 +3965,156 @@ var LLM = class {
3520
3965
  // ANTHROPIC IMPLEMENTATION
3521
3966
  // ============================================================
3522
3967
  static async _anthropicText(messages, modelName, options) {
3968
+ const startTime = Date.now();
3969
+ const requestId = llmUsageLogger.generateRequestId();
3523
3970
  const apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY || "";
3524
3971
  const client = new Anthropic({
3525
3972
  apiKey
3526
3973
  });
3527
- const response = await client.messages.create({
3528
- model: modelName,
3529
- max_tokens: options.maxTokens || 1e3,
3530
- temperature: options.temperature,
3531
- system: this._normalizeSystemPrompt(messages.sys),
3532
- messages: [{
3533
- role: "user",
3534
- content: messages.user
3535
- }]
3536
- });
3537
- const textBlock = response.content.find((block) => block.type === "text");
3538
- return textBlock?.type === "text" ? textBlock.text : "";
3974
+ try {
3975
+ const response = await client.messages.create({
3976
+ model: modelName,
3977
+ max_tokens: options.maxTokens || 1e3,
3978
+ temperature: options.temperature,
3979
+ system: this._normalizeSystemPrompt(messages.sys),
3980
+ messages: [{
3981
+ role: "user",
3982
+ content: messages.user
3983
+ }]
3984
+ });
3985
+ const durationMs = Date.now() - startTime;
3986
+ const usage = response.usage;
3987
+ const inputTokens = usage?.input_tokens || 0;
3988
+ const outputTokens = usage?.output_tokens || 0;
3989
+ const cacheReadTokens = usage?.cache_read_input_tokens || 0;
3990
+ const cacheWriteTokens = usage?.cache_creation_input_tokens || 0;
3991
+ llmUsageLogger.log({
3992
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
3993
+ requestId,
3994
+ provider: "anthropic",
3995
+ model: modelName,
3996
+ method: "text",
3997
+ inputTokens,
3998
+ outputTokens,
3999
+ cacheReadTokens,
4000
+ cacheWriteTokens,
4001
+ totalTokens: inputTokens + outputTokens + cacheReadTokens + cacheWriteTokens,
4002
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens, cacheReadTokens, cacheWriteTokens),
4003
+ durationMs,
4004
+ success: true
4005
+ });
4006
+ const textBlock = response.content.find((block) => block.type === "text");
4007
+ return textBlock?.type === "text" ? textBlock.text : "";
4008
+ } catch (error) {
4009
+ const durationMs = Date.now() - startTime;
4010
+ llmUsageLogger.log({
4011
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4012
+ requestId,
4013
+ provider: "anthropic",
4014
+ model: modelName,
4015
+ method: "text",
4016
+ inputTokens: 0,
4017
+ outputTokens: 0,
4018
+ totalTokens: 0,
4019
+ costUSD: 0,
4020
+ durationMs,
4021
+ success: false,
4022
+ error: error instanceof Error ? error.message : String(error)
4023
+ });
4024
+ throw error;
4025
+ }
3539
4026
  }
3540
4027
  static async _anthropicStream(messages, modelName, options, json) {
4028
+ const startTime = Date.now();
4029
+ const requestId = llmUsageLogger.generateRequestId();
3541
4030
  const apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY || "";
3542
4031
  const client = new Anthropic({
3543
4032
  apiKey
3544
4033
  });
3545
- const apiMessages = [{
3546
- role: "user",
3547
- content: messages.user
3548
- }];
3549
- const prefill = messages.prefill || (json ? "{" : void 0);
3550
- if (prefill) {
3551
- apiMessages.push({
3552
- role: "assistant",
3553
- content: prefill
4034
+ try {
4035
+ const apiMessages = [{
4036
+ role: "user",
4037
+ content: messages.user
4038
+ }];
4039
+ const prefill = messages.prefill || (json ? "{" : void 0);
4040
+ if (prefill) {
4041
+ apiMessages.push({
4042
+ role: "assistant",
4043
+ content: prefill
4044
+ });
4045
+ }
4046
+ const stream = await client.messages.create({
4047
+ model: modelName,
4048
+ max_tokens: options.maxTokens || 1e3,
4049
+ temperature: options.temperature,
4050
+ system: this._normalizeSystemPrompt(messages.sys),
4051
+ messages: apiMessages,
4052
+ stream: true
3554
4053
  });
3555
- }
3556
- const stream = await client.messages.create({
3557
- model: modelName,
3558
- max_tokens: options.maxTokens || 1e3,
3559
- temperature: options.temperature,
3560
- system: this._normalizeSystemPrompt(messages.sys),
3561
- messages: apiMessages,
3562
- stream: true
3563
- });
3564
- let fullText = prefill || "";
3565
- let usage = null;
3566
- for await (const chunk of stream) {
3567
- if (chunk.type === "content_block_delta" && chunk.delta.type === "text_delta") {
3568
- const text = chunk.delta.text;
3569
- fullText += text;
3570
- if (options.partial) {
3571
- options.partial(text);
4054
+ let fullText = prefill || "";
4055
+ let usage = null;
4056
+ let inputTokens = 0;
4057
+ let outputTokens = 0;
4058
+ let cacheReadTokens = 0;
4059
+ let cacheWriteTokens = 0;
4060
+ for await (const chunk of stream) {
4061
+ if (chunk.type === "content_block_delta" && chunk.delta.type === "text_delta") {
4062
+ const text = chunk.delta.text;
4063
+ fullText += text;
4064
+ if (options.partial) {
4065
+ options.partial(text);
4066
+ }
4067
+ } else if (chunk.type === "message_start" && chunk.message?.usage) {
4068
+ const msgUsage = chunk.message.usage;
4069
+ inputTokens = msgUsage.input_tokens || 0;
4070
+ cacheReadTokens = msgUsage.cache_read_input_tokens || 0;
4071
+ cacheWriteTokens = msgUsage.cache_creation_input_tokens || 0;
4072
+ } else if (chunk.type === "message_delta" && chunk.usage) {
4073
+ usage = chunk.usage;
4074
+ outputTokens = usage.output_tokens || 0;
3572
4075
  }
3573
- } else if (chunk.type === "message_delta" && chunk.usage) {
3574
- usage = chunk.usage;
3575
4076
  }
4077
+ const durationMs = Date.now() - startTime;
4078
+ llmUsageLogger.log({
4079
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4080
+ requestId,
4081
+ provider: "anthropic",
4082
+ model: modelName,
4083
+ method: "stream",
4084
+ inputTokens,
4085
+ outputTokens,
4086
+ cacheReadTokens,
4087
+ cacheWriteTokens,
4088
+ totalTokens: inputTokens + outputTokens + cacheReadTokens + cacheWriteTokens,
4089
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens, cacheReadTokens, cacheWriteTokens),
4090
+ durationMs,
4091
+ success: true
4092
+ });
4093
+ if (json) {
4094
+ return this._parseJSON(fullText);
4095
+ }
4096
+ return fullText;
4097
+ } catch (error) {
4098
+ const durationMs = Date.now() - startTime;
4099
+ llmUsageLogger.log({
4100
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4101
+ requestId,
4102
+ provider: "anthropic",
4103
+ model: modelName,
4104
+ method: "stream",
4105
+ inputTokens: 0,
4106
+ outputTokens: 0,
4107
+ totalTokens: 0,
4108
+ costUSD: 0,
4109
+ durationMs,
4110
+ success: false,
4111
+ error: error instanceof Error ? error.message : String(error)
4112
+ });
4113
+ throw error;
3576
4114
  }
3577
- if (usage) {
3578
- }
3579
- if (json) {
3580
- return this._parseJSON(fullText);
3581
- }
3582
- return fullText;
3583
4115
  }
3584
4116
  static async _anthropicStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
4117
+ const methodStartTime = Date.now();
3585
4118
  const apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY || "";
3586
4119
  const client = new Anthropic({
3587
4120
  apiKey
@@ -3592,8 +4125,15 @@ var LLM = class {
3592
4125
  }];
3593
4126
  let iterations = 0;
3594
4127
  let finalText = "";
4128
+ let totalToolCalls = 0;
4129
+ let totalInputTokens = 0;
4130
+ let totalOutputTokens = 0;
4131
+ let totalCacheReadTokens = 0;
4132
+ let totalCacheWriteTokens = 0;
3595
4133
  while (iterations < maxIterations) {
3596
4134
  iterations++;
4135
+ const iterationStartTime = Date.now();
4136
+ const requestId = llmUsageLogger.generateRequestId();
3597
4137
  const stream = await client.messages.create({
3598
4138
  model: modelName,
3599
4139
  max_tokens: options.maxTokens || 4e3,
@@ -3608,12 +4148,21 @@ var LLM = class {
3608
4148
  const contentBlocks = [];
3609
4149
  let currentTextBlock = "";
3610
4150
  let currentToolUse = null;
3611
- let usage = null;
4151
+ let inputTokens = 0;
4152
+ let outputTokens = 0;
4153
+ let cacheReadTokens = 0;
4154
+ let cacheWriteTokens = 0;
3612
4155
  for await (const chunk of stream) {
3613
4156
  if (chunk.type === "message_start") {
3614
4157
  contentBlocks.length = 0;
3615
4158
  currentTextBlock = "";
3616
4159
  currentToolUse = null;
4160
+ const msgUsage = chunk.message?.usage;
4161
+ if (msgUsage) {
4162
+ inputTokens = msgUsage.input_tokens || 0;
4163
+ cacheReadTokens = msgUsage.cache_read_input_tokens || 0;
4164
+ cacheWriteTokens = msgUsage.cache_creation_input_tokens || 0;
4165
+ }
3617
4166
  }
3618
4167
  if (chunk.type === "content_block_start") {
3619
4168
  if (chunk.content_block.type === "text") {
@@ -3660,15 +4209,36 @@ var LLM = class {
3660
4209
  if (chunk.type === "message_delta") {
3661
4210
  stopReason = chunk.delta.stop_reason || stopReason;
3662
4211
  if (chunk.usage) {
3663
- usage = chunk.usage;
4212
+ outputTokens = chunk.usage.output_tokens || 0;
3664
4213
  }
3665
4214
  }
3666
4215
  if (chunk.type === "message_stop") {
3667
4216
  break;
3668
4217
  }
3669
4218
  }
3670
- if (usage) {
3671
- }
4219
+ const iterationDuration = Date.now() - iterationStartTime;
4220
+ const toolUsesInIteration = contentBlocks.filter((block) => block.type === "tool_use").length;
4221
+ totalToolCalls += toolUsesInIteration;
4222
+ totalInputTokens += inputTokens;
4223
+ totalOutputTokens += outputTokens;
4224
+ totalCacheReadTokens += cacheReadTokens;
4225
+ totalCacheWriteTokens += cacheWriteTokens;
4226
+ llmUsageLogger.log({
4227
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4228
+ requestId,
4229
+ provider: "anthropic",
4230
+ model: modelName,
4231
+ method: `streamWithTools[iter=${iterations}]`,
4232
+ inputTokens,
4233
+ outputTokens,
4234
+ cacheReadTokens,
4235
+ cacheWriteTokens,
4236
+ totalTokens: inputTokens + outputTokens + cacheReadTokens + cacheWriteTokens,
4237
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens, cacheReadTokens, cacheWriteTokens),
4238
+ durationMs: iterationDuration,
4239
+ toolCalls: toolUsesInIteration,
4240
+ success: true
4241
+ });
3672
4242
  if (stopReason === "end_turn") {
3673
4243
  break;
3674
4244
  }
@@ -3712,6 +4282,25 @@ var LLM = class {
3712
4282
  break;
3713
4283
  }
3714
4284
  }
4285
+ const totalDuration = Date.now() - methodStartTime;
4286
+ if (iterations > 1) {
4287
+ llmUsageLogger.log({
4288
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4289
+ requestId: llmUsageLogger.generateRequestId(),
4290
+ provider: "anthropic",
4291
+ model: modelName,
4292
+ method: `streamWithTools[TOTAL:${iterations}iters]`,
4293
+ inputTokens: totalInputTokens,
4294
+ outputTokens: totalOutputTokens,
4295
+ cacheReadTokens: totalCacheReadTokens,
4296
+ cacheWriteTokens: totalCacheWriteTokens,
4297
+ totalTokens: totalInputTokens + totalOutputTokens + totalCacheReadTokens + totalCacheWriteTokens,
4298
+ costUSD: llmUsageLogger.calculateCost(modelName, totalInputTokens, totalOutputTokens, totalCacheReadTokens, totalCacheWriteTokens),
4299
+ durationMs: totalDuration,
4300
+ toolCalls: totalToolCalls,
4301
+ success: true
4302
+ });
4303
+ }
3715
4304
  if (iterations >= maxIterations) {
3716
4305
  throw new Error(`Max iterations (${maxIterations}) reached in tool calling loop`);
3717
4306
  }
@@ -3721,100 +4310,272 @@ var LLM = class {
3721
4310
  // GROQ IMPLEMENTATION
3722
4311
  // ============================================================
3723
4312
  static async _groqText(messages, modelName, options) {
4313
+ const startTime = Date.now();
4314
+ const requestId = llmUsageLogger.generateRequestId();
3724
4315
  const client = new Groq({
3725
4316
  apiKey: options.apiKey || process.env.GROQ_API_KEY || ""
3726
4317
  });
3727
- const response = await client.chat.completions.create({
3728
- model: modelName,
3729
- messages: [
3730
- { role: "system", content: messages.sys },
3731
- { role: "user", content: messages.user }
3732
- ],
3733
- temperature: options.temperature,
3734
- max_tokens: options.maxTokens || 1e3
3735
- });
3736
- return response.choices[0]?.message?.content || "";
4318
+ try {
4319
+ const response = await client.chat.completions.create({
4320
+ model: modelName,
4321
+ messages: [
4322
+ { role: "system", content: messages.sys },
4323
+ { role: "user", content: messages.user }
4324
+ ],
4325
+ temperature: options.temperature,
4326
+ max_tokens: options.maxTokens || 1e3
4327
+ });
4328
+ const durationMs = Date.now() - startTime;
4329
+ const usage = response.usage;
4330
+ const inputTokens = usage?.prompt_tokens || 0;
4331
+ const outputTokens = usage?.completion_tokens || 0;
4332
+ llmUsageLogger.log({
4333
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4334
+ requestId,
4335
+ provider: "groq",
4336
+ model: modelName,
4337
+ method: "text",
4338
+ inputTokens,
4339
+ outputTokens,
4340
+ totalTokens: inputTokens + outputTokens,
4341
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
4342
+ durationMs,
4343
+ success: true
4344
+ });
4345
+ return response.choices[0]?.message?.content || "";
4346
+ } catch (error) {
4347
+ const durationMs = Date.now() - startTime;
4348
+ llmUsageLogger.log({
4349
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4350
+ requestId,
4351
+ provider: "groq",
4352
+ model: modelName,
4353
+ method: "text",
4354
+ inputTokens: 0,
4355
+ outputTokens: 0,
4356
+ totalTokens: 0,
4357
+ costUSD: 0,
4358
+ durationMs,
4359
+ success: false,
4360
+ error: error instanceof Error ? error.message : String(error)
4361
+ });
4362
+ throw error;
4363
+ }
3737
4364
  }
3738
4365
  static async _groqStream(messages, modelName, options, json) {
4366
+ const startTime = Date.now();
4367
+ const requestId = llmUsageLogger.generateRequestId();
3739
4368
  const apiKey = options.apiKey || process.env.GROQ_API_KEY || "";
3740
4369
  const client = new Groq({
3741
4370
  apiKey
3742
4371
  });
3743
- const stream = await client.chat.completions.create({
3744
- model: modelName,
3745
- messages: [
3746
- { role: "system", content: messages.sys },
3747
- { role: "user", content: messages.user }
3748
- ],
3749
- temperature: options.temperature,
3750
- max_tokens: options.maxTokens || 1e3,
3751
- stream: true,
3752
- response_format: json ? { type: "json_object" } : void 0
3753
- });
3754
- let fullText = "";
3755
- for await (const chunk of stream) {
3756
- const text = chunk.choices[0]?.delta?.content || "";
3757
- if (text) {
3758
- fullText += text;
3759
- if (options.partial) {
3760
- options.partial(text);
4372
+ try {
4373
+ const stream = await client.chat.completions.create({
4374
+ model: modelName,
4375
+ messages: [
4376
+ { role: "system", content: messages.sys },
4377
+ { role: "user", content: messages.user }
4378
+ ],
4379
+ temperature: options.temperature,
4380
+ max_tokens: options.maxTokens || 1e3,
4381
+ stream: true,
4382
+ response_format: json ? { type: "json_object" } : void 0
4383
+ });
4384
+ let fullText = "";
4385
+ let inputTokens = 0;
4386
+ let outputTokens = 0;
4387
+ for await (const chunk of stream) {
4388
+ const text = chunk.choices[0]?.delta?.content || "";
4389
+ if (text) {
4390
+ fullText += text;
4391
+ if (options.partial) {
4392
+ options.partial(text);
4393
+ }
4394
+ }
4395
+ if (chunk.x_groq?.usage) {
4396
+ inputTokens = chunk.x_groq.usage.prompt_tokens || 0;
4397
+ outputTokens = chunk.x_groq.usage.completion_tokens || 0;
3761
4398
  }
3762
4399
  }
4400
+ const durationMs = Date.now() - startTime;
4401
+ if (inputTokens === 0) {
4402
+ const sysPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((b) => b.text).join("");
4403
+ inputTokens = Math.ceil((sysPrompt.length + messages.user.length) / 4);
4404
+ }
4405
+ if (outputTokens === 0) {
4406
+ outputTokens = Math.ceil(fullText.length / 4);
4407
+ }
4408
+ llmUsageLogger.log({
4409
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4410
+ requestId,
4411
+ provider: "groq",
4412
+ model: modelName,
4413
+ method: "stream",
4414
+ inputTokens,
4415
+ outputTokens,
4416
+ totalTokens: inputTokens + outputTokens,
4417
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
4418
+ durationMs,
4419
+ success: true
4420
+ });
4421
+ if (json) {
4422
+ return this._parseJSON(fullText);
4423
+ }
4424
+ return fullText;
4425
+ } catch (error) {
4426
+ const durationMs = Date.now() - startTime;
4427
+ llmUsageLogger.log({
4428
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4429
+ requestId,
4430
+ provider: "groq",
4431
+ model: modelName,
4432
+ method: "stream",
4433
+ inputTokens: 0,
4434
+ outputTokens: 0,
4435
+ totalTokens: 0,
4436
+ costUSD: 0,
4437
+ durationMs,
4438
+ success: false,
4439
+ error: error instanceof Error ? error.message : String(error)
4440
+ });
4441
+ throw error;
3763
4442
  }
3764
- if (json) {
3765
- return this._parseJSON(fullText);
3766
- }
3767
- return fullText;
3768
4443
  }
3769
4444
  // ============================================================
3770
4445
  // GEMINI IMPLEMENTATION
3771
4446
  // ============================================================
3772
4447
  static async _geminiText(messages, modelName, options) {
4448
+ const startTime = Date.now();
4449
+ const requestId = llmUsageLogger.generateRequestId();
3773
4450
  const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
3774
4451
  const genAI = new GoogleGenerativeAI(apiKey);
3775
4452
  const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
3776
- const model = genAI.getGenerativeModel({
3777
- model: modelName,
3778
- systemInstruction: systemPrompt,
3779
- generationConfig: {
3780
- maxOutputTokens: options.maxTokens || 1e3,
3781
- temperature: options.temperature,
3782
- topP: options.topP
3783
- }
3784
- });
3785
- const result = await model.generateContent(messages.user);
3786
- const response = await result.response;
3787
- return response.text();
4453
+ try {
4454
+ const model = genAI.getGenerativeModel({
4455
+ model: modelName,
4456
+ systemInstruction: systemPrompt,
4457
+ generationConfig: {
4458
+ maxOutputTokens: options.maxTokens || 1e3,
4459
+ temperature: options.temperature,
4460
+ topP: options.topP
4461
+ }
4462
+ });
4463
+ const result = await model.generateContent(messages.user);
4464
+ const response = await result.response;
4465
+ const text = response.text();
4466
+ const durationMs = Date.now() - startTime;
4467
+ const usage = response.usageMetadata;
4468
+ const inputTokens = usage?.promptTokenCount || Math.ceil((systemPrompt.length + messages.user.length) / 4);
4469
+ const outputTokens = usage?.candidatesTokenCount || Math.ceil(text.length / 4);
4470
+ llmUsageLogger.log({
4471
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4472
+ requestId,
4473
+ provider: "gemini",
4474
+ model: modelName,
4475
+ method: "text",
4476
+ inputTokens,
4477
+ outputTokens,
4478
+ totalTokens: inputTokens + outputTokens,
4479
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
4480
+ durationMs,
4481
+ success: true
4482
+ });
4483
+ return text;
4484
+ } catch (error) {
4485
+ const durationMs = Date.now() - startTime;
4486
+ llmUsageLogger.log({
4487
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4488
+ requestId,
4489
+ provider: "gemini",
4490
+ model: modelName,
4491
+ method: "text",
4492
+ inputTokens: 0,
4493
+ outputTokens: 0,
4494
+ totalTokens: 0,
4495
+ costUSD: 0,
4496
+ durationMs,
4497
+ success: false,
4498
+ error: error instanceof Error ? error.message : String(error)
4499
+ });
4500
+ throw error;
4501
+ }
3788
4502
  }
3789
4503
  static async _geminiStream(messages, modelName, options, json) {
4504
+ const startTime = Date.now();
4505
+ const requestId = llmUsageLogger.generateRequestId();
3790
4506
  const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
3791
4507
  const genAI = new GoogleGenerativeAI(apiKey);
3792
4508
  const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
3793
- const model = genAI.getGenerativeModel({
3794
- model: modelName,
3795
- systemInstruction: systemPrompt,
3796
- generationConfig: {
3797
- maxOutputTokens: options.maxTokens || 1e3,
3798
- temperature: options.temperature,
3799
- topP: options.topP,
3800
- responseMimeType: json ? "application/json" : void 0
3801
- }
3802
- });
3803
- const result = await model.generateContentStream(messages.user);
3804
- let fullText = "";
3805
- for await (const chunk of result.stream) {
3806
- const text = chunk.text();
3807
- if (text) {
3808
- fullText += text;
3809
- if (options.partial) {
3810
- options.partial(text);
4509
+ try {
4510
+ const model = genAI.getGenerativeModel({
4511
+ model: modelName,
4512
+ systemInstruction: systemPrompt,
4513
+ generationConfig: {
4514
+ maxOutputTokens: options.maxTokens || 1e3,
4515
+ temperature: options.temperature,
4516
+ topP: options.topP,
4517
+ responseMimeType: json ? "application/json" : void 0
4518
+ }
4519
+ });
4520
+ const result = await model.generateContentStream(messages.user);
4521
+ let fullText = "";
4522
+ let inputTokens = 0;
4523
+ let outputTokens = 0;
4524
+ for await (const chunk of result.stream) {
4525
+ const text = chunk.text();
4526
+ if (text) {
4527
+ fullText += text;
4528
+ if (options.partial) {
4529
+ options.partial(text);
4530
+ }
4531
+ }
4532
+ if (chunk.usageMetadata) {
4533
+ inputTokens = chunk.usageMetadata.promptTokenCount || 0;
4534
+ outputTokens = chunk.usageMetadata.candidatesTokenCount || 0;
3811
4535
  }
3812
4536
  }
4537
+ const durationMs = Date.now() - startTime;
4538
+ if (inputTokens === 0) {
4539
+ inputTokens = Math.ceil((systemPrompt.length + messages.user.length) / 4);
4540
+ }
4541
+ if (outputTokens === 0) {
4542
+ outputTokens = Math.ceil(fullText.length / 4);
4543
+ }
4544
+ llmUsageLogger.log({
4545
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4546
+ requestId,
4547
+ provider: "gemini",
4548
+ model: modelName,
4549
+ method: "stream",
4550
+ inputTokens,
4551
+ outputTokens,
4552
+ totalTokens: inputTokens + outputTokens,
4553
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
4554
+ durationMs,
4555
+ success: true
4556
+ });
4557
+ if (json) {
4558
+ return this._parseJSON(fullText);
4559
+ }
4560
+ return fullText;
4561
+ } catch (error) {
4562
+ const durationMs = Date.now() - startTime;
4563
+ llmUsageLogger.log({
4564
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4565
+ requestId,
4566
+ provider: "gemini",
4567
+ model: modelName,
4568
+ method: "stream",
4569
+ inputTokens: 0,
4570
+ outputTokens: 0,
4571
+ totalTokens: 0,
4572
+ costUSD: 0,
4573
+ durationMs,
4574
+ success: false,
4575
+ error: error instanceof Error ? error.message : String(error)
4576
+ });
4577
+ throw error;
3813
4578
  }
3814
- if (json) {
3815
- return this._parseJSON(fullText);
3816
- }
3817
- return fullText;
3818
4579
  }
3819
4580
  static async _geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
3820
4581
  const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
@@ -3908,51 +4669,138 @@ var LLM = class {
3908
4669
  // OPENAI IMPLEMENTATION
3909
4670
  // ============================================================
3910
4671
  static async _openaiText(messages, modelName, options) {
4672
+ const startTime = Date.now();
4673
+ const requestId = llmUsageLogger.generateRequestId();
3911
4674
  const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
3912
4675
  const openai = new OpenAI({ apiKey });
3913
4676
  const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
3914
- const response = await openai.chat.completions.create({
3915
- model: modelName,
3916
- messages: [
3917
- { role: "system", content: systemPrompt },
3918
- { role: "user", content: messages.user }
3919
- ],
3920
- max_tokens: options.maxTokens || 1e3,
3921
- temperature: options.temperature,
3922
- top_p: options.topP
3923
- });
3924
- return response.choices[0]?.message?.content || "";
4677
+ try {
4678
+ const response = await openai.chat.completions.create({
4679
+ model: modelName,
4680
+ messages: [
4681
+ { role: "system", content: systemPrompt },
4682
+ { role: "user", content: messages.user }
4683
+ ],
4684
+ max_tokens: options.maxTokens || 1e3,
4685
+ temperature: options.temperature,
4686
+ top_p: options.topP
4687
+ });
4688
+ const durationMs = Date.now() - startTime;
4689
+ const usage = response.usage;
4690
+ const inputTokens = usage?.prompt_tokens || 0;
4691
+ const outputTokens = usage?.completion_tokens || 0;
4692
+ llmUsageLogger.log({
4693
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4694
+ requestId,
4695
+ provider: "openai",
4696
+ model: modelName,
4697
+ method: "text",
4698
+ inputTokens,
4699
+ outputTokens,
4700
+ totalTokens: inputTokens + outputTokens,
4701
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
4702
+ durationMs,
4703
+ success: true
4704
+ });
4705
+ return response.choices[0]?.message?.content || "";
4706
+ } catch (error) {
4707
+ const durationMs = Date.now() - startTime;
4708
+ llmUsageLogger.log({
4709
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4710
+ requestId,
4711
+ provider: "openai",
4712
+ model: modelName,
4713
+ method: "text",
4714
+ inputTokens: 0,
4715
+ outputTokens: 0,
4716
+ totalTokens: 0,
4717
+ costUSD: 0,
4718
+ durationMs,
4719
+ success: false,
4720
+ error: error instanceof Error ? error.message : String(error)
4721
+ });
4722
+ throw error;
4723
+ }
3925
4724
  }
3926
4725
  static async _openaiStream(messages, modelName, options, json) {
4726
+ const startTime = Date.now();
4727
+ const requestId = llmUsageLogger.generateRequestId();
3927
4728
  const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
3928
4729
  const openai = new OpenAI({ apiKey });
3929
4730
  const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
3930
- const stream = await openai.chat.completions.create({
3931
- model: modelName,
3932
- messages: [
3933
- { role: "system", content: systemPrompt },
3934
- { role: "user", content: messages.user }
3935
- ],
3936
- max_tokens: options.maxTokens || 1e3,
3937
- temperature: options.temperature,
3938
- top_p: options.topP,
3939
- response_format: json ? { type: "json_object" } : void 0,
3940
- stream: true
3941
- });
3942
- let fullText = "";
3943
- for await (const chunk of stream) {
3944
- const content = chunk.choices[0]?.delta?.content || "";
3945
- if (content) {
3946
- fullText += content;
3947
- if (options.partial) {
3948
- options.partial(content);
4731
+ try {
4732
+ const stream = await openai.chat.completions.create({
4733
+ model: modelName,
4734
+ messages: [
4735
+ { role: "system", content: systemPrompt },
4736
+ { role: "user", content: messages.user }
4737
+ ],
4738
+ max_tokens: options.maxTokens || 1e3,
4739
+ temperature: options.temperature,
4740
+ top_p: options.topP,
4741
+ response_format: json ? { type: "json_object" } : void 0,
4742
+ stream: true,
4743
+ stream_options: { include_usage: true }
4744
+ // Request usage info in stream
4745
+ });
4746
+ let fullText = "";
4747
+ let inputTokens = 0;
4748
+ let outputTokens = 0;
4749
+ for await (const chunk of stream) {
4750
+ const content = chunk.choices[0]?.delta?.content || "";
4751
+ if (content) {
4752
+ fullText += content;
4753
+ if (options.partial) {
4754
+ options.partial(content);
4755
+ }
4756
+ }
4757
+ if (chunk.usage) {
4758
+ inputTokens = chunk.usage.prompt_tokens || 0;
4759
+ outputTokens = chunk.usage.completion_tokens || 0;
3949
4760
  }
3950
4761
  }
4762
+ const durationMs = Date.now() - startTime;
4763
+ if (inputTokens === 0) {
4764
+ inputTokens = Math.ceil((systemPrompt.length + messages.user.length) / 4);
4765
+ }
4766
+ if (outputTokens === 0) {
4767
+ outputTokens = Math.ceil(fullText.length / 4);
4768
+ }
4769
+ llmUsageLogger.log({
4770
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4771
+ requestId,
4772
+ provider: "openai",
4773
+ model: modelName,
4774
+ method: "stream",
4775
+ inputTokens,
4776
+ outputTokens,
4777
+ totalTokens: inputTokens + outputTokens,
4778
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
4779
+ durationMs,
4780
+ success: true
4781
+ });
4782
+ if (json) {
4783
+ return this._parseJSON(fullText);
4784
+ }
4785
+ return fullText;
4786
+ } catch (error) {
4787
+ const durationMs = Date.now() - startTime;
4788
+ llmUsageLogger.log({
4789
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4790
+ requestId,
4791
+ provider: "openai",
4792
+ model: modelName,
4793
+ method: "stream",
4794
+ inputTokens: 0,
4795
+ outputTokens: 0,
4796
+ totalTokens: 0,
4797
+ costUSD: 0,
4798
+ durationMs,
4799
+ success: false,
4800
+ error: error instanceof Error ? error.message : String(error)
4801
+ });
4802
+ throw error;
3951
4803
  }
3952
- if (json) {
3953
- return this._parseJSON(fullText);
3954
- }
3955
- return fullText;
3956
4804
  }
3957
4805
  static async _openaiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
3958
4806
  const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
@@ -4098,11 +4946,9 @@ var LLM = class {
4098
4946
  closeChar = "]";
4099
4947
  }
4100
4948
  if (startIdx === -1) {
4101
- const preview = text.length > 500 ? text.substring(0, 500) + "..." : text;
4102
- throw new Error(`No JSON found in response. LLM returned plain text instead of JSON.
4103
-
4104
- Full response:
4105
- ${preview}`);
4949
+ const error = new Error(`No JSON found in response. LLM returned plain text instead of JSON.`);
4950
+ userPromptErrorLogger.logJsonParseError("LLM._parseJSON - No JSON structure found", text, error);
4951
+ throw error;
4106
4952
  }
4107
4953
  let depth = 0;
4108
4954
  let inString = false;
@@ -4129,24 +4975,17 @@ ${preview}`);
4129
4975
  if (endIdx !== -1) {
4130
4976
  jsonText = jsonText.substring(startIdx, endIdx + 1);
4131
4977
  } else {
4132
- const preview = text.length > 500 ? text.substring(0, 500) + "..." : text;
4133
- throw new Error(`Incomplete JSON - no matching closing ${closeChar} found.
4134
-
4135
- Full response:
4136
- ${preview}`);
4978
+ const error = new Error(`Incomplete JSON - no matching closing ${closeChar} found.`);
4979
+ userPromptErrorLogger.logJsonParseError("LLM._parseJSON - Incomplete JSON", text, error);
4980
+ throw error;
4137
4981
  }
4138
4982
  try {
4139
4983
  const repairedJson = jsonrepair(jsonText);
4140
4984
  return JSON.parse(repairedJson);
4141
4985
  } catch (error) {
4142
- const preview = text.length > 500 ? text.substring(0, 500) + "..." : text;
4143
- throw new Error(`Failed to parse JSON: ${error instanceof Error ? error.message : String(error)}
4144
-
4145
- Extracted JSON:
4146
- ${jsonText.substring(0, 300)}...
4147
-
4148
- Full response:
4149
- ${preview}`);
4986
+ const parseError = error instanceof Error ? error : new Error(String(error));
4987
+ userPromptErrorLogger.logJsonParseError("LLM._parseJSON - JSON parse/repair failed", text, parseError);
4988
+ throw new Error(`Failed to parse JSON: ${parseError.message}`);
4150
4989
  }
4151
4990
  }
4152
4991
  };
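
The _parseJSON changes above keep the thrown message short and route the full raw response to userPromptErrorLogger instead of embedding a 500-character preview. For reference, a hedged example of what the extract-and-repair path tolerates, inferred from the brace matching and the jsonrepair call shown above:

  // Surrounding prose and minor damage such as a trailing comma are both recovered.
  LLM._parseJSON('Here is the result: {"rows": [1, 2, 3,], "done": true}');
  // -> { rows: [1, 2, 3], done: true }
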
@@ -4494,6 +5333,7 @@ var conversation_search_default = ConversationSearch;
4494
5333
  var BaseLLM = class {
4495
5334
  constructor(config) {
4496
5335
  this.model = config?.model || this.getDefaultModel();
5336
+ this.fastModel = config?.fastModel || this.getDefaultFastModel();
4497
5337
  this.defaultLimit = config?.defaultLimit || 50;
4498
5338
  this.apiKey = config?.apiKey;
4499
5339
  }
@@ -4803,7 +5643,7 @@ ${JSON.stringify(tool.requiredFields || [], null, 2)}`;
4803
5643
  user: prompts.user
4804
5644
  },
4805
5645
  {
4806
- model: this.model,
5646
+ model: this.fastModel,
4807
5647
  maxTokens: 1500,
4808
5648
  temperature: 0.2,
4809
5649
  apiKey: this.getApiKey(apiKey)
@@ -5235,6 +6075,7 @@ ${sql}
5235
6075
  const errorMsg = error instanceof Error ? error.message : String(error);
5236
6076
  logger.error(`[${this.getProviderName()}] Query execution failed (attempt ${attempts}/${MAX_QUERY_ATTEMPTS}): ${errorMsg}`);
5237
6077
  logCollector?.error(`Query failed (attempt ${attempts}/${MAX_QUERY_ATTEMPTS}): ${errorMsg}`);
6078
+ userPromptErrorLogger.logSqlError(sql, error instanceof Error ? error : new Error(errorMsg), Object.keys(params).length > 0 ? Object.values(params) : void 0);
5238
6079
  if (wrappedStreamCallback) {
5239
6080
  wrappedStreamCallback(`\u274C **Query execution failed:**
5240
6081
  \`\`\`
@@ -5325,6 +6166,7 @@ Please try rephrasing your request or contact support.
5325
6166
  const errorMsg = error instanceof Error ? error.message : String(error);
5326
6167
  logger.error(`[${this.getProviderName()}] External tool ${externalTool.name} failed (attempt ${attempts}/${MAX_TOOL_ATTEMPTS}): ${errorMsg}`);
5327
6168
  logCollector?.error(`\u2717 ${externalTool.name} failed: ${errorMsg}`);
6169
+ userPromptErrorLogger.logToolError(externalTool.name, toolInput, error instanceof Error ? error : new Error(errorMsg));
5328
6170
  if (wrappedStreamCallback) {
5329
6171
  wrappedStreamCallback(`\u274C **${externalTool.name} failed:**
5330
6172
  \`\`\`
@@ -5467,6 +6309,13 @@ ${errorMsg}
5467
6309
  const errorMsg = error instanceof Error ? error.message : String(error);
5468
6310
  logger.error(`[${this.getProviderName()}] Error generating text response: ${errorMsg}`);
5469
6311
  logCollector?.error(`Error generating text response: ${errorMsg}`);
6312
+ userPromptErrorLogger.logLlmError(
6313
+ this.getProviderName(),
6314
+ this.model,
6315
+ "generateTextResponse",
6316
+ error instanceof Error ? error : new Error(errorMsg),
6317
+ { userPrompt }
6318
+ );
5470
6319
  errors.push(errorMsg);
5471
6320
  return {
5472
6321
  success: false,
@@ -5669,6 +6518,11 @@ ${errorMsg}
5669
6518
  logger.error(`[${this.getProviderName()}] Error in handleUserRequest: ${errorMsg}`);
5670
6519
  logger.debug(`[${this.getProviderName()}] Error details:`, error);
5671
6520
  logCollector?.error(`Error processing request: ${errorMsg}`);
6521
+ userPromptErrorLogger.logError(
6522
+ "handleUserRequest",
6523
+ error instanceof Error ? error : new Error(errorMsg),
6524
+ { userPrompt }
6525
+ );
5672
6526
  const elapsedTime = Date.now() - startTime;
5673
6527
  logger.info(`[${this.getProviderName()}] Total time taken: ${elapsedTime}ms (${(elapsedTime / 1e3).toFixed(2)}s)`);
5674
6528
  logCollector?.info(`Total time taken: ${elapsedTime}ms (${(elapsedTime / 1e3).toFixed(2)}s)`);
@@ -5707,7 +6561,7 @@ ${errorMsg}
5707
6561
  user: prompts.user
5708
6562
  },
5709
6563
  {
5710
- model: this.model,
6564
+ model: this.fastModel,
5711
6565
  maxTokens: 1200,
5712
6566
  temperature: 0.7,
5713
6567
  apiKey: this.getApiKey(apiKey)
@@ -5744,6 +6598,9 @@ var GroqLLM = class extends BaseLLM {
5744
6598
  getDefaultModel() {
5745
6599
  return "groq/openai/gpt-oss-120b";
5746
6600
  }
6601
+ getDefaultFastModel() {
6602
+ return "groq/llama-3.1-8b-instant";
6603
+ }
5747
6604
  getDefaultApiKey() {
5748
6605
  return process.env.GROQ_API_KEY;
5749
6606
  }
@@ -5763,6 +6620,9 @@ var AnthropicLLM = class extends BaseLLM {
5763
6620
  getDefaultModel() {
5764
6621
  return "anthropic/claude-sonnet-4-5-20250929";
5765
6622
  }
6623
+ getDefaultFastModel() {
6624
+ return "anthropic/claude-haiku-4-5-20251001";
6625
+ }
5766
6626
  getDefaultApiKey() {
5767
6627
  return process.env.ANTHROPIC_API_KEY;
5768
6628
  }
@@ -5782,6 +6642,9 @@ var GeminiLLM = class extends BaseLLM {
5782
6642
  getDefaultModel() {
5783
6643
  return "gemini/gemini-2.5-flash";
5784
6644
  }
6645
+ getDefaultFastModel() {
6646
+ return "gemini/gemini-2.0-flash-exp";
6647
+ }
5785
6648
  getDefaultApiKey() {
5786
6649
  return process.env.GEMINI_API_KEY;
5787
6650
  }
@@ -5801,6 +6664,9 @@ var OpenAILLM = class extends BaseLLM {
5801
6664
  getDefaultModel() {
5802
6665
  return "openai/gpt-4.1";
5803
6666
  }
6667
+ getDefaultFastModel() {
6668
+ return "openai/gpt-4o-mini";
6669
+ }
5804
6670
  getDefaultApiKey() {
5805
6671
  return process.env.OPENAI_API_KEY;
5806
6672
  }
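
The hunks above switch one secondary generation call from this.model to this.fastModel and give each provider subclass a getDefaultFastModel() next to its existing getDefaultModel(). A hedged sketch of that pattern follows, using the Anthropic defaults shown in the diff; the BaseLLM constructor shape is an assumption, only the method names and returned model IDs come from the hunks.

abstract class BaseLLM {
  protected model: string;
  protected fastModel: string;

  constructor(model?: string, fastModel?: string) {
    // Fall back to the per-provider defaults when no explicit model is supplied.
    this.model = model ?? this.getDefaultModel();
    this.fastModel = fastModel ?? this.getDefaultFastModel();
  }

  abstract getDefaultModel(): string;
  abstract getDefaultFastModel(): string;
  abstract getDefaultApiKey(): string | undefined;
}

class AnthropicLLM extends BaseLLM {
  getDefaultModel() { return "anthropic/claude-sonnet-4-5-20250929"; }
  getDefaultFastModel() { return "anthropic/claude-haiku-4-5-20251001"; }
  getDefaultApiKey() { return process.env.ANTHROPIC_API_KEY; }
}

// Secondary calls inside the class (such as the 1200-token, temperature 0.7 call
// in the earlier hunk) then use this.fastModel instead of this.model.
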
@@ -6307,6 +7173,9 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
6307
7173
  const prompt = payload.prompt;
6308
7174
  const SA_RUNTIME = payload.SA_RUNTIME;
6309
7175
  const wsId = userPromptRequest.from.id || "unknown";
7176
+ const promptContext = `User Prompt: ${prompt?.substring(0, 50)}${(prompt?.length || 0) > 50 ? "..." : ""}`;
7177
+ llmUsageLogger.resetLogFile(promptContext);
7178
+ userPromptErrorLogger.resetLogFile(promptContext);
6310
7179
  if (!SA_RUNTIME) {
6311
7180
  errors.push("SA_RUNTIME is required");
6312
7181
  }
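
The hunk above derives a short promptContext label from the incoming prompt and uses it to reset both log files at the start of each request. The truncation rule is reproduced below for clarity; the helper name is illustrative.

// Illustrative helper; only the 50-character truncation mirrors the diff.
const promptLabel = (prompt?: string): string =>
  `User Prompt: ${prompt?.substring(0, 50)}${(prompt?.length || 0) > 50 ? "..." : ""}`;

// A prompt longer than 50 characters is shortened to its first 50 characters
// followed by "...", so the label stays compact.
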
@@ -6380,6 +7249,14 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
6380
7249
  const uiBlockId = existingUiBlockId;
6381
7250
  if (!userResponse.success) {
6382
7251
  logger.error(`User prompt request failed with errors: ${userResponse.errors.join(", ")}`);
7252
+ userPromptErrorLogger.logError("User Response Failed", userResponse.errors.join("\n"), {
7253
+ prompt,
7254
+ uiBlockId,
7255
+ threadId,
7256
+ responseData: userResponse.data
7257
+ });
7258
+ userPromptErrorLogger.writeSummary();
7259
+ llmUsageLogger.logSessionSummary(`FAILED: ${prompt?.substring(0, 30)}`);
6383
7260
  return {
6384
7261
  success: false,
6385
7262
  data: userResponse.data,
@@ -6455,6 +7332,7 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
6455
7332
  }
6456
7333
  }
6457
7334
  }
7335
+ llmUsageLogger.logSessionSummary(prompt?.substring(0, 50));
6458
7336
  return {
6459
7337
  success: userResponse.success,
6460
7338
  data: userResponse.data,
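
The two hunks above close out a request: on failure the error log gets a final entry plus a written summary and the usage log is flagged with a FAILED label, while on success only the usage summary is written. A small sketch of that control flow, with the logger types reduced to just the methods used here (the function itself is illustrative, not part of the package):

type UsageLogger = { logSessionSummary(label?: string): void };
type PromptErrorLogger = {
  logError(op: string, error: Error | string, context?: Record<string, unknown>): void;
  writeSummary(): void;
};

function finishRequest(
  usage: UsageLogger,
  errors: PromptErrorLogger,
  prompt: string,
  ok: boolean,
  failureDetail?: string
) {
  if (!ok) {
    errors.logError("User Response Failed", failureDetail ?? "unknown error", { prompt });
    errors.writeSummary();
    usage.logSessionSummary(`FAILED: ${prompt.substring(0, 30)}`);
    return;
  }
  usage.logSessionSummary(prompt.substring(0, 50));
}
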
@@ -9357,8 +10235,8 @@ function sendDashCompResponse(id, res, sendMessage, clientId) {
9357
10235
 
9358
10236
  // src/auth/user-manager.ts
9359
10237
  init_logger();
9360
- import fs5 from "fs";
9361
- import path4 from "path";
10238
+ import fs7 from "fs";
10239
+ import path6 from "path";
9362
10240
  import os from "os";
9363
10241
  var UserManager = class {
9364
10242
  /**
@@ -9371,7 +10249,7 @@ var UserManager = class {
9371
10249
  this.hasChanged = false;
9372
10250
  this.syncInterval = null;
9373
10251
  this.isInitialized = false;
9374
- this.filePath = path4.join(os.homedir(), ".superatom", "projects", projectId, "users.json");
10252
+ this.filePath = path6.join(os.homedir(), ".superatom", "projects", projectId, "users.json");
9375
10253
  this.syncIntervalMs = syncIntervalMs;
9376
10254
  }
9377
10255
  /**
@@ -9396,20 +10274,20 @@ var UserManager = class {
9396
10274
  */
9397
10275
  async loadUsersFromFile() {
9398
10276
  try {
9399
- const dir = path4.dirname(this.filePath);
9400
- if (!fs5.existsSync(dir)) {
10277
+ const dir = path6.dirname(this.filePath);
10278
+ if (!fs7.existsSync(dir)) {
9401
10279
  logger.info(`Creating directory structure: ${dir}`);
9402
- fs5.mkdirSync(dir, { recursive: true });
10280
+ fs7.mkdirSync(dir, { recursive: true });
9403
10281
  }
9404
- if (!fs5.existsSync(this.filePath)) {
10282
+ if (!fs7.existsSync(this.filePath)) {
9405
10283
  logger.info(`Users file does not exist at ${this.filePath}, creating with empty users`);
9406
10284
  const initialData = { users: [] };
9407
- fs5.writeFileSync(this.filePath, JSON.stringify(initialData, null, 4));
10285
+ fs7.writeFileSync(this.filePath, JSON.stringify(initialData, null, 4));
9408
10286
  this.users = [];
9409
10287
  this.hasChanged = false;
9410
10288
  return;
9411
10289
  }
9412
- const fileContent = fs5.readFileSync(this.filePath, "utf-8");
10290
+ const fileContent = fs7.readFileSync(this.filePath, "utf-8");
9413
10291
  const rawData = JSON.parse(fileContent);
9414
10292
  const validatedData = UsersDataSchema.parse(rawData);
9415
10293
  this.users = validatedData.users;
@@ -9428,16 +10306,16 @@ var UserManager = class {
9428
10306
  return;
9429
10307
  }
9430
10308
  try {
9431
- const dir = path4.dirname(this.filePath);
9432
- if (!fs5.existsSync(dir)) {
9433
- fs5.mkdirSync(dir, { recursive: true });
10309
+ const dir = path6.dirname(this.filePath);
10310
+ if (!fs7.existsSync(dir)) {
10311
+ fs7.mkdirSync(dir, { recursive: true });
9434
10312
  }
9435
10313
  const usersToSave = this.users.map((user) => {
9436
10314
  const { wsIds, ...userWithoutWsIds } = user;
9437
10315
  return userWithoutWsIds;
9438
10316
  });
9439
10317
  const data = { users: usersToSave };
9440
- fs5.writeFileSync(this.filePath, JSON.stringify(data, null, 4));
10318
+ fs7.writeFileSync(this.filePath, JSON.stringify(data, null, 4));
9441
10319
  this.hasChanged = false;
9442
10320
  logger.debug(`Synced ${this.users.length} users to file (wsIds excluded)`);
9443
10321
  } catch (error) {
@@ -9656,8 +10534,8 @@ var UserManager = class {
9656
10534
 
9657
10535
  // src/dashboards/dashboard-manager.ts
9658
10536
  init_logger();
9659
- import fs6 from "fs";
9660
- import path5 from "path";
10537
+ import fs8 from "fs";
10538
+ import path7 from "path";
9661
10539
  import os2 from "os";
9662
10540
  var DashboardManager = class {
9663
10541
  /**
@@ -9666,7 +10544,7 @@ var DashboardManager = class {
9666
10544
  */
9667
10545
  constructor(projectId = "snowflake-dataset") {
9668
10546
  this.projectId = projectId;
9669
- this.dashboardsBasePath = path5.join(
10547
+ this.dashboardsBasePath = path7.join(
9670
10548
  os2.homedir(),
9671
10549
  ".superatom",
9672
10550
  "projects",
@@ -9680,7 +10558,7 @@ var DashboardManager = class {
9680
10558
  * @returns Full path to dashboard data.json file
9681
10559
  */
9682
10560
  getDashboardPath(dashboardId) {
9683
- return path5.join(this.dashboardsBasePath, dashboardId, "data.json");
10561
+ return path7.join(this.dashboardsBasePath, dashboardId, "data.json");
9684
10562
  }
9685
10563
  /**
9686
10564
  * Create a new dashboard
@@ -9690,13 +10568,13 @@ var DashboardManager = class {
9690
10568
  */
9691
10569
  createDashboard(dashboardId, dashboard) {
9692
10570
  const dashboardPath = this.getDashboardPath(dashboardId);
9693
- const dashboardDir = path5.dirname(dashboardPath);
9694
- if (fs6.existsSync(dashboardPath)) {
10571
+ const dashboardDir = path7.dirname(dashboardPath);
10572
+ if (fs8.existsSync(dashboardPath)) {
9695
10573
  throw new Error(`Dashboard '${dashboardId}' already exists`);
9696
10574
  }
9697
10575
  const validated = DSLRendererPropsSchema.parse(dashboard);
9698
- fs6.mkdirSync(dashboardDir, { recursive: true });
9699
- fs6.writeFileSync(dashboardPath, JSON.stringify(validated, null, 4));
10576
+ fs8.mkdirSync(dashboardDir, { recursive: true });
10577
+ fs8.writeFileSync(dashboardPath, JSON.stringify(validated, null, 4));
9700
10578
  logger.info(`Dashboard created: ${dashboardId}`);
9701
10579
  return validated;
9702
10580
  }
@@ -9707,12 +10585,12 @@ var DashboardManager = class {
9707
10585
  */
9708
10586
  getDashboard(dashboardId) {
9709
10587
  const dashboardPath = this.getDashboardPath(dashboardId);
9710
- if (!fs6.existsSync(dashboardPath)) {
10588
+ if (!fs8.existsSync(dashboardPath)) {
9711
10589
  logger.warn(`Dashboard not found: ${dashboardId}`);
9712
10590
  return null;
9713
10591
  }
9714
10592
  try {
9715
- const fileContent = fs6.readFileSync(dashboardPath, "utf-8");
10593
+ const fileContent = fs8.readFileSync(dashboardPath, "utf-8");
9716
10594
  const dashboard = JSON.parse(fileContent);
9717
10595
  const validated = DSLRendererPropsSchema.parse(dashboard);
9718
10596
  return validated;
@@ -9726,16 +10604,16 @@ var DashboardManager = class {
9726
10604
  * @returns Array of dashboard objects with their IDs
9727
10605
  */
9728
10606
  getAllDashboards() {
9729
- if (!fs6.existsSync(this.dashboardsBasePath)) {
9730
- fs6.mkdirSync(this.dashboardsBasePath, { recursive: true });
10607
+ if (!fs8.existsSync(this.dashboardsBasePath)) {
10608
+ fs8.mkdirSync(this.dashboardsBasePath, { recursive: true });
9731
10609
  return [];
9732
10610
  }
9733
10611
  const dashboards = [];
9734
10612
  try {
9735
- const dashboardDirs = fs6.readdirSync(this.dashboardsBasePath);
10613
+ const dashboardDirs = fs8.readdirSync(this.dashboardsBasePath);
9736
10614
  for (const dashboardId of dashboardDirs) {
9737
10615
  const dashboardPath = this.getDashboardPath(dashboardId);
9738
- if (fs6.existsSync(dashboardPath)) {
10616
+ if (fs8.existsSync(dashboardPath)) {
9739
10617
  const dashboard = this.getDashboard(dashboardId);
9740
10618
  if (dashboard) {
9741
10619
  dashboards.push({ dashboardId, dashboard });
@@ -9757,13 +10635,13 @@ var DashboardManager = class {
9757
10635
  */
9758
10636
  updateDashboard(dashboardId, dashboard) {
9759
10637
  const dashboardPath = this.getDashboardPath(dashboardId);
9760
- if (!fs6.existsSync(dashboardPath)) {
10638
+ if (!fs8.existsSync(dashboardPath)) {
9761
10639
  logger.warn(`Dashboard not found for update: ${dashboardId}`);
9762
10640
  return null;
9763
10641
  }
9764
10642
  try {
9765
10643
  const validated = DSLRendererPropsSchema.parse(dashboard);
9766
- fs6.writeFileSync(dashboardPath, JSON.stringify(validated, null, 4));
10644
+ fs8.writeFileSync(dashboardPath, JSON.stringify(validated, null, 4));
9767
10645
  logger.info(`Dashboard updated: ${dashboardId}`);
9768
10646
  return validated;
9769
10647
  } catch (error) {
@@ -9778,13 +10656,13 @@ var DashboardManager = class {
9778
10656
  */
9779
10657
  deleteDashboard(dashboardId) {
9780
10658
  const dashboardPath = this.getDashboardPath(dashboardId);
9781
- const dashboardDir = path5.dirname(dashboardPath);
9782
- if (!fs6.existsSync(dashboardPath)) {
10659
+ const dashboardDir = path7.dirname(dashboardPath);
10660
+ if (!fs8.existsSync(dashboardPath)) {
9783
10661
  logger.warn(`Dashboard not found for deletion: ${dashboardId}`);
9784
10662
  return false;
9785
10663
  }
9786
10664
  try {
9787
- fs6.rmSync(dashboardDir, { recursive: true, force: true });
10665
+ fs8.rmSync(dashboardDir, { recursive: true, force: true });
9788
10666
  logger.info(`Dashboard deleted: ${dashboardId}`);
9789
10667
  return true;
9790
10668
  } catch (error) {
@@ -9799,21 +10677,21 @@ var DashboardManager = class {
9799
10677
  */
9800
10678
  dashboardExists(dashboardId) {
9801
10679
  const dashboardPath = this.getDashboardPath(dashboardId);
9802
- return fs6.existsSync(dashboardPath);
10680
+ return fs8.existsSync(dashboardPath);
9803
10681
  }
9804
10682
  /**
9805
10683
  * Get dashboard count
9806
10684
  * @returns Number of dashboards
9807
10685
  */
9808
10686
  getDashboardCount() {
9809
- if (!fs6.existsSync(this.dashboardsBasePath)) {
10687
+ if (!fs8.existsSync(this.dashboardsBasePath)) {
9810
10688
  return 0;
9811
10689
  }
9812
10690
  try {
9813
- const dashboardDirs = fs6.readdirSync(this.dashboardsBasePath);
10691
+ const dashboardDirs = fs8.readdirSync(this.dashboardsBasePath);
9814
10692
  return dashboardDirs.filter((dir) => {
9815
10693
  const dashboardPath = this.getDashboardPath(dir);
9816
- return fs6.existsSync(dashboardPath);
10694
+ return fs8.existsSync(dashboardPath);
9817
10695
  }).length;
9818
10696
  } catch (error) {
9819
10697
  logger.error("Failed to get dashboard count:", error);
@@ -9824,8 +10702,8 @@ var DashboardManager = class {
9824
10702
 
9825
10703
  // src/reports/report-manager.ts
9826
10704
  init_logger();
9827
- import fs7 from "fs";
9828
- import path6 from "path";
10705
+ import fs9 from "fs";
10706
+ import path8 from "path";
9829
10707
  import os3 from "os";
9830
10708
  var ReportManager = class {
9831
10709
  /**
@@ -9834,7 +10712,7 @@ var ReportManager = class {
9834
10712
  */
9835
10713
  constructor(projectId = "snowflake-dataset") {
9836
10714
  this.projectId = projectId;
9837
- this.reportsBasePath = path6.join(
10715
+ this.reportsBasePath = path8.join(
9838
10716
  os3.homedir(),
9839
10717
  ".superatom",
9840
10718
  "projects",
@@ -9848,7 +10726,7 @@ var ReportManager = class {
9848
10726
  * @returns Full path to report data.json file
9849
10727
  */
9850
10728
  getReportPath(reportId) {
9851
- return path6.join(this.reportsBasePath, reportId, "data.json");
10729
+ return path8.join(this.reportsBasePath, reportId, "data.json");
9852
10730
  }
9853
10731
  /**
9854
10732
  * Create a new report
@@ -9858,13 +10736,13 @@ var ReportManager = class {
9858
10736
  */
9859
10737
  createReport(reportId, report) {
9860
10738
  const reportPath = this.getReportPath(reportId);
9861
- const reportDir = path6.dirname(reportPath);
9862
- if (fs7.existsSync(reportPath)) {
10739
+ const reportDir = path8.dirname(reportPath);
10740
+ if (fs9.existsSync(reportPath)) {
9863
10741
  throw new Error(`Report '${reportId}' already exists`);
9864
10742
  }
9865
10743
  const validated = DSLRendererPropsSchema2.parse(report);
9866
- fs7.mkdirSync(reportDir, { recursive: true });
9867
- fs7.writeFileSync(reportPath, JSON.stringify(validated, null, 4));
10744
+ fs9.mkdirSync(reportDir, { recursive: true });
10745
+ fs9.writeFileSync(reportPath, JSON.stringify(validated, null, 4));
9868
10746
  logger.info(`Report created: ${reportId}`);
9869
10747
  return validated;
9870
10748
  }
@@ -9875,12 +10753,12 @@ var ReportManager = class {
9875
10753
  */
9876
10754
  getReport(reportId) {
9877
10755
  const reportPath = this.getReportPath(reportId);
9878
- if (!fs7.existsSync(reportPath)) {
10756
+ if (!fs9.existsSync(reportPath)) {
9879
10757
  logger.warn(`Report not found: ${reportId}`);
9880
10758
  return null;
9881
10759
  }
9882
10760
  try {
9883
- const fileContent = fs7.readFileSync(reportPath, "utf-8");
10761
+ const fileContent = fs9.readFileSync(reportPath, "utf-8");
9884
10762
  const report = JSON.parse(fileContent);
9885
10763
  const validated = DSLRendererPropsSchema2.parse(report);
9886
10764
  return validated;
@@ -9894,16 +10772,16 @@ var ReportManager = class {
9894
10772
  * @returns Array of report objects with their IDs
9895
10773
  */
9896
10774
  getAllReports() {
9897
- if (!fs7.existsSync(this.reportsBasePath)) {
9898
- fs7.mkdirSync(this.reportsBasePath, { recursive: true });
10775
+ if (!fs9.existsSync(this.reportsBasePath)) {
10776
+ fs9.mkdirSync(this.reportsBasePath, { recursive: true });
9899
10777
  return [];
9900
10778
  }
9901
10779
  const reports = [];
9902
10780
  try {
9903
- const reportDirs = fs7.readdirSync(this.reportsBasePath);
10781
+ const reportDirs = fs9.readdirSync(this.reportsBasePath);
9904
10782
  for (const reportId of reportDirs) {
9905
10783
  const reportPath = this.getReportPath(reportId);
9906
- if (fs7.existsSync(reportPath)) {
10784
+ if (fs9.existsSync(reportPath)) {
9907
10785
  const report = this.getReport(reportId);
9908
10786
  if (report) {
9909
10787
  reports.push({ reportId, report });
@@ -9925,13 +10803,13 @@ var ReportManager = class {
9925
10803
  */
9926
10804
  updateReport(reportId, report) {
9927
10805
  const reportPath = this.getReportPath(reportId);
9928
- if (!fs7.existsSync(reportPath)) {
10806
+ if (!fs9.existsSync(reportPath)) {
9929
10807
  logger.warn(`Report not found for update: ${reportId}`);
9930
10808
  return null;
9931
10809
  }
9932
10810
  try {
9933
10811
  const validated = DSLRendererPropsSchema2.parse(report);
9934
- fs7.writeFileSync(reportPath, JSON.stringify(validated, null, 4));
10812
+ fs9.writeFileSync(reportPath, JSON.stringify(validated, null, 4));
9935
10813
  logger.info(`Report updated: ${reportId}`);
9936
10814
  return validated;
9937
10815
  } catch (error) {
@@ -9946,13 +10824,13 @@ var ReportManager = class {
9946
10824
  */
9947
10825
  deleteReport(reportId) {
9948
10826
  const reportPath = this.getReportPath(reportId);
9949
- const reportDir = path6.dirname(reportPath);
9950
- if (!fs7.existsSync(reportPath)) {
10827
+ const reportDir = path8.dirname(reportPath);
10828
+ if (!fs9.existsSync(reportPath)) {
9951
10829
  logger.warn(`Report not found for deletion: ${reportId}`);
9952
10830
  return false;
9953
10831
  }
9954
10832
  try {
9955
- fs7.rmSync(reportDir, { recursive: true, force: true });
10833
+ fs9.rmSync(reportDir, { recursive: true, force: true });
9956
10834
  logger.info(`Report deleted: ${reportId}`);
9957
10835
  return true;
9958
10836
  } catch (error) {
@@ -9967,21 +10845,21 @@ var ReportManager = class {
9967
10845
  */
9968
10846
  reportExists(reportId) {
9969
10847
  const reportPath = this.getReportPath(reportId);
9970
- return fs7.existsSync(reportPath);
10848
+ return fs9.existsSync(reportPath);
9971
10849
  }
9972
10850
  /**
9973
10851
  * Get report count
9974
10852
  * @returns Number of reports
9975
10853
  */
9976
10854
  getReportCount() {
9977
- if (!fs7.existsSync(this.reportsBasePath)) {
10855
+ if (!fs9.existsSync(this.reportsBasePath)) {
9978
10856
  return 0;
9979
10857
  }
9980
10858
  try {
9981
- const reportDirs = fs7.readdirSync(this.reportsBasePath);
10859
+ const reportDirs = fs9.readdirSync(this.reportsBasePath);
9982
10860
  return reportDirs.filter((dir) => {
9983
10861
  const reportPath = this.getReportPath(dir);
9984
- return fs7.existsSync(reportPath);
10862
+ return fs9.existsSync(reportPath);
9985
10863
  }).length;
9986
10864
  } catch (error) {
9987
10865
  logger.error("Failed to get report count:", error);
@@ -10604,8 +11482,10 @@ export {
10604
11482
  UILogCollector,
10605
11483
  UserManager,
10606
11484
  hybridRerank,
11485
+ llmUsageLogger,
10607
11486
  logger,
10608
11487
  rerankChromaResults,
10609
- rerankConversationResults
11488
+ rerankConversationResults,
11489
+ userPromptErrorLogger
10610
11490
  };
10611
11491
  //# sourceMappingURL=index.mjs.map
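
Both loggers are added to the public export list above, so they can be reached by consumers of the package. A hedged usage sketch; the only names taken from this diff are the two exports and the resetLogFile / logSessionSummary methods seen in earlier hunks, and the argument strings are purely illustrative.

import { llmUsageLogger, userPromptErrorLogger } from "@superatomai/sdk-node";

// Start a fresh pair of log files for an incoming request...
userPromptErrorLogger.resetLogFile("User Prompt: example request");
llmUsageLogger.resetLogFile("User Prompt: example request");

// ...and write the usage summary once the request has been handled.
llmUsageLogger.logSessionSummary("example request");
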