@superatomai/sdk-node 0.0.39 → 0.0.40

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.
package/dist/index.js CHANGED
@@ -1276,13 +1276,16 @@ var init_prompt_loader = __esm({
1276
1276
  const contextMarker = "---\n\n## CONTEXT";
1277
1277
  if (template.system.includes(contextMarker)) {
1278
1278
  const [staticPart, contextPart] = template.system.split(contextMarker);
1279
- logger.debug(`\u2713 Prompt caching enabled for '${promptName}' (static: ${staticPart.length} chars, context: ${contextPart.length} chars)`);
1279
+ const processedStatic = this.replaceVariables(staticPart, variables);
1280
1280
  const processedContext = this.replaceVariables(contextMarker + contextPart, variables);
1281
+ const staticLength = processedStatic.length;
1282
+ const contextLength = processedContext.length;
1283
+ logger.debug(`\u2713 Prompt caching enabled for '${promptName}' (cached: ${staticLength} chars, dynamic: ${contextLength} chars)`);
1281
1284
  return {
1282
1285
  system: [
1283
1286
  {
1284
1287
  type: "text",
1285
- text: staticPart.trim(),
1288
+ text: processedStatic.trim(),
1286
1289
  cache_control: { type: "ephemeral" }
1287
1290
  },
1288
1291
  {
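The caching hunk above now runs variable substitution on the static portion of the prompt before it is cached, and the debug line reports the processed cached/dynamic lengths instead of the raw template lengths. A minimal sketch of the system-blocks shape being returned, assuming the second block (whose body falls outside this hunk) simply carries the processed context:

  // Sketch only: the first block is marked for prompt caching and reused across requests,
  // the second holds per-request context. The second block's fields are assumed, not shown in the hunk.
  const system = [
    { type: "text", text: processedStatic.trim(), cache_control: { type: "ephemeral" } },
    { type: "text", text: processedContext }
  ];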
@@ -1621,9 +1624,11 @@ __export(index_exports, {
1621
1624
  UILogCollector: () => UILogCollector,
1622
1625
  UserManager: () => UserManager,
1623
1626
  hybridRerank: () => hybridRerank,
1627
+ llmUsageLogger: () => llmUsageLogger,
1624
1628
  logger: () => logger,
1625
1629
  rerankChromaResults: () => rerankChromaResults,
1626
- rerankConversationResults: () => rerankConversationResults
1630
+ rerankConversationResults: () => rerankConversationResults,
1631
+ userPromptErrorLogger: () => userPromptErrorLogger
1627
1632
  });
1628
1633
  module.exports = __toCommonJS(index_exports);
1629
1634
 
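Version 0.0.40 also adds llmUsageLogger and userPromptErrorLogger to the package exports. A minimal consumer sketch (CommonJS; the method names are taken from the compiled source further down, so treat this as an assumption about the public surface rather than documented API):

  const { llmUsageLogger, userPromptErrorLogger } = require("@superatomai/sdk-node");

  // Start fresh log files at the beginning of a user request (context string is hypothetical).
  llmUsageLogger.resetLogFile("User Prompt: show revenue by month");
  userPromptErrorLogger.resetLogFile("User Prompt: show revenue by month");

  // ...run SDK calls, then summarize what the request cost and whether anything failed.
  llmUsageLogger.logSessionSummary("show revenue by month");
  if (userPromptErrorLogger.hadErrors()) userPromptErrorLogger.writeSummary();
  console.log(llmUsageLogger.getSessionStats());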
@@ -3422,6 +3427,448 @@ var import_groq_sdk = __toESM(require("groq-sdk"));
3422
3427
  var import_generative_ai = require("@google/generative-ai");
3423
3428
  var import_openai = __toESM(require("openai"));
3424
3429
  var import_jsonrepair = require("jsonrepair");
3430
+
3431
+ // src/utils/llm-usage-logger.ts
3432
+ var import_fs4 = __toESM(require("fs"));
3433
+ var import_path3 = __toESM(require("path"));
3434
+ var PRICING = {
3435
+ // Anthropic
3436
+ "claude-3-5-sonnet-20241022": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
3437
+ "claude-sonnet-4-5": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
3438
+ "claude-sonnet-4-5-20250929": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
3439
+ "claude-3-5-haiku-20241022": { input: 0.8, output: 4, cacheRead: 0.08, cacheWrite: 1 },
3440
+ "claude-haiku-4-5-20251001": { input: 0.8, output: 4, cacheRead: 0.08, cacheWrite: 1 },
3441
+ "claude-3-opus-20240229": { input: 15, output: 75, cacheRead: 1.5, cacheWrite: 18.75 },
3442
+ "claude-3-sonnet-20240229": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
3443
+ "claude-3-haiku-20240307": { input: 0.25, output: 1.25, cacheRead: 0.03, cacheWrite: 0.3 },
3444
+ // OpenAI
3445
+ "gpt-4o": { input: 2.5, output: 10 },
3446
+ "gpt-4o-mini": { input: 0.15, output: 0.6 },
3447
+ "gpt-4-turbo": { input: 10, output: 30 },
3448
+ "gpt-4": { input: 30, output: 60 },
3449
+ "gpt-3.5-turbo": { input: 0.5, output: 1.5 },
3450
+ // Gemini
3451
+ "gemini-1.5-pro": { input: 1.25, output: 5 },
3452
+ "gemini-1.5-flash": { input: 0.075, output: 0.3 },
3453
+ "gemini-2.0-flash-exp": { input: 0.1, output: 0.4 },
3454
+ // Groq (very cheap)
3455
+ "llama-3.3-70b-versatile": { input: 0.59, output: 0.79 },
3456
+ "llama-3.1-70b-versatile": { input: 0.59, output: 0.79 },
3457
+ "llama-3.1-8b-instant": { input: 0.05, output: 0.08 },
3458
+ "mixtral-8x7b-32768": { input: 0.24, output: 0.24 }
3459
+ };
3460
+ var DEFAULT_PRICING = { input: 3, output: 15 };
3461
+ var LLMUsageLogger = class {
3462
+ constructor() {
3463
+ this.logStream = null;
3464
+ this.sessionStats = {
3465
+ totalCalls: 0,
3466
+ totalInputTokens: 0,
3467
+ totalOutputTokens: 0,
3468
+ totalCacheReadTokens: 0,
3469
+ totalCacheWriteTokens: 0,
3470
+ totalCostUSD: 0,
3471
+ totalDurationMs: 0
3472
+ };
3473
+ this.logPath = process.env.LLM_USAGE_LOG_PATH || import_path3.default.join(process.cwd(), "llm-usage-logs");
3474
+ this.enabled = process.env.LLM_USAGE_LOGGING !== "false";
3475
+ if (this.enabled) {
3476
+ this.initLogStream();
3477
+ }
3478
+ }
3479
+ initLogStream() {
3480
+ try {
3481
+ const dir = import_path3.default.dirname(this.logPath);
3482
+ if (!import_fs4.default.existsSync(dir)) {
3483
+ import_fs4.default.mkdirSync(dir, { recursive: true });
3484
+ }
3485
+ this.logStream = import_fs4.default.createWriteStream(this.logPath, { flags: "a" });
3486
+ if (!import_fs4.default.existsSync(this.logPath) || import_fs4.default.statSync(this.logPath).size === 0) {
3487
+ this.writeHeader();
3488
+ }
3489
+ } catch (error) {
3490
+ console.error("[LLM-Usage-Logger] Failed to initialize log stream:", error);
3491
+ this.enabled = false;
3492
+ }
3493
+ }
3494
+ writeHeader() {
3495
+ const header = `
3496
+ ================================================================================
3497
+ LLM USAGE LOG - Session Started: ${(/* @__PURE__ */ new Date()).toISOString()}
3498
+ ================================================================================
3499
+ Format: [TIMESTAMP] [REQUEST_ID] [PROVIDER/MODEL] [METHOD]
3500
+ Tokens: IN=input OUT=output CACHE_R=cache_read CACHE_W=cache_write TOTAL=total
3501
+ Cost: $X.XXXXXX | Time: Xms
3502
+ ================================================================================
3503
+
3504
+ `;
3505
+ this.logStream?.write(header);
3506
+ }
3507
+ /**
3508
+ * Calculate cost based on token usage and model
3509
+ */
3510
+ calculateCost(model, inputTokens, outputTokens, cacheReadTokens = 0, cacheWriteTokens = 0) {
3511
+ let pricing = PRICING[model];
3512
+ if (!pricing) {
3513
+ const modelLower = model.toLowerCase();
3514
+ for (const [key, value] of Object.entries(PRICING)) {
3515
+ if (modelLower.includes(key.toLowerCase()) || key.toLowerCase().includes(modelLower)) {
3516
+ pricing = value;
3517
+ break;
3518
+ }
3519
+ }
3520
+ }
3521
+ pricing = pricing || DEFAULT_PRICING;
3522
+ const inputCost = inputTokens / 1e6 * pricing.input;
3523
+ const outputCost = outputTokens / 1e6 * pricing.output;
3524
+ const cacheReadCost = cacheReadTokens / 1e6 * (pricing.cacheRead || pricing.input * 0.1);
3525
+ const cacheWriteCost = cacheWriteTokens / 1e6 * (pricing.cacheWrite || pricing.input * 1.25);
3526
+ return inputCost + outputCost + cacheReadCost + cacheWriteCost;
3527
+ }
3528
+ /**
3529
+ * Log an LLM API call
3530
+ */
3531
+ log(entry) {
3532
+ if (!this.enabled) return;
3533
+ this.sessionStats.totalCalls++;
3534
+ this.sessionStats.totalInputTokens += entry.inputTokens;
3535
+ this.sessionStats.totalOutputTokens += entry.outputTokens;
3536
+ this.sessionStats.totalCacheReadTokens += entry.cacheReadTokens || 0;
3537
+ this.sessionStats.totalCacheWriteTokens += entry.cacheWriteTokens || 0;
3538
+ this.sessionStats.totalCostUSD += entry.costUSD;
3539
+ this.sessionStats.totalDurationMs += entry.durationMs;
3540
+ const cacheInfo = entry.cacheReadTokens || entry.cacheWriteTokens ? ` CACHE_R=${entry.cacheReadTokens || 0} CACHE_W=${entry.cacheWriteTokens || 0}` : "";
3541
+ const toolInfo = entry.toolCalls ? ` | Tools: ${entry.toolCalls}` : "";
3542
+ const errorInfo = entry.error ? ` | ERROR: ${entry.error}` : "";
3543
+ const status = entry.success ? "\u2713" : "\u2717";
3544
+ let cacheStatus = "";
3545
+ if (entry.cacheReadTokens && entry.cacheReadTokens > 0) {
3546
+ const savedCost = entry.cacheReadTokens / 1e6 * 2.7;
3547
+ cacheStatus = ` \u26A1 CACHE HIT! Saved ~$${savedCost.toFixed(4)}`;
3548
+ } else if (entry.cacheWriteTokens && entry.cacheWriteTokens > 0) {
3549
+ cacheStatus = " \u{1F4DD} Cache created (next request will be cheaper)";
3550
+ }
3551
+ const logLine = `[${entry.timestamp}] [${entry.requestId}] ${status} ${entry.provider}/${entry.model} [${entry.method}]
3552
+ Tokens: IN=${entry.inputTokens} OUT=${entry.outputTokens}${cacheInfo} TOTAL=${entry.totalTokens}
3553
+ Cost: $${entry.costUSD.toFixed(6)} | Time: ${entry.durationMs}ms${toolInfo}${errorInfo}${cacheStatus}
3554
+ `;
3555
+ this.logStream?.write(logLine);
3556
+ if (entry.cacheReadTokens && entry.cacheReadTokens > 0) {
3557
+ console.log(`[LLM] \u26A1 CACHE HIT: ${entry.cacheReadTokens.toLocaleString()} tokens read from cache (${entry.method})`);
3558
+ } else if (entry.cacheWriteTokens && entry.cacheWriteTokens > 0) {
3559
+ console.log(`[LLM] \u{1F4DD} CACHE WRITE: ${entry.cacheWriteTokens.toLocaleString()} tokens cached for future requests (${entry.method})`);
3560
+ }
3561
+ if (process.env.SUPERATOM_LOG_LEVEL === "verbose") {
3562
+ console.log("\n[LLM-Usage]", logLine);
3563
+ }
3564
+ }
3565
+ /**
3566
+ * Log session summary (call at end of request)
3567
+ */
3568
+ logSessionSummary(requestContext) {
3569
+ if (!this.enabled || this.sessionStats.totalCalls === 0) return;
3570
+ const cacheReadSavings = this.sessionStats.totalCacheReadTokens / 1e6 * 2.7;
3571
+ const hasCaching = this.sessionStats.totalCacheReadTokens > 0 || this.sessionStats.totalCacheWriteTokens > 0;
3572
+ let cacheSection = "";
3573
+ if (hasCaching) {
3574
+ cacheSection = `
3575
+ Cache Statistics:
3576
+ Cache Read Tokens: ${this.sessionStats.totalCacheReadTokens.toLocaleString()}${this.sessionStats.totalCacheReadTokens > 0 ? " \u26A1" : ""}
3577
+ Cache Write Tokens: ${this.sessionStats.totalCacheWriteTokens.toLocaleString()}${this.sessionStats.totalCacheWriteTokens > 0 ? " \u{1F4DD}" : ""}
3578
+ Estimated Savings: $${cacheReadSavings.toFixed(4)}`;
3579
+ }
3580
+ const summary = `
3581
+ --------------------------------------------------------------------------------
3582
+ SESSION SUMMARY${requestContext ? ` (${requestContext})` : ""}
3583
+ --------------------------------------------------------------------------------
3584
+ Total LLM Calls: ${this.sessionStats.totalCalls}
3585
+ Total Input Tokens: ${this.sessionStats.totalInputTokens.toLocaleString()}
3586
+ Total Output Tokens: ${this.sessionStats.totalOutputTokens.toLocaleString()}
3587
+ Total Tokens: ${(this.sessionStats.totalInputTokens + this.sessionStats.totalOutputTokens).toLocaleString()}
3588
+ Total Cost: $${this.sessionStats.totalCostUSD.toFixed(6)}
3589
+ Total Time: ${this.sessionStats.totalDurationMs}ms (${(this.sessionStats.totalDurationMs / 1e3).toFixed(2)}s)
3590
+ Avg Cost/Call: $${(this.sessionStats.totalCostUSD / this.sessionStats.totalCalls).toFixed(6)}
3591
+ Avg Time/Call: ${Math.round(this.sessionStats.totalDurationMs / this.sessionStats.totalCalls)}ms${cacheSection}
3592
+ --------------------------------------------------------------------------------
3593
+
3594
+ `;
3595
+ this.logStream?.write(summary);
3596
+ console.log("\n[LLM-Usage] Session Summary:");
3597
+ console.log(` Calls: ${this.sessionStats.totalCalls} | Tokens: ${(this.sessionStats.totalInputTokens + this.sessionStats.totalOutputTokens).toLocaleString()} | Cost: $${this.sessionStats.totalCostUSD.toFixed(4)} | Time: ${(this.sessionStats.totalDurationMs / 1e3).toFixed(2)}s`);
3598
+ if (hasCaching) {
3599
+ console.log(` Cache: ${this.sessionStats.totalCacheReadTokens.toLocaleString()} read, ${this.sessionStats.totalCacheWriteTokens.toLocaleString()} written | Savings: ~$${cacheReadSavings.toFixed(4)}`);
3600
+ }
3601
+ }
3602
+ /**
3603
+ * Reset session stats (call at start of new user request)
3604
+ */
3605
+ resetSession() {
3606
+ this.sessionStats = {
3607
+ totalCalls: 0,
3608
+ totalInputTokens: 0,
3609
+ totalOutputTokens: 0,
3610
+ totalCacheReadTokens: 0,
3611
+ totalCacheWriteTokens: 0,
3612
+ totalCostUSD: 0,
3613
+ totalDurationMs: 0
3614
+ };
3615
+ }
3616
+ /**
3617
+ * Reset the log file for a new request (clears previous logs)
3618
+ * Call this at the start of each USER_PROMPT_REQ
3619
+ */
3620
+ resetLogFile(requestContext) {
3621
+ if (!this.enabled) return;
3622
+ try {
3623
+ if (this.logStream) {
3624
+ this.logStream.end();
3625
+ this.logStream = null;
3626
+ }
3627
+ this.logStream = import_fs4.default.createWriteStream(this.logPath, { flags: "w" });
3628
+ const header = `
3629
+ ================================================================================
3630
+ LLM USAGE LOG - Request Started: ${(/* @__PURE__ */ new Date()).toISOString()}
3631
+ ${requestContext ? `Context: ${requestContext}` : ""}
3632
+ ================================================================================
3633
+ Format: [TIMESTAMP] [REQUEST_ID] [PROVIDER/MODEL] [METHOD]
3634
+ Tokens: IN=input OUT=output CACHE_R=cache_read CACHE_W=cache_write TOTAL=total
3635
+ Cost: $X.XXXXXX | Time: Xms
3636
+ ================================================================================
3637
+
3638
+ `;
3639
+ this.logStream.write(header);
3640
+ this.resetSession();
3641
+ console.log(`[LLM-Usage] Log file reset for new request: ${this.logPath}`);
3642
+ } catch (error) {
3643
+ console.error("[LLM-Usage-Logger] Failed to reset log file:", error);
3644
+ }
3645
+ }
3646
+ /**
3647
+ * Get current session stats
3648
+ */
3649
+ getSessionStats() {
3650
+ return { ...this.sessionStats };
3651
+ }
3652
+ /**
3653
+ * Generate a unique request ID
3654
+ */
3655
+ generateRequestId() {
3656
+ return `req-${Date.now()}-${Math.random().toString(36).substring(2, 8)}`;
3657
+ }
3658
+ };
3659
+ var llmUsageLogger = new LLMUsageLogger();
3660
+
3661
+ // src/utils/user-prompt-error-logger.ts
3662
+ var import_fs5 = __toESM(require("fs"));
3663
+ var import_path4 = __toESM(require("path"));
3664
+ var UserPromptErrorLogger = class {
3665
+ constructor() {
3666
+ this.logStream = null;
3667
+ this.hasErrors = false;
3668
+ this.logPath = process.env.USER_PROMPT_ERROR_LOG_PATH || import_path4.default.join(process.cwd(), "user-prompt-req-errors");
3669
+ this.enabled = process.env.USER_PROMPT_ERROR_LOGGING !== "false";
3670
+ }
3671
+ /**
3672
+ * Reset the error log file for a new request
3673
+ */
3674
+ resetLogFile(requestContext) {
3675
+ if (!this.enabled) return;
3676
+ try {
3677
+ if (this.logStream) {
3678
+ this.logStream.end();
3679
+ this.logStream = null;
3680
+ }
3681
+ const dir = import_path4.default.dirname(this.logPath);
3682
+ if (dir !== "." && !import_fs5.default.existsSync(dir)) {
3683
+ import_fs5.default.mkdirSync(dir, { recursive: true });
3684
+ }
3685
+ this.logStream = import_fs5.default.createWriteStream(this.logPath, { flags: "w" });
3686
+ this.hasErrors = false;
3687
+ const header = `================================================================================
3688
+ USER PROMPT REQUEST ERROR LOG
3689
+ Request Started: ${(/* @__PURE__ */ new Date()).toISOString()}
3690
+ ${requestContext ? `Context: ${requestContext}` : ""}
3691
+ ================================================================================
3692
+
3693
+ `;
3694
+ this.logStream.write(header);
3695
+ } catch (error) {
3696
+ console.error("[UserPromptErrorLogger] Failed to reset log file:", error);
3697
+ }
3698
+ }
3699
+ /**
3700
+ * Log a JSON parse error with the raw string that failed
3701
+ */
3702
+ logJsonParseError(context, rawString, error) {
3703
+ if (!this.enabled) return;
3704
+ this.hasErrors = true;
3705
+ const entry = `
3706
+ --------------------------------------------------------------------------------
3707
+ [${(/* @__PURE__ */ new Date()).toISOString()}] JSON PARSE ERROR
3708
+ --------------------------------------------------------------------------------
3709
+ Context: ${context}
3710
+ Error: ${error.message}
3711
+
3712
+ Raw String (${rawString.length} chars):
3713
+ --------------------------------------------------------------------------------
3714
+ ${rawString}
3715
+ --------------------------------------------------------------------------------
3716
+
3717
+ Stack Trace:
3718
+ ${error.stack || "No stack trace available"}
3719
+
3720
+ `;
3721
+ this.write(entry);
3722
+ console.error(`[UserPromptError] JSON Parse Error in ${context}: ${error.message}`);
3723
+ }
3724
+ /**
3725
+ * Log a general error with full details
3726
+ */
3727
+ logError(context, error, additionalData) {
3728
+ if (!this.enabled) return;
3729
+ this.hasErrors = true;
3730
+ const errorMessage = error instanceof Error ? error.message : error;
3731
+ const errorStack = error instanceof Error ? error.stack : void 0;
3732
+ let entry = `
3733
+ --------------------------------------------------------------------------------
3734
+ [${(/* @__PURE__ */ new Date()).toISOString()}] ERROR
3735
+ --------------------------------------------------------------------------------
3736
+ Context: ${context}
3737
+ Error: ${errorMessage}
3738
+ `;
3739
+ if (additionalData) {
3740
+ entry += `
3741
+ Additional Data:
3742
+ ${JSON.stringify(additionalData, null, 2)}
3743
+ `;
3744
+ }
3745
+ if (errorStack) {
3746
+ entry += `
3747
+ Stack Trace:
3748
+ ${errorStack}
3749
+ `;
3750
+ }
3751
+ entry += `--------------------------------------------------------------------------------
3752
+
3753
+ `;
3754
+ this.write(entry);
3755
+ console.error(`[UserPromptError] ${context}: ${errorMessage}`);
3756
+ }
3757
+ /**
3758
+ * Log a SQL query error with the full query
3759
+ */
3760
+ logSqlError(query, error, params) {
3761
+ if (!this.enabled) return;
3762
+ this.hasErrors = true;
3763
+ const errorMessage = error instanceof Error ? error.message : error;
3764
+ const entry = `
3765
+ --------------------------------------------------------------------------------
3766
+ [${(/* @__PURE__ */ new Date()).toISOString()}] SQL QUERY ERROR
3767
+ --------------------------------------------------------------------------------
3768
+ Error: ${errorMessage}
3769
+
3770
+ Query (${query.length} chars):
3771
+ --------------------------------------------------------------------------------
3772
+ ${query}
3773
+ --------------------------------------------------------------------------------
3774
+ ${params ? `
3775
+ Parameters: ${JSON.stringify(params)}` : ""}
3776
+
3777
+ `;
3778
+ this.write(entry);
3779
+ console.error(`[UserPromptError] SQL Error: ${errorMessage}`);
3780
+ }
3781
+ /**
3782
+ * Log an LLM API error
3783
+ */
3784
+ logLlmError(provider, model, method, error, requestData) {
3785
+ if (!this.enabled) return;
3786
+ this.hasErrors = true;
3787
+ const errorMessage = error instanceof Error ? error.message : error;
3788
+ const errorStack = error instanceof Error ? error.stack : void 0;
3789
+ let entry = `
3790
+ --------------------------------------------------------------------------------
3791
+ [${(/* @__PURE__ */ new Date()).toISOString()}] LLM API ERROR
3792
+ --------------------------------------------------------------------------------
3793
+ Provider: ${provider}
3794
+ Model: ${model}
3795
+ Method: ${method}
3796
+ Error: ${errorMessage}
3797
+ `;
3798
+ if (requestData) {
3799
+ const dataStr = JSON.stringify(requestData, null, 2);
3800
+ const truncated = dataStr.length > 5e3 ? dataStr.substring(0, 5e3) + "\n... [truncated]" : dataStr;
3801
+ entry += `
3802
+ Request Data:
3803
+ ${truncated}
3804
+ `;
3805
+ }
3806
+ if (errorStack) {
3807
+ entry += `
3808
+ Stack Trace:
3809
+ ${errorStack}
3810
+ `;
3811
+ }
3812
+ entry += `--------------------------------------------------------------------------------
3813
+
3814
+ `;
3815
+ this.write(entry);
3816
+ console.error(`[UserPromptError] LLM Error (${provider}/${model}): ${errorMessage}`);
3817
+ }
3818
+ /**
3819
+ * Log tool execution error
3820
+ */
3821
+ logToolError(toolName, toolInput, error) {
3822
+ if (!this.enabled) return;
3823
+ this.hasErrors = true;
3824
+ const errorMessage = error instanceof Error ? error.message : error;
3825
+ const errorStack = error instanceof Error ? error.stack : void 0;
3826
+ const entry = `
3827
+ --------------------------------------------------------------------------------
3828
+ [${(/* @__PURE__ */ new Date()).toISOString()}] TOOL EXECUTION ERROR
3829
+ --------------------------------------------------------------------------------
3830
+ Tool: ${toolName}
3831
+ Error: ${errorMessage}
3832
+
3833
+ Tool Input:
3834
+ ${JSON.stringify(toolInput, null, 2)}
3835
+ ${errorStack ? `
3836
+ Stack Trace:
3837
+ ${errorStack}` : ""}
3838
+ --------------------------------------------------------------------------------
3839
+
3840
+ `;
3841
+ this.write(entry);
3842
+ console.error(`[UserPromptError] Tool Error (${toolName}): ${errorMessage}`);
3843
+ }
3844
+ /**
3845
+ * Write final summary if there were errors
3846
+ */
3847
+ writeSummary() {
3848
+ if (!this.enabled || !this.hasErrors) return;
3849
+ const summary = `
3850
+ ================================================================================
3851
+ REQUEST COMPLETED WITH ERRORS
3852
+ Time: ${(/* @__PURE__ */ new Date()).toISOString()}
3853
+ ================================================================================
3854
+ `;
3855
+ this.write(summary);
3856
+ }
3857
+ /**
3858
+ * Check if any errors were logged
3859
+ */
3860
+ hadErrors() {
3861
+ return this.hasErrors;
3862
+ }
3863
+ write(content) {
3864
+ if (this.logStream) {
3865
+ this.logStream.write(content);
3866
+ }
3867
+ }
3868
+ };
3869
+ var userPromptErrorLogger = new UserPromptErrorLogger();
3870
+
3871
+ // src/llm.ts
3425
3872
  var LLM = class {
3426
3873
  /* Get a complete text response from an LLM (Anthropic or Groq) */
3427
3874
  static async text(messages, options = {}) {
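calculateCost in the logger above prices tokens per million against the PRICING table, falling back to 10% of the input rate for cache reads and 125% for cache writes when a model has no explicit cache pricing, and to DEFAULT_PRICING for unknown models. A worked example with illustrative token counts:

  // claude-sonnet-4-5: $3/M input, $15/M output, $0.30/M cache read (see PRICING above)
  // 10,000 input + 2,000 output + 50,000 cache-read tokens:
  //   10000/1e6 * 3    = 0.030
  //    2000/1e6 * 15   = 0.030
  //   50000/1e6 * 0.30 = 0.015  -> total ≈ $0.075
  const cost = llmUsageLogger.calculateCost("claude-sonnet-4-5", 10000, 2000, 50000, 0);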
@@ -3564,68 +4011,156 @@ var LLM = class {
3564
4011
  // ANTHROPIC IMPLEMENTATION
3565
4012
  // ============================================================
3566
4013
  static async _anthropicText(messages, modelName, options) {
4014
+ const startTime = Date.now();
4015
+ const requestId = llmUsageLogger.generateRequestId();
3567
4016
  const apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY || "";
3568
4017
  const client = new import_sdk.default({
3569
4018
  apiKey
3570
4019
  });
3571
- const response = await client.messages.create({
3572
- model: modelName,
3573
- max_tokens: options.maxTokens || 1e3,
3574
- temperature: options.temperature,
3575
- system: this._normalizeSystemPrompt(messages.sys),
3576
- messages: [{
3577
- role: "user",
3578
- content: messages.user
3579
- }]
3580
- });
3581
- const textBlock = response.content.find((block) => block.type === "text");
3582
- return textBlock?.type === "text" ? textBlock.text : "";
4020
+ try {
4021
+ const response = await client.messages.create({
4022
+ model: modelName,
4023
+ max_tokens: options.maxTokens || 1e3,
4024
+ temperature: options.temperature,
4025
+ system: this._normalizeSystemPrompt(messages.sys),
4026
+ messages: [{
4027
+ role: "user",
4028
+ content: messages.user
4029
+ }]
4030
+ });
4031
+ const durationMs = Date.now() - startTime;
4032
+ const usage = response.usage;
4033
+ const inputTokens = usage?.input_tokens || 0;
4034
+ const outputTokens = usage?.output_tokens || 0;
4035
+ const cacheReadTokens = usage?.cache_read_input_tokens || 0;
4036
+ const cacheWriteTokens = usage?.cache_creation_input_tokens || 0;
4037
+ llmUsageLogger.log({
4038
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4039
+ requestId,
4040
+ provider: "anthropic",
4041
+ model: modelName,
4042
+ method: "text",
4043
+ inputTokens,
4044
+ outputTokens,
4045
+ cacheReadTokens,
4046
+ cacheWriteTokens,
4047
+ totalTokens: inputTokens + outputTokens + cacheReadTokens + cacheWriteTokens,
4048
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens, cacheReadTokens, cacheWriteTokens),
4049
+ durationMs,
4050
+ success: true
4051
+ });
4052
+ const textBlock = response.content.find((block) => block.type === "text");
4053
+ return textBlock?.type === "text" ? textBlock.text : "";
4054
+ } catch (error) {
4055
+ const durationMs = Date.now() - startTime;
4056
+ llmUsageLogger.log({
4057
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4058
+ requestId,
4059
+ provider: "anthropic",
4060
+ model: modelName,
4061
+ method: "text",
4062
+ inputTokens: 0,
4063
+ outputTokens: 0,
4064
+ totalTokens: 0,
4065
+ costUSD: 0,
4066
+ durationMs,
4067
+ success: false,
4068
+ error: error instanceof Error ? error.message : String(error)
4069
+ });
4070
+ throw error;
4071
+ }
3583
4072
  }
3584
4073
  static async _anthropicStream(messages, modelName, options, json) {
4074
+ const startTime = Date.now();
4075
+ const requestId = llmUsageLogger.generateRequestId();
3585
4076
  const apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY || "";
3586
4077
  const client = new import_sdk.default({
3587
4078
  apiKey
3588
4079
  });
3589
- const apiMessages = [{
3590
- role: "user",
3591
- content: messages.user
3592
- }];
3593
- const prefill = messages.prefill || (json ? "{" : void 0);
3594
- if (prefill) {
3595
- apiMessages.push({
3596
- role: "assistant",
3597
- content: prefill
4080
+ try {
4081
+ const apiMessages = [{
4082
+ role: "user",
4083
+ content: messages.user
4084
+ }];
4085
+ const prefill = messages.prefill || (json ? "{" : void 0);
4086
+ if (prefill) {
4087
+ apiMessages.push({
4088
+ role: "assistant",
4089
+ content: prefill
4090
+ });
4091
+ }
4092
+ const stream = await client.messages.create({
4093
+ model: modelName,
4094
+ max_tokens: options.maxTokens || 1e3,
4095
+ temperature: options.temperature,
4096
+ system: this._normalizeSystemPrompt(messages.sys),
4097
+ messages: apiMessages,
4098
+ stream: true
3598
4099
  });
3599
- }
3600
- const stream = await client.messages.create({
3601
- model: modelName,
3602
- max_tokens: options.maxTokens || 1e3,
3603
- temperature: options.temperature,
3604
- system: this._normalizeSystemPrompt(messages.sys),
3605
- messages: apiMessages,
3606
- stream: true
3607
- });
3608
- let fullText = prefill || "";
3609
- let usage = null;
3610
- for await (const chunk of stream) {
3611
- if (chunk.type === "content_block_delta" && chunk.delta.type === "text_delta") {
3612
- const text = chunk.delta.text;
3613
- fullText += text;
3614
- if (options.partial) {
3615
- options.partial(text);
4100
+ let fullText = prefill || "";
4101
+ let usage = null;
4102
+ let inputTokens = 0;
4103
+ let outputTokens = 0;
4104
+ let cacheReadTokens = 0;
4105
+ let cacheWriteTokens = 0;
4106
+ for await (const chunk of stream) {
4107
+ if (chunk.type === "content_block_delta" && chunk.delta.type === "text_delta") {
4108
+ const text = chunk.delta.text;
4109
+ fullText += text;
4110
+ if (options.partial) {
4111
+ options.partial(text);
4112
+ }
4113
+ } else if (chunk.type === "message_start" && chunk.message?.usage) {
4114
+ const msgUsage = chunk.message.usage;
4115
+ inputTokens = msgUsage.input_tokens || 0;
4116
+ cacheReadTokens = msgUsage.cache_read_input_tokens || 0;
4117
+ cacheWriteTokens = msgUsage.cache_creation_input_tokens || 0;
4118
+ } else if (chunk.type === "message_delta" && chunk.usage) {
4119
+ usage = chunk.usage;
4120
+ outputTokens = usage.output_tokens || 0;
3616
4121
  }
3617
- } else if (chunk.type === "message_delta" && chunk.usage) {
3618
- usage = chunk.usage;
3619
4122
  }
4123
+ const durationMs = Date.now() - startTime;
4124
+ llmUsageLogger.log({
4125
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4126
+ requestId,
4127
+ provider: "anthropic",
4128
+ model: modelName,
4129
+ method: "stream",
4130
+ inputTokens,
4131
+ outputTokens,
4132
+ cacheReadTokens,
4133
+ cacheWriteTokens,
4134
+ totalTokens: inputTokens + outputTokens + cacheReadTokens + cacheWriteTokens,
4135
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens, cacheReadTokens, cacheWriteTokens),
4136
+ durationMs,
4137
+ success: true
4138
+ });
4139
+ if (json) {
4140
+ return this._parseJSON(fullText);
4141
+ }
4142
+ return fullText;
4143
+ } catch (error) {
4144
+ const durationMs = Date.now() - startTime;
4145
+ llmUsageLogger.log({
4146
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4147
+ requestId,
4148
+ provider: "anthropic",
4149
+ model: modelName,
4150
+ method: "stream",
4151
+ inputTokens: 0,
4152
+ outputTokens: 0,
4153
+ totalTokens: 0,
4154
+ costUSD: 0,
4155
+ durationMs,
4156
+ success: false,
4157
+ error: error instanceof Error ? error.message : String(error)
4158
+ });
4159
+ throw error;
3620
4160
  }
3621
- if (usage) {
3622
- }
3623
- if (json) {
3624
- return this._parseJSON(fullText);
3625
- }
3626
- return fullText;
3627
4161
  }
3628
4162
  static async _anthropicStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
4163
+ const methodStartTime = Date.now();
3629
4164
  const apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY || "";
3630
4165
  const client = new import_sdk.default({
3631
4166
  apiKey
@@ -3636,8 +4171,15 @@ var LLM = class {
3636
4171
  }];
3637
4172
  let iterations = 0;
3638
4173
  let finalText = "";
4174
+ let totalToolCalls = 0;
4175
+ let totalInputTokens = 0;
4176
+ let totalOutputTokens = 0;
4177
+ let totalCacheReadTokens = 0;
4178
+ let totalCacheWriteTokens = 0;
3639
4179
  while (iterations < maxIterations) {
3640
4180
  iterations++;
4181
+ const iterationStartTime = Date.now();
4182
+ const requestId = llmUsageLogger.generateRequestId();
3641
4183
  const stream = await client.messages.create({
3642
4184
  model: modelName,
3643
4185
  max_tokens: options.maxTokens || 4e3,
@@ -3652,12 +4194,21 @@ var LLM = class {
3652
4194
  const contentBlocks = [];
3653
4195
  let currentTextBlock = "";
3654
4196
  let currentToolUse = null;
3655
- let usage = null;
4197
+ let inputTokens = 0;
4198
+ let outputTokens = 0;
4199
+ let cacheReadTokens = 0;
4200
+ let cacheWriteTokens = 0;
3656
4201
  for await (const chunk of stream) {
3657
4202
  if (chunk.type === "message_start") {
3658
4203
  contentBlocks.length = 0;
3659
4204
  currentTextBlock = "";
3660
4205
  currentToolUse = null;
4206
+ const msgUsage = chunk.message?.usage;
4207
+ if (msgUsage) {
4208
+ inputTokens = msgUsage.input_tokens || 0;
4209
+ cacheReadTokens = msgUsage.cache_read_input_tokens || 0;
4210
+ cacheWriteTokens = msgUsage.cache_creation_input_tokens || 0;
4211
+ }
3661
4212
  }
3662
4213
  if (chunk.type === "content_block_start") {
3663
4214
  if (chunk.content_block.type === "text") {
@@ -3704,15 +4255,36 @@ var LLM = class {
3704
4255
  if (chunk.type === "message_delta") {
3705
4256
  stopReason = chunk.delta.stop_reason || stopReason;
3706
4257
  if (chunk.usage) {
3707
- usage = chunk.usage;
4258
+ outputTokens = chunk.usage.output_tokens || 0;
3708
4259
  }
3709
4260
  }
3710
4261
  if (chunk.type === "message_stop") {
3711
4262
  break;
3712
4263
  }
3713
4264
  }
3714
- if (usage) {
3715
- }
4265
+ const iterationDuration = Date.now() - iterationStartTime;
4266
+ const toolUsesInIteration = contentBlocks.filter((block) => block.type === "tool_use").length;
4267
+ totalToolCalls += toolUsesInIteration;
4268
+ totalInputTokens += inputTokens;
4269
+ totalOutputTokens += outputTokens;
4270
+ totalCacheReadTokens += cacheReadTokens;
4271
+ totalCacheWriteTokens += cacheWriteTokens;
4272
+ llmUsageLogger.log({
4273
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4274
+ requestId,
4275
+ provider: "anthropic",
4276
+ model: modelName,
4277
+ method: `streamWithTools[iter=${iterations}]`,
4278
+ inputTokens,
4279
+ outputTokens,
4280
+ cacheReadTokens,
4281
+ cacheWriteTokens,
4282
+ totalTokens: inputTokens + outputTokens + cacheReadTokens + cacheWriteTokens,
4283
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens, cacheReadTokens, cacheWriteTokens),
4284
+ durationMs: iterationDuration,
4285
+ toolCalls: toolUsesInIteration,
4286
+ success: true
4287
+ });
3716
4288
  if (stopReason === "end_turn") {
3717
4289
  break;
3718
4290
  }
@@ -3756,6 +4328,25 @@ var LLM = class {
3756
4328
  break;
3757
4329
  }
3758
4330
  }
4331
+ const totalDuration = Date.now() - methodStartTime;
4332
+ if (iterations > 1) {
4333
+ llmUsageLogger.log({
4334
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4335
+ requestId: llmUsageLogger.generateRequestId(),
4336
+ provider: "anthropic",
4337
+ model: modelName,
4338
+ method: `streamWithTools[TOTAL:${iterations}iters]`,
4339
+ inputTokens: totalInputTokens,
4340
+ outputTokens: totalOutputTokens,
4341
+ cacheReadTokens: totalCacheReadTokens,
4342
+ cacheWriteTokens: totalCacheWriteTokens,
4343
+ totalTokens: totalInputTokens + totalOutputTokens + totalCacheReadTokens + totalCacheWriteTokens,
4344
+ costUSD: llmUsageLogger.calculateCost(modelName, totalInputTokens, totalOutputTokens, totalCacheReadTokens, totalCacheWriteTokens),
4345
+ durationMs: totalDuration,
4346
+ toolCalls: totalToolCalls,
4347
+ success: true
4348
+ });
4349
+ }
3759
4350
  if (iterations >= maxIterations) {
3760
4351
  throw new Error(`Max iterations (${maxIterations}) reached in tool calling loop`);
3761
4352
  }
@@ -3765,100 +4356,272 @@ var LLM = class {
3765
4356
  // GROQ IMPLEMENTATION
3766
4357
  // ============================================================
3767
4358
  static async _groqText(messages, modelName, options) {
4359
+ const startTime = Date.now();
4360
+ const requestId = llmUsageLogger.generateRequestId();
3768
4361
  const client = new import_groq_sdk.default({
3769
4362
  apiKey: options.apiKey || process.env.GROQ_API_KEY || ""
3770
4363
  });
3771
- const response = await client.chat.completions.create({
3772
- model: modelName,
3773
- messages: [
3774
- { role: "system", content: messages.sys },
3775
- { role: "user", content: messages.user }
3776
- ],
3777
- temperature: options.temperature,
3778
- max_tokens: options.maxTokens || 1e3
3779
- });
3780
- return response.choices[0]?.message?.content || "";
4364
+ try {
4365
+ const response = await client.chat.completions.create({
4366
+ model: modelName,
4367
+ messages: [
4368
+ { role: "system", content: messages.sys },
4369
+ { role: "user", content: messages.user }
4370
+ ],
4371
+ temperature: options.temperature,
4372
+ max_tokens: options.maxTokens || 1e3
4373
+ });
4374
+ const durationMs = Date.now() - startTime;
4375
+ const usage = response.usage;
4376
+ const inputTokens = usage?.prompt_tokens || 0;
4377
+ const outputTokens = usage?.completion_tokens || 0;
4378
+ llmUsageLogger.log({
4379
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4380
+ requestId,
4381
+ provider: "groq",
4382
+ model: modelName,
4383
+ method: "text",
4384
+ inputTokens,
4385
+ outputTokens,
4386
+ totalTokens: inputTokens + outputTokens,
4387
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
4388
+ durationMs,
4389
+ success: true
4390
+ });
4391
+ return response.choices[0]?.message?.content || "";
4392
+ } catch (error) {
4393
+ const durationMs = Date.now() - startTime;
4394
+ llmUsageLogger.log({
4395
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4396
+ requestId,
4397
+ provider: "groq",
4398
+ model: modelName,
4399
+ method: "text",
4400
+ inputTokens: 0,
4401
+ outputTokens: 0,
4402
+ totalTokens: 0,
4403
+ costUSD: 0,
4404
+ durationMs,
4405
+ success: false,
4406
+ error: error instanceof Error ? error.message : String(error)
4407
+ });
4408
+ throw error;
4409
+ }
3781
4410
  }
3782
4411
  static async _groqStream(messages, modelName, options, json) {
4412
+ const startTime = Date.now();
4413
+ const requestId = llmUsageLogger.generateRequestId();
3783
4414
  const apiKey = options.apiKey || process.env.GROQ_API_KEY || "";
3784
4415
  const client = new import_groq_sdk.default({
3785
4416
  apiKey
3786
4417
  });
3787
- const stream = await client.chat.completions.create({
3788
- model: modelName,
3789
- messages: [
3790
- { role: "system", content: messages.sys },
3791
- { role: "user", content: messages.user }
3792
- ],
3793
- temperature: options.temperature,
3794
- max_tokens: options.maxTokens || 1e3,
3795
- stream: true,
3796
- response_format: json ? { type: "json_object" } : void 0
3797
- });
3798
- let fullText = "";
3799
- for await (const chunk of stream) {
3800
- const text = chunk.choices[0]?.delta?.content || "";
3801
- if (text) {
3802
- fullText += text;
3803
- if (options.partial) {
3804
- options.partial(text);
4418
+ try {
4419
+ const stream = await client.chat.completions.create({
4420
+ model: modelName,
4421
+ messages: [
4422
+ { role: "system", content: messages.sys },
4423
+ { role: "user", content: messages.user }
4424
+ ],
4425
+ temperature: options.temperature,
4426
+ max_tokens: options.maxTokens || 1e3,
4427
+ stream: true,
4428
+ response_format: json ? { type: "json_object" } : void 0
4429
+ });
4430
+ let fullText = "";
4431
+ let inputTokens = 0;
4432
+ let outputTokens = 0;
4433
+ for await (const chunk of stream) {
4434
+ const text = chunk.choices[0]?.delta?.content || "";
4435
+ if (text) {
4436
+ fullText += text;
4437
+ if (options.partial) {
4438
+ options.partial(text);
4439
+ }
4440
+ }
4441
+ if (chunk.x_groq?.usage) {
4442
+ inputTokens = chunk.x_groq.usage.prompt_tokens || 0;
4443
+ outputTokens = chunk.x_groq.usage.completion_tokens || 0;
3805
4444
  }
3806
4445
  }
4446
+ const durationMs = Date.now() - startTime;
4447
+ if (inputTokens === 0) {
4448
+ const sysPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((b) => b.text).join("");
4449
+ inputTokens = Math.ceil((sysPrompt.length + messages.user.length) / 4);
4450
+ }
4451
+ if (outputTokens === 0) {
4452
+ outputTokens = Math.ceil(fullText.length / 4);
4453
+ }
4454
+ llmUsageLogger.log({
4455
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4456
+ requestId,
4457
+ provider: "groq",
4458
+ model: modelName,
4459
+ method: "stream",
4460
+ inputTokens,
4461
+ outputTokens,
4462
+ totalTokens: inputTokens + outputTokens,
4463
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
4464
+ durationMs,
4465
+ success: true
4466
+ });
4467
+ if (json) {
4468
+ return this._parseJSON(fullText);
4469
+ }
4470
+ return fullText;
4471
+ } catch (error) {
4472
+ const durationMs = Date.now() - startTime;
4473
+ llmUsageLogger.log({
4474
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4475
+ requestId,
4476
+ provider: "groq",
4477
+ model: modelName,
4478
+ method: "stream",
4479
+ inputTokens: 0,
4480
+ outputTokens: 0,
4481
+ totalTokens: 0,
4482
+ costUSD: 0,
4483
+ durationMs,
4484
+ success: false,
4485
+ error: error instanceof Error ? error.message : String(error)
4486
+ });
4487
+ throw error;
3807
4488
  }
3808
- if (json) {
3809
- return this._parseJSON(fullText);
3810
- }
3811
- return fullText;
3812
4489
  }
3813
4490
  // ============================================================
3814
4491
  // GEMINI IMPLEMENTATION
3815
4492
  // ============================================================
3816
4493
  static async _geminiText(messages, modelName, options) {
4494
+ const startTime = Date.now();
4495
+ const requestId = llmUsageLogger.generateRequestId();
3817
4496
  const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
3818
4497
  const genAI = new import_generative_ai.GoogleGenerativeAI(apiKey);
3819
4498
  const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
3820
- const model = genAI.getGenerativeModel({
3821
- model: modelName,
3822
- systemInstruction: systemPrompt,
3823
- generationConfig: {
3824
- maxOutputTokens: options.maxTokens || 1e3,
3825
- temperature: options.temperature,
3826
- topP: options.topP
3827
- }
3828
- });
3829
- const result = await model.generateContent(messages.user);
3830
- const response = await result.response;
3831
- return response.text();
4499
+ try {
4500
+ const model = genAI.getGenerativeModel({
4501
+ model: modelName,
4502
+ systemInstruction: systemPrompt,
4503
+ generationConfig: {
4504
+ maxOutputTokens: options.maxTokens || 1e3,
4505
+ temperature: options.temperature,
4506
+ topP: options.topP
4507
+ }
4508
+ });
4509
+ const result = await model.generateContent(messages.user);
4510
+ const response = await result.response;
4511
+ const text = response.text();
4512
+ const durationMs = Date.now() - startTime;
4513
+ const usage = response.usageMetadata;
4514
+ const inputTokens = usage?.promptTokenCount || Math.ceil((systemPrompt.length + messages.user.length) / 4);
4515
+ const outputTokens = usage?.candidatesTokenCount || Math.ceil(text.length / 4);
4516
+ llmUsageLogger.log({
4517
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4518
+ requestId,
4519
+ provider: "gemini",
4520
+ model: modelName,
4521
+ method: "text",
4522
+ inputTokens,
4523
+ outputTokens,
4524
+ totalTokens: inputTokens + outputTokens,
4525
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
4526
+ durationMs,
4527
+ success: true
4528
+ });
4529
+ return text;
4530
+ } catch (error) {
4531
+ const durationMs = Date.now() - startTime;
4532
+ llmUsageLogger.log({
4533
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4534
+ requestId,
4535
+ provider: "gemini",
4536
+ model: modelName,
4537
+ method: "text",
4538
+ inputTokens: 0,
4539
+ outputTokens: 0,
4540
+ totalTokens: 0,
4541
+ costUSD: 0,
4542
+ durationMs,
4543
+ success: false,
4544
+ error: error instanceof Error ? error.message : String(error)
4545
+ });
4546
+ throw error;
4547
+ }
3832
4548
  }
3833
4549
  static async _geminiStream(messages, modelName, options, json) {
4550
+ const startTime = Date.now();
4551
+ const requestId = llmUsageLogger.generateRequestId();
3834
4552
  const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
3835
4553
  const genAI = new import_generative_ai.GoogleGenerativeAI(apiKey);
3836
4554
  const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
3837
- const model = genAI.getGenerativeModel({
3838
- model: modelName,
3839
- systemInstruction: systemPrompt,
3840
- generationConfig: {
3841
- maxOutputTokens: options.maxTokens || 1e3,
3842
- temperature: options.temperature,
3843
- topP: options.topP,
3844
- responseMimeType: json ? "application/json" : void 0
3845
- }
3846
- });
3847
- const result = await model.generateContentStream(messages.user);
3848
- let fullText = "";
3849
- for await (const chunk of result.stream) {
3850
- const text = chunk.text();
3851
- if (text) {
3852
- fullText += text;
3853
- if (options.partial) {
3854
- options.partial(text);
4555
+ try {
4556
+ const model = genAI.getGenerativeModel({
4557
+ model: modelName,
4558
+ systemInstruction: systemPrompt,
4559
+ generationConfig: {
4560
+ maxOutputTokens: options.maxTokens || 1e3,
4561
+ temperature: options.temperature,
4562
+ topP: options.topP,
4563
+ responseMimeType: json ? "application/json" : void 0
4564
+ }
4565
+ });
4566
+ const result = await model.generateContentStream(messages.user);
4567
+ let fullText = "";
4568
+ let inputTokens = 0;
4569
+ let outputTokens = 0;
4570
+ for await (const chunk of result.stream) {
4571
+ const text = chunk.text();
4572
+ if (text) {
4573
+ fullText += text;
4574
+ if (options.partial) {
4575
+ options.partial(text);
4576
+ }
4577
+ }
4578
+ if (chunk.usageMetadata) {
4579
+ inputTokens = chunk.usageMetadata.promptTokenCount || 0;
4580
+ outputTokens = chunk.usageMetadata.candidatesTokenCount || 0;
3855
4581
  }
3856
4582
  }
4583
+ const durationMs = Date.now() - startTime;
4584
+ if (inputTokens === 0) {
4585
+ inputTokens = Math.ceil((systemPrompt.length + messages.user.length) / 4);
4586
+ }
4587
+ if (outputTokens === 0) {
4588
+ outputTokens = Math.ceil(fullText.length / 4);
4589
+ }
4590
+ llmUsageLogger.log({
4591
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4592
+ requestId,
4593
+ provider: "gemini",
4594
+ model: modelName,
4595
+ method: "stream",
4596
+ inputTokens,
4597
+ outputTokens,
4598
+ totalTokens: inputTokens + outputTokens,
4599
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
4600
+ durationMs,
4601
+ success: true
4602
+ });
4603
+ if (json) {
4604
+ return this._parseJSON(fullText);
4605
+ }
4606
+ return fullText;
4607
+ } catch (error) {
4608
+ const durationMs = Date.now() - startTime;
4609
+ llmUsageLogger.log({
4610
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4611
+ requestId,
4612
+ provider: "gemini",
4613
+ model: modelName,
4614
+ method: "stream",
4615
+ inputTokens: 0,
4616
+ outputTokens: 0,
4617
+ totalTokens: 0,
4618
+ costUSD: 0,
4619
+ durationMs,
4620
+ success: false,
4621
+ error: error instanceof Error ? error.message : String(error)
4622
+ });
4623
+ throw error;
3857
4624
  }
3858
- if (json) {
3859
- return this._parseJSON(fullText);
3860
- }
3861
- return fullText;
3862
4625
  }
3863
4626
  static async _geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
3864
4627
  const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
@@ -3952,51 +4715,138 @@ var LLM = class {
3952
4715
  // OPENAI IMPLEMENTATION
3953
4716
  // ============================================================
3954
4717
  static async _openaiText(messages, modelName, options) {
4718
+ const startTime = Date.now();
4719
+ const requestId = llmUsageLogger.generateRequestId();
3955
4720
  const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
3956
4721
  const openai = new import_openai.default({ apiKey });
3957
4722
  const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
3958
- const response = await openai.chat.completions.create({
3959
- model: modelName,
3960
- messages: [
3961
- { role: "system", content: systemPrompt },
3962
- { role: "user", content: messages.user }
3963
- ],
3964
- max_tokens: options.maxTokens || 1e3,
3965
- temperature: options.temperature,
3966
- top_p: options.topP
3967
- });
3968
- return response.choices[0]?.message?.content || "";
4723
+ try {
4724
+ const response = await openai.chat.completions.create({
4725
+ model: modelName,
4726
+ messages: [
4727
+ { role: "system", content: systemPrompt },
4728
+ { role: "user", content: messages.user }
4729
+ ],
4730
+ max_tokens: options.maxTokens || 1e3,
4731
+ temperature: options.temperature,
4732
+ top_p: options.topP
4733
+ });
4734
+ const durationMs = Date.now() - startTime;
4735
+ const usage = response.usage;
4736
+ const inputTokens = usage?.prompt_tokens || 0;
4737
+ const outputTokens = usage?.completion_tokens || 0;
4738
+ llmUsageLogger.log({
4739
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4740
+ requestId,
4741
+ provider: "openai",
4742
+ model: modelName,
4743
+ method: "text",
4744
+ inputTokens,
4745
+ outputTokens,
4746
+ totalTokens: inputTokens + outputTokens,
4747
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
4748
+ durationMs,
4749
+ success: true
4750
+ });
4751
+ return response.choices[0]?.message?.content || "";
4752
+ } catch (error) {
4753
+ const durationMs = Date.now() - startTime;
4754
+ llmUsageLogger.log({
4755
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4756
+ requestId,
4757
+ provider: "openai",
4758
+ model: modelName,
4759
+ method: "text",
4760
+ inputTokens: 0,
4761
+ outputTokens: 0,
4762
+ totalTokens: 0,
4763
+ costUSD: 0,
4764
+ durationMs,
4765
+ success: false,
4766
+ error: error instanceof Error ? error.message : String(error)
4767
+ });
4768
+ throw error;
4769
+ }
3969
4770
  }
3970
4771
  static async _openaiStream(messages, modelName, options, json) {
4772
+ const startTime = Date.now();
4773
+ const requestId = llmUsageLogger.generateRequestId();
3971
4774
  const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
3972
4775
  const openai = new import_openai.default({ apiKey });
3973
4776
  const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
3974
- const stream = await openai.chat.completions.create({
3975
- model: modelName,
3976
- messages: [
3977
- { role: "system", content: systemPrompt },
3978
- { role: "user", content: messages.user }
3979
- ],
3980
- max_tokens: options.maxTokens || 1e3,
3981
- temperature: options.temperature,
3982
- top_p: options.topP,
3983
- response_format: json ? { type: "json_object" } : void 0,
3984
- stream: true
3985
- });
3986
- let fullText = "";
3987
- for await (const chunk of stream) {
3988
- const content = chunk.choices[0]?.delta?.content || "";
3989
- if (content) {
3990
- fullText += content;
3991
- if (options.partial) {
3992
- options.partial(content);
4777
+ try {
4778
+ const stream = await openai.chat.completions.create({
4779
+ model: modelName,
4780
+ messages: [
4781
+ { role: "system", content: systemPrompt },
4782
+ { role: "user", content: messages.user }
4783
+ ],
4784
+ max_tokens: options.maxTokens || 1e3,
4785
+ temperature: options.temperature,
4786
+ top_p: options.topP,
4787
+ response_format: json ? { type: "json_object" } : void 0,
4788
+ stream: true,
4789
+ stream_options: { include_usage: true }
4790
+ // Request usage info in stream
4791
+ });
4792
+ let fullText = "";
4793
+ let inputTokens = 0;
4794
+ let outputTokens = 0;
4795
+ for await (const chunk of stream) {
4796
+ const content = chunk.choices[0]?.delta?.content || "";
4797
+ if (content) {
4798
+ fullText += content;
4799
+ if (options.partial) {
4800
+ options.partial(content);
4801
+ }
4802
+ }
4803
+ if (chunk.usage) {
4804
+ inputTokens = chunk.usage.prompt_tokens || 0;
4805
+ outputTokens = chunk.usage.completion_tokens || 0;
3993
4806
  }
3994
4807
  }
4808
+ const durationMs = Date.now() - startTime;
4809
+ if (inputTokens === 0) {
4810
+ inputTokens = Math.ceil((systemPrompt.length + messages.user.length) / 4);
4811
+ }
4812
+ if (outputTokens === 0) {
4813
+ outputTokens = Math.ceil(fullText.length / 4);
4814
+ }
4815
+ llmUsageLogger.log({
4816
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4817
+ requestId,
4818
+ provider: "openai",
4819
+ model: modelName,
4820
+ method: "stream",
4821
+ inputTokens,
4822
+ outputTokens,
4823
+ totalTokens: inputTokens + outputTokens,
4824
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
4825
+ durationMs,
4826
+ success: true
4827
+ });
4828
+ if (json) {
4829
+ return this._parseJSON(fullText);
4830
+ }
4831
+ return fullText;
4832
+ } catch (error) {
4833
+ const durationMs = Date.now() - startTime;
4834
+ llmUsageLogger.log({
4835
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
4836
+ requestId,
4837
+ provider: "openai",
4838
+ model: modelName,
4839
+ method: "stream",
4840
+ inputTokens: 0,
4841
+ outputTokens: 0,
4842
+ totalTokens: 0,
4843
+ costUSD: 0,
4844
+ durationMs,
4845
+ success: false,
4846
+ error: error instanceof Error ? error.message : String(error)
4847
+ });
4848
+ throw error;
3995
4849
  }
3996
- if (json) {
3997
- return this._parseJSON(fullText);
3998
- }
3999
- return fullText;
4000
4850
  }
4001
4851
  static async _openaiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
4002
4852
  const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
@@ -4142,11 +4992,9 @@ var LLM = class {
4142
4992
  closeChar = "]";
4143
4993
  }
4144
4994
  if (startIdx === -1) {
4145
- const preview = text.length > 500 ? text.substring(0, 500) + "..." : text;
4146
- throw new Error(`No JSON found in response. LLM returned plain text instead of JSON.
4147
-
4148
- Full response:
4149
- ${preview}`);
4995
+ const error = new Error(`No JSON found in response. LLM returned plain text instead of JSON.`);
4996
+ userPromptErrorLogger.logJsonParseError("LLM._parseJSON - No JSON structure found", text, error);
4997
+ throw error;
4150
4998
  }
4151
4999
  let depth = 0;
4152
5000
  let inString = false;
@@ -4173,24 +5021,17 @@ ${preview}`);
4173
5021
  if (endIdx !== -1) {
4174
5022
  jsonText = jsonText.substring(startIdx, endIdx + 1);
4175
5023
  } else {
4176
- const preview = text.length > 500 ? text.substring(0, 500) + "..." : text;
4177
- throw new Error(`Incomplete JSON - no matching closing ${closeChar} found.
4178
-
4179
- Full response:
4180
- ${preview}`);
5024
+ const error = new Error(`Incomplete JSON - no matching closing ${closeChar} found.`);
5025
+ userPromptErrorLogger.logJsonParseError("LLM._parseJSON - Incomplete JSON", text, error);
5026
+ throw error;
4181
5027
  }
4182
5028
  try {
4183
5029
  const repairedJson = (0, import_jsonrepair.jsonrepair)(jsonText);
4184
5030
  return JSON.parse(repairedJson);
4185
5031
  } catch (error) {
4186
- const preview = text.length > 500 ? text.substring(0, 500) + "..." : text;
4187
- throw new Error(`Failed to parse JSON: ${error instanceof Error ? error.message : String(error)}
4188
-
4189
- Extracted JSON:
4190
- ${jsonText.substring(0, 300)}...
4191
-
4192
- Full response:
4193
- ${preview}`);
5032
+ const parseError = error instanceof Error ? error : new Error(String(error));
5033
+ userPromptErrorLogger.logJsonParseError("LLM._parseJSON - JSON parse/repair failed", text, parseError);
5034
+ throw new Error(`Failed to parse JSON: ${parseError.message}`);
4194
5035
  }
4195
5036
  }
4196
5037
  };
@@ -4538,6 +5379,7 @@ var conversation_search_default = ConversationSearch;
4538
5379
  var BaseLLM = class {
4539
5380
  constructor(config) {
4540
5381
  this.model = config?.model || this.getDefaultModel();
5382
+ this.fastModel = config?.fastModel || this.getDefaultFastModel();
4541
5383
  this.defaultLimit = config?.defaultLimit || 50;
4542
5384
  this.apiKey = config?.apiKey;
4543
5385
  }
@@ -4847,7 +5689,7 @@ ${JSON.stringify(tool.requiredFields || [], null, 2)}`;
4847
5689
  user: prompts.user
4848
5690
  },
4849
5691
  {
4850
- model: this.model,
5692
+ model: this.fastModel,
4851
5693
  maxTokens: 1500,
4852
5694
  temperature: 0.2,
4853
5695
  apiKey: this.getApiKey(apiKey)
@@ -5279,6 +6121,7 @@ ${sql}
5279
6121
  const errorMsg = error instanceof Error ? error.message : String(error);
5280
6122
  logger.error(`[${this.getProviderName()}] Query execution failed (attempt ${attempts}/${MAX_QUERY_ATTEMPTS}): ${errorMsg}`);
5281
6123
  logCollector?.error(`Query failed (attempt ${attempts}/${MAX_QUERY_ATTEMPTS}): ${errorMsg}`);
6124
+ userPromptErrorLogger.logSqlError(sql, error instanceof Error ? error : new Error(errorMsg), Object.keys(params).length > 0 ? Object.values(params) : void 0);
5282
6125
  if (wrappedStreamCallback) {
5283
6126
  wrappedStreamCallback(`\u274C **Query execution failed:**
5284
6127
  \`\`\`
@@ -5369,6 +6212,7 @@ Please try rephrasing your request or contact support.
5369
6212
  const errorMsg = error instanceof Error ? error.message : String(error);
5370
6213
  logger.error(`[${this.getProviderName()}] External tool ${externalTool.name} failed (attempt ${attempts}/${MAX_TOOL_ATTEMPTS}): ${errorMsg}`);
5371
6214
  logCollector?.error(`\u2717 ${externalTool.name} failed: ${errorMsg}`);
6215
+ userPromptErrorLogger.logToolError(externalTool.name, toolInput, error instanceof Error ? error : new Error(errorMsg));
5372
6216
  if (wrappedStreamCallback) {
5373
6217
  wrappedStreamCallback(`\u274C **${externalTool.name} failed:**
5374
6218
  \`\`\`
@@ -5511,6 +6355,13 @@ ${errorMsg}
5511
6355
  const errorMsg = error instanceof Error ? error.message : String(error);
5512
6356
  logger.error(`[${this.getProviderName()}] Error generating text response: ${errorMsg}`);
5513
6357
  logCollector?.error(`Error generating text response: ${errorMsg}`);
6358
+ userPromptErrorLogger.logLlmError(
6359
+ this.getProviderName(),
6360
+ this.model,
6361
+ "generateTextResponse",
6362
+ error instanceof Error ? error : new Error(errorMsg),
6363
+ { userPrompt }
6364
+ );
5514
6365
  errors.push(errorMsg);
5515
6366
  return {
5516
6367
  success: false,
@@ -5713,6 +6564,11 @@ ${errorMsg}
5713
6564
  logger.error(`[${this.getProviderName()}] Error in handleUserRequest: ${errorMsg}`);
5714
6565
  logger.debug(`[${this.getProviderName()}] Error details:`, error);
5715
6566
  logCollector?.error(`Error processing request: ${errorMsg}`);
6567
+ userPromptErrorLogger.logError(
6568
+ "handleUserRequest",
6569
+ error instanceof Error ? error : new Error(errorMsg),
6570
+ { userPrompt }
6571
+ );
5716
6572
  const elapsedTime = Date.now() - startTime;
5717
6573
  logger.info(`[${this.getProviderName()}] Total time taken: ${elapsedTime}ms (${(elapsedTime / 1e3).toFixed(2)}s)`);
5718
6574
  logCollector?.info(`Total time taken: ${elapsedTime}ms (${(elapsedTime / 1e3).toFixed(2)}s)`);
@@ -5751,7 +6607,7 @@ ${errorMsg}
5751
6607
  user: prompts.user
5752
6608
  },
5753
6609
  {
5754
- model: this.model,
6610
+ model: this.fastModel,
5755
6611
  maxTokens: 1200,
5756
6612
  temperature: 0.7,
5757
6613
  apiKey: this.getApiKey(apiKey)
@@ -5788,6 +6644,9 @@ var GroqLLM = class extends BaseLLM {
5788
6644
  getDefaultModel() {
5789
6645
  return "groq/openai/gpt-oss-120b";
5790
6646
  }
6647
+ getDefaultFastModel() {
6648
+ return "groq/llama-3.1-8b-instant";
6649
+ }
5791
6650
  getDefaultApiKey() {
5792
6651
  return process.env.GROQ_API_KEY;
5793
6652
  }
@@ -5807,6 +6666,9 @@ var AnthropicLLM = class extends BaseLLM {
5807
6666
  getDefaultModel() {
5808
6667
  return "anthropic/claude-sonnet-4-5-20250929";
5809
6668
  }
6669
+ getDefaultFastModel() {
6670
+ return "anthropic/claude-haiku-4-5-20251001";
6671
+ }
5810
6672
  getDefaultApiKey() {
5811
6673
  return process.env.ANTHROPIC_API_KEY;
5812
6674
  }
@@ -5826,6 +6688,9 @@ var GeminiLLM = class extends BaseLLM {
5826
6688
  getDefaultModel() {
5827
6689
  return "gemini/gemini-2.5-flash";
5828
6690
  }
6691
+ getDefaultFastModel() {
6692
+ return "gemini/gemini-2.0-flash-exp";
6693
+ }
5829
6694
  getDefaultApiKey() {
5830
6695
  return process.env.GEMINI_API_KEY;
5831
6696
  }
@@ -5845,6 +6710,9 @@ var OpenAILLM = class extends BaseLLM {
5845
6710
  getDefaultModel() {
5846
6711
  return "openai/gpt-4.1";
5847
6712
  }
6713
+ getDefaultFastModel() {
6714
+ return "openai/gpt-4o-mini";
6715
+ }
5848
6716
  getDefaultApiKey() {
5849
6717
  return process.env.OPENAI_API_KEY;
5850
6718
  }
@@ -6351,6 +7219,9 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
6351
7219
  const prompt = payload.prompt;
6352
7220
  const SA_RUNTIME = payload.SA_RUNTIME;
6353
7221
  const wsId = userPromptRequest.from.id || "unknown";
7222
+ const promptContext = `User Prompt: ${prompt?.substring(0, 50)}${(prompt?.length || 0) > 50 ? "..." : ""}`;
7223
+ llmUsageLogger.resetLogFile(promptContext);
7224
+ userPromptErrorLogger.resetLogFile(promptContext);
6354
7225
  if (!SA_RUNTIME) {
6355
7226
  errors.push("SA_RUNTIME is required");
6356
7227
  }
@@ -6424,6 +7295,14 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
6424
7295
  const uiBlockId = existingUiBlockId;
6425
7296
  if (!userResponse.success) {
6426
7297
  logger.error(`User prompt request failed with errors: ${userResponse.errors.join(", ")}`);
7298
+ userPromptErrorLogger.logError("User Response Failed", userResponse.errors.join("\n"), {
7299
+ prompt,
7300
+ uiBlockId,
7301
+ threadId,
7302
+ responseData: userResponse.data
7303
+ });
7304
+ userPromptErrorLogger.writeSummary();
7305
+ llmUsageLogger.logSessionSummary(`FAILED: ${prompt?.substring(0, 30)}`);
6427
7306
  return {
6428
7307
  success: false,
6429
7308
  data: userResponse.data,
@@ -6499,6 +7378,7 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
6499
7378
  }
6500
7379
  }
6501
7380
  }
7381
+ llmUsageLogger.logSessionSummary(prompt?.substring(0, 50));
6502
7382
  return {
6503
7383
  success: userResponse.success,
6504
7384
  data: userResponse.data,
@@ -9400,8 +10280,8 @@ function sendDashCompResponse(id, res, sendMessage, clientId) {
9400
10280
  }
9401
10281
 
9402
10282
  // src/auth/user-manager.ts
9403
- var import_fs4 = __toESM(require("fs"));
9404
- var import_path3 = __toESM(require("path"));
10283
+ var import_fs6 = __toESM(require("fs"));
10284
+ var import_path5 = __toESM(require("path"));
9405
10285
  var import_os = __toESM(require("os"));
9406
10286
  init_logger();
9407
10287
  var UserManager = class {
@@ -9415,7 +10295,7 @@ var UserManager = class {
9415
10295
  this.hasChanged = false;
9416
10296
  this.syncInterval = null;
9417
10297
  this.isInitialized = false;
9418
- this.filePath = import_path3.default.join(import_os.default.homedir(), ".superatom", "projects", projectId, "users.json");
10298
+ this.filePath = import_path5.default.join(import_os.default.homedir(), ".superatom", "projects", projectId, "users.json");
9419
10299
  this.syncIntervalMs = syncIntervalMs;
9420
10300
  }
9421
10301
  /**
@@ -9440,20 +10320,20 @@ var UserManager = class {
9440
10320
  */
9441
10321
  async loadUsersFromFile() {
9442
10322
  try {
9443
- const dir = import_path3.default.dirname(this.filePath);
9444
- if (!import_fs4.default.existsSync(dir)) {
10323
+ const dir = import_path5.default.dirname(this.filePath);
10324
+ if (!import_fs6.default.existsSync(dir)) {
9445
10325
  logger.info(`Creating directory structure: ${dir}`);
9446
- import_fs4.default.mkdirSync(dir, { recursive: true });
10326
+ import_fs6.default.mkdirSync(dir, { recursive: true });
9447
10327
  }
9448
- if (!import_fs4.default.existsSync(this.filePath)) {
10328
+ if (!import_fs6.default.existsSync(this.filePath)) {
9449
10329
  logger.info(`Users file does not exist at ${this.filePath}, creating with empty users`);
9450
10330
  const initialData = { users: [] };
9451
- import_fs4.default.writeFileSync(this.filePath, JSON.stringify(initialData, null, 4));
10331
+ import_fs6.default.writeFileSync(this.filePath, JSON.stringify(initialData, null, 4));
9452
10332
  this.users = [];
9453
10333
  this.hasChanged = false;
9454
10334
  return;
9455
10335
  }
9456
- const fileContent = import_fs4.default.readFileSync(this.filePath, "utf-8");
10336
+ const fileContent = import_fs6.default.readFileSync(this.filePath, "utf-8");
9457
10337
  const rawData = JSON.parse(fileContent);
9458
10338
  const validatedData = UsersDataSchema.parse(rawData);
9459
10339
  this.users = validatedData.users;
@@ -9472,16 +10352,16 @@ var UserManager = class {
9472
10352
  return;
9473
10353
  }
9474
10354
  try {
9475
- const dir = import_path3.default.dirname(this.filePath);
9476
- if (!import_fs4.default.existsSync(dir)) {
9477
- import_fs4.default.mkdirSync(dir, { recursive: true });
10355
+ const dir = import_path5.default.dirname(this.filePath);
10356
+ if (!import_fs6.default.existsSync(dir)) {
10357
+ import_fs6.default.mkdirSync(dir, { recursive: true });
9478
10358
  }
9479
10359
  const usersToSave = this.users.map((user) => {
9480
10360
  const { wsIds, ...userWithoutWsIds } = user;
9481
10361
  return userWithoutWsIds;
9482
10362
  });
9483
10363
  const data = { users: usersToSave };
9484
- import_fs4.default.writeFileSync(this.filePath, JSON.stringify(data, null, 4));
10364
+ import_fs6.default.writeFileSync(this.filePath, JSON.stringify(data, null, 4));
9485
10365
  this.hasChanged = false;
9486
10366
  logger.debug(`Synced ${this.users.length} users to file (wsIds excluded)`);
9487
10367
  } catch (error) {
@@ -9699,8 +10579,8 @@ var UserManager = class {
9699
10579
  };
9700
10580
 
9701
10581
  // src/dashboards/dashboard-manager.ts
9702
- var import_fs5 = __toESM(require("fs"));
9703
- var import_path4 = __toESM(require("path"));
10582
+ var import_fs7 = __toESM(require("fs"));
10583
+ var import_path6 = __toESM(require("path"));
9704
10584
  var import_os2 = __toESM(require("os"));
9705
10585
  init_logger();
9706
10586
  var DashboardManager = class {
@@ -9710,7 +10590,7 @@ var DashboardManager = class {
9710
10590
  */
9711
10591
  constructor(projectId = "snowflake-dataset") {
9712
10592
  this.projectId = projectId;
9713
- this.dashboardsBasePath = import_path4.default.join(
10593
+ this.dashboardsBasePath = import_path6.default.join(
9714
10594
  import_os2.default.homedir(),
9715
10595
  ".superatom",
9716
10596
  "projects",
@@ -9724,7 +10604,7 @@ var DashboardManager = class {
9724
10604
  * @returns Full path to dashboard data.json file
9725
10605
  */
9726
10606
  getDashboardPath(dashboardId) {
9727
- return import_path4.default.join(this.dashboardsBasePath, dashboardId, "data.json");
10607
+ return import_path6.default.join(this.dashboardsBasePath, dashboardId, "data.json");
9728
10608
  }
9729
10609
  /**
9730
10610
  * Create a new dashboard
@@ -9734,13 +10614,13 @@ var DashboardManager = class {
9734
10614
  */
9735
10615
  createDashboard(dashboardId, dashboard) {
9736
10616
  const dashboardPath = this.getDashboardPath(dashboardId);
9737
- const dashboardDir = import_path4.default.dirname(dashboardPath);
9738
- if (import_fs5.default.existsSync(dashboardPath)) {
10617
+ const dashboardDir = import_path6.default.dirname(dashboardPath);
10618
+ if (import_fs7.default.existsSync(dashboardPath)) {
9739
10619
  throw new Error(`Dashboard '${dashboardId}' already exists`);
9740
10620
  }
9741
10621
  const validated = DSLRendererPropsSchema.parse(dashboard);
9742
- import_fs5.default.mkdirSync(dashboardDir, { recursive: true });
9743
- import_fs5.default.writeFileSync(dashboardPath, JSON.stringify(validated, null, 4));
10622
+ import_fs7.default.mkdirSync(dashboardDir, { recursive: true });
10623
+ import_fs7.default.writeFileSync(dashboardPath, JSON.stringify(validated, null, 4));
9744
10624
  logger.info(`Dashboard created: ${dashboardId}`);
9745
10625
  return validated;
9746
10626
  }
@@ -9751,12 +10631,12 @@ var DashboardManager = class {
9751
10631
  */
9752
10632
  getDashboard(dashboardId) {
9753
10633
  const dashboardPath = this.getDashboardPath(dashboardId);
9754
- if (!import_fs5.default.existsSync(dashboardPath)) {
10634
+ if (!import_fs7.default.existsSync(dashboardPath)) {
9755
10635
  logger.warn(`Dashboard not found: ${dashboardId}`);
9756
10636
  return null;
9757
10637
  }
9758
10638
  try {
9759
- const fileContent = import_fs5.default.readFileSync(dashboardPath, "utf-8");
10639
+ const fileContent = import_fs7.default.readFileSync(dashboardPath, "utf-8");
9760
10640
  const dashboard = JSON.parse(fileContent);
9761
10641
  const validated = DSLRendererPropsSchema.parse(dashboard);
9762
10642
  return validated;
@@ -9770,16 +10650,16 @@ var DashboardManager = class {
9770
10650
  * @returns Array of dashboard objects with their IDs
9771
10651
  */
9772
10652
  getAllDashboards() {
9773
- if (!import_fs5.default.existsSync(this.dashboardsBasePath)) {
9774
- import_fs5.default.mkdirSync(this.dashboardsBasePath, { recursive: true });
10653
+ if (!import_fs7.default.existsSync(this.dashboardsBasePath)) {
10654
+ import_fs7.default.mkdirSync(this.dashboardsBasePath, { recursive: true });
9775
10655
  return [];
9776
10656
  }
9777
10657
  const dashboards = [];
9778
10658
  try {
9779
- const dashboardDirs = import_fs5.default.readdirSync(this.dashboardsBasePath);
10659
+ const dashboardDirs = import_fs7.default.readdirSync(this.dashboardsBasePath);
9780
10660
  for (const dashboardId of dashboardDirs) {
9781
10661
  const dashboardPath = this.getDashboardPath(dashboardId);
9782
- if (import_fs5.default.existsSync(dashboardPath)) {
10662
+ if (import_fs7.default.existsSync(dashboardPath)) {
9783
10663
  const dashboard = this.getDashboard(dashboardId);
9784
10664
  if (dashboard) {
9785
10665
  dashboards.push({ dashboardId, dashboard });
@@ -9801,13 +10681,13 @@ var DashboardManager = class {
9801
10681
  */
9802
10682
  updateDashboard(dashboardId, dashboard) {
9803
10683
  const dashboardPath = this.getDashboardPath(dashboardId);
9804
- if (!import_fs5.default.existsSync(dashboardPath)) {
10684
+ if (!import_fs7.default.existsSync(dashboardPath)) {
9805
10685
  logger.warn(`Dashboard not found for update: ${dashboardId}`);
9806
10686
  return null;
9807
10687
  }
9808
10688
  try {
9809
10689
  const validated = DSLRendererPropsSchema.parse(dashboard);
9810
- import_fs5.default.writeFileSync(dashboardPath, JSON.stringify(validated, null, 4));
10690
+ import_fs7.default.writeFileSync(dashboardPath, JSON.stringify(validated, null, 4));
9811
10691
  logger.info(`Dashboard updated: ${dashboardId}`);
9812
10692
  return validated;
9813
10693
  } catch (error) {
@@ -9822,13 +10702,13 @@ var DashboardManager = class {
9822
10702
  */
9823
10703
  deleteDashboard(dashboardId) {
9824
10704
  const dashboardPath = this.getDashboardPath(dashboardId);
9825
- const dashboardDir = import_path4.default.dirname(dashboardPath);
9826
- if (!import_fs5.default.existsSync(dashboardPath)) {
10705
+ const dashboardDir = import_path6.default.dirname(dashboardPath);
10706
+ if (!import_fs7.default.existsSync(dashboardPath)) {
9827
10707
  logger.warn(`Dashboard not found for deletion: ${dashboardId}`);
9828
10708
  return false;
9829
10709
  }
9830
10710
  try {
9831
- import_fs5.default.rmSync(dashboardDir, { recursive: true, force: true });
10711
+ import_fs7.default.rmSync(dashboardDir, { recursive: true, force: true });
9832
10712
  logger.info(`Dashboard deleted: ${dashboardId}`);
9833
10713
  return true;
9834
10714
  } catch (error) {
@@ -9843,21 +10723,21 @@ var DashboardManager = class {
9843
10723
  */
9844
10724
  dashboardExists(dashboardId) {
9845
10725
  const dashboardPath = this.getDashboardPath(dashboardId);
9846
- return import_fs5.default.existsSync(dashboardPath);
10726
+ return import_fs7.default.existsSync(dashboardPath);
9847
10727
  }
9848
10728
  /**
9849
10729
  * Get dashboard count
9850
10730
  * @returns Number of dashboards
9851
10731
  */
9852
10732
  getDashboardCount() {
9853
- if (!import_fs5.default.existsSync(this.dashboardsBasePath)) {
10733
+ if (!import_fs7.default.existsSync(this.dashboardsBasePath)) {
9854
10734
  return 0;
9855
10735
  }
9856
10736
  try {
9857
- const dashboardDirs = import_fs5.default.readdirSync(this.dashboardsBasePath);
10737
+ const dashboardDirs = import_fs7.default.readdirSync(this.dashboardsBasePath);
9858
10738
  return dashboardDirs.filter((dir) => {
9859
10739
  const dashboardPath = this.getDashboardPath(dir);
9860
- return import_fs5.default.existsSync(dashboardPath);
10740
+ return import_fs7.default.existsSync(dashboardPath);
9861
10741
  }).length;
9862
10742
  } catch (error) {
9863
10743
  logger.error("Failed to get dashboard count:", error);
@@ -9867,8 +10747,8 @@ var DashboardManager = class {
9867
10747
  };
9868
10748
 
9869
10749
  // src/reports/report-manager.ts
9870
- var import_fs6 = __toESM(require("fs"));
9871
- var import_path5 = __toESM(require("path"));
10750
+ var import_fs8 = __toESM(require("fs"));
10751
+ var import_path7 = __toESM(require("path"));
9872
10752
  var import_os3 = __toESM(require("os"));
9873
10753
  init_logger();
9874
10754
  var ReportManager = class {
@@ -9878,7 +10758,7 @@ var ReportManager = class {
9878
10758
  */
9879
10759
  constructor(projectId = "snowflake-dataset") {
9880
10760
  this.projectId = projectId;
9881
- this.reportsBasePath = import_path5.default.join(
10761
+ this.reportsBasePath = import_path7.default.join(
9882
10762
  import_os3.default.homedir(),
9883
10763
  ".superatom",
9884
10764
  "projects",
@@ -9892,7 +10772,7 @@ var ReportManager = class {
9892
10772
  * @returns Full path to report data.json file
9893
10773
  */
9894
10774
  getReportPath(reportId) {
9895
- return import_path5.default.join(this.reportsBasePath, reportId, "data.json");
10775
+ return import_path7.default.join(this.reportsBasePath, reportId, "data.json");
9896
10776
  }
9897
10777
  /**
9898
10778
  * Create a new report
@@ -9902,13 +10782,13 @@ var ReportManager = class {
9902
10782
  */
9903
10783
  createReport(reportId, report) {
9904
10784
  const reportPath = this.getReportPath(reportId);
9905
- const reportDir = import_path5.default.dirname(reportPath);
9906
- if (import_fs6.default.existsSync(reportPath)) {
10785
+ const reportDir = import_path7.default.dirname(reportPath);
10786
+ if (import_fs8.default.existsSync(reportPath)) {
9907
10787
  throw new Error(`Report '${reportId}' already exists`);
9908
10788
  }
9909
10789
  const validated = DSLRendererPropsSchema2.parse(report);
9910
- import_fs6.default.mkdirSync(reportDir, { recursive: true });
9911
- import_fs6.default.writeFileSync(reportPath, JSON.stringify(validated, null, 4));
10790
+ import_fs8.default.mkdirSync(reportDir, { recursive: true });
10791
+ import_fs8.default.writeFileSync(reportPath, JSON.stringify(validated, null, 4));
9912
10792
  logger.info(`Report created: ${reportId}`);
9913
10793
  return validated;
9914
10794
  }
@@ -9919,12 +10799,12 @@ var ReportManager = class {
9919
10799
  */
9920
10800
  getReport(reportId) {
9921
10801
  const reportPath = this.getReportPath(reportId);
9922
- if (!import_fs6.default.existsSync(reportPath)) {
10802
+ if (!import_fs8.default.existsSync(reportPath)) {
9923
10803
  logger.warn(`Report not found: ${reportId}`);
9924
10804
  return null;
9925
10805
  }
9926
10806
  try {
9927
- const fileContent = import_fs6.default.readFileSync(reportPath, "utf-8");
10807
+ const fileContent = import_fs8.default.readFileSync(reportPath, "utf-8");
9928
10808
  const report = JSON.parse(fileContent);
9929
10809
  const validated = DSLRendererPropsSchema2.parse(report);
9930
10810
  return validated;
@@ -9938,16 +10818,16 @@ var ReportManager = class {
9938
10818
  * @returns Array of report objects with their IDs
9939
10819
  */
9940
10820
  getAllReports() {
9941
- if (!import_fs6.default.existsSync(this.reportsBasePath)) {
9942
- import_fs6.default.mkdirSync(this.reportsBasePath, { recursive: true });
10821
+ if (!import_fs8.default.existsSync(this.reportsBasePath)) {
10822
+ import_fs8.default.mkdirSync(this.reportsBasePath, { recursive: true });
9943
10823
  return [];
9944
10824
  }
9945
10825
  const reports = [];
9946
10826
  try {
9947
- const reportDirs = import_fs6.default.readdirSync(this.reportsBasePath);
10827
+ const reportDirs = import_fs8.default.readdirSync(this.reportsBasePath);
9948
10828
  for (const reportId of reportDirs) {
9949
10829
  const reportPath = this.getReportPath(reportId);
9950
- if (import_fs6.default.existsSync(reportPath)) {
10830
+ if (import_fs8.default.existsSync(reportPath)) {
9951
10831
  const report = this.getReport(reportId);
9952
10832
  if (report) {
9953
10833
  reports.push({ reportId, report });
@@ -9969,13 +10849,13 @@ var ReportManager = class {
9969
10849
  */
9970
10850
  updateReport(reportId, report) {
9971
10851
  const reportPath = this.getReportPath(reportId);
9972
- if (!import_fs6.default.existsSync(reportPath)) {
10852
+ if (!import_fs8.default.existsSync(reportPath)) {
9973
10853
  logger.warn(`Report not found for update: ${reportId}`);
9974
10854
  return null;
9975
10855
  }
9976
10856
  try {
9977
10857
  const validated = DSLRendererPropsSchema2.parse(report);
9978
- import_fs6.default.writeFileSync(reportPath, JSON.stringify(validated, null, 4));
10858
+ import_fs8.default.writeFileSync(reportPath, JSON.stringify(validated, null, 4));
9979
10859
  logger.info(`Report updated: ${reportId}`);
9980
10860
  return validated;
9981
10861
  } catch (error) {
@@ -9990,13 +10870,13 @@ var ReportManager = class {
9990
10870
  */
9991
10871
  deleteReport(reportId) {
9992
10872
  const reportPath = this.getReportPath(reportId);
9993
- const reportDir = import_path5.default.dirname(reportPath);
9994
- if (!import_fs6.default.existsSync(reportPath)) {
10873
+ const reportDir = import_path7.default.dirname(reportPath);
10874
+ if (!import_fs8.default.existsSync(reportPath)) {
9995
10875
  logger.warn(`Report not found for deletion: ${reportId}`);
9996
10876
  return false;
9997
10877
  }
9998
10878
  try {
9999
- import_fs6.default.rmSync(reportDir, { recursive: true, force: true });
10879
+ import_fs8.default.rmSync(reportDir, { recursive: true, force: true });
10000
10880
  logger.info(`Report deleted: ${reportId}`);
10001
10881
  return true;
10002
10882
  } catch (error) {
@@ -10011,21 +10891,21 @@ var ReportManager = class {
10011
10891
  */
10012
10892
  reportExists(reportId) {
10013
10893
  const reportPath = this.getReportPath(reportId);
10014
- return import_fs6.default.existsSync(reportPath);
10894
+ return import_fs8.default.existsSync(reportPath);
10015
10895
  }
10016
10896
  /**
10017
10897
  * Get report count
10018
10898
  * @returns Number of reports
10019
10899
  */
10020
10900
  getReportCount() {
10021
- if (!import_fs6.default.existsSync(this.reportsBasePath)) {
10901
+ if (!import_fs8.default.existsSync(this.reportsBasePath)) {
10022
10902
  return 0;
10023
10903
  }
10024
10904
  try {
10025
- const reportDirs = import_fs6.default.readdirSync(this.reportsBasePath);
10905
+ const reportDirs = import_fs8.default.readdirSync(this.reportsBasePath);
10026
10906
  return reportDirs.filter((dir) => {
10027
10907
  const reportPath = this.getReportPath(dir);
10028
- return import_fs6.default.existsSync(reportPath);
10908
+ return import_fs8.default.existsSync(reportPath);
10029
10909
  }).length;
10030
10910
  } catch (error) {
10031
10911
  logger.error("Failed to get report count:", error);
@@ -10649,8 +11529,10 @@ var SuperatomSDK = class {
10649
11529
  UILogCollector,
10650
11530
  UserManager,
10651
11531
  hybridRerank,
11532
+ llmUsageLogger,
10652
11533
  logger,
10653
11534
  rerankChromaResults,
10654
- rerankConversationResults
11535
+ rerankConversationResults,
11536
+ userPromptErrorLogger
10655
11537
  });
10656
11538
  //# sourceMappingURL=index.js.map
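For reference, a minimal consumer-side sketch of the 0.0.40 surface visible in this diff: the newly exported llmUsageLogger and userPromptErrorLogger, plus the per-provider fastModel option added to the LLM config. This is illustrative only; the AnthropicLLM export name, the import path for it, and any config fields beyond model, fastModel, defaultLimit, and apiKey are assumptions not confirmed by the diff, and the logger method signatures are inferred from the call sites shown above.

// sketch.ts - hedged usage sketch, not the package's documented API
import { AnthropicLLM, llmUsageLogger, userPromptErrorLogger } from "@superatomai/sdk-node"; // AnthropicLLM export assumed

// fastModel (new in 0.0.40) is used for lightweight calls (tool selection, follow-ups);
// when omitted it falls back to the provider's getDefaultFastModel().
const llm = new AnthropicLLM({
  model: "anthropic/claude-sonnet-4-5-20250929",
  fastModel: "anthropic/claude-haiku-4-5-20251001",
  apiKey: process.env.ANTHROPIC_API_KEY,
});

// Per-request log files are reset with a short prompt context, as get_user_request does in the diff.
const promptContext = "User Prompt: show weekly revenue...";
llmUsageLogger.resetLogFile(promptContext);
userPromptErrorLogger.resetLogFile(promptContext);

async function run(userPrompt: string): Promise<void> {
  try {
    // The actual request entry point and its argument list are not fully shown in this diff,
    // so no call is made here; this block only demonstrates the error-logging pattern.
  } catch (err) {
    userPromptErrorLogger.logError(
      "handleUserRequest",
      err instanceof Error ? err : new Error(String(err)),
      { userPrompt }
    );
    userPromptErrorLogger.writeSummary();
  }
  llmUsageLogger.logSessionSummary(userPrompt.substring(0, 50));
}

run("show weekly revenue").catch(console.error);

The design choice visible in the diff is that both loggers are module-level singletons: get_user_request resets their log files at the start of each prompt and writes a summary on completion or failure, so a consumer embedding the SDK would follow the same reset/summarize lifecycle per request.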