@superatomai/sdk-node 0.0.72 → 0.0.75

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -3310,24 +3310,20 @@ If adaptation is not possible or would fundamentally change the component:
3310
3310
  "dash-comp-picker": {
3311
3311
  system: `You are a component selection expert that picks the best dashboard component and generates complete props based on user requests.
3312
3312
 
3313
+ ## CRITICAL - READ FIRST
3314
+
3315
+ 1. Your ENTIRE response must be ONLY a raw JSON object - start with { end with }
3316
+ 2. DO NOT explain or answer the user's question in natural language
3317
+ 3. DO NOT use markdown code blocks (no \`\`\`)
3318
+ 4. DO NOT add any text before or after the JSON
3319
+ 5. After executing tools (if needed), return JSON with component selection - NOT a text summary of results
3320
+
3313
3321
  ## Your Task
3314
3322
 
3315
3323
  Analyze the user's request and:
3316
3324
  1. **Select the most appropriate component** from the available components list
3317
- 2. **Determine the data source**: Database query OR External tool
3318
- 3. **Generate complete props** for the selected component
3319
-
3320
- ## Available External Tools
3321
-
3322
- The following external tools are available:
3323
-
3324
- {{AVAILABLE_TOOLS}}
3325
-
3326
- When a tool is needed to complete the user's request:
3327
- 1. **Analyze the request** to determine which tool is needed
3328
- 2. **Extract parameters** from the user's question
3329
- 3. **Execute the tool** by calling it with the extracted parameters
3330
- 4. **Use the results** to configure the component (field names for axes, columns, etc.)
3325
+ 2. **Determine the data source**: Database query OR External tool (ERP)
3326
+ 3. **Generate complete props** for the selected component including the data retrieval/modification method
3331
3327
 
3332
3328
  ## Component Selection Rules
3333
3329
 
@@ -3357,7 +3353,7 @@ The user prompt may contain an **existing component** to update. Detect this by
3357
3353
 
3358
3354
  ### Use DATABASE when:
3359
3355
  - User asks about data that exists in the database schema
3360
- - Questions about internal business data
3356
+ - Questions about internal business data
3361
3357
  - CRUD operations on database tables
3362
3358
 
3363
3359
  ### Use EXTERNAL TOOL when:
@@ -3370,6 +3366,12 @@ The user prompt may contain an **existing component** to update. Detect this by
3370
3366
 
3371
3367
  **CRITICAL**: Look at each component's "Props Structure" in the available components list. Generate ALL props that the component expects.
3372
3368
 
3369
+ **CRITICAL: Each component uses EXACTLY ONE data source - never both!**
3370
+ - If using \`query\`, set \`externalTool: null\`
3371
+ - If using \`externalTool\`, set \`query: null\`
3372
+ - NEVER copy placeholder/description text from component metadata as actual values
3373
+ - \`externalTool.parameters\` MUST be an object, never a string
3374
+
3373
3375
  ### For Data Viewing Components (charts, tables, KPIs):
3374
3376
 
3375
3377
  **Option A: Database Query** (when data is in database)
@@ -3378,21 +3380,19 @@ The user prompt may contain an **existing component** to update. Detect this by
3378
3380
  "query": {
3379
3381
  "sql": "SELECT column1, column2 FROM table WHERE condition = $param LIMIT 32",
3380
3382
  "params": { "param": "value" }
3381
- }
3383
+ },
3384
+ "externalTool": null
3382
3385
  }
3383
3386
  \`\`\`
3384
3387
 
3385
3388
  **Option B: External Tool** (when data is from ERP/external system)
3386
3389
  \`\`\`json
3387
3390
  {
3391
+ "query": null,
3388
3392
  "externalTool": {
3389
3393
  "toolId": "tool_id_from_list",
3390
3394
  "toolName": "Tool Display Name",
3391
- "action": "get",
3392
- "params": {
3393
- "param1": "value1",
3394
- "param2": "value2"
3395
- }
3395
+ "parameters": { "param1": "value1", "param2": "value2" }
3396
3396
  }
3397
3397
  }
3398
3398
  \`\`\`
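The updated prompt now requires exactly one data source per component (`query` or `externalTool`, with the unused one set to `null`) and an object-valued `externalTool.parameters`. A minimal sketch of how a consumer of this prompt's output could enforce those rules before rendering; the helper name and error text are illustrative, not part of the SDK:

```js
// Illustrative guard for the "exactly one data source" rule stated in the prompt
// above. Not part of @superatomai/sdk-node; names and messages are assumptions.
function assertSingleDataSource(props) {
  if (props.query != null && props.externalTool != null) {
    throw new Error("Component props must set only one of `query` or `externalTool`");
  }
  if (props.externalTool && typeof props.externalTool.parameters === "string") {
    throw new Error("`externalTool.parameters` must be an object, never a string");
  }
  return props;
}
```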
@@ -3406,6 +3406,7 @@ The user prompt may contain an **existing component** to update. Detect this by
3406
3406
  "sql": "INSERT INTO table (col1, col2) VALUES ($col1, $col2)",
3407
3407
  "params": {}
3408
3408
  },
3409
+ "externalTool": null,
3409
3410
  "fields": [
3410
3411
  { "name": "col1", "type": "text", "required": true },
3411
3412
  { "name": "col2", "type": "number", "required": false }
@@ -3413,16 +3414,38 @@ The user prompt may contain an **existing component** to update. Detect this by
3413
3414
  }
3414
3415
  \`\`\`
3415
3416
 
3417
+ For UPDATE:
3418
+ \`\`\`json
3419
+ {
3420
+ "query": {
3421
+ "sql": "UPDATE table SET col1 = $col1, col2 = $col2 WHERE id = $id",
3422
+ "params": { "id": "record_id" }
3423
+ },
3424
+ "externalTool": null
3425
+ }
3426
+ \`\`\`
3427
+
3428
+ For DELETE:
3429
+ \`\`\`json
3430
+ {
3431
+ "query": {
3432
+ "sql": "DELETE FROM table WHERE id = $id",
3433
+ "params": { "id": "record_id" }
3434
+ },
3435
+ "externalTool": null,
3436
+ "submitButtonText": "Confirm Delete",
3437
+ "submitButtonColor": "danger"
3438
+ }
3439
+ \`\`\`
3440
+
3416
3441
  **Option B: External Tool Mutation**
3417
3442
  \`\`\`json
3418
3443
  {
3444
+ "query": null,
3419
3445
  "externalTool": {
3420
3446
  "toolId": "tool_id_from_list",
3421
3447
  "toolName": "Tool Display Name",
3422
- "action": "create|update|delete",
3423
- "params": {
3424
- "param1": "value_or_placeholder"
3425
- }
3448
+ "parameters": { "param1": "value_or_placeholder" }
3426
3449
  },
3427
3450
  "fields": [
3428
3451
  { "name": "param1", "type": "text", "required": true }
@@ -3437,6 +3460,7 @@ The user prompt may contain an **existing component** to update. Detect this by
3437
3460
 
3438
3461
  You MUST respond with ONLY a valid JSON object (no markdown, no code blocks):
3439
3462
 
3463
+ \`\`\`json
3440
3464
  {
3441
3465
  "componentId": "id_from_available_list_or_existing_component_id",
3442
3466
  "componentName": "name_of_component",
@@ -3451,6 +3475,7 @@ You MUST respond with ONLY a valid JSON object (no markdown, no code blocks):
3451
3475
  // Include all other required props (title, description, config, fields, etc.)
3452
3476
  }
3453
3477
  }
3478
+ \`\`\`
3454
3479
 
3455
3480
  **CRITICAL:**
3456
3481
  - Return ONLY valid JSON (no markdown code blocks, no text before/after)
@@ -3473,7 +3498,8 @@ You MUST respond with ONLY a valid JSON object (no markdown, no code blocks):
3473
3498
 
3474
3499
  ---
3475
3500
 
3476
- ## CONTEXT`,
3501
+ ## CONTEXT
3502
+ `,
3477
3503
  user: `{{USER_PROMPT}}`
3478
3504
  },
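The output-format rules in this prompt insist on a raw JSON object with no surrounding markdown. Later in this diff the bundle parses model output through a `_parseJSON` helper whose implementation is not shown; a hedged sketch of the kind of lenient extraction a caller might use when a model still wraps its answer in fences or prose:

```js
// Hedged sketch only; the SDK's actual _parseJSON implementation is not in this diff.
function parseJsonLoose(text) {
  // Keep only the outermost {...} span, which tolerates stray fences or prose.
  const start = text.indexOf("{");
  const end = text.lastIndexOf("}");
  if (start === -1 || end === -1 || end < start) {
    throw new Error("No JSON object found in model response");
  }
  return JSON.parse(text.slice(start, end + 1));
}
```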
3479
3505
  "dash-filter-picker": {
@@ -3619,9 +3645,7 @@ var PromptLoader = class {
3619
3645
  this.databaseRulesCache = /* @__PURE__ */ new Map();
3620
3646
  this.isInitialized = false;
3621
3647
  this.databaseType = "postgresql";
3622
- logger.debug("Initializing PromptLoader...");
3623
3648
  this.promptsDir = config?.promptsDir || path2.join(process.cwd(), ".prompts");
3624
- logger.debug(`Prompts directory set to: ${this.promptsDir}`);
3625
3649
  }
3626
3650
  /**
3627
3651
  * Load a prompt template from file system OR fallback to hardcoded prompts
@@ -3635,7 +3659,6 @@ var PromptLoader = class {
3635
3659
  if (fs3.existsSync(systemPath) && fs3.existsSync(userPath)) {
3636
3660
  const system = fs3.readFileSync(systemPath, "utf-8");
3637
3661
  const user = fs3.readFileSync(userPath, "utf-8");
3638
- logger.info(`\u2713 Loaded prompt '${promptName}' from file system: ${this.promptsDir}`);
3639
3662
  return { system, user };
3640
3663
  }
3641
3664
  } catch (error) {
@@ -3643,7 +3666,6 @@ var PromptLoader = class {
3643
3666
  }
3644
3667
  const hardcodedPrompt = PROMPTS[promptName];
3645
3668
  if (hardcodedPrompt) {
3646
- logger.info(`\u2713 Loaded prompt '${promptName}' from hardcoded fallback`);
3647
3669
  return hardcodedPrompt;
3648
3670
  }
3649
3671
  throw new Error(`Prompt template '${promptName}' not found in either ${this.promptsDir} or hardcoded prompts. Available prompts: ${Object.keys(PROMPTS).join(", ")}`);
@@ -3657,7 +3679,6 @@ var PromptLoader = class {
3657
3679
  logger.debug("PromptLoader already initialized, skipping...");
3658
3680
  return;
3659
3681
  }
3660
- logger.info("Loading prompts into memory...");
3661
3682
  const promptTypes = Object.keys(PROMPTS);
3662
3683
  for (const promptName of promptTypes) {
3663
3684
  try {
@@ -3669,7 +3690,6 @@ var PromptLoader = class {
3669
3690
  }
3670
3691
  }
3671
3692
  this.isInitialized = true;
3672
- logger.info(`Successfully loaded ${this.promptCache.size} prompt templates into memory`);
3673
3693
  }
3674
3694
  /**
3675
3695
  * Replace variables in a template string using {{VARIABLE_NAME}} pattern
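The doc comment above describes `{{VARIABLE_NAME}}` substitution; the body of `replaceVariables` falls outside this diff. A minimal sketch of such a substitution, assuming unknown markers are left in place:

```js
// Sketch of a {{VARIABLE_NAME}} substitution helper as described by the doc
// comment above; the SDK's real implementation is not shown in this diff.
function replaceVariables(template, variables) {
  return template.replace(/\{\{([A-Z0-9_]+)\}\}/g, (match, name) =>
    name in variables ? String(variables[name]) : match
  );
}

// e.g. replaceVariables("Context: {{USER_PROMPT}}", { USER_PROMPT: "show top orders" })
//      -> "Context: show top orders"
```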
@@ -3709,7 +3729,6 @@ var PromptLoader = class {
3709
3729
  const processedContext = this.replaceVariables(contextMarker + contextPart, variables);
3710
3730
  const staticLength = processedStatic.length;
3711
3731
  const contextLength = processedContext.length;
3712
- logger.debug(`\u2713 Prompt caching enabled for '${promptName}' (cached: ${staticLength} chars, dynamic: ${contextLength} chars)`);
3713
3732
  return {
3714
3733
  system: [
3715
3734
  {
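The removed debug line above confirms what this block does: the rendered system prompt is split into a static, cacheable part and a dynamic context part, so provider prompt caching only has to re-send the tail that changes per request. That is presumably also why the `dash-comp-picker` prompt now ends on a bare `## CONTEXT` heading. A hedged sketch of the split, assuming the marker is that heading:

```js
// Hedged sketch of the static/dynamic split used for prompt caching above.
// Assumes the context marker is the "## CONTEXT" heading seen earlier in this diff.
function splitForCaching(systemPrompt, contextMarker = "## CONTEXT") {
  const idx = systemPrompt.indexOf(contextMarker);
  if (idx === -1) return { staticPart: systemPrompt, contextPart: "" };
  return {
    staticPart: systemPrompt.slice(0, idx), // stable across requests, cacheable
    contextPart: systemPrompt.slice(idx),   // varies per request, sent uncached
  };
}
```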
@@ -3746,7 +3765,6 @@ var PromptLoader = class {
3746
3765
  this.promptsDir = dir;
3747
3766
  this.isInitialized = false;
3748
3767
  this.promptCache.clear();
3749
- logger.debug(`Prompts directory changed to: ${dir}`);
3750
3768
  }
3751
3769
  /**
3752
3770
  * Get current prompts directory
@@ -3774,7 +3792,6 @@ var PromptLoader = class {
3774
3792
  setDatabaseType(type) {
3775
3793
  this.databaseType = type;
3776
3794
  this.databaseRulesCache.clear();
3777
- logger.debug(`Database type set to: ${type}`);
3778
3795
  }
3779
3796
  /**
3780
3797
  * Get current database type
@@ -3790,7 +3807,6 @@ var PromptLoader = class {
3790
3807
  */
3791
3808
  async loadDatabaseRules() {
3792
3809
  if (this.databaseRulesCache.has(this.databaseType)) {
3793
- logger.debug(`\u2713 Database rules for '${this.databaseType}' loaded from cache`);
3794
3810
  return this.databaseRulesCache.get(this.databaseType);
3795
3811
  }
3796
3812
  const rulesPath = path2.join(this.promptsDir, "database-rules", `${this.databaseType}.md`);
@@ -3798,7 +3814,6 @@ var PromptLoader = class {
3798
3814
  if (fs3.existsSync(rulesPath)) {
3799
3815
  const rules = fs3.readFileSync(rulesPath, "utf-8");
3800
3816
  this.databaseRulesCache.set(this.databaseType, rules);
3801
- logger.info(`\u2713 Loaded database rules for '${this.databaseType}' from ${rulesPath}`);
3802
3817
  return rules;
3803
3818
  }
3804
3819
  } catch (error) {
@@ -4074,7 +4089,6 @@ var Schema = class {
4074
4089
  * @returns Parsed schema object or null if error occurs
4075
4090
  */
4076
4091
  getDatabaseSchema() {
4077
- logger.info(`SCHEMA_FILE_PATH: ${this.schemaFilePath}`);
4078
4092
  try {
4079
4093
  const dir = path3.dirname(this.schemaFilePath);
4080
4094
  if (!fs4.existsSync(dir)) {
@@ -4343,14 +4357,6 @@ Format: [TIMESTAMP] [REQUEST_ID] [PROVIDER/MODEL] [METHOD]
4343
4357
  Cost: $${entry.costUSD.toFixed(6)} | Time: ${entry.durationMs}ms${toolInfo}${errorInfo}${cacheStatus}
4344
4358
  `;
4345
4359
  this.logStream?.write(logLine);
4346
- if (entry.cacheReadTokens && entry.cacheReadTokens > 0) {
4347
- console.log(`[LLM] \u26A1 CACHE HIT: ${entry.cacheReadTokens.toLocaleString()} tokens read from cache (${entry.method})`);
4348
- } else if (entry.cacheWriteTokens && entry.cacheWriteTokens > 0) {
4349
- console.log(`[LLM] \u{1F4DD} CACHE WRITE: ${entry.cacheWriteTokens.toLocaleString()} tokens cached for future requests (${entry.method})`);
4350
- }
4351
- if (process.env.SUPERATOM_LOG_LEVEL === "verbose") {
4352
- console.log("\n[LLM-Usage]", logLine);
4353
- }
4354
4360
  }
4355
4361
  /**
4356
4362
  * Log session summary (call at end of request)
@@ -4383,11 +4389,6 @@ Avg Time/Call: ${Math.round(this.sessionStats.totalDurationMs / this.sessionStat
4383
4389
 
4384
4390
  `;
4385
4391
  this.logStream?.write(summary);
4386
- console.log("\n[LLM-Usage] Session Summary:");
4387
- console.log(` Calls: ${this.sessionStats.totalCalls} | Tokens: ${(this.sessionStats.totalInputTokens + this.sessionStats.totalOutputTokens).toLocaleString()} | Cost: $${this.sessionStats.totalCostUSD.toFixed(4)} | Time: ${(this.sessionStats.totalDurationMs / 1e3).toFixed(2)}s`);
4388
- if (hasCaching) {
4389
- console.log(` Cache: ${this.sessionStats.totalCacheReadTokens.toLocaleString()} read, ${this.sessionStats.totalCacheWriteTokens.toLocaleString()} written | Savings: ~$${cacheReadSavings.toFixed(4)}`);
4390
- }
4391
4392
  }
4392
4393
  /**
4393
4394
  * Reset session stats (call at start of new user request)
@@ -4428,7 +4429,6 @@ Format: [TIMESTAMP] [REQUEST_ID] [PROVIDER/MODEL] [METHOD]
4428
4429
  `;
4429
4430
  this.logStream.write(header);
4430
4431
  this.resetSession();
4431
- console.log(`[LLM-Usage] Log file reset for new request: ${this.logPath}`);
4432
4432
  } catch (error) {
4433
4433
  console.error("[LLM-Usage-Logger] Failed to reset log file:", error);
4434
4434
  }
@@ -5343,14 +5343,97 @@ var LLM = class {
5343
5343
  const genAI = new GoogleGenerativeAI(apiKey);
5344
5344
  const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
5345
5345
  try {
5346
+ if (json && options.partial) {
5347
+ const model2 = genAI.getGenerativeModel({
5348
+ model: modelName,
5349
+ systemInstruction: systemPrompt,
5350
+ generationConfig: {
5351
+ maxOutputTokens: options.maxTokens || 1e3,
5352
+ temperature: options.temperature,
5353
+ topP: options.topP,
5354
+ responseMimeType: "application/json"
5355
+ }
5356
+ });
5357
+ const result2 = await model2.generateContentStream(messages.user);
5358
+ let fullText2 = "";
5359
+ let inputTokens2 = 0;
5360
+ let outputTokens2 = 0;
5361
+ for await (const chunk of result2.stream) {
5362
+ try {
5363
+ const text = chunk.text();
5364
+ if (text) {
5365
+ fullText2 += text;
5366
+ options.partial(text);
5367
+ }
5368
+ } catch (chunkError) {
5369
+ }
5370
+ if (chunk.usageMetadata) {
5371
+ inputTokens2 = chunk.usageMetadata.promptTokenCount || 0;
5372
+ outputTokens2 = chunk.usageMetadata.candidatesTokenCount || 0;
5373
+ }
5374
+ }
5375
+ const durationMs2 = Date.now() - startTime;
5376
+ if (inputTokens2 === 0) {
5377
+ inputTokens2 = Math.ceil((systemPrompt.length + messages.user.length) / 4);
5378
+ }
5379
+ if (outputTokens2 === 0) {
5380
+ outputTokens2 = Math.ceil(fullText2.length / 4);
5381
+ }
5382
+ llmUsageLogger.log({
5383
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
5384
+ requestId,
5385
+ provider: "gemini",
5386
+ model: modelName,
5387
+ method: "stream-json-partial",
5388
+ inputTokens: inputTokens2,
5389
+ outputTokens: outputTokens2,
5390
+ totalTokens: inputTokens2 + outputTokens2,
5391
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens2, outputTokens2),
5392
+ durationMs: durationMs2,
5393
+ success: true
5394
+ });
5395
+ return this._parseJSON(fullText2);
5396
+ }
5397
+ if (json) {
5398
+ const model2 = genAI.getGenerativeModel({
5399
+ model: modelName,
5400
+ systemInstruction: systemPrompt,
5401
+ generationConfig: {
5402
+ maxOutputTokens: options.maxTokens || 1e3,
5403
+ temperature: options.temperature,
5404
+ topP: options.topP,
5405
+ responseMimeType: "application/json"
5406
+ }
5407
+ });
5408
+ const result2 = await model2.generateContent(messages.user);
5409
+ const response = result2.response;
5410
+ const fullText2 = response.text();
5411
+ const durationMs2 = Date.now() - startTime;
5412
+ const usage = response.usageMetadata;
5413
+ const inputTokens2 = usage?.promptTokenCount || Math.ceil((systemPrompt.length + messages.user.length) / 4);
5414
+ const outputTokens2 = usage?.candidatesTokenCount || Math.ceil(fullText2.length / 4);
5415
+ llmUsageLogger.log({
5416
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
5417
+ requestId,
5418
+ provider: "gemini",
5419
+ model: modelName,
5420
+ method: "stream-json",
5421
+ inputTokens: inputTokens2,
5422
+ outputTokens: outputTokens2,
5423
+ totalTokens: inputTokens2 + outputTokens2,
5424
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens2, outputTokens2),
5425
+ durationMs: durationMs2,
5426
+ success: true
5427
+ });
5428
+ return this._parseJSON(fullText2);
5429
+ }
5346
5430
  const model = genAI.getGenerativeModel({
5347
5431
  model: modelName,
5348
5432
  systemInstruction: systemPrompt,
5349
5433
  generationConfig: {
5350
5434
  maxOutputTokens: options.maxTokens || 1e3,
5351
5435
  temperature: options.temperature,
5352
- topP: options.topP,
5353
- responseMimeType: json ? "application/json" : void 0
5436
+ topP: options.topP
5354
5437
  }
5355
5438
  });
5356
5439
  const result = await model.generateContentStream(messages.user);
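The two new branches above give Gemini a dedicated JSON path: `responseMimeType: "application/json"` is set on the model, chunks are forwarded to `options.partial` when streaming, chunks that throw are skipped, and the accumulated text is parsed at the end (with a chars/4 token estimate when usage metadata is missing). A standalone sketch of the same pattern against `@google/generative-ai`; it is not the SDK's internal method:

```js
import { GoogleGenerativeAI } from "@google/generative-ai";

// Standalone sketch of the streamed-JSON pattern added above; not the SDK's code.
async function streamJson(apiKey, modelName, systemPrompt, userPrompt, onPartial) {
  const genAI = new GoogleGenerativeAI(apiKey);
  const model = genAI.getGenerativeModel({
    model: modelName,
    systemInstruction: systemPrompt,
    generationConfig: { responseMimeType: "application/json" },
  });
  const result = await model.generateContentStream(userPrompt);
  let fullText = "";
  for await (const chunk of result.stream) {
    try {
      const text = chunk.text();
      if (text) {
        fullText += text;
        onPartial?.(text); // forward raw JSON fragments as they arrive
      }
    } catch {
      // some chunks carry no text and throw, as the try/catch above anticipates
    }
  }
  return JSON.parse(fullText); // the bundle uses its own _parseJSON helper here
}
```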
@@ -5358,12 +5441,15 @@ var LLM = class {
5358
5441
  let inputTokens = 0;
5359
5442
  let outputTokens = 0;
5360
5443
  for await (const chunk of result.stream) {
5361
- const text = chunk.text();
5362
- if (text) {
5363
- fullText += text;
5364
- if (options.partial) {
5365
- options.partial(text);
5444
+ try {
5445
+ const text = chunk.text();
5446
+ if (text) {
5447
+ fullText += text;
5448
+ if (options.partial) {
5449
+ options.partial(text);
5450
+ }
5366
5451
  }
5452
+ } catch (chunkError) {
5367
5453
  }
5368
5454
  if (chunk.usageMetadata) {
5369
5455
  inputTokens = chunk.usageMetadata.promptTokenCount || 0;
@@ -5390,9 +5476,6 @@ var LLM = class {
5390
5476
  durationMs,
5391
5477
  success: true
5392
5478
  });
5393
- if (json) {
5394
- return this._parseJSON(fullText);
5395
- }
5396
5479
  return fullText;
5397
5480
  } catch (error) {
5398
5481
  const durationMs = Date.now() - startTime;
@@ -5413,6 +5496,26 @@ var LLM = class {
5413
5496
  throw error;
5414
5497
  }
5415
5498
  }
5499
+ /**
5500
+ * Recursively strip unsupported JSON Schema properties for Gemini
5501
+ * Gemini doesn't support: additionalProperties, $schema, etc.
5502
+ */
5503
+ static _cleanSchemaForGemini(obj) {
5504
+ if (obj === null || typeof obj !== "object") {
5505
+ return obj;
5506
+ }
5507
+ if (Array.isArray(obj)) {
5508
+ return obj.map((item) => this._cleanSchemaForGemini(item));
5509
+ }
5510
+ const cleaned = {};
5511
+ for (const [key, value] of Object.entries(obj)) {
5512
+ if (key === "additionalProperties" || key === "$schema") {
5513
+ continue;
5514
+ }
5515
+ cleaned[key] = this._cleanSchemaForGemini(value);
5516
+ }
5517
+ return cleaned;
5518
+ }
5416
5519
  static async _geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
5417
5520
  const methodStartTime = Date.now();
5418
5521
  const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
@@ -5423,7 +5526,7 @@ var LLM = class {
5423
5526
  description: tool.description,
5424
5527
  parameters: {
5425
5528
  type: SchemaType.OBJECT,
5426
- properties: tool.input_schema.properties,
5529
+ properties: this._cleanSchemaForGemini(tool.input_schema.properties),
5427
5530
  required: tool.input_schema.required || []
5428
5531
  }
5429
5532
  }));
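`_cleanSchemaForGemini` exists because, per the comment above, Gemini's function-declaration schema does not accept JSON Schema keywords such as `additionalProperties` and `$schema`, and tool schemas often carry them at arbitrary depth. An illustrative before/after (values are made up):

```js
// Illustrative input for the recursive cleaning applied above (values made up).
const rawProperties = {
  $schema: "http://json-schema.org/draft-07/schema#",
  filters: {
    type: "object",
    additionalProperties: false,
    properties: { status: { type: "string" } },
  },
};
// After LLM._cleanSchemaForGemini(rawProperties), the unsupported keys are gone
// at every nesting level:
// { filters: { type: "object", properties: { status: { type: "string" } } } }
```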
@@ -5902,21 +6005,20 @@ var getKnowledgeBase = async ({
5902
6005
  }) => {
5903
6006
  try {
5904
6007
  if (!collections || !collections["knowledge-base"] || !collections["knowledge-base"]["query"]) {
5905
- logger.info("[KnowledgeBase] knowledge-base.query collection not registered, skipping");
6008
+ logger.warn("[KnowledgeBase] knowledge-base.query collection not registered, skipping");
5906
6009
  return "";
5907
6010
  }
5908
- logger.info(`[KnowledgeBase] Querying knowledge base for: "${prompt.substring(0, 50)}..."`);
5909
6011
  const result = await collections["knowledge-base"]["query"]({
5910
6012
  prompt,
5911
6013
  topK
5912
6014
  });
5913
6015
  if (!result || !result.content) {
5914
- logger.info("[KnowledgeBase] No knowledge base results returned");
6016
+ logger.warn("[KnowledgeBase] No knowledge base results returned");
5915
6017
  return "";
5916
6018
  }
5917
6019
  logger.info(`[KnowledgeBase] Retrieved knowledge base context (${result.content.length} chars)`);
5918
6020
  if (result.metadata?.sources && result.metadata.sources.length > 0) {
5919
- logger.debug(`[KnowledgeBase] Sources: ${result.metadata.sources.map((s) => s.title).join(", ")}`);
6021
+ logger.warn(`[KnowledgeBase] Sources: ${result.metadata.sources.map((s) => s.title).join(", ")}`);
5920
6022
  }
5921
6023
  return result.content;
5922
6024
  } catch (error) {
@@ -5931,13 +6033,12 @@ var getGlobalKnowledgeBase = async ({
5931
6033
  }) => {
5932
6034
  try {
5933
6035
  if (!collections || !collections["knowledge-base"] || !collections["knowledge-base"]["getGlobal"]) {
5934
- logger.info("[KnowledgeBase] knowledge-base.getGlobal collection not registered, skipping");
6036
+ logger.warn("[KnowledgeBase] knowledge-base.getGlobal collection not registered, skipping");
5935
6037
  return "";
5936
6038
  }
5937
- logger.info("[KnowledgeBase] Fetching global knowledge base nodes...");
5938
6039
  const result = await collections["knowledge-base"]["getGlobal"]({ limit });
5939
6040
  if (!result || !result.content) {
5940
- logger.info("[KnowledgeBase] No global knowledge base nodes found");
6041
+ logger.warn("[KnowledgeBase] No global knowledge base nodes found");
5941
6042
  return "";
5942
6043
  }
5943
6044
  logger.info(`[KnowledgeBase] Retrieved ${result.count || 0} global knowledge base nodes`);
@@ -5955,14 +6056,13 @@ var getUserKnowledgeBase = async ({
5955
6056
  }) => {
5956
6057
  try {
5957
6058
  if (!userId) {
5958
- logger.info("[KnowledgeBase] No userId provided, skipping user knowledge base");
6059
+ logger.warn("[KnowledgeBase] No userId provided, skipping user knowledge base");
5959
6060
  return "";
5960
6061
  }
5961
6062
  if (!collections || !collections["knowledge-base"] || !collections["knowledge-base"]["getByUser"]) {
5962
- logger.info("[KnowledgeBase] knowledge-base.getByUser collection not registered, skipping");
6063
+ logger.warn("[KnowledgeBase] knowledge-base.getByUser collection not registered, skipping");
5963
6064
  return "";
5964
6065
  }
5965
- logger.info(`[KnowledgeBase] Fetching user knowledge base nodes for userId: ${userId}...`);
5966
6066
  const result = await collections["knowledge-base"]["getByUser"]({
5967
6067
  userId: Number(userId),
5968
6068
  limit
@@ -5985,7 +6085,6 @@ var getAllKnowledgeBase = async ({
5985
6085
  userId,
5986
6086
  topK = 3
5987
6087
  }) => {
5988
- logger.info("[KnowledgeBase] Fetching all knowledge base contexts...");
5989
6088
  const [globalContext, userContext, queryContext] = await Promise.all([
5990
6089
  getGlobalKnowledgeBase({ collections }),
5991
6090
  getUserKnowledgeBase({ collections, userId }),
@@ -6007,7 +6106,6 @@ var getAllKnowledgeBase = async ({
6007
6106
  combinedContext += "The following information is semantically relevant to the current query:\n\n";
6008
6107
  combinedContext += queryContext + "\n\n";
6009
6108
  }
6010
- logger.info(`[KnowledgeBase] Combined knowledge base context: global=${globalContext.length} chars, user=${userContext.length} chars, query=${queryContext.length} chars`);
6011
6109
  return {
6012
6110
  globalContext,
6013
6111
  userContext,
@@ -6244,11 +6342,11 @@ var searchConversationsWithReranking = async (options) => {
6244
6342
  } = options;
6245
6343
  try {
6246
6344
  if (!collections || !collections["conversation-history"]) {
6247
- logger.info("[ConversationSearch] conversation-history collection not registered, skipping");
6345
+ logger.warn("[ConversationSearch] conversation-history collection not registered, skipping");
6248
6346
  return null;
6249
6347
  }
6250
6348
  if (!collections["conversation-history"]["searchMultiple"]) {
6251
- logger.info("[ConversationSearch] searchMultiple not available, falling back to standard search");
6349
+ logger.warn("[ConversationSearch] searchMultiple not available, falling back to standard search");
6252
6350
  return searchConversations({
6253
6351
  userPrompt,
6254
6352
  collections,
@@ -6256,9 +6354,6 @@ var searchConversationsWithReranking = async (options) => {
6256
6354
  similarityThreshold
6257
6355
  });
6258
6356
  }
6259
- logger.info(`[ConversationSearch] Hybrid search for: "${userPrompt.substring(0, 50)}..."`);
6260
- logger.info(`[ConversationSearch] Fetching ${rerankCandidates} candidates for reranking`);
6261
- logger.info(`[ConversationSearch] Weights - Semantic: ${hybridOptions.semanticWeight}, BM25: ${hybridOptions.bm25Weight}`);
6262
6357
  const results = await collections["conversation-history"]["searchMultiple"]({
6263
6358
  userPrompt,
6264
6359
  userId,
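The deleted log lines above are the only place this hunk names the hybrid weighting (`semanticWeight` / `bm25Weight`); the scoring itself happens inside the `searchMultiple` collection and is not part of this diff. For orientation only, a generic weighted blend of the two signals looks like:

```js
// Generic illustration, not the SDK's implementation: blend a semantic similarity
// score with a BM25 relevance score using the weights named in the logs above.
function hybridScore(semanticScore, bm25Score, { semanticWeight = 0.7, bm25Weight = 0.3 } = {}) {
  return semanticWeight * semanticScore + bm25Weight * bm25Score;
}
```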
@@ -6299,7 +6394,6 @@ var searchConversationsWithReranking = async (options) => {
6299
6394
  logger.info(
6300
6395
  `[ConversationSearch] \u2713 Found match with semantic score ${(semanticScore * 100).toFixed(2)}%`
6301
6396
  );
6302
- logger.info(` - Returning cached result for: "${matchedUserPrompt}"`);
6303
6397
  return {
6304
6398
  uiBlock: best.uiBlock,
6305
6399
  similarity: semanticScore,
@@ -6741,10 +6835,9 @@ Fixed SQL query:`;
6741
6835
  * @param component - The component to validate
6742
6836
  * @param collections - Collections object containing database execute function
6743
6837
  * @param apiKey - Optional API key for LLM calls
6744
- * @param logCollector - Optional log collector for logging
6745
6838
  * @returns Validation result with component, query key, and result
6746
6839
  */
6747
- async validateSingleQuery(component, collections, apiKey, logCollector) {
6840
+ async validateSingleQuery(component, collections, apiKey) {
6748
6841
  const query = component.props?.query;
6749
6842
  const originalQueryKey = this.getQueryCacheKey(query);
6750
6843
  const queryStr = typeof query === "string" ? query : query?.sql || "";
@@ -6765,7 +6858,6 @@ Fixed SQL query:`;
6765
6858
  validated = true;
6766
6859
  queryCache.set(validationResult.cacheKey, result);
6767
6860
  logger.info(`[${this.config.providerName}] \u2713 Query validated for ${component.name} (attempt ${attempts}) - cached for frontend`);
6768
- logCollector?.info(`\u2713 Query validated for ${component.name}`);
6769
6861
  if (currentQueryStr !== queryStr) {
6770
6862
  const fixedQuery = typeof query === "string" ? currentQueryStr : { ...query, sql: currentQueryStr };
6771
6863
  component.props = {
@@ -6778,14 +6870,11 @@ Fixed SQL query:`;
6778
6870
  } catch (error) {
6779
6871
  lastError = error instanceof Error ? error.message : String(error);
6780
6872
  logger.warn(`[${this.config.providerName}] Query validation failed for ${component.name} (attempt ${attempts}/${MAX_QUERY_VALIDATION_RETRIES}): ${lastError}`);
6781
- logCollector?.warn(`Query validation failed for ${component.name}: ${lastError}`);
6782
6873
  if (attempts >= MAX_QUERY_VALIDATION_RETRIES) {
6783
6874
  logger.error(`[${this.config.providerName}] \u2717 Max retries reached for ${component.name}, excluding from response`);
6784
- logCollector?.error(`Max retries reached for ${component.name}, component excluded from response`);
6785
6875
  break;
6786
6876
  }
6787
6877
  logger.info(`[${this.config.providerName}] Requesting query fix from LLM for ${component.name}...`);
6788
- logCollector?.info(`Requesting query fix for ${component.name}...`);
6789
6878
  try {
6790
6879
  const fixedQueryStr = await this.requestQueryFix(
6791
6880
  currentQueryStr,
@@ -6819,7 +6908,6 @@ Fixed SQL query:`;
6819
6908
  }
6820
6909
  if (!validated) {
6821
6910
  logger.warn(`[${this.config.providerName}] Component ${component.name} excluded from response due to failed query validation`);
6822
- logCollector?.warn(`Component ${component.name} excluded from response`);
6823
6911
  }
6824
6912
  return {
6825
6913
  component: validated ? component : null,
@@ -6833,10 +6921,9 @@ Fixed SQL query:`;
6833
6921
  * @param components - Array of components with potential queries
6834
6922
  * @param collections - Collections object containing database execute function
6835
6923
  * @param apiKey - Optional API key for LLM calls
6836
- * @param logCollector - Optional log collector for logging
6837
6924
  * @returns Object with validated components and query results map
6838
6925
  */
6839
- async validateComponentQueries(components, collections, apiKey, logCollector) {
6926
+ async validateComponentQueries(components, collections, apiKey) {
6840
6927
  const queryResults = /* @__PURE__ */ new Map();
6841
6928
  const validatedComponents = [];
6842
6929
  const componentsWithoutQuery = [];
@@ -6853,9 +6940,8 @@ Fixed SQL query:`;
6853
6940
  return { components: validatedComponents, queryResults };
6854
6941
  }
6855
6942
  logger.info(`[${this.config.providerName}] Validating ${componentsWithQuery.length} component queries in parallel...`);
6856
- logCollector?.info(`Validating ${componentsWithQuery.length} component queries in parallel...`);
6857
6943
  const validationPromises = componentsWithQuery.map(
6858
- (component) => this.validateSingleQuery(component, collections, apiKey, logCollector)
6944
+ (component) => this.validateSingleQuery(component, collections, apiKey)
6859
6945
  );
6860
6946
  const results = await Promise.allSettled(validationPromises);
6861
6947
  for (let i = 0; i < results.length; i++) {
@@ -6872,7 +6958,6 @@ Fixed SQL query:`;
6872
6958
  }
6873
6959
  } else {
6874
6960
  logger.error(`[${this.config.providerName}] Unexpected error validating ${component.name}: ${result.reason}`);
6875
- logCollector?.error(`Unexpected error validating ${component.name}: ${result.reason}`);
6876
6961
  }
6877
6962
  }
6878
6963
  logger.info(`[${this.config.providerName}] Parallel validation complete: ${validatedComponents.length}/${components.length} components validated`);
@@ -6934,22 +7019,17 @@ var ToolExecutorService = class {
6934
7019
  let sql = toolInput.sql;
6935
7020
  const params = toolInput.params || {};
6936
7021
  const reasoning = toolInput.reasoning;
6937
- const { streamBuffer, collections, logCollector, providerName } = this.config;
7022
+ const { streamBuffer, collections, providerName } = this.config;
6938
7023
  sql = ensureQueryLimit(sql, MAX_COMPONENT_QUERY_LIMIT, MAX_COMPONENT_QUERY_LIMIT);
6939
7024
  const queryKey = sql.toLowerCase().replace(/\s+/g, " ").trim();
6940
7025
  const attempts = (this.queryAttempts.get(queryKey) || 0) + 1;
6941
7026
  this.queryAttempts.set(queryKey, attempts);
6942
- logger.info(`[${providerName}] Executing query (attempt ${attempts}/${MAX_QUERY_ATTEMPTS}): ${sql.substring(0, 100)}...`);
6943
7027
  if (Object.keys(params).length > 0) {
6944
7028
  logger.info(`[${providerName}] Query params: ${JSON.stringify(params)}`);
6945
7029
  }
6946
- if (reasoning) {
6947
- logCollector?.info(`Query reasoning: ${reasoning}`);
6948
- }
6949
7030
  if (attempts > MAX_QUERY_ATTEMPTS) {
6950
7031
  const errorMsg = `Maximum query attempts (${MAX_QUERY_ATTEMPTS}) reached. Unable to generate a valid query for your question.`;
6951
7032
  logger.error(`[${providerName}] ${errorMsg}`);
6952
- logCollector?.error(errorMsg);
6953
7033
  this.maxAttemptsReached = true;
6954
7034
  if (streamBuffer.hasCallback()) {
6955
7035
  streamBuffer.write(`
@@ -7009,11 +7089,6 @@ ${sql}
7009
7089
  await streamDelay();
7010
7090
  }
7011
7091
  }
7012
- logCollector?.logQuery?.(
7013
- `Executing SQL query (attempt ${attempts})`,
7014
- { sql, params },
7015
- { reasoning, attempt: attempts }
7016
- );
7017
7092
  if (!collections?.["database"]?.["execute"]) {
7018
7093
  throw new Error("Database collection not registered. Please register database.execute collection to execute queries.");
7019
7094
  }
@@ -7025,8 +7100,6 @@ ${sql}
7025
7100
  );
7026
7101
  const data = result?.data || result;
7027
7102
  const rowCount = result?.count ?? (Array.isArray(data) ? data.length : "N/A");
7028
- logger.info(`[${providerName}] Query executed successfully, rows returned: ${rowCount}`);
7029
- logCollector?.info(`Query successful, returned ${rowCount} rows`);
7030
7103
  if (streamBuffer.hasCallback()) {
7031
7104
  streamBuffer.write(`
7032
7105
  \u2705 **Query executed successfully!**
@@ -7075,7 +7148,6 @@ ${sql}
7075
7148
  maxRows: DEFAULT_MAX_ROWS_FOR_LLM,
7076
7149
  maxCharsPerField: DEFAULT_MAX_CHARS_PER_FIELD2
7077
7150
  });
7078
- logger.info(`[${providerName}] Query result formatted: ${formattedResult.summary.recordsShown}/${formattedResult.summary.totalRecords} records`);
7079
7151
  if (formattedResult.truncationNote) {
7080
7152
  logger.info(`[${providerName}] Truncation: ${formattedResult.truncationNote}`);
7081
7153
  }
@@ -7083,7 +7155,6 @@ ${sql}
7083
7155
  } catch (error) {
7084
7156
  const errorMsg = error instanceof Error ? error.message : String(error);
7085
7157
  logger.error(`[${providerName}] Query execution failed (attempt ${attempts}/${MAX_QUERY_ATTEMPTS}): ${errorMsg}`);
7086
- logCollector?.error(`Query failed (attempt ${attempts}/${MAX_QUERY_ATTEMPTS}): ${errorMsg}`);
7087
7158
  userPromptErrorLogger.logSqlError(sql, error instanceof Error ? error : new Error(errorMsg), Object.keys(params).length > 0 ? Object.values(params) : void 0);
7088
7159
  if (streamBuffer.hasCallback()) {
7089
7160
  streamBuffer.write(`\u274C **Query execution failed:**
@@ -7105,19 +7176,16 @@ ${errorMsg}
7105
7176
  * Execute an external tool with retry tracking and streaming feedback
7106
7177
  */
7107
7178
  async executeExternalTool(toolName, toolInput, externalTools) {
7108
- const { streamBuffer, logCollector, providerName } = this.config;
7179
+ const { streamBuffer, providerName } = this.config;
7109
7180
  const externalTool = externalTools?.find((t) => t.id === toolName);
7110
7181
  if (!externalTool) {
7111
7182
  throw new Error(`Unknown tool: ${toolName}`);
7112
7183
  }
7113
7184
  const attempts = (this.toolAttempts.get(toolName) || 0) + 1;
7114
7185
  this.toolAttempts.set(toolName, attempts);
7115
- logger.info(`[${providerName}] Executing external tool: ${externalTool.name} (attempt ${attempts}/${MAX_TOOL_ATTEMPTS})`);
7116
- logCollector?.info(`Executing external tool: ${externalTool.name} (attempt ${attempts}/${MAX_TOOL_ATTEMPTS})...`);
7117
7186
  if (attempts > MAX_TOOL_ATTEMPTS) {
7118
7187
  const errorMsg = `Maximum attempts (${MAX_TOOL_ATTEMPTS}) reached for tool: ${externalTool.name}`;
7119
7188
  logger.error(`[${providerName}] ${errorMsg}`);
7120
- logCollector?.error(errorMsg);
7121
7189
  if (streamBuffer.hasCallback()) {
7122
7190
  streamBuffer.write(`
7123
7191
 
@@ -7152,8 +7220,6 @@ Please try rephrasing your request or contact support.
7152
7220
  `Running ${externalTool.name}`,
7153
7221
  streamBuffer
7154
7222
  );
7155
- logger.info(`[${providerName}] External tool ${externalTool.name} executed successfully`);
7156
- logCollector?.info(`\u2713 ${externalTool.name} executed successfully`);
7157
7223
  if (!this.executedToolsList.find((t) => t.id === externalTool.id)) {
7158
7224
  const formattedForTracking = formatToolResultForLLM(result, {
7159
7225
  toolName: externalTool.name,
@@ -7173,7 +7239,6 @@ Please try rephrasing your request or contact support.
7173
7239
  },
7174
7240
  outputSchema: externalTool.outputSchema
7175
7241
  });
7176
- logger.info(`[${providerName}] Tracked executed tool: ${externalTool.name} with ${formattedForTracking.summary.totalRecords} total records`);
7177
7242
  }
7178
7243
  if (streamBuffer.hasCallback()) {
7179
7244
  streamBuffer.write(`\u2705 **${externalTool.name} completed successfully**
@@ -7187,7 +7252,6 @@ Please try rephrasing your request or contact support.
7187
7252
  maxRows: DEFAULT_MAX_ROWS_FOR_LLM,
7188
7253
  maxCharsPerField: DEFAULT_MAX_CHARS_PER_FIELD2
7189
7254
  });
7190
- logger.info(`[${providerName}] Tool result formatted: ${formattedToolResult.summary.recordsShown}/${formattedToolResult.summary.totalRecords} records`);
7191
7255
  if (formattedToolResult.truncationNote) {
7192
7256
  logger.info(`[${providerName}] Truncation: ${formattedToolResult.truncationNote}`);
7193
7257
  }
@@ -7195,7 +7259,6 @@ Please try rephrasing your request or contact support.
7195
7259
  } catch (error) {
7196
7260
  const errorMsg = error instanceof Error ? error.message : String(error);
7197
7261
  logger.error(`[${providerName}] External tool ${externalTool.name} failed (attempt ${attempts}/${MAX_TOOL_ATTEMPTS}): ${errorMsg}`);
7198
- logCollector?.error(`\u2717 ${externalTool.name} failed: ${errorMsg}`);
7199
7262
  userPromptErrorLogger.logToolError(externalTool.name, toolInput, error instanceof Error ? error : new Error(errorMsg));
7200
7263
  if (streamBuffer.hasCallback()) {
7201
7264
  streamBuffer.write(`\u274C **${externalTool.name} failed:**
@@ -7273,7 +7336,6 @@ var BaseLLM = class {
7273
7336
  return;
7274
7337
  }
7275
7338
  this.conversationSimilarityThreshold = threshold;
7276
- logger.info(`[${this.getProviderName()}] Conversation similarity threshold set to: ${threshold}`);
7277
7339
  }
7278
7340
  /**
7279
7341
  * Get the current conversation similarity threshold
@@ -7316,16 +7378,14 @@ var BaseLLM = class {
7316
7378
  * @param analysisContent - The text response containing component suggestions
7317
7379
  * @param components - List of available components
7318
7380
  * @param apiKey - Optional API key
7319
- * @param logCollector - Optional log collector
7320
7381
  * @param componentStreamCallback - Optional callback to stream primary KPI component as soon as it's identified
7321
7382
  * @returns Object containing matched components, layout title/description, and follow-up actions
7322
7383
  */
7323
- async matchComponentsFromAnalysis(analysisContent, components, userPrompt, apiKey, logCollector, componentStreamCallback, deferredTools, executedTools, collections, userId) {
7384
+ async matchComponentsFromAnalysis(analysisContent, components, userPrompt, apiKey, componentStreamCallback, deferredTools, executedTools, collections, userId) {
7324
7385
  const methodStartTime = Date.now();
7325
7386
  const methodName = "matchComponentsFromAnalysis";
7326
7387
  logger.info(`[${this.getProviderName()}] [TIMING] START ${methodName} | model: ${this.getModelForTask("complex")}`);
7327
7388
  try {
7328
- logger.debug(`[${this.getProviderName()}] Starting component matching from text response`);
7329
7389
  let availableComponentsText = "No components available";
7330
7390
  if (components && components.length > 0) {
7331
7391
  availableComponentsText = components.map((comp, idx) => {
@@ -7341,7 +7401,6 @@ var BaseLLM = class {
7341
7401
  }
7342
7402
  let deferredToolsText = "No deferred external tools for this request.";
7343
7403
  if (deferredTools && deferredTools.length > 0) {
7344
- logger.info(`[${this.getProviderName()}] Passing ${deferredTools.length} deferred tools to component matching`);
7345
7404
  deferredToolsText = "The following external tools need user input via a Form component.\n**IMPORTANT: Use these EXACT values when generating Form externalTool prop.**\n\n" + deferredTools.map((tool, idx) => {
7346
7405
  return `${idx + 1}. **${tool.name}**
7347
7406
  toolId: "${tool.id}" (USE THIS EXACT VALUE - do not modify!)
@@ -7353,7 +7412,6 @@ var BaseLLM = class {
7353
7412
  }
7354
7413
  let executedToolsText = "No external tools were executed for data fetching.";
7355
7414
  if (executedTools && executedTools.length > 0) {
7356
- logger.info(`[${this.getProviderName()}] Passing ${executedTools.length} executed tools to component matching`);
7357
7415
  executedToolsText = "The following external tools were executed to fetch data.\n" + executedTools.map((tool, idx) => {
7358
7416
  let outputSchemaText = "Not available";
7359
7417
  let fieldNamesList = "";
@@ -7409,14 +7467,12 @@ ${fieldsText}`;
7409
7467
  KNOWLEDGE_BASE_CONTEXT: knowledgeBaseContext,
7410
7468
  CURRENT_DATETIME: getCurrentDateTimeForPrompt()
7411
7469
  });
7412
- logger.debug(`[${this.getProviderName()}] Loaded match-text-components prompts`);
7413
7470
  logger.logLLMPrompt("matchComponentsFromAnalysis", "system", extractPromptText(prompts.system));
7414
7471
  logger.logLLMPrompt("matchComponentsFromAnalysis", "user", `Text Analysis:
7415
7472
  ${analysisContent}
7416
7473
 
7417
7474
  Executed Tools:
7418
7475
  ${executedToolsText}`);
7419
- logCollector?.info("Matching components from text response...");
7420
7476
  let fullResponseText = "";
7421
7477
  let answerComponentExtracted = false;
7422
7478
  const answerCallback = componentStreamCallback;
@@ -7476,18 +7532,7 @@ ${executedToolsText}`);
7476
7532
  ...answerComponentData.props
7477
7533
  }
7478
7534
  };
7479
- const streamTime = (/* @__PURE__ */ new Date()).toISOString();
7480
- logger.info(`[${this.getProviderName()}] \u2713 [${streamTime}] Answer component detected in stream: ${answerComponent.name} (${answerComponent.type})`);
7481
- logCollector?.info(`\u2713 Answer component: ${answerComponent.name} (${answerComponent.type}) - detected at ${streamTime}`);
7482
- if (answerComponentData.props?.query) {
7483
- logCollector?.logQuery(
7484
- "Answer component query",
7485
- answerComponentData.props.query,
7486
- { componentName: answerComponent.name, componentType: answerComponent.type, reasoning: answerComponentData.reasoning }
7487
- );
7488
- }
7489
7535
  let answerQuery = answerComponent.props?.query;
7490
- logger.info(`[${this.getProviderName()}] Answer component detected: ${answerComponent.name} (${answerComponent.type}), hasQuery: ${!!answerQuery}, hasDbExecute: ${!!collections?.["database"]?.["execute"]}`);
7491
7536
  if (answerQuery) {
7492
7537
  if (typeof answerQuery === "string") {
7493
7538
  answerQuery = ensureQueryLimit(answerQuery, this.defaultLimit, MAX_COMPONENT_QUERY_LIMIT);
@@ -7505,24 +7550,18 @@ ${executedToolsText}`);
7505
7550
  let currentQuery = answerQuery;
7506
7551
  let currentQueryStr = typeof answerQuery === "string" ? answerQuery : answerQuery?.sql || "";
7507
7552
  let lastError = "";
7508
- logger.info(`[${this.getProviderName()}] Validating answer component query before streaming...`);
7509
7553
  while (attempts < maxRetries && !validated) {
7510
7554
  attempts++;
7511
7555
  try {
7512
7556
  const cacheKey = this.queryService.getQueryCacheKey(currentQuery);
7513
7557
  if (cacheKey) {
7514
- logger.debug(`[${this.getProviderName()}] Answer component query validation attempt ${attempts}/${maxRetries}`);
7515
7558
  const result2 = await collections["database"]["execute"]({ sql: cacheKey });
7516
7559
  queryCache.set(cacheKey, result2);
7517
7560
  validated = true;
7518
7561
  if (currentQuery !== answerQuery) {
7519
7562
  answerComponent.props.query = currentQuery;
7520
7563
  }
7521
- logger.info(`[${this.getProviderName()}] \u2713 Answer component query validated (attempt ${attempts}) - STREAMING TO FRONTEND NOW`);
7522
- logCollector?.info(`\u2713 Answer component query validated - streaming to frontend`);
7523
- logger.info(`[${this.getProviderName()}] Calling answerCallback for: ${answerComponent.name}`);
7524
7564
  answerCallback(answerComponent);
7525
- logger.info(`[${this.getProviderName()}] answerCallback completed for: ${answerComponent.name}`);
7526
7565
  }
7527
7566
  } catch (validationError) {
7528
7567
  lastError = validationError instanceof Error ? validationError.message : String(validationError);
@@ -7558,7 +7597,6 @@ ${executedToolsText}`);
7558
7597
  }
7559
7598
  if (!validated) {
7560
7599
  logger.warn(`[${this.getProviderName()}] Answer component query validation failed after ${attempts} attempts - component will be excluded`);
7561
- logCollector?.warn(`Answer component query validation failed: ${lastError} - component will be excluded from response`);
7562
7600
  }
7563
7601
  })();
7564
7602
  } else {
@@ -7569,7 +7607,7 @@ ${executedToolsText}`);
7569
7607
  }
7570
7608
  }
7571
7609
  } catch (e) {
7572
- logger.debug(`[${this.getProviderName()}] Partial answerComponent parse failed, waiting for more data...`);
7610
+ logger.error(`[${this.getProviderName()}] Partial answerComponent parse failed, waiting for more data...`);
7573
7611
  }
7574
7612
  }
7575
7613
  }
@@ -7599,18 +7637,6 @@ ${executedToolsText}`);
7599
7637
  logger.file("\n=============================\nFull LLM response:", JSON.stringify(result, null, 2));
7600
7638
  const rawActions = result.actions || [];
7601
7639
  const actions = convertQuestionsToActions(rawActions);
7602
- if (matchedComponents.length > 0) {
7603
- matchedComponents.forEach((comp, idx) => {
7604
- logCollector?.info(` ${idx + 1}. ${comp.componentName} (${comp.componentType}): ${comp.reasoning}`);
7605
- if (comp.props?.query) {
7606
- logCollector?.logQuery(
7607
- `Component ${idx + 1} query`,
7608
- comp.props.query,
7609
- { componentName: comp.componentName, title: comp.props.title }
7610
- );
7611
- }
7612
- });
7613
- }
7614
7640
  const finalComponents = matchedComponents.map((mc) => {
7615
7641
  const originalComponent = components.find((c) => c.id === mc.componentId);
7616
7642
  if (!originalComponent) {
@@ -7635,27 +7661,22 @@ ${executedToolsText}`);
7635
7661
  }).filter(Boolean);
7636
7662
  let validatedComponents = finalComponents;
7637
7663
  if (collections?.["database"]?.["execute"]) {
7638
- logger.info(`[${this.getProviderName()}] Starting query validation for ${finalComponents.length} components...`);
7639
- logCollector?.info(`Validating queries for ${finalComponents.length} components...`);
7640
7664
  try {
7641
7665
  const validationResult = await this.queryService.validateComponentQueries(
7642
7666
  finalComponents,
7643
7667
  collections,
7644
- apiKey,
7645
- logCollector
7668
+ apiKey
7646
7669
  );
7647
7670
  validatedComponents = validationResult.components;
7648
7671
  const queriedComponents = finalComponents.filter((c) => c.props?.query);
7649
7672
  const validatedQueries = validatedComponents.filter((c) => c.props?.query);
7650
7673
  logger.info(`[${this.getProviderName()}] Query validation complete: ${validatedQueries.length}/${queriedComponents.length} queries validated`);
7651
- logCollector?.info(`Query validation complete: ${validatedQueries.length}/${queriedComponents.length} queries validated`);
7652
7674
  } catch (validationError) {
7653
7675
  const validationErrorMsg = validationError instanceof Error ? validationError.message : String(validationError);
7654
7676
  logger.error(`[${this.getProviderName()}] Query validation error: ${validationErrorMsg}`);
7655
- logCollector?.error(`Query validation error: ${validationErrorMsg}`);
7656
7677
  }
7657
7678
  } else {
7658
- logger.debug(`[${this.getProviderName()}] Skipping query validation - database execute function not available`);
7679
+ logger.error(`[${this.getProviderName()}] Skipping query validation - database execute function not available`);
7659
7680
  }
7660
7681
  const methodDuration = Date.now() - methodStartTime;
7661
7682
  logger.info(`[${this.getProviderName()}] [TIMING] DONE ${methodName} in ${methodDuration}ms | components: ${validatedComponents.length} | actions: ${actions.length}`);
@@ -7669,7 +7690,6 @@ ${executedToolsText}`);
7669
7690
  const methodDuration = Date.now() - methodStartTime;
7670
7691
  const errorMsg = error instanceof Error ? error.message : String(error);
7671
7692
  logger.error(`[${this.getProviderName()}] [TIMING] FAILED ${methodName} in ${methodDuration}ms | error: ${errorMsg}`);
7672
- logCollector?.error(`Failed to match components: ${errorMsg}`);
7673
7693
  return {
7674
7694
  components: [],
7675
7695
  layoutTitle: "Dashboard",
@@ -7682,7 +7702,7 @@ ${executedToolsText}`);
7682
7702
  * Classify user question into category and detect external tools needed
7683
7703
  * Determines if question is for data analysis, requires external tools, or needs text response
7684
7704
  */
7685
- async classifyQuestionCategory(userPrompt, apiKey, logCollector, conversationHistory, externalTools) {
7705
+ async classifyQuestionCategory(userPrompt, apiKey, conversationHistory, externalTools) {
7686
7706
  const methodStartTime = Date.now();
7687
7707
  const methodName = "classifyQuestionCategory";
7688
7708
  const promptPreview = userPrompt.substring(0, 50) + (userPrompt.length > 50 ? "..." : "");
@@ -7718,16 +7738,6 @@ ${executedToolsText}`);
7718
7738
  true
7719
7739
  // Parse as JSON
7720
7740
  );
7721
- logCollector?.logExplanation(
7722
- "Question category classified",
7723
- result.reasoning || "No reasoning provided",
7724
- {
7725
- category: result.category,
7726
- externalTools: result.externalTools || [],
7727
- dataAnalysisType: result.dataAnalysisType,
7728
- confidence: result.confidence
7729
- }
7730
- );
7731
7741
  const methodDuration = Date.now() - methodStartTime;
7732
7742
  logger.info(`[${this.getProviderName()}] [TIMING] DONE ${methodName} in ${methodDuration}ms | category: ${result.category} | confidence: ${result.confidence}% | tools: ${(result.externalTools || []).length}`);
7733
7743
  return {
@@ -7741,7 +7751,6 @@ ${executedToolsText}`);
7741
7751
  const methodDuration = Date.now() - methodStartTime;
7742
7752
  const errorMsg = error instanceof Error ? error.message : String(error);
7743
7753
  logger.error(`[${this.getProviderName()}] [TIMING] FAILED ${methodName} in ${methodDuration}ms | error: ${errorMsg}`);
7744
- logger.debug(`[${this.getProviderName()}] Category classification error details:`, error);
7745
7754
  throw error;
7746
7755
  }
7747
7756
  }
@@ -7750,7 +7759,7 @@ ${executedToolsText}`);
7750
7759
  * Takes a matched UI block from semantic search and modifies its props to answer the new question
7751
7760
  * Also adapts the cached text response to match the new question
7752
7761
  */
7753
- async adaptUIBlockParameters(currentUserPrompt, originalUserPrompt, matchedUIBlock, apiKey, logCollector, cachedTextResponse) {
7762
+ async adaptUIBlockParameters(currentUserPrompt, originalUserPrompt, matchedUIBlock, apiKey, cachedTextResponse) {
7754
7763
  const methodStartTime = Date.now();
7755
7764
  const methodName = "adaptUIBlockParameters";
7756
7765
  const promptPreview = currentUserPrompt.substring(0, 50) + (currentUserPrompt.length > 50 ? "..." : "");
@@ -7794,11 +7803,6 @@ ${executedToolsText}`);
7794
7803
  if (!result.success) {
7795
7804
  const methodDuration2 = Date.now() - methodStartTime;
7796
7805
  logger.info(`[${this.getProviderName()}] [TIMING] DONE ${methodName} in ${methodDuration2}ms | result: adaptation failed - ${result.reason}`);
7797
- logCollector?.warn(
7798
- "Could not adapt matched UI block",
7799
- "explanation",
7800
- { reason: result.reason }
7801
- );
7802
7806
  return {
7803
7807
  success: false,
7804
7808
  explanation: result.explanation || "Adaptation not possible"
@@ -7810,14 +7814,6 @@ ${executedToolsText}`);
7810
7814
  this.defaultLimit
7811
7815
  );
7812
7816
  }
7813
- logCollector?.logExplanation(
7814
- "UI block parameters adapted",
7815
- result.explanation || "Parameters adapted successfully",
7816
- {
7817
- parametersChanged: result.parametersChanged || [],
7818
- componentType: result.adaptedComponent?.type
7819
- }
7820
- );
7821
7817
  const methodDuration = Date.now() - methodStartTime;
7822
7818
  logger.info(`[${this.getProviderName()}] [TIMING] DONE ${methodName} in ${methodDuration}ms | result: success | changes: ${(result.parametersChanged || []).length}`);
7823
7819
  return {
@@ -7831,7 +7827,6 @@ ${executedToolsText}`);
7831
7827
  const methodDuration = Date.now() - methodStartTime;
7832
7828
  const errorMsg = error instanceof Error ? error.message : String(error);
7833
7829
  logger.error(`[${this.getProviderName()}] [TIMING] FAILED ${methodName} in ${methodDuration}ms | error: ${errorMsg}`);
7834
- logger.debug(`[${this.getProviderName()}] Adaptation error details:`, error);
7835
7830
  return {
7836
7831
  success: false,
7837
7832
  explanation: `Error adapting parameters: ${errorMsg}`
@@ -7844,14 +7839,12 @@ ${executedToolsText}`);
7844
7839
  * Supports tool calling for query execution with automatic retry on errors (max 3 attempts)
7845
7840
  * After generating text response, if components are provided, matches suggested components
7846
7841
  */
7847
- async generateTextResponse(userPrompt, apiKey, logCollector, conversationHistory, streamCallback, collections, components, externalTools, category, userId) {
7842
+ async generateTextResponse(userPrompt, apiKey, conversationHistory, streamCallback, collections, components, externalTools, category, userId) {
7848
7843
  const methodStartTime = Date.now();
7849
7844
  const methodName = "generateTextResponse";
7850
7845
  const promptPreview = userPrompt.substring(0, 50) + (userPrompt.length > 50 ? "..." : "");
7851
7846
  logger.info(`[${this.getProviderName()}] [TIMING] START ${methodName} | model: ${this.getModelForTask("complex")} | category: ${category} | prompt: "${promptPreview}"`);
7852
7847
  const errors = [];
7853
- logger.debug(`[${this.getProviderName()}] Starting text response generation`);
7854
- logger.debug(`[${this.getProviderName()}] User prompt: "${userPrompt.substring(0, 50)}..."`);
7855
7848
  try {
7856
7849
  let availableToolsDoc = "No external tools are available for this request.";
7857
7850
  if (externalTools && externalTools.length > 0) {
@@ -7915,9 +7908,6 @@ ${executedToolsText}`);
7915
7908
  });
7916
7909
  logger.logLLMPrompt("generateTextResponse", "system", extractPromptText(prompts.system));
7917
7910
  logger.logLLMPrompt("generateTextResponse", "user", extractPromptText(prompts.user));
7918
- logger.debug(`[${this.getProviderName()}] Loaded text-response prompts with schema`);
7919
- logger.debug(`[${this.getProviderName()}] System prompt length: ${prompts.system.length}, User prompt length: ${prompts.user.length}`);
7920
- logCollector?.info("Generating text response with query execution capability...");
7921
7911
  const tools = [{
7922
7912
  name: "execute_query",
7923
7913
  description: "Executes a parameterized SQL query against the database. CRITICAL: NEVER hardcode literal values in WHERE/HAVING conditions - ALWAYS use $paramName placeholders and pass actual values in params object.",
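The `execute_query` tool description above demands `$paramName` placeholders instead of literal values. A hedged example of the tool input shape the executor earlier in this diff reads (`sql`, `params`, optional `reasoning`); the table and values are invented:

```js
// Example tool input in the parameterized style required above; table and values
// are made up for illustration.
const toolInput = {
  sql: "SELECT customer, total FROM orders WHERE status = $status LIMIT 32",
  params: { status: "shipped" },
  reasoning: "List totals for shipped orders", // optional, read by the executor
};
```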
@@ -7946,7 +7936,6 @@ ${executedToolsText}`);
7946
7936
  const executableTools = externalTools.filter(
7947
7937
  (t) => t.executionType === "immediate" || t.executionType === "deferred" && t.userProvidedData
7948
7938
  );
7949
- logger.info(`[${this.getProviderName()}] Executable tools: ${executableTools.length} of ${externalTools.length} total`);
7950
7939
  const addedToolIds = /* @__PURE__ */ new Set();
7951
7940
  executableTools.forEach((tool) => {
7952
7941
  if (addedToolIds.has(tool.id)) {
@@ -7954,7 +7943,6 @@ ${executedToolsText}`);
7954
7943
  return;
7955
7944
  }
7956
7945
  addedToolIds.add(tool.id);
7957
- logger.info(`[${this.getProviderName()}] Processing executable tool:`, JSON.stringify(tool, null, 2));
7958
7946
  const properties = {};
7959
7947
  const required = [];
7960
7948
  Object.entries(tool.params || {}).forEach(([key, typeOrValue]) => {
@@ -8024,14 +8012,13 @@ ${executedToolsText}`);
8024
8012
  });
8025
8013
  });
8026
8014
  logger.info(`[${this.getProviderName()}] Added ${addedToolIds.size} unique tool definitions from ${executableTools.length} tool calls (${externalTools.length - executableTools.length} deferred tools await form input)`);
8027
- logger.info(`[${this.getProviderName()}] Complete tools array:`, JSON.stringify(tools, null, 2));
8015
+ logger.debug(`[${this.getProviderName()}] Complete tools array:`, JSON.stringify(tools, null, 2));
8028
8016
  }
8029
8017
  const streamBuffer = new StreamBuffer(streamCallback);
8030
8018
  const toolExecutor = new ToolExecutorService({
8031
8019
  providerName: this.getProviderName(),
8032
8020
  collections,
8033
- streamBuffer,
8034
- logCollector
8021
+ streamBuffer
8035
8022
  });
8036
8023
  const executableExternalTools = externalTools?.map((t) => ({
8037
8024
  id: t.id,
@@ -8060,12 +8047,10 @@ ${executedToolsText}`);
8060
8047
  },
8061
8048
  MAX_TOOL_CALLING_ITERATIONS
8062
8049
  );
8063
- logger.info(`[${this.getProviderName()}] Text response stream completed`);
8064
8050
  const textResponse = streamBuffer.getFullText() || result || "I apologize, but I was unable to generate a response.";
8065
8051
  if (toolExecutor.isMaxAttemptsReached()) {
8066
8052
  const methodDuration2 = Date.now() - methodStartTime;
8067
8053
  logger.warn(`[${this.getProviderName()}] [TIMING] DONE ${methodName} in ${methodDuration2}ms | result: max attempts reached`);
8068
- logCollector?.error("Failed to generate valid query after maximum attempts");
8069
8054
  return {
8070
8055
  success: false,
8071
8056
  errors: [`Maximum query attempts (${MAX_QUERY_ATTEMPTS}) reached. Unable to generate a valid query for your question.`],
@@ -8077,14 +8062,6 @@ ${executedToolsText}`);
8077
8062
  }
8078
8063
  };
8079
8064
  }
8080
- logCollector?.info(`Text response: ${textResponse.substring(0, 100)}${textResponse.length > 100 ? "..." : ""}`);
8081
- logCollector?.logExplanation(
8082
- "Text response generated",
8083
- "Generated plain text response with component suggestions",
8084
- {
8085
- textLength: textResponse.length
8086
- }
8087
- );
8088
8065
  streamBuffer.flush();
8089
8066
  if (streamBuffer.hasCallback() && components && components.length > 0 && category !== "general") {
8090
8067
  streamBuffer.write("\n\n\u{1F4CA} **Generating visualization components...**\n\n");
@@ -8096,8 +8073,6 @@ ${executedToolsText}`);
8096
8073
  let actions = [];
8097
8074
  if (category === "general") {
8098
8075
  logger.info(`[${this.getProviderName()}] Skipping component generation for general/conversational question`);
8099
- logCollector?.info("Skipping component generation for general question");
8100
- logger.info(`[${this.getProviderName()}] Generating actions for general question...`);
8101
8076
  const nextQuestions = await this.generateNextQuestions(
8102
8077
  userPrompt,
8103
8078
  null,
@@ -8105,23 +8080,16 @@ ${executedToolsText}`);
8105
8080
  void 0,
8106
8081
  // no component data
8107
8082
  apiKey,
8108
- logCollector,
8109
8083
  conversationHistory,
8110
8084
  textResponse
8111
8085
  // pass text response as context
8112
8086
  );
8113
8087
  actions = convertQuestionsToActions(nextQuestions);
8114
- logger.info(`[${this.getProviderName()}] Generated ${actions.length} follow-up actions for general question`);
8115
8088
  } else if (components && components.length > 0) {
8116
- logger.info(`[${this.getProviderName()}] Matching components from text response...`);
8117
- logger.info(`[${this.getProviderName()}] componentStreamCallback setup: hasCallback=${streamBuffer.hasCallback()}, category=${category}`);
8118
- const componentStreamCallback = streamBuffer.hasCallback() && category !== "data_modification" ? (component) => {
8119
- logger.info(`[${this.getProviderName()}] componentStreamCallback INVOKED for: ${component.name} (${component.type})`);
8089
+ const componentStreamCallback = streamBuffer.hasCallback() && category === "data_analysis" ? (component) => {
8120
8090
  const answerMarker = `__ANSWER_COMPONENT_START__${JSON.stringify(component)}__ANSWER_COMPONENT_END__`;
8121
8091
  streamBuffer.write(answerMarker);
8122
- logger.info(`[${this.getProviderName()}] Streamed answer component to frontend: ${component.name} (${component.type})`);
8123
8092
  } : void 0;
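
A note on the marker format used just above: each matched component is serialized into the text stream between __ANSWER_COMPONENT_START__ and __ANSWER_COMPONENT_END__. A minimal consumer-side sketch for splitting those markers back out of accumulated stream text; only the marker strings come from this bundle, the helper name and parsing approach are assumptions:

    // Hypothetical consumer-side helper (not part of the SDK).
    const START = "__ANSWER_COMPONENT_START__";
    const END = "__ANSWER_COMPONENT_END__";
    function splitStreamedResponse(fullText) {
      const components = [];
      let text = "";
      let cursor = 0;
      while (cursor < fullText.length) {
        const start = fullText.indexOf(START, cursor);
        if (start === -1) {
          text += fullText.slice(cursor);
          break;
        }
        const end = fullText.indexOf(END, start + START.length);
        if (end === -1) {
          // Marker not closed yet (still streaming): keep only the text before it.
          text += fullText.slice(cursor, start);
          break;
        }
        text += fullText.slice(cursor, start);
        try {
          components.push(JSON.parse(fullText.slice(start + START.length, end)));
        } catch {
          // Skip malformed or partially received component JSON.
        }
        cursor = end + END.length;
      }
      return { text, components };
    }
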
8124
- logger.info(`[${this.getProviderName()}] componentStreamCallback created: ${!!componentStreamCallback}`);
8125
8093
  const deferredTools = externalTools?.filter((t) => {
8126
8094
  if (t.executionType === "deferred" && !t.userProvidedData) return true;
8127
8095
  if (category === "data_modification" && !t.userProvidedData) {
@@ -8142,7 +8110,6 @@ ${executedToolsText}`);
8142
8110
  components,
8143
8111
  userPrompt,
8144
8112
  apiKey,
8145
- logCollector,
8146
8113
  componentStreamCallback,
8147
8114
  deferredTools,
8148
8115
  toolExecutor.getExecutedTools(),
@@ -8156,8 +8123,6 @@ ${executedToolsText}`);
8156
8123
  }
8157
8124
  let container_componet = null;
8158
8125
  if (matchedComponents.length > 0) {
8159
- logger.info(`[${this.getProviderName()}] Created MultiComponentContainer: "${layoutTitle}" with ${matchedComponents.length} components and ${actions.length} actions`);
8160
- logCollector?.info(`Created dashboard: "${layoutTitle}" with ${matchedComponents.length} components and ${actions.length} actions`);
8161
8126
  container_componet = {
8162
8127
  id: `container_${Date.now()}`,
8163
8128
  name: "MultiComponentContainer",
@@ -8189,7 +8154,6 @@ ${executedToolsText}`);
8189
8154
  const methodDuration = Date.now() - methodStartTime;
8190
8155
  const errorMsg = error instanceof Error ? error.message : String(error);
8191
8156
  logger.error(`[${this.getProviderName()}] [TIMING] FAILED ${methodName} in ${methodDuration}ms | error: ${errorMsg}`);
8192
- logCollector?.error(`Error generating text response: ${errorMsg}`);
8193
8157
  userPromptErrorLogger.logLlmError(
8194
8158
  this.getProviderName(),
8195
8159
  this.model,
@@ -8217,14 +8181,11 @@ ${executedToolsText}`);
8217
8181
  * 2. Category classification: Determine if data_analysis, requires_external_tools, or text_response
8218
8182
  * 3. Route appropriately based on category and response mode
8219
8183
  */
8220
- async handleUserRequest(userPrompt, components, apiKey, logCollector, conversationHistory, responseMode = "text", streamCallback, collections, externalTools, userId) {
8184
+ async handleUserRequest(userPrompt, components, apiKey, conversationHistory, responseMode = "text", streamCallback, collections, externalTools, userId) {
8221
8185
  const startTime = Date.now();
8222
- logger.info(`[${this.getProviderName()}] handleUserRequest called for user prompt: ${userPrompt}`);
8223
- logCollector?.info(`Starting request processing with mode: ${responseMode}`);
8224
8186
  logger.clearFile();
8225
8187
  logger.logLLMPrompt("handleUserRequest", "user", `User Prompt: ${userPrompt}`);
8226
8188
  try {
8227
- logger.info(`[${this.getProviderName()}] Step 1: Searching previous conversations...`);
8228
8189
  const conversationMatch = await conversation_search_default.searchConversationsWithReranking({
8229
8190
  userPrompt,
8230
8191
  collections,
@@ -8233,22 +8194,16 @@ ${executedToolsText}`);
8233
8194
  });
8234
8195
  if (conversationMatch) {
8235
8196
  logger.info(`[${this.getProviderName()}] \u2713 Found matching conversation with ${(conversationMatch.similarity * 100).toFixed(2)}% similarity`);
8236
- logCollector?.info(
8237
- `\u2713 Found similar conversation (${(conversationMatch.similarity * 100).toFixed(2)}% match)`
8238
- );
8239
8197
  const rawComponent = conversationMatch.uiBlock?.component || conversationMatch.uiBlock?.generatedComponentMetadata;
8240
8198
  const isValidComponent = rawComponent && typeof rawComponent === "object" && Object.keys(rawComponent).length > 0;
8241
8199
  const component = isValidComponent ? rawComponent : null;
8242
8200
  const cachedTextResponse = conversationMatch.uiBlock?.analysis || conversationMatch.uiBlock?.textResponse || conversationMatch.uiBlock?.text || "";
8243
8201
  if (this.containsFormComponent(component)) {
8244
8202
  logger.info(`[${this.getProviderName()}] Skipping cached result - Form components contain stale defaultValues, fetching fresh data`);
8245
- logCollector?.info("Skipping cache for form - fetching current values from database...");
8246
8203
  } else if (!component) {
8247
8204
  if (conversationMatch.similarity >= EXACT_MATCH_SIMILARITY_THRESHOLD) {
8248
8205
  const elapsedTime2 = Date.now() - startTime;
8249
- logger.info(`[${this.getProviderName()}] \u2713 Exact match for general question - returning cached text response`);
8250
- logCollector?.info(`\u2713 Exact match for general question - returning cached response`);
8251
- logCollector?.info(`Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
8206
+ logger.info(`[${this.getProviderName()}] \u2713 Exact match for general question - returning cached text response (${elapsedTime2}ms)`);
8252
8207
  return {
8253
8208
  success: true,
8254
8209
  data: {
@@ -8263,14 +8218,11 @@ ${executedToolsText}`);
8263
8218
  };
8264
8219
  } else {
8265
8220
  logger.info(`[${this.getProviderName()}] Similar match but no component (general question) - processing fresh`);
8266
- logCollector?.info("Similar match found but was a general conversation - processing as new question");
8267
8221
  }
8268
8222
  } else {
8269
8223
  if (conversationMatch.similarity >= EXACT_MATCH_SIMILARITY_THRESHOLD) {
8270
8224
  const elapsedTime2 = Date.now() - startTime;
8271
- logger.info(`[${this.getProviderName()}] \u2713 100% match - returning UI block directly without adaptation`);
8272
- logCollector?.info(`\u2713 Exact match (${(conversationMatch.similarity * 100).toFixed(2)}%) - returning cached result`);
8273
- logCollector?.info(`Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
8225
+ logger.info(`[${this.getProviderName()}] \u2713 100% match - returning UI block directly without adaptation (${elapsedTime2}ms)`);
8274
8226
  if (streamCallback && cachedTextResponse) {
8275
8227
  logger.info(`[${this.getProviderName()}] Streaming cached text response to frontend`);
8276
8228
  streamCallback(cachedTextResponse);
@@ -8289,22 +8241,18 @@ ${executedToolsText}`);
8289
8241
  errors: []
8290
8242
  };
8291
8243
  }
8292
- logCollector?.info(`Adapting parameters for similar question...`);
8244
+ logger.info(`[${this.getProviderName()}] Adapting parameters for similar question...`);
8293
8245
  const originalPrompt = conversationMatch.metadata?.userPrompt || "Previous question";
8294
8246
  const adaptResult = await this.adaptUIBlockParameters(
8295
8247
  userPrompt,
8296
8248
  originalPrompt,
8297
8249
  conversationMatch.uiBlock,
8298
8250
  apiKey,
8299
- logCollector,
8300
8251
  cachedTextResponse
8301
8252
  );
8302
8253
  if (adaptResult.success && adaptResult.adaptedComponent) {
8303
8254
  const elapsedTime2 = Date.now() - startTime;
8304
- logger.info(`[${this.getProviderName()}] \u2713 Successfully adapted UI block parameters`);
8305
- logger.info(`[${this.getProviderName()}] Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
8306
- logCollector?.info(`\u2713 UI block adapted successfully`);
8307
- logCollector?.info(`Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
8255
+ logger.info(`[${this.getProviderName()}] \u2713 Successfully adapted UI block parameters (${elapsedTime2}ms)`);
8308
8256
  const textResponseToUse = adaptResult.adaptedTextResponse || cachedTextResponse;
8309
8257
  if (streamCallback && textResponseToUse) {
8310
8258
  logger.info(`[${this.getProviderName()}] Streaming ${adaptResult.adaptedTextResponse ? "adapted" : "cached"} text response to frontend`);
@@ -8325,65 +8273,57 @@ ${executedToolsText}`);
8325
8273
  errors: []
8326
8274
  };
8327
8275
  } else {
8328
- logger.info(`[${this.getProviderName()}] Could not adapt matched conversation, continuing to category classification`);
8329
- logCollector?.warn(`Could not adapt matched conversation: ${adaptResult.explanation}`);
8276
+ logger.info(`[${this.getProviderName()}] Could not adapt matched conversation: ${adaptResult.explanation}, continuing to category classification`);
8330
8277
  }
8331
8278
  }
8332
8279
  } else {
8333
8280
  logger.info(`[${this.getProviderName()}] No matching previous conversations found, proceeding to category classification`);
8334
- logCollector?.info("No similar previous conversations found. Proceeding to category classification...");
8335
8281
  }
8336
8282
  logger.info(`[${this.getProviderName()}] Step 2: Classifying question category...`);
8337
- logCollector?.info("Step 2: Classifying question category...");
8338
8283
  const categoryClassification = await this.classifyQuestionCategory(
8339
8284
  userPrompt,
8340
8285
  apiKey,
8341
- logCollector,
8342
8286
  conversationHistory,
8343
8287
  externalTools
8344
8288
  );
8345
8289
  logger.info(
8346
8290
  `[${this.getProviderName()}] Question classified as: ${categoryClassification.category} (confidence: ${categoryClassification.confidence}%)`
8347
8291
  );
8348
- logCollector?.info(
8349
- `Category: ${categoryClassification.category} | Confidence: ${categoryClassification.confidence}%`
8350
- );
8351
8292
  let toolsToUse = [];
8352
8293
  if (categoryClassification.externalTools && categoryClassification.externalTools.length > 0) {
8353
- logger.info(`[${this.getProviderName()}] Identified ${categoryClassification.externalTools.length} external tools needed`);
8354
- logCollector?.info(`Identified external tools: ${categoryClassification.externalTools.map((t) => t.name || t.type).join(", ")}`);
8355
- logger.info(`[${this.getProviderName()}] Raw external tools from classification: ${JSON.stringify(categoryClassification.externalTools, null, 2)}`);
8356
- toolsToUse = categoryClassification.externalTools?.map((t) => {
8294
+ logger.info(`[${this.getProviderName()}] Identified ${categoryClassification.externalTools.length} external tools needed: ${categoryClassification.externalTools.map((t) => t.name || t.type).join(", ")}`);
8295
+ logger.debug(`[${this.getProviderName()}] Raw external tools from classification: ${JSON.stringify(categoryClassification.externalTools, null, 2)}`);
8296
+ toolsToUse = categoryClassification.externalTools.reduce((acc, t) => {
8357
8297
  const realTool = externalTools?.find((tool) => tool.id === t.type);
8358
- logger.info(`[${this.getProviderName()}] Tool ${t.name}: executionType=${t.executionType}, userProvidedData=${t.userProvidedData ? "present" : "null"}`);
8359
- return {
8298
+ if (!realTool) {
8299
+ logger.warn(`[${this.getProviderName()}] Tool ${t.type} (${t.name}) not found in registered tools - skipping (likely hallucinated)`);
8300
+ return acc;
8301
+ }
8302
+ acc.push({
8360
8303
  id: t.type,
8361
8304
  name: t.name,
8362
8305
  description: t.description,
8363
8306
  params: t.parameters || {},
8364
- // NEW: Include execution type info from category classification
8307
+ // Include execution type info from category classification
8365
8308
  executionType: t.executionType || "immediate",
8366
8309
  executionReason: t.executionReason || "",
8367
8310
  requiredFields: t.requiredFields || [],
8368
8311
  userProvidedData: t.userProvidedData || null,
8369
- // CRITICAL: Include outputSchema from real tool for component config generation
8370
- outputSchema: realTool?.outputSchema,
8371
- fn: (() => {
8372
- if (realTool) {
8373
- logger.info(`[${this.getProviderName()}] Using real tool implementation for ${t.type}`);
8374
- return realTool.fn;
8375
- } else {
8376
- logger.warn(`[${this.getProviderName()}] Tool ${t.type} not found in registered tools`);
8377
- return async () => ({ success: false, message: `Tool ${t.name || t.type} not registered` });
8378
- }
8379
- })()
8380
- };
8381
- }) || [];
8312
+ // Include outputSchema from real tool for component config generation
8313
+ outputSchema: realTool.outputSchema,
8314
+ fn: realTool.fn
8315
+ });
8316
+ return acc;
8317
+ }, []);
8318
+ const validCount = toolsToUse.length;
8319
+ const hallucinatedCount = categoryClassification.externalTools.length - validCount;
8320
+ if (hallucinatedCount > 0) {
8321
+ logger.warn(`[${this.getProviderName()}] Filtered out ${hallucinatedCount} hallucinated/non-existent tools, ${validCount} valid tools remaining`);
8322
+ }
8382
8323
  }
8383
8324
  const textResponse = await this.generateTextResponse(
8384
8325
  userPrompt,
8385
8326
  apiKey,
8386
- logCollector,
8387
8327
  conversationHistory,
8388
8328
  streamCallback,
8389
8329
  collections,
@@ -8394,13 +8334,10 @@ ${executedToolsText}`);
8394
8334
  );
8395
8335
  const elapsedTime = Date.now() - startTime;
8396
8336
  logger.info(`[${this.getProviderName()}] Total time taken: ${elapsedTime}ms (${(elapsedTime / 1e3).toFixed(2)}s)`);
8397
- logCollector?.info(`Total time taken: ${elapsedTime}ms (${(elapsedTime / 1e3).toFixed(2)}s)`);
8398
8337
  return textResponse;
8399
8338
  } catch (error) {
8400
8339
  const errorMsg = error instanceof Error ? error.message : String(error);
8401
8340
  logger.error(`[${this.getProviderName()}] Error in handleUserRequest: ${errorMsg}`);
8402
- logger.debug(`[${this.getProviderName()}] Error details:`, error);
8403
- logCollector?.error(`Error processing request: ${errorMsg}`);
8404
8341
  userPromptErrorLogger.logError(
8405
8342
  "handleUserRequest",
8406
8343
  error instanceof Error ? error : new Error(errorMsg),
@@ -8408,7 +8345,6 @@ ${executedToolsText}`);
8408
8345
  );
8409
8346
  const elapsedTime = Date.now() - startTime;
8410
8347
  logger.info(`[${this.getProviderName()}] Total time taken: ${elapsedTime}ms (${(elapsedTime / 1e3).toFixed(2)}s)`);
8411
- logCollector?.info(`Total time taken: ${elapsedTime}ms (${(elapsedTime / 1e3).toFixed(2)}s)`);
8412
8348
  return {
8413
8349
  success: false,
8414
8350
  errors: [errorMsg],
@@ -8424,7 +8360,7 @@ ${executedToolsText}`);
8424
8360
  * This helps provide intelligent suggestions for follow-up queries
8425
8361
  * For general/conversational questions without components, pass textResponse instead
8426
8362
  */
8427
- async generateNextQuestions(originalUserPrompt, component, componentData, apiKey, logCollector, conversationHistory, textResponse) {
8363
+ async generateNextQuestions(originalUserPrompt, component, componentData, apiKey, conversationHistory, textResponse) {
8428
8364
  const methodStartTime = Date.now();
8429
8365
  const methodName = "generateNextQuestions";
8430
8366
  const promptPreview = originalUserPrompt.substring(0, 50) + (originalUserPrompt.length > 50 ? "..." : "");
@@ -8469,14 +8405,6 @@ ${executedToolsText}`);
8469
8405
  // Parse as JSON
8470
8406
  );
8471
8407
  const nextQuestions = result.nextQuestions || [];
8472
- logCollector?.logExplanation(
8473
- "Next questions generated",
8474
- "Generated intelligent follow-up questions based on component",
8475
- {
8476
- count: nextQuestions.length,
8477
- questions: nextQuestions
8478
- }
8479
- );
8480
8408
  const methodDuration = Date.now() - methodStartTime;
8481
8409
  logger.info(`[${this.getProviderName()}] [TIMING] DONE ${methodName} in ${methodDuration}ms | questions: ${nextQuestions.length}`);
8482
8410
  return nextQuestions;
@@ -8484,8 +8412,6 @@ ${executedToolsText}`);
8484
8412
  const methodDuration = Date.now() - methodStartTime;
8485
8413
  const errorMsg = error instanceof Error ? error.message : String(error);
8486
8414
  logger.error(`[${this.getProviderName()}] [TIMING] FAILED ${methodName} in ${methodDuration}ms | error: ${errorMsg}`);
8487
- logger.debug(`[${this.getProviderName()}] Next questions generation error details:`, error);
8488
- logCollector?.error(`Error generating next questions: ${errorMsg}`);
8489
8415
  return [];
8490
8416
  }
8491
8417
  }
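
With logCollector removed from generateNextQuestions, conversationHistory and textResponse each move up one position in the argument list. A hedged call sketch against the new signature; the prompt, history and response strings below are placeholders:

    // Hypothetical call; order per the signature above:
    // (originalUserPrompt, component, componentData, apiKey, conversationHistory, textResponse)
    const conversationHistory = "Q1: What was revenue last month?\nA1: $1.2M";
    const nextQuestions = await anthropicLLM.generateNextQuestions(
      "Show monthly revenue by region",          // originalUserPrompt
      null,                                      // component (none for a general question)
      void 0,                                    // componentData
      process.env.ANTHROPIC_API_KEY,             // apiKey
      conversationHistory,                       // previously preceded by logCollector
      "Revenue grew 12% quarter over quarter."   // textResponse used as extra context
    );
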
@@ -8542,10 +8468,10 @@ var GeminiLLM = class extends BaseLLM {
8542
8468
  super(config);
8543
8469
  }
8544
8470
  getDefaultModel() {
8545
- return "gemini/gemini-3-pro-preview";
8471
+ return "gemini/gemini-2.5-flash";
8546
8472
  }
8547
8473
  getDefaultFastModel() {
8548
- return "gemini/gemini-3-flash-preview";
8474
+ return "gemini/gemini-2.5-flash";
8549
8475
  }
8550
8476
  getDefaultApiKey() {
8551
8477
  return process.env.GEMINI_API_KEY;
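
Both Gemini defaults now resolve to gemini/gemini-2.5-flash, and the values are supplied through the getDefaultModel/getDefaultFastModel hooks. A sketch of pinning different models by subclassing that hook pattern; the subclass name and the gemini-2.5-pro id are assumptions, and how a custom provider class would be wired into the SDK is not shown in this bundle:

    // Hypothetical subclass overriding the model hooks shown above.
    class PinnedGeminiLLM extends GeminiLLM {
      getDefaultModel() {
        return "gemini/gemini-2.5-pro";   // assumed model id, not taken from this diff
      }
      getDefaultFastModel() {
        return "gemini/gemini-2.5-flash"; // matches the new default fast model
      }
    }
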
@@ -8599,332 +8525,96 @@ function getLLMProviders() {
8599
8525
  return DEFAULT_PROVIDERS;
8600
8526
  }
8601
8527
  }
8602
- var useAnthropicMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8603
- logger.debug("[useAnthropicMethod] Initializing Anthropic Claude matching method");
8604
- logger.debug(`[useAnthropicMethod] Response mode: ${responseMode}`);
8605
- const msg = `Using Anthropic Claude ${responseMode === "text" ? "text response" : "matching"} method...`;
8606
- logCollector?.info(msg);
8528
+ var useAnthropicMethod = async (prompt, components, apiKey, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8607
8529
  if (responseMode === "component" && components.length === 0) {
8608
8530
  const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
8609
8531
  logger.error("[useAnthropicMethod] No components available");
8610
- logCollector?.error(emptyMsg);
8611
8532
  return { success: false, errors: [emptyMsg] };
8612
8533
  }
8613
- logger.debug(`[useAnthropicMethod] Processing with ${components.length} components`);
8614
- const matchResult = await anthropicLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8534
+ const matchResult = await anthropicLLM.handleUserRequest(prompt, components, apiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8615
8535
  logger.info(`[useAnthropicMethod] Successfully generated ${responseMode} using Anthropic`);
8616
8536
  return matchResult;
8617
8537
  };
8618
- var useGroqMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8538
+ var useGroqMethod = async (prompt, components, apiKey, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8619
8539
  logger.debug("[useGroqMethod] Initializing Groq LLM matching method");
8620
8540
  logger.debug(`[useGroqMethod] Response mode: ${responseMode}`);
8621
- const msg = `Using Groq LLM ${responseMode === "text" ? "text response" : "matching"} method...`;
8622
- logger.info(msg);
8623
- logCollector?.info(msg);
8624
8541
  if (responseMode === "component" && components.length === 0) {
8625
8542
  const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
8626
8543
  logger.error("[useGroqMethod] No components available");
8627
- logCollector?.error(emptyMsg);
8628
8544
  return { success: false, errors: [emptyMsg] };
8629
8545
  }
8630
8546
  logger.debug(`[useGroqMethod] Processing with ${components.length} components`);
8631
- const matchResult = await groqLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8547
+ const matchResult = await groqLLM.handleUserRequest(prompt, components, apiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8632
8548
  logger.info(`[useGroqMethod] Successfully generated ${responseMode} using Groq`);
8633
8549
  return matchResult;
8634
8550
  };
8635
- var useGeminiMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8551
+ var useGeminiMethod = async (prompt, components, apiKey, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8636
8552
  logger.debug("[useGeminiMethod] Initializing Gemini LLM matching method");
8637
8553
  logger.debug(`[useGeminiMethod] Response mode: ${responseMode}`);
8638
- const msg = `Using Gemini LLM ${responseMode === "text" ? "text response" : "matching"} method...`;
8639
- logger.info(msg);
8640
- logCollector?.info(msg);
8641
8554
  if (responseMode === "component" && components.length === 0) {
8642
8555
  const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
8643
8556
  logger.error("[useGeminiMethod] No components available");
8644
- logCollector?.error(emptyMsg);
8645
8557
  return { success: false, errors: [emptyMsg] };
8646
8558
  }
8647
8559
  logger.debug(`[useGeminiMethod] Processing with ${components.length} components`);
8648
- const matchResult = await geminiLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8560
+ const matchResult = await geminiLLM.handleUserRequest(prompt, components, apiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8649
8561
  logger.info(`[useGeminiMethod] Successfully generated ${responseMode} using Gemini`);
8650
8562
  return matchResult;
8651
8563
  };
8652
- var useOpenAIMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8564
+ var useOpenAIMethod = async (prompt, components, apiKey, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8653
8565
  logger.debug("[useOpenAIMethod] Initializing OpenAI GPT matching method");
8654
8566
  logger.debug(`[useOpenAIMethod] Response mode: ${responseMode}`);
8655
- const msg = `Using OpenAI GPT ${responseMode === "text" ? "text response" : "matching"} method...`;
8656
- logger.info(msg);
8657
- logCollector?.info(msg);
8658
8567
  if (responseMode === "component" && components.length === 0) {
8659
8568
  const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
8660
8569
  logger.error("[useOpenAIMethod] No components available");
8661
- logCollector?.error(emptyMsg);
8662
8570
  return { success: false, errors: [emptyMsg] };
8663
8571
  }
8664
8572
  logger.debug(`[useOpenAIMethod] Processing with ${components.length} components`);
8665
- const matchResult = await openaiLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8573
+ const matchResult = await openaiLLM.handleUserRequest(prompt, components, apiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8666
8574
  logger.info(`[useOpenAIMethod] Successfully generated ${responseMode} using OpenAI`);
8667
8575
  return matchResult;
8668
8576
  };
8669
- var getUserResponseFromCache = async (prompt) => {
8670
- return false;
8671
- };
8672
- var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8673
- logger.debug(`[get_user_response] Starting user response generation for prompt: "${prompt.substring(0, 50)}..."`);
8674
- logger.debug(`[get_user_response] Response mode: ${responseMode}`);
8675
- logger.debug("[get_user_response] Checking cache for existing response");
8676
- const userResponse = await getUserResponseFromCache(prompt);
8677
- if (userResponse) {
8678
- logger.info("[get_user_response] User response found in cache - returning cached result");
8679
- logCollector?.info("User response found in cache");
8680
- return {
8681
- success: true,
8682
- data: userResponse,
8683
- errors: []
8684
- };
8685
- }
8686
- logger.debug("[get_user_response] No cached response found, proceeding with LLM providers");
8577
+ var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8687
8578
  const providers = llmProviders || getLLMProviders();
8688
8579
  const errors = [];
8689
- const providerOrder = providers.join(", ");
8690
- logCollector?.info(`LLM Provider order: [${providerOrder}]`);
8691
- if (conversationHistory && conversationHistory.length > 0) {
8692
- const exchangeCount = conversationHistory.split("\n").filter((l) => l.startsWith("Q")).length;
8693
- logger.debug(`[get_user_response] Using conversation history with ${exchangeCount} previous exchanges`);
8694
- logCollector?.info(`Using conversation history with ${exchangeCount} previous exchanges`);
8695
- } else {
8696
- logger.debug("[get_user_response] No conversation history available");
8697
- }
8580
+ logger.info(`[get_user_response] LLM Provider order: [${providers.join(", ")}]`);
8698
8581
  for (let i = 0; i < providers.length; i++) {
8699
8582
  const provider = providers[i];
8700
8583
  const isLastProvider = i === providers.length - 1;
8701
- const attemptMsg = `Attempting provider: ${provider} (${i + 1}/${providers.length})`;
8702
- logCollector?.info(attemptMsg);
8584
+ logger.info(`[get_user_response] Attempting provider: ${provider} (${i + 1}/${providers.length})`);
8703
8585
  let result;
8704
8586
  if (provider === "anthropic") {
8705
- result = await useAnthropicMethod(prompt, components, anthropicApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8587
+ result = await useAnthropicMethod(prompt, components, anthropicApiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8706
8588
  } else if (provider === "groq") {
8707
- result = await useGroqMethod(prompt, components, groqApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8589
+ result = await useGroqMethod(prompt, components, groqApiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8708
8590
  } else if (provider === "gemini") {
8709
- result = await useGeminiMethod(prompt, components, geminiApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8591
+ result = await useGeminiMethod(prompt, components, geminiApiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8710
8592
  } else if (provider === "openai") {
8711
- result = await useOpenAIMethod(prompt, components, openaiApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8593
+ result = await useOpenAIMethod(prompt, components, openaiApiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8712
8594
  } else {
8713
8595
  logger.warn(`[get_user_response] Unknown provider: ${provider} - skipping`);
8714
8596
  errors.push(`Unknown provider: ${provider}`);
8715
8597
  continue;
8716
8598
  }
8717
8599
  if (result.success) {
8718
- const successMsg = `Success with provider: ${provider}`;
8719
- logger.info(`${successMsg}`);
8720
- logCollector?.info(successMsg);
8600
+ logger.info(`[get_user_response] Success with provider: ${provider}`);
8721
8601
  return result;
8722
8602
  } else {
8723
8603
  const providerErrors = result.errors.map((err) => `${provider}: ${err}`);
8724
8604
  errors.push(...providerErrors);
8725
- const warnMsg = `Provider ${provider} returned unsuccessful result: ${result.errors.join(", ")}`;
8726
- logger.warn(`[get_user_response] ${warnMsg}`);
8727
- logCollector?.warn(warnMsg);
8605
+ logger.warn(`[get_user_response] Provider ${provider} returned unsuccessful result: ${result.errors.join(", ")}`);
8728
8606
  if (!isLastProvider) {
8729
- const fallbackMsg = "Falling back to next provider...";
8730
- logger.info(`[get_user_response] ${fallbackMsg}`);
8731
- logCollector?.info(fallbackMsg);
8607
+ logger.info("[get_user_response] Falling back to next provider...");
8732
8608
  }
8733
8609
  }
8734
8610
  }
8735
- const failureMsg = `All LLM providers failed`;
8736
- logger.error(`[get_user_response] ${failureMsg}. Errors: ${errors.join("; ")}`);
8737
- logCollector?.error(`${failureMsg}. Errors: ${errors.join("; ")}`);
8611
+ logger.error(`[get_user_response] All LLM providers failed. Errors: ${errors.join("; ")}`);
8738
8612
  return {
8739
8613
  success: false,
8740
8614
  errors
8741
8615
  };
8742
8616
  };
8743
8617
 
8744
- // src/utils/log-collector.ts
8745
- var LOG_LEVEL_PRIORITY2 = {
8746
- errors: 0,
8747
- warnings: 1,
8748
- info: 2,
8749
- verbose: 3
8750
- };
8751
- var MESSAGE_LEVEL_PRIORITY2 = {
8752
- error: 0,
8753
- warn: 1,
8754
- info: 2,
8755
- debug: 3
8756
- };
8757
- var UILogCollector = class {
8758
- constructor(clientId, sendMessage, uiBlockId) {
8759
- this.logs = [];
8760
- this.uiBlockId = uiBlockId || null;
8761
- this.clientId = clientId;
8762
- this.sendMessage = sendMessage;
8763
- this.currentLogLevel = logger.getLogLevel();
8764
- }
8765
- /**
8766
- * Check if logging is enabled (uiBlockId is provided)
8767
- */
8768
- isEnabled() {
8769
- return this.uiBlockId !== null;
8770
- }
8771
- /**
8772
- * Check if a message should be logged based on current log level
8773
- */
8774
- shouldLog(messageLevel) {
8775
- const currentLevelPriority = LOG_LEVEL_PRIORITY2[this.currentLogLevel];
8776
- const messagePriority = MESSAGE_LEVEL_PRIORITY2[messageLevel];
8777
- return messagePriority <= currentLevelPriority;
8778
- }
8779
- /**
8780
- * Add a log entry with timestamp and immediately send to runtime
8781
- * Only logs that pass the log level filter are captured and sent
8782
- */
8783
- addLog(level, message, type, data) {
8784
- if (!this.shouldLog(level)) {
8785
- return;
8786
- }
8787
- const log = {
8788
- timestamp: Date.now(),
8789
- level,
8790
- message,
8791
- ...type && { type },
8792
- ...data && { data }
8793
- };
8794
- this.logs.push(log);
8795
- this.sendLogImmediately(log);
8796
- switch (level) {
8797
- case "error":
8798
- logger.error("UILogCollector:", log);
8799
- break;
8800
- case "warn":
8801
- logger.warn("UILogCollector:", log);
8802
- break;
8803
- case "info":
8804
- logger.info("UILogCollector:", log);
8805
- break;
8806
- case "debug":
8807
- logger.debug("UILogCollector:", log);
8808
- break;
8809
- }
8810
- }
8811
- /**
8812
- * Send a single log to runtime immediately
8813
- */
8814
- sendLogImmediately(log) {
8815
- if (!this.isEnabled()) {
8816
- return;
8817
- }
8818
- const response = {
8819
- id: this.uiBlockId,
8820
- type: "UI_LOGS",
8821
- from: { type: "data-agent" },
8822
- to: {
8823
- type: "runtime",
8824
- id: this.clientId
8825
- },
8826
- payload: {
8827
- logs: [log]
8828
- // Send single log in array
8829
- }
8830
- };
8831
- this.sendMessage(response);
8832
- }
8833
- /**
8834
- * Log info message
8835
- */
8836
- info(message, type, data) {
8837
- if (this.isEnabled()) {
8838
- this.addLog("info", message, type, data);
8839
- }
8840
- }
8841
- /**
8842
- * Log error message
8843
- */
8844
- error(message, type, data) {
8845
- if (this.isEnabled()) {
8846
- this.addLog("error", message, type, data);
8847
- }
8848
- }
8849
- /**
8850
- * Log warning message
8851
- */
8852
- warn(message, type, data) {
8853
- if (this.isEnabled()) {
8854
- this.addLog("warn", message, type, data);
8855
- }
8856
- }
8857
- /**
8858
- * Log debug message
8859
- */
8860
- debug(message, type, data) {
8861
- if (this.isEnabled()) {
8862
- this.addLog("debug", message, type, data);
8863
- }
8864
- }
8865
- /**
8866
- * Log LLM explanation with typed metadata
8867
- */
8868
- logExplanation(message, explanation, data) {
8869
- if (this.isEnabled()) {
8870
- this.addLog("info", message, "explanation", {
8871
- explanation,
8872
- ...data
8873
- });
8874
- }
8875
- }
8876
- /**
8877
- * Log generated query with typed metadata
8878
- */
8879
- logQuery(message, query, data) {
8880
- if (this.isEnabled()) {
8881
- this.addLog("info", message, "query", {
8882
- query,
8883
- ...data
8884
- });
8885
- }
8886
- }
8887
- /**
8888
- * Send all collected logs at once (optional, for final summary)
8889
- */
8890
- sendAllLogs() {
8891
- if (!this.isEnabled() || this.logs.length === 0) {
8892
- return;
8893
- }
8894
- const response = {
8895
- id: this.uiBlockId,
8896
- type: "UI_LOGS",
8897
- from: { type: "data-agent" },
8898
- to: {
8899
- type: "runtime",
8900
- id: this.clientId
8901
- },
8902
- payload: {
8903
- logs: this.logs
8904
- }
8905
- };
8906
- this.sendMessage(response);
8907
- }
8908
- /**
8909
- * Get all collected logs
8910
- */
8911
- getLogs() {
8912
- return [...this.logs];
8913
- }
8914
- /**
8915
- * Clear all logs
8916
- */
8917
- clearLogs() {
8918
- this.logs = [];
8919
- }
8920
- /**
8921
- * Set uiBlockId (in case it's provided later)
8922
- */
8923
- setUIBlockId(uiBlockId) {
8924
- this.uiBlockId = uiBlockId;
8925
- }
8926
- };
8927
-
8928
8618
  // src/utils/conversation-saver.ts
8929
8619
  function transformUIBlockForDB(uiblock, userPrompt, uiBlockId) {
8930
8620
  const component = uiblock?.generatedComponentMetadata && Object.keys(uiblock.generatedComponentMetadata).length > 0 ? uiblock.generatedComponentMetadata : null;
@@ -9055,7 +8745,6 @@ var CONTEXT_CONFIG = {
9055
8745
  // src/handlers/user-prompt-request.ts
9056
8746
  var get_user_request = async (data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, externalTools) => {
9057
8747
  const errors = [];
9058
- logger.debug("[USER_PROMPT_REQ] Parsing incoming message data");
9059
8748
  const parseResult = UserPromptRequestMessageSchema.safeParse(data);
9060
8749
  if (!parseResult.success) {
9061
8750
  const zodError = parseResult.error;
@@ -9087,27 +8776,23 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
9087
8776
  if (!prompt) {
9088
8777
  errors.push("Prompt not found");
9089
8778
  }
9090
- logger.debug(`[REQUEST ${id}] Full request details - uiBlockId: ${existingUiBlockId}, threadId: ${threadId}, prompt: ${prompt}`);
9091
8779
  if (errors.length > 0) {
9092
8780
  return { success: false, errors, id, wsId };
9093
8781
  }
9094
- const logCollector = new UILogCollector(wsId, sendMessage, existingUiBlockId);
9095
8782
  const threadManager = ThreadManager.getInstance();
9096
8783
  let thread = threadManager.getThread(threadId);
9097
8784
  if (!thread) {
9098
8785
  thread = threadManager.createThread(threadId);
9099
8786
  logger.info(`Created new thread: ${threadId}`);
9100
8787
  }
9101
- logCollector.info(`Starting user prompt request with ${components.length} components`);
8788
+ logger.info(`Starting user prompt request with ${components.length} components`);
9102
8789
  const conversationHistory = thread.getConversationContext(CONTEXT_CONFIG.MAX_CONVERSATION_CONTEXT_BLOCKS, existingUiBlockId);
9103
8790
  const responseMode = payload.responseMode || "component";
9104
- logger.info("responseMode", responseMode);
9105
8791
  let streamCallback;
9106
8792
  let accumulatedStreamResponse = "";
9107
8793
  if (responseMode === "text") {
9108
8794
  streamCallback = (chunk) => {
9109
8795
  accumulatedStreamResponse += chunk;
9110
- logger.debug(`[STREAM] Sending chunk (${chunk.length} chars): "${chunk.substring(0, 20)}..."`);
9111
8796
  const streamMessage = {
9112
8797
  id: `stream_${existingUiBlockId}`,
9113
8798
  // Different ID pattern for streaming
@@ -9123,7 +8808,6 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
9123
8808
  }
9124
8809
  };
9125
8810
  sendMessage(streamMessage);
9126
- logger.debug(`[STREAM] Chunk sent to wsId: ${wsId}`);
9127
8811
  };
9128
8812
  }
9129
8813
  const userResponse = await get_user_response(
@@ -9134,7 +8818,6 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
9134
8818
  geminiApiKey,
9135
8819
  openaiApiKey,
9136
8820
  llmProviders,
9137
- logCollector,
9138
8821
  conversationHistory,
9139
8822
  responseMode,
9140
8823
  streamCallback,
@@ -9142,7 +8825,7 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
9142
8825
  externalTools,
9143
8826
  userId
9144
8827
  );
9145
- logCollector.info("User prompt request completed");
8828
+ logger.info("User prompt request completed");
9146
8829
  const uiBlockId = existingUiBlockId;
9147
8830
  if (!userResponse.success) {
9148
8831
  logger.error(`User prompt request failed with errors: ${userResponse.errors.join(", ")}`);
@@ -9209,9 +8892,6 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
9209
8892
  logger.info(
9210
8893
  `Skipping conversation save - response from exact semantic match (${(semanticSimilarity * 100).toFixed(2)}% similarity)`
9211
8894
  );
9212
- logCollector.info(
9213
- `Using exact cached result (${(semanticSimilarity * 100).toFixed(2)}% match) - not saving duplicate conversation`
9214
- );
9215
8895
  } else {
9216
8896
  const uiBlockData = uiBlock.toJSON();
9217
8897
  const saveResult = await saveConversation({
@@ -9419,7 +9099,7 @@ function sendResponse(id, res, sendMessage, clientId) {
9419
9099
  }
9420
9100
 
9421
9101
  // src/userResponse/next-questions.ts
9422
- async function generateNextQuestions(originalUserPrompt, component, componentData, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, logCollector, conversationHistory) {
9102
+ async function generateNextQuestions(originalUserPrompt, component, componentData, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, conversationHistory) {
9423
9103
  try {
9424
9104
  logger.debug("[generateNextQuestions] Starting next questions generation");
9425
9105
  logger.debug(`[generateNextQuestions] User prompt: "${originalUserPrompt?.substring(0, 50)}..."`);
@@ -9438,7 +9118,6 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
9438
9118
  const isLastProvider = i === providers.length - 1;
9439
9119
  try {
9440
9120
  logger.info(`[generateNextQuestions] Attempting provider: ${provider} (${i + 1}/${providers.length})`);
9441
- logCollector?.info(`Generating questions with ${provider}...`);
9442
9121
  let result = [];
9443
9122
  if (provider === "groq") {
9444
9123
  logger.debug("[generateNextQuestions] Using Groq LLM for next questions");
@@ -9447,7 +9126,6 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
9447
9126
  component,
9448
9127
  componentData,
9449
9128
  groqApiKey,
9450
- logCollector,
9451
9129
  conversationHistory
9452
9130
  );
9453
9131
  } else if (provider === "gemini") {
@@ -9457,7 +9135,6 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
9457
9135
  component,
9458
9136
  componentData,
9459
9137
  geminiApiKey,
9460
- logCollector,
9461
9138
  conversationHistory
9462
9139
  );
9463
9140
  } else if (provider === "openai") {
@@ -9467,7 +9144,6 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
9467
9144
  component,
9468
9145
  componentData,
9469
9146
  openaiApiKey,
9470
- logCollector,
9471
9147
  conversationHistory
9472
9148
  );
9473
9149
  } else {
@@ -9477,44 +9153,32 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
9477
9153
  component,
9478
9154
  componentData,
9479
9155
  anthropicApiKey,
9480
- logCollector,
9481
9156
  conversationHistory
9482
9157
  );
9483
9158
  }
9484
9159
  if (result && result.length > 0) {
9485
9160
  logger.info(`[generateNextQuestions] Successfully generated ${result.length} questions with ${provider}`);
9486
9161
  logger.debug(`[generateNextQuestions] Questions: ${JSON.stringify(result)}`);
9487
- logCollector?.info(`Generated ${result.length} follow-up questions`);
9488
9162
  return result;
9489
9163
  }
9490
- const warnMsg = `No questions generated from ${provider}${!isLastProvider ? ", trying next provider..." : ""}`;
9491
- logger.warn(`[generateNextQuestions] ${warnMsg}`);
9492
- if (!isLastProvider) {
9493
- logCollector?.warn(warnMsg);
9494
- }
9164
+ logger.warn(`[generateNextQuestions] No questions generated from ${provider}${!isLastProvider ? ", trying next provider..." : ""}`);
9495
9165
  } catch (providerError) {
9496
9166
  const errorMsg = providerError instanceof Error ? providerError.message : String(providerError);
9497
9167
  logger.error(`[generateNextQuestions] Provider ${provider} failed: ${errorMsg}`);
9498
9168
  logger.debug(`[generateNextQuestions] Provider error details:`, providerError);
9499
9169
  if (!isLastProvider) {
9500
- const fallbackMsg = `Provider ${provider} failed, trying next provider...`;
9501
- logger.info(`[generateNextQuestions] ${fallbackMsg}`);
9502
- logCollector?.warn(fallbackMsg);
9503
- } else {
9504
- logCollector?.error(`Failed to generate questions with ${provider}`);
9170
+ logger.info(`[generateNextQuestions] Provider ${provider} failed, trying next provider...`);
9505
9171
  }
9506
9172
  continue;
9507
9173
  }
9508
9174
  }
9509
9175
  logger.warn("[generateNextQuestions] All providers failed or returned no questions");
9510
- logCollector?.warn("Unable to generate follow-up questions");
9511
9176
  return [];
9512
9177
  } catch (error) {
9513
9178
  const errorMsg = error instanceof Error ? error.message : String(error);
9514
9179
  const errorStack = error instanceof Error ? error.stack : void 0;
9515
9180
  logger.error(`[generateNextQuestions] Error generating next questions: ${errorMsg}`);
9516
9181
  logger.debug("[generateNextQuestions] Error stack trace:", errorStack);
9517
- logCollector?.error(`Error generating next questions: ${errorMsg}`);
9518
9182
  return [];
9519
9183
  }
9520
9184
  }
@@ -9562,9 +9226,6 @@ async function handleActionsRequest(data, sendMessage, anthropicApiKey, groqApiK
9562
9226
  return;
9563
9227
  }
9564
9228
  logger.info(`[ACTIONS_REQ ${id}] UIBlock retrieved successfully`);
9565
- logger.debug(`[ACTIONS_REQ ${id}] Creating UILogCollector for uiBlockId: ${uiBlockId}`);
9566
- const logCollector = new UILogCollector(wsId, sendMessage, uiBlockId);
9567
- logger.info(`[ACTIONS_REQ ${id}] UILogCollector initialized`);
9568
9229
  logger.debug(`[ACTIONS_REQ ${id}] Extracting data from UIBlock`);
9569
9230
  const userQuestion = uiBlock.getUserQuestion();
9570
9231
  const component = uiBlock.getComponentMetadata();
@@ -9578,13 +9239,11 @@ async function handleActionsRequest(data, sendMessage, anthropicApiKey, groqApiK
9578
9239
  logger.info(`[ACTIONS_REQ ${id}] Conversation history extracted: ${historyLineCount} lines`);
9579
9240
  logger.debug(`[ACTIONS_REQ ${id}] Conversation history preview:
9580
9241
  ${conversationHistory.substring(0, 200)}...`);
9581
- logCollector.info(`Generating actions for UIBlock: ${uiBlockId}`);
9582
- logger.info(`[ACTIONS_REQ ${id}] Generating actions for component: ${component?.name || "unknown"}`);
9242
+ logger.info(`[ACTIONS_REQ ${id}] Generating actions for UIBlock: ${uiBlockId}, component: ${component?.name || "unknown"}`);
9583
9243
  logger.debug(`[ACTIONS_REQ ${id}] Checking if actions are already cached`);
9584
9244
  const startTime = Date.now();
9585
9245
  const actions = await uiBlock.getOrFetchActions(async () => {
9586
9246
  logger.info(`[ACTIONS_REQ ${id}] Actions not cached, generating new actions...`);
9587
- logCollector.info("Generating follow-up questions...");
9588
9247
  logger.info(`[ACTIONS_REQ ${id}] Starting next questions generation with ${llmProviders?.join(", ") || "default"} providers`);
9589
9248
  const nextQuestions = await generateNextQuestions(
9590
9249
  userQuestion,
@@ -9595,7 +9254,6 @@ ${conversationHistory.substring(0, 200)}...`);
9595
9254
  geminiApiKey,
9596
9255
  openaiApiKey,
9597
9256
  llmProviders,
9598
- logCollector,
9599
9257
  conversationHistory
9600
9258
  );
9601
9259
  logger.info(`[ACTIONS_REQ ${id}] Generated ${nextQuestions.length} questions`);
@@ -9613,11 +9271,10 @@ ${conversationHistory.substring(0, 200)}...`);
9613
9271
  const processingTime = Date.now() - startTime;
9614
9272
  logger.info(`[ACTIONS_REQ ${id}] Actions retrieved in ${processingTime}ms - ${actions.length} actions total`);
9615
9273
  if (actions.length > 0) {
9616
- logCollector.info(`Generated ${actions.length} follow-up questions successfully`);
9274
+ logger.info(`[ACTIONS_REQ ${id}] Generated ${actions.length} follow-up questions successfully`);
9617
9275
  logger.debug(`[ACTIONS_REQ ${id}] Actions: ${actions.map((a) => a.name).join(", ")}`);
9618
9276
  } else {
9619
9277
  logger.warn(`[ACTIONS_REQ ${id}] No actions generated`);
9620
- logCollector.warn("No follow-up questions could be generated");
9621
9278
  }
9622
9279
  logger.debug(`[ACTIONS_REQ ${id}] Sending successful response to client`);
9623
9280
  sendResponse2(id, {
@@ -9636,15 +9293,6 @@ ${conversationHistory.substring(0, 200)}...`);
9636
9293
  const errorStack = error instanceof Error ? error.stack : void 0;
9637
9294
  logger.error(`[ACTIONS_REQ] Failed to handle actions request: ${errorMessage}`);
9638
9295
  logger.debug(`[ACTIONS_REQ] Error stack trace:`, errorStack);
9639
- try {
9640
- const parsedData = data;
9641
- if (parsedData?.id && parsedData?.from?.id) {
9642
- const logCollector = parsedData?.payload?.SA_RUNTIME?.uiBlockId ? new UILogCollector(parsedData.from.id, sendMessage, parsedData.payload.SA_RUNTIME.uiBlockId) : void 0;
9643
- logCollector?.error(`Failed to generate actions: ${errorMessage}`);
9644
- }
9645
- } catch (logError) {
9646
- logger.debug("[ACTIONS_REQ] Failed to send error logs to UI:", logError);
9647
- }
9648
9296
  sendResponse2(null, {
9649
9297
  success: false,
9650
9298
  error: errorMessage
@@ -10225,7 +9873,6 @@ function sendResponse3(id, res, sendMessage, clientId) {
10225
9873
  var dashboardManager = null;
10226
9874
  function setDashboardManager(manager) {
10227
9875
  dashboardManager = manager;
10228
- logger.info("DashboardManager instance set");
10229
9876
  }
10230
9877
  function getDashboardManager() {
10231
9878
  if (!dashboardManager) {
@@ -13552,6 +13199,190 @@ var ReportManager = class {
13552
13199
  }
13553
13200
  };
13554
13201
 
13202
+ // src/utils/log-collector.ts
13203
+ var LOG_LEVEL_PRIORITY2 = {
13204
+ errors: 0,
13205
+ warnings: 1,
13206
+ info: 2,
13207
+ verbose: 3
13208
+ };
13209
+ var MESSAGE_LEVEL_PRIORITY2 = {
13210
+ error: 0,
13211
+ warn: 1,
13212
+ info: 2,
13213
+ debug: 3
13214
+ };
13215
+ var UILogCollector = class {
13216
+ constructor(clientId, sendMessage, uiBlockId) {
13217
+ this.logs = [];
13218
+ this.uiBlockId = uiBlockId || null;
13219
+ this.clientId = clientId;
13220
+ this.sendMessage = sendMessage;
13221
+ this.currentLogLevel = logger.getLogLevel();
13222
+ }
13223
+ /**
13224
+ * Check if logging is enabled (uiBlockId is provided)
13225
+ */
13226
+ isEnabled() {
13227
+ return this.uiBlockId !== null;
13228
+ }
13229
+ /**
13230
+ * Check if a message should be logged based on current log level
13231
+ */
13232
+ shouldLog(messageLevel) {
13233
+ const currentLevelPriority = LOG_LEVEL_PRIORITY2[this.currentLogLevel];
13234
+ const messagePriority = MESSAGE_LEVEL_PRIORITY2[messageLevel];
13235
+ return messagePriority <= currentLevelPriority;
13236
+ }
13237
+ /**
13238
+ * Add a log entry with timestamp and immediately send to runtime
13239
+ * Only logs that pass the log level filter are captured and sent
13240
+ */
13241
+ addLog(level, message, type, data) {
13242
+ if (!this.shouldLog(level)) {
13243
+ return;
13244
+ }
13245
+ const log = {
13246
+ timestamp: Date.now(),
13247
+ level,
13248
+ message,
13249
+ ...type && { type },
13250
+ ...data && { data }
13251
+ };
13252
+ this.logs.push(log);
13253
+ this.sendLogImmediately(log);
13254
+ switch (level) {
13255
+ case "error":
13256
+ logger.error("UILogCollector:", log);
13257
+ break;
13258
+ case "warn":
13259
+ logger.warn("UILogCollector:", log);
13260
+ break;
13261
+ case "info":
13262
+ logger.info("UILogCollector:", log);
13263
+ break;
13264
+ case "debug":
13265
+ logger.debug("UILogCollector:", log);
13266
+ break;
13267
+ }
13268
+ }
13269
+ /**
13270
+ * Send a single log to runtime immediately
13271
+ */
13272
+ sendLogImmediately(log) {
13273
+ if (!this.isEnabled()) {
13274
+ return;
13275
+ }
13276
+ const response = {
13277
+ id: this.uiBlockId,
13278
+ type: "UI_LOGS",
13279
+ from: { type: "data-agent" },
13280
+ to: {
13281
+ type: "runtime",
13282
+ id: this.clientId
13283
+ },
13284
+ payload: {
13285
+ logs: [log]
13286
+ // Send single log in array
13287
+ }
13288
+ };
13289
+ this.sendMessage(response);
13290
+ }
13291
+ /**
13292
+ * Log info message
13293
+ */
13294
+ info(message, type, data) {
13295
+ if (this.isEnabled()) {
13296
+ this.addLog("info", message, type, data);
13297
+ }
13298
+ }
13299
+ /**
13300
+ * Log error message
13301
+ */
13302
+ error(message, type, data) {
13303
+ if (this.isEnabled()) {
13304
+ this.addLog("error", message, type, data);
13305
+ }
13306
+ }
13307
+ /**
13308
+ * Log warning message
13309
+ */
13310
+ warn(message, type, data) {
13311
+ if (this.isEnabled()) {
13312
+ this.addLog("warn", message, type, data);
13313
+ }
13314
+ }
13315
+ /**
13316
+ * Log debug message
13317
+ */
13318
+ debug(message, type, data) {
13319
+ if (this.isEnabled()) {
13320
+ this.addLog("debug", message, type, data);
13321
+ }
13322
+ }
13323
+ /**
13324
+ * Log LLM explanation with typed metadata
13325
+ */
13326
+ logExplanation(message, explanation, data) {
13327
+ if (this.isEnabled()) {
13328
+ this.addLog("info", message, "explanation", {
13329
+ explanation,
13330
+ ...data
13331
+ });
13332
+ }
13333
+ }
13334
+ /**
13335
+ * Log generated query with typed metadata
13336
+ */
13337
+ logQuery(message, query, data) {
13338
+ if (this.isEnabled()) {
13339
+ this.addLog("info", message, "query", {
13340
+ query,
13341
+ ...data
13342
+ });
13343
+ }
13344
+ }
13345
+ /**
13346
+ * Send all collected logs at once (optional, for final summary)
13347
+ */
13348
+ sendAllLogs() {
13349
+ if (!this.isEnabled() || this.logs.length === 0) {
13350
+ return;
13351
+ }
13352
+ const response = {
13353
+ id: this.uiBlockId,
13354
+ type: "UI_LOGS",
13355
+ from: { type: "data-agent" },
13356
+ to: {
13357
+ type: "runtime",
13358
+ id: this.clientId
13359
+ },
13360
+ payload: {
13361
+ logs: this.logs
13362
+ }
13363
+ };
13364
+ this.sendMessage(response);
13365
+ }
13366
+ /**
13367
+ * Get all collected logs
13368
+ */
13369
+ getLogs() {
13370
+ return [...this.logs];
13371
+ }
13372
+ /**
13373
+ * Clear all logs
13374
+ */
13375
+ clearLogs() {
13376
+ this.logs = [];
13377
+ }
13378
+ /**
13379
+ * Set uiBlockId (in case it's provided later)
13380
+ */
13381
+ setUIBlockId(uiBlockId) {
13382
+ this.uiBlockId = uiBlockId;
13383
+ }
13384
+ };
13385
+
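
A short usage sketch for the collector defined above; the sendMessage stub and the ids are hypothetical, in the SDK the callback would be the websocket sender and uiBlockId would come from the incoming request:

    // Hypothetical wiring: each call is filtered by the current log level,
    // stored on the collector, and immediately forwarded as a UI_LOGS message.
    const sendMessage = (message) => console.log(JSON.stringify(message));
    const collector = new UILogCollector("client_42", sendMessage, "uiblock_abc");
    collector.info("Starting request processing", "status");
    collector.logQuery(
      "Generated query",
      "SELECT name, revenue FROM customers WHERE region = $region LIMIT 10",
      { params: { region: "EMEA" } }
    );
    collector.sendAllLogs(); // optional final flush; each log was already sent individually
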
13555
13386
  // src/services/cleanup-service.ts
13556
13387
  var CleanupService = class _CleanupService {
13557
13388
  constructor() {
@@ -13732,7 +13563,6 @@ var CleanupService = class _CleanupService {
13732
13563
  };
13733
13564
 
13734
13565
  // src/index.ts
13735
- var SDK_VERSION = "0.0.8";
13736
13566
  var DEFAULT_WS_URL = "wss://ws.superatom.ai/websocket";
13737
13567
  var SuperatomSDK = class {
13738
13568
  // 3.5 minutes (PING_INTERVAL + 30s grace)
@@ -13773,7 +13603,7 @@ var SuperatomSDK = class {
13773
13603
  if (config.queryCacheTTL !== void 0) {
13774
13604
  queryCache.setTTL(config.queryCacheTTL);
13775
13605
  }
13776
- logger.info(`Initializing Superatom SDK v${SDK_VERSION} for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, database type: ${this.databaseType}, model strategy: ${this.modelStrategy}, conversation similarity threshold: ${this.conversationSimilarityThreshold}, query cache TTL: ${queryCache.getTTL()} minutes`);
13606
+ logger.info(`Initializing Superatom SDK for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, database type: ${this.databaseType}, model strategy: ${this.modelStrategy}, query cache TTL: ${queryCache.getTTL()} minutes`);
13777
13607
  this.userManager = new UserManager(this.projectId, 5e3);
13778
13608
  this.dashboardManager = new DashboardManager(this.projectId);
13779
13609
  this.reportManager = new ReportManager(this.projectId);
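
For reference, a hedged construction sketch for the SDK: queryCacheTTL is read from the config object in the constructor above, while the remaining keys (projectId, llmProviders, databaseType, modelStrategy) are assumed to mirror the instance fields they appear to back and may be named differently in the actual typings:

    // Hypothetical setup; only queryCacheTTL is confirmed as a config key in this hunk.
    const sdk = new SuperatomSDK({
      projectId: "proj_123",                  // assumed key
      llmProviders: ["anthropic", "gemini"],  // assumed key; order doubles as fallback order
      databaseType: "postgres",               // assumed key and value
      modelStrategy: "default",               // assumed key and value
      queryCacheTTL: 30                       // minutes, per the constructor log line
    });
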
@@ -13827,7 +13657,6 @@ var SuperatomSDK = class {
13827
13657
  */
13828
13658
  initializeDashboardManager() {
13829
13659
  setDashboardManager(this.dashboardManager);
13830
- logger.info(`DashboardManager initialized for project: ${this.projectId}`);
13831
13660
  }
13832
13661
  /**
13833
13662
  * Get the DashboardManager instance for this SDK
@@ -13840,7 +13669,6 @@ var SuperatomSDK = class {
13840
13669
  */
13841
13670
  initializeReportManager() {
13842
13671
  setReportManager(this.reportManager);
13843
- logger.info(`ReportManager initialized for project: ${this.projectId}`);
13844
13672
  }
13845
13673
  /**
13846
13674
  * Get the ReportManager instance for this SDK
@@ -14217,7 +14045,6 @@ export {
14217
14045
  CONTEXT_CONFIG,
14218
14046
  CleanupService,
14219
14047
  LLM,
14220
- SDK_VERSION,
14221
14048
  STORAGE_CONFIG,
14222
14049
  SuperatomSDK,
14223
14050
  Thread,