@superatomai/sdk-node 0.0.72 → 0.0.75

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -34,7 +34,6 @@ __export(index_exports, {
  CONTEXT_CONFIG: () => CONTEXT_CONFIG,
  CleanupService: () => CleanupService,
  LLM: () => LLM,
- SDK_VERSION: () => SDK_VERSION,
  STORAGE_CONFIG: () => STORAGE_CONFIG,
  SuperatomSDK: () => SuperatomSDK,
  Thread: () => Thread,
@@ -3368,24 +3367,20 @@ If adaptation is not possible or would fundamentally change the component:
  "dash-comp-picker": {
  system: `You are a component selection expert that picks the best dashboard component and generates complete props based on user requests.

+ ## CRITICAL - READ FIRST
+
+ 1. Your ENTIRE response must be ONLY a raw JSON object - start with { end with }
+ 2. DO NOT explain or answer the user's question in natural language
+ 3. DO NOT use markdown code blocks (no \`\`\`)
+ 4. DO NOT add any text before or after the JSON
+ 5. After executing tools (if needed), return JSON with component selection - NOT a text summary of results
+
  ## Your Task

  Analyze the user's request and:
  1. **Select the most appropriate component** from the available components list
- 2. **Determine the data source**: Database query OR External tool
- 3. **Generate complete props** for the selected component
-
- ## Available External Tools
-
- The following external tools are available:
-
- {{AVAILABLE_TOOLS}}
-
- When a tool is needed to complete the user's request:
- 1. **Analyze the request** to determine which tool is needed
- 2. **Extract parameters** from the user's question
- 3. **Execute the tool** by calling it with the extracted parameters
- 4. **Use the results** to configure the component (field names for axes, columns, etc.)
+ 2. **Determine the data source**: Database query OR External tool (ERP)
+ 3. **Generate complete props** for the selected component including the data retrieval/modification method

  ## Component Selection Rules

@@ -3415,7 +3410,7 @@ The user prompt may contain an **existing component** to update. Detect this by

  ### Use DATABASE when:
  - User asks about data that exists in the database schema
- - Questions about internal business data
+ - Questions about internal business data
  - CRUD operations on database tables

  ### Use EXTERNAL TOOL when:
@@ -3428,6 +3423,12 @@ The user prompt may contain an **existing component** to update. Detect this by

  **CRITICAL**: Look at each component's "Props Structure" in the available components list. Generate ALL props that the component expects.

+ **CRITICAL: Each component uses EXACTLY ONE data source - never both!**
+ - If using \`query\`, set \`externalTool: null\`
+ - If using \`externalTool\`, set \`query: null\`
+ - NEVER copy placeholder/description text from component metadata as actual values
+ - \`externalTool.parameters\` MUST be an object, never a string
+
  ### For Data Viewing Components (charts, tables, KPIs):

  **Option A: Database Query** (when data is in database)
@@ -3436,21 +3437,19 @@ The user prompt may contain an **existing component** to update. Detect this by
  "query": {
  "sql": "SELECT column1, column2 FROM table WHERE condition = $param LIMIT 32",
  "params": { "param": "value" }
- }
+ },
+ "externalTool": null
  }
  \`\`\`

  **Option B: External Tool** (when data is from ERP/external system)
  \`\`\`json
  {
+ "query": null,
  "externalTool": {
  "toolId": "tool_id_from_list",
  "toolName": "Tool Display Name",
- "action": "get",
- "params": {
- "param1": "value1",
- "param2": "value2"
- }
+ "parameters": { "param1": "value1", "param2": "value2" }
  }
  }
  \`\`\`
@@ -3464,6 +3463,7 @@ The user prompt may contain an **existing component** to update. Detect this by
  "sql": "INSERT INTO table (col1, col2) VALUES ($col1, $col2)",
  "params": {}
  },
+ "externalTool": null,
  "fields": [
  { "name": "col1", "type": "text", "required": true },
  { "name": "col2", "type": "number", "required": false }
@@ -3471,16 +3471,38 @@ The user prompt may contain an **existing component** to update. Detect this by
  }
  \`\`\`

+ For UPDATE:
+ \`\`\`json
+ {
+ "query": {
+ "sql": "UPDATE table SET col1 = $col1, col2 = $col2 WHERE id = $id",
+ "params": { "id": "record_id" }
+ },
+ "externalTool": null
+ }
+ \`\`\`
+
+ For DELETE:
+ \`\`\`json
+ {
+ "query": {
+ "sql": "DELETE FROM table WHERE id = $id",
+ "params": { "id": "record_id" }
+ },
+ "externalTool": null,
+ "submitButtonText": "Confirm Delete",
+ "submitButtonColor": "danger"
+ }
+ \`\`\`
+
  **Option B: External Tool Mutation**
  \`\`\`json
  {
+ "query": null,
  "externalTool": {
  "toolId": "tool_id_from_list",
  "toolName": "Tool Display Name",
- "action": "create|update|delete",
- "params": {
- "param1": "value_or_placeholder"
- }
+ "parameters": { "param1": "value_or_placeholder" }
  },
  "fields": [
  { "name": "param1", "type": "text", "required": true }
@@ -3495,6 +3517,7 @@ The user prompt may contain an **existing component** to update. Detect this by

  You MUST respond with ONLY a valid JSON object (no markdown, no code blocks):

+ \`\`\`json
  {
  "componentId": "id_from_available_list_or_existing_component_id",
  "componentName": "name_of_component",
@@ -3509,6 +3532,7 @@ You MUST respond with ONLY a valid JSON object (no markdown, no code blocks):
  // Include all other required props (title, description, config, fields, etc.)
  }
  }
+ \`\`\`

  **CRITICAL:**
  - Return ONLY valid JSON (no markdown code blocks, no text before/after)
@@ -3531,7 +3555,8 @@ You MUST respond with ONLY a valid JSON object (no markdown, no code blocks):

  ---

- ## CONTEXT`,
+ ## CONTEXT
+ `,
  user: `{{USER_PROMPT}}`
  },
  "dash-filter-picker": {
@@ -3677,9 +3702,7 @@ var PromptLoader = class {
  this.databaseRulesCache = /* @__PURE__ */ new Map();
  this.isInitialized = false;
  this.databaseType = "postgresql";
- logger.debug("Initializing PromptLoader...");
  this.promptsDir = config?.promptsDir || import_path.default.join(process.cwd(), ".prompts");
- logger.debug(`Prompts directory set to: ${this.promptsDir}`);
  }
  /**
  * Load a prompt template from file system OR fallback to hardcoded prompts
@@ -3693,7 +3716,6 @@ var PromptLoader = class {
  if (import_fs2.default.existsSync(systemPath) && import_fs2.default.existsSync(userPath)) {
  const system = import_fs2.default.readFileSync(systemPath, "utf-8");
  const user = import_fs2.default.readFileSync(userPath, "utf-8");
- logger.info(`\u2713 Loaded prompt '${promptName}' from file system: ${this.promptsDir}`);
  return { system, user };
  }
  } catch (error) {
@@ -3701,7 +3723,6 @@ var PromptLoader = class {
  }
  const hardcodedPrompt = PROMPTS[promptName];
  if (hardcodedPrompt) {
- logger.info(`\u2713 Loaded prompt '${promptName}' from hardcoded fallback`);
  return hardcodedPrompt;
  }
  throw new Error(`Prompt template '${promptName}' not found in either ${this.promptsDir} or hardcoded prompts. Available prompts: ${Object.keys(PROMPTS).join(", ")}`);
@@ -3715,7 +3736,6 @@ var PromptLoader = class {
  logger.debug("PromptLoader already initialized, skipping...");
  return;
  }
- logger.info("Loading prompts into memory...");
  const promptTypes = Object.keys(PROMPTS);
  for (const promptName of promptTypes) {
  try {
@@ -3727,7 +3747,6 @@ var PromptLoader = class {
  }
  }
  this.isInitialized = true;
- logger.info(`Successfully loaded ${this.promptCache.size} prompt templates into memory`);
  }
  /**
  * Replace variables in a template string using {{VARIABLE_NAME}} pattern
@@ -3767,7 +3786,6 @@ var PromptLoader = class {
  const processedContext = this.replaceVariables(contextMarker + contextPart, variables);
  const staticLength = processedStatic.length;
  const contextLength = processedContext.length;
- logger.debug(`\u2713 Prompt caching enabled for '${promptName}' (cached: ${staticLength} chars, dynamic: ${contextLength} chars)`);
  return {
  system: [
  {
@@ -3804,7 +3822,6 @@ var PromptLoader = class {
  this.promptsDir = dir;
  this.isInitialized = false;
  this.promptCache.clear();
- logger.debug(`Prompts directory changed to: ${dir}`);
  }
  /**
  * Get current prompts directory
@@ -3832,7 +3849,6 @@ var PromptLoader = class {
  setDatabaseType(type) {
  this.databaseType = type;
  this.databaseRulesCache.clear();
- logger.debug(`Database type set to: ${type}`);
  }
  /**
  * Get current database type
@@ -3848,7 +3864,6 @@ var PromptLoader = class {
  */
  async loadDatabaseRules() {
  if (this.databaseRulesCache.has(this.databaseType)) {
- logger.debug(`\u2713 Database rules for '${this.databaseType}' loaded from cache`);
  return this.databaseRulesCache.get(this.databaseType);
  }
  const rulesPath = import_path.default.join(this.promptsDir, "database-rules", `${this.databaseType}.md`);
@@ -3856,7 +3871,6 @@ var PromptLoader = class {
  if (import_fs2.default.existsSync(rulesPath)) {
  const rules = import_fs2.default.readFileSync(rulesPath, "utf-8");
  this.databaseRulesCache.set(this.databaseType, rules);
- logger.info(`\u2713 Loaded database rules for '${this.databaseType}' from ${rulesPath}`);
  return rules;
  }
  } catch (error) {
@@ -4132,7 +4146,6 @@ var Schema = class {
  * @returns Parsed schema object or null if error occurs
  */
  getDatabaseSchema() {
- logger.info(`SCHEMA_FILE_PATH: ${this.schemaFilePath}`);
  try {
  const dir = import_path2.default.dirname(this.schemaFilePath);
  if (!import_fs3.default.existsSync(dir)) {
@@ -4401,14 +4414,6 @@ Format: [TIMESTAMP] [REQUEST_ID] [PROVIDER/MODEL] [METHOD]
  Cost: $${entry.costUSD.toFixed(6)} | Time: ${entry.durationMs}ms${toolInfo}${errorInfo}${cacheStatus}
  `;
  this.logStream?.write(logLine);
- if (entry.cacheReadTokens && entry.cacheReadTokens > 0) {
- console.log(`[LLM] \u26A1 CACHE HIT: ${entry.cacheReadTokens.toLocaleString()} tokens read from cache (${entry.method})`);
- } else if (entry.cacheWriteTokens && entry.cacheWriteTokens > 0) {
- console.log(`[LLM] \u{1F4DD} CACHE WRITE: ${entry.cacheWriteTokens.toLocaleString()} tokens cached for future requests (${entry.method})`);
- }
- if (process.env.SUPERATOM_LOG_LEVEL === "verbose") {
- console.log("\n[LLM-Usage]", logLine);
- }
  }
  /**
  * Log session summary (call at end of request)
@@ -4441,11 +4446,6 @@ Avg Time/Call: ${Math.round(this.sessionStats.totalDurationMs / this.sessionStat

  `;
  this.logStream?.write(summary);
- console.log("\n[LLM-Usage] Session Summary:");
- console.log(` Calls: ${this.sessionStats.totalCalls} | Tokens: ${(this.sessionStats.totalInputTokens + this.sessionStats.totalOutputTokens).toLocaleString()} | Cost: $${this.sessionStats.totalCostUSD.toFixed(4)} | Time: ${(this.sessionStats.totalDurationMs / 1e3).toFixed(2)}s`);
- if (hasCaching) {
- console.log(` Cache: ${this.sessionStats.totalCacheReadTokens.toLocaleString()} read, ${this.sessionStats.totalCacheWriteTokens.toLocaleString()} written | Savings: ~$${cacheReadSavings.toFixed(4)}`);
- }
  }
  /**
  * Reset session stats (call at start of new user request)
@@ -4486,7 +4486,6 @@ Format: [TIMESTAMP] [REQUEST_ID] [PROVIDER/MODEL] [METHOD]
  `;
  this.logStream.write(header);
  this.resetSession();
- console.log(`[LLM-Usage] Log file reset for new request: ${this.logPath}`);
  } catch (error) {
  console.error("[LLM-Usage-Logger] Failed to reset log file:", error);
  }
@@ -5401,14 +5400,97 @@ var LLM = class {
  const genAI = new import_generative_ai.GoogleGenerativeAI(apiKey);
  const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
  try {
+ if (json && options.partial) {
+ const model2 = genAI.getGenerativeModel({
+ model: modelName,
+ systemInstruction: systemPrompt,
+ generationConfig: {
+ maxOutputTokens: options.maxTokens || 1e3,
+ temperature: options.temperature,
+ topP: options.topP,
+ responseMimeType: "application/json"
+ }
+ });
+ const result2 = await model2.generateContentStream(messages.user);
+ let fullText2 = "";
+ let inputTokens2 = 0;
+ let outputTokens2 = 0;
+ for await (const chunk of result2.stream) {
+ try {
+ const text = chunk.text();
+ if (text) {
+ fullText2 += text;
+ options.partial(text);
+ }
+ } catch (chunkError) {
+ }
+ if (chunk.usageMetadata) {
+ inputTokens2 = chunk.usageMetadata.promptTokenCount || 0;
+ outputTokens2 = chunk.usageMetadata.candidatesTokenCount || 0;
+ }
+ }
+ const durationMs2 = Date.now() - startTime;
+ if (inputTokens2 === 0) {
+ inputTokens2 = Math.ceil((systemPrompt.length + messages.user.length) / 4);
+ }
+ if (outputTokens2 === 0) {
+ outputTokens2 = Math.ceil(fullText2.length / 4);
+ }
+ llmUsageLogger.log({
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+ requestId,
+ provider: "gemini",
+ model: modelName,
+ method: "stream-json-partial",
+ inputTokens: inputTokens2,
+ outputTokens: outputTokens2,
+ totalTokens: inputTokens2 + outputTokens2,
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens2, outputTokens2),
+ durationMs: durationMs2,
+ success: true
+ });
+ return this._parseJSON(fullText2);
+ }
+ if (json) {
+ const model2 = genAI.getGenerativeModel({
+ model: modelName,
+ systemInstruction: systemPrompt,
+ generationConfig: {
+ maxOutputTokens: options.maxTokens || 1e3,
+ temperature: options.temperature,
+ topP: options.topP,
+ responseMimeType: "application/json"
+ }
+ });
+ const result2 = await model2.generateContent(messages.user);
+ const response = result2.response;
+ const fullText2 = response.text();
+ const durationMs2 = Date.now() - startTime;
+ const usage = response.usageMetadata;
+ const inputTokens2 = usage?.promptTokenCount || Math.ceil((systemPrompt.length + messages.user.length) / 4);
+ const outputTokens2 = usage?.candidatesTokenCount || Math.ceil(fullText2.length / 4);
+ llmUsageLogger.log({
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+ requestId,
+ provider: "gemini",
+ model: modelName,
+ method: "stream-json",
+ inputTokens: inputTokens2,
+ outputTokens: outputTokens2,
+ totalTokens: inputTokens2 + outputTokens2,
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens2, outputTokens2),
+ durationMs: durationMs2,
+ success: true
+ });
+ return this._parseJSON(fullText2);
+ }
  const model = genAI.getGenerativeModel({
  model: modelName,
  systemInstruction: systemPrompt,
  generationConfig: {
  maxOutputTokens: options.maxTokens || 1e3,
  temperature: options.temperature,
- topP: options.topP,
- responseMimeType: json ? "application/json" : void 0
+ topP: options.topP
  }
  });
  const result = await model.generateContentStream(messages.user);
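For reference, a minimal standalone sketch of the JSON-only Gemini call that the added branches above configure: request `application/json` up front via `generationConfig.responseMimeType`, then parse the returned text. The model name and prompt below are placeholders for illustration, not values taken from the SDK.

```js
import { GoogleGenerativeAI } from "@google/generative-ai";

// Placeholder model name and prompt, for illustration only.
const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || "");
const model = genAI.getGenerativeModel({
  model: "gemini-1.5-flash",
  systemInstruction: "Respond with a raw JSON object only.",
  generationConfig: { responseMimeType: "application/json" },
});

const result = await model.generateContent('Return {"ok": true} as JSON.');
// Equivalent of parsing the accumulated text (_parseJSON) in the code above.
const parsed = JSON.parse(result.response.text());
console.log(parsed);
```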
@@ -5416,12 +5498,15 @@ var LLM = class {
  let inputTokens = 0;
  let outputTokens = 0;
  for await (const chunk of result.stream) {
- const text = chunk.text();
- if (text) {
- fullText += text;
- if (options.partial) {
- options.partial(text);
+ try {
+ const text = chunk.text();
+ if (text) {
+ fullText += text;
+ if (options.partial) {
+ options.partial(text);
+ }
  }
+ } catch (chunkError) {
  }
  if (chunk.usageMetadata) {
  inputTokens = chunk.usageMetadata.promptTokenCount || 0;
@@ -5448,9 +5533,6 @@ var LLM = class {
  durationMs,
  success: true
  });
- if (json) {
- return this._parseJSON(fullText);
- }
  return fullText;
  } catch (error) {
  const durationMs = Date.now() - startTime;
@@ -5471,6 +5553,26 @@ var LLM = class {
  throw error;
  }
  }
+ /**
+ * Recursively strip unsupported JSON Schema properties for Gemini
+ * Gemini doesn't support: additionalProperties, $schema, etc.
+ */
+ static _cleanSchemaForGemini(obj) {
+ if (obj === null || typeof obj !== "object") {
+ return obj;
+ }
+ if (Array.isArray(obj)) {
+ return obj.map((item) => this._cleanSchemaForGemini(item));
+ }
+ const cleaned = {};
+ for (const [key, value] of Object.entries(obj)) {
+ if (key === "additionalProperties" || key === "$schema") {
+ continue;
+ }
+ cleaned[key] = this._cleanSchemaForGemini(value);
+ }
+ return cleaned;
+ }
  static async _geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
  const methodStartTime = Date.now();
  const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
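A small sketch of what the added `_cleanSchemaForGemini` helper does to a typical tool `input_schema` before it reaches Gemini's function-calling config. The sample schema below is illustrative only, not taken from the package.

```js
// Mirrors the helper added above: recursively drop keys Gemini rejects.
function cleanSchemaForGemini(obj) {
  if (obj === null || typeof obj !== "object") return obj;
  if (Array.isArray(obj)) return obj.map(cleanSchemaForGemini);
  const cleaned = {};
  for (const [key, value] of Object.entries(obj)) {
    if (key === "additionalProperties" || key === "$schema") continue;
    cleaned[key] = cleanSchemaForGemini(value);
  }
  return cleaned;
}

// Illustrative input: a JSON Schema carrying fields Gemini does not accept.
const properties = cleanSchemaForGemini({
  sql: { type: "string", $schema: "http://json-schema.org/draft-07/schema#" },
  params: { type: "object", additionalProperties: true },
});
// => { sql: { type: "string" }, params: { type: "object" } }
```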
@@ -5481,7 +5583,7 @@ var LLM = class {
  description: tool.description,
  parameters: {
  type: import_generative_ai.SchemaType.OBJECT,
- properties: tool.input_schema.properties,
+ properties: this._cleanSchemaForGemini(tool.input_schema.properties),
  required: tool.input_schema.required || []
  }
  }));
@@ -5960,21 +6062,20 @@ var getKnowledgeBase = async ({
  }) => {
  try {
  if (!collections || !collections["knowledge-base"] || !collections["knowledge-base"]["query"]) {
- logger.info("[KnowledgeBase] knowledge-base.query collection not registered, skipping");
+ logger.warn("[KnowledgeBase] knowledge-base.query collection not registered, skipping");
  return "";
  }
- logger.info(`[KnowledgeBase] Querying knowledge base for: "${prompt.substring(0, 50)}..."`);
  const result = await collections["knowledge-base"]["query"]({
  prompt,
  topK
  });
  if (!result || !result.content) {
- logger.info("[KnowledgeBase] No knowledge base results returned");
+ logger.warn("[KnowledgeBase] No knowledge base results returned");
  return "";
  }
  logger.info(`[KnowledgeBase] Retrieved knowledge base context (${result.content.length} chars)`);
  if (result.metadata?.sources && result.metadata.sources.length > 0) {
- logger.debug(`[KnowledgeBase] Sources: ${result.metadata.sources.map((s) => s.title).join(", ")}`);
+ logger.warn(`[KnowledgeBase] Sources: ${result.metadata.sources.map((s) => s.title).join(", ")}`);
  }
  return result.content;
  } catch (error) {
@@ -5989,13 +6090,12 @@ var getGlobalKnowledgeBase = async ({
  }) => {
  try {
  if (!collections || !collections["knowledge-base"] || !collections["knowledge-base"]["getGlobal"]) {
- logger.info("[KnowledgeBase] knowledge-base.getGlobal collection not registered, skipping");
+ logger.warn("[KnowledgeBase] knowledge-base.getGlobal collection not registered, skipping");
  return "";
  }
- logger.info("[KnowledgeBase] Fetching global knowledge base nodes...");
  const result = await collections["knowledge-base"]["getGlobal"]({ limit });
  if (!result || !result.content) {
- logger.info("[KnowledgeBase] No global knowledge base nodes found");
+ logger.warn("[KnowledgeBase] No global knowledge base nodes found");
  return "";
  }
  logger.info(`[KnowledgeBase] Retrieved ${result.count || 0} global knowledge base nodes`);
@@ -6013,14 +6113,13 @@ var getUserKnowledgeBase = async ({
  }) => {
  try {
  if (!userId) {
- logger.info("[KnowledgeBase] No userId provided, skipping user knowledge base");
+ logger.warn("[KnowledgeBase] No userId provided, skipping user knowledge base");
  return "";
  }
  if (!collections || !collections["knowledge-base"] || !collections["knowledge-base"]["getByUser"]) {
- logger.info("[KnowledgeBase] knowledge-base.getByUser collection not registered, skipping");
+ logger.warn("[KnowledgeBase] knowledge-base.getByUser collection not registered, skipping");
  return "";
  }
- logger.info(`[KnowledgeBase] Fetching user knowledge base nodes for userId: ${userId}...`);
  const result = await collections["knowledge-base"]["getByUser"]({
  userId: Number(userId),
  limit
@@ -6043,7 +6142,6 @@ var getAllKnowledgeBase = async ({
  userId,
  topK = 3
  }) => {
- logger.info("[KnowledgeBase] Fetching all knowledge base contexts...");
  const [globalContext, userContext, queryContext] = await Promise.all([
  getGlobalKnowledgeBase({ collections }),
  getUserKnowledgeBase({ collections, userId }),
@@ -6065,7 +6163,6 @@ var getAllKnowledgeBase = async ({
  combinedContext += "The following information is semantically relevant to the current query:\n\n";
  combinedContext += queryContext + "\n\n";
  }
- logger.info(`[KnowledgeBase] Combined knowledge base context: global=${globalContext.length} chars, user=${userContext.length} chars, query=${queryContext.length} chars`);
  return {
  globalContext,
  userContext,
@@ -6302,11 +6399,11 @@ var searchConversationsWithReranking = async (options) => {
  } = options;
  try {
  if (!collections || !collections["conversation-history"]) {
- logger.info("[ConversationSearch] conversation-history collection not registered, skipping");
+ logger.warn("[ConversationSearch] conversation-history collection not registered, skipping");
  return null;
  }
  if (!collections["conversation-history"]["searchMultiple"]) {
- logger.info("[ConversationSearch] searchMultiple not available, falling back to standard search");
+ logger.warn("[ConversationSearch] searchMultiple not available, falling back to standard search");
  return searchConversations({
  userPrompt,
  collections,
@@ -6314,9 +6411,6 @@ var searchConversationsWithReranking = async (options) => {
  similarityThreshold
  });
  }
- logger.info(`[ConversationSearch] Hybrid search for: "${userPrompt.substring(0, 50)}..."`);
- logger.info(`[ConversationSearch] Fetching ${rerankCandidates} candidates for reranking`);
- logger.info(`[ConversationSearch] Weights - Semantic: ${hybridOptions.semanticWeight}, BM25: ${hybridOptions.bm25Weight}`);
  const results = await collections["conversation-history"]["searchMultiple"]({
  userPrompt,
  userId,
@@ -6357,7 +6451,6 @@ var searchConversationsWithReranking = async (options) => {
  logger.info(
  `[ConversationSearch] \u2713 Found match with semantic score ${(semanticScore * 100).toFixed(2)}%`
  );
- logger.info(` - Returning cached result for: "${matchedUserPrompt}"`);
  return {
  uiBlock: best.uiBlock,
  similarity: semanticScore,
@@ -6799,10 +6892,9 @@ Fixed SQL query:`;
  * @param component - The component to validate
  * @param collections - Collections object containing database execute function
  * @param apiKey - Optional API key for LLM calls
- * @param logCollector - Optional log collector for logging
  * @returns Validation result with component, query key, and result
  */
- async validateSingleQuery(component, collections, apiKey, logCollector) {
+ async validateSingleQuery(component, collections, apiKey) {
  const query = component.props?.query;
  const originalQueryKey = this.getQueryCacheKey(query);
  const queryStr = typeof query === "string" ? query : query?.sql || "";
@@ -6823,7 +6915,6 @@ Fixed SQL query:`;
  validated = true;
  queryCache.set(validationResult.cacheKey, result);
  logger.info(`[${this.config.providerName}] \u2713 Query validated for ${component.name} (attempt ${attempts}) - cached for frontend`);
- logCollector?.info(`\u2713 Query validated for ${component.name}`);
  if (currentQueryStr !== queryStr) {
  const fixedQuery = typeof query === "string" ? currentQueryStr : { ...query, sql: currentQueryStr };
  component.props = {
@@ -6836,14 +6927,11 @@ Fixed SQL query:`;
  } catch (error) {
  lastError = error instanceof Error ? error.message : String(error);
  logger.warn(`[${this.config.providerName}] Query validation failed for ${component.name} (attempt ${attempts}/${MAX_QUERY_VALIDATION_RETRIES}): ${lastError}`);
- logCollector?.warn(`Query validation failed for ${component.name}: ${lastError}`);
  if (attempts >= MAX_QUERY_VALIDATION_RETRIES) {
  logger.error(`[${this.config.providerName}] \u2717 Max retries reached for ${component.name}, excluding from response`);
- logCollector?.error(`Max retries reached for ${component.name}, component excluded from response`);
  break;
  }
  logger.info(`[${this.config.providerName}] Requesting query fix from LLM for ${component.name}...`);
- logCollector?.info(`Requesting query fix for ${component.name}...`);
  try {
  const fixedQueryStr = await this.requestQueryFix(
  currentQueryStr,
@@ -6877,7 +6965,6 @@ Fixed SQL query:`;
  }
  if (!validated) {
  logger.warn(`[${this.config.providerName}] Component ${component.name} excluded from response due to failed query validation`);
- logCollector?.warn(`Component ${component.name} excluded from response`);
  }
  return {
  component: validated ? component : null,
@@ -6891,10 +6978,9 @@ Fixed SQL query:`;
  * @param components - Array of components with potential queries
  * @param collections - Collections object containing database execute function
  * @param apiKey - Optional API key for LLM calls
- * @param logCollector - Optional log collector for logging
  * @returns Object with validated components and query results map
  */
- async validateComponentQueries(components, collections, apiKey, logCollector) {
+ async validateComponentQueries(components, collections, apiKey) {
  const queryResults = /* @__PURE__ */ new Map();
  const validatedComponents = [];
  const componentsWithoutQuery = [];
@@ -6911,9 +6997,8 @@ Fixed SQL query:`;
  return { components: validatedComponents, queryResults };
  }
  logger.info(`[${this.config.providerName}] Validating ${componentsWithQuery.length} component queries in parallel...`);
- logCollector?.info(`Validating ${componentsWithQuery.length} component queries in parallel...`);
  const validationPromises = componentsWithQuery.map(
- (component) => this.validateSingleQuery(component, collections, apiKey, logCollector)
+ (component) => this.validateSingleQuery(component, collections, apiKey)
  );
  const results = await Promise.allSettled(validationPromises);
  for (let i = 0; i < results.length; i++) {
@@ -6930,7 +7015,6 @@ Fixed SQL query:`;
  }
  } else {
  logger.error(`[${this.config.providerName}] Unexpected error validating ${component.name}: ${result.reason}`);
- logCollector?.error(`Unexpected error validating ${component.name}: ${result.reason}`);
  }
  }
  logger.info(`[${this.config.providerName}] Parallel validation complete: ${validatedComponents.length}/${components.length} components validated`);
@@ -6992,22 +7076,17 @@ var ToolExecutorService = class {
  let sql = toolInput.sql;
  const params = toolInput.params || {};
  const reasoning = toolInput.reasoning;
- const { streamBuffer, collections, logCollector, providerName } = this.config;
+ const { streamBuffer, collections, providerName } = this.config;
  sql = ensureQueryLimit(sql, MAX_COMPONENT_QUERY_LIMIT, MAX_COMPONENT_QUERY_LIMIT);
  const queryKey = sql.toLowerCase().replace(/\s+/g, " ").trim();
  const attempts = (this.queryAttempts.get(queryKey) || 0) + 1;
  this.queryAttempts.set(queryKey, attempts);
- logger.info(`[${providerName}] Executing query (attempt ${attempts}/${MAX_QUERY_ATTEMPTS}): ${sql.substring(0, 100)}...`);
  if (Object.keys(params).length > 0) {
  logger.info(`[${providerName}] Query params: ${JSON.stringify(params)}`);
  }
- if (reasoning) {
- logCollector?.info(`Query reasoning: ${reasoning}`);
- }
  if (attempts > MAX_QUERY_ATTEMPTS) {
  const errorMsg = `Maximum query attempts (${MAX_QUERY_ATTEMPTS}) reached. Unable to generate a valid query for your question.`;
  logger.error(`[${providerName}] ${errorMsg}`);
- logCollector?.error(errorMsg);
  this.maxAttemptsReached = true;
  if (streamBuffer.hasCallback()) {
  streamBuffer.write(`
@@ -7067,11 +7146,6 @@ ${sql}
  await streamDelay();
  }
  }
- logCollector?.logQuery?.(
- `Executing SQL query (attempt ${attempts})`,
- { sql, params },
- { reasoning, attempt: attempts }
- );
  if (!collections?.["database"]?.["execute"]) {
  throw new Error("Database collection not registered. Please register database.execute collection to execute queries.");
  }
@@ -7083,8 +7157,6 @@ ${sql}
  );
  const data = result?.data || result;
  const rowCount = result?.count ?? (Array.isArray(data) ? data.length : "N/A");
- logger.info(`[${providerName}] Query executed successfully, rows returned: ${rowCount}`);
- logCollector?.info(`Query successful, returned ${rowCount} rows`);
  if (streamBuffer.hasCallback()) {
  streamBuffer.write(`
  \u2705 **Query executed successfully!**
@@ -7133,7 +7205,6 @@ ${sql}
  maxRows: DEFAULT_MAX_ROWS_FOR_LLM,
  maxCharsPerField: DEFAULT_MAX_CHARS_PER_FIELD2
  });
- logger.info(`[${providerName}] Query result formatted: ${formattedResult.summary.recordsShown}/${formattedResult.summary.totalRecords} records`);
  if (formattedResult.truncationNote) {
  logger.info(`[${providerName}] Truncation: ${formattedResult.truncationNote}`);
  }
@@ -7141,7 +7212,6 @@ ${sql}
  } catch (error) {
  const errorMsg = error instanceof Error ? error.message : String(error);
  logger.error(`[${providerName}] Query execution failed (attempt ${attempts}/${MAX_QUERY_ATTEMPTS}): ${errorMsg}`);
- logCollector?.error(`Query failed (attempt ${attempts}/${MAX_QUERY_ATTEMPTS}): ${errorMsg}`);
  userPromptErrorLogger.logSqlError(sql, error instanceof Error ? error : new Error(errorMsg), Object.keys(params).length > 0 ? Object.values(params) : void 0);
  if (streamBuffer.hasCallback()) {
  streamBuffer.write(`\u274C **Query execution failed:**
@@ -7163,19 +7233,16 @@ ${errorMsg}
  * Execute an external tool with retry tracking and streaming feedback
  */
  async executeExternalTool(toolName, toolInput, externalTools) {
- const { streamBuffer, logCollector, providerName } = this.config;
+ const { streamBuffer, providerName } = this.config;
  const externalTool = externalTools?.find((t) => t.id === toolName);
  if (!externalTool) {
  throw new Error(`Unknown tool: ${toolName}`);
  }
  const attempts = (this.toolAttempts.get(toolName) || 0) + 1;
  this.toolAttempts.set(toolName, attempts);
- logger.info(`[${providerName}] Executing external tool: ${externalTool.name} (attempt ${attempts}/${MAX_TOOL_ATTEMPTS})`);
- logCollector?.info(`Executing external tool: ${externalTool.name} (attempt ${attempts}/${MAX_TOOL_ATTEMPTS})...`);
  if (attempts > MAX_TOOL_ATTEMPTS) {
  const errorMsg = `Maximum attempts (${MAX_TOOL_ATTEMPTS}) reached for tool: ${externalTool.name}`;
  logger.error(`[${providerName}] ${errorMsg}`);
- logCollector?.error(errorMsg);
  if (streamBuffer.hasCallback()) {
  streamBuffer.write(`
 
@@ -7210,8 +7277,6 @@ Please try rephrasing your request or contact support.
  `Running ${externalTool.name}`,
  streamBuffer
  );
- logger.info(`[${providerName}] External tool ${externalTool.name} executed successfully`);
- logCollector?.info(`\u2713 ${externalTool.name} executed successfully`);
  if (!this.executedToolsList.find((t) => t.id === externalTool.id)) {
  const formattedForTracking = formatToolResultForLLM(result, {
  toolName: externalTool.name,
@@ -7231,7 +7296,6 @@ Please try rephrasing your request or contact support.
  },
  outputSchema: externalTool.outputSchema
  });
- logger.info(`[${providerName}] Tracked executed tool: ${externalTool.name} with ${formattedForTracking.summary.totalRecords} total records`);
  }
  if (streamBuffer.hasCallback()) {
  streamBuffer.write(`\u2705 **${externalTool.name} completed successfully**
@@ -7245,7 +7309,6 @@ Please try rephrasing your request or contact support.
  maxRows: DEFAULT_MAX_ROWS_FOR_LLM,
  maxCharsPerField: DEFAULT_MAX_CHARS_PER_FIELD2
  });
- logger.info(`[${providerName}] Tool result formatted: ${formattedToolResult.summary.recordsShown}/${formattedToolResult.summary.totalRecords} records`);
  if (formattedToolResult.truncationNote) {
  logger.info(`[${providerName}] Truncation: ${formattedToolResult.truncationNote}`);
  }
@@ -7253,7 +7316,6 @@ Please try rephrasing your request or contact support.
  } catch (error) {
  const errorMsg = error instanceof Error ? error.message : String(error);
  logger.error(`[${providerName}] External tool ${externalTool.name} failed (attempt ${attempts}/${MAX_TOOL_ATTEMPTS}): ${errorMsg}`);
- logCollector?.error(`\u2717 ${externalTool.name} failed: ${errorMsg}`);
  userPromptErrorLogger.logToolError(externalTool.name, toolInput, error instanceof Error ? error : new Error(errorMsg));
  if (streamBuffer.hasCallback()) {
  streamBuffer.write(`\u274C **${externalTool.name} failed:**
@@ -7331,7 +7393,6 @@ var BaseLLM = class {
  return;
  }
  this.conversationSimilarityThreshold = threshold;
- logger.info(`[${this.getProviderName()}] Conversation similarity threshold set to: ${threshold}`);
  }
  /**
  * Get the current conversation similarity threshold
@@ -7374,16 +7435,14 @@ var BaseLLM = class {
  * @param analysisContent - The text response containing component suggestions
  * @param components - List of available components
  * @param apiKey - Optional API key
- * @param logCollector - Optional log collector
  * @param componentStreamCallback - Optional callback to stream primary KPI component as soon as it's identified
  * @returns Object containing matched components, layout title/description, and follow-up actions
  */
- async matchComponentsFromAnalysis(analysisContent, components, userPrompt, apiKey, logCollector, componentStreamCallback, deferredTools, executedTools, collections, userId) {
+ async matchComponentsFromAnalysis(analysisContent, components, userPrompt, apiKey, componentStreamCallback, deferredTools, executedTools, collections, userId) {
  const methodStartTime = Date.now();
  const methodName = "matchComponentsFromAnalysis";
  logger.info(`[${this.getProviderName()}] [TIMING] START ${methodName} | model: ${this.getModelForTask("complex")}`);
  try {
- logger.debug(`[${this.getProviderName()}] Starting component matching from text response`);
  let availableComponentsText = "No components available";
  if (components && components.length > 0) {
  availableComponentsText = components.map((comp, idx) => {
@@ -7399,7 +7458,6 @@ var BaseLLM = class {
  }
  let deferredToolsText = "No deferred external tools for this request.";
  if (deferredTools && deferredTools.length > 0) {
- logger.info(`[${this.getProviderName()}] Passing ${deferredTools.length} deferred tools to component matching`);
  deferredToolsText = "The following external tools need user input via a Form component.\n**IMPORTANT: Use these EXACT values when generating Form externalTool prop.**\n\n" + deferredTools.map((tool, idx) => {
  return `${idx + 1}. **${tool.name}**
  toolId: "${tool.id}" (USE THIS EXACT VALUE - do not modify!)
@@ -7411,7 +7469,6 @@ var BaseLLM = class {
  }
  let executedToolsText = "No external tools were executed for data fetching.";
  if (executedTools && executedTools.length > 0) {
- logger.info(`[${this.getProviderName()}] Passing ${executedTools.length} executed tools to component matching`);
  executedToolsText = "The following external tools were executed to fetch data.\n" + executedTools.map((tool, idx) => {
  let outputSchemaText = "Not available";
  let fieldNamesList = "";
@@ -7467,14 +7524,12 @@ ${fieldsText}`;
  KNOWLEDGE_BASE_CONTEXT: knowledgeBaseContext,
  CURRENT_DATETIME: getCurrentDateTimeForPrompt()
  });
- logger.debug(`[${this.getProviderName()}] Loaded match-text-components prompts`);
  logger.logLLMPrompt("matchComponentsFromAnalysis", "system", extractPromptText(prompts.system));
  logger.logLLMPrompt("matchComponentsFromAnalysis", "user", `Text Analysis:
  ${analysisContent}

  Executed Tools:
  ${executedToolsText}`);
- logCollector?.info("Matching components from text response...");
  let fullResponseText = "";
  let answerComponentExtracted = false;
  const answerCallback = componentStreamCallback;
@@ -7534,18 +7589,7 @@ ${executedToolsText}`);
  ...answerComponentData.props
  }
  };
- const streamTime = (/* @__PURE__ */ new Date()).toISOString();
- logger.info(`[${this.getProviderName()}] \u2713 [${streamTime}] Answer component detected in stream: ${answerComponent.name} (${answerComponent.type})`);
- logCollector?.info(`\u2713 Answer component: ${answerComponent.name} (${answerComponent.type}) - detected at ${streamTime}`);
- if (answerComponentData.props?.query) {
- logCollector?.logQuery(
- "Answer component query",
- answerComponentData.props.query,
- { componentName: answerComponent.name, componentType: answerComponent.type, reasoning: answerComponentData.reasoning }
- );
- }
  let answerQuery = answerComponent.props?.query;
- logger.info(`[${this.getProviderName()}] Answer component detected: ${answerComponent.name} (${answerComponent.type}), hasQuery: ${!!answerQuery}, hasDbExecute: ${!!collections?.["database"]?.["execute"]}`);
  if (answerQuery) {
  if (typeof answerQuery === "string") {
  answerQuery = ensureQueryLimit(answerQuery, this.defaultLimit, MAX_COMPONENT_QUERY_LIMIT);
@@ -7563,24 +7607,18 @@ ${executedToolsText}`);
  let currentQuery = answerQuery;
  let currentQueryStr = typeof answerQuery === "string" ? answerQuery : answerQuery?.sql || "";
  let lastError = "";
- logger.info(`[${this.getProviderName()}] Validating answer component query before streaming...`);
  while (attempts < maxRetries && !validated) {
  attempts++;
  try {
  const cacheKey = this.queryService.getQueryCacheKey(currentQuery);
  if (cacheKey) {
- logger.debug(`[${this.getProviderName()}] Answer component query validation attempt ${attempts}/${maxRetries}`);
  const result2 = await collections["database"]["execute"]({ sql: cacheKey });
  queryCache.set(cacheKey, result2);
  validated = true;
  if (currentQuery !== answerQuery) {
  answerComponent.props.query = currentQuery;
  }
- logger.info(`[${this.getProviderName()}] \u2713 Answer component query validated (attempt ${attempts}) - STREAMING TO FRONTEND NOW`);
- logCollector?.info(`\u2713 Answer component query validated - streaming to frontend`);
- logger.info(`[${this.getProviderName()}] Calling answerCallback for: ${answerComponent.name}`);
  answerCallback(answerComponent);
- logger.info(`[${this.getProviderName()}] answerCallback completed for: ${answerComponent.name}`);
  }
  } catch (validationError) {
  lastError = validationError instanceof Error ? validationError.message : String(validationError);
@@ -7616,7 +7654,6 @@ ${executedToolsText}`);
  }
  if (!validated) {
  logger.warn(`[${this.getProviderName()}] Answer component query validation failed after ${attempts} attempts - component will be excluded`);
- logCollector?.warn(`Answer component query validation failed: ${lastError} - component will be excluded from response`);
  }
  })();
  } else {
@@ -7627,7 +7664,7 @@ ${executedToolsText}`);
  }
  }
  } catch (e) {
- logger.debug(`[${this.getProviderName()}] Partial answerComponent parse failed, waiting for more data...`);
+ logger.error(`[${this.getProviderName()}] Partial answerComponent parse failed, waiting for more data...`);
  }
  }
  }
@@ -7657,18 +7694,6 @@ ${executedToolsText}`);
  logger.file("\n=============================\nFull LLM response:", JSON.stringify(result, null, 2));
  const rawActions = result.actions || [];
  const actions = convertQuestionsToActions(rawActions);
- if (matchedComponents.length > 0) {
- matchedComponents.forEach((comp, idx) => {
- logCollector?.info(` ${idx + 1}. ${comp.componentName} (${comp.componentType}): ${comp.reasoning}`);
- if (comp.props?.query) {
- logCollector?.logQuery(
- `Component ${idx + 1} query`,
- comp.props.query,
- { componentName: comp.componentName, title: comp.props.title }
- );
- }
- });
- }
  const finalComponents = matchedComponents.map((mc) => {
  const originalComponent = components.find((c) => c.id === mc.componentId);
  if (!originalComponent) {
@@ -7693,27 +7718,22 @@ ${executedToolsText}`);
  }).filter(Boolean);
  let validatedComponents = finalComponents;
  if (collections?.["database"]?.["execute"]) {
- logger.info(`[${this.getProviderName()}] Starting query validation for ${finalComponents.length} components...`);
- logCollector?.info(`Validating queries for ${finalComponents.length} components...`);
  try {
  const validationResult = await this.queryService.validateComponentQueries(
  finalComponents,
  collections,
- apiKey,
- logCollector
+ apiKey
  );
  validatedComponents = validationResult.components;
  const queriedComponents = finalComponents.filter((c) => c.props?.query);
  const validatedQueries = validatedComponents.filter((c) => c.props?.query);
  logger.info(`[${this.getProviderName()}] Query validation complete: ${validatedQueries.length}/${queriedComponents.length} queries validated`);
- logCollector?.info(`Query validation complete: ${validatedQueries.length}/${queriedComponents.length} queries validated`);
  } catch (validationError) {
  const validationErrorMsg = validationError instanceof Error ? validationError.message : String(validationError);
  logger.error(`[${this.getProviderName()}] Query validation error: ${validationErrorMsg}`);
- logCollector?.error(`Query validation error: ${validationErrorMsg}`);
  }
  } else {
- logger.debug(`[${this.getProviderName()}] Skipping query validation - database execute function not available`);
+ logger.error(`[${this.getProviderName()}] Skipping query validation - database execute function not available`);
  }
  const methodDuration = Date.now() - methodStartTime;
  logger.info(`[${this.getProviderName()}] [TIMING] DONE ${methodName} in ${methodDuration}ms | components: ${validatedComponents.length} | actions: ${actions.length}`);
@@ -7727,7 +7747,6 @@ ${executedToolsText}`);
  const methodDuration = Date.now() - methodStartTime;
  const errorMsg = error instanceof Error ? error.message : String(error);
  logger.error(`[${this.getProviderName()}] [TIMING] FAILED ${methodName} in ${methodDuration}ms | error: ${errorMsg}`);
- logCollector?.error(`Failed to match components: ${errorMsg}`);
  return {
  components: [],
  layoutTitle: "Dashboard",
@@ -7740,7 +7759,7 @@ ${executedToolsText}`);
  * Classify user question into category and detect external tools needed
  * Determines if question is for data analysis, requires external tools, or needs text response
  */
- async classifyQuestionCategory(userPrompt, apiKey, logCollector, conversationHistory, externalTools) {
+ async classifyQuestionCategory(userPrompt, apiKey, conversationHistory, externalTools) {
  const methodStartTime = Date.now();
  const methodName = "classifyQuestionCategory";
  const promptPreview = userPrompt.substring(0, 50) + (userPrompt.length > 50 ? "..." : "");
@@ -7776,16 +7795,6 @@ ${executedToolsText}`);
  true
  // Parse as JSON
  );
- logCollector?.logExplanation(
- "Question category classified",
- result.reasoning || "No reasoning provided",
- {
- category: result.category,
- externalTools: result.externalTools || [],
- dataAnalysisType: result.dataAnalysisType,
- confidence: result.confidence
- }
- );
  const methodDuration = Date.now() - methodStartTime;
  logger.info(`[${this.getProviderName()}] [TIMING] DONE ${methodName} in ${methodDuration}ms | category: ${result.category} | confidence: ${result.confidence}% | tools: ${(result.externalTools || []).length}`);
  return {
@@ -7799,7 +7808,6 @@ ${executedToolsText}`);
  const methodDuration = Date.now() - methodStartTime;
  const errorMsg = error instanceof Error ? error.message : String(error);
  logger.error(`[${this.getProviderName()}] [TIMING] FAILED ${methodName} in ${methodDuration}ms | error: ${errorMsg}`);
- logger.debug(`[${this.getProviderName()}] Category classification error details:`, error);
  throw error;
  }
  }
@@ -7808,7 +7816,7 @@ ${executedToolsText}`);
  * Takes a matched UI block from semantic search and modifies its props to answer the new question
  * Also adapts the cached text response to match the new question
  */
- async adaptUIBlockParameters(currentUserPrompt, originalUserPrompt, matchedUIBlock, apiKey, logCollector, cachedTextResponse) {
+ async adaptUIBlockParameters(currentUserPrompt, originalUserPrompt, matchedUIBlock, apiKey, cachedTextResponse) {
  const methodStartTime = Date.now();
  const methodName = "adaptUIBlockParameters";
  const promptPreview = currentUserPrompt.substring(0, 50) + (currentUserPrompt.length > 50 ? "..." : "");
@@ -7852,11 +7860,6 @@ ${executedToolsText}`);
  if (!result.success) {
  const methodDuration2 = Date.now() - methodStartTime;
  logger.info(`[${this.getProviderName()}] [TIMING] DONE ${methodName} in ${methodDuration2}ms | result: adaptation failed - ${result.reason}`);
- logCollector?.warn(
- "Could not adapt matched UI block",
- "explanation",
- { reason: result.reason }
- );
  return {
  success: false,
  explanation: result.explanation || "Adaptation not possible"
@@ -7868,14 +7871,6 @@ ${executedToolsText}`);
  this.defaultLimit
  );
  }
- logCollector?.logExplanation(
- "UI block parameters adapted",
- result.explanation || "Parameters adapted successfully",
- {
- parametersChanged: result.parametersChanged || [],
- componentType: result.adaptedComponent?.type
- }
- );
  const methodDuration = Date.now() - methodStartTime;
  logger.info(`[${this.getProviderName()}] [TIMING] DONE ${methodName} in ${methodDuration}ms | result: success | changes: ${(result.parametersChanged || []).length}`);
  return {
@@ -7889,7 +7884,6 @@ ${executedToolsText}`);
  const methodDuration = Date.now() - methodStartTime;
  const errorMsg = error instanceof Error ? error.message : String(error);
  logger.error(`[${this.getProviderName()}] [TIMING] FAILED ${methodName} in ${methodDuration}ms | error: ${errorMsg}`);
- logger.debug(`[${this.getProviderName()}] Adaptation error details:`, error);
  return {
  success: false,
  explanation: `Error adapting parameters: ${errorMsg}`
@@ -7902,14 +7896,12 @@ ${executedToolsText}`);
  * Supports tool calling for query execution with automatic retry on errors (max 3 attempts)
  * After generating text response, if components are provided, matches suggested components
  */
- async generateTextResponse(userPrompt, apiKey, logCollector, conversationHistory, streamCallback, collections, components, externalTools, category, userId) {
+ async generateTextResponse(userPrompt, apiKey, conversationHistory, streamCallback, collections, components, externalTools, category, userId) {
  const methodStartTime = Date.now();
  const methodName = "generateTextResponse";
  const promptPreview = userPrompt.substring(0, 50) + (userPrompt.length > 50 ? "..." : "");
  logger.info(`[${this.getProviderName()}] [TIMING] START ${methodName} | model: ${this.getModelForTask("complex")} | category: ${category} | prompt: "${promptPreview}"`);
  const errors = [];
- logger.debug(`[${this.getProviderName()}] Starting text response generation`);
- logger.debug(`[${this.getProviderName()}] User prompt: "${userPrompt.substring(0, 50)}..."`);
  try {
  let availableToolsDoc = "No external tools are available for this request.";
  if (externalTools && externalTools.length > 0) {
@@ -7973,9 +7965,6 @@ ${executedToolsText}`);
  });
  logger.logLLMPrompt("generateTextResponse", "system", extractPromptText(prompts.system));
  logger.logLLMPrompt("generateTextResponse", "user", extractPromptText(prompts.user));
- logger.debug(`[${this.getProviderName()}] Loaded text-response prompts with schema`);
- logger.debug(`[${this.getProviderName()}] System prompt length: ${prompts.system.length}, User prompt length: ${prompts.user.length}`);
- logCollector?.info("Generating text response with query execution capability...");
  const tools = [{
  name: "execute_query",
  description: "Executes a parameterized SQL query against the database. CRITICAL: NEVER hardcode literal values in WHERE/HAVING conditions - ALWAYS use $paramName placeholders and pass actual values in params object.",
@@ -8004,7 +7993,6 @@ ${executedToolsText}`);
  const executableTools = externalTools.filter(
  (t) => t.executionType === "immediate" || t.executionType === "deferred" && t.userProvidedData
  );
- logger.info(`[${this.getProviderName()}] Executable tools: ${executableTools.length} of ${externalTools.length} total`);
  const addedToolIds = /* @__PURE__ */ new Set();
  executableTools.forEach((tool) => {
  if (addedToolIds.has(tool.id)) {
@@ -8012,7 +8000,6 @@ ${executedToolsText}`);
  return;
  }
  addedToolIds.add(tool.id);
- logger.info(`[${this.getProviderName()}] Processing executable tool:`, JSON.stringify(tool, null, 2));
  const properties = {};
  const required = [];
  Object.entries(tool.params || {}).forEach(([key, typeOrValue]) => {
@@ -8082,14 +8069,13 @@ ${executedToolsText}`);
  });
  });
  logger.info(`[${this.getProviderName()}] Added ${addedToolIds.size} unique tool definitions from ${executableTools.length} tool calls (${externalTools.length - executableTools.length} deferred tools await form input)`);
- logger.info(`[${this.getProviderName()}] Complete tools array:`, JSON.stringify(tools, null, 2));
+ logger.debug(`[${this.getProviderName()}] Complete tools array:`, JSON.stringify(tools, null, 2));
  }
  const streamBuffer = new StreamBuffer(streamCallback);
  const toolExecutor = new ToolExecutorService({
  providerName: this.getProviderName(),
  collections,
- streamBuffer,
- logCollector
+ streamBuffer
  });
  const executableExternalTools = externalTools?.map((t) => ({
  id: t.id,
@@ -8118,12 +8104,10 @@ ${executedToolsText}`);
  },
  MAX_TOOL_CALLING_ITERATIONS
  );
- logger.info(`[${this.getProviderName()}] Text response stream completed`);
  const textResponse = streamBuffer.getFullText() || result || "I apologize, but I was unable to generate a response.";
  if (toolExecutor.isMaxAttemptsReached()) {
  const methodDuration2 = Date.now() - methodStartTime;
  logger.warn(`[${this.getProviderName()}] [TIMING] DONE ${methodName} in ${methodDuration2}ms | result: max attempts reached`);
- logCollector?.error("Failed to generate valid query after maximum attempts");
  return {
  success: false,
  errors: [`Maximum query attempts (${MAX_QUERY_ATTEMPTS}) reached. Unable to generate a valid query for your question.`],
@@ -8135,14 +8119,6 @@ ${executedToolsText}`);
8135
8119
  }
8136
8120
  };
8137
8121
  }
8138
- logCollector?.info(`Text response: ${textResponse.substring(0, 100)}${textResponse.length > 100 ? "..." : ""}`);
8139
- logCollector?.logExplanation(
8140
- "Text response generated",
8141
- "Generated plain text response with component suggestions",
8142
- {
8143
- textLength: textResponse.length
8144
- }
8145
- );
8146
8122
  streamBuffer.flush();
8147
8123
  if (streamBuffer.hasCallback() && components && components.length > 0 && category !== "general") {
8148
8124
  streamBuffer.write("\n\n\u{1F4CA} **Generating visualization components...**\n\n");
@@ -8154,8 +8130,6 @@ ${executedToolsText}`);
8154
8130
  let actions = [];
8155
8131
  if (category === "general") {
8156
8132
  logger.info(`[${this.getProviderName()}] Skipping component generation for general/conversational question`);
8157
- logCollector?.info("Skipping component generation for general question");
8158
- logger.info(`[${this.getProviderName()}] Generating actions for general question...`);
8159
8133
  const nextQuestions = await this.generateNextQuestions(
8160
8134
  userPrompt,
8161
8135
  null,
@@ -8163,23 +8137,16 @@ ${executedToolsText}`);
8163
8137
  void 0,
8164
8138
  // no component data
8165
8139
  apiKey,
8166
- logCollector,
8167
8140
  conversationHistory,
8168
8141
  textResponse
8169
8142
  // pass text response as context
8170
8143
  );
8171
8144
  actions = convertQuestionsToActions(nextQuestions);
8172
- logger.info(`[${this.getProviderName()}] Generated ${actions.length} follow-up actions for general question`);
8173
8145
  } else if (components && components.length > 0) {
8174
- logger.info(`[${this.getProviderName()}] Matching components from text response...`);
8175
- logger.info(`[${this.getProviderName()}] componentStreamCallback setup: hasCallback=${streamBuffer.hasCallback()}, category=${category}`);
8176
- const componentStreamCallback = streamBuffer.hasCallback() && category !== "data_modification" ? (component) => {
8177
- logger.info(`[${this.getProviderName()}] componentStreamCallback INVOKED for: ${component.name} (${component.type})`);
8146
+ const componentStreamCallback = streamBuffer.hasCallback() && category === "data_analysis" ? (component) => {
8178
8147
  const answerMarker = `__ANSWER_COMPONENT_START__${JSON.stringify(component)}__ANSWER_COMPONENT_END__`;
8179
8148
  streamBuffer.write(answerMarker);
8180
- logger.info(`[${this.getProviderName()}] Streamed answer component to frontend: ${component.name} (${component.type})`);
8181
8149
  } : void 0;
8182
- logger.info(`[${this.getProviderName()}] componentStreamCallback created: ${!!componentStreamCallback}`);
8183
8150
  const deferredTools = externalTools?.filter((t) => {
8184
8151
  if (t.executionType === "deferred" && !t.userProvidedData) return true;
8185
8152
  if (category === "data_modification" && !t.userProvidedData) {
@@ -8200,7 +8167,6 @@ ${executedToolsText}`);
8200
8167
  components,
8201
8168
  userPrompt,
8202
8169
  apiKey,
8203
- logCollector,
8204
8170
  componentStreamCallback,
8205
8171
  deferredTools,
8206
8172
  toolExecutor.getExecutedTools(),
@@ -8214,8 +8180,6 @@ ${executedToolsText}`);
8214
8180
  }
8215
8181
  let container_componet = null;
8216
8182
  if (matchedComponents.length > 0) {
8217
- logger.info(`[${this.getProviderName()}] Created MultiComponentContainer: "${layoutTitle}" with ${matchedComponents.length} components and ${actions.length} actions`);
8218
- logCollector?.info(`Created dashboard: "${layoutTitle}" with ${matchedComponents.length} components and ${actions.length} actions`);
8219
8183
  container_componet = {
8220
8184
  id: `container_${Date.now()}`,
8221
8185
  name: "MultiComponentContainer",
@@ -8247,7 +8211,6 @@ ${executedToolsText}`);
8247
8211
  const methodDuration = Date.now() - methodStartTime;
8248
8212
  const errorMsg = error instanceof Error ? error.message : String(error);
8249
8213
  logger.error(`[${this.getProviderName()}] [TIMING] FAILED ${methodName} in ${methodDuration}ms | error: ${errorMsg}`);
8250
- logCollector?.error(`Error generating text response: ${errorMsg}`);
8251
8214
  userPromptErrorLogger.logLlmError(
8252
8215
  this.getProviderName(),
8253
8216
  this.model,
@@ -8275,14 +8238,11 @@ ${executedToolsText}`);
8275
8238
  * 2. Category classification: Determine if data_analysis, requires_external_tools, or text_response
8276
8239
  * 3. Route appropriately based on category and response mode
8277
8240
  */
8278
- async handleUserRequest(userPrompt, components, apiKey, logCollector, conversationHistory, responseMode = "text", streamCallback, collections, externalTools, userId) {
8241
+ async handleUserRequest(userPrompt, components, apiKey, conversationHistory, responseMode = "text", streamCallback, collections, externalTools, userId) {
8279
8242
  const startTime = Date.now();
8280
- logger.info(`[${this.getProviderName()}] handleUserRequest called for user prompt: ${userPrompt}`);
8281
- logCollector?.info(`Starting request processing with mode: ${responseMode}`);
8282
8243
  logger.clearFile();
8283
8244
  logger.logLLMPrompt("handleUserRequest", "user", `User Prompt: ${userPrompt}`);
8284
8245
  try {
8285
- logger.info(`[${this.getProviderName()}] Step 1: Searching previous conversations...`);
8286
8246
  const conversationMatch = await conversation_search_default.searchConversationsWithReranking({
8287
8247
  userPrompt,
8288
8248
  collections,
@@ -8291,22 +8251,16 @@ ${executedToolsText}`);
8291
8251
  });
8292
8252
  if (conversationMatch) {
8293
8253
  logger.info(`[${this.getProviderName()}] \u2713 Found matching conversation with ${(conversationMatch.similarity * 100).toFixed(2)}% similarity`);
8294
- logCollector?.info(
8295
- `\u2713 Found similar conversation (${(conversationMatch.similarity * 100).toFixed(2)}% match)`
8296
- );
8297
8254
  const rawComponent = conversationMatch.uiBlock?.component || conversationMatch.uiBlock?.generatedComponentMetadata;
8298
8255
  const isValidComponent = rawComponent && typeof rawComponent === "object" && Object.keys(rawComponent).length > 0;
8299
8256
  const component = isValidComponent ? rawComponent : null;
8300
8257
  const cachedTextResponse = conversationMatch.uiBlock?.analysis || conversationMatch.uiBlock?.textResponse || conversationMatch.uiBlock?.text || "";
8301
8258
  if (this.containsFormComponent(component)) {
8302
8259
  logger.info(`[${this.getProviderName()}] Skipping cached result - Form components contain stale defaultValues, fetching fresh data`);
8303
- logCollector?.info("Skipping cache for form - fetching current values from database...");
8304
8260
  } else if (!component) {
8305
8261
  if (conversationMatch.similarity >= EXACT_MATCH_SIMILARITY_THRESHOLD) {
8306
8262
  const elapsedTime2 = Date.now() - startTime;
8307
- logger.info(`[${this.getProviderName()}] \u2713 Exact match for general question - returning cached text response`);
8308
- logCollector?.info(`\u2713 Exact match for general question - returning cached response`);
8309
- logCollector?.info(`Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
8263
+ logger.info(`[${this.getProviderName()}] \u2713 Exact match for general question - returning cached text response (${elapsedTime2}ms)`);
8310
8264
  return {
8311
8265
  success: true,
8312
8266
  data: {
@@ -8321,14 +8275,11 @@ ${executedToolsText}`);
8321
8275
  };
8322
8276
  } else {
8323
8277
  logger.info(`[${this.getProviderName()}] Similar match but no component (general question) - processing fresh`);
8324
- logCollector?.info("Similar match found but was a general conversation - processing as new question");
8325
8278
  }
8326
8279
  } else {
8327
8280
  if (conversationMatch.similarity >= EXACT_MATCH_SIMILARITY_THRESHOLD) {
8328
8281
  const elapsedTime2 = Date.now() - startTime;
8329
- logger.info(`[${this.getProviderName()}] \u2713 100% match - returning UI block directly without adaptation`);
8330
- logCollector?.info(`\u2713 Exact match (${(conversationMatch.similarity * 100).toFixed(2)}%) - returning cached result`);
8331
- logCollector?.info(`Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
8282
+ logger.info(`[${this.getProviderName()}] \u2713 100% match - returning UI block directly without adaptation (${elapsedTime2}ms)`);
8332
8283
  if (streamCallback && cachedTextResponse) {
8333
8284
  logger.info(`[${this.getProviderName()}] Streaming cached text response to frontend`);
8334
8285
  streamCallback(cachedTextResponse);
@@ -8347,22 +8298,18 @@ ${executedToolsText}`);
8347
8298
  errors: []
8348
8299
  };
8349
8300
  }
8350
- logCollector?.info(`Adapting parameters for similar question...`);
8301
+ logger.info(`[${this.getProviderName()}] Adapting parameters for similar question...`);
8351
8302
  const originalPrompt = conversationMatch.metadata?.userPrompt || "Previous question";
8352
8303
  const adaptResult = await this.adaptUIBlockParameters(
8353
8304
  userPrompt,
8354
8305
  originalPrompt,
8355
8306
  conversationMatch.uiBlock,
8356
8307
  apiKey,
8357
- logCollector,
8358
8308
  cachedTextResponse
8359
8309
  );
8360
8310
  if (adaptResult.success && adaptResult.adaptedComponent) {
8361
8311
  const elapsedTime2 = Date.now() - startTime;
8362
- logger.info(`[${this.getProviderName()}] \u2713 Successfully adapted UI block parameters`);
8363
- logger.info(`[${this.getProviderName()}] Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
8364
- logCollector?.info(`\u2713 UI block adapted successfully`);
8365
- logCollector?.info(`Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
8312
+ logger.info(`[${this.getProviderName()}] \u2713 Successfully adapted UI block parameters (${elapsedTime2}ms)`);
8366
8313
  const textResponseToUse = adaptResult.adaptedTextResponse || cachedTextResponse;
8367
8314
  if (streamCallback && textResponseToUse) {
8368
8315
  logger.info(`[${this.getProviderName()}] Streaming ${adaptResult.adaptedTextResponse ? "adapted" : "cached"} text response to frontend`);
@@ -8383,65 +8330,57 @@ ${executedToolsText}`);
8383
8330
  errors: []
8384
8331
  };
8385
8332
  } else {
8386
- logger.info(`[${this.getProviderName()}] Could not adapt matched conversation, continuing to category classification`);
8387
- logCollector?.warn(`Could not adapt matched conversation: ${adaptResult.explanation}`);
8333
+ logger.info(`[${this.getProviderName()}] Could not adapt matched conversation: ${adaptResult.explanation}, continuing to category classification`);
8388
8334
  }
8389
8335
  }
8390
8336
  } else {
8391
8337
  logger.info(`[${this.getProviderName()}] No matching previous conversations found, proceeding to category classification`);
8392
- logCollector?.info("No similar previous conversations found. Proceeding to category classification...");
8393
8338
  }
8394
8339
  logger.info(`[${this.getProviderName()}] Step 2: Classifying question category...`);
8395
- logCollector?.info("Step 2: Classifying question category...");
8396
8340
  const categoryClassification = await this.classifyQuestionCategory(
8397
8341
  userPrompt,
8398
8342
  apiKey,
8399
- logCollector,
8400
8343
  conversationHistory,
8401
8344
  externalTools
8402
8345
  );
8403
8346
  logger.info(
8404
8347
  `[${this.getProviderName()}] Question classified as: ${categoryClassification.category} (confidence: ${categoryClassification.confidence}%)`
8405
8348
  );
8406
- logCollector?.info(
8407
- `Category: ${categoryClassification.category} | Confidence: ${categoryClassification.confidence}%`
8408
- );
8409
8349
  let toolsToUse = [];
8410
8350
  if (categoryClassification.externalTools && categoryClassification.externalTools.length > 0) {
8411
- logger.info(`[${this.getProviderName()}] Identified ${categoryClassification.externalTools.length} external tools needed`);
8412
- logCollector?.info(`Identified external tools: ${categoryClassification.externalTools.map((t) => t.name || t.type).join(", ")}`);
8413
- logger.info(`[${this.getProviderName()}] Raw external tools from classification: ${JSON.stringify(categoryClassification.externalTools, null, 2)}`);
8414
- toolsToUse = categoryClassification.externalTools?.map((t) => {
8351
+ logger.info(`[${this.getProviderName()}] Identified ${categoryClassification.externalTools.length} external tools needed: ${categoryClassification.externalTools.map((t) => t.name || t.type).join(", ")}`);
8352
+ logger.debug(`[${this.getProviderName()}] Raw external tools from classification: ${JSON.stringify(categoryClassification.externalTools, null, 2)}`);
8353
+ toolsToUse = categoryClassification.externalTools.reduce((acc, t) => {
8415
8354
  const realTool = externalTools?.find((tool) => tool.id === t.type);
8416
- logger.info(`[${this.getProviderName()}] Tool ${t.name}: executionType=${t.executionType}, userProvidedData=${t.userProvidedData ? "present" : "null"}`);
8417
- return {
8355
+ if (!realTool) {
8356
+ logger.warn(`[${this.getProviderName()}] Tool ${t.type} (${t.name}) not found in registered tools - skipping (likely hallucinated)`);
8357
+ return acc;
8358
+ }
8359
+ acc.push({
8418
8360
  id: t.type,
8419
8361
  name: t.name,
8420
8362
  description: t.description,
8421
8363
  params: t.parameters || {},
8422
- // NEW: Include execution type info from category classification
8364
+ // Include execution type info from category classification
8423
8365
  executionType: t.executionType || "immediate",
8424
8366
  executionReason: t.executionReason || "",
8425
8367
  requiredFields: t.requiredFields || [],
8426
8368
  userProvidedData: t.userProvidedData || null,
8427
- // CRITICAL: Include outputSchema from real tool for component config generation
8428
- outputSchema: realTool?.outputSchema,
8429
- fn: (() => {
8430
- if (realTool) {
8431
- logger.info(`[${this.getProviderName()}] Using real tool implementation for ${t.type}`);
8432
- return realTool.fn;
8433
- } else {
8434
- logger.warn(`[${this.getProviderName()}] Tool ${t.type} not found in registered tools`);
8435
- return async () => ({ success: false, message: `Tool ${t.name || t.type} not registered` });
8436
- }
8437
- })()
8438
- };
8439
- }) || [];
8369
+ // Include outputSchema from real tool for component config generation
8370
+ outputSchema: realTool.outputSchema,
8371
+ fn: realTool.fn
8372
+ });
8373
+ return acc;
8374
+ }, []);
8375
+ const validCount = toolsToUse.length;
8376
+ const hallucinatedCount = categoryClassification.externalTools.length - validCount;
8377
+ if (hallucinatedCount > 0) {
8378
+ logger.warn(`[${this.getProviderName()}] Filtered out ${hallucinatedCount} hallucinated/non-existent tools, ${validCount} valid tools remaining`);
8379
+ }
8440
8380
  }
8441
8381
  const textResponse = await this.generateTextResponse(
8442
8382
  userPrompt,
8443
8383
  apiKey,
8444
- logCollector,
8445
8384
  conversationHistory,
8446
8385
  streamCallback,
8447
8386
  collections,
@@ -8452,13 +8391,10 @@ ${executedToolsText}`);
8452
8391
  );
8453
8392
  const elapsedTime = Date.now() - startTime;
8454
8393
  logger.info(`[${this.getProviderName()}] Total time taken: ${elapsedTime}ms (${(elapsedTime / 1e3).toFixed(2)}s)`);
8455
- logCollector?.info(`Total time taken: ${elapsedTime}ms (${(elapsedTime / 1e3).toFixed(2)}s)`);
8456
8394
  return textResponse;
8457
8395
  } catch (error) {
8458
8396
  const errorMsg = error instanceof Error ? error.message : String(error);
8459
8397
  logger.error(`[${this.getProviderName()}] Error in handleUserRequest: ${errorMsg}`);
8460
- logger.debug(`[${this.getProviderName()}] Error details:`, error);
8461
- logCollector?.error(`Error processing request: ${errorMsg}`);
8462
8398
  userPromptErrorLogger.logError(
8463
8399
  "handleUserRequest",
8464
8400
  error instanceof Error ? error : new Error(errorMsg),
@@ -8466,7 +8402,6 @@ ${executedToolsText}`);
8466
8402
  );
8467
8403
  const elapsedTime = Date.now() - startTime;
8468
8404
  logger.info(`[${this.getProviderName()}] Total time taken: ${elapsedTime}ms (${(elapsedTime / 1e3).toFixed(2)}s)`);
8469
- logCollector?.info(`Total time taken: ${elapsedTime}ms (${(elapsedTime / 1e3).toFixed(2)}s)`);
8470
8405
  return {
8471
8406
  success: false,
8472
8407
  errors: [errorMsg],
@@ -8482,7 +8417,7 @@ ${executedToolsText}`);
8482
8417
  * This helps provide intelligent suggestions for follow-up queries
8483
8418
  * For general/conversational questions without components, pass textResponse instead
8484
8419
  */
8485
- async generateNextQuestions(originalUserPrompt, component, componentData, apiKey, logCollector, conversationHistory, textResponse) {
8420
+ async generateNextQuestions(originalUserPrompt, component, componentData, apiKey, conversationHistory, textResponse) {
8486
8421
  const methodStartTime = Date.now();
8487
8422
  const methodName = "generateNextQuestions";
8488
8423
  const promptPreview = originalUserPrompt.substring(0, 50) + (originalUserPrompt.length > 50 ? "..." : "");
@@ -8527,14 +8462,6 @@ ${executedToolsText}`);
8527
8462
  // Parse as JSON
8528
8463
  );
8529
8464
  const nextQuestions = result.nextQuestions || [];
8530
- logCollector?.logExplanation(
8531
- "Next questions generated",
8532
- "Generated intelligent follow-up questions based on component",
8533
- {
8534
- count: nextQuestions.length,
8535
- questions: nextQuestions
8536
- }
8537
- );
8538
8465
  const methodDuration = Date.now() - methodStartTime;
8539
8466
  logger.info(`[${this.getProviderName()}] [TIMING] DONE ${methodName} in ${methodDuration}ms | questions: ${nextQuestions.length}`);
8540
8467
  return nextQuestions;
@@ -8542,8 +8469,6 @@ ${executedToolsText}`);
8542
8469
  const methodDuration = Date.now() - methodStartTime;
8543
8470
  const errorMsg = error instanceof Error ? error.message : String(error);
8544
8471
  logger.error(`[${this.getProviderName()}] [TIMING] FAILED ${methodName} in ${methodDuration}ms | error: ${errorMsg}`);
8545
- logger.debug(`[${this.getProviderName()}] Next questions generation error details:`, error);
8546
- logCollector?.error(`Error generating next questions: ${errorMsg}`);
8547
8472
  return [];
8548
8473
  }
8549
8474
  }
@@ -8600,10 +8525,10 @@ var GeminiLLM = class extends BaseLLM {
8600
8525
  super(config);
8601
8526
  }
8602
8527
  getDefaultModel() {
8603
- return "gemini/gemini-3-pro-preview";
8528
+ return "gemini/gemini-2.5-flash";
8604
8529
  }
8605
8530
  getDefaultFastModel() {
8606
- return "gemini/gemini-3-flash-preview";
8531
+ return "gemini/gemini-2.5-flash";
8607
8532
  }
8608
8533
  getDefaultApiKey() {
8609
8534
  return process.env.GEMINI_API_KEY;
@@ -8657,332 +8582,96 @@ function getLLMProviders() {
8657
8582
  return DEFAULT_PROVIDERS;
8658
8583
  }
8659
8584
  }
8660
- var useAnthropicMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8661
- logger.debug("[useAnthropicMethod] Initializing Anthropic Claude matching method");
8662
- logger.debug(`[useAnthropicMethod] Response mode: ${responseMode}`);
8663
- const msg = `Using Anthropic Claude ${responseMode === "text" ? "text response" : "matching"} method...`;
8664
- logCollector?.info(msg);
8585
+ var useAnthropicMethod = async (prompt, components, apiKey, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8665
8586
  if (responseMode === "component" && components.length === 0) {
8666
8587
  const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
8667
8588
  logger.error("[useAnthropicMethod] No components available");
8668
- logCollector?.error(emptyMsg);
8669
8589
  return { success: false, errors: [emptyMsg] };
8670
8590
  }
8671
- logger.debug(`[useAnthropicMethod] Processing with ${components.length} components`);
8672
- const matchResult = await anthropicLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8591
+ const matchResult = await anthropicLLM.handleUserRequest(prompt, components, apiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8673
8592
  logger.info(`[useAnthropicMethod] Successfully generated ${responseMode} using Anthropic`);
8674
8593
  return matchResult;
8675
8594
  };
8676
- var useGroqMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8595
+ var useGroqMethod = async (prompt, components, apiKey, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8677
8596
  logger.debug("[useGroqMethod] Initializing Groq LLM matching method");
8678
8597
  logger.debug(`[useGroqMethod] Response mode: ${responseMode}`);
8679
- const msg = `Using Groq LLM ${responseMode === "text" ? "text response" : "matching"} method...`;
8680
- logger.info(msg);
8681
- logCollector?.info(msg);
8682
8598
  if (responseMode === "component" && components.length === 0) {
8683
8599
  const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
8684
8600
  logger.error("[useGroqMethod] No components available");
8685
- logCollector?.error(emptyMsg);
8686
8601
  return { success: false, errors: [emptyMsg] };
8687
8602
  }
8688
8603
  logger.debug(`[useGroqMethod] Processing with ${components.length} components`);
8689
- const matchResult = await groqLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8604
+ const matchResult = await groqLLM.handleUserRequest(prompt, components, apiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8690
8605
  logger.info(`[useGroqMethod] Successfully generated ${responseMode} using Groq`);
8691
8606
  return matchResult;
8692
8607
  };
8693
- var useGeminiMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8608
+ var useGeminiMethod = async (prompt, components, apiKey, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8694
8609
  logger.debug("[useGeminiMethod] Initializing Gemini LLM matching method");
8695
8610
  logger.debug(`[useGeminiMethod] Response mode: ${responseMode}`);
8696
- const msg = `Using Gemini LLM ${responseMode === "text" ? "text response" : "matching"} method...`;
8697
- logger.info(msg);
8698
- logCollector?.info(msg);
8699
8611
  if (responseMode === "component" && components.length === 0) {
8700
8612
  const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
8701
8613
  logger.error("[useGeminiMethod] No components available");
8702
- logCollector?.error(emptyMsg);
8703
8614
  return { success: false, errors: [emptyMsg] };
8704
8615
  }
8705
8616
  logger.debug(`[useGeminiMethod] Processing with ${components.length} components`);
8706
- const matchResult = await geminiLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8617
+ const matchResult = await geminiLLM.handleUserRequest(prompt, components, apiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8707
8618
  logger.info(`[useGeminiMethod] Successfully generated ${responseMode} using Gemini`);
8708
8619
  return matchResult;
8709
8620
  };
8710
- var useOpenAIMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8621
+ var useOpenAIMethod = async (prompt, components, apiKey, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8711
8622
  logger.debug("[useOpenAIMethod] Initializing OpenAI GPT matching method");
8712
8623
  logger.debug(`[useOpenAIMethod] Response mode: ${responseMode}`);
8713
- const msg = `Using OpenAI GPT ${responseMode === "text" ? "text response" : "matching"} method...`;
8714
- logger.info(msg);
8715
- logCollector?.info(msg);
8716
8624
  if (responseMode === "component" && components.length === 0) {
8717
8625
  const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
8718
8626
  logger.error("[useOpenAIMethod] No components available");
8719
- logCollector?.error(emptyMsg);
8720
8627
  return { success: false, errors: [emptyMsg] };
8721
8628
  }
8722
8629
  logger.debug(`[useOpenAIMethod] Processing with ${components.length} components`);
8723
- const matchResult = await openaiLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8630
+ const matchResult = await openaiLLM.handleUserRequest(prompt, components, apiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8724
8631
  logger.info(`[useOpenAIMethod] Successfully generated ${responseMode} using OpenAI`);
8725
8632
  return matchResult;
8726
8633
  };
8727
- var getUserResponseFromCache = async (prompt) => {
8728
- return false;
8729
- };
8730
- var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8731
- logger.debug(`[get_user_response] Starting user response generation for prompt: "${prompt.substring(0, 50)}..."`);
8732
- logger.debug(`[get_user_response] Response mode: ${responseMode}`);
8733
- logger.debug("[get_user_response] Checking cache for existing response");
8734
- const userResponse = await getUserResponseFromCache(prompt);
8735
- if (userResponse) {
8736
- logger.info("[get_user_response] User response found in cache - returning cached result");
8737
- logCollector?.info("User response found in cache");
8738
- return {
8739
- success: true,
8740
- data: userResponse,
8741
- errors: []
8742
- };
8743
- }
8744
- logger.debug("[get_user_response] No cached response found, proceeding with LLM providers");
8634
+ var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
8745
8635
  const providers = llmProviders || getLLMProviders();
8746
8636
  const errors = [];
8747
- const providerOrder = providers.join(", ");
8748
- logCollector?.info(`LLM Provider order: [${providerOrder}]`);
8749
- if (conversationHistory && conversationHistory.length > 0) {
8750
- const exchangeCount = conversationHistory.split("\n").filter((l) => l.startsWith("Q")).length;
8751
- logger.debug(`[get_user_response] Using conversation history with ${exchangeCount} previous exchanges`);
8752
- logCollector?.info(`Using conversation history with ${exchangeCount} previous exchanges`);
8753
- } else {
8754
- logger.debug("[get_user_response] No conversation history available");
8755
- }
8637
+ logger.info(`[get_user_response] LLM Provider order: [${providers.join(", ")}]`);
8756
8638
  for (let i = 0; i < providers.length; i++) {
8757
8639
  const provider = providers[i];
8758
8640
  const isLastProvider = i === providers.length - 1;
8759
- const attemptMsg = `Attempting provider: ${provider} (${i + 1}/${providers.length})`;
8760
- logCollector?.info(attemptMsg);
8641
+ logger.info(`[get_user_response] Attempting provider: ${provider} (${i + 1}/${providers.length})`);
8761
8642
  let result;
8762
8643
  if (provider === "anthropic") {
8763
- result = await useAnthropicMethod(prompt, components, anthropicApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8644
+ result = await useAnthropicMethod(prompt, components, anthropicApiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8764
8645
  } else if (provider === "groq") {
8765
- result = await useGroqMethod(prompt, components, groqApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8646
+ result = await useGroqMethod(prompt, components, groqApiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8766
8647
  } else if (provider === "gemini") {
8767
- result = await useGeminiMethod(prompt, components, geminiApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8648
+ result = await useGeminiMethod(prompt, components, geminiApiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8768
8649
  } else if (provider === "openai") {
8769
- result = await useOpenAIMethod(prompt, components, openaiApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8650
+ result = await useOpenAIMethod(prompt, components, openaiApiKey, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
8770
8651
  } else {
8771
8652
  logger.warn(`[get_user_response] Unknown provider: ${provider} - skipping`);
8772
8653
  errors.push(`Unknown provider: ${provider}`);
8773
8654
  continue;
8774
8655
  }
8775
8656
  if (result.success) {
8776
- const successMsg = `Success with provider: ${provider}`;
8777
- logger.info(`${successMsg}`);
8778
- logCollector?.info(successMsg);
8657
+ logger.info(`[get_user_response] Success with provider: ${provider}`);
8779
8658
  return result;
8780
8659
  } else {
8781
8660
  const providerErrors = result.errors.map((err) => `${provider}: ${err}`);
8782
8661
  errors.push(...providerErrors);
8783
- const warnMsg = `Provider ${provider} returned unsuccessful result: ${result.errors.join(", ")}`;
8784
- logger.warn(`[get_user_response] ${warnMsg}`);
8785
- logCollector?.warn(warnMsg);
8662
+ logger.warn(`[get_user_response] Provider ${provider} returned unsuccessful result: ${result.errors.join(", ")}`);
8786
8663
  if (!isLastProvider) {
8787
- const fallbackMsg = "Falling back to next provider...";
8788
- logger.info(`[get_user_response] ${fallbackMsg}`);
8789
- logCollector?.info(fallbackMsg);
8664
+ logger.info("[get_user_response] Falling back to next provider...");
8790
8665
  }
8791
8666
  }
8792
8667
  }
8793
- const failureMsg = `All LLM providers failed`;
8794
- logger.error(`[get_user_response] ${failureMsg}. Errors: ${errors.join("; ")}`);
8795
- logCollector?.error(`${failureMsg}. Errors: ${errors.join("; ")}`);
8668
+ logger.error(`[get_user_response] All LLM providers failed. Errors: ${errors.join("; ")}`);
8796
8669
  return {
8797
8670
  success: false,
8798
8671
  errors
8799
8672
  };
8800
8673
  };
8801
8674
 
8802
- // src/utils/log-collector.ts
8803
- var LOG_LEVEL_PRIORITY2 = {
8804
- errors: 0,
8805
- warnings: 1,
8806
- info: 2,
8807
- verbose: 3
8808
- };
8809
- var MESSAGE_LEVEL_PRIORITY2 = {
8810
- error: 0,
8811
- warn: 1,
8812
- info: 2,
8813
- debug: 3
8814
- };
8815
- var UILogCollector = class {
8816
- constructor(clientId, sendMessage, uiBlockId) {
8817
- this.logs = [];
8818
- this.uiBlockId = uiBlockId || null;
8819
- this.clientId = clientId;
8820
- this.sendMessage = sendMessage;
8821
- this.currentLogLevel = logger.getLogLevel();
8822
- }
8823
- /**
8824
- * Check if logging is enabled (uiBlockId is provided)
8825
- */
8826
- isEnabled() {
8827
- return this.uiBlockId !== null;
8828
- }
8829
- /**
8830
- * Check if a message should be logged based on current log level
8831
- */
8832
- shouldLog(messageLevel) {
8833
- const currentLevelPriority = LOG_LEVEL_PRIORITY2[this.currentLogLevel];
8834
- const messagePriority = MESSAGE_LEVEL_PRIORITY2[messageLevel];
8835
- return messagePriority <= currentLevelPriority;
8836
- }
8837
- /**
8838
- * Add a log entry with timestamp and immediately send to runtime
8839
- * Only logs that pass the log level filter are captured and sent
8840
- */
8841
- addLog(level, message, type, data) {
8842
- if (!this.shouldLog(level)) {
8843
- return;
8844
- }
8845
- const log = {
8846
- timestamp: Date.now(),
8847
- level,
8848
- message,
8849
- ...type && { type },
8850
- ...data && { data }
8851
- };
8852
- this.logs.push(log);
8853
- this.sendLogImmediately(log);
8854
- switch (level) {
8855
- case "error":
8856
- logger.error("UILogCollector:", log);
8857
- break;
8858
- case "warn":
8859
- logger.warn("UILogCollector:", log);
8860
- break;
8861
- case "info":
8862
- logger.info("UILogCollector:", log);
8863
- break;
8864
- case "debug":
8865
- logger.debug("UILogCollector:", log);
8866
- break;
8867
- }
8868
- }
8869
- /**
8870
- * Send a single log to runtime immediately
8871
- */
8872
- sendLogImmediately(log) {
8873
- if (!this.isEnabled()) {
8874
- return;
8875
- }
8876
- const response = {
8877
- id: this.uiBlockId,
8878
- type: "UI_LOGS",
8879
- from: { type: "data-agent" },
8880
- to: {
8881
- type: "runtime",
8882
- id: this.clientId
8883
- },
8884
- payload: {
8885
- logs: [log]
8886
- // Send single log in array
8887
- }
8888
- };
8889
- this.sendMessage(response);
8890
- }
8891
- /**
8892
- * Log info message
8893
- */
8894
- info(message, type, data) {
8895
- if (this.isEnabled()) {
8896
- this.addLog("info", message, type, data);
8897
- }
8898
- }
8899
- /**
8900
- * Log error message
8901
- */
8902
- error(message, type, data) {
8903
- if (this.isEnabled()) {
8904
- this.addLog("error", message, type, data);
8905
- }
8906
- }
8907
- /**
8908
- * Log warning message
8909
- */
8910
- warn(message, type, data) {
8911
- if (this.isEnabled()) {
8912
- this.addLog("warn", message, type, data);
8913
- }
8914
- }
8915
- /**
8916
- * Log debug message
8917
- */
8918
- debug(message, type, data) {
8919
- if (this.isEnabled()) {
8920
- this.addLog("debug", message, type, data);
8921
- }
8922
- }
8923
- /**
8924
- * Log LLM explanation with typed metadata
8925
- */
8926
- logExplanation(message, explanation, data) {
8927
- if (this.isEnabled()) {
8928
- this.addLog("info", message, "explanation", {
8929
- explanation,
8930
- ...data
8931
- });
8932
- }
8933
- }
8934
- /**
8935
- * Log generated query with typed metadata
8936
- */
8937
- logQuery(message, query, data) {
8938
- if (this.isEnabled()) {
8939
- this.addLog("info", message, "query", {
8940
- query,
8941
- ...data
8942
- });
8943
- }
8944
- }
8945
- /**
8946
- * Send all collected logs at once (optional, for final summary)
8947
- */
8948
- sendAllLogs() {
8949
- if (!this.isEnabled() || this.logs.length === 0) {
8950
- return;
8951
- }
8952
- const response = {
8953
- id: this.uiBlockId,
8954
- type: "UI_LOGS",
8955
- from: { type: "data-agent" },
8956
- to: {
8957
- type: "runtime",
8958
- id: this.clientId
8959
- },
8960
- payload: {
8961
- logs: this.logs
8962
- }
8963
- };
8964
- this.sendMessage(response);
8965
- }
8966
- /**
8967
- * Get all collected logs
8968
- */
8969
- getLogs() {
8970
- return [...this.logs];
8971
- }
8972
- /**
8973
- * Clear all logs
8974
- */
8975
- clearLogs() {
8976
- this.logs = [];
8977
- }
8978
- /**
8979
- * Set uiBlockId (in case it's provided later)
8980
- */
8981
- setUIBlockId(uiBlockId) {
8982
- this.uiBlockId = uiBlockId;
8983
- }
8984
- };
8985
-
8986
8675
  // src/utils/conversation-saver.ts
8987
8676
  function transformUIBlockForDB(uiblock, userPrompt, uiBlockId) {
8988
8677
  const component = uiblock?.generatedComponentMetadata && Object.keys(uiblock.generatedComponentMetadata).length > 0 ? uiblock.generatedComponentMetadata : null;
@@ -9113,7 +8802,6 @@ var CONTEXT_CONFIG = {
9113
8802
  // src/handlers/user-prompt-request.ts
9114
8803
  var get_user_request = async (data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, externalTools) => {
9115
8804
  const errors = [];
9116
- logger.debug("[USER_PROMPT_REQ] Parsing incoming message data");
9117
8805
  const parseResult = UserPromptRequestMessageSchema.safeParse(data);
9118
8806
  if (!parseResult.success) {
9119
8807
  const zodError = parseResult.error;
@@ -9145,27 +8833,23 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
9145
8833
  if (!prompt) {
9146
8834
  errors.push("Prompt not found");
9147
8835
  }
9148
- logger.debug(`[REQUEST ${id}] Full request details - uiBlockId: ${existingUiBlockId}, threadId: ${threadId}, prompt: ${prompt}`);
9149
8836
  if (errors.length > 0) {
9150
8837
  return { success: false, errors, id, wsId };
9151
8838
  }
9152
- const logCollector = new UILogCollector(wsId, sendMessage, existingUiBlockId);
9153
8839
  const threadManager = ThreadManager.getInstance();
9154
8840
  let thread = threadManager.getThread(threadId);
9155
8841
  if (!thread) {
9156
8842
  thread = threadManager.createThread(threadId);
9157
8843
  logger.info(`Created new thread: ${threadId}`);
9158
8844
  }
9159
- logCollector.info(`Starting user prompt request with ${components.length} components`);
8845
+ logger.info(`Starting user prompt request with ${components.length} components`);
9160
8846
  const conversationHistory = thread.getConversationContext(CONTEXT_CONFIG.MAX_CONVERSATION_CONTEXT_BLOCKS, existingUiBlockId);
9161
8847
  const responseMode = payload.responseMode || "component";
9162
- logger.info("responseMode", responseMode);
9163
8848
  let streamCallback;
9164
8849
  let accumulatedStreamResponse = "";
9165
8850
  if (responseMode === "text") {
9166
8851
  streamCallback = (chunk) => {
9167
8852
  accumulatedStreamResponse += chunk;
9168
- logger.debug(`[STREAM] Sending chunk (${chunk.length} chars): "${chunk.substring(0, 20)}..."`);
9169
8853
  const streamMessage = {
9170
8854
  id: `stream_${existingUiBlockId}`,
9171
8855
  // Different ID pattern for streaming
@@ -9181,7 +8865,6 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
9181
8865
  }
9182
8866
  };
9183
8867
  sendMessage(streamMessage);
9184
- logger.debug(`[STREAM] Chunk sent to wsId: ${wsId}`);
9185
8868
  };
9186
8869
  }
9187
8870
  const userResponse = await get_user_response(
@@ -9192,7 +8875,6 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
9192
8875
  geminiApiKey,
9193
8876
  openaiApiKey,
9194
8877
  llmProviders,
9195
- logCollector,
9196
8878
  conversationHistory,
9197
8879
  responseMode,
9198
8880
  streamCallback,
@@ -9200,7 +8882,7 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
9200
8882
  externalTools,
9201
8883
  userId
9202
8884
  );
9203
- logCollector.info("User prompt request completed");
8885
+ logger.info("User prompt request completed");
9204
8886
  const uiBlockId = existingUiBlockId;
9205
8887
  if (!userResponse.success) {
9206
8888
  logger.error(`User prompt request failed with errors: ${userResponse.errors.join(", ")}`);
@@ -9267,9 +8949,6 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
9267
8949
  logger.info(
9268
8950
  `Skipping conversation save - response from exact semantic match (${(semanticSimilarity * 100).toFixed(2)}% similarity)`
9269
8951
  );
9270
- logCollector.info(
9271
- `Using exact cached result (${(semanticSimilarity * 100).toFixed(2)}% match) - not saving duplicate conversation`
9272
- );
9273
8952
  } else {
9274
8953
  const uiBlockData = uiBlock.toJSON();
9275
8954
  const saveResult = await saveConversation({
@@ -9477,7 +9156,7 @@ function sendResponse(id, res, sendMessage, clientId) {
9477
9156
  }
9478
9157
 
9479
9158
  // src/userResponse/next-questions.ts
9480
- async function generateNextQuestions(originalUserPrompt, component, componentData, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, logCollector, conversationHistory) {
9159
+ async function generateNextQuestions(originalUserPrompt, component, componentData, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, conversationHistory) {
9481
9160
  try {
9482
9161
  logger.debug("[generateNextQuestions] Starting next questions generation");
9483
9162
  logger.debug(`[generateNextQuestions] User prompt: "${originalUserPrompt?.substring(0, 50)}..."`);
@@ -9496,7 +9175,6 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
9496
9175
  const isLastProvider = i === providers.length - 1;
9497
9176
  try {
9498
9177
  logger.info(`[generateNextQuestions] Attempting provider: ${provider} (${i + 1}/${providers.length})`);
9499
- logCollector?.info(`Generating questions with ${provider}...`);
9500
9178
  let result = [];
9501
9179
  if (provider === "groq") {
9502
9180
  logger.debug("[generateNextQuestions] Using Groq LLM for next questions");
@@ -9505,7 +9183,6 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
9505
9183
  component,
9506
9184
  componentData,
9507
9185
  groqApiKey,
9508
- logCollector,
9509
9186
  conversationHistory
9510
9187
  );
9511
9188
  } else if (provider === "gemini") {
@@ -9515,7 +9192,6 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
9515
9192
  component,
9516
9193
  componentData,
9517
9194
  geminiApiKey,
9518
- logCollector,
9519
9195
  conversationHistory
9520
9196
  );
9521
9197
  } else if (provider === "openai") {
@@ -9525,7 +9201,6 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
9525
9201
  component,
9526
9202
  componentData,
9527
9203
  openaiApiKey,
9528
- logCollector,
9529
9204
  conversationHistory
9530
9205
  );
9531
9206
  } else {
@@ -9535,44 +9210,32 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
9535
9210
  component,
9536
9211
  componentData,
9537
9212
  anthropicApiKey,
9538
- logCollector,
9539
9213
  conversationHistory
9540
9214
  );
9541
9215
  }
9542
9216
  if (result && result.length > 0) {
9543
9217
  logger.info(`[generateNextQuestions] Successfully generated ${result.length} questions with ${provider}`);
9544
9218
  logger.debug(`[generateNextQuestions] Questions: ${JSON.stringify(result)}`);
9545
- logCollector?.info(`Generated ${result.length} follow-up questions`);
9546
9219
  return result;
9547
9220
  }
9548
- const warnMsg = `No questions generated from ${provider}${!isLastProvider ? ", trying next provider..." : ""}`;
9549
- logger.warn(`[generateNextQuestions] ${warnMsg}`);
9550
- if (!isLastProvider) {
9551
- logCollector?.warn(warnMsg);
9552
- }
9221
+ logger.warn(`[generateNextQuestions] No questions generated from ${provider}${!isLastProvider ? ", trying next provider..." : ""}`);
9553
9222
  } catch (providerError) {
9554
9223
  const errorMsg = providerError instanceof Error ? providerError.message : String(providerError);
9555
9224
  logger.error(`[generateNextQuestions] Provider ${provider} failed: ${errorMsg}`);
9556
9225
  logger.debug(`[generateNextQuestions] Provider error details:`, providerError);
9557
9226
  if (!isLastProvider) {
9558
- const fallbackMsg = `Provider ${provider} failed, trying next provider...`;
9559
- logger.info(`[generateNextQuestions] ${fallbackMsg}`);
9560
- logCollector?.warn(fallbackMsg);
9561
- } else {
9562
- logCollector?.error(`Failed to generate questions with ${provider}`);
9227
+ logger.info(`[generateNextQuestions] Provider ${provider} failed, trying next provider...`);
9563
9228
  }
9564
9229
  continue;
9565
9230
  }
9566
9231
  }
9567
9232
  logger.warn("[generateNextQuestions] All providers failed or returned no questions");
9568
- logCollector?.warn("Unable to generate follow-up questions");
9569
9233
  return [];
9570
9234
  } catch (error) {
9571
9235
  const errorMsg = error instanceof Error ? error.message : String(error);
9572
9236
  const errorStack = error instanceof Error ? error.stack : void 0;
9573
9237
  logger.error(`[generateNextQuestions] Error generating next questions: ${errorMsg}`);
9574
9238
  logger.debug("[generateNextQuestions] Error stack trace:", errorStack);
9575
- logCollector?.error(`Error generating next questions: ${errorMsg}`);
9576
9239
  return [];
9577
9240
  }
9578
9241
  }
@@ -9620,9 +9283,6 @@ async function handleActionsRequest(data, sendMessage, anthropicApiKey, groqApiK
9620
9283
  return;
9621
9284
  }
9622
9285
  logger.info(`[ACTIONS_REQ ${id}] UIBlock retrieved successfully`);
9623
- logger.debug(`[ACTIONS_REQ ${id}] Creating UILogCollector for uiBlockId: ${uiBlockId}`);
9624
- const logCollector = new UILogCollector(wsId, sendMessage, uiBlockId);
9625
- logger.info(`[ACTIONS_REQ ${id}] UILogCollector initialized`);
9626
9286
  logger.debug(`[ACTIONS_REQ ${id}] Extracting data from UIBlock`);
9627
9287
  const userQuestion = uiBlock.getUserQuestion();
9628
9288
  const component = uiBlock.getComponentMetadata();
@@ -9636,13 +9296,11 @@ async function handleActionsRequest(data, sendMessage, anthropicApiKey, groqApiK
9636
9296
  logger.info(`[ACTIONS_REQ ${id}] Conversation history extracted: ${historyLineCount} lines`);
9637
9297
  logger.debug(`[ACTIONS_REQ ${id}] Conversation history preview:
9638
9298
  ${conversationHistory.substring(0, 200)}...`);
9639
- logCollector.info(`Generating actions for UIBlock: ${uiBlockId}`);
9640
- logger.info(`[ACTIONS_REQ ${id}] Generating actions for component: ${component?.name || "unknown"}`);
9299
+ logger.info(`[ACTIONS_REQ ${id}] Generating actions for UIBlock: ${uiBlockId}, component: ${component?.name || "unknown"}`);
9641
9300
  logger.debug(`[ACTIONS_REQ ${id}] Checking if actions are already cached`);
9642
9301
  const startTime = Date.now();
9643
9302
  const actions = await uiBlock.getOrFetchActions(async () => {
9644
9303
  logger.info(`[ACTIONS_REQ ${id}] Actions not cached, generating new actions...`);
9645
- logCollector.info("Generating follow-up questions...");
9646
9304
  logger.info(`[ACTIONS_REQ ${id}] Starting next questions generation with ${llmProviders?.join(", ") || "default"} providers`);
9647
9305
  const nextQuestions = await generateNextQuestions(
9648
9306
  userQuestion,
@@ -9653,7 +9311,6 @@ ${conversationHistory.substring(0, 200)}...`);
9653
9311
  geminiApiKey,
9654
9312
  openaiApiKey,
9655
9313
  llmProviders,
9656
- logCollector,
9657
9314
  conversationHistory
9658
9315
  );
9659
9316
  logger.info(`[ACTIONS_REQ ${id}] Generated ${nextQuestions.length} questions`);
@@ -9671,11 +9328,10 @@ ${conversationHistory.substring(0, 200)}...`);
9671
9328
  const processingTime = Date.now() - startTime;
9672
9329
  logger.info(`[ACTIONS_REQ ${id}] Actions retrieved in ${processingTime}ms - ${actions.length} actions total`);
9673
9330
  if (actions.length > 0) {
9674
- logCollector.info(`Generated ${actions.length} follow-up questions successfully`);
9331
+ logger.info(`[ACTIONS_REQ ${id}] Generated ${actions.length} follow-up questions successfully`);
9675
9332
  logger.debug(`[ACTIONS_REQ ${id}] Actions: ${actions.map((a) => a.name).join(", ")}`);
9676
9333
  } else {
9677
9334
  logger.warn(`[ACTIONS_REQ ${id}] No actions generated`);
9678
- logCollector.warn("No follow-up questions could be generated");
9679
9335
  }
9680
9336
  logger.debug(`[ACTIONS_REQ ${id}] Sending successful response to client`);
9681
9337
  sendResponse2(id, {
@@ -9694,15 +9350,6 @@ ${conversationHistory.substring(0, 200)}...`);
9694
9350
  const errorStack = error instanceof Error ? error.stack : void 0;
9695
9351
  logger.error(`[ACTIONS_REQ] Failed to handle actions request: ${errorMessage}`);
9696
9352
  logger.debug(`[ACTIONS_REQ] Error stack trace:`, errorStack);
9697
- try {
9698
- const parsedData = data;
9699
- if (parsedData?.id && parsedData?.from?.id) {
9700
- const logCollector = parsedData?.payload?.SA_RUNTIME?.uiBlockId ? new UILogCollector(parsedData.from.id, sendMessage, parsedData.payload.SA_RUNTIME.uiBlockId) : void 0;
9701
- logCollector?.error(`Failed to generate actions: ${errorMessage}`);
9702
- }
9703
- } catch (logError) {
9704
- logger.debug("[ACTIONS_REQ] Failed to send error logs to UI:", logError);
9705
- }
9706
9353
  sendResponse2(null, {
9707
9354
  success: false,
9708
9355
  error: errorMessage
@@ -10283,7 +9930,6 @@ function sendResponse3(id, res, sendMessage, clientId) {
10283
9930
  var dashboardManager = null;
10284
9931
  function setDashboardManager(manager) {
10285
9932
  dashboardManager = manager;
10286
- logger.info("DashboardManager instance set");
10287
9933
  }
10288
9934
  function getDashboardManager() {
10289
9935
  if (!dashboardManager) {
@@ -13610,6 +13256,190 @@ var ReportManager = class {
13610
13256
  }
13611
13257
  };
13612
13258
 
13259
+ // src/utils/log-collector.ts
13260
+ var LOG_LEVEL_PRIORITY2 = {
13261
+ errors: 0,
13262
+ warnings: 1,
13263
+ info: 2,
13264
+ verbose: 3
13265
+ };
13266
+ var MESSAGE_LEVEL_PRIORITY2 = {
13267
+ error: 0,
13268
+ warn: 1,
13269
+ info: 2,
13270
+ debug: 3
13271
+ };
13272
+ var UILogCollector = class {
13273
+ constructor(clientId, sendMessage, uiBlockId) {
13274
+ this.logs = [];
13275
+ this.uiBlockId = uiBlockId || null;
13276
+ this.clientId = clientId;
13277
+ this.sendMessage = sendMessage;
13278
+ this.currentLogLevel = logger.getLogLevel();
13279
+ }
13280
+ /**
13281
+ * Check if logging is enabled (uiBlockId is provided)
13282
+ */
13283
+ isEnabled() {
13284
+ return this.uiBlockId !== null;
13285
+ }
13286
+ /**
13287
+ * Check if a message should be logged based on current log level
13288
+ */
13289
+ shouldLog(messageLevel) {
13290
+ const currentLevelPriority = LOG_LEVEL_PRIORITY2[this.currentLogLevel];
13291
+ const messagePriority = MESSAGE_LEVEL_PRIORITY2[messageLevel];
13292
+ return messagePriority <= currentLevelPriority;
13293
+ }
13294
+ /**
13295
+ * Add a log entry with timestamp and immediately send to runtime
13296
+ * Only logs that pass the log level filter are captured and sent
13297
+ */
13298
+ addLog(level, message, type, data) {
13299
+ if (!this.shouldLog(level)) {
13300
+ return;
13301
+ }
13302
+ const log = {
13303
+ timestamp: Date.now(),
13304
+ level,
13305
+ message,
13306
+ ...type && { type },
13307
+ ...data && { data }
13308
+ };
13309
+ this.logs.push(log);
13310
+ this.sendLogImmediately(log);
13311
+ switch (level) {
13312
+ case "error":
13313
+ logger.error("UILogCollector:", log);
13314
+ break;
13315
+ case "warn":
13316
+ logger.warn("UILogCollector:", log);
13317
+ break;
13318
+ case "info":
13319
+ logger.info("UILogCollector:", log);
13320
+ break;
13321
+ case "debug":
13322
+ logger.debug("UILogCollector:", log);
13323
+ break;
13324
+ }
13325
+ }
13326
+ /**
13327
+ * Send a single log to runtime immediately
13328
+ */
13329
+ sendLogImmediately(log) {
13330
+ if (!this.isEnabled()) {
13331
+ return;
13332
+ }
13333
+ const response = {
13334
+ id: this.uiBlockId,
13335
+ type: "UI_LOGS",
13336
+ from: { type: "data-agent" },
13337
+ to: {
13338
+ type: "runtime",
13339
+ id: this.clientId
13340
+ },
13341
+ payload: {
13342
+ logs: [log]
13343
+ // Send single log in array
13344
+ }
13345
+ };
13346
+ this.sendMessage(response);
13347
+ }
13348
+ /**
13349
+ * Log info message
13350
+ */
13351
+ info(message, type, data) {
13352
+ if (this.isEnabled()) {
13353
+ this.addLog("info", message, type, data);
13354
+ }
13355
+ }
13356
+ /**
13357
+ * Log error message
13358
+ */
13359
+ error(message, type, data) {
13360
+ if (this.isEnabled()) {
13361
+ this.addLog("error", message, type, data);
13362
+ }
13363
+ }
13364
+ /**
13365
+ * Log warning message
13366
+ */
13367
+ warn(message, type, data) {
13368
+ if (this.isEnabled()) {
13369
+ this.addLog("warn", message, type, data);
13370
+ }
13371
+ }
13372
+ /**
13373
+ * Log debug message
13374
+ */
13375
+ debug(message, type, data) {
13376
+ if (this.isEnabled()) {
13377
+ this.addLog("debug", message, type, data);
13378
+ }
13379
+ }
13380
+ /**
13381
+ * Log LLM explanation with typed metadata
13382
+ */
13383
+ logExplanation(message, explanation, data) {
13384
+ if (this.isEnabled()) {
13385
+ this.addLog("info", message, "explanation", {
13386
+ explanation,
13387
+ ...data
13388
+ });
13389
+ }
13390
+ }
13391
+ /**
13392
+ * Log generated query with typed metadata
13393
+ */
13394
+ logQuery(message, query, data) {
13395
+ if (this.isEnabled()) {
13396
+ this.addLog("info", message, "query", {
13397
+ query,
13398
+ ...data
13399
+ });
13400
+ }
13401
+ }
13402
+ /**
13403
+ * Send all collected logs at once (optional, for final summary)
13404
+ */
13405
+ sendAllLogs() {
13406
+ if (!this.isEnabled() || this.logs.length === 0) {
13407
+ return;
13408
+ }
13409
+ const response = {
13410
+ id: this.uiBlockId,
13411
+ type: "UI_LOGS",
13412
+ from: { type: "data-agent" },
13413
+ to: {
13414
+ type: "runtime",
13415
+ id: this.clientId
13416
+ },
13417
+ payload: {
13418
+ logs: this.logs
13419
+ }
13420
+ };
13421
+ this.sendMessage(response);
13422
+ }
13423
+ /**
13424
+ * Get all collected logs
13425
+ */
13426
+ getLogs() {
13427
+ return [...this.logs];
13428
+ }
13429
+ /**
13430
+ * Clear all logs
13431
+ */
13432
+ clearLogs() {
13433
+ this.logs = [];
13434
+ }
13435
+ /**
13436
+ * Set uiBlockId (in case it's provided later)
13437
+ */
13438
+ setUIBlockId(uiBlockId) {
13439
+ this.uiBlockId = uiBlockId;
13440
+ }
13441
+ };
13442
+
13613
13443
  // src/services/cleanup-service.ts
13614
13444
  var CleanupService = class _CleanupService {
13615
13445
  constructor() {
@@ -13790,7 +13620,6 @@ var CleanupService = class _CleanupService {
13790
13620
  };
13791
13621
 
13792
13622
  // src/index.ts
13793
- var SDK_VERSION = "0.0.8";
13794
13623
  var DEFAULT_WS_URL = "wss://ws.superatom.ai/websocket";
13795
13624
  var SuperatomSDK = class {
13796
13625
  // 3.5 minutes (PING_INTERVAL + 30s grace)
@@ -13831,7 +13660,7 @@ var SuperatomSDK = class {
13831
13660
  if (config.queryCacheTTL !== void 0) {
13832
13661
  queryCache.setTTL(config.queryCacheTTL);
13833
13662
  }
13834
- logger.info(`Initializing Superatom SDK v${SDK_VERSION} for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, database type: ${this.databaseType}, model strategy: ${this.modelStrategy}, conversation similarity threshold: ${this.conversationSimilarityThreshold}, query cache TTL: ${queryCache.getTTL()} minutes`);
13663
+ logger.info(`Initializing Superatom SDK for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, database type: ${this.databaseType}, model strategy: ${this.modelStrategy}, query cache TTL: ${queryCache.getTTL()} minutes`);
13835
13664
  this.userManager = new UserManager(this.projectId, 5e3);
13836
13665
  this.dashboardManager = new DashboardManager(this.projectId);
13837
13666
  this.reportManager = new ReportManager(this.projectId);
@@ -13885,7 +13714,6 @@ var SuperatomSDK = class {
13885
13714
  */
13886
13715
  initializeDashboardManager() {
13887
13716
  setDashboardManager(this.dashboardManager);
13888
- logger.info(`DashboardManager initialized for project: ${this.projectId}`);
13889
13717
  }
13890
13718
  /**
13891
13719
  * Get the DashboardManager instance for this SDK
@@ -13898,7 +13726,6 @@ var SuperatomSDK = class {
13898
13726
  */
13899
13727
  initializeReportManager() {
13900
13728
  setReportManager(this.reportManager);
13901
- logger.info(`ReportManager initialized for project: ${this.projectId}`);
13902
13729
  }
13903
13730
  /**
13904
13731
  * Get the ReportManager instance for this SDK
@@ -14276,7 +14103,6 @@ var SuperatomSDK = class {
14276
14103
  CONTEXT_CONFIG,
14277
14104
  CleanupService,
14278
14105
  LLM,
14279
- SDK_VERSION,
14280
14106
  STORAGE_CONFIG,
14281
14107
  SuperatomSDK,
14282
14108
  Thread,