@superatomai/sdk-node 0.0.1 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1883,53 +1883,126 @@ var import_fs2 = __toESM(require("fs"));
  var import_path2 = __toESM(require("path"));
  var PromptLoader = class {
  constructor(config) {
+ this.promptCache = /* @__PURE__ */ new Map();
+ this.isInitialized = false;
+ logger.debug("Initializing PromptLoader...", process.cwd());
  this.promptsDir = config?.promptsDir || import_path2.default.join(process.cwd(), ".prompts");
+ this.defaultPromptsDir = import_path2.default.join(__dirname, "..", "..", ".prompts");
  }
  /**
- * Load a single prompt file and replace variables using {{VARIABLE_NAME}} pattern
+ * Initialize and cache all prompts into memory
+ * This should be called once at SDK startup
+ */
+ async initialize() {
+ if (this.isInitialized) {
+ logger.debug("PromptLoader already initialized, skipping...");
+ return;
+ }
+ logger.info("Loading prompts into memory...");
+ const promptTypes = [
+ "classify",
+ "match-component",
+ "modify-props",
+ "single-component",
+ "mutli-component",
+ "actions",
+ "container-metadata"
+ ];
+ for (const promptType of promptTypes) {
+ try {
+ const template = await this.loadPromptTemplate(promptType);
+ this.promptCache.set(promptType, template);
+ logger.debug(`Cached prompt: ${promptType}`);
+ } catch (error) {
+ logger.error(`Failed to load prompt '${promptType}':`, error);
+ throw error;
+ }
+ }
+ this.isInitialized = true;
+ logger.info(`Successfully loaded ${this.promptCache.size} prompt templates into memory`);
+ }
+ /**
+ * Load a prompt template from file system (tries custom dir first, then defaults to SDK dir)
  * @param promptName - Name of the prompt folder
- * @param promptType - Type of prompt ('system' or 'user')
- * @param variables - Variables to replace in the template
- * @returns Processed prompt string
+ * @returns Template with system and user prompts
  */
- async loadPrompt(promptName, promptType, variables) {
- try {
- const promptPath = import_path2.default.join(
- this.promptsDir,
- promptName,
- `${promptType}.md`
- );
- let content = import_fs2.default.readFileSync(promptPath, "utf-8");
- for (const [key, value] of Object.entries(variables)) {
- const pattern = new RegExp(`{{${key}}}`, "g");
- const replacementValue = typeof value === "string" ? value : JSON.stringify(value);
- content = content.replace(pattern, replacementValue);
+ async loadPromptTemplate(promptName) {
+ const tryLoadFromDir = (dir) => {
+ try {
+ const systemPath = import_path2.default.join(dir, promptName, "system.md");
+ const userPath = import_path2.default.join(dir, promptName, "user.md");
+ if (import_fs2.default.existsSync(systemPath) && import_fs2.default.existsSync(userPath)) {
+ const system = import_fs2.default.readFileSync(systemPath, "utf-8");
+ const user = import_fs2.default.readFileSync(userPath, "utf-8");
+ logger.debug(`Loaded prompt '${promptName}' from ${dir}`);
+ return { system, user };
+ }
+ return null;
+ } catch (error) {
+ return null;
  }
- return content;
- } catch (error) {
- console.error(`Error loading prompt '${promptName}/${promptType}.md':`, error);
- throw error;
+ };
+ let template = tryLoadFromDir(this.promptsDir);
+ if (!template) {
+ logger.warn(`Prompt '${promptName}' not found in ${this.promptsDir}, trying default location...`);
+ template = tryLoadFromDir(this.defaultPromptsDir);
+ }
+ if (!template) {
+ throw new Error(`Prompt template '${promptName}' not found in either ${this.promptsDir} or ${this.defaultPromptsDir}`);
  }
+ return template;
  }
  /**
- * Load both system and user prompts and replace variables
+ * Replace variables in a template string using {{VARIABLE_NAME}} pattern
+ * @param template - Template string with placeholders
+ * @param variables - Variables to replace in the template
+ * @returns Processed string
+ */
+ replaceVariables(template, variables) {
+ let content = template;
+ for (const [key, value] of Object.entries(variables)) {
+ const pattern = new RegExp(`{{${key}}}`, "g");
+ const replacementValue = typeof value === "string" ? value : JSON.stringify(value);
+ content = content.replace(pattern, replacementValue);
+ }
+ return content;
+ }
+ /**
+ * Load both system and user prompts from cache and replace variables
  * @param promptName - Name of the prompt folder
  * @param variables - Variables to replace in the templates
  * @returns Object containing both system and user prompts
  */
  async loadPrompts(promptName, variables) {
- const [system, user] = await Promise.all([
- this.loadPrompt(promptName, "system", variables),
- this.loadPrompt(promptName, "user", variables)
- ]);
- return { system, user };
+ if (!this.isInitialized) {
+ logger.warn("PromptLoader not initialized, loading prompts on-demand (not recommended)");
+ await this.initialize();
+ }
+ const template = this.promptCache.get(promptName);
+ if (!template) {
+ throw new Error(`Prompt template '${promptName}' not found in cache. Available prompts: ${Array.from(this.promptCache.keys()).join(", ")}`);
+ }
+ return {
+ system: this.replaceVariables(template.system, variables),
+ user: this.replaceVariables(template.user, variables)
+ };
  }
  /**
- * Set custom prompts directory
+ * DEPRECATED: Use loadPrompts instead
+ * Load a single prompt file and replace variables using {{VARIABLE_NAME}} pattern
+ */
+ async loadPrompt(promptName, promptType, variables) {
+ const prompts = await this.loadPrompts(promptName, variables);
+ return promptType === "system" ? prompts.system : prompts.user;
+ }
+ /**
+ * Set custom prompts directory (requires re-initialization)
  * @param dir - Path to the prompts directory
  */
  setPromptsDir(dir) {
  this.promptsDir = dir;
+ this.isInitialized = false;
+ this.promptCache.clear();
  }
  /**
  * Get current prompts directory
@@ -1938,8 +2011,23 @@ var PromptLoader = class {
  getPromptsDir() {
  return this.promptsDir;
  }
+ /**
+ * Check if prompts are loaded in memory
+ */
+ isReady() {
+ return this.isInitialized;
+ }
+ /**
+ * Get the number of cached prompts
+ */
+ getCacheSize() {
+ return this.promptCache.size;
+ }
  };
- var promptLoader = new PromptLoader();
+ var defaultPromptsPath = process.env.PROMPTS_DIR || import_path2.default.join(process.cwd(), ".prompts");
+ var promptLoader = new PromptLoader({
+ promptsDir: defaultPromptsPath
+ });

  // src/llm.ts
  var import_sdk = __toESM(require("@anthropic-ai/sdk"));
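The net effect of the two hunks above is that prompt templates are read from disk once (the custom `.prompts` directory first, then the SDK's bundled defaults, with a `PROMPTS_DIR` environment override for the module singleton) and every later call is a cache lookup plus `{{VARIABLE_NAME}}` substitution. A minimal usage sketch of that flow; the class is module-internal in this bundle, so the import shown here is illustrative, not a confirmed export:

```js
const path = require("path");
// Hypothetical import path; in the published bundle PromptLoader lives inside dist/index.js.
const { PromptLoader } = require("@superatomai/sdk-node");

async function main() {
  // Point the loader at a custom directory; it falls back to the SDK's bundled .prompts.
  const loader = new PromptLoader({ promptsDir: path.join(process.cwd(), ".prompts") });

  // Reads system.md/user.md for every known prompt type once and caches them in memory.
  await loader.initialize();
  console.log(loader.isReady(), loader.getCacheSize()); // true, 7 (if all templates are found)

  // Served from the in-memory cache; only {{VARIABLE_NAME}} substitution happens here.
  const { system, user } = await loader.loadPrompts("classify", {
    USER_PROMPT: "Show revenue by month",
    CONVERSATION_HISTORY: "No previous conversation"
  });
  return { system, user };
}

main().catch(console.error);
```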
@@ -1977,7 +2065,7 @@ var LLM = class {
  *
  * @example
  * "anthropic/claude-sonnet-4-5" → ["anthropic", "claude-sonnet-4-5"]
- * "groq/gpt-oss-120b" → ["groq", "gpt-oss-120b"]
+ * "groq/openai/gpt-oss-120b" → ["groq", "openai/gpt-oss-120b"]
  * "claude-sonnet-4-5" → ["anthropic", "claude-sonnet-4-5"] (default)
  */
  static _parseModel(modelString) {
@@ -1985,8 +2073,10 @@ var LLM = class {
  return ["anthropic", "claude-sonnet-4-5"];
  }
  if (modelString.includes("/")) {
- const [provider, model] = modelString.split("/");
- return [provider.toLowerCase().trim(), model.trim()];
+ const firstSlashIndex = modelString.indexOf("/");
+ const provider = modelString.substring(0, firstSlashIndex).toLowerCase().trim();
+ const model = modelString.substring(firstSlashIndex + 1).trim();
+ return [provider, model];
  }
  return ["anthropic", modelString];
  }
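The `_parseModel` fix matters for providers whose model ids themselves contain a slash: splitting on the first `/` only separates the provider prefix while preserving the rest of the id. A standalone re-implementation of the same logic, for illustration:

```js
// Mirrors the parsing above: provider is everything before the first "/",
// the model id is everything after it (which may itself contain slashes).
function parseModel(modelString) {
  if (!modelString) return ["anthropic", "claude-sonnet-4-5"];
  const firstSlashIndex = modelString.indexOf("/");
  if (firstSlashIndex === -1) return ["anthropic", modelString];
  return [
    modelString.substring(0, firstSlashIndex).toLowerCase().trim(),
    modelString.substring(firstSlashIndex + 1).trim()
  ];
}

console.log(parseModel("groq/openai/gpt-oss-120b"));   // ["groq", "openai/gpt-oss-120b"]
console.log(parseModel("anthropic/claude-sonnet-4-5")); // ["anthropic", "claude-sonnet-4-5"]
console.log(parseModel("claude-sonnet-4-5"));           // ["anthropic", "claude-sonnet-4-5"]
```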
@@ -1994,8 +2084,9 @@ var LLM = class {
  // ANTHROPIC IMPLEMENTATION
  // ============================================================
  static async _anthropicText(messages, modelName, options) {
+ const apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY || "";
  const client = new import_sdk.default({
- apiKey: options.apiKey || process.env.ANTHROPIC_API_KEY || ""
+ apiKey
  });
  const response = await client.messages.create({
  model: modelName,
@@ -2011,8 +2102,9 @@ var LLM = class {
  return textBlock?.type === "text" ? textBlock.text : "";
  }
  static async _anthropicStream(messages, modelName, options, json) {
+ const apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY || "";
  const client = new import_sdk.default({
- apiKey: options.apiKey || process.env.ANTHROPIC_API_KEY || ""
+ apiKey
  });
  const stream = await client.messages.create({
  model: modelName,
@@ -2059,8 +2151,9 @@ var LLM = class {
  return response.choices[0]?.message?.content || "";
  }
  static async _groqStream(messages, modelName, options, json) {
+ const apiKey = options.apiKey || process.env.GROQ_API_KEY || "";
  const client = new import_groq_sdk.default({
- apiKey: options.apiKey || process.env.GROQ_API_KEY || ""
+ apiKey
  });
  const stream = await client.chat.completions.create({
  model: modelName,
@@ -2130,10 +2223,8 @@ var BaseLLM = class {
  * Classify user question to determine the type and required visualizations
  */
  async classifyUserQuestion(userPrompt, apiKey, logCollector, conversationHistory) {
- const schemaDoc = schema.generateSchemaDocumentation();
  try {
  const prompts = await promptLoader.loadPrompts("classify", {
- SCHEMA_DOC: schemaDoc || "No schema available",
  USER_PROMPT: userPrompt,
  CONVERSATION_HISTORY: conversationHistory || "No previous conversation"
  });
@@ -2188,6 +2279,7 @@ var BaseLLM = class {
  CURRENT_PROPS: JSON.stringify(originalProps, null, 2),
  CONVERSATION_HISTORY: conversationHistory || "No previous conversation"
  });
+ logger.debug("props-modification: System prompt\n", prompts.system.substring(0, 100), "\n\n\n", "User prompt:", prompts.user.substring(0, 50));
  const result = await LLM.stream(
  {
  sys: prompts.system,
@@ -2235,21 +2327,47 @@ var BaseLLM = class {
  }
  }
  /**
- * Generate a dynamic component for analytical questions when no matching component exists
- * This creates a custom component with appropriate visualization and query
+ * Match and select a component from available components filtered by type
+ * This picks the best matching component based on user prompt and modifies its props
  */
- async generateAnalyticalComponent(userPrompt, preferredVisualizationType, apiKey, logCollector, conversationHistory) {
- const schemaDoc = schema.generateSchemaDocumentation();
+ async generateAnalyticalComponent(userPrompt, components, preferredVisualizationType, apiKey, logCollector, conversationHistory) {
  try {
+ const filteredComponents = preferredVisualizationType ? components.filter((c) => c.type === preferredVisualizationType) : components;
+ if (filteredComponents.length === 0) {
+ logCollector?.warn(
+ `No components found of type ${preferredVisualizationType}`,
+ "explanation",
+ { reason: "No matching components available for this visualization type" }
+ );
+ return {
+ component: null,
+ reasoning: `No components available of type ${preferredVisualizationType}`,
+ isGenerated: false
+ };
+ }
+ const componentsText = filteredComponents.map((comp, idx) => {
+ const keywords = comp.keywords ? comp.keywords.join(", ") : "";
+ const category = comp.category || "general";
+ const propsPreview = comp.props ? JSON.stringify(comp.props, null, 2) : "No props";
+ return `${idx + 1}. ID: ${comp.id}
+ Name: ${comp.name}
+ Type: ${comp.type}
+ Category: ${category}
+ Description: ${comp.description || "No description"}
+ Keywords: ${keywords}
+ Props Preview: ${propsPreview}`;
+ }).join("\n\n");
  const visualizationConstraint = preferredVisualizationType ? `
- **IMPORTANT: The user has specifically requested a ${preferredVisualizationType} visualization. You MUST use this type.**
+ **IMPORTANT: Components are filtered to type ${preferredVisualizationType}. Select the best match.**
  ` : "";
  const prompts = await promptLoader.loadPrompts("single-component", {
- SCHEMA_DOC: schemaDoc || "No schema available",
+ COMPONENT_TYPE: preferredVisualizationType || "any",
+ COMPONENTS_LIST: componentsText,
  VISUALIZATION_CONSTRAINT: visualizationConstraint,
  USER_PROMPT: userPrompt,
  CONVERSATION_HISTORY: conversationHistory || "No previous conversation"
  });
+ logger.debug("single-component: System prompt\n", prompts.system.substring(0, 100), "\n\n\n", "User prompt:", prompts.user.substring(0, 50));
  const result = await LLM.stream(
  {
  sys: prompts.system,
@@ -2264,53 +2382,63 @@ var BaseLLM = class {
  true
  // Parse as JSON
  );
- if (!result.canGenerate) {
+ if (!result.canGenerate || result.confidence < 50) {
  logCollector?.warn(
- "Cannot generate component",
+ "Cannot match component",
  "explanation",
- { reason: result.reasoning || "Unable to generate component for this question" }
+ { reason: result.reasoning || "Unable to find matching component for this question" }
  );
  return {
  component: null,
- reasoning: result.reasoning || "Unable to generate component for this question",
+ reasoning: result.reasoning || "Unable to find matching component for this question",
  isGenerated: false
  };
  }
- const query = ensureQueryLimit(result.query, this.defaultLimit);
- logCollector?.logQuery(
- "Analytical component query generated",
- query,
- {
- componentType: result.componentType,
- visualization: preferredVisualizationType || result.componentType,
- title: result.title
- }
+ const componentIndex = result.componentIndex;
+ const componentId = result.componentId;
+ let matchedComponent = null;
+ if (componentId) {
+ matchedComponent = filteredComponents.find((c) => c.id === componentId);
+ }
+ if (!matchedComponent && componentIndex) {
+ matchedComponent = filteredComponents[componentIndex - 1];
+ }
+ if (!matchedComponent) {
+ logCollector?.warn("Component not found in filtered list");
+ return {
+ component: null,
+ reasoning: "Component not found in filtered list",
+ isGenerated: false
+ };
+ }
+ logCollector?.info(`Matched component: ${matchedComponent.name} (confidence: ${result.confidence}%)`);
+ const propsValidation = await this.validateAndModifyProps(
+ userPrompt,
+ matchedComponent.props,
+ matchedComponent.name,
+ matchedComponent.type,
+ matchedComponent.description,
+ apiKey,
+ logCollector,
+ conversationHistory
  );
+ const modifiedComponent = {
+ ...matchedComponent,
+ props: propsValidation.props
+ };
  logCollector?.logExplanation(
- "Analytical component generated",
- result.reasoning || "Generated dynamic component based on analytical question",
+ "Analytical component selected and modified",
+ result.reasoning || "Selected component based on analytical question",
  {
- componentType: result.componentType,
- description: result.description
+ componentName: matchedComponent.name,
+ componentType: matchedComponent.type,
+ confidence: result.confidence,
+ propsModified: propsValidation.isModified
  }
  );
- const dynamicComponent = {
- id: `dynamic_${Date.now()}`,
- name: `Dynamic${result.componentType}`,
- type: result.componentType,
- description: result.description,
- category: "dynamic",
- keywords: [],
- props: {
- query,
- title: result.title,
- description: result.description,
- config: result.config || {}
- }
- };
  return {
- component: dynamicComponent,
- reasoning: result.reasoning || "Generated dynamic component based on analytical question",
+ component: modifiedComponent,
+ reasoning: result.reasoning || "Selected and modified component based on analytical question",
  isGenerated: true
  };
  } catch (error) {
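The reworked `generateAnalyticalComponent` no longer fabricates a `Dynamic*` component; it asks the LLM to pick from the filtered list and resolves that pick by `componentId` first, then by the 1-based `componentIndex`, rejecting anything below 50% confidence. A condensed sketch of just that resolution step (the helper name is illustrative, not part of the package):

```js
// Resolve the LLM's selection against the filtered list: id wins, index is the fallback.
function resolveMatchedComponent(filteredComponents, result) {
  if (!result.canGenerate || result.confidence < 50) return null;
  let matched = null;
  if (result.componentId) {
    matched = filteredComponents.find((c) => c.id === result.componentId) || null;
  }
  if (!matched && result.componentIndex) {
    // componentIndex refers to the 1-based numbering used in the prompt's component list.
    matched = filteredComponents[result.componentIndex - 1] || null;
  }
  return matched;
}
```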
@@ -2318,6 +2446,51 @@ var BaseLLM = class {
  throw error;
  }
  }
+ /**
+ * Generate container metadata (title and description) for multi-component dashboard
+ */
+ async generateContainerMetadata(userPrompt, visualizationTypes, apiKey, logCollector, conversationHistory) {
+ try {
+ const prompts = await promptLoader.loadPrompts("container-metadata", {
+ USER_PROMPT: userPrompt,
+ VISUALIZATION_TYPES: visualizationTypes.join(", "),
+ CONVERSATION_HISTORY: conversationHistory || "No previous conversation"
+ });
+ const result = await LLM.stream(
+ {
+ sys: prompts.system,
+ user: prompts.user
+ },
+ {
+ model: this.model,
+ maxTokens: 500,
+ temperature: 0.3,
+ apiKey: this.getApiKey(apiKey)
+ },
+ true
+ // Parse as JSON
+ );
+ logCollector?.logExplanation(
+ "Container metadata generated",
+ `Generated title and description for multi-component dashboard`,
+ {
+ title: result.title,
+ description: result.description,
+ visualizationTypes
+ }
+ );
+ return {
+ title: result.title || `${userPrompt} - Dashboard`,
+ description: result.description || `Multi-component dashboard showing ${visualizationTypes.join(", ")}`
+ };
+ } catch (error) {
+ console.error("Error generating container metadata:", error);
+ return {
+ title: `${userPrompt} - Dashboard`,
+ description: `Multi-component dashboard showing ${visualizationTypes.join(", ")}`
+ };
+ }
+ }
  /**
  * Match component from a list with enhanced props modification
  */
@@ -2379,12 +2552,12 @@ var BaseLLM = class {
  const noMatchMsg = `No matching component found (confidence: ${confidence}%)`;
  console.log("\u2717", noMatchMsg);
  logCollector?.warn(noMatchMsg);
- const genMsg = "Attempting to generate dynamic component from analytical question...";
+ const genMsg = "Attempting to match component from analytical question...";
  console.log("\u2713", genMsg);
  logCollector?.info(genMsg);
- const generatedResult = await this.generateAnalyticalComponent(userPrompt, void 0, apiKey, logCollector, conversationHistory);
+ const generatedResult = await this.generateAnalyticalComponent(userPrompt, components, void 0, apiKey, logCollector, conversationHistory);
  if (generatedResult.component) {
- const genSuccessMsg = `Successfully generated component: ${generatedResult.component.name}`;
+ const genSuccessMsg = `Successfully matched component: ${generatedResult.component.name}`;
  logCollector?.info(genSuccessMsg);
  return {
  component: generatedResult.component,
@@ -2396,10 +2569,10 @@ var BaseLLM = class {
  queryModified: false
  };
  }
- logCollector?.error("Failed to generate dynamic component");
+ logCollector?.error("Failed to match component");
  return {
  component: null,
- reasoning: result.reasoning || "No matching component found and unable to generate dynamic component",
+ reasoning: result.reasoning || "No matching component found and unable to match component",
  method: `${this.getProviderName()}-llm`,
  confidence
  };
@@ -2447,15 +2620,15 @@ var BaseLLM = class {
  }
  }
  /**
- * Generate multiple dynamic components for analytical questions
+ * Match multiple components for analytical questions by visualization types
  * This is used when the user needs multiple visualizations
  */
- async generateMultipleAnalyticalComponents(userPrompt, visualizationTypes, apiKey, logCollector, conversationHistory) {
+ async generateMultipleAnalyticalComponents(userPrompt, availableComponents, visualizationTypes, apiKey, logCollector, conversationHistory) {
  try {
- console.log("\u2713 Generating multiple components:", visualizationTypes);
+ console.log("\u2713 Matching multiple components:", visualizationTypes);
  const components = [];
  for (const vizType of visualizationTypes) {
- const result = await this.generateAnalyticalComponent(userPrompt, vizType, apiKey, logCollector, conversationHistory);
+ const result = await this.generateAnalyticalComponent(userPrompt, availableComponents, vizType, apiKey, logCollector, conversationHistory);
  if (result.component) {
  components.push(result.component);
  }
@@ -2463,75 +2636,45 @@ var BaseLLM = class {
  if (components.length === 0) {
  return {
  components: [],
- reasoning: "Failed to generate any components",
+ reasoning: "Failed to match any components",
  isGenerated: false
  };
  }
  return {
  components,
- reasoning: `Generated ${components.length} components: ${visualizationTypes.join(", ")}`,
+ reasoning: `Matched ${components.length} components: ${visualizationTypes.join(", ")}`,
  isGenerated: true
  };
  } catch (error) {
- console.error("Error generating multiple analytical components:", error);
+ console.error("Error matching multiple analytical components:", error);
  return {
  components: [],
- reasoning: "Error occurred while generating components",
+ reasoning: "Error occurred while matching components",
  isGenerated: false
  };
  }
  }
  /**
- * Generate a complete multi-component response with intelligent container and component props
+ * Match multiple components and wrap them in a container
  */
- async generateMultiComponentResponse(userPrompt, visualizationTypes, apiKey, logCollector, conversationHistory) {
- const schemaDoc = schema.generateSchemaDocumentation();
+ async generateMultiComponentResponse(userPrompt, availableComponents, visualizationTypes, apiKey, logCollector, conversationHistory) {
  try {
- const prompts = await promptLoader.loadPrompts("mutli-component", {
- SCHEMA_DOC: schemaDoc || "No schema available",
- DEFAULT_LIMIT: this.defaultLimit,
- USER_PROMPT: userPrompt,
- VISUALIZATION_TYPES: visualizationTypes.join(", "),
- CONVERSATION_HISTORY: conversationHistory || "No previous conversation"
- });
- const result = await LLM.stream(
- {
- sys: prompts.system,
- user: prompts.user
- },
- {
- model: this.model,
- maxTokens: 3e3,
- temperature: 0.2,
- apiKey: this.getApiKey(apiKey)
- },
- true
- // Parse as JSON
+ const matchResult = await this.generateMultipleAnalyticalComponents(
+ userPrompt,
+ availableComponents,
+ visualizationTypes,
+ apiKey,
+ logCollector,
+ conversationHistory
  );
- if (!result.canGenerate || !result.components || result.components.length === 0) {
+ if (!matchResult.isGenerated || matchResult.components.length === 0) {
  return {
  containerComponent: null,
- reasoning: result.reasoning || "Unable to generate multi-component dashboard",
+ reasoning: matchResult.reasoning || "Unable to match multi-component dashboard",
  isGenerated: false
  };
  }
- const generatedComponents = result.components.map((compData, index) => {
- const query = ensureQueryLimit(compData.query, this.defaultLimit);
- return {
- id: `dynamic_${compData.componentType.toLowerCase()}_${Date.now()}_${index}`,
- name: `Dynamic${compData.componentType}`,
- type: compData.componentType,
- description: compData.description,
- category: "dynamic",
- keywords: [],
- props: {
- query,
- title: compData.title,
- description: compData.description,
- config: compData.config || {}
- }
- };
- });
+ const generatedComponents = matchResult.components;
  generatedComponents.forEach((component, index) => {
  if (component.props.query) {
  logCollector?.logQuery(
@@ -2546,21 +2689,24 @@ var BaseLLM = class {
  );
  }
  });
+ const containerTitle = `${userPrompt} - Dashboard`;
+ const containerDescription = `Multi-component dashboard showing ${visualizationTypes.join(", ")}`;
  logCollector?.logExplanation(
- "Multi-component dashboard generated",
- result.reasoning || `Generated ${generatedComponents.length} components for comprehensive analysis`,
+ "Multi-component dashboard matched",
+ matchResult.reasoning || `Matched ${generatedComponents.length} components for comprehensive analysis`,
  {
  totalComponents: generatedComponents.length,
  componentTypes: generatedComponents.map((c) => c.type),
- containerTitle: result.containerTitle,
- containerDescription: result.containerDescription
+ componentNames: generatedComponents.map((c) => c.name),
+ containerTitle,
+ containerDescription
  }
  );
  const containerComponent = {
  id: `multi_container_${Date.now()}`,
  name: "MultiComponentContainer",
  type: "Container",
- description: result.containerDescription,
+ description: containerDescription,
  category: "dynamic",
  keywords: ["multi", "container", "dashboard"],
  props: {
@@ -2568,14 +2714,14 @@ var BaseLLM = class {
  components: generatedComponents,
  layout: "grid",
  spacing: 24,
- title: result.containerTitle,
- description: result.containerDescription
+ title: containerTitle,
+ description: containerDescription
  }
  }
  };
  return {
  containerComponent,
- reasoning: result.reasoning || `Generated multi-component dashboard with ${generatedComponents.length} components`,
+ reasoning: matchResult.reasoning || `Matched multi-component dashboard with ${generatedComponents.length} components`,
  isGenerated: true
  };
  } catch (error) {
@@ -2596,41 +2742,89 @@ var BaseLLM = class {
  const classInfo = `Question type: ${classification.questionType}, Visualizations: ${classification.visualizations.join(", ") || "None"}, Multiple components: ${classification.needsMultipleComponents}`;
  logCollector?.info(classInfo);
  if (classification.questionType === "analytical") {
- if (classification.visualizations.length > 0) {
- if (classification.needsMultipleComponents && classification.visualizations.length > 1) {
- const multiMsg = "Generating multi-component dashboard...";
- logCollector?.info(multiMsg);
- const result = await this.generateMultiComponentResponse(
+ if (classification.visualizations.length > 1) {
+ const multiMsg = `Matching ${classification.visualizations.length} components for types: ${classification.visualizations.join(", ")}`;
+ logCollector?.info(multiMsg);
+ const matchedComponents = [];
+ for (const vizType of classification.visualizations) {
+ logCollector?.info(`Matching component for type: ${vizType}`);
+ const result = await this.generateAnalyticalComponent(
  userPrompt,
- classification.visualizations,
+ components,
+ vizType,
  apiKey,
  logCollector,
  conversationHistory
  );
+ if (result.component) {
+ matchedComponents.push(result.component);
+ logCollector?.info(`Matched: ${result.component.name}`);
+ } else {
+ logCollector?.warn(`Failed to match component for type: ${vizType}`);
+ }
+ }
+ if (matchedComponents.length === 0) {
  return {
- component: result.containerComponent,
- reasoning: result.reasoning,
- method: "classification-multi-generated",
+ component: null,
+ reasoning: "Failed to match any components for the requested visualization types",
+ method: "classification-multi-failed",
  questionType: classification.questionType,
  needsMultipleComponents: true,
  propsModified: false,
  queryModified: false
  };
- } else {
- const vizType = classification.visualizations[0];
- const result = await this.generateAnalyticalComponent(userPrompt, vizType, apiKey, logCollector, conversationHistory);
- return {
- component: result.component,
- reasoning: result.reasoning,
- method: "classification-generated",
- questionType: classification.questionType,
- needsMultipleComponents: false,
- propsModified: false,
- queryModified: false
- };
  }
+ logCollector?.info("Generating container metadata...");
+ const containerMetadata = await this.generateContainerMetadata(
+ userPrompt,
+ classification.visualizations,
+ apiKey,
+ logCollector,
+ conversationHistory
+ );
+ const containerComponent = {
+ id: `multi_container_${Date.now()}`,
+ name: "MultiComponentContainer",
+ type: "Container",
+ description: containerMetadata.description,
+ category: "dynamic",
+ keywords: ["multi", "container", "dashboard"],
+ props: {
+ config: {
+ components: matchedComponents,
+ layout: "grid",
+ spacing: 24,
+ title: containerMetadata.title,
+ description: containerMetadata.description
+ }
+ }
+ };
+ logCollector?.info(`Created multi-component container with ${matchedComponents.length} components: "${containerMetadata.title}"`);
+ return {
+ component: containerComponent,
+ reasoning: `Matched ${matchedComponents.length} components for visualization types: ${classification.visualizations.join(", ")}`,
+ method: "classification-multi-generated",
+ questionType: classification.questionType,
+ needsMultipleComponents: true,
+ propsModified: false,
+ queryModified: false
+ };
+ } else if (classification.visualizations.length === 1) {
+ const vizType = classification.visualizations[0];
+ logCollector?.info(`Matching single component for type: ${vizType}`);
+ const result = await this.generateAnalyticalComponent(userPrompt, components, vizType, apiKey, logCollector, conversationHistory);
+ return {
+ component: result.component,
+ reasoning: result.reasoning,
+ method: "classification-generated",
+ questionType: classification.questionType,
+ needsMultipleComponents: false,
+ propsModified: false,
+ queryModified: false
+ };
  } else {
- const result = await this.generateAnalyticalComponent(userPrompt, void 0, apiKey, logCollector, conversationHistory);
+ logCollector?.info("No specific visualization type - matching from all components");
+ const result = await this.generateAnalyticalComponent(userPrompt, components, void 0, apiKey, logCollector, conversationHistory);
  return {
  component: result.component,
  reasoning: result.reasoning,
@@ -2641,7 +2835,7 @@ var BaseLLM = class {
  queryModified: false
  };
  }
- } else if (classification.questionType === "data_modification") {
+ } else if (classification.questionType === "data_modification" || classification.questionType === "general") {
  const matchMsg = "Using component matching for data modification...";
  logCollector?.info(matchMsg);
  const matchResult = await this.matchComponent(userPrompt, components, apiKey, logCollector, conversationHistory);
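For multi-visualization requests, the handler now loops over the classified types, matches one component per type, and only then asks `generateContainerMetadata` for a title and description before wrapping everything in a `Container`. The resulting object has the shape sketched below; the field layout is taken from the diff, the sample values are made up:

```js
// Shape of the container assembled in the multi-visualization branch above.
const containerComponent = {
  id: `multi_container_${Date.now()}`,
  name: "MultiComponentContainer",
  type: "Container",
  description: "Revenue and order volume at a glance", // from generateContainerMetadata
  category: "dynamic",
  keywords: ["multi", "container", "dashboard"],
  props: {
    config: {
      components: [/* one matched component per requested visualization type */],
      layout: "grid",
      spacing: 24,
      title: "Revenue overview",
      description: "Multi-component dashboard showing chart, table"
    }
  }
};
```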
@@ -2787,8 +2981,14 @@ var useAnthropicMethod = async (prompt, components, apiKey, logCollector, conver
  logCollector?.error(emptyMsg);
  return { success: false, reason: emptyMsg };
  }
- const matchResult = await anthropicLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory);
- return { success: true, data: matchResult };
+ try {
+ const matchResult = await anthropicLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory);
+ return { success: true, data: matchResult };
+ } catch (error) {
+ const errorMsg = error instanceof Error ? error.message : String(error);
+ logCollector?.error(`Anthropic method failed: ${errorMsg}`);
+ throw error;
+ }
  };
  var useGroqMethod = async (prompt, components, apiKey, logCollector, conversationHistory) => {
  const msg = "Using Groq LLM matching method...";
@@ -2799,8 +2999,14 @@ var useGroqMethod = async (prompt, components, apiKey, logCollector, conversatio
  logCollector?.error(emptyMsg);
  return { success: false, reason: emptyMsg };
  }
- const matchResult = await groqLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory);
- return { success: true, data: matchResult };
+ try {
+ const matchResult = await groqLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory);
+ return { success: true, data: matchResult };
+ } catch (error) {
+ const errorMsg = error instanceof Error ? error.message : String(error);
+ logCollector?.error(`Groq method failed: ${errorMsg}`);
+ throw error;
+ }
  };
  var getUserResponseFromCache = async (prompt) => {
  return false;
@@ -3021,6 +3227,7 @@ var CONTEXT_CONFIG = {
  };

  // src/handlers/user-prompt-request.ts
+ var processedMessageIds = /* @__PURE__ */ new Set();
  async function handleUserPromptRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, llmProviders) {
  try {
  const userPromptRequest = UserPromptRequestMessageSchema.parse(data);
@@ -3028,6 +3235,18 @@ async function handleUserPromptRequest(data, components, sendMessage, anthropicA
  const prompt = payload.prompt;
  const SA_RUNTIME = payload.SA_RUNTIME;
  const wsId = userPromptRequest.from.id || "unknown";
+ logger.info(`[REQUEST ${id}] Processing user prompt: "${prompt.substring(0, 50)}..."`);
+ if (processedMessageIds.has(id)) {
+ logger.warn(`[REQUEST ${id}] Duplicate request detected - ignoring`);
+ return;
+ }
+ processedMessageIds.add(id);
+ if (processedMessageIds.size > 100) {
+ const firstId = processedMessageIds.values().next().value;
+ if (firstId) {
+ processedMessageIds.delete(firstId);
+ }
+ }
  if (!SA_RUNTIME) {
  sendDataResponse4(id, {
  success: false,
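The duplicate-request guard added here keeps a bounded `Set` of the most recent message ids; because a `Set` iterates in insertion order, evicting `values().next().value` always drops the oldest entry. The same pattern in isolation:

```js
// Bounded de-duplication: remember the most recent 100 ids, ignore repeats.
const processedMessageIds = new Set();

function shouldProcess(id) {
  if (processedMessageIds.has(id)) return false; // duplicate, caller should ignore it
  processedMessageIds.add(id);
  if (processedMessageIds.size > 100) {
    const oldest = processedMessageIds.values().next().value; // insertion order: oldest first
    if (oldest) processedMessageIds.delete(oldest);
  }
  return true;
}
```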
@@ -3067,7 +3286,6 @@ async function handleUserPromptRequest(data, components, sendMessage, anthropicA
  return;
  }
  logCollector.info(`Starting user prompt request with ${components.length} components`);
- logger.info(`components length: ${components.length}`);
  const threadManager = ThreadManager.getInstance();
  let thread = threadManager.getThread(threadId);
  if (!thread) {
@@ -4227,6 +4445,9 @@ var UserManager = class {
  if (!user) {
  return false;
  }
+ if (!user.wsIds || !Array.isArray(user.wsIds)) {
+ user.wsIds = [];
+ }
  if (!user.wsIds.includes(wsId)) {
  user.wsIds.push(wsId);
  this.hasChanged = true;
@@ -4245,6 +4466,9 @@ var UserManager = class {
  if (!user) {
  return false;
  }
+ if (!user.wsIds || !Array.isArray(user.wsIds)) {
+ return false;
+ }
  const initialLength = user.wsIds.length;
  user.wsIds = user.wsIds.filter((id) => id !== wsId);
  if (user.wsIds.length < initialLength) {
@@ -4810,6 +5034,9 @@ var SuperatomSDK = class {
  this.userManager = new UserManager(this.projectId, 5e3);
  this.dashboardManager = new DashboardManager(this.projectId);
  this.reportManager = new ReportManager(this.projectId);
+ this.initializePromptLoader(config.promptsDir).catch((error) => {
+ logger.error("Failed to initialize PromptLoader:", error);
+ });
  this.initializeUserManager().catch((error) => {
  logger.error("Failed to initialize UserManager:", error);
  });
@@ -4819,6 +5046,21 @@ var SuperatomSDK = class {
  logger.error("Failed to connect to Superatom:", error);
  });
  }
+ /**
+ * Initialize PromptLoader and load prompts into memory
+ */
+ async initializePromptLoader(promptsDir) {
+ try {
+ if (promptsDir) {
+ promptLoader.setPromptsDir(promptsDir);
+ }
+ await promptLoader.initialize();
+ logger.info(`PromptLoader initialized with ${promptLoader.getCacheSize()} prompts from ${promptLoader.getPromptsDir()}`);
+ } catch (error) {
+ logger.error("Failed to initialize PromptLoader:", error);
+ throw error;
+ }
+ }
  /**
  * Initialize UserManager for the project
  */
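With the constructor now calling `initializePromptLoader`, a consumer can point the SDK at its own prompt templates at construction time, while `PROMPTS_DIR` remains the environment-level override for the module singleton. A hedged usage sketch: only `promptsDir` is confirmed by this diff; the export name and the other constructor fields are assumptions made to keep the example self-contained.

```js
const { SuperatomSDK } = require("@superatomai/sdk-node");

// promptsDir is read by the new initializePromptLoader call; projectId and apiKey
// are assumed fields shown only for illustration.
const sdk = new SuperatomSDK({
  projectId: "my-project",
  apiKey: process.env.SUPERATOM_API_KEY,
  promptsDir: "/opt/app/.prompts"
});
```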