@superatomai/sdk-node 0.0.2 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1883,55 +1883,126 @@ var import_fs2 = __toESM(require("fs"));
  var import_path2 = __toESM(require("path"));
  var PromptLoader = class {
  constructor(config) {
+ this.promptCache = /* @__PURE__ */ new Map();
+ this.isInitialized = false;
  logger.debug("Initializing PromptLoader...", process.cwd());
  this.promptsDir = config?.promptsDir || import_path2.default.join(process.cwd(), ".prompts");
+ this.defaultPromptsDir = import_path2.default.join(__dirname, "..", "..", ".prompts");
  }
  /**
- * Load a single prompt file and replace variables using {{VARIABLE_NAME}} pattern
+ * Initialize and cache all prompts into memory
+ * This should be called once at SDK startup
+ */
+ async initialize() {
+ if (this.isInitialized) {
+ logger.debug("PromptLoader already initialized, skipping...");
+ return;
+ }
+ logger.info("Loading prompts into memory...");
+ const promptTypes = [
+ "classify",
+ "match-component",
+ "modify-props",
+ "single-component",
+ "mutli-component",
+ "actions",
+ "container-metadata"
+ ];
+ for (const promptType of promptTypes) {
+ try {
+ const template = await this.loadPromptTemplate(promptType);
+ this.promptCache.set(promptType, template);
+ logger.debug(`Cached prompt: ${promptType}`);
+ } catch (error) {
+ logger.error(`Failed to load prompt '${promptType}':`, error);
+ throw error;
+ }
+ }
+ this.isInitialized = true;
+ logger.info(`Successfully loaded ${this.promptCache.size} prompt templates into memory`);
+ }
+ /**
+ * Load a prompt template from file system (tries custom dir first, then defaults to SDK dir)
  * @param promptName - Name of the prompt folder
- * @param promptType - Type of prompt ('system' or 'user')
- * @param variables - Variables to replace in the template
- * @returns Processed prompt string
+ * @returns Template with system and user prompts
  */
- async loadPrompt(promptName, promptType, variables) {
- try {
- const promptPath = import_path2.default.join(
- this.promptsDir,
- promptName,
- `${promptType}.md`
- );
- logger.debug(`Loading prompt '${promptName}/${promptType}.md' from ${promptPath} process path: ${process.cwd()}`);
- let content = import_fs2.default.readFileSync(promptPath, "utf-8");
- for (const [key, value] of Object.entries(variables)) {
- const pattern = new RegExp(`{{${key}}}`, "g");
- const replacementValue = typeof value === "string" ? value : JSON.stringify(value);
- content = content.replace(pattern, replacementValue);
+ async loadPromptTemplate(promptName) {
+ const tryLoadFromDir = (dir) => {
+ try {
+ const systemPath = import_path2.default.join(dir, promptName, "system.md");
+ const userPath = import_path2.default.join(dir, promptName, "user.md");
+ if (import_fs2.default.existsSync(systemPath) && import_fs2.default.existsSync(userPath)) {
+ const system = import_fs2.default.readFileSync(systemPath, "utf-8");
+ const user = import_fs2.default.readFileSync(userPath, "utf-8");
+ logger.debug(`Loaded prompt '${promptName}' from ${dir}`);
+ return { system, user };
+ }
+ return null;
+ } catch (error) {
+ return null;
  }
- return content;
- } catch (error) {
- console.error(`Error loading prompt '${promptName}/${promptType}.md':`, error);
- throw error;
+ };
+ let template = tryLoadFromDir(this.promptsDir);
+ if (!template) {
+ logger.warn(`Prompt '${promptName}' not found in ${this.promptsDir}, trying default location...`);
+ template = tryLoadFromDir(this.defaultPromptsDir);
+ }
+ if (!template) {
+ throw new Error(`Prompt template '${promptName}' not found in either ${this.promptsDir} or ${this.defaultPromptsDir}`);
+ }
+ return template;
+ }
+ /**
+ * Replace variables in a template string using {{VARIABLE_NAME}} pattern
+ * @param template - Template string with placeholders
+ * @param variables - Variables to replace in the template
+ * @returns Processed string
+ */
+ replaceVariables(template, variables) {
+ let content = template;
+ for (const [key, value] of Object.entries(variables)) {
+ const pattern = new RegExp(`{{${key}}}`, "g");
+ const replacementValue = typeof value === "string" ? value : JSON.stringify(value);
+ content = content.replace(pattern, replacementValue);
  }
+ return content;
  }
  /**
- * Load both system and user prompts and replace variables
+ * Load both system and user prompts from cache and replace variables
  * @param promptName - Name of the prompt folder
  * @param variables - Variables to replace in the templates
  * @returns Object containing both system and user prompts
  */
  async loadPrompts(promptName, variables) {
- const [system, user] = await Promise.all([
- this.loadPrompt(promptName, "system", variables),
- this.loadPrompt(promptName, "user", variables)
- ]);
- return { system, user };
+ if (!this.isInitialized) {
+ logger.warn("PromptLoader not initialized, loading prompts on-demand (not recommended)");
+ await this.initialize();
+ }
+ const template = this.promptCache.get(promptName);
+ if (!template) {
+ throw new Error(`Prompt template '${promptName}' not found in cache. Available prompts: ${Array.from(this.promptCache.keys()).join(", ")}`);
+ }
+ return {
+ system: this.replaceVariables(template.system, variables),
+ user: this.replaceVariables(template.user, variables)
+ };
  }
  /**
- * Set custom prompts directory
+ * DEPRECATED: Use loadPrompts instead
+ * Load a single prompt file and replace variables using {{VARIABLE_NAME}} pattern
+ */
+ async loadPrompt(promptName, promptType, variables) {
+ const prompts = await this.loadPrompts(promptName, variables);
+ return promptType === "system" ? prompts.system : prompts.user;
+ }
+ /**
+ * Set custom prompts directory (requires re-initialization)
  * @param dir - Path to the prompts directory
  */
  setPromptsDir(dir) {
  this.promptsDir = dir;
+ this.isInitialized = false;
+ this.promptCache.clear();
  }
  /**
  * Get current prompts directory
@@ -1940,8 +2011,23 @@ var PromptLoader = class {
  getPromptsDir() {
  return this.promptsDir;
  }
+ /**
+ * Check if prompts are loaded in memory
+ */
+ isReady() {
+ return this.isInitialized;
+ }
+ /**
+ * Get the number of cached prompts
+ */
+ getCacheSize() {
+ return this.promptCache.size;
+ }
  };
- var promptLoader = new PromptLoader();
+ var defaultPromptsPath = process.env.PROMPTS_DIR || import_path2.default.join(process.cwd(), ".prompts");
+ var promptLoader = new PromptLoader({
+ promptsDir: defaultPromptsPath
+ });

  // src/llm.ts
  var import_sdk = __toESM(require("@anthropic-ai/sdk"));
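For orientation, the two hunks above replace per-call file reads with a one-time in-memory cache. Below is a minimal sketch of the new flow, using only the PromptLoader members visible in these hunks; the class is module-local in dist/index.js (it is not necessarily exported by the package), and the directory path and variable values are illustrative.

// Sketch only — mirrors the internal flow introduced in 0.0.3, not a documented public API.
// PromptLoader is declared here purely so the sketch type-checks; the real class lives in dist/index.js.
import path from "node:path";

declare class PromptLoader {
  constructor(config?: { promptsDir?: string });
  initialize(): Promise<void>;
  isReady(): boolean;
  getCacheSize(): number;
  loadPrompts(name: string, vars: Record<string, unknown>): Promise<{ system: string; user: string }>;
}

async function demoPromptCache() {
  const loader = new PromptLoader({
    promptsDir: process.env.PROMPTS_DIR || path.join(process.cwd(), ".prompts")
  });
  await loader.initialize();          // caches system.md/user.md for all seven prompt types
  console.log(loader.isReady());      // true
  console.log(loader.getCacheSize()); // 7
  // A prompt folder missing from promptsDir falls back to the .prompts copy bundled with the SDK.
  const { system, user } = await loader.loadPrompts("classify", {
    USER_PROMPT: "Show revenue by month",            // hypothetical values
    CONVERSATION_HISTORY: "No previous conversation"
  });
  // {{USER_PROMPT}} and {{CONVERSATION_HISTORY}} in the cached templates are replaced here.
  return { system, user };
}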
@@ -1999,8 +2085,6 @@ var LLM = class {
  // ============================================================
  static async _anthropicText(messages, modelName, options) {
  const apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY || "";
- console.log("[LLM DEBUG] Anthropic Text - apiKey from options:", options.apiKey ? `${options.apiKey.substring(0, 10)}...` : "NOT SET");
- console.log("[LLM DEBUG] Anthropic Text - final apiKey:", apiKey ? `${apiKey.substring(0, 10)}...` : "EMPTY STRING");
  const client = new import_sdk.default({
  apiKey
  });
@@ -2019,9 +2103,6 @@ var LLM = class {
  }
  static async _anthropicStream(messages, modelName, options, json) {
  const apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY || "";
- console.log("[LLM DEBUG] Anthropic - apiKey from options:", options.apiKey ? `${options.apiKey.substring(0, 10)}...` : "NOT SET");
- console.log("[LLM DEBUG] Anthropic - apiKey from env:", process.env.ANTHROPIC_API_KEY ? `${process.env.ANTHROPIC_API_KEY.substring(0, 10)}...` : "NOT SET");
- console.log("[LLM DEBUG] Anthropic - final apiKey:", apiKey ? `${apiKey.substring(0, 10)}...` : "EMPTY STRING");
  const client = new import_sdk.default({
  apiKey
  });
@@ -2071,9 +2152,6 @@ var LLM = class {
  }
  static async _groqStream(messages, modelName, options, json) {
  const apiKey = options.apiKey || process.env.GROQ_API_KEY || "";
- console.log("[LLM DEBUG] Groq - apiKey from options:", options.apiKey ? `${options.apiKey.substring(0, 10)}...` : "NOT SET");
- console.log("[LLM DEBUG] Groq - model:", modelName);
- console.log("[LLM DEBUG] Groq - final apiKey:", apiKey ? `${apiKey.substring(0, 10)}...` : "EMPTY STRING");
  const client = new import_groq_sdk.default({
  apiKey
  });
@@ -2145,11 +2223,8 @@ var BaseLLM = class {
  * Classify user question to determine the type and required visualizations
  */
  async classifyUserQuestion(userPrompt, apiKey, logCollector, conversationHistory) {
- const schemaDoc = schema.generateSchemaDocumentation();
- logger.info("Generating prompts...", userPrompt, conversationHistory);
  try {
  const prompts = await promptLoader.loadPrompts("classify", {
- SCHEMA_DOC: schemaDoc || "No schema available",
  USER_PROMPT: userPrompt,
  CONVERSATION_HISTORY: conversationHistory || "No previous conversation"
  });
@@ -2204,6 +2279,7 @@ var BaseLLM = class {
  CURRENT_PROPS: JSON.stringify(originalProps, null, 2),
  CONVERSATION_HISTORY: conversationHistory || "No previous conversation"
  });
+ logger.debug("props-modification: System prompt\n", prompts.system.substring(0, 100), "\n\n\n", "User prompt:", prompts.user.substring(0, 50));
  const result = await LLM.stream(
  {
  sys: prompts.system,
@@ -2251,21 +2327,47 @@ var BaseLLM = class {
  }
  }
  /**
- * Generate a dynamic component for analytical questions when no matching component exists
- * This creates a custom component with appropriate visualization and query
+ * Match and select a component from available components filtered by type
+ * This picks the best matching component based on user prompt and modifies its props
  */
- async generateAnalyticalComponent(userPrompt, preferredVisualizationType, apiKey, logCollector, conversationHistory) {
- const schemaDoc = schema.generateSchemaDocumentation();
+ async generateAnalyticalComponent(userPrompt, components, preferredVisualizationType, apiKey, logCollector, conversationHistory) {
  try {
+ const filteredComponents = preferredVisualizationType ? components.filter((c) => c.type === preferredVisualizationType) : components;
+ if (filteredComponents.length === 0) {
+ logCollector?.warn(
+ `No components found of type ${preferredVisualizationType}`,
+ "explanation",
+ { reason: "No matching components available for this visualization type" }
+ );
+ return {
+ component: null,
+ reasoning: `No components available of type ${preferredVisualizationType}`,
+ isGenerated: false
+ };
+ }
+ const componentsText = filteredComponents.map((comp, idx) => {
+ const keywords = comp.keywords ? comp.keywords.join(", ") : "";
+ const category = comp.category || "general";
+ const propsPreview = comp.props ? JSON.stringify(comp.props, null, 2) : "No props";
+ return `${idx + 1}. ID: ${comp.id}
+ Name: ${comp.name}
+ Type: ${comp.type}
+ Category: ${category}
+ Description: ${comp.description || "No description"}
+ Keywords: ${keywords}
+ Props Preview: ${propsPreview}`;
+ }).join("\n\n");
  const visualizationConstraint = preferredVisualizationType ? `
- **IMPORTANT: The user has specifically requested a ${preferredVisualizationType} visualization. You MUST use this type.**
+ **IMPORTANT: Components are filtered to type ${preferredVisualizationType}. Select the best match.**
  ` : "";
  const prompts = await promptLoader.loadPrompts("single-component", {
- SCHEMA_DOC: schemaDoc || "No schema available",
+ COMPONENT_TYPE: preferredVisualizationType || "any",
+ COMPONENTS_LIST: componentsText,
  VISUALIZATION_CONSTRAINT: visualizationConstraint,
  USER_PROMPT: userPrompt,
  CONVERSATION_HISTORY: conversationHistory || "No previous conversation"
  });
+ logger.debug("single-component: System prompt\n", prompts.system.substring(0, 100), "\n\n\n", "User prompt:", prompts.user.substring(0, 50));
  const result = await LLM.stream(
  {
  sys: prompts.system,
@@ -2280,53 +2382,63 @@ var BaseLLM = class {
  true
  // Parse as JSON
  );
- if (!result.canGenerate) {
+ if (!result.canGenerate || result.confidence < 50) {
  logCollector?.warn(
- "Cannot generate component",
+ "Cannot match component",
  "explanation",
- { reason: result.reasoning || "Unable to generate component for this question" }
+ { reason: result.reasoning || "Unable to find matching component for this question" }
  );
  return {
  component: null,
- reasoning: result.reasoning || "Unable to generate component for this question",
+ reasoning: result.reasoning || "Unable to find matching component for this question",
  isGenerated: false
  };
  }
- const query = ensureQueryLimit(result.query, this.defaultLimit);
- logCollector?.logQuery(
- "Analytical component query generated",
- query,
- {
- componentType: result.componentType,
- visualization: preferredVisualizationType || result.componentType,
- title: result.title
- }
+ const componentIndex = result.componentIndex;
+ const componentId = result.componentId;
+ let matchedComponent = null;
+ if (componentId) {
+ matchedComponent = filteredComponents.find((c) => c.id === componentId);
+ }
+ if (!matchedComponent && componentIndex) {
+ matchedComponent = filteredComponents[componentIndex - 1];
+ }
+ if (!matchedComponent) {
+ logCollector?.warn("Component not found in filtered list");
+ return {
+ component: null,
+ reasoning: "Component not found in filtered list",
+ isGenerated: false
+ };
+ }
+ logCollector?.info(`Matched component: ${matchedComponent.name} (confidence: ${result.confidence}%)`);
+ const propsValidation = await this.validateAndModifyProps(
+ userPrompt,
+ matchedComponent.props,
+ matchedComponent.name,
+ matchedComponent.type,
+ matchedComponent.description,
+ apiKey,
+ logCollector,
+ conversationHistory
  );
+ const modifiedComponent = {
+ ...matchedComponent,
+ props: propsValidation.props
+ };
  logCollector?.logExplanation(
- "Analytical component generated",
- result.reasoning || "Generated dynamic component based on analytical question",
+ "Analytical component selected and modified",
+ result.reasoning || "Selected component based on analytical question",
  {
- componentType: result.componentType,
- description: result.description
+ componentName: matchedComponent.name,
+ componentType: matchedComponent.type,
+ confidence: result.confidence,
+ propsModified: propsValidation.isModified
  }
  );
- const dynamicComponent = {
- id: `dynamic_${Date.now()}`,
- name: `Dynamic${result.componentType}`,
- type: result.componentType,
- description: result.description,
- category: "dynamic",
- keywords: [],
- props: {
- query,
- title: result.title,
- description: result.description,
- config: result.config || {}
- }
- };
  return {
- component: dynamicComponent,
- reasoning: result.reasoning || "Generated dynamic component based on analytical question",
+ component: modifiedComponent,
+ reasoning: result.reasoning || "Selected and modified component based on analytical question",
  isGenerated: true
  };
  } catch (error) {
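The JSON the single-component prompt is now expected to return can be inferred from the fields this hunk reads; a sketch of that shape follows (the field names come from the code above, while the optionality markers are assumptions):

// Inferred from the replacement code above; not an official schema.
interface SingleComponentMatchResult {
  canGenerate: boolean;     // must be true, with confidence >= 50, for a match to proceed
  confidence: number;       // 0-100
  componentId?: string;     // preferred lookup into the filtered component list
  componentIndex?: number;  // 1-based fallback when componentId is absent or does not match
  reasoning?: string;       // surfaced in logs and in the returned reasoning
}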
@@ -2334,6 +2446,51 @@ var BaseLLM = class {
  throw error;
  }
  }
+ /**
+ * Generate container metadata (title and description) for multi-component dashboard
+ */
+ async generateContainerMetadata(userPrompt, visualizationTypes, apiKey, logCollector, conversationHistory) {
+ try {
+ const prompts = await promptLoader.loadPrompts("container-metadata", {
+ USER_PROMPT: userPrompt,
+ VISUALIZATION_TYPES: visualizationTypes.join(", "),
+ CONVERSATION_HISTORY: conversationHistory || "No previous conversation"
+ });
+ const result = await LLM.stream(
+ {
+ sys: prompts.system,
+ user: prompts.user
+ },
+ {
+ model: this.model,
+ maxTokens: 500,
+ temperature: 0.3,
+ apiKey: this.getApiKey(apiKey)
+ },
+ true
+ // Parse as JSON
+ );
+ logCollector?.logExplanation(
+ "Container metadata generated",
+ `Generated title and description for multi-component dashboard`,
+ {
+ title: result.title,
+ description: result.description,
+ visualizationTypes
+ }
+ );
+ return {
+ title: result.title || `${userPrompt} - Dashboard`,
+ description: result.description || `Multi-component dashboard showing ${visualizationTypes.join(", ")}`
+ };
+ } catch (error) {
+ console.error("Error generating container metadata:", error);
+ return {
+ title: `${userPrompt} - Dashboard`,
+ description: `Multi-component dashboard showing ${visualizationTypes.join(", ")}`
+ };
+ }
+ }
  /**
  * Match component from a list with enhanced props modification
  */
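The new container-metadata prompt only has to return a title and a description; when the call fails or a field is missing, the method falls back to templated defaults, so a minimal inferred shape would be:

// Inferred from generateContainerMetadata above; both fields are optional because defaults apply.
interface ContainerMetadataResult {
  title?: string;        // falls back to `${userPrompt} - Dashboard`
  description?: string;  // falls back to `Multi-component dashboard showing ${visualizationTypes.join(", ")}`
}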
@@ -2395,12 +2552,12 @@ var BaseLLM = class {
  const noMatchMsg = `No matching component found (confidence: ${confidence}%)`;
  console.log("\u2717", noMatchMsg);
  logCollector?.warn(noMatchMsg);
- const genMsg = "Attempting to generate dynamic component from analytical question...";
+ const genMsg = "Attempting to match component from analytical question...";
  console.log("\u2713", genMsg);
  logCollector?.info(genMsg);
- const generatedResult = await this.generateAnalyticalComponent(userPrompt, void 0, apiKey, logCollector, conversationHistory);
+ const generatedResult = await this.generateAnalyticalComponent(userPrompt, components, void 0, apiKey, logCollector, conversationHistory);
  if (generatedResult.component) {
- const genSuccessMsg = `Successfully generated component: ${generatedResult.component.name}`;
+ const genSuccessMsg = `Successfully matched component: ${generatedResult.component.name}`;
  logCollector?.info(genSuccessMsg);
  return {
  component: generatedResult.component,
@@ -2412,10 +2569,10 @@ var BaseLLM = class {
  queryModified: false
  };
  }
- logCollector?.error("Failed to generate dynamic component");
+ logCollector?.error("Failed to match component");
  return {
  component: null,
- reasoning: result.reasoning || "No matching component found and unable to generate dynamic component",
+ reasoning: result.reasoning || "No matching component found and unable to match component",
  method: `${this.getProviderName()}-llm`,
  confidence
  };
@@ -2463,15 +2620,15 @@ var BaseLLM = class {
  }
  }
  /**
- * Generate multiple dynamic components for analytical questions
+ * Match multiple components for analytical questions by visualization types
  * This is used when the user needs multiple visualizations
  */
- async generateMultipleAnalyticalComponents(userPrompt, visualizationTypes, apiKey, logCollector, conversationHistory) {
+ async generateMultipleAnalyticalComponents(userPrompt, availableComponents, visualizationTypes, apiKey, logCollector, conversationHistory) {
  try {
- console.log("\u2713 Generating multiple components:", visualizationTypes);
+ console.log("\u2713 Matching multiple components:", visualizationTypes);
  const components = [];
  for (const vizType of visualizationTypes) {
- const result = await this.generateAnalyticalComponent(userPrompt, vizType, apiKey, logCollector, conversationHistory);
+ const result = await this.generateAnalyticalComponent(userPrompt, availableComponents, vizType, apiKey, logCollector, conversationHistory);
  if (result.component) {
  components.push(result.component);
  }
@@ -2479,75 +2636,45 @@ var BaseLLM = class {
  if (components.length === 0) {
  return {
  components: [],
- reasoning: "Failed to generate any components",
+ reasoning: "Failed to match any components",
  isGenerated: false
  };
  }
  return {
  components,
- reasoning: `Generated ${components.length} components: ${visualizationTypes.join(", ")}`,
+ reasoning: `Matched ${components.length} components: ${visualizationTypes.join(", ")}`,
  isGenerated: true
  };
  } catch (error) {
- console.error("Error generating multiple analytical components:", error);
+ console.error("Error matching multiple analytical components:", error);
  return {
  components: [],
- reasoning: "Error occurred while generating components",
+ reasoning: "Error occurred while matching components",
  isGenerated: false
  };
  }
  }
  /**
- * Generate a complete multi-component response with intelligent container and component props
+ * Match multiple components and wrap them in a container
  */
- async generateMultiComponentResponse(userPrompt, visualizationTypes, apiKey, logCollector, conversationHistory) {
- const schemaDoc = schema.generateSchemaDocumentation();
+ async generateMultiComponentResponse(userPrompt, availableComponents, visualizationTypes, apiKey, logCollector, conversationHistory) {
  try {
- const prompts = await promptLoader.loadPrompts("mutli-component", {
- SCHEMA_DOC: schemaDoc || "No schema available",
- DEFAULT_LIMIT: this.defaultLimit,
- USER_PROMPT: userPrompt,
- VISUALIZATION_TYPES: visualizationTypes.join(", "),
- CONVERSATION_HISTORY: conversationHistory || "No previous conversation"
- });
- const result = await LLM.stream(
- {
- sys: prompts.system,
- user: prompts.user
- },
- {
- model: this.model,
- maxTokens: 3e3,
- temperature: 0.2,
- apiKey: this.getApiKey(apiKey)
- },
- true
- // Parse as JSON
+ const matchResult = await this.generateMultipleAnalyticalComponents(
+ userPrompt,
+ availableComponents,
+ visualizationTypes,
+ apiKey,
+ logCollector,
+ conversationHistory
  );
- if (!result.canGenerate || !result.components || result.components.length === 0) {
+ if (!matchResult.isGenerated || matchResult.components.length === 0) {
  return {
  containerComponent: null,
- reasoning: result.reasoning || "Unable to generate multi-component dashboard",
+ reasoning: matchResult.reasoning || "Unable to match multi-component dashboard",
  isGenerated: false
  };
  }
- const generatedComponents = result.components.map((compData, index) => {
- const query = ensureQueryLimit(compData.query, this.defaultLimit);
- return {
- id: `dynamic_${compData.componentType.toLowerCase()}_${Date.now()}_${index}`,
- name: `Dynamic${compData.componentType}`,
- type: compData.componentType,
- description: compData.description,
- category: "dynamic",
- keywords: [],
- props: {
- query,
- title: compData.title,
- description: compData.description,
- config: compData.config || {}
- }
- };
- });
+ const generatedComponents = matchResult.components;
  generatedComponents.forEach((component, index) => {
  if (component.props.query) {
  logCollector?.logQuery(
@@ -2562,21 +2689,24 @@ var BaseLLM = class {
  );
  }
  });
+ const containerTitle = `${userPrompt} - Dashboard`;
+ const containerDescription = `Multi-component dashboard showing ${visualizationTypes.join(", ")}`;
  logCollector?.logExplanation(
- "Multi-component dashboard generated",
- result.reasoning || `Generated ${generatedComponents.length} components for comprehensive analysis`,
+ "Multi-component dashboard matched",
+ matchResult.reasoning || `Matched ${generatedComponents.length} components for comprehensive analysis`,
  {
  totalComponents: generatedComponents.length,
  componentTypes: generatedComponents.map((c) => c.type),
- containerTitle: result.containerTitle,
- containerDescription: result.containerDescription
+ componentNames: generatedComponents.map((c) => c.name),
+ containerTitle,
+ containerDescription
  }
  );
  const containerComponent = {
  id: `multi_container_${Date.now()}`,
  name: "MultiComponentContainer",
  type: "Container",
- description: result.containerDescription,
+ description: containerDescription,
  category: "dynamic",
  keywords: ["multi", "container", "dashboard"],
  props: {
@@ -2584,14 +2714,14 @@ var BaseLLM = class {
  components: generatedComponents,
  layout: "grid",
  spacing: 24,
- title: result.containerTitle,
- description: result.containerDescription
+ title: containerTitle,
+ description: containerDescription
  }
  }
  };
  return {
  containerComponent,
- reasoning: result.reasoning || `Generated multi-component dashboard with ${generatedComponents.length} components`,
+ reasoning: matchResult.reasoning || `Matched multi-component dashboard with ${generatedComponents.length} components`,
  isGenerated: true
  };
  } catch (error) {
@@ -2612,41 +2742,89 @@ var BaseLLM = class {
  const classInfo = `Question type: ${classification.questionType}, Visualizations: ${classification.visualizations.join(", ") || "None"}, Multiple components: ${classification.needsMultipleComponents}`;
  logCollector?.info(classInfo);
  if (classification.questionType === "analytical") {
- if (classification.visualizations.length > 0) {
- if (classification.needsMultipleComponents && classification.visualizations.length > 1) {
- const multiMsg = "Generating multi-component dashboard...";
- logCollector?.info(multiMsg);
- const result = await this.generateMultiComponentResponse(
+ if (classification.visualizations.length > 1) {
+ const multiMsg = `Matching ${classification.visualizations.length} components for types: ${classification.visualizations.join(", ")}`;
+ logCollector?.info(multiMsg);
+ const matchedComponents = [];
+ for (const vizType of classification.visualizations) {
+ logCollector?.info(`Matching component for type: ${vizType}`);
+ const result = await this.generateAnalyticalComponent(
  userPrompt,
- classification.visualizations,
+ components,
+ vizType,
  apiKey,
  logCollector,
  conversationHistory
  );
+ if (result.component) {
+ matchedComponents.push(result.component);
+ logCollector?.info(`Matched: ${result.component.name}`);
+ } else {
+ logCollector?.warn(`Failed to match component for type: ${vizType}`);
+ }
+ }
+ if (matchedComponents.length === 0) {
  return {
- component: result.containerComponent,
- reasoning: result.reasoning,
- method: "classification-multi-generated",
+ component: null,
+ reasoning: "Failed to match any components for the requested visualization types",
+ method: "classification-multi-failed",
  questionType: classification.questionType,
  needsMultipleComponents: true,
  propsModified: false,
  queryModified: false
  };
- } else {
- const vizType = classification.visualizations[0];
- const result = await this.generateAnalyticalComponent(userPrompt, vizType, apiKey, logCollector, conversationHistory);
- return {
- component: result.component,
- reasoning: result.reasoning,
- method: "classification-generated",
- questionType: classification.questionType,
- needsMultipleComponents: false,
- propsModified: false,
- queryModified: false
- };
  }
+ logCollector?.info("Generating container metadata...");
+ const containerMetadata = await this.generateContainerMetadata(
+ userPrompt,
+ classification.visualizations,
+ apiKey,
+ logCollector,
+ conversationHistory
+ );
+ const containerComponent = {
+ id: `multi_container_${Date.now()}`,
+ name: "MultiComponentContainer",
+ type: "Container",
+ description: containerMetadata.description,
+ category: "dynamic",
+ keywords: ["multi", "container", "dashboard"],
+ props: {
+ config: {
+ components: matchedComponents,
+ layout: "grid",
+ spacing: 24,
+ title: containerMetadata.title,
+ description: containerMetadata.description
+ }
+ }
+ };
+ logCollector?.info(`Created multi-component container with ${matchedComponents.length} components: "${containerMetadata.title}"`);
+ return {
+ component: containerComponent,
+ reasoning: `Matched ${matchedComponents.length} components for visualization types: ${classification.visualizations.join(", ")}`,
+ method: "classification-multi-generated",
+ questionType: classification.questionType,
+ needsMultipleComponents: true,
+ propsModified: false,
+ queryModified: false
+ };
+ } else if (classification.visualizations.length === 1) {
+ const vizType = classification.visualizations[0];
+ logCollector?.info(`Matching single component for type: ${vizType}`);
+ const result = await this.generateAnalyticalComponent(userPrompt, components, vizType, apiKey, logCollector, conversationHistory);
+ return {
+ component: result.component,
+ reasoning: result.reasoning,
+ method: "classification-generated",
+ questionType: classification.questionType,
+ needsMultipleComponents: false,
+ propsModified: false,
+ queryModified: false
+ };
  } else {
- const result = await this.generateAnalyticalComponent(userPrompt, void 0, apiKey, logCollector, conversationHistory);
+ logCollector?.info("No specific visualization type - matching from all components");
+ const result = await this.generateAnalyticalComponent(userPrompt, components, void 0, apiKey, logCollector, conversationHistory);
  return {
  component: result.component,
  reasoning: result.reasoning,
@@ -2657,7 +2835,7 @@ var BaseLLM = class {
  queryModified: false
  };
  }
- } else if (classification.questionType === "data_modification") {
+ } else if (classification.questionType === "data_modification" || classification.questionType === "general") {
  const matchMsg = "Using component matching for data modification...";
  logCollector?.info(matchMsg);
  const matchResult = await this.matchComponent(userPrompt, components, apiKey, logCollector, conversationHistory);
@@ -2805,7 +2983,6 @@ var useAnthropicMethod = async (prompt, components, apiKey, logCollector, conver
  }
  try {
  const matchResult = await anthropicLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory);
- logger.debug(`Anthropic method success: ${matchResult}`);
  return { success: true, data: matchResult };
  } catch (error) {
  const errorMsg = error instanceof Error ? error.message : String(error);
@@ -2859,7 +3036,6 @@ var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey,
  let result;
  if (provider === "anthropic") {
  result = await useAnthropicMethod(prompt, components, anthropicApiKey, logCollector, conversationHistory);
- logger.debug("Anthropic result:", result);
  } else if (provider === "groq") {
  result = await useGroqMethod(prompt, components, groqApiKey, logCollector, conversationHistory);
  } else {
@@ -3060,7 +3236,6 @@ async function handleUserPromptRequest(data, components, sendMessage, anthropicA
  const SA_RUNTIME = payload.SA_RUNTIME;
  const wsId = userPromptRequest.from.id || "unknown";
  logger.info(`[REQUEST ${id}] Processing user prompt: "${prompt.substring(0, 50)}..."`);
- logger.info(`[REQUEST ${id}] Providers: ${llmProviders?.join(", ")}, Anthropic key: ${anthropicApiKey ? "SET" : "NOT SET"}, Groq key: ${groqApiKey ? "SET" : "NOT SET"}`);
  if (processedMessageIds.has(id)) {
  logger.warn(`[REQUEST ${id}] Duplicate request detected - ignoring`);
  return;
@@ -3111,7 +3286,6 @@ async function handleUserPromptRequest(data, components, sendMessage, anthropicA
  return;
  }
  logCollector.info(`Starting user prompt request with ${components.length} components`);
- logger.info(`components length: ${components.length}`);
  const threadManager = ThreadManager.getInstance();
  let thread = threadManager.getThread(threadId);
  if (!thread) {
@@ -3121,7 +3295,6 @@ async function handleUserPromptRequest(data, components, sendMessage, anthropicA
  const conversationHistory = thread.getConversationContext(CONTEXT_CONFIG.MAX_CONVERSATION_CONTEXT_BLOCKS, existingUiBlockId);
  const userResponse = await get_user_response(prompt, components, anthropicApiKey, groqApiKey, llmProviders, logCollector, conversationHistory);
  logCollector.info("User prompt request completed");
- logger.info(`[REQUEST ${id}] Response success: ${userResponse.success}, reason: ${userResponse.success ? "N/A" : userResponse}`);
  if (userResponse.success && userResponse.data && typeof userResponse.data === "object" && "component" in userResponse.data) {
  const component = userResponse.data.component;
  const uiBlockId = existingUiBlockId;
@@ -3163,10 +3336,6 @@ function sendDataResponse4(id, res, sendMessage, clientId) {
  ...res
  }
  };
- logger.info(`[REQUEST ${id}] Sending USER_PROMPT_RES with success=${res.success}`);
- if (!res.success && res.reason) {
- logger.info(`[REQUEST ${id}] Error reason: ${res.reason}`);
- }
  sendMessage(response);
  }

@@ -4865,6 +5034,9 @@ var SuperatomSDK = class {
  this.userManager = new UserManager(this.projectId, 5e3);
  this.dashboardManager = new DashboardManager(this.projectId);
  this.reportManager = new ReportManager(this.projectId);
+ this.initializePromptLoader(config.promptsDir).catch((error) => {
+ logger.error("Failed to initialize PromptLoader:", error);
+ });
  this.initializeUserManager().catch((error) => {
  logger.error("Failed to initialize UserManager:", error);
  });
@@ -4874,6 +5046,21 @@ var SuperatomSDK = class {
  logger.error("Failed to connect to Superatom:", error);
  });
  }
+ /**
+ * Initialize PromptLoader and load prompts into memory
+ */
+ async initializePromptLoader(promptsDir) {
+ try {
+ if (promptsDir) {
+ promptLoader.setPromptsDir(promptsDir);
+ }
+ await promptLoader.initialize();
+ logger.info(`PromptLoader initialized with ${promptLoader.getCacheSize()} prompts from ${promptLoader.getPromptsDir()}`);
+ } catch (error) {
+ logger.error("Failed to initialize PromptLoader:", error);
+ throw error;
+ }
+ }
  /**
  * Initialize UserManager for the project
  */
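Taken together with the constructor change above, a prompts directory can now be supplied when the SDK is created. A hedged usage sketch follows; the SuperatomSDK export name and any configuration fields other than promptsDir are assumptions based on this diff rather than documented API.

// Sketch only — constructor options beyond promptsDir are not shown in this diff.
import { SuperatomSDK } from "@superatomai/sdk-node"; // assumed export name

const sdk = new SuperatomSDK({
  // ...existing configuration options (not shown in this diff)...
  promptsDir: "/srv/my-app/.prompts" // hypothetical path; forwarded to promptLoader.setPromptsDir() before initialize()
});
// Note: the constructor calls initializePromptLoader() without awaiting it and catches its
// rejection, so a bad promptsDir surfaces as a logged error rather than a thrown one.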