@superatomai/sdk-node 0.0.60 → 0.0.62

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -857,6 +857,13 @@ interface SuperatomSDKConfig {
   * If not provided, falls back to provider-based model selection
   */
  dashCompModels?: DashCompModelConfig;
+ /**
+  * Similarity threshold for conversation search (semantic matching)
+  * Value between 0 and 1 (e.g., 0.8 = 80% similarity required)
+  * Higher values require closer matches, lower values allow more distant matches
+  * Default: 0.8
+  */
+ conversationSimilarityThreshold?: number;
  }

  declare const KbNodesQueryFiltersSchema: z.ZodObject<{
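
The new SuperatomSDKConfig option can be set once at construction time. A minimal TypeScript sketch, assuming projectId and modelStrategy are the only other config fields needed here (the full required set lives in the SuperatomSDKConfig type, not in this hunk):

import { SuperatomSDK } from "@superatomai/sdk-node";

// Sketch only: projectId and modelStrategy are assumed config fields.
const sdk = new SuperatomSDK({
  projectId: "my-project",
  modelStrategy: "balanced",
  // New in 0.0.62: require 85% semantic similarity before reusing a past conversation
  conversationSimilarityThreshold: 0.85,
});
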
@@ -2030,6 +2037,12 @@ interface BaseLLMConfig {
   * - 'balanced': Use best model for complex tasks, fast model for simple tasks (default)
   */
  modelStrategy?: ModelStrategy;
+ /**
+  * Similarity threshold for conversation search (semantic matching)
+  * Value between 0 and 1 (e.g., 0.8 = 80% similarity required)
+  * Default: 0.8
+  */
+ conversationSimilarityThreshold?: number;
  }
  /**
   * BaseLLM abstract class for AI-powered component generation and matching
@@ -2041,6 +2054,7 @@ declare abstract class BaseLLM {
  protected defaultLimit: number;
  protected apiKey?: string;
  protected modelStrategy: ModelStrategy;
+ protected conversationSimilarityThreshold: number;
  constructor(config?: BaseLLMConfig);
  /**
   * Get the appropriate model based on task type and model strategy
@@ -2058,6 +2072,16 @@ declare abstract class BaseLLM {
   * @returns The current model strategy
   */
  getModelStrategy(): ModelStrategy;
+ /**
+  * Set the conversation similarity threshold at runtime
+  * @param threshold - Value between 0 and 1 (e.g., 0.8 = 80% similarity required)
+  */
+ setConversationSimilarityThreshold(threshold: number): void;
+ /**
+  * Get the current conversation similarity threshold
+  * @returns The current threshold value
+  */
+ getConversationSimilarityThreshold(): number;
  /**
   * Get the default model for this provider (used for complex tasks like text generation)
   */
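
Because the setter and getter are declared on BaseLLM, each exported provider singleton can also be tuned individually. A sketch, assuming anthropicLLM (listed in the export below) is the same singleton the SDK configures internally:

import { anthropicLLM } from "@superatomai/sdk-node";

// Tighten matching for just this provider; values outside [0, 1] are
// rejected and the threshold falls back to the 0.8 default.
anthropicLLM.setConversationSimilarityThreshold(0.9);
console.log(anthropicLLM.getConversationSimilarityThreshold()); // 0.9
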
@@ -2249,6 +2273,7 @@ declare class SuperatomSDK {
  private llmProviders;
  private databaseType;
  private modelStrategy;
+ private conversationSimilarityThreshold;
  private userManager;
  private dashboardManager;
  private reportManager;
@@ -2359,6 +2384,20 @@ declare class SuperatomSDK {
   * Get current model strategy
   */
  getModelStrategy(): ModelStrategy;
+ /**
+  * Apply conversation similarity threshold to all LLM provider singletons
+  * @param threshold - Value between 0 and 1 (e.g., 0.8 = 80% similarity required)
+  */
+ private applyConversationSimilarityThreshold;
+ /**
+  * Set conversation similarity threshold at runtime
+  * @param threshold - Value between 0 and 1 (e.g., 0.8 = 80% similarity required)
+  */
+ setConversationSimilarityThreshold(threshold: number): void;
+ /**
+  * Get current conversation similarity threshold
+  */
+ getConversationSimilarityThreshold(): number;
  }

  export { type Action, BM25L, type BM25LOptions, type BaseLLMConfig, CONTEXT_CONFIG, type CapturedLog, CleanupService, type CollectionHandler, type CollectionOperation, type DBUIBlock, type DatabaseType, type HybridSearchOptions, type IncomingMessage, type KbNodesQueryFilters, type KbNodesRequestPayload, LLM, type LLMUsageEntry, type LogLevel, type Message, type ModelStrategy, type OutputField, type RerankedResult, SDK_VERSION, STORAGE_CONFIG, SuperatomSDK, type SuperatomSDKConfig, type TaskType, Thread, ThreadManager, type Tool$1 as Tool, type ToolOutputSchema, UIBlock, UILogCollector, type User, UserManager, type UsersData, anthropicLLM, geminiLLM, groqLLM, hybridRerank, llmUsageLogger, logger, openaiLLM, rerankChromaResults, rerankConversationResults, userPromptErrorLogger };
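
At the SDK level the same pair of methods adjusts the threshold at runtime and pushes it to every provider. A sketch, assuming sdk is the instance constructed in the earlier example:

// Loosen matching at runtime; the SDK forwards the value to all provider singletons.
sdk.setConversationSimilarityThreshold(0.7);
console.log(sdk.getConversationSimilarityThreshold()); // 0.7
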
package/dist/index.d.ts CHANGED
@@ -857,6 +857,13 @@ interface SuperatomSDKConfig {
   * If not provided, falls back to provider-based model selection
   */
  dashCompModels?: DashCompModelConfig;
+ /**
+  * Similarity threshold for conversation search (semantic matching)
+  * Value between 0 and 1 (e.g., 0.8 = 80% similarity required)
+  * Higher values require closer matches, lower values allow more distant matches
+  * Default: 0.8
+  */
+ conversationSimilarityThreshold?: number;
  }

  declare const KbNodesQueryFiltersSchema: z.ZodObject<{
@@ -2030,6 +2037,12 @@ interface BaseLLMConfig {
   * - 'balanced': Use best model for complex tasks, fast model for simple tasks (default)
   */
  modelStrategy?: ModelStrategy;
+ /**
+  * Similarity threshold for conversation search (semantic matching)
+  * Value between 0 and 1 (e.g., 0.8 = 80% similarity required)
+  * Default: 0.8
+  */
+ conversationSimilarityThreshold?: number;
  }
  /**
   * BaseLLM abstract class for AI-powered component generation and matching
@@ -2041,6 +2054,7 @@ declare abstract class BaseLLM {
  protected defaultLimit: number;
  protected apiKey?: string;
  protected modelStrategy: ModelStrategy;
+ protected conversationSimilarityThreshold: number;
  constructor(config?: BaseLLMConfig);
  /**
   * Get the appropriate model based on task type and model strategy
@@ -2058,6 +2072,16 @@ declare abstract class BaseLLM {
   * @returns The current model strategy
   */
  getModelStrategy(): ModelStrategy;
+ /**
+  * Set the conversation similarity threshold at runtime
+  * @param threshold - Value between 0 and 1 (e.g., 0.8 = 80% similarity required)
+  */
+ setConversationSimilarityThreshold(threshold: number): void;
+ /**
+  * Get the current conversation similarity threshold
+  * @returns The current threshold value
+  */
+ getConversationSimilarityThreshold(): number;
  /**
   * Get the default model for this provider (used for complex tasks like text generation)
   */
@@ -2249,6 +2273,7 @@ declare class SuperatomSDK {
  private llmProviders;
  private databaseType;
  private modelStrategy;
+ private conversationSimilarityThreshold;
  private userManager;
  private dashboardManager;
  private reportManager;
@@ -2359,6 +2384,20 @@ declare class SuperatomSDK {
   * Get current model strategy
   */
  getModelStrategy(): ModelStrategy;
+ /**
+  * Apply conversation similarity threshold to all LLM provider singletons
+  * @param threshold - Value between 0 and 1 (e.g., 0.8 = 80% similarity required)
+  */
+ private applyConversationSimilarityThreshold;
+ /**
+  * Set conversation similarity threshold at runtime
+  * @param threshold - Value between 0 and 1 (e.g., 0.8 = 80% similarity required)
+  */
+ setConversationSimilarityThreshold(threshold: number): void;
+ /**
+  * Get current conversation similarity threshold
+  */
+ getConversationSimilarityThreshold(): number;
  }

  export { type Action, BM25L, type BM25LOptions, type BaseLLMConfig, CONTEXT_CONFIG, type CapturedLog, CleanupService, type CollectionHandler, type CollectionOperation, type DBUIBlock, type DatabaseType, type HybridSearchOptions, type IncomingMessage, type KbNodesQueryFilters, type KbNodesRequestPayload, LLM, type LLMUsageEntry, type LogLevel, type Message, type ModelStrategy, type OutputField, type RerankedResult, SDK_VERSION, STORAGE_CONFIG, SuperatomSDK, type SuperatomSDKConfig, type TaskType, Thread, ThreadManager, type Tool$1 as Tool, type ToolOutputSchema, UIBlock, UILogCollector, type User, UserManager, type UsersData, anthropicLLM, geminiLLM, groqLLM, hybridRerank, llmUsageLogger, logger, openaiLLM, rerankChromaResults, rerankConversationResults, userPromptErrorLogger };
package/dist/index.js CHANGED
@@ -2544,7 +2544,13 @@ var ArtifactsRequestPayloadSchema = import_zod3.z.object({
  limit: import_zod3.z.number().optional(),
  // Query operation fields
  filters: ArtifactsQueryFiltersSchema.optional(),
- sort: import_zod3.z.enum(["ASC", "DESC"]).optional()
+ sort: import_zod3.z.enum(["ASC", "DESC"]).optional(),
+ // Menu grouping fields
+ type: import_zod3.z.string().optional(),
+ menuId: import_zod3.z.number().optional(),
+ artifactGroupName: import_zod3.z.string().optional(),
+ artifactGroupId: import_zod3.z.string().optional(),
+ artifactGroupIcon: import_zod3.z.string().optional()
  }).optional()
  });
  var ArtifactsRequestMessageSchema = import_zod3.z.object({
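
The artifacts payload now accepts optional menu-grouping fields. A hypothetical data block for a "create" operation, assuming the operation/data envelope seen later in handleArtifactsRequest; every value below is illustrative, not taken from the package:

// All of the new fields are optional per the updated zod schema.
const artifactCreateData = {
  name: "Quarterly revenue dashboard",
  status: "draft",
  type: "dashboard",            // new: artifact type
  menuId: 42,                   // new: menu the artifact hangs under
  artifactGroupName: "Finance", // new: display name of the group
  artifactGroupId: "grp-finance",
  artifactGroupIcon: "chart-bar",
};
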
@@ -2621,7 +2627,9 @@ var MenusRequestPayloadSchema = import_zod3.z.object({
  items: import_zod3.z.array(import_zod3.z.object({
  id: import_zod3.z.number(),
  sortOrder: import_zod3.z.number()
- })).optional()
+ })).optional(),
+ menuJson: import_zod3.z.record(import_zod3.z.unknown()).optional(),
+ version: import_zod3.z.number().optional()
  }).optional()
  });
  var MenusRequestMessageSchema = import_zod3.z.object({
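
Menu payloads likewise gain an open-ended menuJson record and a version number. A hypothetical data block, assuming the rest of the payload is unchanged:

// menuJson is z.record(z.unknown()), so any JSON object is accepted;
// version is a plain number. Field values here are illustrative only.
const menuUpdateData = {
  items: [
    { id: 1, sortOrder: 0 },
    { id: 7, sortOrder: 1 },
  ],
  menuJson: { layout: "sidebar", collapsed: false },
  version: 3,
};
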
@@ -6002,6 +6010,7 @@ var BaseLLM = class {
  this.defaultLimit = config?.defaultLimit || 50;
  this.apiKey = config?.apiKey;
  this.modelStrategy = config?.modelStrategy || "fast";
+ this.conversationSimilarityThreshold = config?.conversationSimilarityThreshold || 0.8;
  }
  /**
   * Get the appropriate model based on task type and model strategy
@@ -6034,6 +6043,26 @@ var BaseLLM = class {
  getModelStrategy() {
  return this.modelStrategy;
  }
+ /**
+  * Set the conversation similarity threshold at runtime
+  * @param threshold - Value between 0 and 1 (e.g., 0.8 = 80% similarity required)
+  */
+ setConversationSimilarityThreshold(threshold) {
+ if (threshold < 0 || threshold > 1) {
+ logger.warn(`[${this.getProviderName()}] Invalid threshold ${threshold}, must be between 0 and 1. Using default 0.8`);
+ this.conversationSimilarityThreshold = 0.8;
+ return;
+ }
+ this.conversationSimilarityThreshold = threshold;
+ logger.info(`[${this.getProviderName()}] Conversation similarity threshold set to: ${threshold}`);
+ }
+ /**
+  * Get the current conversation similarity threshold
+  * @returns The current threshold value
+  */
+ getConversationSimilarityThreshold() {
+ return this.conversationSimilarityThreshold;
+ }
  /**
   * Get the API key (from instance, parameter, or environment)
   */
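
The setter validates the range rather than throwing. A short sketch of the observable behavior, assuming a provider instance called llm:

llm.setConversationSimilarityThreshold(1.5);   // out of range: logs a warning
llm.getConversationSimilarityThreshold();      // 0.8 (reset to the default)

llm.setConversationSimilarityThreshold(0.65);  // in range: accepted and logged
llm.getConversationSimilarityThreshold();      // 0.65

Note that the BaseLLM constructor reads the option with `|| 0.8`, so an explicit 0 in BaseLLMConfig also falls back to 0.8, whereas the SuperatomSDK constructor below uses `?? 0.8` and preserves an explicit 0.
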
@@ -7335,8 +7364,7 @@ ${errorMsg}
  userPrompt,
  collections,
  userId,
- similarityThreshold: 0.8
- // 80% threshold
+ similarityThreshold: this.conversationSimilarityThreshold
  });
  if (conversationMatch) {
  logger.info(`[${this.getProviderName()}] \u2713 Found matching conversation with ${(conversationMatch.similarity * 100).toFixed(2)}% similarity`);
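
The previously hard-coded 0.8 is now read from the instance, so the value configured on the provider (or via the SDK) directly gates conversation reuse. A sketch of that gate, assuming similarityThreshold is treated as a minimum similarity score by the conversation search helper:

// Assumed interpretation: a candidate conversation is reused only when its
// similarity score meets or exceeds the configured threshold.
const candidate = { similarity: 0.83 };
const threshold = llm.getConversationSimilarityThreshold();
const isMatch = candidate.similarity >= threshold; // true at 0.8, false at 0.9
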
@@ -10601,6 +10629,7 @@ async function handleArtifactsRequest(data, collections, sendMessage) {
  const request = ArtifactsRequestMessageSchema.parse(data);
  const { id, payload, from } = request;
  const { operation, data: requestData } = payload;
+ logger.info("[SDK-NODEJS] Received artifacts request:", JSON.stringify({ operation, requestData }, null, 2));
  const artifactId = requestData?.id;
  const name = requestData?.name;
  const createdBy = requestData?.createdBy;
@@ -10610,9 +10639,22 @@ async function handleArtifactsRequest(data, collections, sendMessage) {
  const limit = requestData?.limit;
  const filters = requestData?.filters;
  const sort = requestData?.sort;
+ const type = requestData?.type;
+ const menuId = requestData?.menuId;
+ const artifactGroupName = requestData?.artifactGroupName;
+ const artifactGroupId = requestData?.artifactGroupId;
+ const artifactGroupIcon = requestData?.artifactGroupIcon;
+ logger.info("[SDK-NODEJS] Extracted params for create:", JSON.stringify({
+ name,
+ type,
+ menuId,
+ artifactGroupName,
+ artifactGroupId,
+ artifactGroupIcon
+ }, null, 2));
  switch (operation) {
  case "create":
- await handleCreate6(id, name, createdBy, dsl, status, executeCollection, sendMessage, from.id);
+ await handleCreate6(id, name, createdBy, dsl, status, type, menuId, artifactGroupName, artifactGroupId, artifactGroupIcon, executeCollection, sendMessage, from.id);
  break;
  case "update":
  await handleUpdate6(id, artifactId, name, dsl, status, deleted, executeCollection, sendMessage, from.id);
@@ -10643,7 +10685,7 @@ async function handleArtifactsRequest(data, collections, sendMessage) {
  }, sendMessage);
  }
  }
- async function handleCreate6(id, name, createdBy, dsl, status, executeCollection, sendMessage, clientId) {
+ async function handleCreate6(id, name, createdBy, dsl, status, type, menuId, artifactGroupName, artifactGroupId, artifactGroupIcon, executeCollection, sendMessage, clientId) {
  if (!name) {
  sendResponse8(id, {
  success: false,
@@ -10652,7 +10694,17 @@ async function handleArtifactsRequest(data, collections, sendMessage) {
  return;
  }
  try {
- const result = await executeCollection("artifacts", "create", { name, createdBy, dsl, status });
+ const result = await executeCollection("artifacts", "create", {
+ name,
+ createdBy,
+ dsl,
+ status,
+ type,
+ menuId,
+ artifactGroupName,
+ artifactGroupId,
+ artifactGroupIcon
+ });
  sendResponse8(id, {
  success: true,
  data: result.data,
@@ -12892,8 +12944,10 @@ var SuperatomSDK = class {
  this.llmProviders = config.LLM_PROVIDERS || getLLMProviders();
  this.databaseType = config.databaseType || "postgresql";
  this.modelStrategy = config.modelStrategy || "fast";
+ this.conversationSimilarityThreshold = config.conversationSimilarityThreshold ?? 0.8;
  this.applyModelStrategy(this.modelStrategy);
- logger.info(`Initializing Superatom SDK v${SDK_VERSION} for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, database type: ${this.databaseType}, model strategy: ${this.modelStrategy}`);
+ this.applyConversationSimilarityThreshold(this.conversationSimilarityThreshold);
+ logger.info(`Initializing Superatom SDK v${SDK_VERSION} for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, database type: ${this.databaseType}, model strategy: ${this.modelStrategy}, conversation similarity threshold: ${this.conversationSimilarityThreshold}`);
  this.userManager = new UserManager(this.projectId, 5e3);
  this.dashboardManager = new DashboardManager(this.projectId);
  this.reportManager = new ReportManager(this.projectId);
@@ -13306,6 +13360,31 @@ var SuperatomSDK = class {
  getModelStrategy() {
  return this.modelStrategy;
  }
+ /**
+  * Apply conversation similarity threshold to all LLM provider singletons
+  * @param threshold - Value between 0 and 1 (e.g., 0.8 = 80% similarity required)
+  */
+ applyConversationSimilarityThreshold(threshold) {
+ anthropicLLM.setConversationSimilarityThreshold(threshold);
+ groqLLM.setConversationSimilarityThreshold(threshold);
+ geminiLLM.setConversationSimilarityThreshold(threshold);
+ openaiLLM.setConversationSimilarityThreshold(threshold);
+ logger.info(`Conversation similarity threshold '${threshold}' applied to all LLM providers`);
+ }
+ /**
+  * Set conversation similarity threshold at runtime
+  * @param threshold - Value between 0 and 1 (e.g., 0.8 = 80% similarity required)
+  */
+ setConversationSimilarityThreshold(threshold) {
+ this.conversationSimilarityThreshold = threshold;
+ this.applyConversationSimilarityThreshold(threshold);
+ }
+ /**
+  * Get current conversation similarity threshold
+  */
+ getConversationSimilarityThreshold() {
+ return this.conversationSimilarityThreshold;
+ }
  };
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
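
applyConversationSimilarityThreshold fans the value out to the four provider singletons, so a single SDK-level call keeps them in sync. A sketch of the observable effect, assuming those singletons are the same instances exported by the package and that projectId is sufficient configuration for this illustration:

import { SuperatomSDK, anthropicLLM, geminiLLM, groqLLM, openaiLLM } from "@superatomai/sdk-node";

const sdk = new SuperatomSDK({ projectId: "my-project" }); // providers start at the 0.8 default
sdk.setConversationSimilarityThreshold(0.75);

// Every provider singleton now reports the updated threshold.
[anthropicLLM, geminiLLM, groqLLM, openaiLLM].forEach((llm) =>
  console.log(llm.getConversationSimilarityThreshold()) // 0.75
);
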