@superatomai/sdk-node 0.0.40 → 0.0.41

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -615,6 +615,68 @@ declare const IncomingMessageSchema: z.ZodObject<{
      payload?: unknown;
  }>;
  type IncomingMessage = z.infer<typeof IncomingMessageSchema>;
+ declare const ComponentSchema: z.ZodObject<{
+     id: z.ZodString;
+     name: z.ZodString;
+     displayName: z.ZodOptional<z.ZodString>;
+     isDisplayComp: z.ZodOptional<z.ZodBoolean>;
+     type: z.ZodString;
+     description: z.ZodString;
+     props: z.ZodObject<{
+         query: z.ZodOptional<z.ZodUnion<[z.ZodString, z.ZodObject<{}, "strip", z.ZodTypeAny, {}, {}>]>>;
+         title: z.ZodOptional<z.ZodString>;
+         description: z.ZodOptional<z.ZodString>;
+         config: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodUnknown>>;
+         actions: z.ZodOptional<z.ZodArray<z.ZodAny, "many">>;
+     }, "strip", z.ZodTypeAny, {
+         description?: string | undefined;
+         query?: string | {} | undefined;
+         title?: string | undefined;
+         config?: Record<string, unknown> | undefined;
+         actions?: any[] | undefined;
+     }, {
+         description?: string | undefined;
+         query?: string | {} | undefined;
+         title?: string | undefined;
+         config?: Record<string, unknown> | undefined;
+         actions?: any[] | undefined;
+     }>;
+     category: z.ZodOptional<z.ZodString>;
+     keywords: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
+ }, "strip", z.ZodTypeAny, {
+     id: string;
+     type: string;
+     name: string;
+     description: string;
+     props: {
+         description?: string | undefined;
+         query?: string | {} | undefined;
+         title?: string | undefined;
+         config?: Record<string, unknown> | undefined;
+         actions?: any[] | undefined;
+     };
+     displayName?: string | undefined;
+     isDisplayComp?: boolean | undefined;
+     category?: string | undefined;
+     keywords?: string[] | undefined;
+ }, {
+     id: string;
+     type: string;
+     name: string;
+     description: string;
+     props: {
+         description?: string | undefined;
+         query?: string | {} | undefined;
+         title?: string | undefined;
+         config?: Record<string, unknown> | undefined;
+         actions?: any[] | undefined;
+     };
+     displayName?: string | undefined;
+     isDisplayComp?: boolean | undefined;
+     category?: string | undefined;
+     keywords?: string[] | undefined;
+ }>;
+ type Component = z.infer<typeof ComponentSchema>;
  declare const ToolSchema: z.ZodObject<{
      id: z.ZodString;
      name: z.ZodString;
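ComponentSchema and its inferred Component type back the new component-matching APIs later in this diff. Neither identifier is added to the package's export list, so the sketch below only illustrates the shape; every concrete value in it is hypothetical:

// Illustrative object matching the inferred Component shape (not exported as of 0.0.41).
const kpiCard = {
    id: "c1",
    name: "KPICard",
    type: "chart",                               // any string; the schema does not enumerate types
    description: "Single-value KPI card",
    props: {
        title: "Total Revenue",                  // every props field is optional
        query: "SELECT SUM(amount) FROM orders", // string or object form accepted
        config: { format: "currency" },
    },
    keywords: ["revenue", "kpi"],
};
// Inside the package, ComponentSchema.parse(kpiCard) would validate this at runtime.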
@@ -640,6 +702,13 @@ type CollectionHandler<TParams = any, TResult = any> = (params: TParams) => Prom
  type LLMProvider = 'anthropic' | 'groq' | 'gemini' | 'openai';

  type DatabaseType = 'postgresql' | 'mssql' | 'snowflake' | 'mysql';
+ /**
+  * Model strategy for controlling which models are used for different tasks
+  * - 'best': Use the best model (e.g., Sonnet) for all tasks - highest quality, higher cost
+  * - 'fast': Use the fast model (e.g., Haiku) for all tasks - lower quality, lower cost
+  * - 'balanced': Use best model for complex tasks, fast model for simple tasks (default)
+  */
+ type ModelStrategy = 'best' | 'fast' | 'balanced';
  interface SuperatomSDKConfig {
      url?: string;
      apiKey?: string;
@@ -655,6 +724,13 @@ interface SuperatomSDKConfig {
      OPENAI_API_KEY?: string;
      LLM_PROVIDERS?: LLMProvider[];
      logLevel?: LogLevel;
+     /**
+      * Model selection strategy for LLM API calls:
+      * - 'best': Use best model for all tasks (highest quality, higher cost)
+      * - 'fast': Use fast model for all tasks (lower quality, lower cost)
+      * - 'balanced': Use best model for complex tasks, fast model for simple tasks (default)
+      */
+     modelStrategy?: ModelStrategy;
  }

  declare const KbNodesQueryFiltersSchema: z.ZodObject<{
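A configuration sketch using only fields visible in this diff; the url and apiKey values are placeholders, and config fields not shown here are unchanged from 0.0.40. Note one wrinkle the diff exposes: the doc comments call 'balanced' the default, but both the BaseLLM and SuperatomSDK constructors in index.js below fall back to 'fast' when modelStrategy is omitted, so pass it explicitly if you want 'balanced':

import { SuperatomSDK, type SuperatomSDKConfig } from "@superatomai/sdk-node";

const config: SuperatomSDKConfig = {
    url: "wss://example.invalid",               // placeholder endpoint
    apiKey: process.env.SUPERATOM_API_KEY,      // placeholder env var name
    OPENAI_API_KEY: process.env.OPENAI_API_KEY,
    LLM_PROVIDERS: ["openai"],
    modelStrategy: "balanced",                  // explicit, since the shipped default is 'fast'
};

const sdk = new SuperatomSDK(config);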
@@ -785,6 +861,11 @@ declare const KbNodesRequestPayloadSchema: z.ZodObject<{
      } | undefined;
  }>;
  type KbNodesRequestPayload = z.infer<typeof KbNodesRequestPayloadSchema>;
+ interface T_RESPONSE {
+     success: boolean;
+     data?: any;
+     errors: string[];
+ }

  /**
   * UserManager class to handle CRUD operations on users with file persistence
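T_RESPONSE is the result envelope returned by generateTextResponse and handleUserRequest on the new BaseLLM class further down in this diff. It is not added to the export list, so this sketch simply mirrors the declared shape when consuming a result (anthropicLLM is one of the newly exported singletons; render is a hypothetical consumer):

const result = await anthropicLLM.handleUserRequest(userPrompt, components);
if (result.success) {
    render(result.data);                      // data is untyped; shape depends on the code path
} else {
    console.error(result.errors.join("\n")); // errors is always present
}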
@@ -1790,6 +1871,216 @@ declare function rerankConversationResults<T extends {
      bm25Score: number;
  }>;

+ /**
+  * Task types for model selection
+  * - 'complex': Text generation, component matching, parameter adaptation (uses best model in balanced mode)
+  * - 'simple': Classification, action generation (uses fast model in balanced mode)
+  */
+ type TaskType = 'complex' | 'simple';
+ interface BaseLLMConfig {
+     model?: string;
+     fastModel?: string;
+     defaultLimit?: number;
+     apiKey?: string;
+     /**
+      * Model selection strategy:
+      * - 'best': Use best model for all tasks (highest quality, higher cost)
+      * - 'fast': Use fast model for all tasks (lower quality, lower cost)
+      * - 'balanced': Use best model for complex tasks, fast model for simple tasks (default)
+      */
+     modelStrategy?: ModelStrategy;
+ }
+ /**
+  * BaseLLM abstract class for AI-powered component generation and matching
+  * Provides common functionality for all LLM providers
+  */
+ declare abstract class BaseLLM {
+     protected model: string;
+     protected fastModel: string;
+     protected defaultLimit: number;
+     protected apiKey?: string;
+     protected modelStrategy: ModelStrategy;
+     constructor(config?: BaseLLMConfig);
+     /**
+      * Get the appropriate model based on task type and model strategy
+      * @param taskType - 'complex' for text generation/matching, 'simple' for classification/actions
+      * @returns The model string to use for this task
+      */
+     protected getModelForTask(taskType: TaskType): string;
+     /**
+      * Set the model strategy at runtime
+      * @param strategy - 'best', 'fast', or 'balanced'
+      */
+     setModelStrategy(strategy: ModelStrategy): void;
+     /**
+      * Get the current model strategy
+      * @returns The current model strategy
+      */
+     getModelStrategy(): ModelStrategy;
+     /**
+      * Get the default model for this provider (used for complex tasks like text generation)
+      */
+     protected abstract getDefaultModel(): string;
+     /**
+      * Get the default fast model for this provider (used for simple tasks: classification, matching, actions)
+      * Should return a cheaper/faster model like Haiku for Anthropic
+      */
+     protected abstract getDefaultFastModel(): string;
+     /**
+      * Get the default API key from environment
+      */
+     protected abstract getDefaultApiKey(): string | undefined;
+     /**
+      * Get the provider name (for logging)
+      */
+     protected abstract getProviderName(): string;
+     /**
+      * Get the API key (from instance, parameter, or environment)
+      */
+     protected getApiKey(apiKey?: string): string | undefined;
+     /**
+      * Check if a component contains a Form (data_modification component)
+      * Forms have hardcoded defaultValues that become stale when cached
+      * This checks both single Form components and Forms inside MultiComponentContainer
+      */
+     protected containsFormComponent(component: any): boolean;
+     /**
+      * Match components from text response suggestions and generate follow-up questions
+      * Takes a text response with component suggestions (c1:type format) and matches with available components
+      * Also generates title, description, and intelligent follow-up questions (actions) based on the analysis
+      * All components are placed in a default MultiComponentContainer layout
+      * @param analysisContent - The text response containing component suggestions
+      * @param components - List of available components
+      * @param apiKey - Optional API key
+      * @param logCollector - Optional log collector
+      * @param componentStreamCallback - Optional callback to stream primary KPI component as soon as it's identified
+      * @returns Object containing matched components, layout title/description, and follow-up actions
+      */
+     matchComponentsFromAnalysis(analysisContent: string, components: Component[], apiKey?: string, logCollector?: any, componentStreamCallback?: (component: Component) => void, deferredTools?: any[], executedTools?: any[]): Promise<{
+         components: Component[];
+         layoutTitle: string;
+         layoutDescription: string;
+         actions: Action[];
+     }>;
+     /**
+      * Classify user question into category and detect external tools needed
+      * Determines if question is for data analysis, requires external tools, or needs text response
+      */
+     classifyQuestionCategory(userPrompt: string, apiKey?: string, logCollector?: any, conversationHistory?: string, externalTools?: any[]): Promise<{
+         category: 'data_analysis' | 'data_modification' | 'general';
+         externalTools: Array<{
+             type: string;
+             name: string;
+             description: string;
+             parameters: Record<string, any>;
+         }>;
+         dataAnalysisType?: 'visualization' | 'calculation' | 'comparison' | 'trend';
+         reasoning: string;
+         confidence: number;
+     }>;
+     /**
+      * Adapt UI block parameters based on current user question
+      * Takes a matched UI block from semantic search and modifies its props to answer the new question
+      */
+     adaptUIBlockParameters(currentUserPrompt: string, originalUserPrompt: string, matchedUIBlock: any, apiKey?: string, logCollector?: any): Promise<{
+         success: boolean;
+         adaptedComponent?: Component;
+         parametersChanged?: Array<{
+             field: string;
+             reason: string;
+         }>;
+         explanation: string;
+     }>;
+     /**
+      * Generate text-based response for user question
+      * This provides conversational text responses instead of component generation
+      * Supports tool calling for query execution with automatic retry on errors (max 3 attempts)
+      * After generating text response, if components are provided, matches suggested components
+      * @param streamCallback - Optional callback function to receive text chunks as they stream
+      * @param collections - Collection registry for executing database queries via database.execute
+      * @param components - Optional list of available components for matching suggestions
+      * @param externalTools - Optional array of external tools (email, calendar, etc.) that can be called
+      * @param category - Question category ('data_analysis' | 'data_modification' | 'general'). For data_modification, answer component streaming is skipped. For general, component generation is skipped entirely.
+      */
+     generateTextResponse(userPrompt: string, apiKey?: string, logCollector?: any, conversationHistory?: string, streamCallback?: (chunk: string) => void, collections?: any, components?: Component[], externalTools?: any[], category?: 'data_analysis' | 'data_modification' | 'general'): Promise<T_RESPONSE>;
+     /**
+      * Main orchestration function with semantic search and multi-step classification
+      * NEW FLOW (Recommended):
+      * 1. Semantic search: Check previous conversations (>60% match)
+      *    - If match found → Adapt UI block parameters and return
+      * 2. Category classification: Determine if data_analysis, requires_external_tools, or text_response
+      * 3. Route appropriately based on category and response mode
+      *
+      * @param responseMode - 'component' for component generation (default), 'text' for text responses
+      * @param streamCallback - Optional callback function to receive text chunks as they stream (only for text mode)
+      * @param collections - Collection registry for executing database queries (required for text mode)
+      * @param externalTools - Optional array of external tools (email, calendar, etc.) that can be called (only for text mode)
+      */
+     handleUserRequest(userPrompt: string, components: Component[], apiKey?: string, logCollector?: any, conversationHistory?: string, responseMode?: 'component' | 'text', streamCallback?: (chunk: string) => void, collections?: any, externalTools?: any[], userId?: string): Promise<T_RESPONSE>;
+     /**
+      * Generate next questions that the user might ask based on the original prompt and generated component
+      * This helps provide intelligent suggestions for follow-up queries
+      * For general/conversational questions without components, pass textResponse instead
+      */
+     generateNextQuestions(originalUserPrompt: string, component?: Component | null, componentData?: Record<string, unknown>, apiKey?: string, logCollector?: any, conversationHistory?: string, textResponse?: string): Promise<string[]>;
+ }
+
+ interface AnthropicLLMConfig extends BaseLLMConfig {
+ }
+ /**
+  * AnthropicLLM class for handling AI-powered component generation and matching using Anthropic Claude
+  */
+ declare class AnthropicLLM extends BaseLLM {
+     constructor(config?: AnthropicLLMConfig);
+     protected getDefaultModel(): string;
+     protected getDefaultFastModel(): string;
+     protected getDefaultApiKey(): string | undefined;
+     protected getProviderName(): string;
+ }
+ declare const anthropicLLM: AnthropicLLM;
+
+ interface GroqLLMConfig extends BaseLLMConfig {
+ }
+ /**
+  * GroqLLM class for handling AI-powered component generation and matching using Groq
+  */
+ declare class GroqLLM extends BaseLLM {
+     constructor(config?: GroqLLMConfig);
+     protected getDefaultModel(): string;
+     protected getDefaultFastModel(): string;
+     protected getDefaultApiKey(): string | undefined;
+     protected getProviderName(): string;
+ }
+ declare const groqLLM: GroqLLM;
+
+ interface GeminiLLMConfig extends BaseLLMConfig {
+ }
+ /**
+  * GeminiLLM class for handling AI-powered component generation and matching using Google Gemini
+  */
+ declare class GeminiLLM extends BaseLLM {
+     constructor(config?: GeminiLLMConfig);
+     protected getDefaultModel(): string;
+     protected getDefaultFastModel(): string;
+     protected getDefaultApiKey(): string | undefined;
+     protected getProviderName(): string;
+ }
+ declare const geminiLLM: GeminiLLM;
+
+ interface OpenAILLMConfig extends BaseLLMConfig {
+ }
+ /**
+  * OpenAILLM class for handling AI-powered component generation and matching using OpenAI GPT models
+  */
+ declare class OpenAILLM extends BaseLLM {
+     constructor(config?: OpenAILLMConfig);
+     protected getDefaultModel(): string;
+     protected getDefaultFastModel(): string;
+     protected getDefaultApiKey(): string | undefined;
+     protected getProviderName(): string;
+ }
+ declare const openaiLLM: OpenAILLM;
+
  declare const SDK_VERSION = "0.0.8";
  type MessageTypeHandler = (message: IncomingMessage) => void | Promise<void>;
  declare class SuperatomSDK {
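The four provider singletons (anthropicLLM, groqLLM, geminiLLM, openaiLLM) are newly exported, so the strategy can also be tuned per provider rather than SDK-wide. A sketch, assuming API keys come from the environment:

import { anthropicLLM, openaiLLM } from "@superatomai/sdk-node";

anthropicLLM.setModelStrategy("balanced");    // best model for complex tasks, fast model for simple ones
openaiLLM.setModelStrategy("fast");           // cheapest model for everything
console.log(anthropicLLM.getModelStrategy()); // "balanced"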
@@ -1814,6 +2105,7 @@ declare class SuperatomSDK {
      private openaiApiKey;
      private llmProviders;
      private databaseType;
+     private modelStrategy;
      private userManager;
      private dashboardManager;
      private reportManager;
@@ -1910,6 +2202,20 @@ declare class SuperatomSDK {
       * Get the stored tools
       */
      getTools(): Tool$1[];
+     /**
+      * Apply model strategy to all LLM provider singletons
+      * @param strategy - 'best', 'fast', or 'balanced'
+      */
+     private applyModelStrategy;
+     /**
+      * Set model strategy at runtime
+      * @param strategy - 'best', 'fast', or 'balanced'
+      */
+     setModelStrategy(strategy: ModelStrategy): void;
+     /**
+      * Get current model strategy
+      */
+     getModelStrategy(): ModelStrategy;
  }

- export { type Action, BM25L, type BM25LOptions, CONTEXT_CONFIG, type CapturedLog, CleanupService, type CollectionHandler, type CollectionOperation, type DBUIBlock, type DatabaseType, type HybridSearchOptions, type IncomingMessage, type KbNodesQueryFilters, type KbNodesRequestPayload, LLM, type LLMUsageEntry, type LogLevel, type Message, type RerankedResult, SDK_VERSION, STORAGE_CONFIG, SuperatomSDK, type SuperatomSDKConfig, Thread, ThreadManager, type Tool$1 as Tool, UIBlock, UILogCollector, type User, UserManager, type UsersData, hybridRerank, llmUsageLogger, logger, rerankChromaResults, rerankConversationResults, userPromptErrorLogger };
+ export { type Action, BM25L, type BM25LOptions, type BaseLLMConfig, CONTEXT_CONFIG, type CapturedLog, CleanupService, type CollectionHandler, type CollectionOperation, type DBUIBlock, type DatabaseType, type HybridSearchOptions, type IncomingMessage, type KbNodesQueryFilters, type KbNodesRequestPayload, LLM, type LLMUsageEntry, type LogLevel, type Message, type ModelStrategy, type RerankedResult, SDK_VERSION, STORAGE_CONFIG, SuperatomSDK, type SuperatomSDKConfig, type TaskType, Thread, ThreadManager, type Tool$1 as Tool, UIBlock, UILogCollector, type User, UserManager, type UsersData, anthropicLLM, geminiLLM, groqLLM, hybridRerank, llmUsageLogger, logger, openaiLLM, rerankChromaResults, rerankConversationResults, userPromptErrorLogger };
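Because setModelStrategy on the SDK fans out to every provider singleton (see applyModelStrategy in the index.js changes below), the strategy can be flipped at runtime, for example to cut cost during bulk work. A sketch assuming an initialized sdk instance; runNightlyBatch is a hypothetical workload:

sdk.setModelStrategy("fast");         // route every task to the cheap model
await runNightlyBatch(sdk);           // hypothetical bulk workload
sdk.setModelStrategy("balanced");     // restore complex → best, simple → fast routing
console.log(sdk.getModelStrategy());  // "balanced"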
package/dist/index.js CHANGED
@@ -1623,9 +1623,13 @@ __export(index_exports, {
    UIBlock: () => UIBlock,
    UILogCollector: () => UILogCollector,
    UserManager: () => UserManager,
+   anthropicLLM: () => anthropicLLM,
+   geminiLLM: () => geminiLLM,
+   groqLLM: () => groqLLM,
    hybridRerank: () => hybridRerank,
    llmUsageLogger: () => llmUsageLogger,
    logger: () => logger,
+   openaiLLM: () => openaiLLM,
    rerankChromaResults: () => rerankChromaResults,
    rerankConversationResults: () => rerankConversationResults,
    userPromptErrorLogger: () => userPromptErrorLogger
@@ -3432,30 +3436,47 @@ var import_jsonrepair = require("jsonrepair");
  var import_fs4 = __toESM(require("fs"));
  var import_path3 = __toESM(require("path"));
  var PRICING = {
-   // Anthropic
-   "claude-3-5-sonnet-20241022": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
+   // Anthropic (December 2025)
+   "claude-opus-4-5": { input: 5, output: 25, cacheRead: 0.5, cacheWrite: 6.25 },
+   "claude-opus-4-5-20251101": { input: 5, output: 25, cacheRead: 0.5, cacheWrite: 6.25 },
    "claude-sonnet-4-5": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
    "claude-sonnet-4-5-20250929": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
-   "claude-3-5-haiku-20241022": { input: 0.8, output: 4, cacheRead: 0.08, cacheWrite: 1 },
-   "claude-haiku-4-5-20251001": { input: 0.8, output: 4, cacheRead: 0.08, cacheWrite: 1 },
+   "claude-haiku-4-5": { input: 1, output: 5, cacheRead: 0.1, cacheWrite: 1.25 },
+   "claude-haiku-4-5-20251001": { input: 1, output: 5, cacheRead: 0.1, cacheWrite: 1.25 },
+   "claude-3-5-sonnet-20241022": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
+   "claude-3-5-haiku-20241022": { input: 1, output: 5, cacheRead: 0.1, cacheWrite: 1.25 },
    "claude-3-opus-20240229": { input: 15, output: 75, cacheRead: 1.5, cacheWrite: 18.75 },
    "claude-3-sonnet-20240229": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
    "claude-3-haiku-20240307": { input: 0.25, output: 1.25, cacheRead: 0.03, cacheWrite: 0.3 },
-   // OpenAI
-   "gpt-4o": { input: 2.5, output: 10 },
+   // OpenAI (December 2025)
+   "gpt-5": { input: 1.25, output: 10 },
+   "gpt-5-mini": { input: 0.25, output: 2 },
+   "gpt-4o": { input: 5, output: 15 },
+   // Updated pricing as of late 2025
    "gpt-4o-mini": { input: 0.15, output: 0.6 },
    "gpt-4-turbo": { input: 10, output: 30 },
    "gpt-4": { input: 30, output: 60 },
    "gpt-3.5-turbo": { input: 0.5, output: 1.5 },
-   // Gemini
+   // Google Gemini (December 2025)
+   "gemini-3-pro": { input: 2, output: 8 },
+   // New Gemini 3
+   "gemini-2.5-pro": { input: 1.25, output: 10 },
+   // For prompts ≤200K tokens, 2x for >200K
+   "gemini-2.5-flash": { input: 0.15, output: 0.6 },
+   // Standard mode (thinking disabled: $0.60, thinking enabled: $3.50)
+   "gemini-2.5-flash-lite": { input: 0.1, output: 0.4 },
+   "gemini-2.0-flash": { input: 0.1, output: 0.4 },
+   "gemini-2.0-flash-lite": { input: 0.075, output: 0.3 },
    "gemini-1.5-pro": { input: 1.25, output: 5 },
    "gemini-1.5-flash": { input: 0.075, output: 0.3 },
-   "gemini-2.0-flash-exp": { input: 0.1, output: 0.4 },
-   // Groq (very cheap)
+   // Groq (December 2025)
    "llama-3.3-70b-versatile": { input: 0.59, output: 0.79 },
    "llama-3.1-70b-versatile": { input: 0.59, output: 0.79 },
    "llama-3.1-8b-instant": { input: 0.05, output: 0.08 },
-   "mixtral-8x7b-32768": { input: 0.24, output: 0.24 }
+   "llama-4-scout-17b-16e": { input: 0.11, output: 0.34 },
+   "llama-4-maverick-17b-128e": { input: 0.2, output: 0.6 },
+   "mixtral-8x7b-32768": { input: 0.27, output: 0.27 },
+   "qwen3-32b": { input: 0.29, output: 0.59 }
  };
  var DEFAULT_PRICING = { input: 3, output: 15 };
  var LLMUsageLogger = class {
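The PRICING values read as USD per million tokens (the claude-sonnet-4-5 entry of 3 input / 15 output matches Anthropic's published per-MTok rates), so a plausible cost computation (a sketch only, since the logger's internals are not part of this hunk) looks like:

// Sketch: assumes PRICING values are USD per million tokens.
type Price = { input: number; output: number; cacheRead?: number; cacheWrite?: number };

function estimateCostUSD(price: Price, inputTokens: number, outputTokens: number): number {
    return (inputTokens / 1_000_000) * price.input + (outputTokens / 1_000_000) * price.output;
}

// e.g. 12,000 input + 1,500 output tokens on claude-haiku-4-5 (1 / 5):
// 0.012 * 1 + 0.0015 * 5 = $0.0195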
@@ -5382,6 +5403,38 @@ var BaseLLM = class {
      this.fastModel = config?.fastModel || this.getDefaultFastModel();
      this.defaultLimit = config?.defaultLimit || 50;
      this.apiKey = config?.apiKey;
+     this.modelStrategy = config?.modelStrategy || "fast";
+   }
+   /**
+    * Get the appropriate model based on task type and model strategy
+    * @param taskType - 'complex' for text generation/matching, 'simple' for classification/actions
+    * @returns The model string to use for this task
+    */
+   getModelForTask(taskType) {
+     switch (this.modelStrategy) {
+       case "best":
+         return this.model;
+       case "fast":
+         return this.fastModel;
+       case "balanced":
+       default:
+         return taskType === "complex" ? this.model : this.fastModel;
+     }
+   }
+   /**
+    * Set the model strategy at runtime
+    * @param strategy - 'best', 'fast', or 'balanced'
+    */
+   setModelStrategy(strategy) {
+     this.modelStrategy = strategy;
+     logger.info(`[${this.getProviderName()}] Model strategy set to: ${strategy}`);
+   }
+   /**
+    * Get the current model strategy
+    * @returns The current model strategy
+    */
+   getModelStrategy() {
+     return this.modelStrategy;
    }
    /**
     * Get the API key (from instance, parameter, or environment)
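The routing is small enough to tabulate. A summary of getModelForTask's behavior, assuming provider defaults along the lines of a Sonnet-class best model and a Haiku-class fast model:

// strategy      taskType     → model returned
// "best"        any          → this.model      (e.g., Sonnet-class)
// "fast"        any          → this.fastModel  (e.g., Haiku-class)
// "balanced"    "complex"    → this.model
// "balanced"    "simple"     → this.fastModel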
@@ -5566,7 +5619,7 @@ ${JSON.stringify(tool.requiredFields || [], null, 2)}`;
        user: prompts.user
      },
      {
-       model: this.model,
+       model: this.getModelForTask("complex"),
        maxTokens: 8192,
        temperature: 0.2,
        apiKey: this.getApiKey(apiKey),
@@ -5689,7 +5742,7 @@ ${JSON.stringify(tool.requiredFields || [], null, 2)}`;
        user: prompts.user
      },
      {
-       model: this.fastModel,
+       model: this.getModelForTask("simple"),
        maxTokens: 1500,
        temperature: 0.2,
        apiKey: this.getApiKey(apiKey)
@@ -5750,7 +5803,7 @@ ${JSON.stringify(tool.requiredFields || [], null, 2)}`;
        user: prompts.user
      },
      {
-       model: this.model,
+       model: this.getModelForTask("complex"),
        maxTokens: 3e3,
        temperature: 0.2,
        apiKey: this.getApiKey(apiKey)
@@ -6240,7 +6293,7 @@ ${errorMsg}
      tools,
      toolHandler,
      {
-       model: this.model,
+       model: this.getModelForTask("complex"),
        maxTokens: 4e3,
        temperature: 0.7,
        apiKey: this.getApiKey(apiKey),
@@ -6285,6 +6338,21 @@ ${errorMsg}
      if (category === "general") {
        logger.info(`[${this.getProviderName()}] Skipping component generation for general/conversational question`);
        logCollector?.info("Skipping component generation for general question");
+       logger.info(`[${this.getProviderName()}] Generating actions for general question...`);
+       const nextQuestions = await this.generateNextQuestions(
+         userPrompt,
+         null,
+         // no component
+         void 0,
+         // no component data
+         apiKey,
+         logCollector,
+         conversationHistory,
+         textResponse
+         // pass text response as context
+       );
+       actions = convertQuestionsToActions(nextQuestions);
+       logger.info(`[${this.getProviderName()}] Generated ${actions.length} follow-up actions for general question`);
      } else if (components && components.length > 0) {
        logger.info(`[${this.getProviderName()}] Matching components from text response...`);
        const componentStreamCallback = wrappedStreamCallback && category !== "data_modification" ? (component) => {
@@ -6443,10 +6511,18 @@ ${errorMsg}
        logger.info(`[${this.getProviderName()}] \u2713 100% match - returning UI block directly without adaptation`);
        logCollector?.info(`\u2713 Exact match (${(conversationMatch.similarity * 100).toFixed(2)}%) - returning cached result`);
        logCollector?.info(`Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
+       if (streamCallback && cachedTextResponse) {
+         logger.info(`[${this.getProviderName()}] Streaming cached text response to frontend`);
+         streamCallback(cachedTextResponse);
+       }
+       const cachedActions = conversationMatch.uiBlock?.actions || [];
        return {
          success: true,
          data: {
+           text: cachedTextResponse,
            component,
+           matchedComponents: component?.props?.config?.components || [],
+           actions: cachedActions,
            reasoning: `Exact match from previous conversation (${(conversationMatch.similarity * 100).toFixed(2)}% similarity)`,
            method: `${this.getProviderName()}-semantic-match-exact`,
            semanticSimilarity: conversationMatch.similarity
@@ -6469,10 +6545,18 @@ ${errorMsg}
        logger.info(`[${this.getProviderName()}] Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
        logCollector?.info(`\u2713 UI block adapted successfully`);
        logCollector?.info(`Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
+       if (streamCallback && cachedTextResponse) {
+         logger.info(`[${this.getProviderName()}] Streaming cached text response to frontend (adapted match)`);
+         streamCallback(cachedTextResponse);
+       }
+       const cachedActions = conversationMatch.uiBlock?.actions || [];
        return {
          success: true,
          data: {
+           text: cachedTextResponse,
            component: adaptResult.adaptedComponent,
+           matchedComponents: adaptResult.adaptedComponent?.props?.config?.components || [],
+           actions: cachedActions,
            reasoning: `Adapted from previous conversation: ${originalPrompt}`,
            method: `${this.getProviderName()}-semantic-match`,
            semanticSimilarity: conversationMatch.similarity,
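With these two changes a semantic-cache hit now looks like a fresh generation from the caller's side: the stream callback still receives the text, and data carries text, matchedComponents, and actions. A consuming sketch (components and collections are assumed to be set up already; the prompt is a placeholder):

const res = await anthropicLLM.handleUserRequest(
    "total revenue this month",               // userPrompt
    components,
    undefined,                                // apiKey: fall back to environment
    undefined,                                // logCollector
    undefined,                                // conversationHistory
    "text",                                   // responseMode
    (chunk) => process.stdout.write(chunk),   // now also fires for cached responses
    collections
);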
@@ -6585,15 +6669,26 @@ ${errorMsg}
    /**
     * Generate next questions that the user might ask based on the original prompt and generated component
     * This helps provide intelligent suggestions for follow-up queries
+    * For general/conversational questions without components, pass textResponse instead
     */
-   async generateNextQuestions(originalUserPrompt, component, componentData, apiKey, logCollector, conversationHistory) {
+   async generateNextQuestions(originalUserPrompt, component, componentData, apiKey, logCollector, conversationHistory, textResponse) {
      try {
-       const component_info = `
+       let component_info;
+       if (component) {
+         component_info = `
  Component Name: ${component.name}
  Component Type: ${component.type}
  Component Description: ${component.description || "No description"}
  Component Props: ${component.props ? JSON.stringify(component.props, null, 2) : "No props"}
  `;
+       } else if (textResponse) {
+         component_info = `
+ Response Type: Text/Conversational Response
+ Response Content: ${textResponse.substring(0, 1e3)}${textResponse.length > 1e3 ? "..." : ""}
+ `;
+       } else {
+         component_info = "No component or response context available";
+       }
        const component_data = componentData ? `Component Data: ${JSON.stringify(componentData, null, 2)}` : "";
        const prompts = await promptLoader.loadPrompts("actions", {
          ORIGINAL_USER_PROMPT: originalUserPrompt,
@@ -6607,7 +6702,7 @@ ${errorMsg}
          user: prompts.user
        },
        {
-         model: this.fastModel,
+         model: this.getModelForTask("simple"),
          maxTokens: 1200,
          temperature: 0.7,
          apiKey: this.getApiKey(apiKey)
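The new optional textResponse argument lets follow-up suggestions be generated when no component exists. A call sketch for the conversational path, with arguments positional per the signature in index.d.ts above (the strings are placeholders):

const questions = await anthropicLLM.generateNextQuestions(
    "what can you help me with?",
    null,         // no component
    undefined,    // no component data
    undefined,    // apiKey from environment
    undefined,    // no log collector
    undefined,    // no conversation history
    "I can analyze your sales data, build dashboards, ..."  // textResponse as context
);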
@@ -11134,7 +11229,9 @@ var SuperatomSDK = class {
      this.openaiApiKey = config.OPENAI_API_KEY || process.env.OPENAI_API_KEY || "";
      this.llmProviders = config.LLM_PROVIDERS || getLLMProviders();
      this.databaseType = config.databaseType || "postgresql";
-     logger.info(`Initializing Superatom SDK v${SDK_VERSION} for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, database type: ${this.databaseType}`);
+     this.modelStrategy = config.modelStrategy || "fast";
+     this.applyModelStrategy(this.modelStrategy);
+     logger.info(`Initializing Superatom SDK v${SDK_VERSION} for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, database type: ${this.databaseType}, model strategy: ${this.modelStrategy}`);
      this.userManager = new UserManager(this.projectId, 5e3);
      this.dashboardManager = new DashboardManager(this.projectId);
      this.reportManager = new ReportManager(this.projectId);
@@ -11513,6 +11610,31 @@ var SuperatomSDK = class {
    getTools() {
      return this.tools;
    }
+   /**
+    * Apply model strategy to all LLM provider singletons
+    * @param strategy - 'best', 'fast', or 'balanced'
+    */
+   applyModelStrategy(strategy) {
+     anthropicLLM.setModelStrategy(strategy);
+     groqLLM.setModelStrategy(strategy);
+     geminiLLM.setModelStrategy(strategy);
+     openaiLLM.setModelStrategy(strategy);
+     logger.info(`Model strategy '${strategy}' applied to all LLM providers`);
+   }
+   /**
+    * Set model strategy at runtime
+    * @param strategy - 'best', 'fast', or 'balanced'
+    */
+   setModelStrategy(strategy) {
+     this.modelStrategy = strategy;
+     this.applyModelStrategy(strategy);
+   }
+   /**
+    * Get current model strategy
+    */
+   getModelStrategy() {
+     return this.modelStrategy;
+   }
  };
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
@@ -11528,9 +11650,13 @@ var SuperatomSDK = class {
    UIBlock,
    UILogCollector,
    UserManager,
+   anthropicLLM,
+   geminiLLM,
+   groqLLM,
    hybridRerank,
    llmUsageLogger,
    logger,
+   openaiLLM,
    rerankChromaResults,
    rerankConversationResults,
    userPromptErrorLogger