@superatomai/sdk-node 0.0.71 → 0.0.74
This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/README.md +942 -942
- package/dist/index.d.mts +94 -51
- package/dist/index.d.ts +94 -51
- package/dist/index.js +1666 -1384
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1676 -1393
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.d.mts
CHANGED
@@ -2025,6 +2025,92 @@ declare function rerankConversationResults<T extends {
     bm25Score: number;
 }>;
 
+/**
+ * QueryExecutionService - Handles all query execution, validation, and retry logic
+ * Extracted from BaseLLM for better separation of concerns
+ */
+
+/**
+ * Context for component when requesting query fix
+ */
+interface ComponentContext {
+    name: string;
+    type: string;
+    title?: string;
+}
+/**
+ * Result of query validation
+ */
+interface QueryValidationResult {
+    component: Component | null;
+    queryKey: string;
+    result: any;
+    validated: boolean;
+}
+/**
+ * Result of batch query validation
+ */
+interface BatchValidationResult {
+    components: Component[];
+    queryResults: Map<string, any>;
+}
+/**
+ * Configuration for QueryExecutionService
+ */
+interface QueryExecutionServiceConfig {
+    defaultLimit: number;
+    getModelForTask: (taskType: 'simple' | 'complex') => string;
+    getApiKey: (apiKey?: string) => string | undefined;
+    providerName: string;
+}
+/**
+ * QueryExecutionService handles all query-related operations
+ */
+declare class QueryExecutionService {
+    private config;
+    constructor(config: QueryExecutionServiceConfig);
+    /**
+     * Get the cache key for a query
+     * This ensures the cache key matches what the frontend will send
+     */
+    getQueryCacheKey(query: any): string;
+    /**
+     * Execute a query against the database
+     * @param query - The SQL query to execute (string or object with sql/values)
+     * @param collections - Collections object containing database execute function
+     * @returns Object with result data and cache key
+     */
+    executeQuery(query: any, collections: any): Promise<{
+        result: any;
+        cacheKey: string;
+    }>;
+    /**
+     * Request the LLM to fix a failed SQL query
+     * @param failedQuery - The query that failed execution
+     * @param errorMessage - The error message from the failed execution
+     * @param componentContext - Context about the component
+     * @param apiKey - Optional API key
+     * @returns Fixed query string
+     */
+    requestQueryFix(failedQuery: string, errorMessage: string, componentContext: ComponentContext, apiKey?: string): Promise<string>;
+    /**
+     * Validate a single component's query with retry logic
+     * @param component - The component to validate
+     * @param collections - Collections object containing database execute function
+     * @param apiKey - Optional API key for LLM calls
+     * @returns Validation result with component, query key, and result
+     */
+    validateSingleQuery(component: Component, collections: any, apiKey?: string): Promise<QueryValidationResult>;
+    /**
+     * Validate multiple component queries in parallel
+     * @param components - Array of components with potential queries
+     * @param collections - Collections object containing database execute function
+     * @param apiKey - Optional API key for LLM calls
+     * @returns Object with validated components and query results map
+     */
+    validateComponentQueries(components: Component[], collections: any, apiKey?: string): Promise<BatchValidationResult>;
+}
+
 /**
  * Task types for model selection
  * - 'complex': Text generation, component matching, parameter adaptation (uses best model in balanced mode)
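The hunk above declares the new QueryExecutionService that this release extracts from BaseLLM. The class does not appear in the export list at the end of this file, so the sketch below restates the declared shapes as local structural types purely for illustration; the `collections` object with a database `execute` function, the model ids, and the provider label are assumptions, not part of the published API.

```ts
// Structural restatement of the declared config and public methods; nothing is imported
// from the package because QueryExecutionService itself is not in its export list.
interface QueryExecutionServiceConfig {
    defaultLimit: number;
    getModelForTask: (taskType: 'simple' | 'complex') => string;
    getApiKey: (apiKey?: string) => string | undefined;
    providerName: string;
}

interface QueryExecutionServiceLike {
    getQueryCacheKey(query: any): string;
    executeQuery(query: any, collections: any): Promise<{ result: any; cacheKey: string }>;
    validateComponentQueries(components: any[], collections: any, apiKey?: string): Promise<{
        components: any[];
        queryResults: Map<string, any>;
    }>;
}

// Illustrative config: the limit, model ids, and provider label are placeholders.
const exampleConfig: QueryExecutionServiceConfig = {
    defaultLimit: 100,
    getModelForTask: (taskType) => (taskType === 'complex' ? 'best-model-id' : 'fast-model-id'),
    getApiKey: (apiKey) => apiKey ?? process.env.LLM_API_KEY,
    providerName: 'example-provider',
};

// Hypothetical usage: `service` would be the protected `queryService` field that BaseLLM
// gains in the next hunk, and `collections` is assumed to expose an execute function.
async function validateAll(service: QueryExecutionServiceLike, components: any[], collections: any) {
    const { components: validated, queryResults } = await service.validateComponentQueries(components, collections);
    for (const [cacheKey, rows] of queryResults) {
        console.log(`query ${cacheKey} returned`, rows);
    }
    return validated;
}
```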
@@ -2056,6 +2142,7 @@ declare abstract class BaseLLM {
     protected apiKey?: string;
     protected modelStrategy: ModelStrategy;
     protected conversationSimilarityThreshold: number;
+    protected queryService: QueryExecutionService;
     constructor(config?: BaseLLMConfig);
     /**
      * Get the appropriate model based on task type and model strategy
@@ -2110,29 +2197,6 @@ declare abstract class BaseLLM {
      * This checks both single Form components and Forms inside MultiComponentContainer
      */
     protected containsFormComponent(component: any): boolean;
-    /**
-     * Get the cache key for a query (the exact sql param that would be sent to execute)
-     * This ensures the cache key matches what the frontend will send
-     * Used for both caching and internal deduplication
-     */
-    private getQueryCacheKey;
-    /**
-     * Execute a query against the database for validation and caching
-     * @param query - The SQL query to execute (string or object with sql/values)
-     * @param collections - Collections object containing database execute function
-     * @returns Object with result data and cache key
-     * @throws Error if query execution fails
-     */
-    private executeQueryForValidation;
-    /**
-     * Request the LLM to fix a failed SQL query
-     * @param failedQuery - The query that failed execution
-     * @param errorMessage - The error message from the failed execution
-     * @param componentContext - Context about the component (name, type, title)
-     * @param apiKey - Optional API key
-     * @returns Fixed query string
-     */
-    private requestQueryFix;
     /**
      * Match components from text response suggestions and generate follow-up questions
      * Takes a text response with component suggestions (c1:type format) and matches with available components
@@ -2141,40 +2205,20 @@ declare abstract class BaseLLM {
      * @param analysisContent - The text response containing component suggestions
      * @param components - List of available components
      * @param apiKey - Optional API key
-     * @param logCollector - Optional log collector
      * @param componentStreamCallback - Optional callback to stream primary KPI component as soon as it's identified
      * @returns Object containing matched components, layout title/description, and follow-up actions
      */
-    matchComponentsFromAnalysis(analysisContent: string, components: Component[], userPrompt: string, apiKey?: string,
+    matchComponentsFromAnalysis(analysisContent: string, components: Component[], userPrompt: string, apiKey?: string, componentStreamCallback?: (component: Component) => void, deferredTools?: any[], executedTools?: any[], collections?: any, userId?: string): Promise<{
         components: Component[];
         layoutTitle: string;
         layoutDescription: string;
         actions: Action[];
     }>;
-    /**
-     * Validate a single component's query with retry logic
-     * @param component - The component to validate
-     * @param collections - Collections object containing database execute function
-     * @param apiKey - Optional API key for LLM calls
-     * @param logCollector - Optional log collector for logging
-     * @returns Object with validated component (or null if failed) and query result
-     */
-    private validateSingleComponentQuery;
-    /**
-     * Validate component queries against the database and retry with LLM fixes if they fail
-     * Uses parallel execution for faster validation
-     * @param components - Array of components with potential queries
-     * @param collections - Collections object containing database execute function
-     * @param apiKey - Optional API key for LLM calls
-     * @param logCollector - Optional log collector for logging
-     * @returns Object with validated components and a map of query results
-     */
-    private validateAndRetryComponentQueries;
     /**
      * Classify user question into category and detect external tools needed
      * Determines if question is for data analysis, requires external tools, or needs text response
     */
-    classifyQuestionCategory(userPrompt: string, apiKey?: string,
+    classifyQuestionCategory(userPrompt: string, apiKey?: string, conversationHistory?: string, externalTools?: any[]): Promise<{
         category: 'data_analysis' | 'data_modification' | 'general';
         externalTools: Array<{
             type: string;
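classifyQuestionCategory now accepts conversationHistory and an externalTools list as additional optional, positional parameters. The sketch below shows one way a caller might pass them; it assumes the exported anthropicLLM value is an instance of the Anthropic BaseLLM subclass (the diff only shows the abstract class), and the history text and tool descriptor are illustrative.

```ts
import { anthropicLLM } from '@superatomai/sdk-node';

// Assumption: anthropicLLM exposes the BaseLLM methods declared above.
async function routeQuestion(userPrompt: string, historyText?: string) {
    const { category, externalTools } = await anthropicLLM.classifyQuestionCategory(
        userPrompt,
        undefined,                  // apiKey: positional, pass undefined to fall back to the configured key
        historyText,                // conversationHistory: newly added optional parameter
        [{ type: 'web_search' }],   // externalTools: illustrative descriptor; declared as any[]
    );
    return { category, externalTools };
}
```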
@@ -2191,7 +2235,7 @@ declare abstract class BaseLLM {
      * Takes a matched UI block from semantic search and modifies its props to answer the new question
      * Also adapts the cached text response to match the new question
      */
-    adaptUIBlockParameters(currentUserPrompt: string, originalUserPrompt: string, matchedUIBlock: any, apiKey?: string,
+    adaptUIBlockParameters(currentUserPrompt: string, originalUserPrompt: string, matchedUIBlock: any, apiKey?: string, cachedTextResponse?: string): Promise<{
         success: boolean;
         adaptedComponent?: Component;
         adaptedTextResponse?: string;
@@ -2207,7 +2251,7 @@ declare abstract class BaseLLM {
      * Supports tool calling for query execution with automatic retry on errors (max 3 attempts)
      * After generating text response, if components are provided, matches suggested components
      */
-    generateTextResponse(userPrompt: string, apiKey?: string,
+    generateTextResponse(userPrompt: string, apiKey?: string, conversationHistory?: string, streamCallback?: (chunk: string) => void, collections?: any, components?: Component[], externalTools?: any[], category?: 'data_analysis' | 'data_modification' | 'general', userId?: string): Promise<T_RESPONSE>;
     /**
      * Main orchestration function with semantic search and multi-step classification
      * NEW FLOW (Recommended):
@@ -2216,13 +2260,13 @@ declare abstract class BaseLLM {
      * 2. Category classification: Determine if data_analysis, requires_external_tools, or text_response
      * 3. Route appropriately based on category and response mode
      */
-    handleUserRequest(userPrompt: string, components: Component[], apiKey?: string,
+    handleUserRequest(userPrompt: string, components: Component[], apiKey?: string, conversationHistory?: string, responseMode?: 'component' | 'text', streamCallback?: (chunk: string) => void, collections?: any, externalTools?: any[], userId?: string): Promise<T_RESPONSE>;
     /**
      * Generate next questions that the user might ask based on the original prompt and generated component
      * This helps provide intelligent suggestions for follow-up queries
      * For general/conversational questions without components, pass textResponse instead
      */
-    generateNextQuestions(originalUserPrompt: string, component?: Component | null, componentData?: Record<string, unknown>, apiKey?: string,
+    generateNextQuestions(originalUserPrompt: string, component?: Component | null, componentData?: Record<string, unknown>, apiKey?: string, conversationHistory?: string, textResponse?: string): Promise<string[]>;
 }
 
 interface AnthropicLLMConfig extends BaseLLMConfig {
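generateTextResponse and handleUserRequest follow the same pattern: every new parameter is optional and positional, so a caller that only needs a later one (for example streamCallback) still has to supply placeholders for the earlier ones. A hedged sketch, again assuming anthropicLLM is a BaseLLM instance and that `collections` only needs to carry the database execute function the declarations mention:

```ts
import { anthropicLLM } from '@superatomai/sdk-node';

// Hypothetical collections object; only a database execute function is assumed here.
const collections = {
    execute: async (sql: unknown) => ({ rows: [] as unknown[] }),
};

async function answerAsText(prompt: string) {
    return anthropicLLM.handleUserRequest(
        prompt,
        [],                                                // components: none registered in this sketch
        undefined,                                         // apiKey
        undefined,                                         // conversationHistory
        'text',                                            // responseMode
        (chunk: string) => process.stdout.write(chunk),    // streamCallback
        collections,
        undefined,                                         // externalTools
        'user-123',                                        // userId (illustrative)
    );
}
```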
@@ -2338,7 +2382,6 @@ declare class QueryCache {
 }
 declare const queryCache: QueryCache;
 
-declare const SDK_VERSION = "0.0.8";
 type MessageTypeHandler = (message: IncomingMessage) => void | Promise<void>;
 declare class SuperatomSDK {
     private ws;
@@ -2489,4 +2532,4 @@ declare class SuperatomSDK {
     getConversationSimilarityThreshold(): number;
 }
 
-export { type Action, BM25L, type BM25LOptions, type BaseLLMConfig, CONTEXT_CONFIG, type CapturedLog, CleanupService, type CollectionHandler, type CollectionOperation, type DBUIBlock, type DatabaseType, type HybridSearchOptions, type IncomingMessage, type KbNodesQueryFilters, type KbNodesRequestPayload, LLM, type LLMUsageEntry, type LogLevel, type Message, type ModelStrategy, type OutputField, type RerankedResult,
+export { type Action, BM25L, type BM25LOptions, type BaseLLMConfig, CONTEXT_CONFIG, type CapturedLog, CleanupService, type CollectionHandler, type CollectionOperation, type DBUIBlock, type DatabaseType, type HybridSearchOptions, type IncomingMessage, type KbNodesQueryFilters, type KbNodesRequestPayload, LLM, type LLMUsageEntry, type LogLevel, type Message, type ModelStrategy, type OutputField, type RerankedResult, STORAGE_CONFIG, SuperatomSDK, type SuperatomSDKConfig, type TaskType, Thread, ThreadManager, type Tool$1 as Tool, type ToolOutputSchema, UIBlock, UILogCollector, type User, UserManager, type UsersData, anthropicLLM, geminiLLM, groqLLM, hybridRerank, llmUsageLogger, logger, openaiLLM, queryCache, rerankChromaResults, rerankConversationResults, userPromptErrorLogger };
package/dist/index.d.ts
CHANGED
@@ -2025,6 +2025,92 @@ declare function rerankConversationResults<T extends {
     bm25Score: number;
 }>;
 
+/**
+ * QueryExecutionService - Handles all query execution, validation, and retry logic
+ * Extracted from BaseLLM for better separation of concerns
+ */
+
+/**
+ * Context for component when requesting query fix
+ */
+interface ComponentContext {
+    name: string;
+    type: string;
+    title?: string;
+}
+/**
+ * Result of query validation
+ */
+interface QueryValidationResult {
+    component: Component | null;
+    queryKey: string;
+    result: any;
+    validated: boolean;
+}
+/**
+ * Result of batch query validation
+ */
+interface BatchValidationResult {
+    components: Component[];
+    queryResults: Map<string, any>;
+}
+/**
+ * Configuration for QueryExecutionService
+ */
+interface QueryExecutionServiceConfig {
+    defaultLimit: number;
+    getModelForTask: (taskType: 'simple' | 'complex') => string;
+    getApiKey: (apiKey?: string) => string | undefined;
+    providerName: string;
+}
+/**
+ * QueryExecutionService handles all query-related operations
+ */
+declare class QueryExecutionService {
+    private config;
+    constructor(config: QueryExecutionServiceConfig);
+    /**
+     * Get the cache key for a query
+     * This ensures the cache key matches what the frontend will send
+     */
+    getQueryCacheKey(query: any): string;
+    /**
+     * Execute a query against the database
+     * @param query - The SQL query to execute (string or object with sql/values)
+     * @param collections - Collections object containing database execute function
+     * @returns Object with result data and cache key
+     */
+    executeQuery(query: any, collections: any): Promise<{
+        result: any;
+        cacheKey: string;
+    }>;
+    /**
+     * Request the LLM to fix a failed SQL query
+     * @param failedQuery - The query that failed execution
+     * @param errorMessage - The error message from the failed execution
+     * @param componentContext - Context about the component
+     * @param apiKey - Optional API key
+     * @returns Fixed query string
+     */
+    requestQueryFix(failedQuery: string, errorMessage: string, componentContext: ComponentContext, apiKey?: string): Promise<string>;
+    /**
+     * Validate a single component's query with retry logic
+     * @param component - The component to validate
+     * @param collections - Collections object containing database execute function
+     * @param apiKey - Optional API key for LLM calls
+     * @returns Validation result with component, query key, and result
+     */
+    validateSingleQuery(component: Component, collections: any, apiKey?: string): Promise<QueryValidationResult>;
+    /**
+     * Validate multiple component queries in parallel
+     * @param components - Array of components with potential queries
+     * @param collections - Collections object containing database execute function
+     * @param apiKey - Optional API key for LLM calls
+     * @returns Object with validated components and query results map
+     */
+    validateComponentQueries(components: Component[], collections: any, apiKey?: string): Promise<BatchValidationResult>;
+}
+
 /**
  * Task types for model selection
  * - 'complex': Text generation, component matching, parameter adaptation (uses best model in balanced mode)
@@ -2056,6 +2142,7 @@ declare abstract class BaseLLM {
     protected apiKey?: string;
     protected modelStrategy: ModelStrategy;
     protected conversationSimilarityThreshold: number;
+    protected queryService: QueryExecutionService;
     constructor(config?: BaseLLMConfig);
     /**
      * Get the appropriate model based on task type and model strategy
@@ -2110,29 +2197,6 @@ declare abstract class BaseLLM {
      * This checks both single Form components and Forms inside MultiComponentContainer
      */
     protected containsFormComponent(component: any): boolean;
-    /**
-     * Get the cache key for a query (the exact sql param that would be sent to execute)
-     * This ensures the cache key matches what the frontend will send
-     * Used for both caching and internal deduplication
-     */
-    private getQueryCacheKey;
-    /**
-     * Execute a query against the database for validation and caching
-     * @param query - The SQL query to execute (string or object with sql/values)
-     * @param collections - Collections object containing database execute function
-     * @returns Object with result data and cache key
-     * @throws Error if query execution fails
-     */
-    private executeQueryForValidation;
-    /**
-     * Request the LLM to fix a failed SQL query
-     * @param failedQuery - The query that failed execution
-     * @param errorMessage - The error message from the failed execution
-     * @param componentContext - Context about the component (name, type, title)
-     * @param apiKey - Optional API key
-     * @returns Fixed query string
-     */
-    private requestQueryFix;
     /**
      * Match components from text response suggestions and generate follow-up questions
      * Takes a text response with component suggestions (c1:type format) and matches with available components
@@ -2141,40 +2205,20 @@ declare abstract class BaseLLM {
      * @param analysisContent - The text response containing component suggestions
      * @param components - List of available components
      * @param apiKey - Optional API key
-     * @param logCollector - Optional log collector
      * @param componentStreamCallback - Optional callback to stream primary KPI component as soon as it's identified
      * @returns Object containing matched components, layout title/description, and follow-up actions
      */
-    matchComponentsFromAnalysis(analysisContent: string, components: Component[], userPrompt: string, apiKey?: string,
+    matchComponentsFromAnalysis(analysisContent: string, components: Component[], userPrompt: string, apiKey?: string, componentStreamCallback?: (component: Component) => void, deferredTools?: any[], executedTools?: any[], collections?: any, userId?: string): Promise<{
         components: Component[];
         layoutTitle: string;
         layoutDescription: string;
         actions: Action[];
     }>;
-    /**
-     * Validate a single component's query with retry logic
-     * @param component - The component to validate
-     * @param collections - Collections object containing database execute function
-     * @param apiKey - Optional API key for LLM calls
-     * @param logCollector - Optional log collector for logging
-     * @returns Object with validated component (or null if failed) and query result
-     */
-    private validateSingleComponentQuery;
-    /**
-     * Validate component queries against the database and retry with LLM fixes if they fail
-     * Uses parallel execution for faster validation
-     * @param components - Array of components with potential queries
-     * @param collections - Collections object containing database execute function
-     * @param apiKey - Optional API key for LLM calls
-     * @param logCollector - Optional log collector for logging
-     * @returns Object with validated components and a map of query results
-     */
-    private validateAndRetryComponentQueries;
     /**
      * Classify user question into category and detect external tools needed
      * Determines if question is for data analysis, requires external tools, or needs text response
     */
-    classifyQuestionCategory(userPrompt: string, apiKey?: string,
+    classifyQuestionCategory(userPrompt: string, apiKey?: string, conversationHistory?: string, externalTools?: any[]): Promise<{
         category: 'data_analysis' | 'data_modification' | 'general';
         externalTools: Array<{
             type: string;
@@ -2191,7 +2235,7 @@ declare abstract class BaseLLM {
      * Takes a matched UI block from semantic search and modifies its props to answer the new question
      * Also adapts the cached text response to match the new question
      */
-    adaptUIBlockParameters(currentUserPrompt: string, originalUserPrompt: string, matchedUIBlock: any, apiKey?: string,
+    adaptUIBlockParameters(currentUserPrompt: string, originalUserPrompt: string, matchedUIBlock: any, apiKey?: string, cachedTextResponse?: string): Promise<{
         success: boolean;
         adaptedComponent?: Component;
         adaptedTextResponse?: string;
@@ -2207,7 +2251,7 @@ declare abstract class BaseLLM {
      * Supports tool calling for query execution with automatic retry on errors (max 3 attempts)
      * After generating text response, if components are provided, matches suggested components
      */
-    generateTextResponse(userPrompt: string, apiKey?: string,
+    generateTextResponse(userPrompt: string, apiKey?: string, conversationHistory?: string, streamCallback?: (chunk: string) => void, collections?: any, components?: Component[], externalTools?: any[], category?: 'data_analysis' | 'data_modification' | 'general', userId?: string): Promise<T_RESPONSE>;
     /**
      * Main orchestration function with semantic search and multi-step classification
      * NEW FLOW (Recommended):
@@ -2216,13 +2260,13 @@ declare abstract class BaseLLM {
      * 2. Category classification: Determine if data_analysis, requires_external_tools, or text_response
      * 3. Route appropriately based on category and response mode
      */
-    handleUserRequest(userPrompt: string, components: Component[], apiKey?: string,
+    handleUserRequest(userPrompt: string, components: Component[], apiKey?: string, conversationHistory?: string, responseMode?: 'component' | 'text', streamCallback?: (chunk: string) => void, collections?: any, externalTools?: any[], userId?: string): Promise<T_RESPONSE>;
     /**
      * Generate next questions that the user might ask based on the original prompt and generated component
      * This helps provide intelligent suggestions for follow-up queries
      * For general/conversational questions without components, pass textResponse instead
      */
-    generateNextQuestions(originalUserPrompt: string, component?: Component | null, componentData?: Record<string, unknown>, apiKey?: string,
+    generateNextQuestions(originalUserPrompt: string, component?: Component | null, componentData?: Record<string, unknown>, apiKey?: string, conversationHistory?: string, textResponse?: string): Promise<string[]>;
 }
 
 interface AnthropicLLMConfig extends BaseLLMConfig {
@@ -2338,7 +2382,6 @@ declare class QueryCache {
 }
 declare const queryCache: QueryCache;
 
-declare const SDK_VERSION = "0.0.8";
 type MessageTypeHandler = (message: IncomingMessage) => void | Promise<void>;
 declare class SuperatomSDK {
     private ws;
@@ -2489,4 +2532,4 @@ declare class SuperatomSDK {
     getConversationSimilarityThreshold(): number;
 }
 
-export { type Action, BM25L, type BM25LOptions, type BaseLLMConfig, CONTEXT_CONFIG, type CapturedLog, CleanupService, type CollectionHandler, type CollectionOperation, type DBUIBlock, type DatabaseType, type HybridSearchOptions, type IncomingMessage, type KbNodesQueryFilters, type KbNodesRequestPayload, LLM, type LLMUsageEntry, type LogLevel, type Message, type ModelStrategy, type OutputField, type RerankedResult,
+export { type Action, BM25L, type BM25LOptions, type BaseLLMConfig, CONTEXT_CONFIG, type CapturedLog, CleanupService, type CollectionHandler, type CollectionOperation, type DBUIBlock, type DatabaseType, type HybridSearchOptions, type IncomingMessage, type KbNodesQueryFilters, type KbNodesRequestPayload, LLM, type LLMUsageEntry, type LogLevel, type Message, type ModelStrategy, type OutputField, type RerankedResult, STORAGE_CONFIG, SuperatomSDK, type SuperatomSDKConfig, type TaskType, Thread, ThreadManager, type Tool$1 as Tool, type ToolOutputSchema, UIBlock, UILogCollector, type User, UserManager, type UsersData, anthropicLLM, geminiLLM, groqLLM, hybridRerank, llmUsageLogger, logger, openaiLLM, queryCache, rerankChromaResults, rerankConversationResults, userPromptErrorLogger };