@superatomai/sdk-node 0.0.72 → 0.0.74
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +9 -13
- package/dist/index.d.ts +9 -13
- package/dist/index.js +314 -591
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +314 -590
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.d.mts
CHANGED
@@ -2098,19 +2098,17 @@ declare class QueryExecutionService {
      * @param component - The component to validate
      * @param collections - Collections object containing database execute function
      * @param apiKey - Optional API key for LLM calls
-     * @param logCollector - Optional log collector for logging
      * @returns Validation result with component, query key, and result
      */
-    validateSingleQuery(component: Component, collections: any, apiKey?: string
+    validateSingleQuery(component: Component, collections: any, apiKey?: string): Promise<QueryValidationResult>;
     /**
      * Validate multiple component queries in parallel
      * @param components - Array of components with potential queries
      * @param collections - Collections object containing database execute function
      * @param apiKey - Optional API key for LLM calls
-     * @param logCollector - Optional log collector for logging
      * @returns Object with validated components and query results map
      */
-    validateComponentQueries(components: Component[], collections: any, apiKey?: string
+    validateComponentQueries(components: Component[], collections: any, apiKey?: string): Promise<BatchValidationResult>;
 }
 
 /**
@@ -2207,11 +2205,10 @@ declare abstract class BaseLLM {
      * @param analysisContent - The text response containing component suggestions
      * @param components - List of available components
      * @param apiKey - Optional API key
-     * @param logCollector - Optional log collector
      * @param componentStreamCallback - Optional callback to stream primary KPI component as soon as it's identified
      * @returns Object containing matched components, layout title/description, and follow-up actions
      */
-    matchComponentsFromAnalysis(analysisContent: string, components: Component[], userPrompt: string, apiKey?: string,
+    matchComponentsFromAnalysis(analysisContent: string, components: Component[], userPrompt: string, apiKey?: string, componentStreamCallback?: (component: Component) => void, deferredTools?: any[], executedTools?: any[], collections?: any, userId?: string): Promise<{
         components: Component[];
         layoutTitle: string;
         layoutDescription: string;
@@ -2221,7 +2218,7 @@ declare abstract class BaseLLM {
      * Classify user question into category and detect external tools needed
      * Determines if question is for data analysis, requires external tools, or needs text response
      */
-    classifyQuestionCategory(userPrompt: string, apiKey?: string,
+    classifyQuestionCategory(userPrompt: string, apiKey?: string, conversationHistory?: string, externalTools?: any[]): Promise<{
         category: 'data_analysis' | 'data_modification' | 'general';
         externalTools: Array<{
             type: string;
@@ -2238,7 +2235,7 @@ declare abstract class BaseLLM {
      * Takes a matched UI block from semantic search and modifies its props to answer the new question
      * Also adapts the cached text response to match the new question
      */
-    adaptUIBlockParameters(currentUserPrompt: string, originalUserPrompt: string, matchedUIBlock: any, apiKey?: string,
+    adaptUIBlockParameters(currentUserPrompt: string, originalUserPrompt: string, matchedUIBlock: any, apiKey?: string, cachedTextResponse?: string): Promise<{
         success: boolean;
         adaptedComponent?: Component;
         adaptedTextResponse?: string;
@@ -2254,7 +2251,7 @@ declare abstract class BaseLLM {
      * Supports tool calling for query execution with automatic retry on errors (max 3 attempts)
      * After generating text response, if components are provided, matches suggested components
      */
-    generateTextResponse(userPrompt: string, apiKey?: string,
+    generateTextResponse(userPrompt: string, apiKey?: string, conversationHistory?: string, streamCallback?: (chunk: string) => void, collections?: any, components?: Component[], externalTools?: any[], category?: 'data_analysis' | 'data_modification' | 'general', userId?: string): Promise<T_RESPONSE>;
     /**
      * Main orchestration function with semantic search and multi-step classification
      * NEW FLOW (Recommended):
@@ -2263,13 +2260,13 @@ declare abstract class BaseLLM {
      * 2. Category classification: Determine if data_analysis, requires_external_tools, or text_response
      * 3. Route appropriately based on category and response mode
      */
-    handleUserRequest(userPrompt: string, components: Component[], apiKey?: string,
+    handleUserRequest(userPrompt: string, components: Component[], apiKey?: string, conversationHistory?: string, responseMode?: 'component' | 'text', streamCallback?: (chunk: string) => void, collections?: any, externalTools?: any[], userId?: string): Promise<T_RESPONSE>;
     /**
      * Generate next questions that the user might ask based on the original prompt and generated component
      * This helps provide intelligent suggestions for follow-up queries
      * For general/conversational questions without components, pass textResponse instead
      */
-    generateNextQuestions(originalUserPrompt: string, component?: Component | null, componentData?: Record<string, unknown>, apiKey?: string,
+    generateNextQuestions(originalUserPrompt: string, component?: Component | null, componentData?: Record<string, unknown>, apiKey?: string, conversationHistory?: string, textResponse?: string): Promise<string[]>;
 }
 
 interface AnthropicLLMConfig extends BaseLLMConfig {
@@ -2385,7 +2382,6 @@ declare class QueryCache {
 }
 declare const queryCache: QueryCache;
 
-declare const SDK_VERSION = "0.0.8";
 type MessageTypeHandler = (message: IncomingMessage) => void | Promise<void>;
 declare class SuperatomSDK {
     private ws;
@@ -2536,4 +2532,4 @@ declare class SuperatomSDK {
     getConversationSimilarityThreshold(): number;
 }
 
-export { type Action, BM25L, type BM25LOptions, type BaseLLMConfig, CONTEXT_CONFIG, type CapturedLog, CleanupService, type CollectionHandler, type CollectionOperation, type DBUIBlock, type DatabaseType, type HybridSearchOptions, type IncomingMessage, type KbNodesQueryFilters, type KbNodesRequestPayload, LLM, type LLMUsageEntry, type LogLevel, type Message, type ModelStrategy, type OutputField, type RerankedResult,
+export { type Action, BM25L, type BM25LOptions, type BaseLLMConfig, CONTEXT_CONFIG, type CapturedLog, CleanupService, type CollectionHandler, type CollectionOperation, type DBUIBlock, type DatabaseType, type HybridSearchOptions, type IncomingMessage, type KbNodesQueryFilters, type KbNodesRequestPayload, LLM, type LLMUsageEntry, type LogLevel, type Message, type ModelStrategy, type OutputField, type RerankedResult, STORAGE_CONFIG, SuperatomSDK, type SuperatomSDKConfig, type TaskType, Thread, ThreadManager, type Tool$1 as Tool, type ToolOutputSchema, UIBlock, UILogCollector, type User, UserManager, type UsersData, anthropicLLM, geminiLLM, groqLLM, hybridRerank, llmUsageLogger, logger, openaiLLM, queryCache, rerankChromaResults, rerankConversationResults, userPromptErrorLogger };
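The declaration changes above expose the full signatures that were previously cut off, most notably handleUserRequest. Below is a minimal TypeScript sketch of a call against the 0.0.74 signature; treating anthropicLLM as a concrete BaseLLM instance, the empty component list, and the placeholder user id are assumptions for illustration — only the parameter order and types come from the declaration diff.

import { anthropicLLM, type Component } from '@superatomai/sdk-node';

async function demo() {
  // Components registered by the host application (left empty here for brevity).
  const components: Component[] = [];

  // Positional arguments follow the 0.0.74 declaration of handleUserRequest.
  const response = await anthropicLLM.handleUserRequest(
    'Show monthly revenue by region',         // userPrompt
    components,                               // components
    process.env.LLM_API_KEY,                  // apiKey?
    undefined,                                // conversationHistory?
    'component',                              // responseMode?: 'component' | 'text'
    (chunk) => process.stdout.write(chunk),   // streamCallback?
    undefined,                                // collections?
    [],                                       // externalTools?
    'user-123'                                // userId? (hypothetical id)
  );

  return response;
}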
package/dist/index.d.ts
CHANGED
@@ -2098,19 +2098,17 @@ declare class QueryExecutionService {
      * @param component - The component to validate
      * @param collections - Collections object containing database execute function
      * @param apiKey - Optional API key for LLM calls
-     * @param logCollector - Optional log collector for logging
      * @returns Validation result with component, query key, and result
      */
-    validateSingleQuery(component: Component, collections: any, apiKey?: string
+    validateSingleQuery(component: Component, collections: any, apiKey?: string): Promise<QueryValidationResult>;
     /**
      * Validate multiple component queries in parallel
      * @param components - Array of components with potential queries
      * @param collections - Collections object containing database execute function
      * @param apiKey - Optional API key for LLM calls
-     * @param logCollector - Optional log collector for logging
      * @returns Object with validated components and query results map
      */
-    validateComponentQueries(components: Component[], collections: any, apiKey?: string
+    validateComponentQueries(components: Component[], collections: any, apiKey?: string): Promise<BatchValidationResult>;
 }
 
 /**
@@ -2207,11 +2205,10 @@ declare abstract class BaseLLM {
      * @param analysisContent - The text response containing component suggestions
      * @param components - List of available components
      * @param apiKey - Optional API key
-     * @param logCollector - Optional log collector
      * @param componentStreamCallback - Optional callback to stream primary KPI component as soon as it's identified
      * @returns Object containing matched components, layout title/description, and follow-up actions
      */
-    matchComponentsFromAnalysis(analysisContent: string, components: Component[], userPrompt: string, apiKey?: string,
+    matchComponentsFromAnalysis(analysisContent: string, components: Component[], userPrompt: string, apiKey?: string, componentStreamCallback?: (component: Component) => void, deferredTools?: any[], executedTools?: any[], collections?: any, userId?: string): Promise<{
         components: Component[];
         layoutTitle: string;
         layoutDescription: string;
@@ -2221,7 +2218,7 @@ declare abstract class BaseLLM {
      * Classify user question into category and detect external tools needed
      * Determines if question is for data analysis, requires external tools, or needs text response
      */
-    classifyQuestionCategory(userPrompt: string, apiKey?: string,
+    classifyQuestionCategory(userPrompt: string, apiKey?: string, conversationHistory?: string, externalTools?: any[]): Promise<{
         category: 'data_analysis' | 'data_modification' | 'general';
         externalTools: Array<{
             type: string;
@@ -2238,7 +2235,7 @@ declare abstract class BaseLLM {
      * Takes a matched UI block from semantic search and modifies its props to answer the new question
      * Also adapts the cached text response to match the new question
      */
-    adaptUIBlockParameters(currentUserPrompt: string, originalUserPrompt: string, matchedUIBlock: any, apiKey?: string,
+    adaptUIBlockParameters(currentUserPrompt: string, originalUserPrompt: string, matchedUIBlock: any, apiKey?: string, cachedTextResponse?: string): Promise<{
         success: boolean;
         adaptedComponent?: Component;
         adaptedTextResponse?: string;
@@ -2254,7 +2251,7 @@ declare abstract class BaseLLM {
      * Supports tool calling for query execution with automatic retry on errors (max 3 attempts)
      * After generating text response, if components are provided, matches suggested components
      */
-    generateTextResponse(userPrompt: string, apiKey?: string,
+    generateTextResponse(userPrompt: string, apiKey?: string, conversationHistory?: string, streamCallback?: (chunk: string) => void, collections?: any, components?: Component[], externalTools?: any[], category?: 'data_analysis' | 'data_modification' | 'general', userId?: string): Promise<T_RESPONSE>;
     /**
      * Main orchestration function with semantic search and multi-step classification
      * NEW FLOW (Recommended):
@@ -2263,13 +2260,13 @@ declare abstract class BaseLLM {
      * 2. Category classification: Determine if data_analysis, requires_external_tools, or text_response
      * 3. Route appropriately based on category and response mode
      */
-    handleUserRequest(userPrompt: string, components: Component[], apiKey?: string,
+    handleUserRequest(userPrompt: string, components: Component[], apiKey?: string, conversationHistory?: string, responseMode?: 'component' | 'text', streamCallback?: (chunk: string) => void, collections?: any, externalTools?: any[], userId?: string): Promise<T_RESPONSE>;
     /**
      * Generate next questions that the user might ask based on the original prompt and generated component
      * This helps provide intelligent suggestions for follow-up queries
      * For general/conversational questions without components, pass textResponse instead
      */
-    generateNextQuestions(originalUserPrompt: string, component?: Component | null, componentData?: Record<string, unknown>, apiKey?: string,
+    generateNextQuestions(originalUserPrompt: string, component?: Component | null, componentData?: Record<string, unknown>, apiKey?: string, conversationHistory?: string, textResponse?: string): Promise<string[]>;
 }
 
 interface AnthropicLLMConfig extends BaseLLMConfig {
@@ -2385,7 +2382,6 @@ declare class QueryCache {
 }
 declare const queryCache: QueryCache;
 
-declare const SDK_VERSION = "0.0.8";
 type MessageTypeHandler = (message: IncomingMessage) => void | Promise<void>;
 declare class SuperatomSDK {
     private ws;
@@ -2536,4 +2532,4 @@ declare class SuperatomSDK {
     getConversationSimilarityThreshold(): number;
 }
 
-export { type Action, BM25L, type BM25LOptions, type BaseLLMConfig, CONTEXT_CONFIG, type CapturedLog, CleanupService, type CollectionHandler, type CollectionOperation, type DBUIBlock, type DatabaseType, type HybridSearchOptions, type IncomingMessage, type KbNodesQueryFilters, type KbNodesRequestPayload, LLM, type LLMUsageEntry, type LogLevel, type Message, type ModelStrategy, type OutputField, type RerankedResult,
+export { type Action, BM25L, type BM25LOptions, type BaseLLMConfig, CONTEXT_CONFIG, type CapturedLog, CleanupService, type CollectionHandler, type CollectionOperation, type DBUIBlock, type DatabaseType, type HybridSearchOptions, type IncomingMessage, type KbNodesQueryFilters, type KbNodesRequestPayload, LLM, type LLMUsageEntry, type LogLevel, type Message, type ModelStrategy, type OutputField, type RerankedResult, STORAGE_CONFIG, SuperatomSDK, type SuperatomSDKConfig, type TaskType, Thread, ThreadManager, type Tool$1 as Tool, type ToolOutputSchema, UIBlock, UILogCollector, type User, UserManager, type UsersData, anthropicLLM, geminiLLM, groqLLM, hybridRerank, llmUsageLogger, logger, openaiLLM, queryCache, rerankChromaResults, rerankConversationResults, userPromptErrorLogger };
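The index.d.ts changes mirror those in index.d.mts. As an illustration of the now fully declared generateNextQuestions signature, here is a minimal TypeScript sketch for a general/conversational answer without a component, where textResponse is passed instead (as the docblock in the diff describes). Treating openaiLLM as a concrete BaseLLM instance and the literal prompt/response strings are assumptions; only the parameter order and the Promise<string[]> return type come from the diff.

import { openaiLLM } from '@superatomai/sdk-node';

async function suggestFollowUps() {
  // Positional arguments follow the 0.0.74 declaration of generateNextQuestions.
  const followUps: string[] = await openaiLLM.generateNextQuestions(
    'What can this dashboard tell me?',               // originalUserPrompt
    null,                                             // component (none for a general question)
    undefined,                                        // componentData
    process.env.LLM_API_KEY,                          // apiKey?
    undefined,                                        // conversationHistory?
    'The dashboard tracks revenue, churn and usage.'  // textResponse? (prior text answer)
  );
  return followUps;
}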