@superatomai/sdk-node 0.0.5 → 0.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -5,13 +5,17 @@ A TypeScript/Node.js SDK for building AI-powered data applications with Superato
5
5
  ## Features
6
6
 
7
7
  - **WebSocket Communication** - Real-time bidirectional messaging with automatic reconnection
8
+ - **AI-Powered Component Generation** - Automatically generate dashboard components and visualizations from user questions
9
+ - **Intelligent Text Responses** - LLM-powered conversational responses with SQL query execution and retry logic
10
+ - **Component Matching & Classification** - Smart component selection based on question type and visualization needs
8
11
  - **Collection Handlers** - Register custom data operation handlers (CRUD, queries, mutations)
9
12
  - **User Management** - Built-in authentication and user storage with file-based persistence
10
13
  - **Dashboard & Report Management** - Create and manage dashboards and reports with DSL-based rendering
11
- - **LLM Integration** - Unified interface for Anthropic Claude and Groq models with streaming support
14
+ - **Multi-Provider LLM Integration** - Unified interface for Anthropic Claude and Groq models with automatic fallback
12
15
  - **Thread & UI Block Management** - Organize conversations and UI components with automatic cleanup
13
- - **Log Collection** - Capture and send runtime logs to the UI
14
- - **Prompt Loader** - Load custom prompts from the file system
16
+ - **Log Collection** - Capture and send runtime logs to the UI with configurable log levels
17
+ - **Prompt Loader** - Load and cache custom prompts from the file system with template variable support
18
+ - **Database Schema Management** - Automatic schema documentation generation for LLM context
15
19
  - **Cleanup Service** - Automatic memory management and old data removal
16
20
  - **TypeScript First** - Full type safety with comprehensive type definitions
17
21
 
@@ -331,7 +335,216 @@ const jsonResult = await LLM.stream(
331
335
  - With provider: `anthropic/model-name` or `groq/model-name`
332
336
  - Without provider: Defaults to Anthropic
333
337
 
334
- ### 7. Thread & UI Block Management
338
+ ### 7. AI-Powered User Response System
339
+
340
+ The SDK includes a powerful AI system for generating intelligent responses to user questions with two modes: **component generation** and **text responses**.
341
+
342
+ #### Component Generation Mode
343
+
344
+ Automatically match and generate dashboard components based on user questions:
345
+
346
+ ```typescript
347
+ import { get_user_response } from '@superatomai/sdk-node/userResponse';
348
+
349
+ const components = [
350
+ { id: 'sales-chart', name: 'SalesChart', type: 'BarChart', /* ... */ },
351
+ { id: 'kpi-card', name: 'RevenueKPI', type: 'KPICard', /* ... */ },
352
+ // ... more components
353
+ ];
354
+
355
+ const result = await get_user_response(
356
+ 'Show me sales by region',
357
+ components,
358
+ anthropicApiKey,
359
+ groqApiKey,
360
+ ['anthropic', 'groq'], // Provider fallback order
361
+ logCollector,
362
+ conversationHistory,
363
+ 'component' // Component generation mode
364
+ );
365
+
366
+ if (result.success) {
367
+ console.log('Generated component:', result.data.component);
368
+ console.log('Reasoning:', result.data.reasoning);
369
+ }
370
+ ```
371
+
372
+ **Features:**
373
+ - **Question Classification** - Automatically determines question type (analytical, data_modification, general)
374
+ - **Visualization Type Detection** - Identifies required visualization types (charts, tables, KPIs)
375
+ - **Multi-Component Dashboards** - Generates multiple components for comprehensive analysis
376
+ - **Props Modification** - Intelligently modifies component props including SQL queries
377
+ - **SQL Query Validation** - Ensures queries have proper LIMIT clauses and fixes scalar subqueries
378
+
379
+ #### Text Response Mode
380
+
381
+ Generate conversational text responses with SQL query execution:
382
+
383
+ ```typescript
384
+ const result = await get_user_response(
385
+ 'What were the top 5 selling products last month?',
386
+ components,
387
+ anthropicApiKey,
388
+ groqApiKey,
389
+ ['anthropic', 'groq'],
390
+ logCollector,
391
+ conversationHistory,
392
+ 'text', // Text response mode
393
+ (chunk) => {
394
+ // Stream text chunks in real-time
395
+ process.stdout.write(chunk);
396
+ },
397
+ collections // Required for query execution
398
+ );
399
+
400
+ if (result.success) {
401
+ console.log('Text response:', result.data.text);
402
+ console.log('Matched components:', result.data.matchedComponents);
403
+ console.log('Container component:', result.data.component);
404
+ }
405
+ ```
406
+
407
+ **Features:**
408
+ - **SQL Query Execution** - Automatically generates and executes SQL queries via tool calling
409
+ - **Automatic Retry Logic** - Up to 6 retry attempts with query correction on errors
410
+ - **Streaming Responses** - Real-time text streaming with query execution status updates
411
 + - **Component Suggestions** - Parses component suggestions from text and matches them with available components
412
+ - **Layout Discovery** - Automatically selects appropriate dashboard layouts based on component metadata
413
+ - **Performance Tracking** - Measures and logs total time taken for request processing
414
+
415
+ #### Using BaseLLM Classes Directly
416
+
417
+ For more control, use the BaseLLM implementations directly:
418
+
419
+ ```typescript
420
+ import { anthropicLLM, groqLLM } from '@superatomai/sdk-node/userResponse';
421
+
422
+ // Classify user question
423
+ const classification = await anthropicLLM.classifyUserQuestion(
424
+ 'Show me sales trends',
425
+ apiKey,
426
+ logCollector,
427
+ conversationHistory
428
+ );
429
+
430
+ console.log('Question type:', classification.questionType);
431
+ console.log('Visualizations needed:', classification.visualizations);
432
+
433
+ // Generate analytical component
434
+ const result = await anthropicLLM.generateAnalyticalComponent(
435
+ 'Show me sales by region',
436
+ components,
437
+ 'BarChart', // Preferred visualization type
438
+ apiKey,
439
+ logCollector,
440
+ conversationHistory
441
+ );
442
+
443
+ // Match existing component
444
+ const matchResult = await anthropicLLM.matchComponent(
445
+ 'Update the sales dashboard',
446
+ components,
447
+ apiKey,
448
+ logCollector,
449
+ conversationHistory
450
+ );
451
+
452
+ // Generate next questions
453
+ const nextQuestions = await anthropicLLM.generateNextQuestions(
454
+ originalPrompt,
455
+ generatedComponent,
456
+ componentData,
457
+ apiKey,
458
+ logCollector,
459
+ conversationHistory
460
+ );
461
+ ```
462
+
463
+ **BaseLLM Methods:**
464
+ - `handleUserRequest()` - Main orchestration method (supports both component and text modes)
465
+ - `classifyUserQuestion()` - Classify question type and identify visualizations
466
+ - `generateAnalyticalComponent()` - Generate single analytical component
467
+ - `generateMultipleAnalyticalComponents()` - Generate multiple components in parallel
468
+ - `matchComponent()` - Match and modify existing component
469
+ - `validateAndModifyProps()` - Validate and modify component props
470
+ - `generateTextResponse()` - Generate text with tool calling support
471
+ - `matchComponentsFromTextResponse()` - Match components from text suggestions
472
+ - `generateNextQuestions()` - Generate follow-up question suggestions
473
+
474
+ ### 8. Prompt Loader System
475
+
476
+ The SDK includes a sophisticated prompt loading system with caching and template variable support:
477
+
478
+ ```typescript
479
+ import { promptLoader } from '@superatomai/sdk-node/userResponse';
480
+
481
+ // Initialize with custom directory (default: .prompts)
482
+ const sdk = new SuperatomSDK({
483
+ apiKey: 'your-api-key',
484
+ projectId: 'your-project-id',
485
+ promptsDir: './my-custom-prompts',
486
+ });
487
+
488
+ // Prompts are automatically loaded and cached on initialization
489
+ // Access prompt cache size
490
+ const cacheSize = promptLoader.getCacheSize();
491
+ console.log(`Loaded ${cacheSize} prompts`);
492
+
493
+ // Load specific prompts with variables
494
+ const prompts = await promptLoader.loadPrompts('classify', {
495
+ USER_PROMPT: userQuestion,
496
+ CONVERSATION_HISTORY: history || 'No previous conversation'
497
+ });
498
+
499
+ console.log('System prompt:', prompts.system);
500
+ console.log('User prompt:', prompts.user);
501
+ ```
502
+
503
 + **Prompt Directory Structure** (partial example — see Built-in Prompt Types below for the full list):
504
+ ```
505
+ .prompts/
506
+ ├── classify/
507
+ │ ├── system.md # System prompt for classification
508
+ │ └── user.md # User prompt template
509
+ ├── match-component/
510
+ │ ├── system.md
511
+ │ └── user.md
512
+ ├── modify-props/
513
+ │ ├── system.md
514
+ │ └── user.md
515
+ ├── text-response/
516
+ │ ├── system.md
517
+ │ └── user.md
518
+ └── match-text-components/
519
+ ├── system.md
520
+ └── user.md
521
+ ```
522
+
523
+ **Template Variables:**
524
+ Variables in prompts are replaced using the `{{VARIABLE_NAME}}` syntax:
525
+
526
+ ```markdown
527
+ # system.md
528
+ You are analyzing this question: {{USER_PROMPT}}
529
+
530
+ Previous conversation:
531
+ {{CONVERSATION_HISTORY}}
532
+
533
+ Available components:
534
+ {{AVAILABLE_COMPONENTS}}
535
+ ```
536
+
537
+ **Built-in Prompt Types:**
538
+ - `classify` - Question classification and visualization type detection
539
+ - `match-component` - Component matching and selection
540
+ - `modify-props` - Props validation and modification
541
+ - `single-component` - Single analytical component generation
542
+ - `text-response` - Text response with tool calling
543
+ - `match-text-components` - Component matching from text suggestions
544
+ - `container-metadata` - Container title and description generation
545
+ - `actions` - Next question generation
546
+
547
+ ### 9. Thread & UI Block Management
335
548
 
336
549
  Organize conversations and UI components:
337
550
 
@@ -381,7 +594,7 @@ threadManager.deleteThread(thread.getId());
381
594
  - `form` - Interactive forms
382
595
  - Custom types as needed
383
596
 
384
- ### 8. Logging System
597
+ ### 10. Logging System
385
598
 
386
599
  The SDK includes a comprehensive logging system with environment-based log levels for both terminal and UI log collection.
387
600
 
@@ -466,7 +679,7 @@ const logs = collector.getLogs();
466
679
 
467
680
  **Note:** The log level affects both terminal output and UI log collection, ensuring consistent logging behavior across your application.
468
681
 
469
- ### 9. Cleanup Service
682
+ ### 11. Cleanup Service
470
683
 
471
684
  Automatic memory management for threads and UI blocks:
472
685
 
@@ -507,31 +720,6 @@ STORAGE_CONFIG.UIBLOCK_RETENTION_DAYS = 14;
507
720
  STORAGE_CONFIG.MAX_COMPONENT_DATA_SIZE = 200 * 1024;
508
721
  ```
509
722
 
510
- ### 10. Prompt Loader
511
-
512
- Load custom prompts from the file system:
513
-
514
- ```typescript
515
- // Prompts are automatically loaded from .prompts/ directory
516
- // Or specify custom directory in config:
517
- const sdk = new SuperatomSDK({
518
- apiKey: 'your-api-key',
519
- projectId: 'your-project-id',
520
- promptsDir: './my-prompts',
521
- });
522
-
523
- // Prompts are loaded and cached on initialization
524
- // They can be accessed by handlers internally
525
- ```
526
-
527
- **Prompt File Structure:**
528
- ```
529
- .prompts/
530
- ├── system.txt # System prompts
531
- ├── user-greeting.txt # User greeting prompts
532
- └── analysis.txt # Analysis prompts
533
- ```
534
-
535
723
  ## API Reference
536
724
 
537
725
  ### SuperatomSDK
@@ -678,9 +866,35 @@ import type {
678
866
  IncomingMessage,
679
867
  SuperatomSDKConfig,
680
868
  CollectionHandler,
869
+ CollectionOperation,
681
870
  User,
682
- DSLRendererProps,
871
+ UsersData,
872
+ Component,
873
+ T_RESPONSE,
874
+ LLMProvider,
875
+ LogLevel,
876
+ CapturedLog,
877
+ Action,
878
+ } from '@superatomai/sdk-node';
879
+
880
+ // Import LLM and utility classes
881
+ import {
882
+ LLM,
883
+ UserManager,
884
+ UILogCollector,
885
+ Thread,
886
+ UIBlock,
887
+ ThreadManager,
888
+ CleanupService,
889
+ logger,
683
890
  } from '@superatomai/sdk-node';
891
+
892
+ // Import user response utilities
893
+ import {
894
+ get_user_response,
895
+ anthropicLLM,
896
+ groqLLM,
897
+ } from '@superatomai/sdk-node/userResponse';
684
898
  ```
685
899
 
686
900
  ## Examples
@@ -689,9 +903,13 @@ Check out the [examples](./examples) directory for complete examples:
689
903
  - Basic WebSocket communication
690
904
  - Data collection handlers
691
905
  - User authentication flow
692
- - LLM integration
906
+ - LLM integration with streaming
907
+ - AI-powered component generation
908
+ - Text response with query execution
909
+ - Component matching and classification
693
910
  - Thread management
694
911
  - Dashboard creation
912
+ - Prompt customization
695
913
 
696
914
  ## Development
697
915
 
package/dist/index.d.mts CHANGED
@@ -47,6 +47,7 @@ declare class Logger {
47
47
  * Log debug message (only shown for verbose level)
48
48
  */
49
49
  debug(...args: any[]): void;
50
+ file(...args: any[]): void;
50
51
  }
51
52
  declare const logger: Logger;
52
53
 
@@ -831,9 +832,19 @@ interface LLMOptions {
831
832
  apiKey?: string;
832
833
  partial?: (chunk: string) => void;
833
834
  }
835
+ interface Tool {
836
+ name: string;
837
+ description: string;
838
+ input_schema: {
839
+ type: string;
840
+ properties: Record<string, any>;
841
+ required?: string[];
842
+ };
843
+ }
834
844
  declare class LLM {
835
845
  static text(messages: LLMMessages, options?: LLMOptions): Promise<string>;
836
846
  static stream<T = string>(messages: LLMMessages, options?: LLMOptions, json?: boolean): Promise<T extends string ? string : any>;
847
+ static streamWithTools(messages: LLMMessages, tools: Tool[], toolHandler: (toolName: string, toolInput: any) => Promise<any>, options?: LLMOptions, maxIterations?: number): Promise<string>;
837
848
  /**
838
849
  * Parse model string to extract provider and model name
839
850
  * @param modelString - Format: "provider/model-name" or just "model-name"
@@ -847,6 +858,7 @@ declare class LLM {
847
858
  private static _parseModel;
848
859
  private static _anthropicText;
849
860
  private static _anthropicStream;
861
+ private static _anthropicStreamWithTools;
850
862
  private static _groqText;
851
863
  private static _groqStream;
852
864
  /**
@@ -985,6 +997,7 @@ declare class UIBlock {
985
997
  * Get component metadata
986
998
  */
987
999
  getComponentMetadata(): Record<string, any>;
1000
+ getTextResponse(): string;
988
1001
  /**
989
1002
  * Set or update component metadata
990
1003
  */
@@ -1013,10 +1026,6 @@ declare class UIBlock {
1013
1026
  * Set or update component data with size and row limits
1014
1027
  */
1015
1028
  setComponentData(data: Record<string, any>): void;
1016
- /**
1017
- * Get text response
1018
- */
1019
- getTextResponse(): string | null;
1020
1029
  /**
1021
1030
  * Set or update text response
1022
1031
  */
@@ -1033,6 +1042,10 @@ declare class UIBlock {
1033
1042
  * @returns Promise resolving to Action[]
1034
1043
  */
1035
1044
  getOrFetchActions(generateFn: () => Promise<Action[]>): Promise<Action[]>;
1045
+ /**
1046
+ * Set or replace all actions
1047
+ */
1048
+ setActions(actions: Action[]): void;
1036
1049
  /**
1037
1050
  * Add a single action (only if actions are resolved)
1038
1051
  */
package/dist/index.d.ts CHANGED
@@ -47,6 +47,7 @@ declare class Logger {
47
47
  * Log debug message (only shown for verbose level)
48
48
  */
49
49
  debug(...args: any[]): void;
50
+ file(...args: any[]): void;
50
51
  }
51
52
  declare const logger: Logger;
52
53
 
@@ -831,9 +832,19 @@ interface LLMOptions {
831
832
  apiKey?: string;
832
833
  partial?: (chunk: string) => void;
833
834
  }
835
+ interface Tool {
836
+ name: string;
837
+ description: string;
838
+ input_schema: {
839
+ type: string;
840
+ properties: Record<string, any>;
841
+ required?: string[];
842
+ };
843
+ }
834
844
  declare class LLM {
835
845
  static text(messages: LLMMessages, options?: LLMOptions): Promise<string>;
836
846
  static stream<T = string>(messages: LLMMessages, options?: LLMOptions, json?: boolean): Promise<T extends string ? string : any>;
847
+ static streamWithTools(messages: LLMMessages, tools: Tool[], toolHandler: (toolName: string, toolInput: any) => Promise<any>, options?: LLMOptions, maxIterations?: number): Promise<string>;
837
848
  /**
838
849
  * Parse model string to extract provider and model name
839
850
  * @param modelString - Format: "provider/model-name" or just "model-name"
@@ -847,6 +858,7 @@ declare class LLM {
847
858
  private static _parseModel;
848
859
  private static _anthropicText;
849
860
  private static _anthropicStream;
861
+ private static _anthropicStreamWithTools;
850
862
  private static _groqText;
851
863
  private static _groqStream;
852
864
  /**
@@ -985,6 +997,7 @@ declare class UIBlock {
985
997
  * Get component metadata
986
998
  */
987
999
  getComponentMetadata(): Record<string, any>;
1000
+ getTextResponse(): string;
988
1001
  /**
989
1002
  * Set or update component metadata
990
1003
  */
@@ -1013,10 +1026,6 @@ declare class UIBlock {
1013
1026
  * Set or update component data with size and row limits
1014
1027
  */
1015
1028
  setComponentData(data: Record<string, any>): void;
1016
- /**
1017
- * Get text response
1018
- */
1019
- getTextResponse(): string | null;
1020
1029
  /**
1021
1030
  * Set or update text response
1022
1031
  */
@@ -1033,6 +1042,10 @@ declare class UIBlock {
1033
1042
  * @returns Promise resolving to Action[]
1034
1043
  */
1035
1044
  getOrFetchActions(generateFn: () => Promise<Action[]>): Promise<Action[]>;
1045
+ /**
1046
+ * Set or replace all actions
1047
+ */
1048
+ setActions(actions: Action[]): void;
1036
1049
  /**
1037
1050
  * Add a single action (only if actions are resolved)
1038
1051
  */