@superatomai/sdk-node 0.0.40 → 0.0.42
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +332 -1
- package/dist/index.d.ts +332 -1
- package/dist/index.js +262 -45
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +258 -45
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -1623,9 +1623,13 @@ __export(index_exports, {
   UIBlock: () => UIBlock,
   UILogCollector: () => UILogCollector,
   UserManager: () => UserManager,
+  anthropicLLM: () => anthropicLLM,
+  geminiLLM: () => geminiLLM,
+  groqLLM: () => groqLLM,
   hybridRerank: () => hybridRerank,
   llmUsageLogger: () => llmUsageLogger,
   logger: () => logger,
+  openaiLLM: () => openaiLLM,
   rerankChromaResults: () => rerankChromaResults,
   rerankConversationResults: () => rerankConversationResults,
   userPromptErrorLogger: () => userPromptErrorLogger

@@ -1929,7 +1933,7 @@ var ComponentPropsSchema = import_zod3.z.object({
   description: import_zod3.z.string().optional(),
   config: import_zod3.z.record(import_zod3.z.unknown()).optional(),
   actions: import_zod3.z.array(import_zod3.z.any()).optional()
-});
+}).passthrough();
 var ComponentSchema = import_zod3.z.object({
   id: import_zod3.z.string(),
   name: import_zod3.z.string(),

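The `.passthrough()` change on `ComponentPropsSchema` is behavioral, not cosmetic: by default Zod strips object keys the schema does not declare, while `.passthrough()` preserves them. A minimal sketch of the difference, with a trimmed-down schema and an illustrative extra key:

```ts
import { z } from "zod";

// Default z.object() drops undeclared keys; .passthrough() keeps them,
// so arbitrary component props now survive validation.
const Props = z.object({ description: z.string().optional() }).passthrough();

const parsed = Props.parse({ description: "sales chart", customFlag: true });
console.log(parsed); // { description: "sales chart", customFlag: true }
```
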
@@ -3432,30 +3436,47 @@ var import_jsonrepair = require("jsonrepair");
 var import_fs4 = __toESM(require("fs"));
 var import_path3 = __toESM(require("path"));
 var PRICING = {
-  // Anthropic
-  "claude-
+  // Anthropic (December 2025)
+  "claude-opus-4-5": { input: 5, output: 25, cacheRead: 0.5, cacheWrite: 6.25 },
+  "claude-opus-4-5-20251101": { input: 5, output: 25, cacheRead: 0.5, cacheWrite: 6.25 },
   "claude-sonnet-4-5": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
   "claude-sonnet-4-5-20250929": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
-  "claude-
-  "claude-haiku-4-5-20251001": { input:
+  "claude-haiku-4-5": { input: 1, output: 5, cacheRead: 0.1, cacheWrite: 1.25 },
+  "claude-haiku-4-5-20251001": { input: 1, output: 5, cacheRead: 0.1, cacheWrite: 1.25 },
+  "claude-3-5-sonnet-20241022": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
+  "claude-3-5-haiku-20241022": { input: 1, output: 5, cacheRead: 0.1, cacheWrite: 1.25 },
   "claude-3-opus-20240229": { input: 15, output: 75, cacheRead: 1.5, cacheWrite: 18.75 },
   "claude-3-sonnet-20240229": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
   "claude-3-haiku-20240307": { input: 0.25, output: 1.25, cacheRead: 0.03, cacheWrite: 0.3 },
-  // OpenAI
-  "gpt-
+  // OpenAI (December 2025)
+  "gpt-5": { input: 1.25, output: 10 },
+  "gpt-5-mini": { input: 0.25, output: 2 },
+  "gpt-4o": { input: 5, output: 15 },
+  // Updated pricing as of late 2025
   "gpt-4o-mini": { input: 0.15, output: 0.6 },
   "gpt-4-turbo": { input: 10, output: 30 },
   "gpt-4": { input: 30, output: 60 },
   "gpt-3.5-turbo": { input: 0.5, output: 1.5 },
-  // Gemini
+  // Google Gemini (December 2025)
+  "gemini-3-pro-preview": { input: 2, output: 12 },
+  // New Gemini 3
+  "gemini-3-flash-preview": { input: 0.5, output: 3 },
+  // For prompts ≤200K tokens, 2x for >200K
+  "gemini-2.5-flash": { input: 0.15, output: 0.6 },
+  // Standard mode (thinking disabled: $0.60, thinking enabled: $3.50)
+  "gemini-2.5-flash-lite": { input: 0.1, output: 0.4 },
+  "gemini-2.0-flash": { input: 0.1, output: 0.4 },
+  "gemini-2.0-flash-lite": { input: 0.075, output: 0.3 },
   "gemini-1.5-pro": { input: 1.25, output: 5 },
   "gemini-1.5-flash": { input: 0.075, output: 0.3 },
-
-  // Groq (very cheap)
+  // Groq (December 2025)
   "llama-3.3-70b-versatile": { input: 0.59, output: 0.79 },
   "llama-3.1-70b-versatile": { input: 0.59, output: 0.79 },
   "llama-3.1-8b-instant": { input: 0.05, output: 0.08 },
-  "
+  "llama-4-scout-17b-16e": { input: 0.11, output: 0.34 },
+  "llama-4-maverick-17b-128e": { input: 0.2, output: 0.6 },
+  "mixtral-8x7b-32768": { input: 0.27, output: 0.27 },
+  "qwen3-32b": { input: 0.29, output: 0.59 }
 };
 var DEFAULT_PRICING = { input: 3, output: 15 };
 var LLMUsageLogger = class {

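The `PRICING` entries read as USD per million tokens — the convention that makes `DEFAULT_PRICING`'s `{ input: 3, output: 15 }` line up with the Claude Sonnet rows. Under that assumption, a standalone sketch of the cost computation implied by the `calculateCost` calls elsewhere in the diff (not the actual method body):

```ts
// A per-request cost under the "USD per million tokens" reading.
type Price = { input: number; output: number };

const PRICING: Record<string, Price> = {
  "gpt-4o-mini": { input: 0.15, output: 0.6 },
};
const DEFAULT_PRICING: Price = { input: 3, output: 15 }; // fallback for unknown models

function calculateCost(model: string, inputTokens: number, outputTokens: number): number {
  const p = PRICING[model] ?? DEFAULT_PRICING;
  return (inputTokens / 1e6) * p.input + (outputTokens / 1e6) * p.output;
}

// 10k prompt tokens + 2k completion tokens on gpt-4o-mini:
// 0.01 * 0.15 + 0.002 * 0.6 = $0.0027
console.log(calculateCost("gpt-4o-mini", 10_000, 2_000));
```
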
@@ -4624,6 +4645,7 @@ var LLM = class {
     }
   }
   static async _geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
+    const methodStartTime = Date.now();
     const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
     const genAI = new import_generative_ai.GoogleGenerativeAI(apiKey);
     const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");

@@ -4652,11 +4674,18 @@ var LLM = class {
     let iterations = 0;
     let finalText = "";
     let currentUserMessage = messages.user;
+    let totalToolCalls = 0;
+    let totalInputTokens = 0;
+    let totalOutputTokens = 0;
     while (iterations < maxIterations) {
       iterations++;
+      const iterationStartTime = Date.now();
+      const requestId = llmUsageLogger.generateRequestId();
       const result = await chat.sendMessageStream(currentUserMessage);
       let responseText = "";
       const functionCalls = [];
+      let inputTokens = 0;
+      let outputTokens = 0;
       for await (const chunk of result.stream) {
         const candidate = chunk.candidates?.[0];
         if (!candidate) continue;

@@ -4673,7 +4702,37 @@ var LLM = class {
           });
         }
       }
+        if (chunk.usageMetadata) {
+          inputTokens = chunk.usageMetadata.promptTokenCount || 0;
+          outputTokens = chunk.usageMetadata.candidatesTokenCount || 0;
+        }
+      }
+      const iterationDuration = Date.now() - iterationStartTime;
+      const toolCallsInIteration = functionCalls.length;
+      totalToolCalls += toolCallsInIteration;
+      if (inputTokens === 0) {
+        const userMsg = typeof currentUserMessage === "string" ? currentUserMessage : JSON.stringify(currentUserMessage);
+        inputTokens = Math.ceil((systemPrompt.length + userMsg.length) / 4);
+      }
+      if (outputTokens === 0) {
+        outputTokens = Math.ceil(responseText.length / 4);
       }
+      totalInputTokens += inputTokens;
+      totalOutputTokens += outputTokens;
+      llmUsageLogger.log({
+        timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+        requestId,
+        provider: "gemini",
+        model: modelName,
+        method: `streamWithTools[iter=${iterations}]`,
+        inputTokens,
+        outputTokens,
+        totalTokens: inputTokens + outputTokens,
+        costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
+        durationMs: iterationDuration,
+        toolCalls: toolCallsInIteration,
+        success: true
+      });
       if (functionCalls.length === 0) {
         finalText = responseText;
         break;

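When the Gemini stream yields no `usageMetadata`, the new code falls back to a rough four-characters-per-token estimate over the system prompt, user message, and response text. The heuristic in isolation:

```ts
// ~4 characters per token is a coarse English-text approximation; real
// tokenizers vary with language and vocabulary.
function estimateTokens(text: string): number {
  return Math.ceil(text.length / 4);
}

console.log(estimateTokens("Summarize this quarter's revenue by region.")); // 11
```
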
@@ -4706,6 +4765,23 @@ var LLM = class {
       }));
       currentUserMessage = functionResponseParts;
     }
+    const totalDuration = Date.now() - methodStartTime;
+    if (iterations > 1) {
+      llmUsageLogger.log({
+        timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+        requestId: llmUsageLogger.generateRequestId(),
+        provider: "gemini",
+        model: modelName,
+        method: `streamWithTools[TOTAL:${iterations}iters]`,
+        inputTokens: totalInputTokens,
+        outputTokens: totalOutputTokens,
+        totalTokens: totalInputTokens + totalOutputTokens,
+        costUSD: llmUsageLogger.calculateCost(modelName, totalInputTokens, totalOutputTokens),
+        durationMs: totalDuration,
+        toolCalls: totalToolCalls,
+        success: true
+      });
+    }
     if (iterations >= maxIterations) {
       throw new Error(`Max iterations (${maxIterations}) reached in tool calling loop`);
     }

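With both blocks in place, a multi-turn tool-calling run writes one usage entry per iteration plus a `TOTAL` roll-up (only when `iterations > 1`). A sketch of how the roll-up relates to the per-iteration entries, with hypothetical token counts:

```ts
// Three iterations → three `streamWithTools[iter=N]` entries plus one
// `streamWithTools[TOTAL:3iters]` entry (entry shape from the diff;
// figures hypothetical).
type UsageEntry = { inputTokens: number; outputTokens: number; toolCalls: number };

const iterations: UsageEntry[] = [
  { inputTokens: 1200, outputTokens: 80, toolCalls: 2 },  // iter=1
  { inputTokens: 1450, outputTokens: 60, toolCalls: 1 },  // iter=2
  { inputTokens: 1600, outputTokens: 400, toolCalls: 0 }, // iter=3
];

// The TOTAL entry aggregates the same way the new code does:
const total = iterations.reduce(
  (acc, e) => ({
    inputTokens: acc.inputTokens + e.inputTokens,
    outputTokens: acc.outputTokens + e.outputTokens,
    toolCalls: acc.toolCalls + e.toolCalls,
  }),
  { inputTokens: 0, outputTokens: 0, toolCalls: 0 }
);
console.log(total); // { inputTokens: 4250, outputTokens: 540, toolCalls: 3 }
```
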
@@ -5382,6 +5458,38 @@ var BaseLLM = class {
     this.fastModel = config?.fastModel || this.getDefaultFastModel();
     this.defaultLimit = config?.defaultLimit || 50;
     this.apiKey = config?.apiKey;
+    this.modelStrategy = config?.modelStrategy || "fast";
+  }
+  /**
+   * Get the appropriate model based on task type and model strategy
+   * @param taskType - 'complex' for text generation/matching, 'simple' for classification/actions
+   * @returns The model string to use for this task
+   */
+  getModelForTask(taskType) {
+    switch (this.modelStrategy) {
+      case "best":
+        return this.model;
+      case "fast":
+        return this.fastModel;
+      case "balanced":
+      default:
+        return taskType === "complex" ? this.model : this.fastModel;
+    }
+  }
+  /**
+   * Set the model strategy at runtime
+   * @param strategy - 'best', 'fast', or 'balanced'
+   */
+  setModelStrategy(strategy) {
+    this.modelStrategy = strategy;
+    logger.info(`[${this.getProviderName()}] Model strategy set to: ${strategy}`);
+  }
+  /**
+   * Get the current model strategy
+   * @returns The current model strategy
+   */
+  getModelStrategy() {
+    return this.modelStrategy;
   }
   /**
   * Get the API key (from instance, parameter, or environment)

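The three strategies reduce to a small routing rule over the two models each provider already tracks. A standalone sketch using the strategy and task-type names from the diff (the model strings are the new Gemini defaults; any pairing works):

```ts
type Strategy = "best" | "fast" | "balanced";
type TaskType = "complex" | "simple";

function pickModel(strategy: Strategy, task: TaskType, model: string, fastModel: string): string {
  switch (strategy) {
    case "best": return model;     // always the strongest model
    case "fast": return fastModel; // always the cheap/fast model (the default)
    case "balanced":
    default:
      return task === "complex" ? model : fastModel;
  }
}

// "balanced" routes component matching ('complex') to the big model while
// classification and follow-up actions ('simple') stay on the fast one:
console.log(
  pickModel("balanced", "simple", "gemini/gemini-3-pro-preview", "gemini/gemini-3-flash-preview")
); // → "gemini/gemini-3-flash-preview"
```
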
@@ -5566,7 +5674,7 @@ ${JSON.stringify(tool.requiredFields || [], null, 2)}`;
       user: prompts.user
     },
     {
-      model: this.
+      model: this.getModelForTask("complex"),
       maxTokens: 8192,
       temperature: 0.2,
       apiKey: this.getApiKey(apiKey),

@@ -5689,7 +5797,7 @@ ${JSON.stringify(tool.requiredFields || [], null, 2)}`;
       user: prompts.user
     },
     {
-      model: this.
+      model: this.getModelForTask("simple"),
       maxTokens: 1500,
       temperature: 0.2,
       apiKey: this.getApiKey(apiKey)

@@ -5750,7 +5858,7 @@ ${JSON.stringify(tool.requiredFields || [], null, 2)}`;
       user: prompts.user
     },
     {
-      model: this.
+      model: this.getModelForTask("complex"),
       maxTokens: 3e3,
       temperature: 0.2,
       apiKey: this.getApiKey(apiKey)

@@ -6240,7 +6348,7 @@ ${errorMsg}
     tools,
     toolHandler,
     {
-      model: this.
+      model: this.getModelForTask("complex"),
       maxTokens: 4e3,
       temperature: 0.7,
       apiKey: this.getApiKey(apiKey),

@@ -6285,6 +6393,21 @@ ${errorMsg}
     if (category === "general") {
       logger.info(`[${this.getProviderName()}] Skipping component generation for general/conversational question`);
       logCollector?.info("Skipping component generation for general question");
+      logger.info(`[${this.getProviderName()}] Generating actions for general question...`);
+      const nextQuestions = await this.generateNextQuestions(
+        userPrompt,
+        null,
+        // no component
+        void 0,
+        // no component data
+        apiKey,
+        logCollector,
+        conversationHistory,
+        textResponse
+        // pass text response as context
+      );
+      actions = convertQuestionsToActions(nextQuestions);
+      logger.info(`[${this.getProviderName()}] Generated ${actions.length} follow-up actions for general question`);
     } else if (components && components.length > 0) {
       logger.info(`[${this.getProviderName()}] Matching components from text response...`);
       const componentStreamCallback = wrappedStreamCallback && category !== "data_modification" ? (component) => {

@@ -6443,10 +6566,18 @@ ${errorMsg}
       logger.info(`[${this.getProviderName()}] \u2713 100% match - returning UI block directly without adaptation`);
       logCollector?.info(`\u2713 Exact match (${(conversationMatch.similarity * 100).toFixed(2)}%) - returning cached result`);
       logCollector?.info(`Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
+      if (streamCallback && cachedTextResponse) {
+        logger.info(`[${this.getProviderName()}] Streaming cached text response to frontend`);
+        streamCallback(cachedTextResponse);
+      }
+      const cachedActions = conversationMatch.uiBlock?.actions || [];
       return {
         success: true,
         data: {
+          text: cachedTextResponse,
           component,
+          matchedComponents: component?.props?.config?.components || [],
+          actions: cachedActions,
           reasoning: `Exact match from previous conversation (${(conversationMatch.similarity * 100).toFixed(2)}% similarity)`,
           method: `${this.getProviderName()}-semantic-match-exact`,
           semanticSimilarity: conversationMatch.similarity

@@ -6469,10 +6600,18 @@ ${errorMsg}
       logger.info(`[${this.getProviderName()}] Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
       logCollector?.info(`\u2713 UI block adapted successfully`);
       logCollector?.info(`Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
+      if (streamCallback && cachedTextResponse) {
+        logger.info(`[${this.getProviderName()}] Streaming cached text response to frontend (adapted match)`);
+        streamCallback(cachedTextResponse);
+      }
+      const cachedActions = conversationMatch.uiBlock?.actions || [];
       return {
         success: true,
         data: {
+          text: cachedTextResponse,
           component: adaptResult.adaptedComponent,
+          matchedComponents: adaptResult.adaptedComponent?.props?.config?.components || [],
+          actions: cachedActions,
           reasoning: `Adapted from previous conversation: ${originalPrompt}`,
           method: `${this.getProviderName()}-semantic-match`,
           semanticSimilarity: conversationMatch.similarity,

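Both cached-match paths (exact and adapted) now stream the cached text response through `streamCallback` and return actions and matched components alongside the component itself. An approximate shape of the enriched payload, inferred from the fields assigned in the diff (field names match the diff; the value types are assumptions):

```ts
interface SemanticMatchResult {
  success: true;
  data: {
    text?: string;                // cached text response, also streamed via streamCallback
    component: unknown;           // cached or adapted UI component
    matchedComponents: unknown[]; // component?.props?.config?.components || []
    actions: unknown[];           // conversationMatch.uiBlock?.actions || []
    reasoning: string;
    method: string;               // e.g. "<provider>-semantic-match-exact"
    semanticSimilarity: number;   // 0..1
  };
}
```
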
@@ -6585,15 +6724,26 @@ ${errorMsg}
   /**
   * Generate next questions that the user might ask based on the original prompt and generated component
   * This helps provide intelligent suggestions for follow-up queries
+  * For general/conversational questions without components, pass textResponse instead
   */
-  async generateNextQuestions(originalUserPrompt, component, componentData, apiKey, logCollector, conversationHistory) {
+  async generateNextQuestions(originalUserPrompt, component, componentData, apiKey, logCollector, conversationHistory, textResponse) {
     try {
-
+      let component_info;
+      if (component) {
+        component_info = `
 Component Name: ${component.name}
 Component Type: ${component.type}
 Component Description: ${component.description || "No description"}
 Component Props: ${component.props ? JSON.stringify(component.props, null, 2) : "No props"}
 `;
+      } else if (textResponse) {
+        component_info = `
+Response Type: Text/Conversational Response
+Response Content: ${textResponse.substring(0, 1e3)}${textResponse.length > 1e3 ? "..." : ""}
+`;
+      } else {
+        component_info = "No component or response context available";
+      }
       const component_data = componentData ? `Component Data: ${JSON.stringify(componentData, null, 2)}` : "";
       const prompts = await promptLoader.loadPrompts("actions", {
         ORIGINAL_USER_PROMPT: originalUserPrompt,

@@ -6607,7 +6757,7 @@ ${errorMsg}
       user: prompts.user
     },
     {
-      model: this.
+      model: this.getModelForTask("simple"),
       maxTokens: 1200,
       temperature: 0.7,
       apiKey: this.getApiKey(apiKey)

@@ -6686,10 +6836,10 @@ var GeminiLLM = class extends BaseLLM {
     super(config);
   }
   getDefaultModel() {
-    return "gemini/gemini-
+    return "gemini/gemini-3-pro-preview";
   }
   getDefaultFastModel() {
-    return "gemini/gemini-
+    return "gemini/gemini-3-flash-preview";
   }
   getDefaultApiKey() {
     return process.env.GEMINI_API_KEY;

@@ -10069,7 +10219,13 @@ function sendResponse8(id, res, sendMessage, clientId) {
 // src/handlers/dash-comp-request.ts
 init_logger();
 init_prompt_loader();
-
+var DEFAULT_DASH_COMP_MODELS = {
+  anthropic: "anthropic/claude-haiku-4-5-20251001",
+  gemini: "gemini/gemini-3-flash-preview",
+  openai: "openai/gpt-4o-mini",
+  groq: "groq/llama-3.3-70b-versatile"
+};
+async function pickComponentWithLLM(prompt, components, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, _collections, tools, dashCompModels) {
   const errors = [];
   let availableComponentsText = "No components available";
   if (components && components.length > 0) {

@@ -10107,27 +10263,37 @@ async function pickComponentWithLLM(prompt, components, anthropicApiKey, groqApi
   logger.debug("[DASH_COMP_REQ] Loaded dash-comp-picker prompts with schema and tools");
   const providers = llmProviders || ["anthropic", "gemini", "openai", "groq"];
   let apiKey;
-  let model
-  … (17 further removed lines are truncated in the source diff view)
+  let model;
+  if (dashCompModels?.model) {
+    model = dashCompModels.model;
+    const modelProvider = model.split("/")[0];
+    if (modelProvider === "anthropic") apiKey = anthropicApiKey;
+    else if (modelProvider === "gemini") apiKey = geminiApiKey;
+    else if (modelProvider === "openai") apiKey = openaiApiKey;
+    else if (modelProvider === "groq") apiKey = groqApiKey;
+    logger.info(`[DASH_COMP_REQ] Using configured model: ${model}`);
+  } else {
+    for (const provider of providers) {
+      if (provider === "anthropic" && anthropicApiKey) {
+        apiKey = anthropicApiKey;
+        model = DEFAULT_DASH_COMP_MODELS.anthropic;
+        break;
+      } else if (provider === "gemini" && geminiApiKey) {
+        apiKey = geminiApiKey;
+        model = DEFAULT_DASH_COMP_MODELS.gemini;
+        break;
+      } else if (provider === "openai" && openaiApiKey) {
+        apiKey = openaiApiKey;
+        model = DEFAULT_DASH_COMP_MODELS.openai;
+        break;
+      } else if (provider === "groq" && groqApiKey) {
+        apiKey = groqApiKey;
+        model = DEFAULT_DASH_COMP_MODELS.groq;
+        break;
+      }
     }
   }
-  if (!apiKey) {
+  if (!apiKey || !model) {
     errors.push("No API key available for any LLM provider");
     return { success: false, errors };
   }

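So a configured `dashCompModels.model` short-circuits the provider fallback chain, and its `"provider/model"` prefix selects which API key is used. A sketch of the selection logic with a hypothetical override:

```ts
// Defaults mirror DEFAULT_DASH_COMP_MODELS from the diff.
const DEFAULT_DASH_COMP_MODELS = {
  anthropic: "anthropic/claude-haiku-4-5-20251001",
  gemini: "gemini/gemini-3-flash-preview",
  openai: "openai/gpt-4o-mini",
  groq: "groq/llama-3.3-70b-versatile",
} as const;

// Hypothetical override forcing the component picker onto OpenAI:
const dashCompModels = { model: "openai/gpt-4o-mini" };

// The provider prefix decides the key: "openai" → openaiApiKey.
const provider = dashCompModels.model.split("/")[0];
console.log(provider); // "openai"
```
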
@@ -10150,11 +10316,21 @@ async function pickComponentWithLLM(prompt, components, anthropicApiKey, groqApi
   logger.file("[DASH_COMP_REQ] LLM response:", JSON.stringify(result, null, 2));
   if (!result.componentId || !result.props) {
     errors.push("Invalid LLM response: missing componentId or props");
+    userPromptErrorLogger.logError("DASH_COMP_REQ", "Invalid LLM response structure", {
+      prompt,
+      result,
+      missingFields: { componentId: !result.componentId, props: !result.props }
+    });
     return { success: false, errors };
   }
   const originalComponent = components.find((c) => c.id === result.componentId);
   if (!originalComponent) {
     errors.push(`Component ${result.componentId} not found in available components`);
+    userPromptErrorLogger.logError("DASH_COMP_REQ", "Component not found", {
+      prompt,
+      componentId: result.componentId,
+      availableComponentIds: components.map((c) => c.id)
+    });
     return { success: false, errors };
   }
   const finalComponent = {

@@ -10183,11 +10359,16 @@ async function pickComponentWithLLM(prompt, components, anthropicApiKey, groqApi
   } catch (error) {
     const errorMsg = error instanceof Error ? error.message : String(error);
     logger.error(`[DASH_COMP_REQ] Error picking component: ${errorMsg}`);
+    userPromptErrorLogger.logError("DASH_COMP_REQ", error instanceof Error ? error : new Error(errorMsg), {
+      prompt,
+      componentsCount: components.length,
+      toolsCount: tools?.length || 0
+    });
     errors.push(errorMsg);
     return { success: false, errors };
   }
 }
-var processDashCompRequest = async (data, components, _sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, tools) => {
+var processDashCompRequest = async (data, components, _sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, tools, dashCompModels) => {
   const errors = [];
   logger.debug("[DASH_COMP_REQ] Parsing incoming message data");
   const parseResult = DashCompRequestMessageSchema.safeParse(data);

@@ -10202,6 +10383,8 @@ var processDashCompRequest = async (data, components, _sendMessage, anthropicApi
   const { id, payload } = dashCompRequest;
   const prompt = payload.prompt;
   const wsId = dashCompRequest.from.id || "unknown";
+  const promptContext = `DASH_COMP: ${prompt?.substring(0, 50)}${(prompt?.length || 0) > 50 ? "..." : ""}`;
+  llmUsageLogger.resetLogFile(promptContext);
   if (!prompt) {
     errors.push("Prompt is required");
   }

@@ -10228,8 +10411,10 @@ var processDashCompRequest = async (data, components, _sendMessage, anthropicApi
     openaiApiKey,
     llmProviders,
     collections,
-    tools
+    tools,
+    dashCompModels
   );
+  llmUsageLogger.logSessionSummary(`DASH_COMP: ${prompt?.substring(0, 30)}`);
   return {
     success: llmResponse.success,
     data: llmResponse.data,

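The handler now brackets each dashboard-component request in a usage-log session: `resetLogFile` starts a fresh log keyed by a prompt snippet, the per-call `log` entries accumulate, and `logSessionSummary` rolls them up. Since `llmUsageLogger` is newly exported (see the export hunks), the same pattern appears usable from application code; a sketch with a hypothetical prompt:

```ts
import { llmUsageLogger } from "@superatomai/sdk-node";

const prompt = "show monthly active users by plan"; // hypothetical
llmUsageLogger.resetLogFile(`DASH_COMP: ${prompt.substring(0, 50)}`);
// ... LLM calls append entries via llmUsageLogger.log(...) ...
llmUsageLogger.logSessionSummary(`DASH_COMP: ${prompt.substring(0, 30)}`);
```
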
@@ -10238,7 +10423,7 @@ var processDashCompRequest = async (data, components, _sendMessage, anthropicApi
     wsId
   };
 };
-async function handleDashCompRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, tools) {
+async function handleDashCompRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, tools, dashCompModels) {
   const response = await processDashCompRequest(
     data,
     components,

@@ -10249,7 +10434,8 @@ async function handleDashCompRequest(data, components, sendMessage, anthropicApi
     openaiApiKey,
     llmProviders,
     collections,
-    tools
+    tools,
+    dashCompModels
   );
   sendDashCompResponse(
     response.id || data.id,

@@ -11134,7 +11320,9 @@ var SuperatomSDK = class {
     this.openaiApiKey = config.OPENAI_API_KEY || process.env.OPENAI_API_KEY || "";
     this.llmProviders = config.LLM_PROVIDERS || getLLMProviders();
    this.databaseType = config.databaseType || "postgresql";
-
+    this.modelStrategy = config.modelStrategy || "fast";
+    this.applyModelStrategy(this.modelStrategy);
+    logger.info(`Initializing Superatom SDK v${SDK_VERSION} for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, database type: ${this.databaseType}, model strategy: ${this.modelStrategy}`);
     this.userManager = new UserManager(this.projectId, 5e3);
     this.dashboardManager = new DashboardManager(this.projectId);
     this.reportManager = new ReportManager(this.projectId);

@@ -11513,6 +11701,31 @@ var SuperatomSDK = class {
   getTools() {
     return this.tools;
   }
+  /**
+   * Apply model strategy to all LLM provider singletons
+   * @param strategy - 'best', 'fast', or 'balanced'
+   */
+  applyModelStrategy(strategy) {
+    anthropicLLM.setModelStrategy(strategy);
+    groqLLM.setModelStrategy(strategy);
+    geminiLLM.setModelStrategy(strategy);
+    openaiLLM.setModelStrategy(strategy);
+    logger.info(`Model strategy '${strategy}' applied to all LLM providers`);
+  }
+  /**
+   * Set model strategy at runtime
+   * @param strategy - 'best', 'fast', or 'balanced'
+   */
+  setModelStrategy(strategy) {
+    this.modelStrategy = strategy;
+    this.applyModelStrategy(strategy);
+  }
+  /**
+   * Get current model strategy
+   */
+  getModelStrategy() {
+    return this.modelStrategy;
+  }
 };
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {

@@ -11528,9 +11741,13 @@ var SuperatomSDK = class {
   UIBlock,
   UILogCollector,
   UserManager,
+  anthropicLLM,
+  geminiLLM,
+  groqLLM,
   hybridRerank,
   llmUsageLogger,
   logger,
+  openaiLLM,
   rerankChromaResults,
   rerankConversationResults,
   userPromptErrorLogger
