@superatomai/sdk-node 0.0.41 → 0.0.42
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +38 -13
- package/dist/index.d.ts +38 -13
- package/dist/index.js +120 -29
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +120 -29
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
@@ -1883,7 +1883,7 @@ var ComponentPropsSchema = z3.object({
   description: z3.string().optional(),
   config: z3.record(z3.unknown()).optional(),
   actions: z3.array(z3.any()).optional()
-});
+}).passthrough();
 var ComponentSchema = z3.object({
   id: z3.string(),
   name: z3.string(),
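Note on the change above: a zod object schema strips undeclared keys by default, while .passthrough() keeps them, so component props with extra fields now survive validation intact. A minimal sketch of the difference (standard zod semantics; variable names hypothetical):

import { z } from "zod";
const strict = z.object({ description: z.string().optional() });
const loose = strict.passthrough();
strict.parse({ description: "x", extra: 1 }); // => { description: "x" }, "extra" is stripped
loose.parse({ description: "x", extra: 1 });  // => { description: "x", extra: 1 }, "extra" survives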
@@ -3408,9 +3408,9 @@ var PRICING = {
   "gpt-4": { input: 30, output: 60 },
   "gpt-3.5-turbo": { input: 0.5, output: 1.5 },
   // Google Gemini (December 2025)
-  "gemini-3-pro": { input: 2, output:
+  "gemini-3-pro-preview": { input: 2, output: 12 },
   // New Gemini 3
-  "gemini-
+  "gemini-3-flash-preview": { input: 0.5, output: 3 },
   // For prompts ≤200K tokens, 2x for >200K
   "gemini-2.5-flash": { input: 0.15, output: 0.6 },
   // Standard mode (thinking disabled: $0.60, thinking enabled: $3.50)
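The neighboring PRICING entries (gpt-4 at 30/60, gemini-2.5-flash at 0.15/0.6) are consistent with USD per million tokens, so the new Gemini 3 preview rows would price out roughly as below. A hedged sketch (helper name hypothetical; assumes the flat ≤200K-token rate and the PRICING table in scope):

// Hypothetical helper, assuming PRICING values are USD per 1M tokens:
function estimateCostUSD(model, inputTokens, outputTokens) {
  const p = PRICING[model];
  return (inputTokens / 1e6) * p.input + (outputTokens / 1e6) * p.output;
}
estimateCostUSD("gemini-3-flash-preview", 100000, 10000); // 0.05 + 0.03 = 0.08 USD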
@@ -4595,6 +4595,7 @@ var LLM = class {
     }
   }
   static async _geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
+    const methodStartTime = Date.now();
     const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
     const genAI = new GoogleGenerativeAI(apiKey);
     const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
@@ -4623,11 +4624,18 @@ var LLM = class {
     let iterations = 0;
     let finalText = "";
     let currentUserMessage = messages.user;
+    let totalToolCalls = 0;
+    let totalInputTokens = 0;
+    let totalOutputTokens = 0;
     while (iterations < maxIterations) {
       iterations++;
+      const iterationStartTime = Date.now();
+      const requestId = llmUsageLogger.generateRequestId();
       const result = await chat.sendMessageStream(currentUserMessage);
       let responseText = "";
       const functionCalls = [];
+      let inputTokens = 0;
+      let outputTokens = 0;
       for await (const chunk of result.stream) {
         const candidate = chunk.candidates?.[0];
         if (!candidate) continue;
@@ -4644,7 +4652,37 @@ var LLM = class {
             });
           }
         }
+        if (chunk.usageMetadata) {
+          inputTokens = chunk.usageMetadata.promptTokenCount || 0;
+          outputTokens = chunk.usageMetadata.candidatesTokenCount || 0;
+        }
       }
+      const iterationDuration = Date.now() - iterationStartTime;
+      const toolCallsInIteration = functionCalls.length;
+      totalToolCalls += toolCallsInIteration;
+      if (inputTokens === 0) {
+        const userMsg = typeof currentUserMessage === "string" ? currentUserMessage : JSON.stringify(currentUserMessage);
+        inputTokens = Math.ceil((systemPrompt.length + userMsg.length) / 4);
+      }
+      if (outputTokens === 0) {
+        outputTokens = Math.ceil(responseText.length / 4);
+      }
+      totalInputTokens += inputTokens;
+      totalOutputTokens += outputTokens;
+      llmUsageLogger.log({
+        timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+        requestId,
+        provider: "gemini",
+        model: modelName,
+        method: `streamWithTools[iter=${iterations}]`,
+        inputTokens,
+        outputTokens,
+        totalTokens: inputTokens + outputTokens,
+        costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
+        durationMs: iterationDuration,
+        toolCalls: toolCallsInIteration,
+        success: true
+      });
       if (functionCalls.length === 0) {
         finalText = responseText;
         break;
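When the stream never reports usageMetadata, the accounting above falls back to a length/4 estimate, the common rough heuristic of about four characters per token for English-like text. The same fallback in isolation (function name hypothetical):

// Standalone version of the fallback used above:
function approxTokens(text) {
  return Math.ceil(text.length / 4);
}
approxTokens("hello world, this is a test"); // Math.ceil(27 / 4) = 7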
@@ -4677,6 +4715,23 @@ var LLM = class {
       }));
       currentUserMessage = functionResponseParts;
     }
+    const totalDuration = Date.now() - methodStartTime;
+    if (iterations > 1) {
+      llmUsageLogger.log({
+        timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+        requestId: llmUsageLogger.generateRequestId(),
+        provider: "gemini",
+        model: modelName,
+        method: `streamWithTools[TOTAL:${iterations}iters]`,
+        inputTokens: totalInputTokens,
+        outputTokens: totalOutputTokens,
+        totalTokens: totalInputTokens + totalOutputTokens,
+        costUSD: llmUsageLogger.calculateCost(modelName, totalInputTokens, totalOutputTokens),
+        durationMs: totalDuration,
+        toolCalls: totalToolCalls,
+        success: true
+      });
+    }
     if (iterations >= maxIterations) {
       throw new Error(`Max iterations (${maxIterations}) reached in tool calling loop`);
     }
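The TOTAL row is guarded by iterations > 1: for a single-iteration run the per-iteration row already carries the full usage, so a separate summary would presumably only duplicate it in the log.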
@@ -6731,10 +6786,10 @@ var GeminiLLM = class extends BaseLLM {
     super(config);
   }
   getDefaultModel() {
-    return "gemini/gemini-
+    return "gemini/gemini-3-pro-preview";
   }
   getDefaultFastModel() {
-    return "gemini/gemini-
+    return "gemini/gemini-3-flash-preview";
   }
   getDefaultApiKey() {
     return process.env.GEMINI_API_KEY;
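Both GeminiLLM defaults move to the Gemini 3 preview models, matching the gemini-3-pro-preview and gemini-3-flash-preview entries added to PRICING above.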
@@ -10114,7 +10169,13 @@ function sendResponse8(id, res, sendMessage, clientId) {
 // src/handlers/dash-comp-request.ts
 init_logger();
 init_prompt_loader();
-
+var DEFAULT_DASH_COMP_MODELS = {
+  anthropic: "anthropic/claude-haiku-4-5-20251001",
+  gemini: "gemini/gemini-3-flash-preview",
+  openai: "openai/gpt-4o-mini",
+  groq: "groq/llama-3.3-70b-versatile"
+};
+async function pickComponentWithLLM(prompt, components, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, _collections, tools, dashCompModels) {
   const errors = [];
   let availableComponentsText = "No components available";
   if (components && components.length > 0) {
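DEFAULT_DASH_COMP_MODELS gathers the per-provider fallback models into one table, each in the provider/model string format used throughout the SDK; the prefix before the first "/" is what the selection logic below keys on:

// The "provider/model" convention, as consumed by the selection logic below:
const model = DEFAULT_DASH_COMP_MODELS.gemini; // "gemini/gemini-3-flash-preview"
const provider = model.split("/")[0];          // "gemini", which selects geminiApiKey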
@@ -10152,27 +10213,37 @@ async function pickComponentWithLLM(prompt, components, anthropicApiKey, groqApi
   logger.debug("[DASH_COMP_REQ] Loaded dash-comp-picker prompts with schema and tools");
   const providers = llmProviders || ["anthropic", "gemini", "openai", "groq"];
   let apiKey;
-  let model
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  let model;
+  if (dashCompModels?.model) {
+    model = dashCompModels.model;
+    const modelProvider = model.split("/")[0];
+    if (modelProvider === "anthropic") apiKey = anthropicApiKey;
+    else if (modelProvider === "gemini") apiKey = geminiApiKey;
+    else if (modelProvider === "openai") apiKey = openaiApiKey;
+    else if (modelProvider === "groq") apiKey = groqApiKey;
+    logger.info(`[DASH_COMP_REQ] Using configured model: ${model}`);
+  } else {
+    for (const provider of providers) {
+      if (provider === "anthropic" && anthropicApiKey) {
+        apiKey = anthropicApiKey;
+        model = DEFAULT_DASH_COMP_MODELS.anthropic;
+        break;
+      } else if (provider === "gemini" && geminiApiKey) {
+        apiKey = geminiApiKey;
+        model = DEFAULT_DASH_COMP_MODELS.gemini;
+        break;
+      } else if (provider === "openai" && openaiApiKey) {
+        apiKey = openaiApiKey;
+        model = DEFAULT_DASH_COMP_MODELS.openai;
+        break;
+      } else if (provider === "groq" && groqApiKey) {
+        apiKey = groqApiKey;
+        model = DEFAULT_DASH_COMP_MODELS.groq;
+        break;
+      }
     }
   }
-  if (!apiKey) {
+  if (!apiKey || !model) {
     errors.push("No API key available for any LLM provider");
     return { success: false, errors };
   }
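With this in place a caller can pin the component picker to one model via dashCompModels; when the override is absent, the first entry in llmProviders that has an API key wins. A hedged sketch of a pinned call (argument values hypothetical):

// Hypothetical invocation; only the trailing dashCompModels argument is new in 0.0.42:
await pickComponentWithLLM(
  prompt, components,
  anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey,
  ["gemini", "anthropic"],                    // llmProviders fallback order (unused here)
  collections, tools,
  { model: "gemini/gemini-3-flash-preview" }  // skips the provider fallback loop
);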
@@ -10195,11 +10266,21 @@ async function pickComponentWithLLM(prompt, components, anthropicApiKey, groqApi
     logger.file("[DASH_COMP_REQ] LLM response:", JSON.stringify(result, null, 2));
     if (!result.componentId || !result.props) {
       errors.push("Invalid LLM response: missing componentId or props");
+      userPromptErrorLogger.logError("DASH_COMP_REQ", "Invalid LLM response structure", {
+        prompt,
+        result,
+        missingFields: { componentId: !result.componentId, props: !result.props }
+      });
       return { success: false, errors };
     }
     const originalComponent = components.find((c) => c.id === result.componentId);
     if (!originalComponent) {
       errors.push(`Component ${result.componentId} not found in available components`);
+      userPromptErrorLogger.logError("DASH_COMP_REQ", "Component not found", {
+        prompt,
+        componentId: result.componentId,
+        availableComponentIds: components.map((c) => c.id)
+      });
       return { success: false, errors };
     }
     const finalComponent = {
@@ -10228,11 +10309,16 @@ async function pickComponentWithLLM(prompt, components, anthropicApiKey, groqApi
   } catch (error) {
     const errorMsg = error instanceof Error ? error.message : String(error);
     logger.error(`[DASH_COMP_REQ] Error picking component: ${errorMsg}`);
+    userPromptErrorLogger.logError("DASH_COMP_REQ", error instanceof Error ? error : new Error(errorMsg), {
+      prompt,
+      componentsCount: components.length,
+      toolsCount: tools?.length || 0
+    });
     errors.push(errorMsg);
     return { success: false, errors };
   }
 }
-var processDashCompRequest = async (data, components, _sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, tools) => {
+var processDashCompRequest = async (data, components, _sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, tools, dashCompModels) => {
   const errors = [];
   logger.debug("[DASH_COMP_REQ] Parsing incoming message data");
   const parseResult = DashCompRequestMessageSchema.safeParse(data);
@@ -10247,6 +10333,8 @@ var processDashCompRequest = async (data, components, _sendMessage, anthropicApi
   const { id, payload } = dashCompRequest;
   const prompt = payload.prompt;
   const wsId = dashCompRequest.from.id || "unknown";
+  const promptContext = `DASH_COMP: ${prompt?.substring(0, 50)}${(prompt?.length || 0) > 50 ? "..." : ""}`;
+  llmUsageLogger.resetLogFile(promptContext);
   if (!prompt) {
     errors.push("Prompt is required");
   }
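Each dash-comp request now resets the usage log with a context built from the first 50 characters of the prompt. Evaluating the template above for a long prompt:

// Worked example of the promptContext expression:
const prompt = "build a revenue dashboard with a monthly breakdown and a filter";
const promptContext = `DASH_COMP: ${prompt?.substring(0, 50)}${(prompt?.length || 0) > 50 ? "..." : ""}`;
// => "DASH_COMP: build a revenue dashboard with a monthly breakdown..."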
@@ -10273,8 +10361,10 @@ var processDashCompRequest = async (data, components, _sendMessage, anthropicApi
     openaiApiKey,
     llmProviders,
     collections,
-    tools
+    tools,
+    dashCompModels
   );
+  llmUsageLogger.logSessionSummary(`DASH_COMP: ${prompt?.substring(0, 30)}`);
   return {
     success: llmResponse.success,
     data: llmResponse.data,
@@ -10283,7 +10373,7 @@ var processDashCompRequest = async (data, components, _sendMessage, anthropicApi
     wsId
   };
 };
-async function handleDashCompRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, tools) {
+async function handleDashCompRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, tools, dashCompModels) {
   const response = await processDashCompRequest(
     data,
     components,
@@ -10294,7 +10384,8 @@ async function handleDashCompRequest(data, components, sendMessage, anthropicApi
     openaiApiKey,
     llmProviders,
     collections,
-    tools
+    tools,
+    dashCompModels
   );
   sendDashCompResponse(
     response.id || data.id,