@superatomai/sdk-node 0.0.40 → 0.0.42

This diff compares the published contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1883,7 +1883,7 @@ var ComponentPropsSchema = z3.object({
  description: z3.string().optional(),
  config: z3.record(z3.unknown()).optional(),
  actions: z3.array(z3.any()).optional()
- });
+ }).passthrough();
  var ComponentSchema = z3.object({
  id: z3.string(),
  name: z3.string(),
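
ComponentPropsSchema now ends in Zod's .passthrough(), so unknown keys survive parsing instead of being stripped. A minimal sketch of the behavioral difference (standard Zod v3 API; the Strict/Loose names are illustrative only):

    import { z } from "zod";

    // Zod strips unrecognized keys by default; .passthrough() keeps them.
    const Strict = z.object({ id: z.string() });
    const Loose = z.object({ id: z.string() }).passthrough();

    Strict.parse({ id: "a", extra: 1 }); // -> { id: "a" }
    Loose.parse({ id: "a", extra: 1 });  // -> { id: "a", extra: 1 }
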
@@ -3386,30 +3386,47 @@ import { jsonrepair } from "jsonrepair";
  import fs5 from "fs";
  import path4 from "path";
  var PRICING = {
- // Anthropic
- "claude-3-5-sonnet-20241022": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
+ // Anthropic (December 2025)
+ "claude-opus-4-5": { input: 5, output: 25, cacheRead: 0.5, cacheWrite: 6.25 },
+ "claude-opus-4-5-20251101": { input: 5, output: 25, cacheRead: 0.5, cacheWrite: 6.25 },
  "claude-sonnet-4-5": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
  "claude-sonnet-4-5-20250929": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
- "claude-3-5-haiku-20241022": { input: 0.8, output: 4, cacheRead: 0.08, cacheWrite: 1 },
- "claude-haiku-4-5-20251001": { input: 0.8, output: 4, cacheRead: 0.08, cacheWrite: 1 },
+ "claude-haiku-4-5": { input: 1, output: 5, cacheRead: 0.1, cacheWrite: 1.25 },
+ "claude-haiku-4-5-20251001": { input: 1, output: 5, cacheRead: 0.1, cacheWrite: 1.25 },
+ "claude-3-5-sonnet-20241022": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
+ "claude-3-5-haiku-20241022": { input: 1, output: 5, cacheRead: 0.1, cacheWrite: 1.25 },
  "claude-3-opus-20240229": { input: 15, output: 75, cacheRead: 1.5, cacheWrite: 18.75 },
  "claude-3-sonnet-20240229": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
  "claude-3-haiku-20240307": { input: 0.25, output: 1.25, cacheRead: 0.03, cacheWrite: 0.3 },
- // OpenAI
- "gpt-4o": { input: 2.5, output: 10 },
+ // OpenAI (December 2025)
+ "gpt-5": { input: 1.25, output: 10 },
+ "gpt-5-mini": { input: 0.25, output: 2 },
+ "gpt-4o": { input: 5, output: 15 },
+ // Updated pricing as of late 2025
  "gpt-4o-mini": { input: 0.15, output: 0.6 },
  "gpt-4-turbo": { input: 10, output: 30 },
  "gpt-4": { input: 30, output: 60 },
  "gpt-3.5-turbo": { input: 0.5, output: 1.5 },
- // Gemini
+ // Google Gemini (December 2025)
+ "gemini-3-pro-preview": { input: 2, output: 12 },
+ // New Gemini 3
+ "gemini-3-flash-preview": { input: 0.5, output: 3 },
+ // For prompts ≤200K tokens, 2x for >200K
+ "gemini-2.5-flash": { input: 0.15, output: 0.6 },
+ // Standard mode (thinking disabled: $0.60, thinking enabled: $3.50)
+ "gemini-2.5-flash-lite": { input: 0.1, output: 0.4 },
+ "gemini-2.0-flash": { input: 0.1, output: 0.4 },
+ "gemini-2.0-flash-lite": { input: 0.075, output: 0.3 },
  "gemini-1.5-pro": { input: 1.25, output: 5 },
  "gemini-1.5-flash": { input: 0.075, output: 0.3 },
- "gemini-2.0-flash-exp": { input: 0.1, output: 0.4 },
- // Groq (very cheap)
+ // Groq (December 2025)
  "llama-3.3-70b-versatile": { input: 0.59, output: 0.79 },
  "llama-3.1-70b-versatile": { input: 0.59, output: 0.79 },
  "llama-3.1-8b-instant": { input: 0.05, output: 0.08 },
- "mixtral-8x7b-32768": { input: 0.24, output: 0.24 }
+ "llama-4-scout-17b-16e": { input: 0.11, output: 0.34 },
+ "llama-4-maverick-17b-128e": { input: 0.2, output: 0.6 },
+ "mixtral-8x7b-32768": { input: 0.27, output: 0.27 },
+ "qwen3-32b": { input: 0.29, output: 0.59 }
  };
  var DEFAULT_PRICING = { input: 3, output: 15 };
  var LLMUsageLogger = class {
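
The PRICING rates read as USD per million tokens. A sketch of the kind of lookup llmUsageLogger.calculateCost presumably performs; the provider-prefix stripping and DEFAULT_PRICING fallback are assumptions inferred from the model ids used elsewhere in this diff, not confirmed by it:

    // Hypothetical cost lookup over the PRICING table above.
    function calculateCost(model, inputTokens, outputTokens) {
      // Model ids elsewhere carry a provider prefix, e.g. "anthropic/claude-sonnet-4-5";
      // strip it before the table lookup.
      const key = model.includes("/") ? model.split("/").pop() : model;
      const rates = PRICING[key] ?? DEFAULT_PRICING;
      return (inputTokens * rates.input + outputTokens * rates.output) / 1e6;
    }

    // e.g. 10,000 input + 2,000 output tokens on claude-sonnet-4-5:
    // (10000 * 3 + 2000 * 15) / 1e6 = $0.06
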
@@ -4578,6 +4595,7 @@ var LLM = class {
  }
  }
  static async _geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
+ const methodStartTime = Date.now();
  const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
  const genAI = new GoogleGenerativeAI(apiKey);
  const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
@@ -4606,11 +4624,18 @@ var LLM = class {
  let iterations = 0;
  let finalText = "";
  let currentUserMessage = messages.user;
+ let totalToolCalls = 0;
+ let totalInputTokens = 0;
+ let totalOutputTokens = 0;
  while (iterations < maxIterations) {
  iterations++;
+ const iterationStartTime = Date.now();
+ const requestId = llmUsageLogger.generateRequestId();
  const result = await chat.sendMessageStream(currentUserMessage);
  let responseText = "";
  const functionCalls = [];
+ let inputTokens = 0;
+ let outputTokens = 0;
  for await (const chunk of result.stream) {
  const candidate = chunk.candidates?.[0];
  if (!candidate) continue;
@@ -4627,7 +4652,37 @@ var LLM = class {
  });
  }
  }
+ if (chunk.usageMetadata) {
+ inputTokens = chunk.usageMetadata.promptTokenCount || 0;
+ outputTokens = chunk.usageMetadata.candidatesTokenCount || 0;
+ }
+ }
+ const iterationDuration = Date.now() - iterationStartTime;
+ const toolCallsInIteration = functionCalls.length;
+ totalToolCalls += toolCallsInIteration;
+ if (inputTokens === 0) {
+ const userMsg = typeof currentUserMessage === "string" ? currentUserMessage : JSON.stringify(currentUserMessage);
+ inputTokens = Math.ceil((systemPrompt.length + userMsg.length) / 4);
+ }
+ if (outputTokens === 0) {
+ outputTokens = Math.ceil(responseText.length / 4);
  }
+ totalInputTokens += inputTokens;
+ totalOutputTokens += outputTokens;
+ llmUsageLogger.log({
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+ requestId,
+ provider: "gemini",
+ model: modelName,
+ method: `streamWithTools[iter=${iterations}]`,
+ inputTokens,
+ outputTokens,
+ totalTokens: inputTokens + outputTokens,
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens, outputTokens),
+ durationMs: iterationDuration,
+ toolCalls: toolCallsInIteration,
+ success: true
+ });
  if (functionCalls.length === 0) {
  finalText = responseText;
  break;
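
When the Gemini stream carries no usageMetadata, the new code falls back to a length-based estimate of roughly four characters per token — a common rule of thumb, not a tokenizer. The same heuristic in isolation:

    // Approximation only; real counts come from usageMetadata when present.
    const estimateTokens = (text) => Math.ceil(text.length / 4);
    estimateTokens("hello world"); // 11 chars -> 3 tokens
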
@@ -4660,6 +4715,23 @@ var LLM = class {
  }));
  currentUserMessage = functionResponseParts;
  }
+ const totalDuration = Date.now() - methodStartTime;
+ if (iterations > 1) {
+ llmUsageLogger.log({
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+ requestId: llmUsageLogger.generateRequestId(),
+ provider: "gemini",
+ model: modelName,
+ method: `streamWithTools[TOTAL:${iterations}iters]`,
+ inputTokens: totalInputTokens,
+ outputTokens: totalOutputTokens,
+ totalTokens: totalInputTokens + totalOutputTokens,
+ costUSD: llmUsageLogger.calculateCost(modelName, totalInputTokens, totalOutputTokens),
+ durationMs: totalDuration,
+ toolCalls: totalToolCalls,
+ success: true
+ });
+ }
  if (iterations >= maxIterations) {
  throw new Error(`Max iterations (${maxIterations}) reached in tool calling loop`);
  }
@@ -5336,6 +5408,38 @@ var BaseLLM = class {
  this.fastModel = config?.fastModel || this.getDefaultFastModel();
  this.defaultLimit = config?.defaultLimit || 50;
  this.apiKey = config?.apiKey;
+ this.modelStrategy = config?.modelStrategy || "fast";
+ }
+ /**
+ * Get the appropriate model based on task type and model strategy
+ * @param taskType - 'complex' for text generation/matching, 'simple' for classification/actions
+ * @returns The model string to use for this task
+ */
+ getModelForTask(taskType) {
+ switch (this.modelStrategy) {
+ case "best":
+ return this.model;
+ case "fast":
+ return this.fastModel;
+ case "balanced":
+ default:
+ return taskType === "complex" ? this.model : this.fastModel;
+ }
+ }
+ /**
+ * Set the model strategy at runtime
+ * @param strategy - 'best', 'fast', or 'balanced'
+ */
+ setModelStrategy(strategy) {
+ this.modelStrategy = strategy;
+ logger.info(`[${this.getProviderName()}] Model strategy set to: ${strategy}`);
+ }
+ /**
+ * Get the current model strategy
+ * @returns The current model strategy
+ */
+ getModelStrategy() {
+ return this.modelStrategy;
  }
  /**
  * Get the API key (from instance, parameter, or environment)
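
The strategy resolves to a concrete model per call: 'best' always returns the primary model, 'fast' (the default) always returns the fast model, and 'balanced' routes by task type. A usage sketch, assuming llm is an instance of a BaseLLM subclass:

    llm.setModelStrategy("balanced");
    llm.getModelForTask("complex"); // -> llm.model (text generation, matching)
    llm.getModelForTask("simple");  // -> llm.fastModel (classification, actions)
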
@@ -5520,7 +5624,7 @@ ${JSON.stringify(tool.requiredFields || [], null, 2)}`;
  user: prompts.user
  },
  {
- model: this.model,
+ model: this.getModelForTask("complex"),
  maxTokens: 8192,
  temperature: 0.2,
  apiKey: this.getApiKey(apiKey),
@@ -5643,7 +5747,7 @@ ${JSON.stringify(tool.requiredFields || [], null, 2)}`;
  user: prompts.user
  },
  {
- model: this.fastModel,
+ model: this.getModelForTask("simple"),
  maxTokens: 1500,
  temperature: 0.2,
  apiKey: this.getApiKey(apiKey)
@@ -5704,7 +5808,7 @@ ${JSON.stringify(tool.requiredFields || [], null, 2)}`;
  user: prompts.user
  },
  {
- model: this.model,
+ model: this.getModelForTask("complex"),
  maxTokens: 3e3,
  temperature: 0.2,
  apiKey: this.getApiKey(apiKey)
@@ -6194,7 +6298,7 @@ ${errorMsg}
  tools,
  toolHandler,
  {
- model: this.model,
+ model: this.getModelForTask("complex"),
  maxTokens: 4e3,
  temperature: 0.7,
  apiKey: this.getApiKey(apiKey),
@@ -6239,6 +6343,21 @@ ${errorMsg}
  if (category === "general") {
  logger.info(`[${this.getProviderName()}] Skipping component generation for general/conversational question`);
  logCollector?.info("Skipping component generation for general question");
+ logger.info(`[${this.getProviderName()}] Generating actions for general question...`);
+ const nextQuestions = await this.generateNextQuestions(
+ userPrompt,
+ null,
+ // no component
+ void 0,
+ // no component data
+ apiKey,
+ logCollector,
+ conversationHistory,
+ textResponse
+ // pass text response as context
+ );
+ actions = convertQuestionsToActions(nextQuestions);
+ logger.info(`[${this.getProviderName()}] Generated ${actions.length} follow-up actions for general question`);
  } else if (components && components.length > 0) {
  logger.info(`[${this.getProviderName()}] Matching components from text response...`);
  const componentStreamCallback = wrappedStreamCallback && category !== "data_modification" ? (component) => {
@@ -6397,10 +6516,18 @@ ${errorMsg}
  logger.info(`[${this.getProviderName()}] \u2713 100% match - returning UI block directly without adaptation`);
  logCollector?.info(`\u2713 Exact match (${(conversationMatch.similarity * 100).toFixed(2)}%) - returning cached result`);
  logCollector?.info(`Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
+ if (streamCallback && cachedTextResponse) {
+ logger.info(`[${this.getProviderName()}] Streaming cached text response to frontend`);
+ streamCallback(cachedTextResponse);
+ }
+ const cachedActions = conversationMatch.uiBlock?.actions || [];
  return {
  success: true,
  data: {
+ text: cachedTextResponse,
  component,
+ matchedComponents: component?.props?.config?.components || [],
+ actions: cachedActions,
  reasoning: `Exact match from previous conversation (${(conversationMatch.similarity * 100).toFixed(2)}% similarity)`,
  method: `${this.getProviderName()}-semantic-match-exact`,
  semanticSimilarity: conversationMatch.similarity
@@ -6423,10 +6550,18 @@ ${errorMsg}
  logger.info(`[${this.getProviderName()}] Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
  logCollector?.info(`\u2713 UI block adapted successfully`);
  logCollector?.info(`Total time taken: ${elapsedTime2}ms (${(elapsedTime2 / 1e3).toFixed(2)}s)`);
+ if (streamCallback && cachedTextResponse) {
+ logger.info(`[${this.getProviderName()}] Streaming cached text response to frontend (adapted match)`);
+ streamCallback(cachedTextResponse);
+ }
+ const cachedActions = conversationMatch.uiBlock?.actions || [];
  return {
  success: true,
  data: {
+ text: cachedTextResponse,
  component: adaptResult.adaptedComponent,
+ matchedComponents: adaptResult.adaptedComponent?.props?.config?.components || [],
+ actions: cachedActions,
  reasoning: `Adapted from previous conversation: ${originalPrompt}`,
  method: `${this.getProviderName()}-semantic-match`,
  semanticSimilarity: conversationMatch.similarity,
@@ -6539,15 +6674,26 @@ ${errorMsg}
  /**
  * Generate next questions that the user might ask based on the original prompt and generated component
  * This helps provide intelligent suggestions for follow-up queries
+ * For general/conversational questions without components, pass textResponse instead
  */
- async generateNextQuestions(originalUserPrompt, component, componentData, apiKey, logCollector, conversationHistory) {
+ async generateNextQuestions(originalUserPrompt, component, componentData, apiKey, logCollector, conversationHistory, textResponse) {
  try {
- const component_info = `
+ let component_info;
+ if (component) {
+ component_info = `
  Component Name: ${component.name}
  Component Type: ${component.type}
  Component Description: ${component.description || "No description"}
  Component Props: ${component.props ? JSON.stringify(component.props, null, 2) : "No props"}
  `;
+ } else if (textResponse) {
+ component_info = `
+ Response Type: Text/Conversational Response
+ Response Content: ${textResponse.substring(0, 1e3)}${textResponse.length > 1e3 ? "..." : ""}
+ `;
+ } else {
+ component_info = "No component or response context available";
+ }
  const component_data = componentData ? `Component Data: ${JSON.stringify(componentData, null, 2)}` : "";
  const prompts = await promptLoader.loadPrompts("actions", {
  ORIGINAL_USER_PROMPT: originalUserPrompt,
@@ -6561,7 +6707,7 @@ ${errorMsg}
  user: prompts.user
  },
  {
- model: this.fastModel,
+ model: this.getModelForTask("simple"),
  maxTokens: 1200,
  temperature: 0.7,
  apiKey: this.getApiKey(apiKey)
@@ -6640,10 +6786,10 @@ var GeminiLLM = class extends BaseLLM {
  super(config);
  }
  getDefaultModel() {
- return "gemini/gemini-2.5-flash";
+ return "gemini/gemini-3-pro-preview";
  }
  getDefaultFastModel() {
- return "gemini/gemini-2.0-flash-exp";
+ return "gemini/gemini-3-flash-preview";
  }
  getDefaultApiKey() {
  return process.env.GEMINI_API_KEY;
@@ -10023,7 +10169,13 @@ function sendResponse8(id, res, sendMessage, clientId) {
  // src/handlers/dash-comp-request.ts
  init_logger();
  init_prompt_loader();
- async function pickComponentWithLLM(prompt, components, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, _collections, tools) {
+ var DEFAULT_DASH_COMP_MODELS = {
+ anthropic: "anthropic/claude-haiku-4-5-20251001",
+ gemini: "gemini/gemini-3-flash-preview",
+ openai: "openai/gpt-4o-mini",
+ groq: "groq/llama-3.3-70b-versatile"
+ };
+ async function pickComponentWithLLM(prompt, components, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, _collections, tools, dashCompModels) {
  const errors = [];
  let availableComponentsText = "No components available";
  if (components && components.length > 0) {
@@ -10061,27 +10213,37 @@ async function pickComponentWithLLM(prompt, components, anthropicApiKey, groqApi
  logger.debug("[DASH_COMP_REQ] Loaded dash-comp-picker prompts with schema and tools");
  const providers = llmProviders || ["anthropic", "gemini", "openai", "groq"];
  let apiKey;
- let model = "anthropic/claude-sonnet-4-5-20250929";
- for (const provider of providers) {
- if (provider === "anthropic" && anthropicApiKey) {
- apiKey = anthropicApiKey;
- model = "anthropic/claude-sonnet-4-5-20250929";
- break;
- } else if (provider === "openai" && openaiApiKey) {
- apiKey = openaiApiKey;
- model = "openai/gpt-4o-mini";
- break;
- } else if (provider === "gemini" && geminiApiKey) {
- apiKey = geminiApiKey;
- model = "google/gemini-2.0-flash-001";
- break;
- } else if (provider === "groq" && groqApiKey) {
- apiKey = groqApiKey;
- model = "groq/llama-3.3-70b-versatile";
- break;
+ let model;
+ if (dashCompModels?.model) {
+ model = dashCompModels.model;
+ const modelProvider = model.split("/")[0];
+ if (modelProvider === "anthropic") apiKey = anthropicApiKey;
+ else if (modelProvider === "gemini") apiKey = geminiApiKey;
+ else if (modelProvider === "openai") apiKey = openaiApiKey;
+ else if (modelProvider === "groq") apiKey = groqApiKey;
+ logger.info(`[DASH_COMP_REQ] Using configured model: ${model}`);
+ } else {
+ for (const provider of providers) {
+ if (provider === "anthropic" && anthropicApiKey) {
+ apiKey = anthropicApiKey;
+ model = DEFAULT_DASH_COMP_MODELS.anthropic;
+ break;
+ } else if (provider === "gemini" && geminiApiKey) {
+ apiKey = geminiApiKey;
+ model = DEFAULT_DASH_COMP_MODELS.gemini;
+ break;
+ } else if (provider === "openai" && openaiApiKey) {
+ apiKey = openaiApiKey;
+ model = DEFAULT_DASH_COMP_MODELS.openai;
+ break;
+ } else if (provider === "groq" && groqApiKey) {
+ apiKey = groqApiKey;
+ model = DEFAULT_DASH_COMP_MODELS.groq;
+ break;
+ }
  }
  }
- if (!apiKey) {
+ if (!apiKey || !model) {
  errors.push("No API key available for any LLM provider");
  return { success: false, errors };
  }
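
The new dashCompModels parameter bypasses the provider fallback chain: the configured model is used verbatim and its API key is chosen from the "provider/" prefix. A hypothetical call sketch (argument variables are placeholders following the signature above):

    const result = await pickComponentWithLLM(
      prompt, components,
      anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey,
      ["anthropic", "gemini"], // llmProviders; ignored when a model is configured
      collections, tools,
      { model: "gemini/gemini-3-flash-preview" } // prefix selects geminiApiKey
    );
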
@@ -10104,11 +10266,21 @@ async function pickComponentWithLLM(prompt, components, anthropicApiKey, groqApi
  logger.file("[DASH_COMP_REQ] LLM response:", JSON.stringify(result, null, 2));
  if (!result.componentId || !result.props) {
  errors.push("Invalid LLM response: missing componentId or props");
+ userPromptErrorLogger.logError("DASH_COMP_REQ", "Invalid LLM response structure", {
+ prompt,
+ result,
+ missingFields: { componentId: !result.componentId, props: !result.props }
+ });
  return { success: false, errors };
  }
  const originalComponent = components.find((c) => c.id === result.componentId);
  if (!originalComponent) {
  errors.push(`Component ${result.componentId} not found in available components`);
+ userPromptErrorLogger.logError("DASH_COMP_REQ", "Component not found", {
+ prompt,
+ componentId: result.componentId,
+ availableComponentIds: components.map((c) => c.id)
+ });
  return { success: false, errors };
  }
  const finalComponent = {
@@ -10137,11 +10309,16 @@ async function pickComponentWithLLM(prompt, components, anthropicApiKey, groqApi
  } catch (error) {
  const errorMsg = error instanceof Error ? error.message : String(error);
  logger.error(`[DASH_COMP_REQ] Error picking component: ${errorMsg}`);
+ userPromptErrorLogger.logError("DASH_COMP_REQ", error instanceof Error ? error : new Error(errorMsg), {
+ prompt,
+ componentsCount: components.length,
+ toolsCount: tools?.length || 0
+ });
  errors.push(errorMsg);
  return { success: false, errors };
  }
  }
- var processDashCompRequest = async (data, components, _sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, tools) => {
+ var processDashCompRequest = async (data, components, _sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, tools, dashCompModels) => {
  const errors = [];
  logger.debug("[DASH_COMP_REQ] Parsing incoming message data");
  const parseResult = DashCompRequestMessageSchema.safeParse(data);
@@ -10156,6 +10333,8 @@ var processDashCompRequest = async (data, components, _sendMessage, anthropicApi
  const { id, payload } = dashCompRequest;
  const prompt = payload.prompt;
  const wsId = dashCompRequest.from.id || "unknown";
+ const promptContext = `DASH_COMP: ${prompt?.substring(0, 50)}${(prompt?.length || 0) > 50 ? "..." : ""}`;
+ llmUsageLogger.resetLogFile(promptContext);
  if (!prompt) {
  errors.push("Prompt is required");
  }
@@ -10182,8 +10361,10 @@ var processDashCompRequest = async (data, components, _sendMessage, anthropicApi
  openaiApiKey,
  llmProviders,
  collections,
- tools
+ tools,
+ dashCompModels
  );
+ llmUsageLogger.logSessionSummary(`DASH_COMP: ${prompt?.substring(0, 30)}`);
  return {
  success: llmResponse.success,
  data: llmResponse.data,
@@ -10192,7 +10373,7 @@ var processDashCompRequest = async (data, components, _sendMessage, anthropicApi
  wsId
  };
  };
- async function handleDashCompRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, tools) {
+ async function handleDashCompRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, tools, dashCompModels) {
  const response = await processDashCompRequest(
  data,
  components,
@@ -10203,7 +10384,8 @@ async function handleDashCompRequest(data, components, sendMessage, anthropicApi
  openaiApiKey,
  llmProviders,
  collections,
- tools
+ tools,
+ dashCompModels
  );
  sendDashCompResponse(
  response.id || data.id,
@@ -11088,7 +11270,9 @@ var SuperatomSDK = class {
  this.openaiApiKey = config.OPENAI_API_KEY || process.env.OPENAI_API_KEY || "";
  this.llmProviders = config.LLM_PROVIDERS || getLLMProviders();
  this.databaseType = config.databaseType || "postgresql";
- logger.info(`Initializing Superatom SDK v${SDK_VERSION} for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, database type: ${this.databaseType}`);
+ this.modelStrategy = config.modelStrategy || "fast";
+ this.applyModelStrategy(this.modelStrategy);
+ logger.info(`Initializing Superatom SDK v${SDK_VERSION} for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, database type: ${this.databaseType}, model strategy: ${this.modelStrategy}`);
  this.userManager = new UserManager(this.projectId, 5e3);
  this.dashboardManager = new DashboardManager(this.projectId);
  this.reportManager = new ReportManager(this.projectId);
@@ -11467,6 +11651,31 @@ var SuperatomSDK = class {
  getTools() {
  return this.tools;
  }
+ /**
+ * Apply model strategy to all LLM provider singletons
+ * @param strategy - 'best', 'fast', or 'balanced'
+ */
+ applyModelStrategy(strategy) {
+ anthropicLLM.setModelStrategy(strategy);
+ groqLLM.setModelStrategy(strategy);
+ geminiLLM.setModelStrategy(strategy);
+ openaiLLM.setModelStrategy(strategy);
+ logger.info(`Model strategy '${strategy}' applied to all LLM providers`);
+ }
+ /**
+ * Set model strategy at runtime
+ * @param strategy - 'best', 'fast', or 'balanced'
+ */
+ setModelStrategy(strategy) {
+ this.modelStrategy = strategy;
+ this.applyModelStrategy(strategy);
+ }
+ /**
+ * Get current model strategy
+ */
+ getModelStrategy() {
+ return this.modelStrategy;
+ }
  };
  export {
  BM25L,
@@ -11481,9 +11690,13 @@ export {
  UIBlock,
  UILogCollector,
  UserManager,
+ anthropicLLM,
+ geminiLLM,
+ groqLLM,
  hybridRerank,
  llmUsageLogger,
  logger,
+ openaiLLM,
  rerankChromaResults,
  rerankConversationResults,
  userPromptErrorLogger
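
Taken together with the newly exported provider singletons, the strategy plumbing suggests usage along these lines — a hypothetical sketch (the projectId config key and a SuperatomSDK export are assumed from context, not shown in this diff):

    import { SuperatomSDK, geminiLLM } from "@superatomai/sdk-node";

    // Fix the strategy at construction ("fast" is the default)...
    const sdk = new SuperatomSDK({ projectId: "demo", modelStrategy: "balanced" });

    // ...or switch at runtime; the SDK fans the change out to every provider singleton.
    sdk.setModelStrategy("best");
    sdk.getModelStrategy(); // -> "best"

    // Exported singletons can also be tuned individually.
    geminiLLM.setModelStrategy("fast");
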