@superatomai/sdk-node 0.0.17 → 0.0.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -429,6 +429,8 @@ var ComponentPropsSchema = z3.object({
 var ComponentSchema = z3.object({
   id: z3.string(),
   name: z3.string(),
+  displayName: z3.string().optional(),
+  isDisplayComp: z3.boolean().optional(),
   type: z3.string(),
   description: z3.string(),
   props: ComponentPropsSchema,
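
For context, a component entry satisfying the updated ComponentSchema might look like the sketch below; the field values are illustrative only and not taken from the package.

// Illustrative only: a component object with the two new optional fields.
const revenueChart = {
  id: "revenue-chart",
  name: "RevenueChart",
  displayName: "Revenue Chart",   // new optional field, also used for suggestion scoring
  isDisplayComp: true,            // new optional flag, marks a renderable display component
  type: "chart",
  description: "Displays monthly revenue as a bar chart",
  props: { /* must satisfy ComponentPropsSchema, whose shape is not shown in this hunk */ }
};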
@@ -2905,6 +2907,8 @@ var promptLoader = new PromptLoader({
 // src/llm.ts
 import Anthropic from "@anthropic-ai/sdk";
 import Groq from "groq-sdk";
+import { GoogleGenerativeAI, SchemaType } from "@google/generative-ai";
+import OpenAI from "openai";
 import { jsonrepair } from "jsonrepair";
 var LLM = class {
   /* Get a complete text response from an LLM (Anthropic or Groq) */
@@ -2914,8 +2918,12 @@ var LLM = class {
       return this._anthropicText(messages, modelName, options);
     } else if (provider === "groq") {
       return this._groqText(messages, modelName, options);
+    } else if (provider === "gemini") {
+      return this._geminiText(messages, modelName, options);
+    } else if (provider === "openai") {
+      return this._openaiText(messages, modelName, options);
     } else {
-      throw new Error(`Unsupported provider: ${provider}. Use "anthropic" or "groq"`);
+      throw new Error(`Unsupported provider: ${provider}. Use "anthropic", "groq", "gemini", or "openai"`);
     }
   }
   /* Stream response from an LLM (Anthropic or Groq) */
@@ -2925,17 +2933,26 @@ var LLM = class {
       return this._anthropicStream(messages, modelName, options, json);
     } else if (provider === "groq") {
       return this._groqStream(messages, modelName, options, json);
+    } else if (provider === "gemini") {
+      return this._geminiStream(messages, modelName, options, json);
+    } else if (provider === "openai") {
+      return this._openaiStream(messages, modelName, options, json);
     } else {
-      throw new Error(`Unsupported provider: ${provider}. Use "anthropic" or "groq"`);
+      throw new Error(`Unsupported provider: ${provider}. Use "anthropic", "groq", "gemini", or "openai"`);
     }
   }
-  /* Stream response with tool calling support (Anthropic only for now) */
+  /* Stream response with tool calling support (Anthropic and Gemini) */
   static async streamWithTools(messages, tools, toolHandler, options = {}, maxIterations = 3) {
     const [provider, modelName] = this._parseModel(options.model);
-    if (provider !== "anthropic") {
-      throw new Error(`Tool calling is only supported for Anthropic models`);
+    if (provider === "anthropic") {
+      return this._anthropicStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
+    } else if (provider === "gemini") {
+      return this._geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
+    } else if (provider === "openai") {
+      return this._openaiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
+    } else {
+      throw new Error(`Tool calling is only supported for Anthropic, Gemini, and OpenAI models`);
     }
-    return this._anthropicStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
   }
   // ============================================================
   // PRIVATE HELPER METHODS
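
A minimal sketch of how the extended routing could be exercised, assuming the "provider/model" string format implied by _parseModel and the { sys, user } message shape used throughout these methods; the tool definition and handler below are hypothetical.

// Sketch only: exercise LLM.streamWithTools with a Gemini model string.
// The "gemini/..." prefix and the { sys, user } message shape are inferred from this diff.
const messages = {
  sys: "You are a helpful assistant.",
  user: "What is the weather in Paris?"
};
const tools = [{
  name: "get_weather",                      // hypothetical tool
  description: "Look up current weather for a city",
  input_schema: {
    type: "object",
    properties: { city: { type: "string" } },
    required: ["city"]
  }
}];
const toolHandler = async (name, args) => {
  // hypothetical handler; may return a string or a JSON-serializable value
  return { city: args.city, tempC: 21 };
};
const text = await LLM.streamWithTools(messages, tools, toolHandler, {
  model: "gemini/gemini-2.5-flash",         // or "openai/gpt-4.1", or an Anthropic model
  maxTokens: 4000,
  partial: (chunk) => process.stdout.write(chunk)
});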
@@ -3269,6 +3286,298 @@ var LLM = class {
     return fullText;
   }
   // ============================================================
+  // GEMINI IMPLEMENTATION
+  // ============================================================
+  static async _geminiText(messages, modelName, options) {
+    const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
+    const genAI = new GoogleGenerativeAI(apiKey);
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const model = genAI.getGenerativeModel({
+      model: modelName,
+      systemInstruction: systemPrompt,
+      generationConfig: {
+        maxOutputTokens: options.maxTokens || 1e3,
+        temperature: options.temperature,
+        topP: options.topP
+      }
+    });
+    const result = await model.generateContent(messages.user);
+    const response = await result.response;
+    return response.text();
+  }
+  static async _geminiStream(messages, modelName, options, json) {
+    const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
+    const genAI = new GoogleGenerativeAI(apiKey);
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const model = genAI.getGenerativeModel({
+      model: modelName,
+      systemInstruction: systemPrompt,
+      generationConfig: {
+        maxOutputTokens: options.maxTokens || 1e3,
+        temperature: options.temperature,
+        topP: options.topP,
+        responseMimeType: json ? "application/json" : void 0
+      }
+    });
+    const result = await model.generateContentStream(messages.user);
+    let fullText = "";
+    for await (const chunk of result.stream) {
+      const text = chunk.text();
+      if (text) {
+        fullText += text;
+        if (options.partial) {
+          options.partial(text);
+        }
+      }
+    }
+    if (json) {
+      return this._parseJSON(fullText);
+    }
+    return fullText;
+  }
+  static async _geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
+    const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
+    const genAI = new GoogleGenerativeAI(apiKey);
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const functionDeclarations = tools.map((tool) => ({
+      name: tool.name,
+      description: tool.description,
+      parameters: {
+        type: SchemaType.OBJECT,
+        properties: tool.input_schema.properties,
+        required: tool.input_schema.required || []
+      }
+    }));
+    const model = genAI.getGenerativeModel({
+      model: modelName,
+      systemInstruction: systemPrompt,
+      tools: [{ functionDeclarations }],
+      generationConfig: {
+        maxOutputTokens: options.maxTokens || 4e3,
+        temperature: options.temperature,
+        topP: options.topP
+      }
+    });
+    const chat = model.startChat({
+      history: []
+    });
+    let iterations = 0;
+    let finalText = "";
+    let currentUserMessage = messages.user;
+    while (iterations < maxIterations) {
+      iterations++;
+      const result = await chat.sendMessageStream(currentUserMessage);
+      let responseText = "";
+      const functionCalls = [];
+      for await (const chunk of result.stream) {
+        const candidate = chunk.candidates?.[0];
+        if (!candidate) continue;
+        for (const part of candidate.content?.parts || []) {
+          if (part.text) {
+            responseText += part.text;
+            if (options.partial) {
+              options.partial(part.text);
+            }
+          } else if (part.functionCall) {
+            functionCalls.push({
+              name: part.functionCall.name,
+              args: part.functionCall.args
+            });
+          }
+        }
+      }
+      if (functionCalls.length === 0) {
+        finalText = responseText;
+        break;
+      }
+      const functionResponses = [];
+      for (const fc of functionCalls) {
+        try {
+          const result2 = await toolHandler(fc.name, fc.args);
+          functionResponses.push({
+            name: fc.name,
+            response: { result: typeof result2 === "string" ? result2 : JSON.stringify(result2) }
+          });
+        } catch (error) {
+          functionResponses.push({
+            name: fc.name,
+            response: { error: error instanceof Error ? error.message : String(error) }
+          });
+        }
+      }
+      const functionResponseParts = functionResponses.map((fr) => ({
+        functionResponse: {
+          name: fr.name,
+          response: fr.response
+        }
+      }));
+      currentUserMessage = functionResponseParts;
+    }
+    if (iterations >= maxIterations) {
+      throw new Error(`Max iterations (${maxIterations}) reached in tool calling loop`);
+    }
+    return finalText;
+  }
+  // ============================================================
+  // OPENAI IMPLEMENTATION
+  // ============================================================
+  static async _openaiText(messages, modelName, options) {
+    const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
+    const openai = new OpenAI({ apiKey });
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const response = await openai.chat.completions.create({
+      model: modelName,
+      messages: [
+        { role: "system", content: systemPrompt },
+        { role: "user", content: messages.user }
+      ],
+      max_tokens: options.maxTokens || 1e3,
+      temperature: options.temperature,
+      top_p: options.topP
+    });
+    return response.choices[0]?.message?.content || "";
+  }
+  static async _openaiStream(messages, modelName, options, json) {
+    const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
+    const openai = new OpenAI({ apiKey });
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const stream = await openai.chat.completions.create({
+      model: modelName,
+      messages: [
+        { role: "system", content: systemPrompt },
+        { role: "user", content: messages.user }
+      ],
+      max_tokens: options.maxTokens || 1e3,
+      temperature: options.temperature,
+      top_p: options.topP,
+      response_format: json ? { type: "json_object" } : void 0,
+      stream: true
+    });
+    let fullText = "";
+    for await (const chunk of stream) {
+      const content = chunk.choices[0]?.delta?.content || "";
+      if (content) {
+        fullText += content;
+        if (options.partial) {
+          options.partial(content);
+        }
+      }
+    }
+    if (json) {
+      return this._parseJSON(fullText);
+    }
+    return fullText;
+  }
+  static async _openaiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
+    const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
+    const openai = new OpenAI({ apiKey });
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const openaiTools = tools.map((tool) => ({
+      type: "function",
+      function: {
+        name: tool.name,
+        description: tool.description,
+        parameters: {
+          type: tool.input_schema.type,
+          properties: tool.input_schema.properties,
+          required: tool.input_schema.required || []
+        }
+      }
+    }));
+    const conversationMessages = [
+      { role: "system", content: systemPrompt },
+      { role: "user", content: messages.user }
+    ];
+    let iterations = 0;
+    let finalText = "";
+    while (iterations < maxIterations) {
+      iterations++;
+      const stream = await openai.chat.completions.create({
+        model: modelName,
+        messages: conversationMessages,
+        max_tokens: options.maxTokens || 4e3,
+        temperature: options.temperature,
+        top_p: options.topP,
+        tools: openaiTools,
+        stream: true
+      });
+      let responseText = "";
+      const toolCalls = [];
+      const toolCallsInProgress = /* @__PURE__ */ new Map();
+      for await (const chunk of stream) {
+        const delta = chunk.choices[0]?.delta;
+        if (delta?.content) {
+          responseText += delta.content;
+          if (options.partial) {
+            options.partial(delta.content);
+          }
+        }
+        if (delta?.tool_calls) {
+          for (const toolCallDelta of delta.tool_calls) {
+            const index = toolCallDelta.index;
+            if (!toolCallsInProgress.has(index)) {
+              toolCallsInProgress.set(index, {
+                id: toolCallDelta.id || "",
+                name: toolCallDelta.function?.name || "",
+                arguments: ""
+              });
+            }
+            const tc = toolCallsInProgress.get(index);
+            if (toolCallDelta.id) {
+              tc.id = toolCallDelta.id;
+            }
+            if (toolCallDelta.function?.name) {
+              tc.name = toolCallDelta.function.name;
+            }
+            if (toolCallDelta.function?.arguments) {
+              tc.arguments += toolCallDelta.function.arguments;
+            }
+          }
+        }
+      }
+      for (const tc of toolCallsInProgress.values()) {
+        if (tc.id && tc.name) {
+          toolCalls.push(tc);
+        }
+      }
+      if (toolCalls.length === 0) {
+        finalText = responseText;
+        break;
+      }
+      conversationMessages.push({
+        role: "assistant",
+        content: responseText || null,
+        tool_calls: toolCalls.map((tc) => ({
+          id: tc.id,
+          type: "function",
+          function: {
+            name: tc.name,
+            arguments: tc.arguments
+          }
+        }))
+      });
+      for (const tc of toolCalls) {
+        let result;
+        try {
+          const args = JSON.parse(tc.arguments);
+          const toolResult = await toolHandler(tc.name, args);
+          result = typeof toolResult === "string" ? toolResult : JSON.stringify(toolResult);
+        } catch (error) {
+          result = JSON.stringify({ error: error instanceof Error ? error.message : String(error) });
+        }
+        conversationMessages.push({
+          role: "tool",
+          tool_call_id: tc.id,
+          content: result
+        });
+      }
+    }
+    if (iterations >= maxIterations) {
+      throw new Error(`Max iterations (${maxIterations}) reached in tool calling loop`);
+    }
+    return finalText;
+  }
+  // ============================================================
   // JSON PARSING HELPER
   // ============================================================
   /**
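
To summarize the two adapters above: both start from the same Anthropic-style tool definition and reshape it for their provider. A rough sketch of that mapping, using a hypothetical search_orders tool:

// Sketch of the mapping performed by _geminiStreamWithTools and _openaiStreamWithTools.
const tool = {
  name: "search_orders",                    // hypothetical tool
  description: "Search orders by customer name",
  input_schema: { type: "object", properties: { customer: { type: "string" } }, required: ["customer"] }
};
// Gemini side: a functionDeclaration with SchemaType.OBJECT parameters.
const functionDeclaration = {
  name: tool.name,
  description: tool.description,
  parameters: { type: SchemaType.OBJECT, properties: tool.input_schema.properties, required: tool.input_schema.required || [] }
};
// OpenAI side: a { type: "function", function: {...} } entry in the tools array.
const openaiTool = {
  type: "function",
  function: {
    name: tool.name,
    description: tool.description,
    parameters: { type: tool.input_schema.type, properties: tool.input_schema.properties, required: tool.input_schema.required || [] }
  }
};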
@@ -4557,18 +4866,56 @@ var AnthropicLLM = class extends BaseLLM {
 };
 var anthropicLLM = new AnthropicLLM();
 
-// src/userResponse/index.ts
+// src/userResponse/gemini.ts
 import dotenv3 from "dotenv";
 dotenv3.config();
+var GeminiLLM = class extends BaseLLM {
+  constructor(config) {
+    super(config);
+  }
+  getDefaultModel() {
+    return "gemini/gemini-2.5-flash";
+  }
+  getDefaultApiKey() {
+    return process.env.GEMINI_API_KEY;
+  }
+  getProviderName() {
+    return "Gemini";
+  }
+};
+var geminiLLM = new GeminiLLM();
+
+// src/userResponse/openai.ts
+import dotenv4 from "dotenv";
+dotenv4.config();
+var OpenAILLM = class extends BaseLLM {
+  constructor(config) {
+    super(config);
+  }
+  getDefaultModel() {
+    return "openai/gpt-4.1";
+  }
+  getDefaultApiKey() {
+    return process.env.OPENAI_API_KEY;
+  }
+  getProviderName() {
+    return "OpenAI";
+  }
+};
+var openaiLLM = new OpenAILLM();
+
+// src/userResponse/index.ts
+import dotenv5 from "dotenv";
+dotenv5.config();
 function getLLMProviders() {
   const envProviders = process.env.LLM_PROVIDERS;
-  const DEFAULT_PROVIDERS = ["anthropic", "groq"];
+  const DEFAULT_PROVIDERS = ["anthropic", "gemini", "openai", "groq"];
   if (!envProviders) {
     return DEFAULT_PROVIDERS;
   }
   try {
     const providers = JSON.parse(envProviders);
-    const validProviders = providers.filter((p) => p === "anthropic" || p === "groq");
+    const validProviders = providers.filter((p) => p === "anthropic" || p === "groq" || p === "gemini" || p === "openai");
     if (validProviders.length === 0) {
       return DEFAULT_PROVIDERS;
     }
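
As shown above, getLLMProviders expects LLM_PROVIDERS to be a JSON array of provider names; unrecognized names are filtered out, and a missing or empty value falls back to the default order. A short sketch of that behavior:

// Sketch: LLM_PROVIDERS must be a JSON array, e.g. LLM_PROVIDERS='["gemini","openai"]'
process.env.LLM_PROVIDERS = '["gemini","openai","not-a-provider"]';
console.log(getLLMProviders()); // ["gemini", "openai"] — "not-a-provider" is dropped
delete process.env.LLM_PROVIDERS;
console.log(getLLMProviders()); // ["anthropic", "gemini", "openai", "groq"] (new default order)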
@@ -4611,10 +4958,44 @@ var useGroqMethod = async (prompt, components, apiKey, logCollector, conversatio
   logger.info(`[useGroqMethod] Successfully generated ${responseMode} using Groq`);
   return matchResult;
 };
+var useGeminiMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
+  logger.debug("[useGeminiMethod] Initializing Gemini LLM matching method");
+  logger.debug(`[useGeminiMethod] Response mode: ${responseMode}`);
+  const msg = `Using Gemini LLM ${responseMode === "text" ? "text response" : "matching"} method...`;
+  logger.info(msg);
+  logCollector?.info(msg);
+  if (responseMode === "component" && components.length === 0) {
+    const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
+    logger.error("[useGeminiMethod] No components available");
+    logCollector?.error(emptyMsg);
+    return { success: false, errors: [emptyMsg] };
+  }
+  logger.debug(`[useGeminiMethod] Processing with ${components.length} components`);
+  const matchResult = await geminiLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+  logger.info(`[useGeminiMethod] Successfully generated ${responseMode} using Gemini`);
+  return matchResult;
+};
+var useOpenAIMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
+  logger.debug("[useOpenAIMethod] Initializing OpenAI GPT matching method");
+  logger.debug(`[useOpenAIMethod] Response mode: ${responseMode}`);
+  const msg = `Using OpenAI GPT ${responseMode === "text" ? "text response" : "matching"} method...`;
+  logger.info(msg);
+  logCollector?.info(msg);
+  if (responseMode === "component" && components.length === 0) {
+    const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
+    logger.error("[useOpenAIMethod] No components available");
+    logCollector?.error(emptyMsg);
+    return { success: false, errors: [emptyMsg] };
+  }
+  logger.debug(`[useOpenAIMethod] Processing with ${components.length} components`);
+  const matchResult = await openaiLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+  logger.info(`[useOpenAIMethod] Successfully generated ${responseMode} using OpenAI`);
+  return matchResult;
+};
 var getUserResponseFromCache = async (prompt) => {
   return false;
 };
-var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey, llmProviders, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
+var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
   logger.debug(`[get_user_response] Starting user response generation for prompt: "${prompt.substring(0, 50)}..."`);
   logger.debug(`[get_user_response] Response mode: ${responseMode}`);
   logger.debug("[get_user_response] Checking cache for existing response");
@@ -4650,6 +5031,10 @@ var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey,
       result = await useAnthropicMethod(prompt, components, anthropicApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
     } else if (provider === "groq") {
       result = await useGroqMethod(prompt, components, groqApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+    } else if (provider === "gemini") {
+      result = await useGeminiMethod(prompt, components, geminiApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+    } else if (provider === "openai") {
+      result = await useOpenAIMethod(prompt, components, openaiApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
     } else {
       logger.warn(`[get_user_response] Unknown provider: ${provider} - skipping`);
       errors.push(`Unknown provider: ${provider}`);
@@ -4994,7 +5379,7 @@ var CONTEXT_CONFIG = {
 };
 
 // src/handlers/user-prompt-request.ts
-var get_user_request = async (data, components, sendMessage, anthropicApiKey, groqApiKey, llmProviders, collections, externalTools, userId) => {
+var get_user_request = async (data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, externalTools, userId) => {
   const errors = [];
   logger.debug("[USER_PROMPT_REQ] Parsing incoming message data");
   const parseResult = UserPromptRequestMessageSchema.safeParse(data);
@@ -5069,6 +5454,8 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
     components,
     anthropicApiKey,
     groqApiKey,
+    geminiApiKey,
+    openaiApiKey,
     llmProviders,
     logCollector,
     conversationHistory,
@@ -5167,8 +5554,8 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
     wsId
   };
 };
-async function handleUserPromptRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, llmProviders, collections, externalTools, userId) {
-  const response = await get_user_request(data, components, sendMessage, anthropicApiKey, groqApiKey, llmProviders, collections, externalTools, userId);
+async function handleUserPromptRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, externalTools, userId) {
+  const response = await get_user_request(data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, externalTools, userId);
   sendDataResponse4(
     response.id || data.id,
     {
@@ -5212,19 +5599,20 @@ async function handleUserPromptSuggestions(data, components, sendMessage) {
     }, sendMessage, wsId);
     return;
   }
-  if (!components || components.length === 0) {
+  const displayComponents = components.filter((c) => c.isDisplayComp === true);
+  if (!displayComponents || displayComponents.length === 0) {
     sendResponse(id, {
       success: true,
       data: {
        prompt,
        suggestions: [],
        count: 0,
-        message: "No components available"
+        message: "No display components available"
      }
    }, sendMessage, wsId);
    return;
  }
-  const suggestions = searchComponents(prompt, components, limit);
+  const suggestions = searchComponents(prompt, displayComponents, limit);
   sendResponse(id, {
     success: true,
     data: {
@@ -5248,6 +5636,7 @@ function searchComponents(prompt, components, limit) {
   const scoredComponents = components.map((component) => {
     let score = 0;
     const componentName = component.name.toLowerCase();
+    const componentDisplayName = (component.displayName || "").toLowerCase();
     const componentDesc = component.description.toLowerCase();
     const componentKeywords = (component.keywords || []).map((k) => k.toLowerCase());
     const componentCategory = (component.category || "").toLowerCase();
@@ -5257,6 +5646,9 @@ function searchComponents(prompt, components, limit) {
       } else if (componentName.includes(token)) {
         score += 5;
       }
+      if (componentDisplayName.includes(token)) {
+        score += 6;
+      }
       if (componentKeywords.includes(token)) {
         score += 8;
       } else if (componentKeywords.some((k) => k.includes(token))) {
@@ -5290,13 +5682,13 @@ function sendResponse(id, res, sendMessage, clientId) {
 }
 
 // src/userResponse/next-questions.ts
-async function generateNextQuestions(originalUserPrompt, component, componentData, anthropicApiKey, groqApiKey, llmProviders, logCollector, conversationHistory) {
+async function generateNextQuestions(originalUserPrompt, component, componentData, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, logCollector, conversationHistory) {
   try {
     logger.debug("[generateNextQuestions] Starting next questions generation");
     logger.debug(`[generateNextQuestions] User prompt: "${originalUserPrompt?.substring(0, 50)}..."`);
     logger.debug(`[generateNextQuestions] Component: ${component?.name || "unknown"} (${component?.type || "unknown"})`);
     logger.debug(`[generateNextQuestions] Component data available: ${componentData ? "yes" : "no"}`);
-    const providers = llmProviders || ["anthropic"];
+    const providers = llmProviders || ["anthropic", "gemini", "openai", "groq"];
     logger.info(`[generateNextQuestions] Using LLM providers: [${providers.join(", ")}]`);
     if (conversationHistory && conversationHistory.length > 0) {
       const exchangeCount = conversationHistory.split("\n").filter((l) => l.startsWith("Q")).length;
@@ -5321,6 +5713,26 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
         logCollector,
         conversationHistory
       );
+    } else if (provider === "gemini") {
+      logger.debug("[generateNextQuestions] Using Gemini LLM for next questions");
+      result = await geminiLLM.generateNextQuestions(
+        originalUserPrompt,
+        component,
+        componentData,
+        geminiApiKey,
+        logCollector,
+        conversationHistory
+      );
+    } else if (provider === "openai") {
+      logger.debug("[generateNextQuestions] Using OpenAI LLM for next questions");
+      result = await openaiLLM.generateNextQuestions(
+        originalUserPrompt,
+        component,
+        componentData,
+        openaiApiKey,
+        logCollector,
+        conversationHistory
+      );
     } else {
       logger.debug("[generateNextQuestions] Using Anthropic LLM for next questions");
       result = await anthropicLLM.generateNextQuestions(
@@ -5371,7 +5783,7 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
 }
 
 // src/handlers/actions-request.ts
-async function handleActionsRequest(data, sendMessage, anthropicApiKey, groqApiKey, llmProviders) {
+async function handleActionsRequest(data, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders) {
   try {
     logger.debug("[ACTIONS_REQ] Parsing incoming actions request");
     const actionsRequest = ActionsRequestMessageSchema.parse(data);
@@ -5443,6 +5855,8 @@ ${conversationHistory.substring(0, 200)}...`);
       componentData,
       anthropicApiKey,
       groqApiKey,
+      geminiApiKey,
+      openaiApiKey,
       llmProviders,
       logCollector,
       conversationHistory
@@ -7317,7 +7731,10 @@ var SuperatomSDK = class {
     this.url = config.url || process.env.SA_WEBSOCKET_URL || DEFAULT_WS_URL;
     this.anthropicApiKey = config.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY || "";
     this.groqApiKey = config.GROQ_API_KEY || process.env.GROQ_API_KEY || "";
+    this.geminiApiKey = config.GEMINI_API_KEY || process.env.GEMINI_API_KEY || "";
+    this.openaiApiKey = config.OPENAI_API_KEY || process.env.OPENAI_API_KEY || "";
     this.llmProviders = config.LLM_PROVIDERS || getLLMProviders();
+    logger.info(`Initializing Superatom SDK v${SDK_VERSION} for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, config llm providers: ${config.LLM_PROVIDERS}`);
     this.userManager = new UserManager(this.projectId, 5e3);
     this.dashboardManager = new DashboardManager(this.projectId);
     this.reportManager = new ReportManager(this.projectId);
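
Putting the constructor changes together, SDK consumers can now pass Gemini and OpenAI keys directly, or rely on the GEMINI_API_KEY and OPENAI_API_KEY environment variables. A rough sketch, assuming SuperatomSDK is exported under that name and that options such as projectId carry over from earlier releases:

// Sketch only: the API-key and LLM_PROVIDERS options are the ones read in this constructor;
// the export form and the projectId option are assumptions, not shown in this diff.
import { SuperatomSDK } from "@superatomai/sdk-node";

const sdk = new SuperatomSDK({
  projectId: "my-project",                           // assumed option from earlier versions
  GEMINI_API_KEY: process.env.GEMINI_API_KEY,
  OPENAI_API_KEY: process.env.OPENAI_API_KEY,
  ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY,
  GROQ_API_KEY: process.env.GROQ_API_KEY,
  LLM_PROVIDERS: ["gemini", "openai", "anthropic"]   // falls back to getLLMProviders() when omitted
});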
@@ -7402,7 +7819,9 @@ var SuperatomSDK = class {
     return new Promise((resolve, reject) => {
       try {
         const url = new URL(this.url);
-        url.searchParams.set("apiKey", this.apiKey);
+        if (this.apiKey) {
+          url.searchParams.set("apiKey", this.apiKey);
+        }
         url.searchParams.set("projectId", this.projectId);
         url.searchParams.set("userId", this.userId);
         url.searchParams.set("type", this.type);
@@ -7461,12 +7880,12 @@ var SuperatomSDK = class {
           });
           break;
         case "USER_PROMPT_REQ":
-          handleUserPromptRequest(parsed, this.components, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.llmProviders, this.collections, this.tools, this.userId).catch((error) => {
+          handleUserPromptRequest(parsed, this.components, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.geminiApiKey, this.openaiApiKey, this.llmProviders, this.collections, this.tools, this.userId).catch((error) => {
            logger.error("Failed to handle user prompt request:", error);
          });
          break;
        case "ACTIONS":
-          handleActionsRequest(parsed, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.llmProviders).catch((error) => {
+          handleActionsRequest(parsed, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.geminiApiKey, this.openaiApiKey, this.llmProviders).catch((error) => {
            logger.error("Failed to handle actions request:", error);
          });
          break;