@superatomai/sdk-node 0.0.17 → 0.0.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -469,6 +469,8 @@ var ComponentPropsSchema = import_zod3.z.object({
  var ComponentSchema = import_zod3.z.object({
  id: import_zod3.z.string(),
  name: import_zod3.z.string(),
+ displayName: import_zod3.z.string().optional(),
+ isDisplayComp: import_zod3.z.boolean().optional(),
  type: import_zod3.z.string(),
  description: import_zod3.z.string(),
  props: ComponentPropsSchema,
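
The two new optional fields extend the component registration shape: displayName gives a human-readable label and isDisplayComp marks a component as eligible for prompt suggestions (both are used further down in this diff). A minimal sketch of a component object that would pass the updated ComponentSchema; the field names come from the schema above, while the values and the props shape are illustrative only:

  // Hypothetical component entry; props must satisfy ComponentPropsSchema.
  const revenueChart = {
    id: "revenue-chart",
    name: "RevenueChart",
    displayName: "Revenue Chart",   // new optional field
    isDisplayComp: true,            // new optional field
    type: "chart",
    description: "Monthly revenue shown as a bar chart",
    props: {}
  };
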
@@ -2945,6 +2947,8 @@ var promptLoader = new PromptLoader({
  // src/llm.ts
  var import_sdk = __toESM(require("@anthropic-ai/sdk"));
  var import_groq_sdk = __toESM(require("groq-sdk"));
+ var import_generative_ai = require("@google/generative-ai");
+ var import_openai = __toESM(require("openai"));
  var import_jsonrepair = require("jsonrepair");
  var LLM = class {
  /* Get a complete text response from an LLM (Anthropic or Groq) */
@@ -2954,8 +2958,12 @@ var LLM = class {
  return this._anthropicText(messages, modelName, options);
  } else if (provider === "groq") {
  return this._groqText(messages, modelName, options);
+ } else if (provider === "gemini") {
+ return this._geminiText(messages, modelName, options);
+ } else if (provider === "openai") {
+ return this._openaiText(messages, modelName, options);
  } else {
- throw new Error(`Unsupported provider: ${provider}. Use "anthropic" or "groq"`);
+ throw new Error(`Unsupported provider: ${provider}. Use "anthropic", "groq", "gemini", or "openai"`);
  }
  }
  /* Stream response from an LLM (Anthropic or Groq) */
@@ -2965,17 +2973,26 @@ var LLM = class {
  return this._anthropicStream(messages, modelName, options, json);
  } else if (provider === "groq") {
  return this._groqStream(messages, modelName, options, json);
+ } else if (provider === "gemini") {
+ return this._geminiStream(messages, modelName, options, json);
+ } else if (provider === "openai") {
+ return this._openaiStream(messages, modelName, options, json);
  } else {
- throw new Error(`Unsupported provider: ${provider}. Use "anthropic" or "groq"`);
+ throw new Error(`Unsupported provider: ${provider}. Use "anthropic", "groq", "gemini", or "openai"`);
  }
  }
- /* Stream response with tool calling support (Anthropic only for now) */
+ /* Stream response with tool calling support (Anthropic and Gemini) */
  static async streamWithTools(messages, tools, toolHandler, options = {}, maxIterations = 3) {
  const [provider, modelName] = this._parseModel(options.model);
- if (provider !== "anthropic") {
- throw new Error(`Tool calling is only supported for Anthropic models`);
+ if (provider === "anthropic") {
+ return this._anthropicStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
+ } else if (provider === "gemini") {
+ return this._geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
+ } else if (provider === "openai") {
+ return this._openaiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
+ } else {
+ throw new Error(`Tool calling is only supported for Anthropic, Gemini, and OpenAI models`);
  }
- return this._anthropicStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
  }
  // ============================================================
  // PRIVATE HELPER METHODS
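
streamWithTools now dispatches on the provider prefix of options.model instead of rejecting anything that is not Anthropic. A hedged sketch of a call that exercises the new Gemini path, assuming the LLM class is importable from the package: the messages shape ({ sys, user }), the tool shape with input_schema, and the options fields are taken from this file, while the weather tool, its handler, and all values are invented for illustration.

  // Illustrative only: "gemini/<model>" is split by _parseModel into provider and model name.
  const tools = [{
    name: "get_weather",
    description: "Look up the current weather for a city",
    input_schema: {
      type: "object",
      properties: { city: { type: "string" } },
      required: ["city"]
    }
  }];
  const answer = await LLM.streamWithTools(
    { sys: "You are a helpful assistant.", user: "What is the weather in Paris?" },
    tools,
    async (name, args) => ({ city: args.city, tempC: 21 }),   // toolHandler stub
    {
      model: "gemini/gemini-2.5-flash",
      maxTokens: 4000,
      partial: (chunk) => process.stdout.write(chunk)
    }
  );
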
@@ -3309,6 +3326,298 @@ var LLM = class {
  return fullText;
  }
  // ============================================================
+ // GEMINI IMPLEMENTATION
+ // ============================================================
+ static async _geminiText(messages, modelName, options) {
+ const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
+ const genAI = new import_generative_ai.GoogleGenerativeAI(apiKey);
+ const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+ const model = genAI.getGenerativeModel({
+ model: modelName,
+ systemInstruction: systemPrompt,
+ generationConfig: {
+ maxOutputTokens: options.maxTokens || 1e3,
+ temperature: options.temperature,
+ topP: options.topP
+ }
+ });
+ const result = await model.generateContent(messages.user);
+ const response = await result.response;
+ return response.text();
+ }
+ static async _geminiStream(messages, modelName, options, json) {
+ const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
+ const genAI = new import_generative_ai.GoogleGenerativeAI(apiKey);
+ const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+ const model = genAI.getGenerativeModel({
+ model: modelName,
+ systemInstruction: systemPrompt,
+ generationConfig: {
+ maxOutputTokens: options.maxTokens || 1e3,
+ temperature: options.temperature,
+ topP: options.topP,
+ responseMimeType: json ? "application/json" : void 0
+ }
+ });
+ const result = await model.generateContentStream(messages.user);
+ let fullText = "";
+ for await (const chunk of result.stream) {
+ const text = chunk.text();
+ if (text) {
+ fullText += text;
+ if (options.partial) {
+ options.partial(text);
+ }
+ }
+ }
+ if (json) {
+ return this._parseJSON(fullText);
+ }
+ return fullText;
+ }
+ static async _geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
+ const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
+ const genAI = new import_generative_ai.GoogleGenerativeAI(apiKey);
+ const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+ const functionDeclarations = tools.map((tool) => ({
+ name: tool.name,
+ description: tool.description,
+ parameters: {
+ type: import_generative_ai.SchemaType.OBJECT,
+ properties: tool.input_schema.properties,
+ required: tool.input_schema.required || []
+ }
+ }));
+ const model = genAI.getGenerativeModel({
+ model: modelName,
+ systemInstruction: systemPrompt,
+ tools: [{ functionDeclarations }],
+ generationConfig: {
+ maxOutputTokens: options.maxTokens || 4e3,
+ temperature: options.temperature,
+ topP: options.topP
+ }
+ });
+ const chat = model.startChat({
+ history: []
+ });
+ let iterations = 0;
+ let finalText = "";
+ let currentUserMessage = messages.user;
+ while (iterations < maxIterations) {
+ iterations++;
+ const result = await chat.sendMessageStream(currentUserMessage);
+ let responseText = "";
+ const functionCalls = [];
+ for await (const chunk of result.stream) {
+ const candidate = chunk.candidates?.[0];
+ if (!candidate) continue;
+ for (const part of candidate.content?.parts || []) {
+ if (part.text) {
+ responseText += part.text;
+ if (options.partial) {
+ options.partial(part.text);
+ }
+ } else if (part.functionCall) {
+ functionCalls.push({
+ name: part.functionCall.name,
+ args: part.functionCall.args
+ });
+ }
+ }
+ }
+ if (functionCalls.length === 0) {
+ finalText = responseText;
+ break;
+ }
+ const functionResponses = [];
+ for (const fc of functionCalls) {
+ try {
+ const result2 = await toolHandler(fc.name, fc.args);
+ functionResponses.push({
+ name: fc.name,
+ response: { result: typeof result2 === "string" ? result2 : JSON.stringify(result2) }
+ });
+ } catch (error) {
+ functionResponses.push({
+ name: fc.name,
+ response: { error: error instanceof Error ? error.message : String(error) }
+ });
+ }
+ }
+ const functionResponseParts = functionResponses.map((fr) => ({
+ functionResponse: {
+ name: fr.name,
+ response: fr.response
+ }
+ }));
+ currentUserMessage = functionResponseParts;
+ }
+ if (iterations >= maxIterations) {
+ throw new Error(`Max iterations (${maxIterations}) reached in tool calling loop`);
+ }
+ return finalText;
+ }
+ // ============================================================
+ // OPENAI IMPLEMENTATION
+ // ============================================================
+ static async _openaiText(messages, modelName, options) {
+ const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
+ const openai = new import_openai.default({ apiKey });
+ const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+ const response = await openai.chat.completions.create({
+ model: modelName,
+ messages: [
+ { role: "system", content: systemPrompt },
+ { role: "user", content: messages.user }
+ ],
+ max_tokens: options.maxTokens || 1e3,
+ temperature: options.temperature,
+ top_p: options.topP
+ });
+ return response.choices[0]?.message?.content || "";
+ }
+ static async _openaiStream(messages, modelName, options, json) {
+ const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
+ const openai = new import_openai.default({ apiKey });
+ const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+ const stream = await openai.chat.completions.create({
+ model: modelName,
+ messages: [
+ { role: "system", content: systemPrompt },
+ { role: "user", content: messages.user }
+ ],
+ max_tokens: options.maxTokens || 1e3,
+ temperature: options.temperature,
+ top_p: options.topP,
+ response_format: json ? { type: "json_object" } : void 0,
+ stream: true
+ });
+ let fullText = "";
+ for await (const chunk of stream) {
+ const content = chunk.choices[0]?.delta?.content || "";
+ if (content) {
+ fullText += content;
+ if (options.partial) {
+ options.partial(content);
+ }
+ }
+ }
+ if (json) {
+ return this._parseJSON(fullText);
+ }
+ return fullText;
+ }
+ static async _openaiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
+ const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
+ const openai = new import_openai.default({ apiKey });
+ const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+ const openaiTools = tools.map((tool) => ({
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: {
+ type: tool.input_schema.type,
+ properties: tool.input_schema.properties,
+ required: tool.input_schema.required || []
+ }
+ }
+ }));
+ const conversationMessages = [
+ { role: "system", content: systemPrompt },
+ { role: "user", content: messages.user }
+ ];
+ let iterations = 0;
+ let finalText = "";
+ while (iterations < maxIterations) {
+ iterations++;
+ const stream = await openai.chat.completions.create({
+ model: modelName,
+ messages: conversationMessages,
+ max_tokens: options.maxTokens || 4e3,
+ temperature: options.temperature,
+ top_p: options.topP,
+ tools: openaiTools,
+ stream: true
+ });
+ let responseText = "";
+ const toolCalls = [];
+ const toolCallsInProgress = /* @__PURE__ */ new Map();
+ for await (const chunk of stream) {
+ const delta = chunk.choices[0]?.delta;
+ if (delta?.content) {
+ responseText += delta.content;
+ if (options.partial) {
+ options.partial(delta.content);
+ }
+ }
+ if (delta?.tool_calls) {
+ for (const toolCallDelta of delta.tool_calls) {
+ const index = toolCallDelta.index;
+ if (!toolCallsInProgress.has(index)) {
+ toolCallsInProgress.set(index, {
+ id: toolCallDelta.id || "",
+ name: toolCallDelta.function?.name || "",
+ arguments: ""
+ });
+ }
+ const tc = toolCallsInProgress.get(index);
+ if (toolCallDelta.id) {
+ tc.id = toolCallDelta.id;
+ }
+ if (toolCallDelta.function?.name) {
+ tc.name = toolCallDelta.function.name;
+ }
+ if (toolCallDelta.function?.arguments) {
+ tc.arguments += toolCallDelta.function.arguments;
+ }
+ }
+ }
+ }
+ for (const tc of toolCallsInProgress.values()) {
+ if (tc.id && tc.name) {
+ toolCalls.push(tc);
+ }
+ }
+ if (toolCalls.length === 0) {
+ finalText = responseText;
+ break;
+ }
+ conversationMessages.push({
+ role: "assistant",
+ content: responseText || null,
+ tool_calls: toolCalls.map((tc) => ({
+ id: tc.id,
+ type: "function",
+ function: {
+ name: tc.name,
+ arguments: tc.arguments
+ }
+ }))
+ });
+ for (const tc of toolCalls) {
+ let result;
+ try {
+ const args = JSON.parse(tc.arguments);
+ const toolResult = await toolHandler(tc.name, args);
+ result = typeof toolResult === "string" ? toolResult : JSON.stringify(toolResult);
+ } catch (error) {
+ result = JSON.stringify({ error: error instanceof Error ? error.message : String(error) });
+ }
+ conversationMessages.push({
+ role: "tool",
+ tool_call_id: tc.id,
+ content: result
+ });
+ }
+ }
+ if (iterations >= maxIterations) {
+ throw new Error(`Max iterations (${maxIterations}) reached in tool calling loop`);
+ }
+ return finalText;
+ }
+ // ============================================================
  // JSON PARSING HELPER
  // ============================================================
  /**
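
Both new backends consume the same options object as the existing Anthropic and Groq paths, and fall back to the GEMINI_API_KEY / OPENAI_API_KEY environment variables when options.apiKey is not set. A sketch of that options shape; only the field names come from the code above, the values are placeholders:

  // Field names taken from the implementations above; values are illustrative.
  const options = {
    model: "openai/gpt-4.1",                  // "provider/model" string split by _parseModel
    apiKey: process.env.OPENAI_API_KEY,       // optional; the env var is already the fallback
    maxTokens: 1000,                          // defaults to 1e3 (4e3 in the tool-calling paths)
    temperature: 0.2,
    topP: 0.9,
    partial: (text) => process.stdout.write(text)   // streaming callback
  };
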
@@ -4597,18 +4906,56 @@ var AnthropicLLM = class extends BaseLLM {
  };
  var anthropicLLM = new AnthropicLLM();
 
- // src/userResponse/index.ts
+ // src/userResponse/gemini.ts
  var import_dotenv3 = __toESM(require("dotenv"));
  import_dotenv3.default.config();
+ var GeminiLLM = class extends BaseLLM {
+ constructor(config) {
+ super(config);
+ }
+ getDefaultModel() {
+ return "gemini/gemini-2.5-flash";
+ }
+ getDefaultApiKey() {
+ return process.env.GEMINI_API_KEY;
+ }
+ getProviderName() {
+ return "Gemini";
+ }
+ };
+ var geminiLLM = new GeminiLLM();
+
+ // src/userResponse/openai.ts
+ var import_dotenv4 = __toESM(require("dotenv"));
+ import_dotenv4.default.config();
+ var OpenAILLM = class extends BaseLLM {
+ constructor(config) {
+ super(config);
+ }
+ getDefaultModel() {
+ return "openai/gpt-4.1";
+ }
+ getDefaultApiKey() {
+ return process.env.OPENAI_API_KEY;
+ }
+ getProviderName() {
+ return "OpenAI";
+ }
+ };
+ var openaiLLM = new OpenAILLM();
+
+ // src/userResponse/index.ts
+ var import_dotenv5 = __toESM(require("dotenv"));
+ import_dotenv5.default.config();
  function getLLMProviders() {
  const envProviders = process.env.LLM_PROVIDERS;
- const DEFAULT_PROVIDERS = ["anthropic", "groq"];
+ const DEFAULT_PROVIDERS = ["anthropic", "gemini", "openai", "groq"];
  if (!envProviders) {
  return DEFAULT_PROVIDERS;
  }
  try {
  const providers = JSON.parse(envProviders);
- const validProviders = providers.filter((p) => p === "anthropic" || p === "groq");
+ const validProviders = providers.filter((p) => p === "anthropic" || p === "groq" || p === "gemini" || p === "openai");
  if (validProviders.length === 0) {
  return DEFAULT_PROVIDERS;
  }
  }
@@ -4651,10 +4998,44 @@ var useGroqMethod = async (prompt, components, apiKey, logCollector, conversatio
  logger.info(`[useGroqMethod] Successfully generated ${responseMode} using Groq`);
  return matchResult;
  };
+ var useGeminiMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
+ logger.debug("[useGeminiMethod] Initializing Gemini LLM matching method");
+ logger.debug(`[useGeminiMethod] Response mode: ${responseMode}`);
+ const msg = `Using Gemini LLM ${responseMode === "text" ? "text response" : "matching"} method...`;
+ logger.info(msg);
+ logCollector?.info(msg);
+ if (responseMode === "component" && components.length === 0) {
+ const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
+ logger.error("[useGeminiMethod] No components available");
+ logCollector?.error(emptyMsg);
+ return { success: false, errors: [emptyMsg] };
+ }
+ logger.debug(`[useGeminiMethod] Processing with ${components.length} components`);
+ const matchResult = await geminiLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+ logger.info(`[useGeminiMethod] Successfully generated ${responseMode} using Gemini`);
+ return matchResult;
+ };
+ var useOpenAIMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
+ logger.debug("[useOpenAIMethod] Initializing OpenAI GPT matching method");
+ logger.debug(`[useOpenAIMethod] Response mode: ${responseMode}`);
+ const msg = `Using OpenAI GPT ${responseMode === "text" ? "text response" : "matching"} method...`;
+ logger.info(msg);
+ logCollector?.info(msg);
+ if (responseMode === "component" && components.length === 0) {
+ const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
+ logger.error("[useOpenAIMethod] No components available");
+ logCollector?.error(emptyMsg);
+ return { success: false, errors: [emptyMsg] };
+ }
+ logger.debug(`[useOpenAIMethod] Processing with ${components.length} components`);
+ const matchResult = await openaiLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+ logger.info(`[useOpenAIMethod] Successfully generated ${responseMode} using OpenAI`);
+ return matchResult;
+ };
  var getUserResponseFromCache = async (prompt) => {
  return false;
  };
- var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey, llmProviders, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
+ var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
  logger.debug(`[get_user_response] Starting user response generation for prompt: "${prompt.substring(0, 50)}..."`);
  logger.debug(`[get_user_response] Response mode: ${responseMode}`);
  logger.debug("[get_user_response] Checking cache for existing response");
@@ -4690,6 +5071,10 @@ var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey,
  result = await useAnthropicMethod(prompt, components, anthropicApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
  } else if (provider === "groq") {
  result = await useGroqMethod(prompt, components, groqApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+ } else if (provider === "gemini") {
+ result = await useGeminiMethod(prompt, components, geminiApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+ } else if (provider === "openai") {
+ result = await useOpenAIMethod(prompt, components, openaiApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
  } else {
  logger.warn(`[get_user_response] Unknown provider: ${provider} - skipping`);
  errors.push(`Unknown provider: ${provider}`);
@@ -5034,7 +5419,7 @@ var CONTEXT_CONFIG = {
  };
 
  // src/handlers/user-prompt-request.ts
- var get_user_request = async (data, components, sendMessage, anthropicApiKey, groqApiKey, llmProviders, collections, externalTools, userId) => {
+ var get_user_request = async (data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, externalTools, userId) => {
  const errors = [];
  logger.debug("[USER_PROMPT_REQ] Parsing incoming message data");
  const parseResult = UserPromptRequestMessageSchema.safeParse(data);
@@ -5109,6 +5494,8 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
  components,
  anthropicApiKey,
  groqApiKey,
+ geminiApiKey,
+ openaiApiKey,
  llmProviders,
  logCollector,
  conversationHistory,
@@ -5207,8 +5594,8 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
  wsId
  };
  };
- async function handleUserPromptRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, llmProviders, collections, externalTools, userId) {
- const response = await get_user_request(data, components, sendMessage, anthropicApiKey, groqApiKey, llmProviders, collections, externalTools, userId);
+ async function handleUserPromptRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, externalTools, userId) {
+ const response = await get_user_request(data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, externalTools, userId);
  sendDataResponse4(
  response.id || data.id,
  {
@@ -5252,19 +5639,20 @@ async function handleUserPromptSuggestions(data, components, sendMessage) {
  }, sendMessage, wsId);
  return;
  }
- if (!components || components.length === 0) {
+ const displayComponents = components.filter((c) => c.isDisplayComp === true);
+ if (!displayComponents || displayComponents.length === 0) {
  sendResponse(id, {
  success: true,
  data: {
  prompt,
  suggestions: [],
  count: 0,
- message: "No components available"
+ message: "No display components available"
  }
  }, sendMessage, wsId);
  return;
  }
- const suggestions = searchComponents(prompt, components, limit);
+ const suggestions = searchComponents(prompt, displayComponents, limit);
  sendResponse(id, {
  success: true,
  data: {
@@ -5288,6 +5676,7 @@ function searchComponents(prompt, components, limit) {
  const scoredComponents = components.map((component) => {
  let score = 0;
  const componentName = component.name.toLowerCase();
+ const componentDisplayName = (component.displayName || "").toLowerCase();
  const componentDesc = component.description.toLowerCase();
  const componentKeywords = (component.keywords || []).map((k) => k.toLowerCase());
  const componentCategory = (component.category || "").toLowerCase();
@@ -5297,6 +5686,9 @@ function searchComponents(prompt, components, limit) {
  } else if (componentName.includes(token)) {
  score += 5;
  }
+ if (componentDisplayName.includes(token)) {
+ score += 6;
+ }
  if (componentKeywords.includes(token)) {
  score += 8;
  } else if (componentKeywords.some((k) => k.includes(token))) {
@@ -5330,13 +5722,13 @@ function sendResponse(id, res, sendMessage, clientId) {
  }
 
  // src/userResponse/next-questions.ts
- async function generateNextQuestions(originalUserPrompt, component, componentData, anthropicApiKey, groqApiKey, llmProviders, logCollector, conversationHistory) {
+ async function generateNextQuestions(originalUserPrompt, component, componentData, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, logCollector, conversationHistory) {
  try {
  logger.debug("[generateNextQuestions] Starting next questions generation");
  logger.debug(`[generateNextQuestions] User prompt: "${originalUserPrompt?.substring(0, 50)}..."`);
  logger.debug(`[generateNextQuestions] Component: ${component?.name || "unknown"} (${component?.type || "unknown"})`);
  logger.debug(`[generateNextQuestions] Component data available: ${componentData ? "yes" : "no"}`);
- const providers = llmProviders || ["anthropic"];
+ const providers = llmProviders || ["anthropic", "gemini", "openai", "groq"];
  logger.info(`[generateNextQuestions] Using LLM providers: [${providers.join(", ")}]`);
  if (conversationHistory && conversationHistory.length > 0) {
  const exchangeCount = conversationHistory.split("\n").filter((l) => l.startsWith("Q")).length;
@@ -5361,6 +5753,26 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
  logCollector,
  conversationHistory
  );
+ } else if (provider === "gemini") {
+ logger.debug("[generateNextQuestions] Using Gemini LLM for next questions");
+ result = await geminiLLM.generateNextQuestions(
+ originalUserPrompt,
+ component,
+ componentData,
+ geminiApiKey,
+ logCollector,
+ conversationHistory
+ );
+ } else if (provider === "openai") {
+ logger.debug("[generateNextQuestions] Using OpenAI LLM for next questions");
+ result = await openaiLLM.generateNextQuestions(
+ originalUserPrompt,
+ component,
+ componentData,
+ openaiApiKey,
+ logCollector,
+ conversationHistory
+ );
  } else {
  logger.debug("[generateNextQuestions] Using Anthropic LLM for next questions");
  result = await anthropicLLM.generateNextQuestions(
@@ -5411,7 +5823,7 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
  }
 
  // src/handlers/actions-request.ts
- async function handleActionsRequest(data, sendMessage, anthropicApiKey, groqApiKey, llmProviders) {
+ async function handleActionsRequest(data, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders) {
  try {
  logger.debug("[ACTIONS_REQ] Parsing incoming actions request");
  const actionsRequest = ActionsRequestMessageSchema.parse(data);
@@ -5483,6 +5895,8 @@ ${conversationHistory.substring(0, 200)}...`);
  componentData,
  anthropicApiKey,
  groqApiKey,
+ geminiApiKey,
+ openaiApiKey,
  llmProviders,
  logCollector,
  conversationHistory
@@ -7357,7 +7771,10 @@ var SuperatomSDK = class {
  this.url = config.url || process.env.SA_WEBSOCKET_URL || DEFAULT_WS_URL;
  this.anthropicApiKey = config.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY || "";
  this.groqApiKey = config.GROQ_API_KEY || process.env.GROQ_API_KEY || "";
+ this.geminiApiKey = config.GEMINI_API_KEY || process.env.GEMINI_API_KEY || "";
+ this.openaiApiKey = config.OPENAI_API_KEY || process.env.OPENAI_API_KEY || "";
  this.llmProviders = config.LLM_PROVIDERS || getLLMProviders();
+ logger.info(`Initializing Superatom SDK v${SDK_VERSION} for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, config llm providers: ${config.LLM_PROVIDERS}`);
  this.userManager = new UserManager(this.projectId, 5e3);
  this.dashboardManager = new DashboardManager(this.projectId);
  this.reportManager = new ReportManager(this.projectId);
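
A hedged sketch of initializing the SDK with the new provider keys. The GEMINI_API_KEY, OPENAI_API_KEY, and LLM_PROVIDERS config fields come from this constructor; projectId is assumed to be part of the same config object (it is referenced as this.projectId above), and all values are placeholders. The keys can also be supplied through environment variables instead:

  // Placeholder values; omit a key to fall back to the matching environment variable.
  const sdk = new SuperatomSDK({
    projectId: "my-project",                          // assumed existing config field
    GEMINI_API_KEY: process.env.GEMINI_API_KEY,
    OPENAI_API_KEY: process.env.OPENAI_API_KEY,
    LLM_PROVIDERS: ["gemini", "openai", "anthropic"]
  });
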
@@ -7442,7 +7859,9 @@ var SuperatomSDK = class {
  return new Promise((resolve, reject) => {
  try {
  const url = new URL(this.url);
- url.searchParams.set("apiKey", this.apiKey);
+ if (this.apiKey) {
+ url.searchParams.set("apiKey", this.apiKey);
+ }
  url.searchParams.set("projectId", this.projectId);
  url.searchParams.set("userId", this.userId);
  url.searchParams.set("type", this.type);
@@ -7501,12 +7920,12 @@ var SuperatomSDK = class {
  });
  break;
  case "USER_PROMPT_REQ":
- handleUserPromptRequest(parsed, this.components, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.llmProviders, this.collections, this.tools, this.userId).catch((error) => {
+ handleUserPromptRequest(parsed, this.components, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.geminiApiKey, this.openaiApiKey, this.llmProviders, this.collections, this.tools, this.userId).catch((error) => {
  logger.error("Failed to handle user prompt request:", error);
  });
  break;
  case "ACTIONS":
- handleActionsRequest(parsed, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.llmProviders).catch((error) => {
+ handleActionsRequest(parsed, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.geminiApiKey, this.openaiApiKey, this.llmProviders).catch((error) => {
  logger.error("Failed to handle actions request:", error);
  });
  break;