@superatomai/sdk-node 0.0.18 → 0.0.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +942 -942
- package/dist/index.d.mts +13 -3
- package/dist/index.d.ts +13 -3
- package/dist/index.js +431 -19
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +431 -19
- package/dist/index.mjs.map +1 -1
- package/package.json +3 -1
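
The substance of 0.0.19 is new Gemini and OpenAI provider support alongside the existing Anthropic and Groq paths: new imports of @google/generative-ai and openai, _gemini*/_openai* implementations on the LLM class, GeminiLLM/OpenAILLM wrappers, and geminiApiKey/openaiApiKey parameters threaded through the handlers and the SuperatomSDK constructor. Based on the constructor fields read in the diff below, configuration would presumably look roughly like this (a sketch only; projectId and the exact exported constructor shape are assumptions, while the key and provider fields are taken from the diff):

// Hypothetical usage sketch, not taken from the package README.
// Per the diff, the constructor falls back to ANTHROPIC_API_KEY, GROQ_API_KEY,
// GEMINI_API_KEY, OPENAI_API_KEY and LLM_PROVIDERS from the environment when
// these fields are omitted.
const sdk = new SuperatomSDK({
  projectId: "my-project",                     // assumed field, not shown in this diff
  GEMINI_API_KEY: process.env.GEMINI_API_KEY,  // new in 0.0.19
  OPENAI_API_KEY: process.env.OPENAI_API_KEY,  // new in 0.0.19
  LLM_PROVIDERS: ["anthropic", "gemini", "openai", "groq"]
});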
package/dist/index.mjs
CHANGED
@@ -2907,6 +2907,8 @@ var promptLoader = new PromptLoader({
 // src/llm.ts
 import Anthropic from "@anthropic-ai/sdk";
 import Groq from "groq-sdk";
+import { GoogleGenerativeAI, SchemaType } from "@google/generative-ai";
+import OpenAI from "openai";
 import { jsonrepair } from "jsonrepair";
 var LLM = class {
   /* Get a complete text response from an LLM (Anthropic or Groq) */
@@ -2916,8 +2918,12 @@ var LLM = class {
       return this._anthropicText(messages, modelName, options);
     } else if (provider === "groq") {
       return this._groqText(messages, modelName, options);
+    } else if (provider === "gemini") {
+      return this._geminiText(messages, modelName, options);
+    } else if (provider === "openai") {
+      return this._openaiText(messages, modelName, options);
     } else {
-      throw new Error(`Unsupported provider: ${provider}. Use "anthropic" or "
+      throw new Error(`Unsupported provider: ${provider}. Use "anthropic", "groq", "gemini", or "openai"`);
     }
   }
   /* Stream response from an LLM (Anthropic or Groq) */
@@ -2927,17 +2933,26 @@ var LLM = class {
       return this._anthropicStream(messages, modelName, options, json);
     } else if (provider === "groq") {
       return this._groqStream(messages, modelName, options, json);
+    } else if (provider === "gemini") {
+      return this._geminiStream(messages, modelName, options, json);
+    } else if (provider === "openai") {
+      return this._openaiStream(messages, modelName, options, json);
     } else {
-      throw new Error(`Unsupported provider: ${provider}. Use "anthropic" or "
+      throw new Error(`Unsupported provider: ${provider}. Use "anthropic", "groq", "gemini", or "openai"`);
     }
   }
-  /* Stream response with tool calling support (Anthropic
+  /* Stream response with tool calling support (Anthropic and Gemini) */
   static async streamWithTools(messages, tools, toolHandler, options = {}, maxIterations = 3) {
     const [provider, modelName] = this._parseModel(options.model);
-    if (provider
-
+    if (provider === "anthropic") {
+      return this._anthropicStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
+    } else if (provider === "gemini") {
+      return this._geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
+    } else if (provider === "openai") {
+      return this._openaiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
+    } else {
+      throw new Error(`Tool calling is only supported for Anthropic, Gemini, and OpenAI models`);
     }
-    return this._anthropicStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
   }
   // ============================================================
   // PRIVATE HELPER METHODS
@@ -3271,6 +3286,298 @@ var LLM = class {
     return fullText;
   }
   // ============================================================
+  // GEMINI IMPLEMENTATION
+  // ============================================================
+  static async _geminiText(messages, modelName, options) {
+    const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
+    const genAI = new GoogleGenerativeAI(apiKey);
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const model = genAI.getGenerativeModel({
+      model: modelName,
+      systemInstruction: systemPrompt,
+      generationConfig: {
+        maxOutputTokens: options.maxTokens || 1e3,
+        temperature: options.temperature,
+        topP: options.topP
+      }
+    });
+    const result = await model.generateContent(messages.user);
+    const response = await result.response;
+    return response.text();
+  }
+  static async _geminiStream(messages, modelName, options, json) {
+    const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
+    const genAI = new GoogleGenerativeAI(apiKey);
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const model = genAI.getGenerativeModel({
+      model: modelName,
+      systemInstruction: systemPrompt,
+      generationConfig: {
+        maxOutputTokens: options.maxTokens || 1e3,
+        temperature: options.temperature,
+        topP: options.topP,
+        responseMimeType: json ? "application/json" : void 0
+      }
+    });
+    const result = await model.generateContentStream(messages.user);
+    let fullText = "";
+    for await (const chunk of result.stream) {
+      const text = chunk.text();
+      if (text) {
+        fullText += text;
+        if (options.partial) {
+          options.partial(text);
+        }
+      }
+    }
+    if (json) {
+      return this._parseJSON(fullText);
+    }
+    return fullText;
+  }
+  static async _geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
+    const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
+    const genAI = new GoogleGenerativeAI(apiKey);
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const functionDeclarations = tools.map((tool) => ({
+      name: tool.name,
+      description: tool.description,
+      parameters: {
+        type: SchemaType.OBJECT,
+        properties: tool.input_schema.properties,
+        required: tool.input_schema.required || []
+      }
+    }));
+    const model = genAI.getGenerativeModel({
+      model: modelName,
+      systemInstruction: systemPrompt,
+      tools: [{ functionDeclarations }],
+      generationConfig: {
+        maxOutputTokens: options.maxTokens || 4e3,
+        temperature: options.temperature,
+        topP: options.topP
+      }
+    });
+    const chat = model.startChat({
+      history: []
+    });
+    let iterations = 0;
+    let finalText = "";
+    let currentUserMessage = messages.user;
+    while (iterations < maxIterations) {
+      iterations++;
+      const result = await chat.sendMessageStream(currentUserMessage);
+      let responseText = "";
+      const functionCalls = [];
+      for await (const chunk of result.stream) {
+        const candidate = chunk.candidates?.[0];
+        if (!candidate) continue;
+        for (const part of candidate.content?.parts || []) {
+          if (part.text) {
+            responseText += part.text;
+            if (options.partial) {
+              options.partial(part.text);
+            }
+          } else if (part.functionCall) {
+            functionCalls.push({
+              name: part.functionCall.name,
+              args: part.functionCall.args
+            });
+          }
+        }
+      }
+      if (functionCalls.length === 0) {
+        finalText = responseText;
+        break;
+      }
+      const functionResponses = [];
+      for (const fc of functionCalls) {
+        try {
+          const result2 = await toolHandler(fc.name, fc.args);
+          functionResponses.push({
+            name: fc.name,
+            response: { result: typeof result2 === "string" ? result2 : JSON.stringify(result2) }
+          });
+        } catch (error) {
+          functionResponses.push({
+            name: fc.name,
+            response: { error: error instanceof Error ? error.message : String(error) }
+          });
+        }
+      }
+      const functionResponseParts = functionResponses.map((fr) => ({
+        functionResponse: {
+          name: fr.name,
+          response: fr.response
+        }
+      }));
+      currentUserMessage = functionResponseParts;
+    }
+    if (iterations >= maxIterations) {
+      throw new Error(`Max iterations (${maxIterations}) reached in tool calling loop`);
+    }
+    return finalText;
+  }
+  // ============================================================
+  // OPENAI IMPLEMENTATION
+  // ============================================================
+  static async _openaiText(messages, modelName, options) {
+    const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
+    const openai = new OpenAI({ apiKey });
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const response = await openai.chat.completions.create({
+      model: modelName,
+      messages: [
+        { role: "system", content: systemPrompt },
+        { role: "user", content: messages.user }
+      ],
+      max_tokens: options.maxTokens || 1e3,
+      temperature: options.temperature,
+      top_p: options.topP
+    });
+    return response.choices[0]?.message?.content || "";
+  }
+  static async _openaiStream(messages, modelName, options, json) {
+    const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
+    const openai = new OpenAI({ apiKey });
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const stream = await openai.chat.completions.create({
+      model: modelName,
+      messages: [
+        { role: "system", content: systemPrompt },
+        { role: "user", content: messages.user }
+      ],
+      max_tokens: options.maxTokens || 1e3,
+      temperature: options.temperature,
+      top_p: options.topP,
+      response_format: json ? { type: "json_object" } : void 0,
+      stream: true
+    });
+    let fullText = "";
+    for await (const chunk of stream) {
+      const content = chunk.choices[0]?.delta?.content || "";
+      if (content) {
+        fullText += content;
+        if (options.partial) {
+          options.partial(content);
+        }
+      }
+    }
+    if (json) {
+      return this._parseJSON(fullText);
+    }
+    return fullText;
+  }
+  static async _openaiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
+    const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
+    const openai = new OpenAI({ apiKey });
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const openaiTools = tools.map((tool) => ({
+      type: "function",
+      function: {
+        name: tool.name,
+        description: tool.description,
+        parameters: {
+          type: tool.input_schema.type,
+          properties: tool.input_schema.properties,
+          required: tool.input_schema.required || []
+        }
+      }
+    }));
+    const conversationMessages = [
+      { role: "system", content: systemPrompt },
+      { role: "user", content: messages.user }
+    ];
+    let iterations = 0;
+    let finalText = "";
+    while (iterations < maxIterations) {
+      iterations++;
+      const stream = await openai.chat.completions.create({
+        model: modelName,
+        messages: conversationMessages,
+        max_tokens: options.maxTokens || 4e3,
+        temperature: options.temperature,
+        top_p: options.topP,
+        tools: openaiTools,
+        stream: true
+      });
+      let responseText = "";
+      const toolCalls = [];
+      const toolCallsInProgress = /* @__PURE__ */ new Map();
+      for await (const chunk of stream) {
+        const delta = chunk.choices[0]?.delta;
+        if (delta?.content) {
+          responseText += delta.content;
+          if (options.partial) {
+            options.partial(delta.content);
+          }
+        }
+        if (delta?.tool_calls) {
+          for (const toolCallDelta of delta.tool_calls) {
+            const index = toolCallDelta.index;
+            if (!toolCallsInProgress.has(index)) {
+              toolCallsInProgress.set(index, {
+                id: toolCallDelta.id || "",
+                name: toolCallDelta.function?.name || "",
+                arguments: ""
+              });
+            }
+            const tc = toolCallsInProgress.get(index);
+            if (toolCallDelta.id) {
+              tc.id = toolCallDelta.id;
+            }
+            if (toolCallDelta.function?.name) {
+              tc.name = toolCallDelta.function.name;
+            }
+            if (toolCallDelta.function?.arguments) {
+              tc.arguments += toolCallDelta.function.arguments;
+            }
+          }
+        }
+      }
+      for (const tc of toolCallsInProgress.values()) {
+        if (tc.id && tc.name) {
+          toolCalls.push(tc);
+        }
+      }
+      if (toolCalls.length === 0) {
+        finalText = responseText;
+        break;
+      }
+      conversationMessages.push({
+        role: "assistant",
+        content: responseText || null,
+        tool_calls: toolCalls.map((tc) => ({
+          id: tc.id,
+          type: "function",
+          function: {
+            name: tc.name,
+            arguments: tc.arguments
+          }
+        }))
+      });
+      for (const tc of toolCalls) {
+        let result;
+        try {
+          const args = JSON.parse(tc.arguments);
+          const toolResult = await toolHandler(tc.name, args);
+          result = typeof toolResult === "string" ? toolResult : JSON.stringify(toolResult);
+        } catch (error) {
+          result = JSON.stringify({ error: error instanceof Error ? error.message : String(error) });
+        }
+        conversationMessages.push({
+          role: "tool",
+          tool_call_id: tc.id,
+          content: result
+        });
+      }
+    }
+    if (iterations >= maxIterations) {
+      throw new Error(`Max iterations (${maxIterations}) reached in tool calling loop`);
+    }
+    return finalText;
+  }
+  // ============================================================
   // JSON PARSING HELPER
   // ============================================================
   /**
@@ -4559,18 +4866,56 @@ var AnthropicLLM = class extends BaseLLM {
 };
 var anthropicLLM = new AnthropicLLM();
 
-// src/userResponse/
+// src/userResponse/gemini.ts
 import dotenv3 from "dotenv";
 dotenv3.config();
+var GeminiLLM = class extends BaseLLM {
+  constructor(config) {
+    super(config);
+  }
+  getDefaultModel() {
+    return "gemini/gemini-2.5-flash";
+  }
+  getDefaultApiKey() {
+    return process.env.GEMINI_API_KEY;
+  }
+  getProviderName() {
+    return "Gemini";
+  }
+};
+var geminiLLM = new GeminiLLM();
+
+// src/userResponse/openai.ts
+import dotenv4 from "dotenv";
+dotenv4.config();
+var OpenAILLM = class extends BaseLLM {
+  constructor(config) {
+    super(config);
+  }
+  getDefaultModel() {
+    return "openai/gpt-4.1";
+  }
+  getDefaultApiKey() {
+    return process.env.OPENAI_API_KEY;
+  }
+  getProviderName() {
+    return "OpenAI";
+  }
+};
+var openaiLLM = new OpenAILLM();
+
+// src/userResponse/index.ts
+import dotenv5 from "dotenv";
+dotenv5.config();
 function getLLMProviders() {
   const envProviders = process.env.LLM_PROVIDERS;
-  const DEFAULT_PROVIDERS = ["anthropic", "groq"];
+  const DEFAULT_PROVIDERS = ["anthropic", "gemini", "openai", "groq"];
   if (!envProviders) {
     return DEFAULT_PROVIDERS;
   }
   try {
     const providers = JSON.parse(envProviders);
-    const validProviders = providers.filter((p) => p === "anthropic" || p === "groq");
+    const validProviders = providers.filter((p) => p === "anthropic" || p === "groq" || p === "gemini" || p === "openai");
     if (validProviders.length === 0) {
       return DEFAULT_PROVIDERS;
     }
@@ -4613,10 +4958,44 @@ var useGroqMethod = async (prompt, components, apiKey, logCollector, conversatio
   logger.info(`[useGroqMethod] Successfully generated ${responseMode} using Groq`);
   return matchResult;
 };
+var useGeminiMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
+  logger.debug("[useGeminiMethod] Initializing Gemini LLM matching method");
+  logger.debug(`[useGeminiMethod] Response mode: ${responseMode}`);
+  const msg = `Using Gemini LLM ${responseMode === "text" ? "text response" : "matching"} method...`;
+  logger.info(msg);
+  logCollector?.info(msg);
+  if (responseMode === "component" && components.length === 0) {
+    const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
+    logger.error("[useGeminiMethod] No components available");
+    logCollector?.error(emptyMsg);
+    return { success: false, errors: [emptyMsg] };
+  }
+  logger.debug(`[useGeminiMethod] Processing with ${components.length} components`);
+  const matchResult = await geminiLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+  logger.info(`[useGeminiMethod] Successfully generated ${responseMode} using Gemini`);
+  return matchResult;
+};
+var useOpenAIMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
+  logger.debug("[useOpenAIMethod] Initializing OpenAI GPT matching method");
+  logger.debug(`[useOpenAIMethod] Response mode: ${responseMode}`);
+  const msg = `Using OpenAI GPT ${responseMode === "text" ? "text response" : "matching"} method...`;
+  logger.info(msg);
+  logCollector?.info(msg);
+  if (responseMode === "component" && components.length === 0) {
+    const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
+    logger.error("[useOpenAIMethod] No components available");
+    logCollector?.error(emptyMsg);
+    return { success: false, errors: [emptyMsg] };
+  }
+  logger.debug(`[useOpenAIMethod] Processing with ${components.length} components`);
+  const matchResult = await openaiLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+  logger.info(`[useOpenAIMethod] Successfully generated ${responseMode} using OpenAI`);
+  return matchResult;
+};
 var getUserResponseFromCache = async (prompt) => {
   return false;
 };
-var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey, llmProviders, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
+var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
   logger.debug(`[get_user_response] Starting user response generation for prompt: "${prompt.substring(0, 50)}..."`);
   logger.debug(`[get_user_response] Response mode: ${responseMode}`);
   logger.debug("[get_user_response] Checking cache for existing response");
@@ -4652,6 +5031,10 @@ var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey,
       result = await useAnthropicMethod(prompt, components, anthropicApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
     } else if (provider === "groq") {
       result = await useGroqMethod(prompt, components, groqApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+    } else if (provider === "gemini") {
+      result = await useGeminiMethod(prompt, components, geminiApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+    } else if (provider === "openai") {
+      result = await useOpenAIMethod(prompt, components, openaiApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
     } else {
       logger.warn(`[get_user_response] Unknown provider: ${provider} - skipping`);
       errors.push(`Unknown provider: ${provider}`);
@@ -4996,7 +5379,7 @@ var CONTEXT_CONFIG = {
 };
 
 // src/handlers/user-prompt-request.ts
-var get_user_request = async (data, components, sendMessage, anthropicApiKey, groqApiKey, llmProviders, collections, externalTools, userId) => {
+var get_user_request = async (data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, externalTools, userId) => {
   const errors = [];
   logger.debug("[USER_PROMPT_REQ] Parsing incoming message data");
   const parseResult = UserPromptRequestMessageSchema.safeParse(data);
@@ -5071,6 +5454,8 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
       components,
       anthropicApiKey,
       groqApiKey,
+      geminiApiKey,
+      openaiApiKey,
       llmProviders,
       logCollector,
       conversationHistory,
@@ -5169,8 +5554,8 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
     wsId
   };
 };
-async function handleUserPromptRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, llmProviders, collections, externalTools, userId) {
-  const response = await get_user_request(data, components, sendMessage, anthropicApiKey, groqApiKey, llmProviders, collections, externalTools, userId);
+async function handleUserPromptRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, externalTools, userId) {
+  const response = await get_user_request(data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, externalTools, userId);
   sendDataResponse4(
     response.id || data.id,
     {
@@ -5297,13 +5682,13 @@ function sendResponse(id, res, sendMessage, clientId) {
 }
 
 // src/userResponse/next-questions.ts
-async function generateNextQuestions(originalUserPrompt, component, componentData, anthropicApiKey, groqApiKey, llmProviders, logCollector, conversationHistory) {
+async function generateNextQuestions(originalUserPrompt, component, componentData, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, logCollector, conversationHistory) {
   try {
     logger.debug("[generateNextQuestions] Starting next questions generation");
    logger.debug(`[generateNextQuestions] User prompt: "${originalUserPrompt?.substring(0, 50)}..."`);
    logger.debug(`[generateNextQuestions] Component: ${component?.name || "unknown"} (${component?.type || "unknown"})`);
    logger.debug(`[generateNextQuestions] Component data available: ${componentData ? "yes" : "no"}`);
-    const providers = llmProviders || ["anthropic"];
+    const providers = llmProviders || ["anthropic", "gemini", "openai", "groq"];
    logger.info(`[generateNextQuestions] Using LLM providers: [${providers.join(", ")}]`);
    if (conversationHistory && conversationHistory.length > 0) {
      const exchangeCount = conversationHistory.split("\n").filter((l) => l.startsWith("Q")).length;
@@ -5328,6 +5713,26 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
           logCollector,
           conversationHistory
         );
+      } else if (provider === "gemini") {
+        logger.debug("[generateNextQuestions] Using Gemini LLM for next questions");
+        result = await geminiLLM.generateNextQuestions(
+          originalUserPrompt,
+          component,
+          componentData,
+          geminiApiKey,
+          logCollector,
+          conversationHistory
+        );
+      } else if (provider === "openai") {
+        logger.debug("[generateNextQuestions] Using OpenAI LLM for next questions");
+        result = await openaiLLM.generateNextQuestions(
+          originalUserPrompt,
+          component,
+          componentData,
+          openaiApiKey,
+          logCollector,
+          conversationHistory
+        );
       } else {
         logger.debug("[generateNextQuestions] Using Anthropic LLM for next questions");
         result = await anthropicLLM.generateNextQuestions(
@@ -5378,7 +5783,7 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
 }
 
 // src/handlers/actions-request.ts
-async function handleActionsRequest(data, sendMessage, anthropicApiKey, groqApiKey, llmProviders) {
+async function handleActionsRequest(data, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders) {
   try {
     logger.debug("[ACTIONS_REQ] Parsing incoming actions request");
     const actionsRequest = ActionsRequestMessageSchema.parse(data);
@@ -5450,6 +5855,8 @@ ${conversationHistory.substring(0, 200)}...`);
       componentData,
       anthropicApiKey,
       groqApiKey,
+      geminiApiKey,
+      openaiApiKey,
       llmProviders,
       logCollector,
       conversationHistory
@@ -7324,7 +7731,10 @@ var SuperatomSDK = class {
     this.url = config.url || process.env.SA_WEBSOCKET_URL || DEFAULT_WS_URL;
     this.anthropicApiKey = config.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY || "";
     this.groqApiKey = config.GROQ_API_KEY || process.env.GROQ_API_KEY || "";
+    this.geminiApiKey = config.GEMINI_API_KEY || process.env.GEMINI_API_KEY || "";
+    this.openaiApiKey = config.OPENAI_API_KEY || process.env.OPENAI_API_KEY || "";
     this.llmProviders = config.LLM_PROVIDERS || getLLMProviders();
+    logger.info(`Initializing Superatom SDK v${SDK_VERSION} for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, config llm providers: ${config.LLM_PROVIDERS}`);
     this.userManager = new UserManager(this.projectId, 5e3);
     this.dashboardManager = new DashboardManager(this.projectId);
     this.reportManager = new ReportManager(this.projectId);
@@ -7409,7 +7819,9 @@ var SuperatomSDK = class {
     return new Promise((resolve, reject) => {
       try {
         const url = new URL(this.url);
-
+        if (this.apiKey) {
+          url.searchParams.set("apiKey", this.apiKey);
+        }
         url.searchParams.set("projectId", this.projectId);
         url.searchParams.set("userId", this.userId);
         url.searchParams.set("type", this.type);
@@ -7468,12 +7880,12 @@ var SuperatomSDK = class {
           });
           break;
         case "USER_PROMPT_REQ":
-          handleUserPromptRequest(parsed, this.components, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.llmProviders, this.collections, this.tools, this.userId).catch((error) => {
+          handleUserPromptRequest(parsed, this.components, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.geminiApiKey, this.openaiApiKey, this.llmProviders, this.collections, this.tools, this.userId).catch((error) => {
             logger.error("Failed to handle user prompt request:", error);
           });
           break;
         case "ACTIONS":
-          handleActionsRequest(parsed, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.llmProviders).catch((error) => {
+          handleActionsRequest(parsed, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.geminiApiKey, this.openaiApiKey, this.llmProviders).catch((error) => {
             logger.error("Failed to handle actions request:", error);
           });
           break;
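
Two conventions from the new code are worth noting. Model identifiers keep the provider-prefixed form that _parseModel splits on, with "gemini/gemini-2.5-flash" and "openai/gpt-4.1" as the new defaults, and getLLMProviders now accepts "gemini" and "openai" in the LLM_PROVIDERS environment variable, which is parsed as a JSON array and filtered to known provider names (an empty filtered list falls back to ["anthropic", "gemini", "openai", "groq"]). A minimal sketch of environment-driven setup, assuming the variables are set before the SDK is constructed:

// Hypothetical environment-driven setup; variable names are taken from the diff above.
// LLM_PROVIDERS must be a JSON array of provider names; unrecognized names are filtered out.
process.env.LLM_PROVIDERS = '["gemini", "openai"]';
process.env.GEMINI_API_KEY = "...";   // read when the "gemini" provider is used
process.env.OPENAI_API_KEY = "...";   // read when the "openai" provider is used
const sdk = new SuperatomSDK({ projectId: "my-project" });  // projectId field is assumed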