@superatomai/sdk-node 0.0.18 → 0.0.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +942 -942
- package/dist/index.d.mts +13 -3
- package/dist/index.d.ts +13 -3
- package/dist/index.js +431 -19
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +431 -19
- package/dist/index.mjs.map +1 -1
- package/package.json +3 -1
package/dist/index.js
CHANGED
@@ -2947,6 +2947,8 @@ var promptLoader = new PromptLoader({
 // src/llm.ts
 var import_sdk = __toESM(require("@anthropic-ai/sdk"));
 var import_groq_sdk = __toESM(require("groq-sdk"));
+var import_generative_ai = require("@google/generative-ai");
+var import_openai = __toESM(require("openai"));
 var import_jsonrepair = require("jsonrepair");
 var LLM = class {
   /* Get a complete text response from an LLM (Anthropic or Groq) */
@@ -2956,8 +2958,12 @@ var LLM = class {
       return this._anthropicText(messages, modelName, options);
     } else if (provider === "groq") {
       return this._groqText(messages, modelName, options);
+    } else if (provider === "gemini") {
+      return this._geminiText(messages, modelName, options);
+    } else if (provider === "openai") {
+      return this._openaiText(messages, modelName, options);
     } else {
-      throw new Error(`Unsupported provider: ${provider}. Use "anthropic" or "
+      throw new Error(`Unsupported provider: ${provider}. Use "anthropic", "groq", "gemini", or "openai"`);
     }
   }
   /* Stream response from an LLM (Anthropic or Groq) */
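Note on the routing: the provider comes from a prefix on options.model via _parseModel, whose implementation is outside this diff. The defaults introduced later in this release ("gemini/gemini-2.5-flash", "openai/gpt-4.1") imply a "provider/model" string split on the first slash; a hypothetical sketch of that convention:

```js
// Hypothetical sketch of the "provider/model" convention implied by the new
// defaults; _parseModel itself is not shown in this diff and may differ.
function parseModel(model) {
  const [provider, ...rest] = String(model).split("/");
  return [provider, rest.join("/")]; // "gemini/gemini-2.5-flash" -> ["gemini", "gemini-2.5-flash"]
}
```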
@@ -2967,17 +2973,26 @@
       return this._anthropicStream(messages, modelName, options, json);
     } else if (provider === "groq") {
       return this._groqStream(messages, modelName, options, json);
+    } else if (provider === "gemini") {
+      return this._geminiStream(messages, modelName, options, json);
+    } else if (provider === "openai") {
+      return this._openaiStream(messages, modelName, options, json);
     } else {
-      throw new Error(`Unsupported provider: ${provider}. Use "anthropic" or "
+      throw new Error(`Unsupported provider: ${provider}. Use "anthropic", "groq", "gemini", or "openai"`);
     }
   }
-  /* Stream response with tool calling support (Anthropic
+  /* Stream response with tool calling support (Anthropic and Gemini) */
   static async streamWithTools(messages, tools, toolHandler, options = {}, maxIterations = 3) {
     const [provider, modelName] = this._parseModel(options.model);
-    if (provider
-
+    if (provider === "anthropic") {
+      return this._anthropicStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
+    } else if (provider === "gemini") {
+      return this._geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
+    } else if (provider === "openai") {
+      return this._openaiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
+    } else {
+      throw new Error(`Tool calling is only supported for Anthropic, Gemini, and OpenAI models`);
     }
-    return this._anthropicStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations);
   }
   // ============================================================
   // PRIVATE HELPER METHODS
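All three entry points now route on the same provider prefix, so switching to Gemini or OpenAI is just a different model string. A usage sketch for the tool-calling path; the { sys, user } message shape and the Anthropic-style input_schema tool format are inferred from the provider implementations later in this diff:

```js
// Sketch only: assumes the bundled LLM class shown above is reachable from the
// caller; argument shapes follow what the _*StreamWithTools implementations read.
const answer = await LLM.streamWithTools(
  { sys: "You are a helpful assistant.", user: "What's the weather in Paris?" },
  [{
    name: "get_weather",
    description: "Look up the current weather for a city",
    input_schema: {
      type: "object",
      properties: { city: { type: "string" } },
      required: ["city"]
    }
  }],
  async (name, args) => {
    // toolHandler: may return a string or any JSON-serializable value
    if (name === "get_weather") return { city: args.city, tempC: 21 };
    throw new Error(`Unknown tool: ${name}`);
  },
  { model: "gemini/gemini-2.5-flash", partial: (text) => process.stdout.write(text) },
  3 // maxIterations
);
```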
@@ -3311,6 +3326,298 @@ var LLM = class {
     return fullText;
   }
   // ============================================================
+  // GEMINI IMPLEMENTATION
+  // ============================================================
+  static async _geminiText(messages, modelName, options) {
+    const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
+    const genAI = new import_generative_ai.GoogleGenerativeAI(apiKey);
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const model = genAI.getGenerativeModel({
+      model: modelName,
+      systemInstruction: systemPrompt,
+      generationConfig: {
+        maxOutputTokens: options.maxTokens || 1e3,
+        temperature: options.temperature,
+        topP: options.topP
+      }
+    });
+    const result = await model.generateContent(messages.user);
+    const response = await result.response;
+    return response.text();
+  }
+  static async _geminiStream(messages, modelName, options, json) {
+    const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
+    const genAI = new import_generative_ai.GoogleGenerativeAI(apiKey);
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const model = genAI.getGenerativeModel({
+      model: modelName,
+      systemInstruction: systemPrompt,
+      generationConfig: {
+        maxOutputTokens: options.maxTokens || 1e3,
+        temperature: options.temperature,
+        topP: options.topP,
+        responseMimeType: json ? "application/json" : void 0
+      }
+    });
+    const result = await model.generateContentStream(messages.user);
+    let fullText = "";
+    for await (const chunk of result.stream) {
+      const text = chunk.text();
+      if (text) {
+        fullText += text;
+        if (options.partial) {
+          options.partial(text);
+        }
+      }
+    }
+    if (json) {
+      return this._parseJSON(fullText);
+    }
+    return fullText;
+  }
+  static async _geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
+    const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
+    const genAI = new import_generative_ai.GoogleGenerativeAI(apiKey);
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const functionDeclarations = tools.map((tool) => ({
+      name: tool.name,
+      description: tool.description,
+      parameters: {
+        type: import_generative_ai.SchemaType.OBJECT,
+        properties: tool.input_schema.properties,
+        required: tool.input_schema.required || []
+      }
+    }));
+    const model = genAI.getGenerativeModel({
+      model: modelName,
+      systemInstruction: systemPrompt,
+      tools: [{ functionDeclarations }],
+      generationConfig: {
+        maxOutputTokens: options.maxTokens || 4e3,
+        temperature: options.temperature,
+        topP: options.topP
+      }
+    });
+    const chat = model.startChat({
+      history: []
+    });
+    let iterations = 0;
+    let finalText = "";
+    let currentUserMessage = messages.user;
+    while (iterations < maxIterations) {
+      iterations++;
+      const result = await chat.sendMessageStream(currentUserMessage);
+      let responseText = "";
+      const functionCalls = [];
+      for await (const chunk of result.stream) {
+        const candidate = chunk.candidates?.[0];
+        if (!candidate) continue;
+        for (const part of candidate.content?.parts || []) {
+          if (part.text) {
+            responseText += part.text;
+            if (options.partial) {
+              options.partial(part.text);
+            }
+          } else if (part.functionCall) {
+            functionCalls.push({
+              name: part.functionCall.name,
+              args: part.functionCall.args
+            });
+          }
+        }
+      }
+      if (functionCalls.length === 0) {
+        finalText = responseText;
+        break;
+      }
+      const functionResponses = [];
+      for (const fc of functionCalls) {
+        try {
+          const result2 = await toolHandler(fc.name, fc.args);
+          functionResponses.push({
+            name: fc.name,
+            response: { result: typeof result2 === "string" ? result2 : JSON.stringify(result2) }
+          });
+        } catch (error) {
+          functionResponses.push({
+            name: fc.name,
+            response: { error: error instanceof Error ? error.message : String(error) }
+          });
+        }
+      }
+      const functionResponseParts = functionResponses.map((fr) => ({
+        functionResponse: {
+          name: fr.name,
+          response: fr.response
+        }
+      }));
+      currentUserMessage = functionResponseParts;
+    }
+    if (iterations >= maxIterations) {
+      throw new Error(`Max iterations (${maxIterations}) reached in tool calling loop`);
+    }
+    return finalText;
+  }
+  // ============================================================
+  // OPENAI IMPLEMENTATION
+  // ============================================================
+  static async _openaiText(messages, modelName, options) {
+    const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
+    const openai = new import_openai.default({ apiKey });
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const response = await openai.chat.completions.create({
+      model: modelName,
+      messages: [
+        { role: "system", content: systemPrompt },
+        { role: "user", content: messages.user }
+      ],
+      max_tokens: options.maxTokens || 1e3,
+      temperature: options.temperature,
+      top_p: options.topP
+    });
+    return response.choices[0]?.message?.content || "";
+  }
+  static async _openaiStream(messages, modelName, options, json) {
+    const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
+    const openai = new import_openai.default({ apiKey });
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const stream = await openai.chat.completions.create({
+      model: modelName,
+      messages: [
+        { role: "system", content: systemPrompt },
+        { role: "user", content: messages.user }
+      ],
+      max_tokens: options.maxTokens || 1e3,
+      temperature: options.temperature,
+      top_p: options.topP,
+      response_format: json ? { type: "json_object" } : void 0,
+      stream: true
+    });
+    let fullText = "";
+    for await (const chunk of stream) {
+      const content = chunk.choices[0]?.delta?.content || "";
+      if (content) {
+        fullText += content;
+        if (options.partial) {
+          options.partial(content);
+        }
+      }
+    }
+    if (json) {
+      return this._parseJSON(fullText);
+    }
+    return fullText;
+  }
+  static async _openaiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
+    const apiKey = options.apiKey || process.env.OPENAI_API_KEY || "";
+    const openai = new import_openai.default({ apiKey });
+    const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
+    const openaiTools = tools.map((tool) => ({
+      type: "function",
+      function: {
+        name: tool.name,
+        description: tool.description,
+        parameters: {
+          type: tool.input_schema.type,
+          properties: tool.input_schema.properties,
+          required: tool.input_schema.required || []
+        }
+      }
+    }));
+    const conversationMessages = [
+      { role: "system", content: systemPrompt },
+      { role: "user", content: messages.user }
+    ];
+    let iterations = 0;
+    let finalText = "";
+    while (iterations < maxIterations) {
+      iterations++;
+      const stream = await openai.chat.completions.create({
+        model: modelName,
+        messages: conversationMessages,
+        max_tokens: options.maxTokens || 4e3,
+        temperature: options.temperature,
+        top_p: options.topP,
+        tools: openaiTools,
+        stream: true
+      });
+      let responseText = "";
+      const toolCalls = [];
+      const toolCallsInProgress = /* @__PURE__ */ new Map();
+      for await (const chunk of stream) {
+        const delta = chunk.choices[0]?.delta;
+        if (delta?.content) {
+          responseText += delta.content;
+          if (options.partial) {
+            options.partial(delta.content);
+          }
+        }
+        if (delta?.tool_calls) {
+          for (const toolCallDelta of delta.tool_calls) {
+            const index = toolCallDelta.index;
+            if (!toolCallsInProgress.has(index)) {
+              toolCallsInProgress.set(index, {
+                id: toolCallDelta.id || "",
+                name: toolCallDelta.function?.name || "",
+                arguments: ""
+              });
+            }
+            const tc = toolCallsInProgress.get(index);
+            if (toolCallDelta.id) {
+              tc.id = toolCallDelta.id;
+            }
+            if (toolCallDelta.function?.name) {
+              tc.name = toolCallDelta.function.name;
+            }
+            if (toolCallDelta.function?.arguments) {
+              tc.arguments += toolCallDelta.function.arguments;
+            }
+          }
+        }
+      }
+      for (const tc of toolCallsInProgress.values()) {
+        if (tc.id && tc.name) {
+          toolCalls.push(tc);
+        }
+      }
+      if (toolCalls.length === 0) {
+        finalText = responseText;
+        break;
+      }
+      conversationMessages.push({
+        role: "assistant",
+        content: responseText || null,
+        tool_calls: toolCalls.map((tc) => ({
+          id: tc.id,
+          type: "function",
+          function: {
+            name: tc.name,
+            arguments: tc.arguments
+          }
+        }))
+      });
+      for (const tc of toolCalls) {
+        let result;
+        try {
+          const args = JSON.parse(tc.arguments);
+          const toolResult = await toolHandler(tc.name, args);
+          result = typeof toolResult === "string" ? toolResult : JSON.stringify(toolResult);
+        } catch (error) {
+          result = JSON.stringify({ error: error instanceof Error ? error.message : String(error) });
+        }
+        conversationMessages.push({
+          role: "tool",
+          tool_call_id: tc.id,
+          content: result
+        });
+      }
+    }
+    if (iterations >= maxIterations) {
+      throw new Error(`Max iterations (${maxIterations}) reached in tool calling loop`);
+    }
+    return finalText;
+  }
+  // ============================================================
   // JSON PARSING HELPER
   // ============================================================
   /**
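The least obvious part of the OpenAI path above is the streaming tool-call handling: a single tool call arrives as many deltas, usually with the id and function name only on the first chunk and the JSON arguments split across later chunks, which is why _openaiStreamWithTools accumulates them in a Map keyed by delta index before JSON.parse. A condensed, self-contained illustration with hypothetical deltas:

```js
// Hypothetical deltas for one streamed tool call (shapes follow the OpenAI
// chat.completions streaming format consumed above).
const deltas = [
  { index: 0, id: "call_abc", function: { name: "get_weather", arguments: "" } },
  { index: 0, function: { arguments: '{"city":' } },
  { index: 0, function: { arguments: '"Paris"}' } }
];
const inProgress = new Map();
for (const d of deltas) {
  if (!inProgress.has(d.index)) {
    inProgress.set(d.index, { id: d.id || "", name: d.function?.name || "", arguments: "" });
  }
  const tc = inProgress.get(d.index);
  if (d.id) tc.id = d.id;
  if (d.function?.name) tc.name = d.function.name;
  if (d.function?.arguments) tc.arguments += d.function.arguments;
}
console.log(JSON.parse(inProgress.get(0).arguments)); // { city: 'Paris' }
```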
@@ -4599,18 +4906,56 @@ var AnthropicLLM = class extends BaseLLM {
 };
 var anthropicLLM = new AnthropicLLM();
 
-// src/userResponse/
+// src/userResponse/gemini.ts
 var import_dotenv3 = __toESM(require("dotenv"));
 import_dotenv3.default.config();
+var GeminiLLM = class extends BaseLLM {
+  constructor(config) {
+    super(config);
+  }
+  getDefaultModel() {
+    return "gemini/gemini-2.5-flash";
+  }
+  getDefaultApiKey() {
+    return process.env.GEMINI_API_KEY;
+  }
+  getProviderName() {
+    return "Gemini";
+  }
+};
+var geminiLLM = new GeminiLLM();
+
+// src/userResponse/openai.ts
+var import_dotenv4 = __toESM(require("dotenv"));
+import_dotenv4.default.config();
+var OpenAILLM = class extends BaseLLM {
+  constructor(config) {
+    super(config);
+  }
+  getDefaultModel() {
+    return "openai/gpt-4.1";
+  }
+  getDefaultApiKey() {
+    return process.env.OPENAI_API_KEY;
+  }
+  getProviderName() {
+    return "OpenAI";
+  }
+};
+var openaiLLM = new OpenAILLM();
+
+// src/userResponse/index.ts
+var import_dotenv5 = __toESM(require("dotenv"));
+import_dotenv5.default.config();
 function getLLMProviders() {
   const envProviders = process.env.LLM_PROVIDERS;
-  const DEFAULT_PROVIDERS = ["anthropic", "groq"];
+  const DEFAULT_PROVIDERS = ["anthropic", "gemini", "openai", "groq"];
   if (!envProviders) {
     return DEFAULT_PROVIDERS;
   }
   try {
     const providers = JSON.parse(envProviders);
-    const validProviders = providers.filter((p) => p === "anthropic" || p === "groq");
+    const validProviders = providers.filter((p) => p === "anthropic" || p === "groq" || p === "gemini" || p === "openai");
     if (validProviders.length === 0) {
       return DEFAULT_PROVIDERS;
     }
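getLLMProviders now defaults to all four providers and accepts them via the LLM_PROVIDERS environment variable, which is parsed as a JSON array with unsupported names filtered out, falling back to the default list if nothing survives. The pipeline below appears to try providers in this order, so the variable also controls fallback order. For example:

```js
// Behavior of the getLLMProviders function shown above, driven by the environment.
process.env.LLM_PROVIDERS = '["gemini", "openai"]';
getLLMProviders(); // ["gemini", "openai"]

process.env.LLM_PROVIDERS = '["mistral"]'; // no supported names survive the filter
getLLMProviders(); // ["anthropic", "gemini", "openai", "groq"]
```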
@@ -4653,10 +4998,44 @@ var useGroqMethod = async (prompt, components, apiKey, logCollector, conversatio
   logger.info(`[useGroqMethod] Successfully generated ${responseMode} using Groq`);
   return matchResult;
 };
+var useGeminiMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
+  logger.debug("[useGeminiMethod] Initializing Gemini LLM matching method");
+  logger.debug(`[useGeminiMethod] Response mode: ${responseMode}`);
+  const msg = `Using Gemini LLM ${responseMode === "text" ? "text response" : "matching"} method...`;
+  logger.info(msg);
+  logCollector?.info(msg);
+  if (responseMode === "component" && components.length === 0) {
+    const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
+    logger.error("[useGeminiMethod] No components available");
+    logCollector?.error(emptyMsg);
+    return { success: false, errors: [emptyMsg] };
+  }
+  logger.debug(`[useGeminiMethod] Processing with ${components.length} components`);
+  const matchResult = await geminiLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+  logger.info(`[useGeminiMethod] Successfully generated ${responseMode} using Gemini`);
+  return matchResult;
+};
+var useOpenAIMethod = async (prompt, components, apiKey, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
+  logger.debug("[useOpenAIMethod] Initializing OpenAI GPT matching method");
+  logger.debug(`[useOpenAIMethod] Response mode: ${responseMode}`);
+  const msg = `Using OpenAI GPT ${responseMode === "text" ? "text response" : "matching"} method...`;
+  logger.info(msg);
+  logCollector?.info(msg);
+  if (responseMode === "component" && components.length === 0) {
+    const emptyMsg = "Components not loaded in memory. Please ensure components are fetched first.";
+    logger.error("[useOpenAIMethod] No components available");
+    logCollector?.error(emptyMsg);
+    return { success: false, errors: [emptyMsg] };
+  }
+  logger.debug(`[useOpenAIMethod] Processing with ${components.length} components`);
+  const matchResult = await openaiLLM.handleUserRequest(prompt, components, apiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+  logger.info(`[useOpenAIMethod] Successfully generated ${responseMode} using OpenAI`);
+  return matchResult;
+};
 var getUserResponseFromCache = async (prompt) => {
   return false;
 };
-var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey, llmProviders, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
+var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, logCollector, conversationHistory, responseMode = "component", streamCallback, collections, externalTools, userId) => {
   logger.debug(`[get_user_response] Starting user response generation for prompt: "${prompt.substring(0, 50)}..."`);
   logger.debug(`[get_user_response] Response mode: ${responseMode}`);
   logger.debug("[get_user_response] Checking cache for existing response");
@@ -4692,6 +5071,10 @@ var get_user_response = async (prompt, components, anthropicApiKey, groqApiKey,
       result = await useAnthropicMethod(prompt, components, anthropicApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
     } else if (provider === "groq") {
       result = await useGroqMethod(prompt, components, groqApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+    } else if (provider === "gemini") {
+      result = await useGeminiMethod(prompt, components, geminiApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
+    } else if (provider === "openai") {
+      result = await useOpenAIMethod(prompt, components, openaiApiKey, logCollector, conversationHistory, responseMode, streamCallback, collections, externalTools, userId);
     } else {
       logger.warn(`[get_user_response] Unknown provider: ${provider} - skipping`);
       errors.push(`Unknown provider: ${provider}`);
@@ -5036,7 +5419,7 @@ var CONTEXT_CONFIG = {
 };
 
 // src/handlers/user-prompt-request.ts
-var get_user_request = async (data, components, sendMessage, anthropicApiKey, groqApiKey, llmProviders, collections, externalTools, userId) => {
+var get_user_request = async (data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, externalTools, userId) => {
   const errors = [];
   logger.debug("[USER_PROMPT_REQ] Parsing incoming message data");
   const parseResult = UserPromptRequestMessageSchema.safeParse(data);
@@ -5111,6 +5494,8 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
     components,
     anthropicApiKey,
     groqApiKey,
+    geminiApiKey,
+    openaiApiKey,
     llmProviders,
     logCollector,
     conversationHistory,
@@ -5209,8 +5594,8 @@ var get_user_request = async (data, components, sendMessage, anthropicApiKey, gr
     wsId
   };
 };
-async function handleUserPromptRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, llmProviders, collections, externalTools, userId) {
-  const response = await get_user_request(data, components, sendMessage, anthropicApiKey, groqApiKey, llmProviders, collections, externalTools, userId);
+async function handleUserPromptRequest(data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, externalTools, userId) {
+  const response = await get_user_request(data, components, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, collections, externalTools, userId);
   sendDataResponse4(
     response.id || data.id,
     {
@@ -5337,13 +5722,13 @@ function sendResponse(id, res, sendMessage, clientId) {
 }
 
 // src/userResponse/next-questions.ts
-async function generateNextQuestions(originalUserPrompt, component, componentData, anthropicApiKey, groqApiKey, llmProviders, logCollector, conversationHistory) {
+async function generateNextQuestions(originalUserPrompt, component, componentData, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders, logCollector, conversationHistory) {
   try {
     logger.debug("[generateNextQuestions] Starting next questions generation");
     logger.debug(`[generateNextQuestions] User prompt: "${originalUserPrompt?.substring(0, 50)}..."`);
     logger.debug(`[generateNextQuestions] Component: ${component?.name || "unknown"} (${component?.type || "unknown"})`);
     logger.debug(`[generateNextQuestions] Component data available: ${componentData ? "yes" : "no"}`);
-    const providers = llmProviders || ["anthropic"];
+    const providers = llmProviders || ["anthropic", "gemini", "openai", "groq"];
     logger.info(`[generateNextQuestions] Using LLM providers: [${providers.join(", ")}]`);
     if (conversationHistory && conversationHistory.length > 0) {
       const exchangeCount = conversationHistory.split("\n").filter((l) => l.startsWith("Q")).length;
@@ -5368,6 +5753,26 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
         logCollector,
         conversationHistory
       );
+    } else if (provider === "gemini") {
+      logger.debug("[generateNextQuestions] Using Gemini LLM for next questions");
+      result = await geminiLLM.generateNextQuestions(
+        originalUserPrompt,
+        component,
+        componentData,
+        geminiApiKey,
+        logCollector,
+        conversationHistory
+      );
+    } else if (provider === "openai") {
+      logger.debug("[generateNextQuestions] Using OpenAI LLM for next questions");
+      result = await openaiLLM.generateNextQuestions(
+        originalUserPrompt,
+        component,
+        componentData,
+        openaiApiKey,
+        logCollector,
+        conversationHistory
+      );
     } else {
       logger.debug("[generateNextQuestions] Using Anthropic LLM for next questions");
       result = await anthropicLLM.generateNextQuestions(
@@ -5418,7 +5823,7 @@ async function generateNextQuestions(originalUserPrompt, component, componentDat
 }
 
 // src/handlers/actions-request.ts
-async function handleActionsRequest(data, sendMessage, anthropicApiKey, groqApiKey, llmProviders) {
+async function handleActionsRequest(data, sendMessage, anthropicApiKey, groqApiKey, geminiApiKey, openaiApiKey, llmProviders) {
   try {
     logger.debug("[ACTIONS_REQ] Parsing incoming actions request");
     const actionsRequest = ActionsRequestMessageSchema.parse(data);
@@ -5490,6 +5895,8 @@ ${conversationHistory.substring(0, 200)}...`);
       componentData,
       anthropicApiKey,
       groqApiKey,
+      geminiApiKey,
+      openaiApiKey,
       llmProviders,
       logCollector,
       conversationHistory
@@ -7364,7 +7771,10 @@ var SuperatomSDK = class {
     this.url = config.url || process.env.SA_WEBSOCKET_URL || DEFAULT_WS_URL;
     this.anthropicApiKey = config.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY || "";
     this.groqApiKey = config.GROQ_API_KEY || process.env.GROQ_API_KEY || "";
+    this.geminiApiKey = config.GEMINI_API_KEY || process.env.GEMINI_API_KEY || "";
+    this.openaiApiKey = config.OPENAI_API_KEY || process.env.OPENAI_API_KEY || "";
     this.llmProviders = config.LLM_PROVIDERS || getLLMProviders();
+    logger.info(`Initializing Superatom SDK v${SDK_VERSION} for project ${this.projectId}, llm providers: ${this.llmProviders.join(", ")}, config llm providers: ${config.LLM_PROVIDERS}`);
     this.userManager = new UserManager(this.projectId, 5e3);
     this.dashboardManager = new DashboardManager(this.projectId);
     this.reportManager = new ReportManager(this.projectId);
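The SDK constructor now reads Gemini and OpenAI keys from the config object or the environment, next to the existing Anthropic and Groq keys, and logs the resolved provider list at startup. A hedged initialization sketch; the require path and the projectId field are assumptions, since only the fields shown above are confirmed by this hunk:

```js
// Sketch only: GEMINI_API_KEY, OPENAI_API_KEY and LLM_PROVIDERS are the new
// constructor fields shown above; projectId and the export shape are assumed.
const { SuperatomSDK } = require("@superatomai/sdk-node");

const sdk = new SuperatomSDK({
  projectId: "my-project",                          // assumed field
  ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY,
  GEMINI_API_KEY: process.env.GEMINI_API_KEY,       // new in 0.0.19
  OPENAI_API_KEY: process.env.OPENAI_API_KEY,       // new in 0.0.19
  LLM_PROVIDERS: ["gemini", "openai", "anthropic"]
});
```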
@@ -7449,7 +7859,9 @@ var SuperatomSDK = class {
     return new Promise((resolve, reject) => {
       try {
         const url = new URL(this.url);
-
+        if (this.apiKey) {
+          url.searchParams.set("apiKey", this.apiKey);
+        }
         url.searchParams.set("projectId", this.projectId);
         url.searchParams.set("userId", this.userId);
         url.searchParams.set("type", this.type);
@@ -7508,12 +7920,12 @@ var SuperatomSDK = class {
         });
         break;
       case "USER_PROMPT_REQ":
-        handleUserPromptRequest(parsed, this.components, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.llmProviders, this.collections, this.tools, this.userId).catch((error) => {
+        handleUserPromptRequest(parsed, this.components, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.geminiApiKey, this.openaiApiKey, this.llmProviders, this.collections, this.tools, this.userId).catch((error) => {
          logger.error("Failed to handle user prompt request:", error);
        });
        break;
      case "ACTIONS":
-        handleActionsRequest(parsed, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.llmProviders).catch((error) => {
+        handleActionsRequest(parsed, (msg) => this.send(msg), this.anthropicApiKey, this.groqApiKey, this.geminiApiKey, this.openaiApiKey, this.llmProviders).catch((error) => {
          logger.error("Failed to handle actions request:", error);
        });
        break;