@superatomai/sdk-node 0.0.74 → 0.0.76
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +942 -942
- package/dist/index.d.mts +10 -5
- package/dist/index.d.ts +10 -5
- package/dist/index.js +117 -14
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +117 -14
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
@@ -292,7 +292,7 @@ var UserPromptSuggestionsMessageSchema = z3.object({
   payload: UserPromptSuggestionsPayloadSchema
 });
 var ComponentPropsSchema = z3.object({
-  query: z3.string().or(z3.object({})).optional(),
+  query: z3.string().or(z3.object({})).nullable().optional(),
   title: z3.string().optional(),
   description: z3.string().optional(),
   config: z3.record(z3.unknown()).optional(),
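The one schema change in this release is behavior-affecting: ComponentPropsSchema.query previously rejected an explicit null and now accepts it. A minimal sketch of the difference, using plain zod in place of the bundle's z3 alias (assumed interchangeable):

import { z } from "zod";

// 0.0.74: undefined (omitted) passed, but an explicit null failed validation
const queryOld = z.string().or(z.object({})).optional();
// 0.0.76: null now passes too
const queryNew = z.string().or(z.object({})).nullable().optional();

queryOld.safeParse(null).success;        // false
queryNew.safeParse(null).success;        // true
queryNew.safeParse(undefined).success;   // true
queryNew.safeParse("SELECT 1").success;  // true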
@@ -5343,14 +5343,97 @@ var LLM = class {
     const genAI = new GoogleGenerativeAI(apiKey);
     const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
     try {
+      if (json && options.partial) {
+        const model2 = genAI.getGenerativeModel({
+          model: modelName,
+          systemInstruction: systemPrompt,
+          generationConfig: {
+            maxOutputTokens: options.maxTokens || 1e3,
+            temperature: options.temperature,
+            topP: options.topP,
+            responseMimeType: "application/json"
+          }
+        });
+        const result2 = await model2.generateContentStream(messages.user);
+        let fullText2 = "";
+        let inputTokens2 = 0;
+        let outputTokens2 = 0;
+        for await (const chunk of result2.stream) {
+          try {
+            const text = chunk.text();
+            if (text) {
+              fullText2 += text;
+              options.partial(text);
+            }
+          } catch (chunkError) {
+          }
+          if (chunk.usageMetadata) {
+            inputTokens2 = chunk.usageMetadata.promptTokenCount || 0;
+            outputTokens2 = chunk.usageMetadata.candidatesTokenCount || 0;
+          }
+        }
+        const durationMs2 = Date.now() - startTime;
+        if (inputTokens2 === 0) {
+          inputTokens2 = Math.ceil((systemPrompt.length + messages.user.length) / 4);
+        }
+        if (outputTokens2 === 0) {
+          outputTokens2 = Math.ceil(fullText2.length / 4);
+        }
+        llmUsageLogger.log({
+          timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+          requestId,
+          provider: "gemini",
+          model: modelName,
+          method: "stream-json-partial",
+          inputTokens: inputTokens2,
+          outputTokens: outputTokens2,
+          totalTokens: inputTokens2 + outputTokens2,
+          costUSD: llmUsageLogger.calculateCost(modelName, inputTokens2, outputTokens2),
+          durationMs: durationMs2,
+          success: true
+        });
+        return this._parseJSON(fullText2);
+      }
+      if (json) {
+        const model2 = genAI.getGenerativeModel({
+          model: modelName,
+          systemInstruction: systemPrompt,
+          generationConfig: {
+            maxOutputTokens: options.maxTokens || 1e3,
+            temperature: options.temperature,
+            topP: options.topP,
+            responseMimeType: "application/json"
+          }
+        });
+        const result2 = await model2.generateContent(messages.user);
+        const response = result2.response;
+        const fullText2 = response.text();
+        const durationMs2 = Date.now() - startTime;
+        const usage = response.usageMetadata;
+        const inputTokens2 = usage?.promptTokenCount || Math.ceil((systemPrompt.length + messages.user.length) / 4);
+        const outputTokens2 = usage?.candidatesTokenCount || Math.ceil(fullText2.length / 4);
+        llmUsageLogger.log({
+          timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+          requestId,
+          provider: "gemini",
+          model: modelName,
+          method: "stream-json",
+          inputTokens: inputTokens2,
+          outputTokens: outputTokens2,
+          totalTokens: inputTokens2 + outputTokens2,
+          costUSD: llmUsageLogger.calculateCost(modelName, inputTokens2, outputTokens2),
+          durationMs: durationMs2,
+          success: true
+        });
+        return this._parseJSON(fullText2);
+      }
       const model = genAI.getGenerativeModel({
         model: modelName,
         systemInstruction: systemPrompt,
         generationConfig: {
           maxOutputTokens: options.maxTokens || 1e3,
           temperature: options.temperature,
-          topP: options.topP,
-          responseMimeType: json ? "application/json" : void 0
+          topP: options.topP
         }
       });
       const result = await model.generateContentStream(messages.user);
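The main addition is two dedicated Gemini JSON-mode branches: a streaming path when options.partial is supplied and a blocking generateContent path otherwise, both setting responseMimeType: "application/json" (the old shared path set it conditionally and no longer does). A standalone sketch of the streaming pattern the new branch follows, written directly against @google/generative-ai; the model name and prompts are placeholders, not values from the package:

import { GoogleGenerativeAI } from "@google/generative-ai";

const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY ?? "");
const model = genAI.getGenerativeModel({
  model: "gemini-2.5-flash",
  systemInstruction: "Reply with JSON only.",
  generationConfig: { responseMimeType: "application/json", maxOutputTokens: 1000 }
});

const result = await model.generateContentStream("List three colors as JSON.");
let fullText = "";
for await (const chunk of result.stream) {
  try {
    const text = chunk.text();  // can throw on blocked/empty candidates, hence the guard
    if (text) fullText += text; // the bundle forwards each fragment to options.partial here
  } catch {
    // skip unreadable chunks, mirroring the new empty catch above
  }
  // chunk.usageMetadata, when present, carries promptTokenCount/candidatesTokenCount;
  // the bundle falls back to a chars/4 estimate when both stay 0
}
console.log(JSON.parse(fullText)); // the bundle uses its own _parseJSON helper instead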
@@ -5358,12 +5441,15 @@ var LLM = class {
       let inputTokens = 0;
       let outputTokens = 0;
       for await (const chunk of result.stream) {
-        const text = chunk.text();
-        if (text) {
-          fullText += text;
-          if (options.partial) {
-            options.partial(text);
+        try {
+          const text = chunk.text();
+          if (text) {
+            fullText += text;
+            if (options.partial) {
+              options.partial(text);
+            }
           }
+        } catch (chunkError) {
         }
         if (chunk.usageMetadata) {
           inputTokens = chunk.usageMetadata.promptTokenCount || 0;
@@ -5390,9 +5476,6 @@ var LLM = class {
         durationMs,
         success: true
       });
-      if (json) {
-        return this._parseJSON(fullText);
-      }
       return fullText;
     } catch (error) {
       const durationMs = Date.now() - startTime;
@@ -5413,6 +5496,26 @@ var LLM = class {
       throw error;
     }
   }
+  /**
+   * Recursively strip unsupported JSON Schema properties for Gemini
+   * Gemini doesn't support: additionalProperties, $schema, etc.
+   */
+  static _cleanSchemaForGemini(obj) {
+    if (obj === null || typeof obj !== "object") {
+      return obj;
+    }
+    if (Array.isArray(obj)) {
+      return obj.map((item) => this._cleanSchemaForGemini(item));
+    }
+    const cleaned = {};
+    for (const [key, value] of Object.entries(obj)) {
+      if (key === "additionalProperties" || key === "$schema") {
+        continue;
+      }
+      cleaned[key] = this._cleanSchemaForGemini(value);
+    }
+    return cleaned;
+  }
   static async _geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
     const methodStartTime = Date.now();
     const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
@@ -5423,7 +5526,7 @@ var LLM = class {
       description: tool.description,
       parameters: {
         type: SchemaType.OBJECT,
-        properties: tool.input_schema.properties,
+        properties: this._cleanSchemaForGemini(tool.input_schema.properties),
         required: tool.input_schema.required || []
       }
     }));
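The two hunks above work together: tool input schemas are now sanitized before being handed to Gemini's function-calling API, which errors on JSON Schema keywords it does not support. A sketch of what _cleanSchemaForGemini does to a typical Zod-generated schema; the input object is illustrative:

const schema = {
  $schema: "http://json-schema.org/draft-07/schema#",
  type: "object",
  additionalProperties: false,
  properties: {
    name: { type: "string" },
    tags: {
      type: "array",
      items: { type: "object", additionalProperties: false, properties: { id: { type: "string" } } }
    }
  }
};

// LLM._cleanSchemaForGemini(schema) walks the value recursively, dropping
// "$schema" and "additionalProperties" at every depth, and returns:
// {
//   type: "object",
//   properties: {
//     name: { type: "string" },
//     tags: { type: "array", items: { type: "object", properties: { id: { type: "string" } } } }
//   }
// }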
@@ -8365,10 +8468,10 @@ var GeminiLLM = class extends BaseLLM {
     super(config);
   }
   getDefaultModel() {
-    return "gemini/gemini-
+    return "gemini/gemini-2.5-flash";
   }
   getDefaultFastModel() {
-    return "gemini/gemini-
+    return "gemini/gemini-2.5-flash";
   }
   getDefaultApiKey() {
     return process.env.GEMINI_API_KEY;
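The old default model strings are truncated in this diff view and stay unknown here, but the new values are explicit: both defaults now resolve to the same Gemini 2.5 Flash identifier. A hedged usage sketch; the export name and constructor config shape are assumptions, not shown in this diff:

import { GeminiLLM } from "@superatomai/sdk-node"; // export name assumed

const llm = new GeminiLLM({});  // config shape not visible in this diff
llm.getDefaultModel();      // "gemini/gemini-2.5-flash" as of 0.0.76
llm.getDefaultFastModel();  // "gemini/gemini-2.5-flash" (same model for both)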