@superatomai/sdk-node 0.0.74 → 0.0.75

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -1337,6 +1337,11 @@ declare class LLM {
  private static _groqStream;
  private static _geminiText;
  private static _geminiStream;
+ /**
+ * Recursively strip unsupported JSON Schema properties for Gemini
+ * Gemini doesn't support: additionalProperties, $schema, etc.
+ */
+ private static _cleanSchemaForGemini;
  private static _geminiStreamWithTools;
  private static _openaiText;
  private static _openaiStream;
package/dist/index.d.ts CHANGED
@@ -1337,6 +1337,11 @@ declare class LLM {
  private static _groqStream;
  private static _geminiText;
  private static _geminiStream;
+ /**
+ * Recursively strip unsupported JSON Schema properties for Gemini
+ * Gemini doesn't support: additionalProperties, $schema, etc.
+ */
+ private static _cleanSchemaForGemini;
  private static _geminiStreamWithTools;
  private static _openaiText;
  private static _openaiStream;
package/dist/index.js CHANGED
@@ -5400,14 +5400,97 @@ var LLM = class {
  const genAI = new import_generative_ai.GoogleGenerativeAI(apiKey);
  const systemPrompt = typeof messages.sys === "string" ? messages.sys : messages.sys.map((block) => block.text).join("\n");
  try {
+ if (json && options.partial) {
+ const model2 = genAI.getGenerativeModel({
+ model: modelName,
+ systemInstruction: systemPrompt,
+ generationConfig: {
+ maxOutputTokens: options.maxTokens || 1e3,
+ temperature: options.temperature,
+ topP: options.topP,
+ responseMimeType: "application/json"
+ }
+ });
+ const result2 = await model2.generateContentStream(messages.user);
+ let fullText2 = "";
+ let inputTokens2 = 0;
+ let outputTokens2 = 0;
+ for await (const chunk of result2.stream) {
+ try {
+ const text = chunk.text();
+ if (text) {
+ fullText2 += text;
+ options.partial(text);
+ }
+ } catch (chunkError) {
+ }
+ if (chunk.usageMetadata) {
+ inputTokens2 = chunk.usageMetadata.promptTokenCount || 0;
+ outputTokens2 = chunk.usageMetadata.candidatesTokenCount || 0;
+ }
+ }
+ const durationMs2 = Date.now() - startTime;
+ if (inputTokens2 === 0) {
+ inputTokens2 = Math.ceil((systemPrompt.length + messages.user.length) / 4);
+ }
+ if (outputTokens2 === 0) {
+ outputTokens2 = Math.ceil(fullText2.length / 4);
+ }
+ llmUsageLogger.log({
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+ requestId,
+ provider: "gemini",
+ model: modelName,
+ method: "stream-json-partial",
+ inputTokens: inputTokens2,
+ outputTokens: outputTokens2,
+ totalTokens: inputTokens2 + outputTokens2,
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens2, outputTokens2),
+ durationMs: durationMs2,
+ success: true
+ });
+ return this._parseJSON(fullText2);
+ }
+ if (json) {
+ const model2 = genAI.getGenerativeModel({
+ model: modelName,
+ systemInstruction: systemPrompt,
+ generationConfig: {
+ maxOutputTokens: options.maxTokens || 1e3,
+ temperature: options.temperature,
+ topP: options.topP,
+ responseMimeType: "application/json"
+ }
+ });
+ const result2 = await model2.generateContent(messages.user);
+ const response = result2.response;
+ const fullText2 = response.text();
+ const durationMs2 = Date.now() - startTime;
+ const usage = response.usageMetadata;
+ const inputTokens2 = usage?.promptTokenCount || Math.ceil((systemPrompt.length + messages.user.length) / 4);
+ const outputTokens2 = usage?.candidatesTokenCount || Math.ceil(fullText2.length / 4);
+ llmUsageLogger.log({
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+ requestId,
+ provider: "gemini",
+ model: modelName,
+ method: "stream-json",
+ inputTokens: inputTokens2,
+ outputTokens: outputTokens2,
+ totalTokens: inputTokens2 + outputTokens2,
+ costUSD: llmUsageLogger.calculateCost(modelName, inputTokens2, outputTokens2),
+ durationMs: durationMs2,
+ success: true
+ });
+ return this._parseJSON(fullText2);
+ }
  const model = genAI.getGenerativeModel({
  model: modelName,
  systemInstruction: systemPrompt,
  generationConfig: {
  maxOutputTokens: options.maxTokens || 1e3,
  temperature: options.temperature,
- topP: options.topP,
- responseMimeType: json ? "application/json" : void 0
+ topP: options.topP
  }
  });
  const result = await model.generateContentStream(messages.user);
@@ -5415,12 +5498,15 @@ var LLM = class {
  let inputTokens = 0;
  let outputTokens = 0;
  for await (const chunk of result.stream) {
- const text = chunk.text();
- if (text) {
- fullText += text;
- if (options.partial) {
- options.partial(text);
+ try {
+ const text = chunk.text();
+ if (text) {
+ fullText += text;
+ if (options.partial) {
+ options.partial(text);
+ }
  }
+ } catch (chunkError) {
  }
  if (chunk.usageMetadata) {
  inputTokens = chunk.usageMetadata.promptTokenCount || 0;
@@ -5447,9 +5533,6 @@ var LLM = class {
  durationMs,
  success: true
  });
- if (json) {
- return this._parseJSON(fullText);
- }
  return fullText;
  } catch (error) {
  const durationMs = Date.now() - startTime;
@@ -5470,6 +5553,26 @@ var LLM = class {
  throw error;
  }
  }
+ /**
+ * Recursively strip unsupported JSON Schema properties for Gemini
+ * Gemini doesn't support: additionalProperties, $schema, etc.
+ */
+ static _cleanSchemaForGemini(obj) {
+ if (obj === null || typeof obj !== "object") {
+ return obj;
+ }
+ if (Array.isArray(obj)) {
+ return obj.map((item) => this._cleanSchemaForGemini(item));
+ }
+ const cleaned = {};
+ for (const [key, value] of Object.entries(obj)) {
+ if (key === "additionalProperties" || key === "$schema") {
+ continue;
+ }
+ cleaned[key] = this._cleanSchemaForGemini(value);
+ }
+ return cleaned;
+ }
  static async _geminiStreamWithTools(messages, tools, toolHandler, modelName, options, maxIterations) {
  const methodStartTime = Date.now();
  const apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
@@ -5480,7 +5583,7 @@ var LLM = class {
  description: tool.description,
  parameters: {
  type: import_generative_ai.SchemaType.OBJECT,
- properties: tool.input_schema.properties,
+ properties: this._cleanSchemaForGemini(tool.input_schema.properties),
  required: tool.input_schema.required || []
  }
  }));
@@ -8422,10 +8525,10 @@ var GeminiLLM = class extends BaseLLM {
  super(config);
  }
  getDefaultModel() {
- return "gemini/gemini-3-pro-preview";
+ return "gemini/gemini-2.5-flash";
  }
  getDefaultFastModel() {
- return "gemini/gemini-3-flash-preview";
+ return "gemini/gemini-2.5-flash";
  }
  getDefaultApiKey() {
  return process.env.GEMINI_API_KEY;
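
Two notes on the index.js changes above, each with a standalone TypeScript sketch. First, JSON requests now take dedicated branches: when json is set, responseMimeType: "application/json" is configured up front (previously it was toggled on the shared streaming path via json ? "application/json" : void 0), and a per-chunk read error is skipped instead of failing the whole stream. A minimal sketch of that flow, assuming the same @google/generative-ai SDK the bundle imports; streamJson and onPartial are illustrative names, not SDK exports, and the diff's usage logging is omitted:

    import { GoogleGenerativeAI } from "@google/generative-ai";

    async function streamJson(
      prompt: string,
      onPartial: (text: string) => void
    ): Promise<unknown> {
      const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY ?? "");
      const model = genAI.getGenerativeModel({
        model: "gemini-2.5-flash", // the new default model in 0.0.75
        generationConfig: { responseMimeType: "application/json" },
      });
      const result = await model.generateContentStream(prompt);
      let fullText = "";
      for await (const chunk of result.stream) {
        try {
          const text = chunk.text(); // can throw, e.g. on a blocked chunk
          if (text) {
            fullText += text;
            onPartial(text); // raw JSON fragments, like the diff's options.partial
          }
        } catch {
          // 0.0.75 skips unreadable chunks rather than aborting the stream
        }
      }
      return JSON.parse(fullText); // stand-in for the SDK's private _parseJSON
    }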
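
Second, tool schemas are now routed through the new _cleanSchemaForGemini before being sent as function-declaration parameters, because Gemini's function-calling API rejects JSON Schema keywords such as additionalProperties and $schema, which generators like zod-to-json-schema emit by default. A standalone reconstruction of that recursive cleaning pass; the helper is private in the SDK, so the names and the sample schema below are illustrative:

    type Json = null | boolean | number | string | Json[] | { [key: string]: Json };

    function cleanSchemaForGemini(value: Json): Json {
      if (value === null || typeof value !== "object") return value;
      if (Array.isArray(value)) return value.map(cleanSchemaForGemini);
      const cleaned: { [key: string]: Json } = {};
      for (const [key, child] of Object.entries(value)) {
        // Drop the keywords Gemini's function-calling schema rejects
        if (key === "additionalProperties" || key === "$schema") continue;
        cleaned[key] = cleanSchemaForGemini(child);
      }
      return cleaned;
    }

    // zod-to-json-schema style input: the offending keywords appear at every
    // level, which is why the strip has to recurse into nested schemas.
    const toolSchema: Json = {
      $schema: "http://json-schema.org/draft-07/schema#",
      type: "object",
      additionalProperties: false,
      properties: {
        query: { type: "string", description: "Search text" },
        filters: {
          type: "object",
          additionalProperties: false,
          properties: { limit: { type: "number" } },
        },
      },
      required: ["query"],
    };

    console.log(JSON.stringify(cleanSchemaForGemini(toolSchema), null, 2));
    // -> the same schema with additionalProperties and $schema removed at every depth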