@paean-ai/adk 0.2.24 → 0.2.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -73,11 +73,13 @@ class Gemini extends BaseLlm {
      this.headers = headers;
      this.isGemini3Preview = isGemini3PreviewModel(model);
      const canReadEnv = typeof process === "object";
+     const aiStudioApiKey = canReadEnv ? process.env["AI_STUDIO_API_KEY"] : void 0;
+     const useAiStudioMode = !!aiStudioApiKey;
      this.apiEndpoint = apiEndpoint;
      if (!this.apiEndpoint && canReadEnv) {
        this.apiEndpoint = process.env["GEMINI_API_ENDPOINT"];
      }
-     if (!this.apiEndpoint && this.isGemini3Preview) {
+     if (!this.apiEndpoint && this.isGemini3Preview && !useAiStudioMode) {
        this.apiEndpoint = GEMINI3_PREVIEW_API_ENDPOINT;
        logger.info("Using Gemini 3 preview endpoint: ".concat(this.apiEndpoint));
      }
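The two added lines read AI_STUDIO_API_KEY once in the constructor and derive a mode flag from it. A minimal TypeScript sketch of the endpoint precedence this hunk produces (GEMINI3_PREVIEW_API_ENDPOINT is the package's own constant; the free-standing variables are illustrative, and the real code additionally guards process.env access behind canReadEnv):

    // Endpoint precedence as of 0.2.26:
    // 1. an explicitly passed apiEndpoint
    // 2. the GEMINI_API_ENDPOINT environment variable
    // 3. the Gemini 3 preview endpoint, unless AI Studio mode is active
    const resolvedEndpoint: string | undefined =
      apiEndpoint ||
      process.env["GEMINI_API_ENDPOINT"] ||
      (isGemini3Preview && !process.env["AI_STUDIO_API_KEY"]
        ? GEMINI3_PREVIEW_API_ENDPOINT
        : undefined);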
@@ -88,6 +90,15 @@ class Gemini extends BaseLlm {
          useVertexAI = vertexAIfromEnv.toLowerCase() === "true" || vertexAIfromEnv === "1";
        }
      }
+     if (useAiStudioMode) {
+       if (useVertexAI) {
+         logger.info(
+           "AI_STUDIO_API_KEY set \u2014 overriding Vertex AI mode to use AI Studio (generativelanguage.googleapis.com)"
+         );
+       }
+       useVertexAI = false;
+       this.apiKey = aiStudioApiKey;
+     }
      if (this.isGemini3Preview && useVertexAI) {
        const availableApiKey = apiKey || (canReadEnv ? process.env["GOOGLE_GENAI_API_KEY"] || process.env["GEMINI_API_KEY"] : void 0);
        if (availableApiKey) {
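If this reading is right, AI_STUDIO_API_KEY now takes precedence over GOOGLE_GENAI_USE_VERTEXAI: Vertex mode is switched off and the AI Studio key becomes the client key. A hypothetical usage sketch (the constructor call and model id are illustrative, not the package's documented signature):

    // Route requests to AI Studio (generativelanguage.googleapis.com)
    // even in an environment otherwise configured for Vertex AI:
    process.env["GOOGLE_GENAI_USE_VERTEXAI"] = "true"; // would normally select Vertex
    process.env["AI_STUDIO_API_KEY"] = "<ai-studio-key>"; // wins as of 0.2.26
    const llm = new Gemini("gemini-3-pro-preview"); // illustrative call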
@@ -140,7 +151,7 @@ class Gemini extends BaseLlm {
   */
  generateContentAsync(llmRequest, stream = false) {
    return __asyncGenerator(this, null, function* () {
-       var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+       var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
        this.preprocessRequest(llmRequest);
        this.maybeAppendUserContent(llmRequest);
        logger.info(
@@ -160,6 +171,7 @@ class Gemini extends BaseLlm {
        let text = "";
        let usageMetadata;
        let lastResponse;
+       let pendingFCResponse = null;
        try {
          for (var iter = __forAwait(streamResult), more, temp, error; more = !(temp = yield new __await(iter.next())).done; more = false) {
            const response = temp.value;
@@ -190,7 +202,9 @@ class Gemini extends BaseLlm {
            } else {
              text += firstPart.text;
            }
-           llmResponse.partial = true;
+           if (!hasFunctionCalls) {
+             llmResponse.partial = true;
+           }
            if (this.isGemini3Preview && hasFunctionCalls) {
              thoughtText = "";
              thoughtSignature = void 0;
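Presumably partial is withheld from function-call chunks because they are about to be buffered and merged (next hunks); flagging them partial would let a consumer act on half a tool call. A hypothetical consumer illustrating the contract this implies (helper names invented for the example):

    for await (const resp of llm.generateContentAsync(request, true)) {
      if (resp.partial) {
        appendStreamedText(resp); // partial responses now carry text only
      } else {
        handleCompleteResponse(resp); // includes merged function-call responses
      }
    }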
@@ -260,14 +274,25 @@ class Gemini extends BaseLlm {
                  }
                }
              }
-             const partsWithSig = llmResponse.content.parts.filter(
-               (p) => p.thoughtSignature
-             ).length;
-             if (partsWithSig === 0) {
-               logger.warn(
-                 "[Gemini3] No thoughtSignature on function call parts \u2014 may cause 400 on next request"
+           }
+           if (hasFunctionCalls) {
+             if (pendingFCResponse && ((_i = pendingFCResponse.content) == null ? void 0 : _i.parts)) {
+               const newParts = (((_j = llmResponse.content) == null ? void 0 : _j.parts) || []).filter(
+                 (p) => p.functionCall || p.thoughtSignature
                );
+               pendingFCResponse.content.parts.push(...newParts);
+               pendingFCResponse.usageMetadata = llmResponse.usageMetadata;
+             } else {
+               pendingFCResponse = llmResponse;
+             }
+             continue;
+           }
+           if (pendingFCResponse) {
+             if (!((_l = (_k = llmResponse.content) == null ? void 0 : _k.parts) == null ? void 0 : _l.length)) {
+               continue;
              }
+             yield pendingFCResponse;
+             pendingFCResponse = null;
            }
            yield llmResponse;
          }
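The heart of the streaming fix, reduced to a self-contained TypeScript sketch. The Part and LlmResponse shapes and the hasFunctionCalls helper are assumptions made for the example, not the package's exported API:

    interface Part {
      text?: string;
      functionCall?: object;
      thoughtSignature?: string;
    }
    interface LlmResponse {
      content?: { parts?: Part[] };
      usageMetadata?: unknown;
      partial?: boolean;
    }
    const hasFunctionCalls = (r: LlmResponse): boolean =>
      (r.content?.parts ?? []).some((p) => p.functionCall !== undefined);

    // Buffer chunks that carry function calls, merging their functionCall and
    // thoughtSignature parts into one response; flush the merged response only
    // when a meaningful non-function-call chunk (or the end of the stream) arrives.
    async function* mergeFunctionCallChunks(
      stream: AsyncIterable<LlmResponse>
    ): AsyncGenerator<LlmResponse> {
      let pending: LlmResponse | null = null;
      for await (const chunk of stream) {
        if (hasFunctionCalls(chunk)) {
          if (pending?.content?.parts) {
            const newParts = (chunk.content?.parts ?? []).filter(
              (p) => p.functionCall || p.thoughtSignature
            );
            pending.content.parts.push(...newParts);
            pending.usageMetadata = chunk.usageMetadata;
          } else {
            pending = chunk;
          }
          continue; // keep buffering until a non-function-call chunk arrives
        }
        if (pending) {
          if (!chunk.content?.parts?.length) continue; // skip empty trailing chunks
          yield pending; // flush the merged function calls first
          pending = null;
        }
        yield chunk;
      }
      if (pending) yield pending; // stream ended on function calls (see next hunk)
    }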
@@ -281,7 +306,28 @@ class Gemini extends BaseLlm {
            throw error[0];
          }
        }
-       if ((text || thoughtText) && ((_j = (_i = lastResponse == null ? void 0 : lastResponse.candidates) == null ? void 0 : _i[0]) == null ? void 0 : _j.finishReason) === FinishReason.STOP) {
+       if (pendingFCResponse) {
+         if (this.isGemini3Preview && ((_m = pendingFCResponse.content) == null ? void 0 : _m.parts)) {
+           const partsWithSig = pendingFCResponse.content.parts.filter(
+             (p) => p.thoughtSignature
+           ).length;
+           if (partsWithSig === 0 && thoughtSignature) {
+             for (const part of pendingFCResponse.content.parts) {
+               if (part.functionCall) {
+                 part.thoughtSignature = thoughtSignature;
+                 break;
+               }
+             }
+           } else if (partsWithSig === 0) {
+             logger.warn(
+               "[Gemini3] No thoughtSignature on merged function call parts \u2014 may cause 400 on next request"
+             );
+           }
+         }
+         yield pendingFCResponse;
+         pendingFCResponse = null;
+       }
+       if ((text || thoughtText) && ((_o = (_n = lastResponse == null ? void 0 : lastResponse.candidates) == null ? void 0 : _n[0]) == null ? void 0 : _o.finishReason) === FinishReason.STOP) {
          const parts = [];
          if (thoughtText) {
            const thoughtPart = { text: thoughtText, thought: true };
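Before the merged response is flushed at the end of the stream, the Gemini 3 path now backfills a thoughtSignature that was captured elsewhere in the stream, which is presumably what the old warning's "400 on next request" referred to. The essential step, continuing the sketch above (pending and thoughtSignature reuse names from that sketch and from the diff):

    // If none of the merged parts carries a thoughtSignature but one was seen
    // while streaming, attach it to the first functionCall part before yielding.
    const parts = pending?.content?.parts ?? [];
    if (thoughtSignature && !parts.some((p) => p.thoughtSignature)) {
      const fcPart = parts.find((p) => p.functionCall);
      if (fcPart) fcPart.thoughtSignature = thoughtSignature;
    }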
@@ -303,12 +349,12 @@ class Gemini extends BaseLlm {
          }
        } else {
          const response = yield new __await(this.apiClient.models.generateContent({
-           model: (_k = llmRequest.model) != null ? _k : this.model,
+           model: (_p = llmRequest.model) != null ? _p : this.model,
            contents: llmRequest.contents,
            config: llmRequest.config
          }));
          const llmResponse = createLlmResponse(response);
-         if (this.isGemini3Preview && ((_l = llmResponse.content) == null ? void 0 : _l.parts)) {
+         if (this.isGemini3Preview && ((_q = llmResponse.content) == null ? void 0 : _q.parts)) {
            let thoughtSig;
            let hasThoughtPartWithSignature = false;
            for (const part of llmResponse.content.parts) {
@@ -3,6 +3,7 @@
   * Copyright 2025 Google LLC
   * SPDX-License-Identifier: Apache-2.0
   */
+ import { FinishReason } from "@google/genai";
  function createLlmResponse(response) {
    var _a;
    const usageMetadata = response.usageMetadata;
@@ -16,6 +17,12 @@ function createLlmResponse(response) {
        finishReason: candidate.finishReason
      };
    }
+   if (candidate.finishReason === FinishReason.STOP) {
+     return {
+       usageMetadata,
+       finishReason: candidate.finishReason
+     };
+   }
    return {
      errorCode: candidate.finishReason,
      errorMessage: candidate.finishMessage,
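As far as this hunk shows, a candidate that finishes with STOP but carries no content is now mapped to a plain response instead of falling through to the error branch below. A sketch of the behavior change (the response literal is illustrative, and createLlmResponse is the module-internal helper shown here):

    import { FinishReason } from "@google/genai";

    const llmResponse = createLlmResponse({
      usageMetadata: { totalTokenCount: 42 }, // illustrative numbers
      candidates: [{ finishReason: FinishReason.STOP }], // no content parts
    });
    // before 0.2.26: { errorCode: "STOP", errorMessage: undefined, ... }
    // as of 0.2.26:  { usageMetadata, finishReason: "STOP" } with no error fields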
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@paean-ai/adk",
-   "version": "0.2.24",
+   "version": "0.2.26",
    "description": "Google ADK JS (paean-ai fork with streaming fixes)",
    "author": "paean-ai",
    "license": "Apache-2.0",