@paean-ai/adk 0.2.24 → 0.2.26

@@ -65,11 +65,13 @@ class Gemini extends import_base_llm.BaseLlm {
  this.headers = headers;
  this.isGemini3Preview = (0, import_model_name.isGemini3PreviewModel)(model);
  const canReadEnv = typeof process === "object";
+ const aiStudioApiKey = canReadEnv ? process.env["AI_STUDIO_API_KEY"] : void 0;
+ const useAiStudioMode = !!aiStudioApiKey;
  this.apiEndpoint = apiEndpoint;
  if (!this.apiEndpoint && canReadEnv) {
  this.apiEndpoint = process.env["GEMINI_API_ENDPOINT"];
  }
- if (!this.apiEndpoint && this.isGemini3Preview) {
+ if (!this.apiEndpoint && this.isGemini3Preview && !useAiStudioMode) {
  this.apiEndpoint = GEMINI3_PREVIEW_API_ENDPOINT;
  import_logger.logger.info(`Using Gemini 3 preview endpoint: ${this.apiEndpoint}`);
  }
@@ -80,6 +82,15 @@ class Gemini extends import_base_llm.BaseLlm {
  useVertexAI = vertexAIfromEnv.toLowerCase() === "true" || vertexAIfromEnv === "1";
  }
  }
+ if (useAiStudioMode) {
+ if (useVertexAI) {
+ import_logger.logger.info(
+ "AI_STUDIO_API_KEY set \u2014 overriding Vertex AI mode to use AI Studio (generativelanguage.googleapis.com)"
+ );
+ }
+ useVertexAI = false;
+ this.apiKey = aiStudioApiKey;
+ }
  if (this.isGemini3Preview && useVertexAI) {
  const availableApiKey = apiKey || (canReadEnv ? process.env["GOOGLE_GENAI_API_KEY"] || process.env["GEMINI_API_KEY"] : void 0);
  if (availableApiKey) {
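
Net effect of the two hunks above: if AI_STUDIO_API_KEY is set in the environment, the constructor skips the Gemini 3 preview endpoint override, forces Vertex AI mode off (logging a notice if Vertex had been requested), and adopts the AI Studio key. A condensed restatement as a standalone sketch; the helper and its parameter names are illustrative, not exports of @paean-ai/adk:

    // Sketch only; mirrors the constructor logic above, not part of the package API.
    function resolveGeminiMode({ apiKey, apiEndpoint, isGemini3Preview, useVertexAI }, env, gemini3PreviewEndpoint) {
      const aiStudioApiKey = env.AI_STUDIO_API_KEY;
      let endpoint = apiEndpoint ?? env.GEMINI_API_ENDPOINT;
      if (!endpoint && isGemini3Preview && !aiStudioApiKey) {
        endpoint = gemini3PreviewEndpoint; // GEMINI3_PREVIEW_API_ENDPOINT in the bundle
      }
      if (aiStudioApiKey) {
        // AI Studio wins over Vertex AI, even when the env toggle requested Vertex.
        return { useVertexAI: false, apiKey: aiStudioApiKey, apiEndpoint: endpoint };
      }
      return { useVertexAI, apiKey, apiEndpoint: endpoint };
    }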
@@ -131,7 +142,7 @@ class Gemini extends import_base_llm.BaseLlm {
  * @yields LlmResponse: The model response.
  */
  async *generateContentAsync(llmRequest, stream = false) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
  this.preprocessRequest(llmRequest);
  this.maybeAppendUserContent(llmRequest);
  import_logger.logger.info(
@@ -154,6 +165,7 @@ class Gemini extends import_base_llm.BaseLlm {
  let text = "";
  let usageMetadata;
  let lastResponse;
+ let pendingFCResponse = null;
  for await (const response of streamResult) {
  lastResponse = response;
  const llmResponse = (0, import_llm_response.createLlmResponse)(response);
@@ -182,7 +194,9 @@ class Gemini extends import_base_llm.BaseLlm {
  } else {
  text += firstPart.text;
  }
- llmResponse.partial = true;
+ if (!hasFunctionCalls) {
+ llmResponse.partial = true;
+ }
  if (this.isGemini3Preview && hasFunctionCalls) {
  thoughtText = "";
  thoughtSignature = void 0;
@@ -252,18 +266,50 @@ class Gemini extends import_base_llm.BaseLlm {
  }
  }
  }
- const partsWithSig = llmResponse.content.parts.filter(
+ }
+ if (hasFunctionCalls) {
+ if (pendingFCResponse && ((_i = pendingFCResponse.content) == null ? void 0 : _i.parts)) {
+ const newParts = (((_j = llmResponse.content) == null ? void 0 : _j.parts) || []).filter(
+ (p) => p.functionCall || p.thoughtSignature
+ );
+ pendingFCResponse.content.parts.push(...newParts);
+ pendingFCResponse.usageMetadata = llmResponse.usageMetadata;
+ } else {
+ pendingFCResponse = llmResponse;
+ }
+ continue;
+ }
+ if (pendingFCResponse) {
+ if (!((_l = (_k = llmResponse.content) == null ? void 0 : _k.parts) == null ? void 0 : _l.length)) {
+ continue;
+ }
+ yield pendingFCResponse;
+ pendingFCResponse = null;
+ }
+ yield llmResponse;
+ }
+ if (pendingFCResponse) {
+ if (this.isGemini3Preview && ((_m = pendingFCResponse.content) == null ? void 0 : _m.parts)) {
+ const partsWithSig = pendingFCResponse.content.parts.filter(
  (p) => p.thoughtSignature
  ).length;
- if (partsWithSig === 0) {
+ if (partsWithSig === 0 && thoughtSignature) {
+ for (const part of pendingFCResponse.content.parts) {
+ if (part.functionCall) {
+ part.thoughtSignature = thoughtSignature;
+ break;
+ }
+ }
+ } else if (partsWithSig === 0) {
  import_logger.logger.warn(
- `[Gemini3] No thoughtSignature on function call parts \u2014 may cause 400 on next request`
+ `[Gemini3] No thoughtSignature on merged function call parts \u2014 may cause 400 on next request`
  );
  }
  }
- yield llmResponse;
+ yield pendingFCResponse;
+ pendingFCResponse = null;
  }
- if ((text || thoughtText) && ((_j = (_i = lastResponse == null ? void 0 : lastResponse.candidates) == null ? void 0 : _i[0]) == null ? void 0 : _j.finishReason) === import_genai.FinishReason.STOP) {
+ if ((text || thoughtText) && ((_o = (_n = lastResponse == null ? void 0 : lastResponse.candidates) == null ? void 0 : _n[0]) == null ? void 0 : _o.finishReason) === import_genai.FinishReason.STOP) {
  const parts = [];
  if (thoughtText) {
  const thoughtPart = { text: thoughtText, thought: true };
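
The bulk of this hunk buffers streamed function calls: chunks carrying functionCall parts are no longer yielded (or marked partial) as they arrive; their call and thoughtSignature parts are merged into a single pendingFCResponse, which is flushed before the next substantive chunk or after the loop ends, where a captured thoughtSignature is stamped onto the first functionCall part if none survived the merge. The merge step, restated as a standalone sketch with illustrative names:

    // Sketch of the pendingFCResponse accumulation above; illustrative only.
    function mergeFunctionCallChunk(pending, chunk) {
      if (!pending || !pending.content || !pending.content.parts) {
        return chunk; // first function-call chunk becomes the buffer
      }
      const newParts = (chunk.content?.parts ?? []).filter(
        (p) => p.functionCall || p.thoughtSignature
      );
      pending.content.parts.push(...newParts); // accumulate calls and signatures
      pending.usageMetadata = chunk.usageMetadata; // keep the latest usage totals
      return pending;
    }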
@@ -285,12 +331,12 @@ class Gemini extends import_base_llm.BaseLlm {
  }
  } else {
  const response = await this.apiClient.models.generateContent({
- model: (_k = llmRequest.model) != null ? _k : this.model,
+ model: (_p = llmRequest.model) != null ? _p : this.model,
  contents: llmRequest.contents,
  config: llmRequest.config
  });
  const llmResponse = (0, import_llm_response.createLlmResponse)(response);
- if (this.isGemini3Preview && ((_l = llmResponse.content) == null ? void 0 : _l.parts)) {
+ if (this.isGemini3Preview && ((_q = llmResponse.content) == null ? void 0 : _q.parts)) {
  let thoughtSig;
  let hasThoughtPartWithSignature = false;
  for (const part of llmResponse.content.parts) {
@@ -27,6 +27,7 @@ __export(llm_response_exports, {
  createLlmResponse: () => createLlmResponse
  });
  module.exports = __toCommonJS(llm_response_exports);
+ var import_genai = require("@google/genai");
  /**
  * @license
  * Copyright 2025 Google LLC
@@ -45,6 +46,12 @@ function createLlmResponse(response) {
  finishReason: candidate.finishReason
  };
  }
+ if (candidate.finishReason === import_genai.FinishReason.STOP) {
+ return {
+ usageMetadata,
+ finishReason: candidate.finishReason
+ };
+ }
  return {
  errorCode: candidate.finishReason,
  errorMessage: candidate.finishMessage,
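
Before this change, a candidate that finished cleanly (STOP) but carried no content fell through to the error branch and surfaced as errorCode: "STOP". The new early return treats it as a normal, content-less response. A condensed sketch, with the surrounding branches abridged from context:

    // Condensed sketch of createLlmResponse's candidate handling; illustrative only.
    const { FinishReason } = require("@google/genai");

    function candidateToLlmResponse(candidate, usageMetadata) {
      if (candidate.content) {
        return { content: candidate.content, usageMetadata, finishReason: candidate.finishReason };
      }
      if (candidate.finishReason === FinishReason.STOP) {
        return { usageMetadata, finishReason: candidate.finishReason }; // clean stop, no error
      }
      return { errorCode: candidate.finishReason, errorMessage: candidate.finishMessage };
    }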
@@ -89,6 +89,9 @@ function generateAuthEvent(invocationContext, functionResponseEvent) {
  longRunningToolIds.add(requestEucFunctionCall.id);
  parts.push({ functionCall: requestEucFunctionCall });
  }
+ if (parts.length === 0) {
+ return void 0;
+ }
  return createEvent({
  invocationId: invocationContext.invocationId,
  author: invocationContext.agent.name,
@@ -130,6 +133,9 @@ function generateRequestConfirmationEvent({
  longRunningToolIds.add(requestConfirmationFunctionCall.id);
  parts.push({ functionCall: requestConfirmationFunctionCall });
  }
+ if (parts.length === 0) {
+ return void 0;
+ }
  return createEvent({
  invocationId: invocationContext.invocationId,
  author: invocationContext.agent.name,
@@ -193,8 +199,9 @@ async function handleFunctionCallList({
  }
  );
  if (!toolAndContext) {
+ const argsPreview = functionCall.args ? JSON.stringify(functionCall.args).substring(0, 300) : "(none)";
  logger.warn(
- `Function "${functionCall.name}" not found in toolsDict (${Object.keys(toolsDict).length} tools registered).`
+ `Function "${functionCall.name}" not found in toolsDict (${Object.keys(toolsDict).length} tools registered). Args: ${argsPreview}`
  );
  const errorResponseEvent = createEvent({
  invocationId: invocationContext.invocationId,
@@ -243,6 +250,10 @@ async function handleFunctionCallList({
  toolContext
  );
  } catch (e) {
+ const argsPreview = JSON.stringify(functionArgs).substring(0, 500);
+ logger.error(
+ `Tool execution error: "${tool.name}" threw ${e instanceof Error ? e.message : String(e)}. Args: ${argsPreview}`
+ );
  if (e instanceof Error) {
  const onToolErrorResponse = await invocationContext.pluginManager.runOnToolErrorCallback(
  {
@@ -360,6 +371,7 @@ function mergeParallelFunctionResponseEvents(functionResponseEvents) {
  const actionsList = functionResponseEvents.map((event) => event.actions || {});
  const mergedActions = mergeEventActions(actionsList);
  return createEvent({
+ invocationId: baseEvent.invocationId,
  author: baseEvent.author,
  branch: baseEvent.branch,
  content: { role: "user", parts: mergedParts },
@@ -899,12 +899,12 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
  consecutiveErrors++;
  if (consecutiveErrors <= _LlmAgent.MAX_AGENT_LOOP_ERROR_RETRIES) {
  logger.warn(
- `[runAsyncImpl] Error event (${lastEvent.errorCode}), retrying agent loop (${consecutiveErrors}/${_LlmAgent.MAX_AGENT_LOOP_ERROR_RETRIES})`
+ `[runAsyncImpl] Error event (${lastEvent.errorCode}: ${lastEvent.errorMessage || "no message"}), retrying agent loop (${consecutiveErrors}/${_LlmAgent.MAX_AGENT_LOOP_ERROR_RETRIES})`
  );
  continue;
  }
  logger.error(
- `[runAsyncImpl] Max agent-loop error retries exhausted for ${lastEvent.errorCode}`
+ `[runAsyncImpl] Max agent-loop error retries exhausted for ${lastEvent.errorCode}: ${lastEvent.errorMessage || "no message"}`
  );
  break;
  }
@@ -993,7 +993,7 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
  }
  }
  async *postprocess(invocationContext, llmRequest, llmResponse, modelResponseEvent) {
- var _a, _b;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  for (const processor of this.responseProcessors) {
  for await (const event of processor.runAsync(invocationContext, llmResponse)) {
  yield event;
@@ -1002,6 +1002,28 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
  if (!llmResponse.content && !llmResponse.errorCode && !llmResponse.interrupted) {
  return;
  }
+ if (llmResponse.content && !llmResponse.errorCode) {
+ if (!llmResponse.content.parts || llmResponse.content.parts.length === 0) {
+ logger.debug(
+ `[postprocess] Skipping LLM response with no parts (role=${llmResponse.content.role})`
+ );
+ return;
+ }
+ const allEmpty = llmResponse.content.parts.every(
+ (p) => {
+ if (p.functionCall || p.functionResponse || p.executableCode || p.codeExecutionResult) return false;
+ if (p.inlineData || p.fileData) return false;
+ if ("text" in p && typeof p.text === "string" && p.text.length > 0) return false;
+ return true;
+ }
+ );
+ if (allEmpty) {
+ logger.debug(
+ `[postprocess] Skipping empty-content LLM response (${llmResponse.content.parts.length} empty parts)`
+ );
+ return;
+ }
+ }
  const mergedEvent = createEvent({
  ...modelResponseEvent,
  ...llmResponse
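
postprocess now filters out responses whose parts are all effectively empty: no function call or response, no executable code or its result, no inline/file data, and no non-empty text. The predicate, restated as a standalone helper (illustrative only):

    // Standalone restatement of the new empty-part check above; illustrative only.
    function isEmptyPart(p) {
      if (p.functionCall || p.functionResponse || p.executableCode || p.codeExecutionResult) return false;
      if (p.inlineData || p.fileData) return false;
      if ("text" in p && typeof p.text === "string" && p.text.length > 0) return false;
      return true;
    }

    // A response is dropped when it has no parts, or when every part is empty:
    const shouldSkip = (parts) => !parts || parts.length === 0 || parts.every(isEmptyPart);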
@@ -1023,8 +1045,11 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
  }
  }
  }
+ logger.debug(
+ `[postprocess] Yielding mergedEvent: role=${(_b = mergedEvent.content) == null ? void 0 : _b.role}, parts=${(_d = (_c = mergedEvent.content) == null ? void 0 : _c.parts) == null ? void 0 : _d.length}, hasFCs=${((_e = getFunctionCalls(mergedEvent)) == null ? void 0 : _e.length) || 0}, partial=${mergedEvent.partial}`
+ );
  yield mergedEvent;
- if (!((_b = getFunctionCalls(mergedEvent)) == null ? void 0 : _b.length)) {
+ if (!((_f = getFunctionCalls(mergedEvent)) == null ? void 0 : _f.length)) {
  return;
  }
  const functionResponseEvent = await handleFunctionCallsAsync({
@@ -1049,6 +1074,9 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
  if (toolConfirmationEvent) {
  yield toolConfirmationEvent;
  }
+ logger.debug(
+ `[postprocess] Yielding functionResponseEvent: role=${(_g = functionResponseEvent.content) == null ? void 0 : _g.role}, parts=${(_i = (_h = functionResponseEvent.content) == null ? void 0 : _h.parts) == null ? void 0 : _i.length}`
+ );
  yield functionResponseEvent;
  const nextAgentName = functionResponseEvent.actions.transferToAgent;
  if (nextAgentName) {
@@ -1130,7 +1158,7 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
  if (llmResponse.errorCode && _LlmAgent.LLM_RETRYABLE_ERROR_CODES.has(llmResponse.errorCode) && !contentYielded && attempt < maxRetries) {
  shouldRetry = true;
  logger.warn(
- `[callLlmAsync] Transient LLM error: ${llmResponse.errorCode}, usage: ${JSON.stringify(llmResponse.usageMetadata)}`
+ `[callLlmAsync] Transient LLM error: ${llmResponse.errorCode}${llmResponse.errorMessage ? ": " + llmResponse.errorMessage : ""}, finishReason: ${llmResponse.finishReason || "none"}, usage: ${JSON.stringify(llmResponse.usageMetadata)}`
  );
  break;
  }