@paean-ai/adk 0.2.24 → 0.2.25

@@ -131,7 +131,7 @@ class Gemini extends import_base_llm.BaseLlm {
  * @yields LlmResponse: The model response.
  */
  async *generateContentAsync(llmRequest, stream = false) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
  this.preprocessRequest(llmRequest);
  this.maybeAppendUserContent(llmRequest);
  import_logger.logger.info(
@@ -154,6 +154,7 @@ class Gemini extends import_base_llm.BaseLlm {
  let text = "";
  let usageMetadata;
  let lastResponse;
+ let pendingFCResponse = null;
  for await (const response of streamResult) {
  lastResponse = response;
  const llmResponse = (0, import_llm_response.createLlmResponse)(response);
@@ -182,7 +183,9 @@ class Gemini extends import_base_llm.BaseLlm {
  } else {
  text += firstPart.text;
  }
- llmResponse.partial = true;
+ if (!hasFunctionCalls) {
+ llmResponse.partial = true;
+ }
  if (this.isGemini3Preview && hasFunctionCalls) {
  thoughtText = "";
  thoughtSignature = void 0;
@@ -252,18 +255,43 @@ class Gemini extends import_base_llm.BaseLlm {
  }
  }
  }
- const partsWithSig = llmResponse.content.parts.filter(
+ }
+ if (hasFunctionCalls) {
+ if (pendingFCResponse && ((_i = pendingFCResponse.content) == null ? void 0 : _i.parts)) {
+ const newFCParts = (((_j = llmResponse.content) == null ? void 0 : _j.parts) || []).filter(
+ (p) => p.functionCall
+ );
+ pendingFCResponse.content.parts.push(...newFCParts);
+ pendingFCResponse.usageMetadata = llmResponse.usageMetadata;
+ } else {
+ pendingFCResponse = llmResponse;
+ }
+ continue;
+ }
+ if (pendingFCResponse) {
+ if (!((_l = (_k = llmResponse.content) == null ? void 0 : _k.parts) == null ? void 0 : _l.length)) {
+ continue;
+ }
+ yield pendingFCResponse;
+ pendingFCResponse = null;
+ }
+ yield llmResponse;
+ }
+ if (pendingFCResponse) {
+ if (this.isGemini3Preview && ((_m = pendingFCResponse.content) == null ? void 0 : _m.parts)) {
+ const partsWithSig = pendingFCResponse.content.parts.filter(
  (p) => p.thoughtSignature
  ).length;
  if (partsWithSig === 0) {
  import_logger.logger.warn(
- `[Gemini3] No thoughtSignature on function call parts \u2014 may cause 400 on next request`
+ `[Gemini3] No thoughtSignature on merged function call parts \u2014 may cause 400 on next request`
  );
  }
  }
- yield llmResponse;
+ yield pendingFCResponse;
+ pendingFCResponse = null;
  }
- if ((text || thoughtText) && ((_j = (_i = lastResponse == null ? void 0 : lastResponse.candidates) == null ? void 0 : _i[0]) == null ? void 0 : _j.finishReason) === import_genai.FinishReason.STOP) {
+ if ((text || thoughtText) && ((_o = (_n = lastResponse == null ? void 0 : lastResponse.candidates) == null ? void 0 : _n[0]) == null ? void 0 : _o.finishReason) === import_genai.FinishReason.STOP) {
  const parts = [];
  if (thoughtText) {
  const thoughtPart = { text: thoughtText, thought: true };
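
The hunk above changes streaming so that function-call chunks are accumulated in pendingFCResponse and merged into a single response instead of being yielded one chunk at a time; note that such chunks are also no longer marked partial. A minimal sketch of that buffering pattern, with simplified stand-in types rather than the package's actual LlmResponse/Part exports:

    // Stand-in types; the real shapes come from the package, not this sketch.
    interface Part { text?: string; functionCall?: { name: string; args?: unknown } }
    interface LlmResponse { content?: { parts?: Part[] }; usageMetadata?: unknown }

    async function* bufferFunctionCalls(
      stream: AsyncIterable<LlmResponse>
    ): AsyncGenerator<LlmResponse> {
      let pending: LlmResponse | null = null;
      for await (const chunk of stream) {
        const hasFC = chunk.content?.parts?.some((p) => p.functionCall) ?? false;
        if (hasFC) {
          if (pending?.content?.parts) {
            // Later function-call chunks are merged into the buffered response.
            const fcParts = (chunk.content?.parts ?? []).filter((p) => p.functionCall);
            pending.content.parts.push(...fcParts);
            pending.usageMetadata = chunk.usageMetadata;
          } else {
            pending = chunk;
          }
          continue; // hold function calls until the stream moves on or ends
        }
        if (pending) {
          if (!chunk.content?.parts?.length) continue; // skip empty trailing chunks
          yield pending; // flush the merged function calls first
          pending = null;
        }
        yield chunk;
      }
      if (pending) yield pending; // stream ended on a function call: flush it
    }

The apparent intent, hedging since the diff shows only the mechanism, is that a tool invocation split across several stream chunks reaches consumers as one complete event rather than as fragments.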
@@ -285,12 +313,12 @@ class Gemini extends import_base_llm.BaseLlm {
  }
  } else {
  const response = await this.apiClient.models.generateContent({
- model: (_k = llmRequest.model) != null ? _k : this.model,
+ model: (_p = llmRequest.model) != null ? _p : this.model,
  contents: llmRequest.contents,
  config: llmRequest.config
  });
  const llmResponse = (0, import_llm_response.createLlmResponse)(response);
- if (this.isGemini3Preview && ((_l = llmResponse.content) == null ? void 0 : _l.parts)) {
+ if (this.isGemini3Preview && ((_q = llmResponse.content) == null ? void 0 : _q.parts)) {
  let thoughtSig;
  let hasThoughtPartWithSignature = false;
  for (const part of llmResponse.content.parts) {
@@ -89,6 +89,9 @@ function generateAuthEvent(invocationContext, functionResponseEvent) {
  longRunningToolIds.add(requestEucFunctionCall.id);
  parts.push({ functionCall: requestEucFunctionCall });
  }
+ if (parts.length === 0) {
+ return void 0;
+ }
  return createEvent({
  invocationId: invocationContext.invocationId,
  author: invocationContext.agent.name,
@@ -130,6 +133,9 @@ function generateRequestConfirmationEvent({
  longRunningToolIds.add(requestConfirmationFunctionCall.id);
  parts.push({ functionCall: requestConfirmationFunctionCall });
  }
+ if (parts.length === 0) {
+ return void 0;
+ }
  return createEvent({
  invocationId: invocationContext.invocationId,
  author: invocationContext.agent.name,
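
Both generateAuthEvent and generateRequestConfirmationEvent now return undefined instead of building an event whose parts array is empty, so call sites presumably guard the return value before emitting. A self-contained sketch of the guard pattern (the types and buildFunctionCallEvent helper are illustrative, not the package's API):

    // Simplified stand-ins for the package's Part/event shapes.
    interface Part { functionCall?: { id?: string; name: string } }
    interface Event { parts: Part[] }

    function buildFunctionCallEvent(parts: Part[]): Event | undefined {
      if (parts.length === 0) {
        return undefined; // nothing requested auth/confirmation: emit no event
      }
      return { parts };
    }

    // Callers then guard before emitting:
    const maybeEvent = buildFunctionCallEvent([]);
    if (maybeEvent !== undefined) {
      // ... enqueue or yield the event
    }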
@@ -193,8 +199,9 @@ async function handleFunctionCallList({
  }
  );
  if (!toolAndContext) {
+ const argsPreview = functionCall.args ? JSON.stringify(functionCall.args).substring(0, 300) : "(none)";
  logger.warn(
- `Function "${functionCall.name}" not found in toolsDict (${Object.keys(toolsDict).length} tools registered).`
+ `Function "${functionCall.name}" not found in toolsDict (${Object.keys(toolsDict).length} tools registered). Args: ${argsPreview}`
  );
  const errorResponseEvent = createEvent({
  invocationId: invocationContext.invocationId,
@@ -243,6 +250,10 @@ async function handleFunctionCallList({
  toolContext
  );
  } catch (e) {
+ const argsPreview = JSON.stringify(functionArgs).substring(0, 500);
+ logger.error(
+ `Tool execution error: "${tool.name}" threw ${e instanceof Error ? e.message : String(e)}. Args: ${argsPreview}`
+ );
  if (e instanceof Error) {
  const onToolErrorResponse = await invocationContext.pluginManager.runOnToolErrorCallback(
  {
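
Both new log sites cap the serialized arguments (300 characters for the missing-tool warning, 500 for the execution error) so an oversized payload cannot flood the log. A standalone sketch of that truncation; the try/catch is a safeguard this sketch adds, not something the diff itself does:

    // Capped args preview, mirroring the diff's 300/500-char limits.
    function argsPreview(args: unknown, maxLen = 500): string {
      if (args === undefined || args === null) return "(none)";
      try {
        return JSON.stringify(args).substring(0, maxLen);
      } catch {
        return "(unserializable)"; // e.g. circular structures throw in stringify
      }
    }

    console.log(argsPreview({ query: "x".repeat(1000) }, 50)); // truncated preview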
@@ -899,12 +899,12 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
  consecutiveErrors++;
  if (consecutiveErrors <= _LlmAgent.MAX_AGENT_LOOP_ERROR_RETRIES) {
  logger.warn(
- `[runAsyncImpl] Error event (${lastEvent.errorCode}), retrying agent loop (${consecutiveErrors}/${_LlmAgent.MAX_AGENT_LOOP_ERROR_RETRIES})`
+ `[runAsyncImpl] Error event (${lastEvent.errorCode}: ${lastEvent.errorMessage || "no message"}), retrying agent loop (${consecutiveErrors}/${_LlmAgent.MAX_AGENT_LOOP_ERROR_RETRIES})`
  );
  continue;
  }
  logger.error(
- `[runAsyncImpl] Max agent-loop error retries exhausted for ${lastEvent.errorCode}`
+ `[runAsyncImpl] Max agent-loop error retries exhausted for ${lastEvent.errorCode}: ${lastEvent.errorMessage || "no message"}`
  );
  break;
  }
@@ -993,7 +993,7 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
  }
  }
  async *postprocess(invocationContext, llmRequest, llmResponse, modelResponseEvent) {
- var _a, _b;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  for (const processor of this.responseProcessors) {
  for await (const event of processor.runAsync(invocationContext, llmResponse)) {
  yield event;
@@ -1002,6 +1002,27 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
  if (!llmResponse.content && !llmResponse.errorCode && !llmResponse.interrupted) {
  return;
  }
+ if (llmResponse.content && !llmResponse.errorCode) {
+ if (!llmResponse.content.parts || llmResponse.content.parts.length === 0) {
+ logger.debug(
+ `[postprocess] Skipping LLM response with no parts (role=${llmResponse.content.role})`
+ );
+ return;
+ }
+ const allEmpty = llmResponse.content.parts.every(
+ (p) => {
+ if (p.functionCall || p.functionResponse || p.executableCode || p.codeExecutionResult) return false;
+ if ("text" in p && typeof p.text === "string" && p.text.length > 0) return false;
+ return true;
+ }
+ );
+ if (allEmpty) {
+ logger.debug(
+ `[postprocess] Skipping empty-content LLM response (${llmResponse.content.parts.length} empty parts)`
+ );
+ return;
+ }
+ }
  const mergedEvent = createEvent({
  ...modelResponseEvent,
  ...llmResponse
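
The new postprocess guard treats a part as meaningful only if it carries a function call, a function response, executable code, a code-execution result, or non-empty text; responses in which every part fails that test are dropped before any event is created. A sketch of the predicate (Part here is a simplified stand-in for the @google/genai type):

    // Simplified Part stand-in; the real type comes from @google/genai.
    interface Part {
      text?: string;
      functionCall?: object;
      functionResponse?: object;
      executableCode?: object;
      codeExecutionResult?: object;
    }

    // Mirrors the allEmpty check above: structured parts always count,
    // text parts count only when the string is non-empty.
    function isMeaningful(p: Part): boolean {
      if (p.functionCall || p.functionResponse || p.executableCode || p.codeExecutionResult) {
        return true;
      }
      return typeof p.text === "string" && p.text.length > 0;
    }

    const allEmpty = (parts: Part[]) => parts.every((p) => !isMeaningful(p));

    console.log(allEmpty([{ text: "" }]));                       // true: skipped
    console.log(allEmpty([{ text: "" }, { functionCall: {} }])); // false: kept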
@@ -1023,8 +1044,11 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
  }
  }
  }
+ logger.debug(
+ `[postprocess] Yielding mergedEvent: role=${(_b = mergedEvent.content) == null ? void 0 : _b.role}, parts=${(_d = (_c = mergedEvent.content) == null ? void 0 : _c.parts) == null ? void 0 : _d.length}, hasFCs=${((_e = getFunctionCalls(mergedEvent)) == null ? void 0 : _e.length) || 0}, partial=${mergedEvent.partial}`
+ );
  yield mergedEvent;
- if (!((_b = getFunctionCalls(mergedEvent)) == null ? void 0 : _b.length)) {
+ if (!((_f = getFunctionCalls(mergedEvent)) == null ? void 0 : _f.length)) {
  return;
  }
  const functionResponseEvent = await handleFunctionCallsAsync({
@@ -1049,6 +1073,9 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
  if (toolConfirmationEvent) {
  yield toolConfirmationEvent;
  }
+ logger.debug(
+ `[postprocess] Yielding functionResponseEvent: role=${(_g = functionResponseEvent.content) == null ? void 0 : _g.role}, parts=${(_i = (_h = functionResponseEvent.content) == null ? void 0 : _h.parts) == null ? void 0 : _i.length}`
+ );
  yield functionResponseEvent;
  const nextAgentName = functionResponseEvent.actions.transferToAgent;
  if (nextAgentName) {
@@ -1130,7 +1157,7 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
  if (llmResponse.errorCode && _LlmAgent.LLM_RETRYABLE_ERROR_CODES.has(llmResponse.errorCode) && !contentYielded && attempt < maxRetries) {
  shouldRetry = true;
  logger.warn(
- `[callLlmAsync] Transient LLM error: ${llmResponse.errorCode}, usage: ${JSON.stringify(llmResponse.usageMetadata)}`
+ `[callLlmAsync] Transient LLM error: ${llmResponse.errorCode}${llmResponse.errorMessage ? ": " + llmResponse.errorMessage : ""}, finishReason: ${llmResponse.finishReason || "none"}, usage: ${JSON.stringify(llmResponse.usageMetadata)}`
  );
  break;
  }
@@ -5,7 +5,7 @@
  */
  import { createEventActions } from "./event_actions.js";
  function createEvent(params = {}) {
- return {
+ const event = {
  ...params,
  id: params.id || createNewEventId(),
  invocationId: params.invocationId || "",
@@ -15,6 +15,7 @@ function createEvent(params = {}) {
  branch: params.branch,
  timestamp: params.timestamp || Date.now()
  };
+ return event;
  }
  function isFinalResponse(event) {
  if (event.actions.skipSummarization || event.longRunningToolIds && event.longRunningToolIds.length > 0) {