@paean-ai/adk 0.2.24 → 0.2.26
Changes between the published contents of these two versions, as they appear in the public registry.
- package/dist/cjs/agents/functions.js +13 -1
- package/dist/cjs/agents/llm_agent.js +33 -5
- package/dist/cjs/index.js +12 -12
- package/dist/cjs/index.js.map +3 -3
- package/dist/cjs/models/google_llm.js +56 -10
- package/dist/cjs/models/llm_response.js +7 -0
- package/dist/esm/agents/functions.js +13 -1
- package/dist/esm/agents/llm_agent.js +33 -5
- package/dist/esm/index.js +12 -12
- package/dist/esm/index.js.map +3 -3
- package/dist/esm/models/google_llm.js +56 -10
- package/dist/esm/models/llm_response.js +7 -0
- package/dist/types/models/google_llm.d.ts +4 -0
- package/dist/web/agents/functions.js +13 -1
- package/dist/web/agents/llm_agent.js +33 -5
- package/dist/web/index.js +1 -1
- package/dist/web/index.js.map +3 -3
- package/dist/web/models/google_llm.js +58 -12
- package/dist/web/models/llm_response.js +7 -0
- package/package.json +1 -1
models/google_llm.js:

@@ -40,11 +40,13 @@ class Gemini extends BaseLlm {
     this.headers = headers;
     this.isGemini3Preview = isGemini3PreviewModel(model);
     const canReadEnv = typeof process === "object";
+    const aiStudioApiKey = canReadEnv ? process.env["AI_STUDIO_API_KEY"] : void 0;
+    const useAiStudioMode = !!aiStudioApiKey;
     this.apiEndpoint = apiEndpoint;
     if (!this.apiEndpoint && canReadEnv) {
       this.apiEndpoint = process.env["GEMINI_API_ENDPOINT"];
     }
-    if (!this.apiEndpoint && this.isGemini3Preview) {
+    if (!this.apiEndpoint && this.isGemini3Preview && !useAiStudioMode) {
       this.apiEndpoint = GEMINI3_PREVIEW_API_ENDPOINT;
       logger.info(`Using Gemini 3 preview endpoint: ${this.apiEndpoint}`);
     }
@@ -55,6 +57,15 @@ class Gemini extends BaseLlm {
         useVertexAI = vertexAIfromEnv.toLowerCase() === "true" || vertexAIfromEnv === "1";
       }
     }
+    if (useAiStudioMode) {
+      if (useVertexAI) {
+        logger.info(
+          "AI_STUDIO_API_KEY set \u2014 overriding Vertex AI mode to use AI Studio (generativelanguage.googleapis.com)"
+        );
+      }
+      useVertexAI = false;
+      this.apiKey = aiStudioApiKey;
+    }
     if (this.isGemini3Preview && useVertexAI) {
       const availableApiKey = apiKey || (canReadEnv ? process.env["GOOGLE_GENAI_API_KEY"] || process.env["GEMINI_API_KEY"] : void 0);
       if (availableApiKey) {
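Taken together, the two hunks above add an AI Studio escape hatch to the Gemini constructor: a new AI_STUDIO_API_KEY environment variable suppresses the Gemini 3 preview endpoint default and overrides the Vertex AI flag. A condensed sketch of the resulting resolution order; the standalone function, its parameters, and the placeholder endpoint constant are illustrative, since the real logic is inlined in the constructor:

```ts
// Illustrative restatement of the constructor's backend resolution.
const GEMINI3_PREVIEW_API_ENDPOINT = "<preview endpoint URL>"; // stands in for the package constant

function resolveBackend(
  env: Record<string, string | undefined>,
  isGemini3Preview: boolean,
  explicitEndpoint?: string
): { apiEndpoint?: string; useVertexAI: boolean; apiKey?: string } {
  const aiStudioApiKey = env["AI_STUDIO_API_KEY"];
  let apiEndpoint = explicitEndpoint ?? env["GEMINI_API_ENDPOINT"];
  // New: the Gemini 3 preview endpoint default is skipped in AI Studio mode.
  if (!apiEndpoint && isGemini3Preview && !aiStudioApiKey) {
    apiEndpoint = GEMINI3_PREVIEW_API_ENDPOINT;
  }
  const flag = (env["GOOGLE_GENAI_USE_VERTEXAI"] ?? "").toLowerCase();
  let useVertexAI = flag === "true" || flag === "1";
  // New: AI_STUDIO_API_KEY wins over the Vertex AI flag and supplies the key.
  if (aiStudioApiKey) {
    return { apiEndpoint, useVertexAI: false, apiKey: aiStudioApiKey };
  }
  return { apiEndpoint, useVertexAI };
}
```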
@@ -106,7 +117,7 @@ class Gemini extends BaseLlm {
    * @yields LlmResponse: The model response.
    */
   async *generateContentAsync(llmRequest, stream = false) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
     this.preprocessRequest(llmRequest);
     this.maybeAppendUserContent(llmRequest);
     logger.info(
@@ -129,6 +140,7 @@ class Gemini extends BaseLlm {
       let text = "";
       let usageMetadata;
       let lastResponse;
+      let pendingFCResponse = null;
       for await (const response of streamResult) {
         lastResponse = response;
         const llmResponse = createLlmResponse(response);
@@ -157,7 +169,9 @@ class Gemini extends BaseLlm {
         } else {
           text += firstPart.text;
         }
-
+        if (!hasFunctionCalls) {
+          llmResponse.partial = true;
+        }
         if (this.isGemini3Preview && hasFunctionCalls) {
           thoughtText = "";
           thoughtSignature = void 0;
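The `llmResponse.partial = true` assignment is new: text-only streaming chunks are now explicitly flagged as partial, while function-call chunks are not (they are buffered and merged, as the next hunk shows). A hypothetical consumer, to illustrate the distinction; `renderDelta` and `commitTurn` are placeholders, not package APIs:

```ts
// Hypothetical consumer of the stream; only the `partial` check reflects the diff.
async function consume(stream: AsyncIterable<{ partial?: boolean }>) {
  for await (const chunk of stream) {
    if (chunk.partial) {
      renderDelta(chunk); // incremental text; a complete response follows
    } else {
      commitTurn(chunk); // complete response, e.g. merged function calls
    }
  }
}
function renderDelta(chunk: unknown) { /* placeholder */ }
function commitTurn(chunk: unknown) { /* placeholder */ }
```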
@@ -227,18 +241,50 @@ class Gemini extends BaseLlm {
             }
           }
         }
-
+        }
+        if (hasFunctionCalls) {
+          if (pendingFCResponse && ((_i = pendingFCResponse.content) == null ? void 0 : _i.parts)) {
+            const newParts = (((_j = llmResponse.content) == null ? void 0 : _j.parts) || []).filter(
+              (p) => p.functionCall || p.thoughtSignature
+            );
+            pendingFCResponse.content.parts.push(...newParts);
+            pendingFCResponse.usageMetadata = llmResponse.usageMetadata;
+          } else {
+            pendingFCResponse = llmResponse;
+          }
+          continue;
+        }
+        if (pendingFCResponse) {
+          if (!((_l = (_k = llmResponse.content) == null ? void 0 : _k.parts) == null ? void 0 : _l.length)) {
+            continue;
+          }
+          yield pendingFCResponse;
+          pendingFCResponse = null;
+        }
+        yield llmResponse;
+      }
+      if (pendingFCResponse) {
+        if (this.isGemini3Preview && ((_m = pendingFCResponse.content) == null ? void 0 : _m.parts)) {
+          const partsWithSig = pendingFCResponse.content.parts.filter(
             (p) => p.thoughtSignature
           ).length;
-          if (partsWithSig === 0) {
+          if (partsWithSig === 0 && thoughtSignature) {
+            for (const part of pendingFCResponse.content.parts) {
+              if (part.functionCall) {
+                part.thoughtSignature = thoughtSignature;
+                break;
+              }
+            }
+          } else if (partsWithSig === 0) {
             logger.warn(
-              `[Gemini3] No thoughtSignature on function call parts \u2014 may cause 400 on next request`
+              `[Gemini3] No thoughtSignature on merged function call parts \u2014 may cause 400 on next request`
             );
           }
         }
-        yield
+        yield pendingFCResponse;
+        pendingFCResponse = null;
       }
-      if ((text || thoughtText) && ((
+      if ((text || thoughtText) && ((_o = (_n = lastResponse == null ? void 0 : lastResponse.candidates) == null ? void 0 : _n[0]) == null ? void 0 : _o.finishReason) === FinishReason.STOP) {
        const parts = [];
        if (thoughtText) {
          const thoughtPart = { text: thoughtText, thought: true };
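This hunk is the heart of the streaming change: instead of yielding every streamed chunk, function-call chunks are accumulated in pendingFCResponse and flushed as one response, so consumers see a single event carrying all parallel function calls together with their thoughtSignature parts. A minimal standalone sketch of the coalescing pattern, with simplified types and without the Gemini 3 thought-signature back-fill shown above:

```ts
interface Part { text?: string; functionCall?: object; thoughtSignature?: string }
interface Resp { content?: { parts?: Part[] }; usageMetadata?: unknown; partial?: boolean }

// Buffer function-call chunks, merge their meaningful parts, flush as one response.
async function* coalesceFunctionCalls(stream: AsyncIterable<Resp>): AsyncGenerator<Resp> {
  let pending: Resp | null = null;
  for await (const resp of stream) {
    const parts = resp.content?.parts ?? [];
    const hasFunctionCalls = parts.some((p) => p.functionCall);
    if (hasFunctionCalls) {
      const buffered = pending?.content?.parts;
      if (pending && buffered) {
        // Merge only function-call and signature parts into the buffered response.
        buffered.push(...parts.filter((p) => p.functionCall || p.thoughtSignature));
        pending.usageMetadata = resp.usageMetadata;
      } else {
        pending = resp;
      }
      continue; // don't yield yet; more calls may follow in later chunks
    }
    if (pending) {
      if (!parts.length) continue; // skip empty chunks while buffering
      yield pending;               // flush the merged function-call response first
      pending = null;
    }
    yield resp;
  }
  if (pending) yield pending; // flush whatever remains at end of stream
}
```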
@@ -260,12 +306,12 @@ class Gemini extends BaseLlm {
       }
     } else {
       const response = await this.apiClient.models.generateContent({
-        model: (
+        model: (_p = llmRequest.model) != null ? _p : this.model,
         contents: llmRequest.contents,
         config: llmRequest.config
       });
       const llmResponse = createLlmResponse(response);
-      if (this.isGemini3Preview && ((
+      if (this.isGemini3Preview && ((_q = llmResponse.content) == null ? void 0 : _q.parts)) {
         let thoughtSig;
         let hasThoughtPartWithSignature = false;
         for (const part of llmResponse.content.parts) {
models/llm_response.js:

@@ -3,6 +3,7 @@
  * Copyright 2025 Google LLC
  * SPDX-License-Identifier: Apache-2.0
  */
+import { FinishReason } from "@google/genai";
 function createLlmResponse(response) {
   var _a;
   const usageMetadata = response.usageMetadata;
@@ -16,6 +17,12 @@ function createLlmResponse(response) {
       finishReason: candidate.finishReason
     };
   }
+  if (candidate.finishReason === FinishReason.STOP) {
+    return {
+      usageMetadata,
+      finishReason: candidate.finishReason
+    };
+  }
   return {
     errorCode: candidate.finishReason,
     errorMessage: candidate.finishMessage,
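Previously, any candidate reaching this point without content was converted into an error response, with errorCode set to its finishReason; the new branch exempts a clean STOP and returns a normal, contentless response instead. Restated as a small standalone function, with local stand-ins for the @google/genai types and the prior branch inferred from the surrounding context lines:

```ts
// "STOP" mirrors FinishReason.STOP, the enum value the dist code compares against.
type Candidate = { content?: object; finishReason?: string; finishMessage?: string };

function toLlmResponse(candidate: Candidate, usageMetadata?: unknown) {
  if (candidate.content) {
    // Path with content, unchanged by this release.
    return { content: candidate.content, usageMetadata, finishReason: candidate.finishReason };
  }
  // New: a clean STOP without content is a normal, empty response.
  if (candidate.finishReason === "STOP") {
    return { usageMetadata, finishReason: candidate.finishReason };
  }
  // Any other contentless finish is still surfaced as an error.
  return { errorCode: candidate.finishReason, errorMessage: candidate.finishMessage, usageMetadata };
}
```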
types/models/google_llm.d.ts:

@@ -20,6 +20,10 @@ export interface GeminiParams {
     /**
      * The API key to use for the Gemini API. If not provided, it will look for
      * the GOOGLE_GENAI_API_KEY or GEMINI_API_KEY environment variable.
+     *
+     * Alternatively, set AI_STUDIO_API_KEY env var to force all models
+     * (including Gemini 3 preview) through the standard AI Studio endpoint
+     * (generativelanguage.googleapis.com) instead of aiplatform.googleapis.com.
      */
     apiKey?: string;
     /**
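For reference, the override is purely environment-driven; something like the following should exercise it, assuming Gemini is exported from the package root (the model name is an example, not a guaranteed id):

```ts
import { Gemini } from "@paean-ai/adk"; // assumed export path

// With AI_STUDIO_API_KEY set, the constructor forces AI Studio mode:
// useVertexAI is turned off and this key is used against
// generativelanguage.googleapis.com, even for Gemini 3 preview models.
process.env["AI_STUDIO_API_KEY"] = "<your AI Studio key>";

const llm = new Gemini({ model: "gemini-3-pro-preview" }); // example model id
```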
agents/functions.js:

@@ -89,6 +89,9 @@ function generateAuthEvent(invocationContext, functionResponseEvent) {
     longRunningToolIds.add(requestEucFunctionCall.id);
     parts.push({ functionCall: requestEucFunctionCall });
   }
+  if (parts.length === 0) {
+    return void 0;
+  }
   return createEvent({
     invocationId: invocationContext.invocationId,
     author: invocationContext.agent.name,
@@ -130,6 +133,9 @@ function generateRequestConfirmationEvent({
     longRunningToolIds.add(requestConfirmationFunctionCall.id);
     parts.push({ functionCall: requestConfirmationFunctionCall });
   }
+  if (parts.length === 0) {
+    return void 0;
+  }
   return createEvent({
     invocationId: invocationContext.invocationId,
     author: invocationContext.agent.name,
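Both generateAuthEvent and generateRequestConfirmationEvent now short-circuit instead of emitting an event whose content has zero parts, presumably so callers can treat an undefined return as "nothing to emit". The shared guard reduces to this pattern (names simplified; the dist code inlines it in both builders):

```ts
// Only create an event when at least one part was actually collected.
function buildEvent<P>(parts: P[], create: (parts: P[]) => object): object | undefined {
  if (parts.length === 0) {
    return undefined; // new: never create an event with empty content.parts
  }
  return create(parts);
}
```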
@@ -193,8 +199,9 @@ async function handleFunctionCallList({
     }
   );
   if (!toolAndContext) {
+    const argsPreview = functionCall.args ? JSON.stringify(functionCall.args).substring(0, 300) : "(none)";
     logger.warn(
-      'Function "'.concat(functionCall.name, '" not found in toolsDict ') + "(".concat(Object.keys(toolsDict).length, " tools registered).")
+      'Function "'.concat(functionCall.name, '" not found in toolsDict ') + "(".concat(Object.keys(toolsDict).length, " tools registered). ") + "Args: ".concat(argsPreview)
     );
     const errorResponseEvent = createEvent({
       invocationId: invocationContext.invocationId,
@@ -243,6 +250,10 @@ async function handleFunctionCallList({
       toolContext
     );
   } catch (e) {
+    const argsPreview = JSON.stringify(functionArgs).substring(0, 500);
+    logger.error(
+      'Tool execution error: "'.concat(tool.name, '" threw ').concat(e instanceof Error ? e.message : String(e), ". Args: ").concat(argsPreview)
+    );
     if (e instanceof Error) {
       const onToolErrorResponse = await invocationContext.pluginManager.runOnToolErrorCallback(
         {
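Both new log sites attach a truncated JSON preview of the call arguments: 300 characters for the not-found warning, 500 for the execution-error log. Extracted as a hypothetical helper (the dist code inlines the idiom):

```ts
// Truncated, log-safe preview of tool arguments.
function previewArgs(args: unknown, maxLen: number): string {
  if (args === undefined || args === null) return "(none)";
  const json = JSON.stringify(args); // may be undefined for non-JSON values
  return (json ?? String(args)).substring(0, maxLen);
}

// e.g. previewArgs(functionCall.args, 300) or previewArgs(functionArgs, 500)
```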
@@ -360,6 +371,7 @@ function mergeParallelFunctionResponseEvents(functionResponseEvents) {
   const actionsList = functionResponseEvents.map((event) => event.actions || {});
   const mergedActions = mergeEventActions(actionsList);
   return createEvent({
+    invocationId: baseEvent.invocationId,
     author: baseEvent.author,
     branch: baseEvent.branch,
     content: { role: "user", parts: mergedParts },
agents/llm_agent.js:

@@ -923,12 +923,12 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
         consecutiveErrors++;
         if (consecutiveErrors <= _LlmAgent.MAX_AGENT_LOOP_ERROR_RETRIES) {
           logger.warn(
-            "[runAsyncImpl] Error event (".concat(lastEvent.errorCode, "), ") + "retrying agent loop (".concat(consecutiveErrors, "/").concat(_LlmAgent.MAX_AGENT_LOOP_ERROR_RETRIES, ")")
+            "[runAsyncImpl] Error event (".concat(lastEvent.errorCode, ": ").concat(lastEvent.errorMessage || "no message", "), ") + "retrying agent loop (".concat(consecutiveErrors, "/").concat(_LlmAgent.MAX_AGENT_LOOP_ERROR_RETRIES, ")")
           );
           continue;
         }
         logger.error(
-          "[runAsyncImpl] Max agent-loop error retries exhausted for ".concat(lastEvent.errorCode)
+          "[runAsyncImpl] Max agent-loop error retries exhausted for " + "".concat(lastEvent.errorCode, ": ").concat(lastEvent.errorMessage || "no message")
         );
         break;
       }
@@ -1073,7 +1073,7 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
   }
   postprocess(invocationContext, llmRequest, llmResponse, modelResponseEvent) {
     return __asyncGenerator(this, null, function* () {
-      var _a, _b;
+      var _a, _b, _c, _d, _e, _f, _g, _h, _i;
       for (const processor of this.responseProcessors) {
         try {
           for (var iter = __forAwait(processor.runAsync(invocationContext, llmResponse)), more, temp, error; more = !(temp = yield new __await(iter.next())).done; more = false) {
@@ -1094,6 +1094,28 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
       if (!llmResponse.content && !llmResponse.errorCode && !llmResponse.interrupted) {
         return;
       }
+      if (llmResponse.content && !llmResponse.errorCode) {
+        if (!llmResponse.content.parts || llmResponse.content.parts.length === 0) {
+          logger.debug(
+            "[postprocess] Skipping LLM response with no parts (role=".concat(llmResponse.content.role, ")")
+          );
+          return;
+        }
+        const allEmpty = llmResponse.content.parts.every(
+          (p) => {
+            if (p.functionCall || p.functionResponse || p.executableCode || p.codeExecutionResult) return false;
+            if (p.inlineData || p.fileData) return false;
+            if ("text" in p && typeof p.text === "string" && p.text.length > 0) return false;
+            return true;
+          }
+        );
+        if (allEmpty) {
+          logger.debug(
+            "[postprocess] Skipping empty-content LLM response (".concat(llmResponse.content.parts.length, " empty parts)")
+          );
+          return;
+        }
+      }
       const mergedEvent = createEvent(__spreadValues(__spreadValues({}, modelResponseEvent), llmResponse));
       if (mergedEvent.content) {
         const functionCalls = getFunctionCalls(mergedEvent);
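The new early-return above drops responses whose parts carry no payload at all, which would otherwise surface as empty events downstream. The predicate, restated as a named function over a simplified Part shape:

```ts
// Part is reduced to the fields the check inspects.
interface Part {
  text?: string;
  functionCall?: object;
  functionResponse?: object;
  executableCode?: object;
  codeExecutionResult?: object;
  inlineData?: object;
  fileData?: object;
}

function isEmptyPart(p: Part): boolean {
  if (p.functionCall || p.functionResponse || p.executableCode || p.codeExecutionResult) return false;
  if (p.inlineData || p.fileData) return false;
  if (typeof p.text === "string" && p.text.length > 0) return false;
  return true; // no payload of any kind
}

// postprocess now returns early when parts.length === 0 or parts.every(isEmptyPart).
```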
@@ -1112,8 +1134,11 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
           }
         }
       }
+      logger.debug(
+        "[postprocess] Yielding mergedEvent: role=".concat((_b = mergedEvent.content) == null ? void 0 : _b.role, ", parts=").concat((_d = (_c = mergedEvent.content) == null ? void 0 : _c.parts) == null ? void 0 : _d.length, ", ") + "hasFCs=".concat(((_e = getFunctionCalls(mergedEvent)) == null ? void 0 : _e.length) || 0, ", partial=").concat(mergedEvent.partial)
+      );
       yield mergedEvent;
-      if (!((
+      if (!((_f = getFunctionCalls(mergedEvent)) == null ? void 0 : _f.length)) {
         return;
       }
       const functionResponseEvent = yield new __await(handleFunctionCallsAsync({
@@ -1138,6 +1163,9 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
       if (toolConfirmationEvent) {
         yield toolConfirmationEvent;
       }
+      logger.debug(
+        "[postprocess] Yielding functionResponseEvent: role=".concat((_g = functionResponseEvent.content) == null ? void 0 : _g.role, ", parts=").concat((_i = (_h = functionResponseEvent.content) == null ? void 0 : _h.parts) == null ? void 0 : _i.length)
+      );
       yield functionResponseEvent;
       const nextAgentName = functionResponseEvent.actions.transferToAgent;
       if (nextAgentName) {
@@ -1235,7 +1263,7 @@ const _LlmAgent = class _LlmAgent extends BaseAgent {
       if (llmResponse.errorCode && _LlmAgent.LLM_RETRYABLE_ERROR_CODES.has(llmResponse.errorCode) && !contentYielded && attempt < maxRetries) {
         shouldRetry = true;
         logger.warn(
-          "[callLlmAsync] Transient LLM error: ".concat(llmResponse.errorCode, ", ") + "usage: ".concat(JSON.stringify(llmResponse.usageMetadata))
+          "[callLlmAsync] Transient LLM error: ".concat(llmResponse.errorCode) + "".concat(llmResponse.errorMessage ? ": " + llmResponse.errorMessage : "", ", ") + "finishReason: ".concat(llmResponse.finishReason || "none", ", ") + "usage: ".concat(JSON.stringify(llmResponse.usageMetadata))
         );
         break;
       }