@iqai/adk 0.1.4 → 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -26,8 +26,9 @@ var __copyProps = (to, from, except, desc) => {
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
 
  // src/helpers/logger.ts
+ import chalk from "chalk";
  function isDebugEnabled() {
- return process.env.NODE_ENV === "development" || process.env.DEBUG === "true";
+ return process.env.NODE_ENV === "development" || process.env.ADK_DEBUG === "true";
  }
  var Logger;
  var init_logger = __esm({
@@ -38,34 +39,99 @@ var init_logger = __esm({
  constructor({ name }) {
  this.name = name;
  }
+ colorize(message) {
+ return chalk.blue(message);
+ }
  debug(message, ...args) {
- const time = (/* @__PURE__ */ new Date()).toISOString();
  if (this.isDebugEnabled) {
- console.log(`[${time}] \u{1F41B} [DEBUG] \u2728 [${this.name}] ${message}`, ...args);
+ const time = (/* @__PURE__ */ new Date()).toLocaleTimeString();
+ console.log(
+ this.colorize(`[${time}] \u{1F41B} [${this.name}] ${message}`),
+ ...args
+ );
  }
  }
  info(message, ...args) {
- const time = (/* @__PURE__ */ new Date()).toISOString();
- console.info(`[${time}] \u2139\uFE0F [INFO] \u2728 [${this.name}] ${message}`, ...args);
+ const time = (/* @__PURE__ */ new Date()).toLocaleTimeString();
+ console.info(
+ this.colorize(`[${time}] \u2139\uFE0F [${this.name}] ${message}`),
+ ...args
+ );
  }
  warn(message, ...args) {
- const time = (/* @__PURE__ */ new Date()).toISOString();
- console.warn(`[${time}] \u{1F6A7} [WARN] \u2728 [${this.name}] ${message}`, ...args);
+ const time = (/* @__PURE__ */ new Date()).toLocaleTimeString();
+ console.warn(
+ this.colorize(`[${time}] \u{1F6A7} [${this.name}] ${message}`),
+ ...args
+ );
  }
  error(message, ...args) {
- const time = (/* @__PURE__ */ new Date()).toISOString();
- console.error(`[${time}] \u274C [ERROR] \u2728 [${this.name}] ${message}`, ...args);
+ const time = (/* @__PURE__ */ new Date()).toLocaleTimeString();
+ console.error(
+ this.colorize(`[${time}] \u274C [${this.name}] ${message}`),
+ ...args
+ );
+ }
+ /**
+ * Logs structured data in a visually appealing table format.
+ * Uses vertical layout for better readability and respects debug settings.
+ */
+ debugStructured(title, data) {
+ if (!this.isDebugEnabled) return;
+ const terminalWidth = process.stdout.columns || 60;
+ const width = Math.min(terminalWidth, 100);
+ const contentWidth = width - 4;
+ const topBorder = `\u250C${"\u2500".repeat(width - 2)}\u2510`;
+ const bottomBorder = `\u2514${"\u2500".repeat(width - 2)}\u2518`;
+ const middleBorder = `\u251C${"\u2500".repeat(width - 2)}\u2524`;
+ console.log(this.colorize(topBorder));
+ console.log(this.colorize(`\u2502 ${title.padEnd(contentWidth)} \u2502`));
+ console.log(this.colorize(middleBorder));
+ Object.entries(data).forEach(([key, value]) => {
+ const formattedKey = key.padEnd(20);
+ const formattedValue = String(value);
+ const availableValueSpace = contentWidth - 20 - 2;
+ const truncatedValue = formattedValue.length > availableValueSpace ? `${formattedValue.substring(0, availableValueSpace - 3)}...` : formattedValue;
+ const content = `${formattedKey}: ${truncatedValue}`;
+ const paddedContent = content.padEnd(contentWidth);
+ console.log(this.colorize(`\u2502 ${paddedContent} \u2502`));
+ });
+ console.log(this.colorize(bottomBorder));
+ }
+ /**
+ * Logs array data in a compact, readable format.
+ */
+ debugArray(title, items) {
+ if (!this.isDebugEnabled) return;
+ const terminalWidth = process.stdout.columns || 78;
+ const width = Math.min(terminalWidth, 120);
+ const contentWidth = width - 4;
+ const topBorder = `\u250C${"\u2500".repeat(width - 2)}\u2510`;
+ const bottomBorder = `\u2514${"\u2500".repeat(width - 2)}\u2518`;
+ const middleBorder = `\u251C${"\u2500".repeat(width - 2)}\u2524`;
+ console.log(this.colorize(topBorder));
+ console.log(this.colorize(`\u2502 ${title.padEnd(contentWidth)} \u2502`));
+ console.log(this.colorize(middleBorder));
+ items.forEach((item, index) => {
+ const itemStr = Object.entries(item).map(([k, v]) => `${k}: ${v}`).join(" \u2022 ");
+ const indexPart = `[${index + 1}] `;
+ const availableSpace = contentWidth - indexPart.length;
+ const truncatedItem = itemStr.length > availableSpace ? `${itemStr.substring(0, availableSpace - 3)}...` : itemStr;
+ const content = `${indexPart}${truncatedItem}`;
+ const paddedContent = content.padEnd(contentWidth);
+ console.log(this.colorize(`\u2502 ${paddedContent} \u2502`));
+ });
+ console.log(this.colorize(bottomBorder));
  }
  };
  }
  });
 
  // src/tools/base/base-tool.ts
- var logger6, BaseTool;
+ var BaseTool;
  var init_base_tool = __esm({
  "src/tools/base/base-tool.ts"() {
  init_logger();
- logger6 = new Logger({ name: "BaseTool" });
  BaseTool = class {
  /**
  * Name of the tool
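
Note: the hunk above changes how debug logging is enabled. From 0.1.6, debug output is gated by ADK_DEBUG=true (or NODE_ENV=development) instead of DEBUG=true, timestamps switch to toLocaleTimeString(), and the new debugStructured/debugArray helpers render chalk-colorized boxes. A minimal sketch of the new behavior, assuming Logger is reachable from the package root (it may be internal, in which case the same gating simply applies to the framework's built-in loggers):

    import { Logger } from "@iqai/adk"; // assumption: exported from the package root

    process.env.ADK_DEBUG = "true"; // DEBUG=true no longer enables ADK debug logs

    const logger = new Logger({ name: "demo" });
    logger.debug("printed only when ADK_DEBUG=true or NODE_ENV=development");
    // Renders a bordered key/value box on stdout, sized to the terminal width:
    logger.debugStructured("LLM Request", { Model: "gemini-1.5-flash", Streaming: "No" });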
@@ -96,6 +162,7 @@ var init_base_tool = __esm({
  * Maximum delay for retry in ms
  */
  maxRetryDelay = 1e4;
+ logger = new Logger({ name: "BaseTool" });
  /**
  * Constructor for BaseTool
  */
@@ -226,7 +293,7 @@
  while (attempts <= (this.shouldRetryOnFailure ? this.maxRetryAttempts : 0)) {
  try {
  if (attempts > 0) {
- logger6.debug(
+ this.logger.debug(
  `Retrying tool ${this.name} (attempt ${attempts} of ${this.maxRetryAttempts})...`
  );
  const delay = Math.min(
@@ -613,6 +680,7 @@ __export(agents_exports, {
  // src/models/index.ts
  var models_exports = {};
  __export(models_exports, {
+ AiSdkLlm: () => AiSdkLlm,
  AnthropicLlm: () => AnthropicLlm,
  ApiKeyCredential: () => ApiKeyCredential,
  ApiKeyScheme: () => ApiKeyScheme,
@@ -640,8 +708,6 @@ __export(models_exports, {
  });
 
  // src/models/llm-request.ts
- init_logger();
- var logger = new Logger({ name: "LlmRequest" });
  var LlmRequest = class {
  /**
  * The model name.
@@ -805,6 +871,10 @@ var LlmResponse = class _LlmResponse {
  * Reason why the model finished generating.
  */
  finishReason;
+ /**
+ * Error object if the response is an error.
+ */
+ error;
  /**
  * Creates a new LlmResponse.
  */
@@ -848,6 +918,29 @@ var LlmResponse = class _LlmResponse {
  usageMetadata
  });
  }
+ /**
+ * Creates an LlmResponse from an error.
+ *
+ * @param error The error object or message.
+ * @param options Additional options for the error response.
+ * @param options.errorCode A specific error code for the response.
+ * @param options.model The model that was being used when the error occurred.
+ * @returns The LlmResponse.
+ */
+ static fromError(error, options = {}) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ const errorCode = options.errorCode || "UNKNOWN_ERROR";
+ return new _LlmResponse({
+ errorCode,
+ errorMessage: `LLM call failed for model ${options.model || "unknown"}: ${errorMessage}`,
+ content: {
+ role: "model",
+ parts: [{ text: `Error: ${errorMessage}` }]
+ },
+ finishReason: "STOP",
+ error: error instanceof Error ? error : new Error(errorMessage)
+ });
+ }
  };
 
  // src/models/base-llm.ts
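
Note: a brief usage sketch of the LlmResponse.fromError helper added above, following the JSDoc and body shown in the hunk rather than any documented example; the error code and model name here are illustrative:

    try {
      // ... call a model ...
    } catch (err) {
      const resp = LlmResponse.fromError(err, { errorCode: "AI_SDK_ERROR", model: "gpt-4o" });
      // resp.errorCode        -> "AI_SDK_ERROR" (defaults to "UNKNOWN_ERROR")
      // resp.errorMessage     -> "LLM call failed for model gpt-4o: <original message>"
      // resp.content.parts[0] -> { text: "Error: <original message>" }
    }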
@@ -1127,12 +1220,12 @@ var traceLlmCall = (invocationContext, eventId, llmRequest, llmResponse) => tele
  );
 
  // src/models/base-llm.ts
- var logger2 = new Logger({ name: "BaseLlm" });
  var BaseLlm = class {
  /**
  * The name of the LLM, e.g. gemini-1.5-flash or gemini-1.5-flash-001.
  */
  model;
+ logger = new Logger({ name: "BaseLlm" });
  /**
  * Constructor for BaseLlm
  */
@@ -1183,12 +1276,6 @@ var BaseLlm = class {
  }),
  "adk.streaming": stream || false
  });
- logger2.debug("ADK LLM Request:", {
- model: this.model,
- contentCount: llmRequest.contents?.length || 0,
- streaming: stream || false,
- config: llmRequest.config
- });
  let responseCount = 0;
  let totalTokens = 0;
  for await (const response of this.generateContentAsyncImpl(
@@ -1196,14 +1283,6 @@
  stream
  )) {
  responseCount++;
- logger2.debug(`ADK LLM Response ${responseCount}:`, {
- model: this.model,
- parts: response.parts?.map((part) => ({
- text: typeof part.text === "string" ? part.text.substring(0, 200) + (part.text.length > 200 ? "..." : "") : "[non_text_content]"
- })),
- finishReason: response.finish_reason,
- usage: response.usage
- });
  if (response.usage) {
  totalTokens += response.usage.total_tokens || 0;
  span.setAttributes({
@@ -1224,7 +1303,7 @@
  } catch (error) {
  span.recordException(error);
  span.setStatus({ code: 2, message: error.message });
- console.error("\u274C ADK LLM Error:", {
+ this.logger.error("\u274C ADK LLM Error:", {
  model: this.model,
  error: error.message
  });
@@ -1280,13 +1359,10 @@ var BaseLLMConnection = class {
  };
 
  // src/models/google-llm.ts
- init_logger();
  import {
  FinishReason,
  GoogleGenAI
  } from "@google/genai";
- import dedent from "dedent";
- var NEW_LINE = "\n";
  var AGENT_ENGINE_TELEMETRY_TAG = "remote_reasoning_engine";
  var AGENT_ENGINE_TELEMETRY_ENV_VARIABLE_NAME = "GOOGLE_CLOUD_AGENT_ENGINE_ID";
  var GoogleLlm = class extends BaseLlm {
@@ -1294,7 +1370,6 @@ var GoogleLlm = class extends BaseLlm {
  _liveApiClient;
  _apiBackend;
  _trackingHeaders;
- logger = new Logger({ name: "GoogleLlm" });
  /**
  * Constructor for Gemini
  */
@@ -1318,10 +1393,6 @@
  */
  async *generateContentAsyncImpl(llmRequest, stream = false) {
  this.preprocessRequest(llmRequest);
- this.logger.debug(
- `Sending out request, model: ${llmRequest.model || this.model}, backend: ${this.apiBackend}, stream: ${stream}`
- );
- this.logger.debug(this.buildRequestLog(llmRequest));
  const model = llmRequest.model || this.model;
  const contents = this.convertContents(llmRequest.contents || []);
  const config = this.convertConfig(llmRequest.config);
@@ -1337,7 +1408,6 @@
  let usageMetadata = null;
  for await (const resp of responses) {
  response = resp;
- this.logger.debug(this.buildResponseLog(resp));
  const llmResponse = LlmResponse.create(resp);
  usageMetadata = llmResponse.usageMetadata;
  if (llmResponse.content?.parts?.[0]?.text) {
@@ -1390,8 +1460,11 @@
  contents,
  config
  });
- this.logger.debug(this.buildResponseLog(response));
- yield LlmResponse.create(response);
+ const llmResponse = LlmResponse.create(response);
+ this.logger.debug(
+ `Google response: ${llmResponse.usageMetadata?.candidatesTokenCount || 0} tokens`
+ );
+ yield llmResponse;
  }
  }
  /**
@@ -1466,60 +1539,6 @@ var GoogleLlm = class extends BaseLlm {
  }
  return `${funcDecl.name}: ${paramStr}`;
  }
- /**
- * Builds request log string.
- */
- buildRequestLog(req) {
- const functionDecls = req.config?.tools?.[0]?.functionDeclarations || [];
- const functionLogs = functionDecls.length > 0 ? functionDecls.map(
- (funcDecl) => this.buildFunctionDeclarationLog(funcDecl)
- ) : [];
- const contentsLogs = req.contents?.map(
- (content) => JSON.stringify(content, (key, value) => {
- if (key === "data" && typeof value === "string" && value.length > 100) {
- return "[EXCLUDED]";
- }
- return value;
- })
- ) || [];
- return dedent`
- LLM Request:
- -----------------------------------------------------------
- System Instruction:
- ${req.config?.systemInstruction || ""}
- -----------------------------------------------------------
- Contents:
- ${contentsLogs.join(NEW_LINE)}
- -----------------------------------------------------------
- Functions:
- ${functionLogs.join(NEW_LINE)}
- -----------------------------------------------------------`;
- }
- /**
- * Builds response log string.
- */
- buildResponseLog(resp) {
- const functionCallsText = [];
- if (resp.functionCalls) {
- for (const funcCall of resp.functionCalls) {
- functionCallsText.push(
- `name: ${funcCall.name}, args: ${JSON.stringify(funcCall.args)}`
- );
- }
- }
- return dedent`
- LLM Response:
- -----------------------------------------------------------
- Text:
- ${resp.text || ""}
- -----------------------------------------------------------
- Function calls:
- ${functionCallsText.join(NEW_LINE)}
- -----------------------------------------------------------
- Raw response:
- ${JSON.stringify(resp, null, 2)}
- -----------------------------------------------------------`;
- }
  /**
  * Provides the api client.
  */
@@ -1613,10 +1632,10 @@
  // src/models/anthropic-llm.ts
  init_logger();
  import Anthropic from "@anthropic-ai/sdk";
- var logger3 = new Logger({ name: "AnthropicLlm" });
  var MAX_TOKENS = 1024;
  var AnthropicLlm = class extends BaseLlm {
  _client;
+ logger = new Logger({ name: "AnthropicLlm" });
  /**
  * Constructor for Anthropic LLM
  */
@@ -1633,9 +1652,6 @@ var AnthropicLlm = class extends BaseLlm {
  * Main content generation method - handles both streaming and non-streaming
  */
  async *generateContentAsyncImpl(llmRequest, stream = false) {
- logger3.debug(
- `Sending Anthropic request, model: ${llmRequest.model || this.model}, stream: ${stream}`
- );
  const model = llmRequest.model || this.model;
  const messages = (llmRequest.contents || []).map(
  (content) => this.contentToAnthropicMessage(content)
@@ -1679,7 +1695,9 @@
  * Convert Anthropic Message to ADK LlmResponse
  */
  anthropicMessageToLlmResponse(message) {
- logger3.debug("Anthropic response:", JSON.stringify(message, null, 2));
+ this.logger.debug(
+ `Anthropic response: ${message.usage.output_tokens} tokens, ${message.stop_reason}`
+ );
  return new LlmResponse({
  content: {
  role: "model",
@@ -1836,11 +1854,7 @@ var AnthropicLlm = class extends BaseLlm {
  };
 
  // src/models/openai-llm.ts
- init_logger();
- import dedent2 from "dedent";
  import OpenAI from "openai";
- var logger4 = new Logger({ name: "OpenAiLlm" });
- var NEW_LINE2 = "\n";
  var OpenAiLlm = class extends BaseLlm {
  _client;
  /**
@@ -1860,10 +1874,6 @@ var OpenAiLlm = class extends BaseLlm {
  */
  async *generateContentAsyncImpl(llmRequest, stream = false) {
  this.preprocessRequest(llmRequest);
- logger4.debug(
- `Sending OpenAI request, model: ${llmRequest.model || this.model}, stream: ${stream}`
- );
- logger4.debug(this.buildRequestLog(llmRequest));
  const model = llmRequest.model || this.model;
  const messages = (llmRequest.contents || []).map(
  (content) => this.contentToOpenAiMessage(content)
@@ -1905,12 +1915,10 @@
  const choice = chunk.choices[0];
  if (!choice) continue;
  const delta = choice.delta;
- logger4.debug("Delta content:", delta.content);
  const llmResponse = this.createChunkResponse(delta, chunk.usage);
  if (chunk.usage) {
  usageMetadata = chunk.usage;
  }
- logger4.debug(this.buildResponseLog(llmResponse));
  if (llmResponse.content?.parts?.[0]?.text) {
  const part0 = llmResponse.content.parts[0];
  if (part0.thought) {
@@ -1993,7 +2001,6 @@
  } : void 0,
  finishReason: this.toAdkFinishReason(choice.finish_reason)
  });
- logger4.debug(this.buildResponseLog(finalResponse));
  yield finalResponse;
  } else {
  yield llmResponse;
@@ -2030,7 +2037,9 @@
  choice,
  response.usage
  );
- logger4.debug(this.buildResponseLog(llmResponse));
+ this.logger.debug(
+ `OpenAI response: ${response.usage?.completion_tokens || 0} tokens`
+ );
  yield llmResponse;
  }
  }
@@ -2084,10 +2093,6 @@
  */
  openAiMessageToLlmResponse(choice, usage) {
  const message = choice.message;
- logger4.debug(
- "OpenAI response:",
- JSON.stringify({ message, usage }, null, 2)
- );
  const parts = [];
  if (message.content) {
  parts.push({ text: message.content });
@@ -2276,67 +2281,6 @@ var OpenAiLlm = class extends BaseLlm {
  const parts = response.content?.parts;
  return parts?.some((part) => part.inlineData) || false;
  }
- /**
- * Build request log string for debugging (similar to Google LLM)
- */
- buildRequestLog(req) {
- const functionDecls = req.config?.tools?.[0]?.functionDeclarations || [];
- const functionLogs = functionDecls.length > 0 ? functionDecls.map(
- (funcDecl) => `${funcDecl.name}: ${JSON.stringify(funcDecl.parameters?.properties || {})}`
- ) : [];
- const contentsLogs = req.contents?.map(
- (content) => JSON.stringify(content, (key, value) => {
- if (key === "data" && typeof value === "string" && value.length > 100) {
- return "[EXCLUDED]";
- }
- return value;
- })
- ) || [];
- return dedent2`
- LLM Request:
- -----------------------------------------------------------
- System Instruction:
- ${req.getSystemInstructionText() || ""}
- -----------------------------------------------------------
- Contents:
- ${contentsLogs.join(NEW_LINE2)}
- -----------------------------------------------------------
- Functions:
- ${functionLogs.join(NEW_LINE2)}
- -----------------------------------------------------------`;
- }
- /**
- * Build response log string for debugging (similar to Google LLM)
- */
- buildResponseLog(response) {
- const functionCallsText = [];
- if (response.content?.parts) {
- for (const part of response.content.parts) {
- if (part.functionCall) {
- const funcCall = part.functionCall;
- functionCallsText.push(
- `name: ${funcCall.name}, args: ${JSON.stringify(funcCall.args)}`
- );
- }
- }
- }
- const text = response.content?.parts?.filter((part) => part.text)?.map((part) => part.text)?.join("") || "";
- return dedent2`
- LLM Response:
- -----------------------------------------------------------
- Text:
- ${text}
- -----------------------------------------------------------
- Function calls:
- ${functionCallsText.join(NEW_LINE2)}
- -----------------------------------------------------------
- Usage:
- ${JSON.stringify(response.usageMetadata, null, 2)}
- -----------------------------------------------------------
- Finish Reason:
- ${response.finishReason}
- -----------------------------------------------------------`;
- }
  /**
  * Gets the OpenAI client
  */
@@ -2356,14 +2300,289 @@ var OpenAiLlm = class extends BaseLlm {
  }
  };
 
+ // src/models/ai-sdk.ts
+ init_logger();
+ import {
+ generateText,
+ jsonSchema,
+ streamText
+ } from "ai";
+ var AiSdkLlm = class extends BaseLlm {
+ modelInstance;
+ logger = new Logger({ name: "AiSdkLlm" });
+ /**
+ * Constructor accepts a pre-configured LanguageModel instance
+ * @param model - Pre-configured LanguageModel from provider(modelName)
+ */
+ constructor(modelInstance) {
+ super(modelInstance.modelId || "ai-sdk-model");
+ this.modelInstance = modelInstance;
+ }
+ /**
+ * Returns empty array - following Python ADK pattern
+ */
+ static supportedModels() {
+ return [];
+ }
+ async *generateContentAsyncImpl(request, stream = false) {
+ try {
+ const messages = this.convertToAiSdkMessages(request);
+ const systemMessage = request.getSystemInstructionText();
+ const tools = this.convertToAiSdkTools(request);
+ const requestParams = {
+ model: this.modelInstance,
+ messages,
+ system: systemMessage,
+ tools: Object.keys(tools).length > 0 ? tools : void 0,
+ maxTokens: request.config?.maxOutputTokens,
+ temperature: request.config?.temperature,
+ topP: request.config?.topP
+ };
+ if (stream) {
+ const result = streamText(requestParams);
+ let accumulatedText = "";
+ for await (const delta of result.textStream) {
+ accumulatedText += delta;
+ yield new LlmResponse({
+ content: {
+ role: "model",
+ parts: [{ text: accumulatedText }]
+ },
+ partial: true
+ });
+ }
+ const toolCalls = await result.toolCalls;
+ const parts = [];
+ if (accumulatedText) {
+ parts.push({ text: accumulatedText });
+ }
+ if (toolCalls && toolCalls.length > 0) {
+ for (const toolCall of toolCalls) {
+ parts.push({
+ functionCall: {
+ id: toolCall.toolCallId,
+ name: toolCall.toolName,
+ args: toolCall.args
+ }
+ });
+ }
+ }
+ const finalUsage = await result.usage;
+ const finishReason = await result.finishReason;
+ yield new LlmResponse({
+ content: {
+ role: "model",
+ parts: parts.length > 0 ? parts : [{ text: "" }]
+ },
+ usageMetadata: finalUsage ? {
+ promptTokenCount: finalUsage.promptTokens,
+ candidatesTokenCount: finalUsage.completionTokens,
+ totalTokenCount: finalUsage.totalTokens
+ } : void 0,
+ finishReason: this.mapFinishReason(finishReason),
+ turnComplete: true
+ });
+ } else {
+ const result = await generateText(requestParams);
+ const parts = [];
+ if (result.text) {
+ parts.push({ text: result.text });
+ }
+ if (result.toolCalls && result.toolCalls.length > 0) {
+ for (const toolCall of result.toolCalls) {
+ parts.push({
+ functionCall: {
+ id: toolCall.toolCallId,
+ name: toolCall.toolName,
+ args: toolCall.args
+ }
+ });
+ }
+ }
+ yield new LlmResponse({
+ content: {
+ role: "model",
+ parts: parts.length > 0 ? parts : [{ text: "" }]
+ },
+ usageMetadata: result.usage ? {
+ promptTokenCount: result.usage.promptTokens,
+ candidatesTokenCount: result.usage.completionTokens,
+ totalTokenCount: result.usage.totalTokens
+ } : void 0,
+ finishReason: this.mapFinishReason(result.finishReason),
+ turnComplete: true
+ });
+ }
+ } catch (error) {
+ this.logger.error(`AI SDK Error: ${String(error)}`, { error, request });
+ yield LlmResponse.fromError(error, {
+ errorCode: "AI_SDK_ERROR",
+ model: this.model
+ });
+ }
+ }
+ /**
+ * Convert ADK LlmRequest to AI SDK CoreMessage format
+ */
+ convertToAiSdkMessages(llmRequest) {
+ const messages = [];
+ for (const content of llmRequest.contents || []) {
+ const message = this.contentToAiSdkMessage(content);
+ if (message) {
+ messages.push(message);
+ }
+ }
+ return messages;
+ }
+ /**
+ * Convert ADK tools to AI SDK tools format
+ */
+ convertToAiSdkTools(llmRequest) {
+ const tools = {};
+ if (llmRequest.config?.tools) {
+ for (const toolConfig of llmRequest.config.tools) {
+ if ("functionDeclarations" in toolConfig) {
+ for (const funcDecl of toolConfig.functionDeclarations) {
+ tools[funcDecl.name] = {
+ description: funcDecl.description,
+ parameters: jsonSchema(funcDecl.parameters || {})
+ };
+ }
+ }
+ }
+ }
+ return tools;
+ }
+ /**
+ * Convert ADK Content to AI SDK CoreMessage
+ */
+ contentToAiSdkMessage(content) {
+ const role = this.mapRole(content.role);
+ if (!content.parts || content.parts.length === 0) {
+ return null;
+ }
+ if (content.parts.length === 1 && content.parts[0].text) {
+ const textContent = content.parts[0].text;
+ if (role === "system") {
+ return { role: "system", content: textContent };
+ }
+ if (role === "assistant") {
+ return { role: "assistant", content: textContent };
+ }
+ return { role: "user", content: textContent };
+ }
+ if (content.parts?.some((part) => part.functionCall)) {
+ const textParts = content.parts.filter((part) => part.text);
+ const functionCalls = content.parts.filter((part) => part.functionCall);
+ const contentParts2 = [];
+ for (const textPart of textParts) {
+ if (textPart.text) {
+ contentParts2.push({
+ type: "text",
+ text: textPart.text
+ });
+ }
+ }
+ for (const funcPart of functionCalls) {
+ if (funcPart.functionCall) {
+ contentParts2.push({
+ type: "tool-call",
+ toolCallId: funcPart.functionCall.id,
+ toolName: funcPart.functionCall.name,
+ args: funcPart.functionCall.args
+ });
+ }
+ }
+ return {
+ role: "assistant",
+ content: contentParts2
+ };
+ }
+ if (content.parts?.some((part) => part.functionResponse)) {
+ const functionResponses = content.parts.filter(
+ (part) => part.functionResponse
+ );
+ const contentParts2 = functionResponses.map((part) => ({
+ type: "tool-result",
+ toolCallId: part.functionResponse.id,
+ toolName: part.functionResponse.name || "unknown",
+ result: part.functionResponse.response
+ }));
+ return {
+ role: "tool",
+ content: contentParts2
+ };
+ }
+ const contentParts = [];
+ for (const part of content.parts) {
+ if (part.text) {
+ contentParts.push({
+ type: "text",
+ text: part.text
+ });
+ }
+ }
+ if (contentParts.length === 0) {
+ return null;
+ }
+ if (contentParts.length === 1) {
+ const textContent = contentParts[0].text;
+ if (role === "system") {
+ return { role: "system", content: textContent };
+ }
+ if (role === "assistant") {
+ return { role: "assistant", content: textContent };
+ }
+ return { role: "user", content: textContent };
+ }
+ if (role === "system") {
+ const combinedText = contentParts.map((p) => p.text).join("");
+ return { role: "system", content: combinedText };
+ }
+ if (role === "assistant") {
+ return { role: "assistant", content: contentParts };
+ }
+ return { role: "user", content: contentParts };
+ }
+ /**
+ * Map ADK role to AI SDK role
+ */
+ mapRole(role) {
+ switch (role) {
+ case "model":
+ case "assistant":
+ return "assistant";
+ case "system":
+ return "system";
+ default:
+ return "user";
+ }
+ }
+ /**
+ * Map AI SDK finish reason to ADK finish reason
+ */
+ mapFinishReason(finishReason) {
+ switch (finishReason) {
+ case "stop":
+ case "end_of_message":
+ return "STOP";
+ case "length":
+ case "max_tokens":
+ return "MAX_TOKENS";
+ default:
+ return "FINISH_REASON_UNSPECIFIED";
+ }
+ }
+ };
+
  // src/models/llm-registry.ts
  init_logger();
- var logger5 = new Logger({ name: "LLMRegistry" });
  var LLMRegistry = class _LLMRegistry {
  /**
  * Map of model name regex to LLM class
  */
  static llmRegistry = /* @__PURE__ */ new Map();
+ static logger = new Logger({ name: "LLMRegistry" });
  /**
  * Creates a new LLM instance
  *
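
Note: a sketch of wiring the new AiSdkLlm wrapper to a Vercel AI SDK model; the @ai-sdk/openai provider and the LlmRequest construction are assumptions for illustration, not taken from this diff:

    import { openai } from "@ai-sdk/openai"; // any AI SDK LanguageModel should work
    import { AiSdkLlm, LlmRequest } from "@iqai/adk";

    const llm = new AiSdkLlm(openai("gpt-4o-mini")); // model name comes from modelInstance.modelId
    const request = new LlmRequest(); // assumed to be populated with contents/config elsewhere
    for await (const resp of llm.generateContentAsync(request, true)) {
      // streaming chunks arrive with { partial: true }; the final chunk carries
      // usageMetadata, finishReason, and turnComplete: true
    }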
@@ -2415,7 +2634,7 @@ var LLMRegistry = class _LLMRegistry {
  * Logs all registered models for debugging
  */
  static logRegisteredModels() {
- logger5.debug(
+ _LLMRegistry.logger.debug(
  "Registered LLM models:",
  [..._LLMRegistry.llmRegistry.entries()].map(([regex]) => regex.toString())
  );
@@ -3479,6 +3698,7 @@ __export(tools_exports, {
  McpAbi: () => McpAbi,
  McpAtp: () => McpAtp,
  McpBamm: () => McpBamm,
+ McpCoinGecko: () => McpCoinGecko,
  McpError: () => McpError,
  McpErrorType: () => McpErrorType,
  McpFilesystem: () => McpFilesystem,
@@ -4665,15 +4885,13 @@ var McpClientService = class {
  await connectPromise;
  }
  await this.setupSamplingHandler(client);
- if (this.config.debug) {
- console.log("\u2705 MCP client connected successfully");
- }
+ this.logger.debug("\u2705 MCP client connected successfully");
  this.client = client;
  return client;
  } catch (error) {
  await this.cleanupResources();
  if (!(error instanceof McpError)) {
- console.error("Failed to initialize MCP client:", error);
+ this.logger.error("Failed to initialize MCP client:", error);
  throw new McpError(
  `Failed to initialize MCP client: ${error instanceof Error ? error.message : String(error)}`,
  "connection_error" /* CONNECTION_ERROR */,
@@ -4689,12 +4907,10 @@
  async createTransport() {
  try {
  if (this.config.transport.mode === "sse") {
- if (this.config.debug) {
- console.log(
- "\u{1F680} Initializing MCP client in SSE mode",
- this.config.transport.serverUrl
- );
- }
+ this.logger.debug(
+ "\u{1F680} Initializing MCP client in SSE mode",
+ this.config.transport.serverUrl
+ );
  const headers = {
  ...this.config.transport.headers || {},
  ...this.config.headers || {}
@@ -4709,12 +4925,10 @@
  }
  );
  }
- if (this.config.debug) {
- console.log(
- "\u{1F680} Initializing MCP client in STDIO mode",
- this.config.transport.command
- );
- }
+ this.logger.debug(
+ "\u{1F680} Initializing MCP client in STDIO mode",
+ this.config.transport.command
+ );
  return new StdioClientTransport({
  command: this.config.transport.command,
  args: this.config.transport.args,
@@ -4733,9 +4947,7 @@
  * Used by the retry mechanism.
  */
  async reinitialize() {
- if (this.config.debug) {
- console.log("\u{1F504} Reinitializing MCP client after closed connection");
- }
+ this.logger.debug("\u{1F504} Reinitializing MCP client after closed connection");
  await this.cleanupResources();
  this.client = null;
  this.transport = null;
@@ -4759,11 +4971,9 @@
  if (this.transport && typeof this.transport.close === "function") {
  await this.transport.close();
  }
- if (this.config.debug) {
- console.log("\u{1F9F9} Cleaned up MCP client resources");
- }
+ this.logger.debug("\u{1F9F9} Cleaned up MCP client resources");
  } catch (error) {
- console.error("Error cleaning up MCP resources:", error);
+ this.logger.error("Error cleaning up MCP resources:", error);
  } finally {
  this.client = null;
  this.transport = null;
@@ -4805,9 +5015,7 @@
  * Similar to Python's close() method.
  */
  async close() {
- if (this.config.debug) {
- console.log("\u{1F51A} Closing MCP client service");
- }
+ this.logger.debug("\u{1F51A} Closing MCP client service");
  await this.cleanupResources();
  }
  /**
@@ -4818,11 +5026,9 @@
  }
  async setupSamplingHandler(client) {
  if (!this.mcpSamplingHandler) {
- if (this.config.debug) {
- console.log(
- "\u26A0\uFE0F No sampling handler provided - sampling requests will be rejected"
- );
- }
+ this.logger.debug(
+ "\u26A0\uFE0F No sampling handler provided - sampling requests will be rejected"
+ );
  return;
  }
  try {
@@ -4832,12 +5038,10 @@
  try {
  this.logger.debug("Received sampling request:", request);
  const response = await this.mcpSamplingHandler.handleSamplingRequest(request);
- if (this.config.debug) {
- console.log("\u2705 Sampling request completed successfully");
- }
+ this.logger.debug("\u2705 Sampling request completed successfully");
  return response;
  } catch (error) {
- console.error("\u274C Error handling sampling request:", error);
+ this.logger.error("\u274C Error handling sampling request:", error);
  if (error instanceof McpError) {
  throw error;
  }
@@ -4849,16 +5053,12 @@
  }
  }
  );
- if (this.config.debug) {
- console.log("\u{1F3AF} Sampling handler registered successfully");
- }
+ this.logger.debug("\u{1F3AF} Sampling handler registered successfully");
  } catch (error) {
- console.error("Failed to setup sampling handler:", error);
- if (this.config.debug) {
- console.log(
- "\u26A0\uFE0F Sampling handler registration failed, continuing without sampling support"
- );
- }
+ this.logger.error("Failed to setup sampling handler:", error);
+ this.logger.debug(
+ "\u26A0\uFE0F Sampling handler registration failed, continuing without sampling support"
+ );
  }
  }
  /**
@@ -4868,7 +5068,7 @@
  this.mcpSamplingHandler = new McpSamplingHandler(handler);
  if (this.client) {
  this.setupSamplingHandler(this.client).catch((error) => {
- console.error("Failed to update ADK sampling handler:", error);
+ this.logger.error("Failed to update ADK sampling handler:", error);
  });
  }
  }
@@ -4881,7 +5081,7 @@
  try {
  this.client.removeRequestHandler?.("sampling/createMessage");
  } catch (error) {
- console.error("Failed to remove sampling handler:", error);
+ this.logger.error("Failed to remove sampling handler:", error);
  }
  }
  }
@@ -5249,7 +5449,7 @@ function McpNearAgent(config = {}) {
  }
  function McpNearIntentSwaps(config = {}) {
  const mcpConfig = createMcpConfig(
- "NEAR Intent Swaps MCP Client",
+ "Near Intents Swaps MCP Client",
  "@iqai/mcp-near-intent-swaps",
  config
  );
@@ -5271,6 +5471,14 @@ function McpTelegram(config = {}) {
  );
  return new McpToolset(mcpConfig);
  }
+ function McpCoinGecko(config = {}) {
+ const mcpConfig = createMcpConfig(
+ "CoinGecko MCP Client",
+ "@coingecko/coingecko-mcp",
+ config
+ );
+ return new McpToolset(mcpConfig);
+ }
  function McpFilesystem(config = {}) {
  const mcpConfig = createMcpConfig(
  "Filesystem MCP Client",
@@ -5677,89 +5885,47 @@ var BaseLlmFlow = class {
  responseProcessors = [];
  logger = new Logger({ name: "BaseLlmFlow" });
  async *runAsync(invocationContext) {
- this.logger.debug("\u{1F680} Starting runAsync flow", {
- invocationId: invocationContext.invocationId,
- agentName: invocationContext.agent.name,
- branch: invocationContext.branch
- });
+ this.logger.info(`Agent '${invocationContext.agent.name}' started.`);
  let stepCount = 0;
  while (true) {
  stepCount++;
- this.logger.debug(`\u{1F4CB} Running step ${stepCount}`, {
- invocationId: invocationContext.invocationId
- });
  let lastEvent = null;
- let eventCount = 0;
  for await (const event of this._runOneStepAsync(invocationContext)) {
- eventCount++;
  lastEvent = event;
- this.logger.debug(
- `\u{1F4E4} Yielding event ${eventCount} from step ${stepCount}`,
- {
- eventId: event.id,
- eventType: event.constructor.name,
- hasContent: !!event.content,
- isFinalResponse: event.isFinalResponse(),
- partial: event.partial
- }
- );
  yield event;
  }
  if (!lastEvent || lastEvent.isFinalResponse()) {
- this.logger.debug("\u2705 Flow completed", {
- reason: !lastEvent ? "no_events" : "final_response",
- totalSteps: stepCount
- });
+ this.logger.info(
+ `Agent '${invocationContext.agent.name}' finished after ${stepCount} steps.`
+ );
  break;
  }
  if (lastEvent.partial) {
- this.logger.error("\u274C Flow error: Last event is partial", {
- eventId: lastEvent.id,
- stepCount
- });
+ this.logger.error(
+ "Partial event encountered. LLM max output limit may be reached."
+ );
  throw new Error(
  "Last event shouldn't be partial. LLM max output limit may be reached."
  );
  }
  }
- this.logger.debug("\u{1F3C1} runAsync flow finished", {
- totalSteps: stepCount,
- invocationId: invocationContext.invocationId
- });
  }
  async *runLive(invocationContext) {
- this.logger.debug("\u{1F534} Starting runLive flow", {
- invocationId: invocationContext.invocationId,
- agentName: invocationContext.agent.name
- });
  this.logger.warn("\u26A0\uFE0F runLive not fully implemented, delegating to runAsync");
  yield* this.runAsync(invocationContext);
  }
  async *_runOneStepAsync(invocationContext) {
- this.logger.debug("\u{1F504} Starting one step execution", {
- invocationId: invocationContext.invocationId
- });
  const llmRequest = new LlmRequest();
- this.logger.debug("\u{1F4DD} Created new LlmRequest", {
- requestId: llmRequest.id || "unknown"
- });
- this.logger.debug("\u{1F527} Starting preprocessing phase");
  let preprocessEventCount = 0;
  for await (const event of this._preprocessAsync(
  invocationContext,
  llmRequest
  )) {
  preprocessEventCount++;
- this.logger.debug(`\u{1F4E4} Preprocessing event ${preprocessEventCount}`, {
- eventId: event.id
- });
  yield event;
  }
- this.logger.debug("\u2705 Preprocessing completed", {
- eventCount: preprocessEventCount
- });
  if (invocationContext.endInvocation) {
- this.logger.debug("\u{1F6D1} Invocation ended during preprocessing");
+ this.logger.info("Invocation ended during preprocessing.");
  return;
  }
  const modelResponseEvent = new Event({
@@ -5768,9 +5934,6 @@ var BaseLlmFlow = class {
  author: invocationContext.agent.name,
  branch: invocationContext.branch
  });
- this.logger.debug("\u{1F916} Starting LLM call phase", {
- modelResponseEventId: modelResponseEvent.id
- });
  let llmResponseCount = 0;
  for await (const llmResponse of this._callLlmAsync(
  invocationContext,
@@ -5778,12 +5941,6 @@
  modelResponseEvent
  )) {
  llmResponseCount++;
- this.logger.debug(`\u{1F504} Processing LLM response ${llmResponseCount}`, {
- hasContent: !!llmResponse.content,
- hasError: !!llmResponse.errorCode,
- interrupted: !!llmResponse.interrupted,
- partial: !!llmResponse.partial
- });
  for await (const event of this._postprocessAsync(
  invocationContext,
  llmRequest,
@@ -5791,89 +5948,47 @@
  modelResponseEvent
  )) {
  modelResponseEvent.id = Event.newId();
- this.logger.debug("\u{1F4E4} Yielding postprocessed event", {
- eventId: event.id,
- hasFunctionCalls: !!event.getFunctionCalls()
- });
  yield event;
  }
  }
- this.logger.debug("\u2705 One step execution completed", {
- llmResponseCount
- });
  }
  async *_preprocessAsync(invocationContext, llmRequest) {
- this.logger.debug("\u{1F527} Starting preprocessing", {
- processorCount: this.requestProcessors.length
- });
  const agent = invocationContext.agent;
  if (!("canonicalTools" in agent) || typeof agent.canonicalTools !== "function") {
- this.logger.debug("\u2139\uFE0F Agent has no canonical tools");
  return;
  }
- for (let i = 0; i < this.requestProcessors.length; i++) {
- const processor = this.requestProcessors[i];
- this.logger.debug(`\u{1F504} Running request processor ${i + 1}`, {
- processorName: processor.constructor?.name || "unknown"
- });
- let processorEventCount = 0;
+ for (const processor of this.requestProcessors) {
  for await (const event of processor.runAsync(
  invocationContext,
  llmRequest
  )) {
- processorEventCount++;
- this.logger.debug(
- `\u{1F4E4} Request processor ${i + 1} event ${processorEventCount}`,
- {
- eventId: event.id
- }
- );
  yield event;
  }
- this.logger.debug(`\u2705 Request processor ${i + 1} completed`, {
- eventCount: processorEventCount
- });
  }
  const tools = await agent.canonicalTools(
  new ReadonlyContext(invocationContext)
  );
- this.logger.debug("\u{1F6E0}\uFE0F Processing canonical tools", {
- toolCount: tools.length
- });
- for (let i = 0; i < tools.length; i++) {
- const tool = tools[i];
- this.logger.debug(`\u{1F504} Processing tool ${i + 1}`, {
- toolName: tool.constructor?.name || "unknown"
- });
+ for (const tool of tools) {
  const toolContext = new ToolContext(invocationContext);
  await tool.processLlmRequest(toolContext, llmRequest);
- this.logger.debug(`\u2705 Tool ${i + 1} processed`);
  }
- this.logger.debug("\u2705 Preprocessing completed", {
- totalTools: tools.length
- });
+ if (tools.length > 0) {
+ const toolsData = tools.map((tool) => ({
+ Name: tool.name,
+ Description: tool.description?.substring(0, 50) + (tool.description?.length > 50 ? "..." : ""),
+ "Long Running": tool.isLongRunning ? "Yes" : "No"
+ }));
+ this.logger.debugArray("\u{1F6E0}\uFE0F Available Tools", toolsData);
+ }
  }
  async *_postprocessAsync(invocationContext, llmRequest, llmResponse, modelResponseEvent) {
- this.logger.debug("\u{1F504} Starting postprocessing", {
- hasContent: !!llmResponse.content,
- hasError: !!llmResponse.errorCode,
- interrupted: !!llmResponse.interrupted
- });
- let processorEventCount = 0;
  for await (const event of this._postprocessRunProcessorsAsync(
  invocationContext,
  llmResponse
  )) {
- processorEventCount++;
- this.logger.debug(`\u{1F4E4} Response processor event ${processorEventCount}`, {
- eventId: event.id
- });
  yield event;
  }
  if (!llmResponse.content && !llmResponse.errorCode && !llmResponse.interrupted) {
- this.logger.debug(
- "\u2139\uFE0F Skipping event creation - no content, error, or interruption"
- );
  return;
  }
  const finalizedEvent = this._finalizeModelResponseEvent(
@@ -5881,54 +5996,32 @@
  llmResponse,
  modelResponseEvent
  );
- this.logger.debug("\u{1F4DD} Finalized model response event", {
- eventId: finalizedEvent.id,
- hasContent: !!finalizedEvent.content,
- hasFunctionCalls: !!finalizedEvent.getFunctionCalls(),
- longRunningToolIds: finalizedEvent.longRunningToolIds.entries.length || 0
- });
  yield finalizedEvent;
  const functionCalls = finalizedEvent.getFunctionCalls();
- if (functionCalls) {
- this.logger.debug("\u{1F527} Processing function calls", {
- functionCallCount: functionCalls.length
- });
- let functionEventCount = 0;
+ if (functionCalls && functionCalls.length > 0) {
+ const functionCallsData = functionCalls.map((fc) => ({
+ Name: fc.name,
+ Arguments: JSON.stringify(fc.args).substring(0, 100) + (JSON.stringify(fc.args).length > 100 ? "..." : ""),
+ ID: fc.id || "auto"
+ }));
+ this.logger.debugArray("\u{1F527} Function Calls", functionCallsData);
  for await (const event of this._postprocessHandleFunctionCallsAsync(
  invocationContext,
  finalizedEvent,
  llmRequest
  )) {
- functionEventCount++;
- this.logger.debug(`\u{1F4E4} Function call event ${functionEventCount}`, {
- eventId: event.id
- });
  yield event;
  }
- this.logger.debug("\u2705 Function calls processed", {
- eventCount: functionEventCount
- });
  }
- this.logger.debug("\u2705 Postprocessing completed");
  }
  async *_postprocessLive(invocationContext, llmRequest, llmResponse, modelResponseEvent) {
- this.logger.debug("\u{1F534} Starting live postprocessing", {
- hasContent: !!llmResponse.content,
- turnComplete: !!llmResponse.turnComplete
- });
  for await (const event of this._postprocessRunProcessorsAsync(
  invocationContext,
  llmResponse
  )) {
- this.logger.debug("\u{1F4E4} Live response processor event", {
- eventId: event.id
- });
  yield event;
  }
  if (!llmResponse.content && !llmResponse.errorCode && !llmResponse.interrupted && !llmResponse.turnComplete) {
- this.logger.debug(
- "\u2139\uFE0F Skipping live event - no content or completion signal"
- );
  return;
  }
  const finalizedEvent = this._finalizeModelResponseEvent(
@@ -5936,165 +6029,83 @@
  llmResponse,
  modelResponseEvent
  );
- this.logger.debug("\u{1F4DD} Finalized live model response event", {
- eventId: finalizedEvent.id,
- hasFunctionCalls: !!finalizedEvent.getFunctionCalls()
- });
  yield finalizedEvent;
  if (finalizedEvent.getFunctionCalls()) {
- this.logger.debug("\u{1F527} Processing live function calls");
  const functionResponseEvent = await handleFunctionCallsAsync(
  invocationContext,
  finalizedEvent,
  llmRequest.toolsDict || {}
  );
  if (functionResponseEvent) {
- this.logger.debug("\u{1F4E4} Live function response event", {
- eventId: functionResponseEvent.id,
- hasTransfer: !!functionResponseEvent.actions?.transferToAgent
- });
  yield functionResponseEvent;
  const transferToAgent = functionResponseEvent.actions?.transferToAgent;
  if (transferToAgent) {
- this.logger.debug("\u{1F504} Transferring to agent in live mode", {
- targetAgent: transferToAgent
- });
+ this.logger.info(`\u{1F504} Live transfer to agent '${transferToAgent}'`);
  const agentToRun = this._getAgentToRun(
  invocationContext,
  transferToAgent
  );
- let transferEventCount = 0;
  for await (const event of agentToRun.runLive?.(invocationContext) || agentToRun.runAsync(invocationContext)) {
- transferEventCount++;
- this.logger.debug(`\u{1F4E4} Transfer agent event ${transferEventCount}`, {
- eventId: event.id
- });
  yield event;
  }
- this.logger.debug("\u2705 Agent transfer completed", {
- eventCount: transferEventCount
- });
  }
  }
  }
- this.logger.debug("\u2705 Live postprocessing completed");
  }
  async *_postprocessRunProcessorsAsync(invocationContext, llmResponse) {
- this.logger.debug("\u{1F504} Running response processors", {
- processorCount: this.responseProcessors.length
- });
- for (let i = 0; i < this.responseProcessors.length; i++) {
- const processor = this.responseProcessors[i];
- this.logger.debug(`\u{1F504} Running response processor ${i + 1}`, {
- processorName: processor.constructor?.name || "unknown"
- });
- let processorEventCount = 0;
+ for (const processor of this.responseProcessors) {
  for await (const event of processor.runAsync(
  invocationContext,
  llmResponse
  )) {
- processorEventCount++;
- this.logger.debug(
- `\u{1F4E4} Response processor ${i + 1} event ${processorEventCount}`,
- {
- eventId: event.id
- }
- );
  yield event;
  }
- this.logger;
- this.logger.debug(`\u2705 Response processor ${i + 1} completed`, {
- eventCount: processorEventCount
- });
  }
- this.logger.debug("\u2705 All response processors completed");
  }
  async *_postprocessHandleFunctionCallsAsync(invocationContext, functionCallEvent, llmRequest) {
- this.logger.debug("\u{1F527} Handling function calls", {
- eventId: functionCallEvent.id,
- toolsDictSize: Object.keys(llmRequest.toolsDict || {}).length
- });
  const functionResponseEvent = await handleFunctionCallsAsync(
  invocationContext,
  functionCallEvent,
  llmRequest.toolsDict || {}
  );
  if (functionResponseEvent) {
- this.logger.debug("\u{1F4CB} Function calls executed", {
- responseEventId: functionResponseEvent.id,
- hasActions: !!functionResponseEvent.actions
- });
  const authEvent = generateAuthEvent(
  invocationContext,
  functionResponseEvent
  );
  if (authEvent) {
- this.logger.debug("\u{1F510} Generated auth event", {
- authEventId: authEvent.id
- });
  yield authEvent;
  }
  yield functionResponseEvent;
  const transferToAgent = functionResponseEvent.actions?.transferToAgent;
  if (transferToAgent) {
- this.logger.debug("\u{1F504} Transferring to agent", {
- targetAgent: transferToAgent
- });
+ this.logger.info(`\u{1F504} Transferring to agent '${transferToAgent}'`);
  const agentToRun = this._getAgentToRun(
  invocationContext,
  transferToAgent
  );
- let transferEventCount = 0;
  for await (const event of agentToRun.runAsync(invocationContext)) {
- transferEventCount++;
- this.logger.debug(`\u{1F4E4} Transfer agent event ${transferEventCount}`, {
- eventId: event.id
- });
  yield event;
  }
- this.logger.debug("\u2705 Agent transfer completed", {
- eventCount: transferEventCount
- });
  }
- } else {
- this.logger.debug("\u2139\uFE0F No function response event generated");
  }
  }
  _getAgentToRun(invocationContext, agentName) {
- this.logger.debug("\u{1F50D} Finding agent to run", {
- targetAgent: agentName,
- currentAgent: invocationContext.agent.name
- });
  const rootAgent = invocationContext.agent.rootAgent;
  const agentToRun = rootAgent.findAgent(agentName);
  if (!agentToRun) {
- this.logger.error("\u274C Agent not found", {
- targetAgent: agentName,
- rootAgent: rootAgent.name
- });
+ this.logger.error(`Agent '${agentName}' not found in the agent tree.`);
  throw new Error(`Agent ${agentName} not found in the agent tree.`);
  }
- this.logger.debug("\u2705 Agent found", {
- targetAgent: agentName,
- agentType: agentToRun.constructor.name
- });
  return agentToRun;
  }
  async *_callLlmAsync(invocationContext, llmRequest, modelResponseEvent) {
- this.logger.debug("\u{1F916} Starting LLM call", {
- model: llmRequest.model || "default",
- eventId: modelResponseEvent.id
- });
- this.logger.debug("\u{1F504} Processing before model callbacks");
  const beforeModelCallbackContent = await this._handleBeforeModelCallback(
  invocationContext,
  llmRequest,
  modelResponseEvent
  );
  if (beforeModelCallbackContent) {
- this.logger.debug("\u{1F4CB} Before model callback returned content", {
- hasContent: !!beforeModelCallbackContent.content
- });
  yield beforeModelCallbackContent;
  return;
  }
@@ -6102,27 +6113,38 @@
  llmRequest.config.labels = llmRequest.config.labels || {};
  if (!(_ADK_AGENT_NAME_LABEL_KEY in llmRequest.config.labels)) {
  llmRequest.config.labels[_ADK_AGENT_NAME_LABEL_KEY] = invocationContext.agent.name;
- this.logger.debug("\u{1F3F7}\uFE0F Added agent name label", {
- agentName: invocationContext.agent.name
- });
  }
  const llm = this.__getLlm(invocationContext);
- this.logger.debug("\u{1F527} Retrieved LLM instance", {
- llmModel: llm.model,
- llmType: llm.constructor.name
- });
  const runConfig = invocationContext.runConfig;
  if (runConfig.supportCfc) {
  this.logger.warn(
- "\u26A0\uFE0F CFC (supportCfc) not fully implemented, using standard flow"
+ "CFC (supportCfc) not fully implemented, using standard flow."
  );
  }
  invocationContext.incrementLlmCallCount();
- this.logger.debug("\u{1F4C8} Incremented LLM call count");
  const isStreaming = invocationContext.runConfig.streamingMode === "sse" /* SSE */;
- this.logger.debug("\u{1F30A} LLM generation mode", {
- streaming: isStreaming,
- streamingMode: invocationContext.runConfig.streamingMode
+ const tools = llmRequest.config?.tools || [];
+ const toolNames = tools.map((tool) => {
+ if (tool.functionDeclarations && Array.isArray(tool.functionDeclarations)) {
+ return tool.functionDeclarations.map((fn) => fn.name).join(", ");
+ }
+ if (tool.name) return tool.name;
+ if (tool.function?.name) return tool.function.name;
+ if (tool.function?.function?.name) return tool.function.function.name;
+ return "unknown";
+ }).join(", ");
+ const systemInstruction = llmRequest.getSystemInstructionText() || "";
+ const truncatedSystemInstruction = systemInstruction.length > 100 ? `${systemInstruction.substring(0, 100)}...` : systemInstruction;
+ const contentPreview = llmRequest.contents?.length > 0 ? this._formatContentPreview(llmRequest.contents[0]) : "none";
+ this.logger.debugStructured("\u{1F4E4} LLM Request", {
+ Model: llm.model,
+ Agent: invocationContext.agent.name,
+ "Content Items": llmRequest.contents?.length || 0,
+ "Content Preview": contentPreview,
+ "System Instruction": truncatedSystemInstruction || "none",
+ "Available Tools": toolNames || "none",
+ "Tool Count": llmRequest.config?.tools?.length || 0,
+ Streaming: isStreaming ? "Yes" : "No"
  });
  let responseCount = 0;
  for await (const llmResponse of llm.generateContentAsync(
@@ -6130,59 +6152,46 @@ var BaseLlmFlow = class {
  isStreaming
  )) {
  responseCount++;
- this.logger.debug(`\u{1F4E5} Received LLM response ${responseCount}`, {
- hasContent: !!llmResponse.content,
- hasError: !!llmResponse.errorCode,
- interrupted: !!llmResponse.interrupted,
- partial: !!llmResponse.partial,
- finishReason: llmResponse.finishReason,
- usage: llmResponse.usageMetadata ? {
- promptTokens: llmResponse.usageMetadata.promptTokenCount,
- completionTokens: llmResponse.usageMetadata.candidatesTokenCount,
- totalTokens: llmResponse.usageMetadata.totalTokenCount
- } : null
- });
  traceLlmCall(
  invocationContext,
  modelResponseEvent.id,
  llmRequest,
  llmResponse
  );
- this.logger.debug("\u{1F504} Processing after model callbacks");
+ const tokenCount = llmResponse.usageMetadata?.totalTokenCount || "unknown";
+ const functionCallCount = llmResponse.content?.parts?.filter((part) => part.functionCall).length || 0;
+ const responsePreview = this._formatResponsePreview(llmResponse);
+ this.logger.debugStructured("\u{1F4E5} LLM Response", {
+ Model: llm.model,
+ "Token Count": tokenCount,
+ "Function Calls": functionCallCount,
+ "Response Preview": responsePreview,
+ "Finish Reason": llmResponse.finishReason || "unknown",
+ "Response #": responseCount,
+ Partial: llmResponse.partial ? "Yes" : "No",
+ Error: llmResponse.errorCode || "none"
+ });
  const alteredLlmResponse = await this._handleAfterModelCallback(
  invocationContext,
  llmResponse,
  modelResponseEvent
  );
- if (alteredLlmResponse) {
- this.logger.debug("\u{1F4CB} After model callback altered response");
- }
  yield alteredLlmResponse || llmResponse;
  }
- this.logger.debug("\u2705 LLM call completed", {
- totalResponses: responseCount
- });
  }
  async _handleBeforeModelCallback(invocationContext, llmRequest, modelResponseEvent) {
  const agent = invocationContext.agent;
  if (!("canonicalBeforeModelCallbacks" in agent)) {
- this.logger.debug("\u2139\uFE0F Agent has no before model callbacks");
  return;
  }
  const beforeCallbacks = agent.canonicalBeforeModelCallbacks;
  if (!beforeCallbacks) {
- this.logger.debug("\u2139\uFE0F Before model callbacks is null/undefined");
  return;
  }
- this.logger.debug("\u{1F504} Processing before model callbacks", {
- callbackCount: beforeCallbacks.length
- });
  const callbackContext = new CallbackContext(invocationContext, {
  eventActions: modelResponseEvent.actions
  });
- for (let i = 0; i < beforeCallbacks.length; i++) {
- const callback = beforeCallbacks[i];
- this.logger.debug(`\u{1F504} Running before model callback ${i + 1}`);
+ for (const callback of beforeCallbacks) {
  let beforeModelCallbackContent = callback({
  callbackContext,
  llmRequest
@@ -6191,35 +6200,23 @@ var BaseLlmFlow = class {
  beforeModelCallbackContent = await beforeModelCallbackContent;
  }
  if (beforeModelCallbackContent) {
- this.logger.debug(`\u2705 Before model callback ${i + 1} returned content`);
  return beforeModelCallbackContent;
  }
- this.logger.debug(
- `\u2705 Before model callback ${i + 1} completed (no content)`
- );
  }
- this.logger.debug("\u2705 All before model callbacks completed");
  }
  async _handleAfterModelCallback(invocationContext, llmResponse, modelResponseEvent) {
  const agent = invocationContext.agent;
  if (!("canonicalAfterModelCallbacks" in agent)) {
- this.logger.debug("\u2139\uFE0F Agent has no after model callbacks");
  return;
  }
  const afterCallbacks = agent.canonicalAfterModelCallbacks;
  if (!afterCallbacks) {
- this.logger.debug("\u2139\uFE0F After model callbacks is null/undefined");
  return;
  }
- this.logger.debug("\u{1F504} Processing after model callbacks", {
- callbackCount: afterCallbacks.length
- });
  const callbackContext = new CallbackContext(invocationContext, {
  eventActions: modelResponseEvent.actions
  });
- for (let i = 0; i < afterCallbacks.length; i++) {
- const callback = afterCallbacks[i];
- this.logger.debug(`\u{1F504} Running after model callback ${i + 1}`);
+ for (const callback of afterCallbacks) {
  let afterModelCallbackContent = callback({
  callbackContext,
  llmResponse
@@ -6228,21 +6225,11 @@ var BaseLlmFlow = class {
  afterModelCallbackContent = await afterModelCallbackContent;
  }
  if (afterModelCallbackContent) {
- this.logger.debug(`\u2705 After model callback ${i + 1} returned content`);
  return afterModelCallbackContent;
  }
- this.logger.debug(
- `\u2705 After model callback ${i + 1} completed (no content)`
- );
  }
- this.logger.debug("\u2705 All after model callbacks completed");
  }
  _finalizeModelResponseEvent(llmRequest, llmResponse, modelResponseEvent) {
- this.logger.debug("\u{1F4DD} Finalizing model response event", {
- requestModel: llmRequest.model,
- responseHasContent: !!llmResponse.content,
- eventId: modelResponseEvent.id
- });
  const eventData = { ...modelResponseEvent };
  const responseData = { ...llmResponse };
  Object.keys(responseData).forEach((key) => {
@@ -6254,91 +6241,55 @@ var BaseLlmFlow = class {
  if (event.content) {
  const functionCalls = event.getFunctionCalls();
  if (functionCalls) {
- this.logger.debug("\u{1F527} Processing function calls in event", {
- functionCallCount: functionCalls.length
- });
  populateClientFunctionCallId(event);
  event.longRunningToolIds = getLongRunningFunctionCalls(
  functionCalls,
  llmRequest.toolsDict || {}
  );
- this.logger.debug("\u2705 Function calls processed", {
- longRunningToolCount: event.longRunningToolIds.entries.length || 0
- });
  }
  }
- this.logger.debug("\u2705 Model response event finalized", {
- finalEventId: event.id,
- hasContent: !!event.content,
- hasFunctionCalls: !!event.getFunctionCalls()
- });
  return event;
  }
+ /**
+ * Logs data in a visually appealing format that works well in any terminal size.
+ * Uses vertical layout for better readability and respects debug settings.
+ */
+ _formatContentPreview(content) {
+ if (!content) return "none";
+ if (content.parts && Array.isArray(content.parts)) {
+ const textParts = content.parts.filter((part) => part.text).map((part) => part.text).join(" ");
+ return textParts.length > 80 ? `${textParts.substring(0, 80)}...` : textParts || "no text content";
+ }
+ if (typeof content === "string") {
+ return content.length > 80 ? `${content.substring(0, 80)}...` : content;
+ }
+ const stringified = JSON.stringify(content);
+ return stringified.length > 80 ? `${stringified.substring(0, 80)}...` : stringified;
+ }
+ /**
+ * Formats response content preview for debug logging
+ */
+ _formatResponsePreview(llmResponse) {
+ if (!llmResponse.content) return "none";
+ if (llmResponse.content.parts && Array.isArray(llmResponse.content.parts)) {
+ const textParts = llmResponse.content.parts.filter((part) => part.text).map((part) => part.text).join(" ");
+ return textParts.length > 80 ? `${textParts.substring(0, 80)}...` : textParts || "no text content";
+ }
+ const stringified = JSON.stringify(llmResponse.content);
+ return stringified.length > 80 ? `${stringified.substring(0, 80)}...` : stringified;
+ }
  __getLlm(invocationContext) {
  const llm = invocationContext.agent.canonicalModel;
- this.logger.debug("\u{1F527} Retrieved canonical model", {
- model: llm?.model || "unknown",
- llmType: llm?.constructor?.name || "unknown"
- });
  return llm;
  }
  };
 
- // src/flows/llm-flows/single-flow.ts
- init_logger();
-
  // src/flows/llm-flows/base-llm-processor.ts
  var BaseLlmRequestProcessor = class {
  };
  var BaseLlmResponseProcessor = class {
  };
 
- // src/flows/llm-flows/basic.ts
- var BasicLlmRequestProcessor = class extends BaseLlmRequestProcessor {
- async *runAsync(invocationContext, llmRequest) {
- const agent = invocationContext.agent;
- if (!this.isLlmAgent(agent)) {
- return;
- }
- llmRequest.model = typeof agent.canonicalModel === "string" ? agent.canonicalModel : agent.canonicalModel.model;
- if (agent.generateContentConfig) {
- llmRequest.config = JSON.parse(
- JSON.stringify(agent.generateContentConfig)
- );
- } else {
- llmRequest.config = {};
- }
- if (agent.outputSchema) {
- llmRequest.setOutputSchema(agent.outputSchema);
- }
- const runConfig = invocationContext.runConfig;
- if (!llmRequest.liveConnectConfig) {
- llmRequest.liveConnectConfig = {};
- }
- if (runConfig.responseModalities) {
- llmRequest.liveConnectConfig.responseModalities = runConfig.responseModalities;
- }
- llmRequest.liveConnectConfig.speechConfig = runConfig.speechConfig;
- llmRequest.liveConnectConfig.outputAudioTranscription = runConfig.outputAudioTranscription;
- llmRequest.liveConnectConfig.inputAudioTranscription = runConfig.inputAudioTranscription;
- llmRequest.liveConnectConfig.realtimeInputConfig = runConfig.realtimeInputConfig;
- llmRequest.liveConnectConfig.enableAffectiveDialog = runConfig.enableAffectiveDialog;
- llmRequest.liveConnectConfig.proactivity = runConfig.proactivity;
- const tools = await agent.canonicalTools();
- llmRequest.appendTools(tools);
- for await (const _ of []) {
- yield _;
- }
- }
- /**
- * Type guard to check if agent is an LlmAgent
- */
- isLlmAgent(agent) {
- return agent && typeof agent === "object" && "canonicalModel" in agent;
- }
- };
- var requestProcessor = new BasicLlmRequestProcessor();
-
  // src/auth/auth-tool.ts
  var EnhancedAuthConfig = class {
  /**
@@ -6546,152 +6497,738 @@ var AuthLlmRequestProcessor = class extends BaseLlmRequestProcessor {
  }
  }
  };
- var requestProcessor2 = new AuthLlmRequestProcessor();
+ var requestProcessor = new AuthLlmRequestProcessor();
 
- // src/flows/llm-flows/identity.ts
- var IdentityLlmRequestProcessor = class extends BaseLlmRequestProcessor {
+ // src/flows/llm-flows/basic.ts
+ var BasicLlmRequestProcessor = class extends BaseLlmRequestProcessor {
  async *runAsync(invocationContext, llmRequest) {
  const agent = invocationContext.agent;
- const instructions = [
- `You are an agent. Your internal name is "${agent.name}".`
- ];
- if (agent.description) {
- instructions.push(` The description about you is "${agent.description}"`);
+ if (!this.isLlmAgent(agent)) {
+ return;
  }
- llmRequest.appendInstructions(instructions);
+ llmRequest.model = typeof agent.canonicalModel === "string" ? agent.canonicalModel : agent.canonicalModel.model;
+ if (agent.generateContentConfig) {
+ llmRequest.config = JSON.parse(
+ JSON.stringify(agent.generateContentConfig)
+ );
+ } else {
+ llmRequest.config = {};
+ }
+ if (agent.outputSchema) {
+ llmRequest.setOutputSchema(agent.outputSchema);
+ }
+ const runConfig = invocationContext.runConfig;
+ if (!llmRequest.liveConnectConfig) {
+ llmRequest.liveConnectConfig = {};
+ }
+ if (runConfig.responseModalities) {
+ llmRequest.liveConnectConfig.responseModalities = runConfig.responseModalities;
+ }
+ llmRequest.liveConnectConfig.speechConfig = runConfig.speechConfig;
+ llmRequest.liveConnectConfig.outputAudioTranscription = runConfig.outputAudioTranscription;
+ llmRequest.liveConnectConfig.inputAudioTranscription = runConfig.inputAudioTranscription;
+ llmRequest.liveConnectConfig.realtimeInputConfig = runConfig.realtimeInputConfig;
+ llmRequest.liveConnectConfig.enableAffectiveDialog = runConfig.enableAffectiveDialog;
+ llmRequest.liveConnectConfig.proactivity = runConfig.proactivity;
+ const tools = await agent.canonicalTools();
+ llmRequest.appendTools(tools);
  for await (const _ of []) {
  yield _;
  }
  }
+ /**
+ * Type guard to check if agent is an LlmAgent
+ */
+ isLlmAgent(agent) {
+ return agent && typeof agent === "object" && "canonicalModel" in agent;
+ }
  };
- var requestProcessor3 = new IdentityLlmRequestProcessor();
+ var requestProcessor2 = new BasicLlmRequestProcessor();
 
- // src/utils/instructions-utils.ts
- async function injectSessionState(template, readonlyContext) {
- const invocationContext = readonlyContext._invocationContext;
- async function asyncReplace(pattern, replaceAsyncFn, string) {
- const result = [];
- let lastEnd = 0;
- const matches = Array.from(string.matchAll(pattern));
- for (const match of matches) {
- result.push(string.slice(lastEnd, match.index));
- const replacement = await replaceAsyncFn(match);
- result.push(replacement);
- lastEnd = (match.index || 0) + match[0].length;
+ // src/code-executors/base-code-executor.ts
+ var BaseCodeExecutor = class {
+ config;
+ constructor(config = {}) {
+ this.config = {
+ optimizeDataFile: config.optimizeDataFile ?? false,
+ stateful: config.stateful ?? false,
+ errorRetryAttempts: config.errorRetryAttempts ?? 2,
+ codeBlockDelimiters: config.codeBlockDelimiters ?? [
+ ["`tool_code\n", "\n`"],
+ ["`python\n", "\n`"]
+ ],
+ executionResultDelimiters: config.executionResultDelimiters ?? [
+ "`tool_output\n",
+ "\n`"
+ ]
+ };
+ }
+ // Getters for configuration
+ get optimizeDataFile() {
+ return this.config.optimizeDataFile;
+ }
+ get stateful() {
+ return this.config.stateful;
+ }
+ get errorRetryAttempts() {
+ return this.config.errorRetryAttempts;
+ }
+ get codeBlockDelimiters() {
+ return this.config.codeBlockDelimiters;
+ }
+ get executionResultDelimiters() {
+ return this.config.executionResultDelimiters;
+ }
+ };
+
+ // src/code-executors/built-in-code-executor.ts
+ var BuiltInCodeExecutor = class extends BaseCodeExecutor {
+ constructor(config = {}) {
+ super(config);
+ }
+ async executeCode(invocationContext, codeExecutionInput) {
+ throw new Error(
+ "BuiltInCodeExecutor.executeCode should not be called directly"
+ );
+ }
+ /**
+ * Pre-process the LLM request for Gemini 2.0+ models to use the code execution tool
+ */
+ processLlmRequest(llmRequest) {
+ if (!llmRequest.model?.startsWith("gemini-2")) {
+ throw new Error(
+ `Gemini code execution tool is not supported for model ${llmRequest.model}`
+ );
  }
- result.push(string.slice(lastEnd));
- return result.join("");
+ if (!llmRequest.config) {
+ llmRequest.config = {};
+ }
+ if (!llmRequest.config.tools) {
+ llmRequest.config.tools = [];
+ }
+ const codeExecutionTool = {
+ codeExecution: {}
+ };
+ llmRequest.config.tools.push(codeExecutionTool);
  }
- async function replaceMatch(match) {
- let varName = match[0].replace(/[{}]/g, "").trim();
- let optional = false;
- if (varName.endsWith("?")) {
- optional = true;
- varName = varName.slice(0, -1);
+ };
+
+ // src/code-executors/code-execution-utils.ts
+ import { Language, Outcome } from "@google/genai";
+ var CodeExecutionUtils = class _CodeExecutionUtils {
+ /**
+ * Gets the file content as a base64-encoded string
+ */
+ static getEncodedFileContent(data) {
+ let decodedData;
+ if (data instanceof ArrayBuffer) {
+ decodedData = new TextDecoder().decode(data);
  }
- if (varName.startsWith("artifact.")) {
- varName = varName.replace("artifact.", "");
- if (!invocationContext.artifactService) {
- throw new Error("Artifact service is not initialized.");
- }
- try {
- const artifact = await invocationContext.artifactService.loadArtifact({
- appName: invocationContext.session.appName,
- userId: invocationContext.session.userId,
- sessionId: invocationContext.session.id,
- filename: varName
- });
- if (!artifact) {
- throw new Error(`Artifact ${varName} not found.`);
- }
- return String(artifact);
- } catch (error) {
- if (optional) {
- return "";
- }
- throw error;
- }
- } else {
- if (!isValidStateName(varName)) {
- return match[0];
+ if (_CodeExecutionUtils.isBase64Encoded(decodedData)) {
+ return decodedData;
+ }
+ return btoa(decodedData);
+ }
+ static isBase64Encoded(str) {
+ try {
+ return btoa(atob(str)) === str;
+ } catch {
+ return false;
+ }
+ }
+ /**
+ * Extracts the first code block from the content and truncates everything after it
+ */
+ static extractCodeAndTruncateContent(content, codeBlockDelimiters) {
+ if (!content?.parts?.length) {
+ return null;
+ }
+ for (let idx = 0; idx < content.parts.length; idx++) {
+ const part = content.parts[idx];
+ if (part.executableCode && (idx === content.parts.length - 1 || !content.parts[idx + 1].codeExecutionResult)) {
+ content.parts = content.parts.slice(0, idx + 1);
+ return part.executableCode.code;
  }
- const sessionState = invocationContext.session.state;
- if (varName in sessionState) {
- return String(sessionState[varName]);
+ }
+ const textParts = content.parts.filter((p) => p.text);
+ if (!textParts.length) {
+ return null;
+ }
+ const responseText = textParts.map((p) => p.text).join("\n");
+ const leadingDelimiterPattern = codeBlockDelimiters.map(([start]) => _CodeExecutionUtils.escapeRegex(start)).join("|");
+ const trailingDelimiterPattern = codeBlockDelimiters.map(([, end]) => _CodeExecutionUtils.escapeRegex(end)).join("|");
+ const pattern = new RegExp(
+ `(.*?)(${leadingDelimiterPattern})(.*?)(${trailingDelimiterPattern})(.*?)$`,
+ "s"
+ );
+ const match = responseText.match(pattern);
+ if (!match) {
+ return null;
+ }
+ const [, prefix, , code, , suffix] = match;
+ if (!code) {
+ return null;
+ }
+ content.parts = [];
+ if (prefix) {
+ content.parts.push({ text: prefix });
+ }
+ content.parts.push(_CodeExecutionUtils.buildExecutableCodePart(code));
+ return code;
+ }
+ static escapeRegex(str) {
+ return str.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
+ }
+ /**
+ * Builds an executable code part with code string
+ */
+ static buildExecutableCodePart(code) {
+ return {
+ executableCode: {
+ code,
+ language: Language.PYTHON
  }
- if (optional) {
- return "";
+ };
+ }
+ /**
+ * Builds the code execution result part from the code execution result
+ */
+ static buildCodeExecutionResultPart(codeExecutionResult) {
+ if (codeExecutionResult.stderr) {
+ return {
+ codeExecutionResult: {
+ outcome: Outcome.OUTCOME_FAILED,
+ output: codeExecutionResult.stderr
+ }
+ };
+ }
+ const finalResult = [];
+ if (codeExecutionResult.stdout || !codeExecutionResult.outputFiles.length) {
+ finalResult.push(
+ `Code execution result:
+ ${codeExecutionResult.stdout}
+ `
+ );
+ }
+ if (codeExecutionResult.outputFiles.length) {
+ const fileNames = codeExecutionResult.outputFiles.map((f) => `\`${f.name}\``).join(",");
+ finalResult.push(`Saved artifacts:
+ ${fileNames}`);
+ }
+ return {
+ codeExecutionResult: {
+ outcome: Outcome.OUTCOME_OK,
+ output: finalResult.join("\n\n")
  }
- throw new Error(`Context variable not found: \`${varName}\`.`);
+ };
+ }
+ /**
+ * Converts the code execution parts to text parts in a Content
+ */
+ static convertCodeExecutionParts(content, codeBlockDelimiter, executionResultDelimiters) {
+ if (!content.parts?.length) {
+ return;
+ }
+ const lastPart = content.parts[content.parts.length - 1];
+ if (lastPart.executableCode) {
+ content.parts[content.parts.length - 1] = {
+ text: `${codeBlockDelimiter[0]}${lastPart.executableCode.code}${codeBlockDelimiter[1]}`
+ };
+ } else if (content.parts.length === 1 && lastPart.codeExecutionResult) {
+ content.parts[content.parts.length - 1] = {
+ text: `${executionResultDelimiters[0]}${lastPart.codeExecutionResult.output}${executionResultDelimiters[1]}`
+ };
+ content.role = "user";
  }
  }
- return await asyncReplace(/{[^{}]*}/g, replaceMatch, template);
- }
- function isValidStateName(varName) {
- const parts = varName.split(":");
- if (parts.length === 1) {
- return isValidIdentifier(varName);
+ };
+
+ // src/code-executors/code-executor-context.ts
+ var CONTEXT_KEY = "_code_execution_context";
+ var SESSION_ID_KEY = "execution_session_id";
+ var PROCESSED_FILE_NAMES_KEY = "processed_input_files";
+ var INPUT_FILE_KEY = "_code_executor_input_files";
+ var ERROR_COUNT_KEY = "_code_executor_error_counts";
+ var CODE_EXECUTION_RESULTS_KEY = "_code_execution_results";
+ var CodeExecutorContext = class {
+ context;
+ sessionState;
+ constructor(sessionState) {
+ this.sessionState = sessionState;
+ this.context = this.getCodeExecutorContext(sessionState);
  }
- if (parts.length === 2) {
- const validPrefixes = ["app:", "user:", "temp:"];
- const prefix = `${parts[0]}:`;
- if (validPrefixes.includes(prefix)) {
- return isValidIdentifier(parts[1]);
+ /**
+ * Gets the state delta to update in the persistent session state.
+ */
+ getStateDelta() {
+ const contextToUpdate = JSON.parse(JSON.stringify(this.context));
+ return { [CONTEXT_KEY]: contextToUpdate };
+ }
+ /**
+ * Gets the session ID for the code executor.
+ */
+ getExecutionId() {
+ if (!(SESSION_ID_KEY in this.context)) {
+ return null;
  }
+ return this.context[SESSION_ID_KEY];
  }
- return false;
- }
- function isValidIdentifier(name) {
- const identifierRegex = /^[a-zA-Z_$][a-zA-Z0-9_$]*$/;
- return identifierRegex.test(name);
- }
+ /**
+ * Sets the session ID for the code executor.
+ */
+ setExecutionId(sessionId) {
+ this.context[SESSION_ID_KEY] = sessionId;
+ }
+ /**
+ * Gets the processed file names from the session state.
+ */
+ getProcessedFileNames() {
+ if (!(PROCESSED_FILE_NAMES_KEY in this.context)) {
+ return [];
+ }
+ return this.context[PROCESSED_FILE_NAMES_KEY];
+ }
+ /**
+ * Adds the processed file names to the session state.
+ */
+ addProcessedFileNames(fileNames) {
+ if (!(PROCESSED_FILE_NAMES_KEY in this.context)) {
+ this.context[PROCESSED_FILE_NAMES_KEY] = [];
+ }
+ this.context[PROCESSED_FILE_NAMES_KEY].push(...fileNames);
+ }
+ /**
+ * Gets the code executor input files from the session state.
+ */
+ getInputFiles() {
+ if (!(INPUT_FILE_KEY in this.sessionState)) {
+ return [];
+ }
+ return this.sessionState[INPUT_FILE_KEY].map(
+ (file) => file
+ );
+ }
+ /**
+ * Adds the input files to the code executor context.
+ */
+ addInputFiles(inputFiles) {
+ if (!(INPUT_FILE_KEY in this.sessionState)) {
+ this.sessionState[INPUT_FILE_KEY] = [];
+ }
+ const fileArray = this.sessionState[INPUT_FILE_KEY];
+ for (const inputFile of inputFiles) {
+ fileArray.push({
+ name: inputFile.name,
+ content: inputFile.content,
+ mimeType: inputFile.mimeType
+ });
+ }
+ }
+ /**
+ * Removes the input files and processed file names from the code executor context.
+ */
+ clearInputFiles() {
+ if (INPUT_FILE_KEY in this.sessionState) {
+ this.sessionState[INPUT_FILE_KEY] = [];
+ }
+ if (PROCESSED_FILE_NAMES_KEY in this.context) {
+ this.context[PROCESSED_FILE_NAMES_KEY] = [];
+ }
+ }
+ /**
+ * Gets the error count from the session state.
+ */
+ getErrorCount(invocationId) {
+ if (!(ERROR_COUNT_KEY in this.sessionState)) {
+ return 0;
+ }
+ const errorCounts = this.sessionState[ERROR_COUNT_KEY];
+ return errorCounts[invocationId] ?? 0;
+ }
+ /**
+ * Increments the error count for the given invocation ID.
+ */
+ incrementErrorCount(invocationId) {
+ if (!(ERROR_COUNT_KEY in this.sessionState)) {
+ this.sessionState[ERROR_COUNT_KEY] = {};
+ }
+ const errorCounts = this.sessionState[ERROR_COUNT_KEY];
+ errorCounts[invocationId] = this.getErrorCount(invocationId) + 1;
+ }
+ /**
+ * Resets the error count for the given invocation ID.
+ */
+ resetErrorCount(invocationId) {
+ if (!(ERROR_COUNT_KEY in this.sessionState)) {
+ return;
+ }
+ const errorCounts = this.sessionState[ERROR_COUNT_KEY];
+ if (invocationId in errorCounts) {
+ delete errorCounts[invocationId];
+ }
+ }
+ /**
+ * Updates the code execution result.
+ */
+ updateCodeExecutionResult(invocationId, code, resultStdout, resultStderr) {
+ if (!(CODE_EXECUTION_RESULTS_KEY in this.sessionState)) {
+ this.sessionState[CODE_EXECUTION_RESULTS_KEY] = {};
+ }
+ const results = this.sessionState[CODE_EXECUTION_RESULTS_KEY];
+ if (!(invocationId in results)) {
+ results[invocationId] = [];
+ }
+ results[invocationId].push({
+ code,
+ resultStdout,
+ resultStderr,
+ timestamp: Math.floor(Date.now() / 1e3)
+ });
+ }
+ /**
+ * Gets the code executor context from the session state.
+ */
+ getCodeExecutorContext(sessionState) {
+ if (!(CONTEXT_KEY in sessionState)) {
+ sessionState[CONTEXT_KEY] = {};
+ }
+ return sessionState[CONTEXT_KEY];
+ }
+ };
 
- // src/flows/llm-flows/instructions.ts
- var InstructionsLlmRequestProcessor = class extends BaseLlmRequestProcessor {
+ // src/flows/llm-flows/code-execution.ts
+ var DATA_FILE_UTIL_MAP = {
+ "text/csv": {
+ extension: ".csv",
+ loaderCodeTemplate: "pd.read_csv('{filename}')"
+ }
+ };
+ var DATA_FILE_HELPER_LIB = `
+ import pandas as pd
+
+ def explore_df(df: pd.DataFrame) -> None:
+ """Prints some information about a pandas DataFrame."""
+
+ with pd.option_context(
+ 'display.max_columns', None, 'display.expand_frame_repr', False
+ ):
+ # Print the column names to never encounter KeyError when selecting one.
+ df_dtypes = df.dtypes
+
+ # Obtain information about data types and missing values.
+ df_nulls = (len(df) - df.isnull().sum()).apply(
+ lambda x: f'{x} / {df.shape[0]} non-null'
+ )
+
+ # Explore unique total values in columns using \`.unique()\`.
+ df_unique_count = df.apply(lambda x: len(x.unique()))
+
+ # Explore unique values in columns using \`.unique()\`.
+ df_unique = df.apply(lambda x: crop(str(list(x.unique()))))
+
+ df_info = pd.concat(
+ (
+ df_dtypes.rename('Dtype'),
+ df_nulls.rename('Non-Null Count'),
+ df_unique_count.rename('Unique Values Count'),
+ df_unique.rename('Unique Values'),
+ ),
+ axis=1,
+ )
+ df_info.index.name = 'Columns'
+ print(f"""Total rows: {df.shape[0]}
+ Total columns: {df.shape[1]}
+
+ {df_info}""")
+
+ def crop(text: str, max_length: int = 100) -> str:
+ """Crop text to maximum length with ellipsis."""
+ return text if len(text) <= max_length else text[:max_length] + "..."
+ `;
+ function hasCodeExecutor(agent) {
+ return agent && typeof agent === "object" && "codeExecutor" in agent;
+ }
+ var CodeExecutionRequestProcessor = class extends BaseLlmRequestProcessor {
  async *runAsync(invocationContext, llmRequest) {
  const agent = invocationContext.agent;
- if (!this.isLlmAgent(agent)) {
+ if (!hasCodeExecutor(agent)) {
  return;
  }
- const rootAgent = agent.rootAgent;
- if (this.isLlmAgent(rootAgent) && rootAgent.globalInstruction) {
- const [rawInstruction, bypassStateInjection] = await rootAgent.canonicalGlobalInstruction(
- new ReadonlyContext(invocationContext)
+ if (!(agent instanceof LlmAgent) || !agent.codeExecutor) {
+ return;
+ }
+ yield* runPreProcessor(invocationContext, llmRequest);
+ if (!(agent.codeExecutor instanceof BaseCodeExecutor)) {
+ return;
+ }
+ for (const content of llmRequest.contents || []) {
+ CodeExecutionUtils.convertCodeExecutionParts(
+ content,
+ agent.codeExecutor.codeBlockDelimiters[0] || ["", ""],
+ agent.codeExecutor.executionResultDelimiters
  );
- let instruction = rawInstruction;
- if (!bypassStateInjection) {
- instruction = await injectSessionState(
- rawInstruction,
- new ReadonlyContext(invocationContext)
- );
+ }
+ }
+ };
+ var CodeExecutionResponseProcessor = class extends BaseLlmResponseProcessor {
+ async *runAsync(invocationContext, llmResponse) {
+ if (llmResponse.partial) {
+ return;
+ }
+ yield* runPostProcessor(invocationContext, llmResponse);
+ }
+ };
+ async function* runPreProcessor(invocationContext, llmRequest) {
+ const agent = invocationContext.agent;
+ if (!hasCodeExecutor(agent)) {
+ return;
+ }
+ const codeExecutor = agent.codeExecutor;
+ if (!codeExecutor || !(codeExecutor instanceof BaseCodeExecutor)) {
+ return;
+ }
+ if (codeExecutor instanceof BuiltInCodeExecutor) {
+ codeExecutor.processLlmRequest(llmRequest);
+ return;
+ }
+ if (!codeExecutor.optimizeDataFile) {
+ return;
+ }
+ const codeExecutorContext = new CodeExecutorContext(
+ invocationContext.session.state
+ // Type assertion for State compatibility
+ );
+ if (codeExecutorContext.getErrorCount(invocationContext.invocationId) >= codeExecutor.errorRetryAttempts) {
+ return;
+ }
+ const allInputFiles = extractAndReplaceInlineFiles(
+ codeExecutorContext,
+ llmRequest
+ );
+ const processedFileNames = new Set(
+ codeExecutorContext.getProcessedFileNames()
+ );
+ const filesToProcess = allInputFiles.filter(
+ (f) => !processedFileNames.has(f.name)
+ );
+ for (const file of filesToProcess) {
+ const codeStr = getDataFilePreprocessingCode(file);
+ if (!codeStr) {
+ continue;
+ }
+ const codeContent = {
+ role: "model",
+ parts: [
+ { text: `Processing input file: \`${file.name}\`` },
+ CodeExecutionUtils.buildExecutableCodePart(codeStr)
+ ]
+ };
+ llmRequest.contents = llmRequest.contents || [];
+ llmRequest.contents.push(structuredClone(codeContent));
+ yield new Event({
+ invocationId: invocationContext.invocationId,
+ author: agent.name,
+ branch: invocationContext.branch,
+ content: codeContent
+ });
+ const codeExecutionResult = await codeExecutor.executeCode(
+ invocationContext,
+ {
+ code: codeStr,
+ inputFiles: [file],
+ executionId: getOrSetExecutionId(
+ invocationContext,
+ codeExecutorContext
+ )
  }
- llmRequest.appendInstructions([instruction]);
+ );
+ codeExecutorContext.updateCodeExecutionResult(
+ invocationContext.invocationId,
+ codeStr,
+ codeExecutionResult.stdout,
+ codeExecutionResult.stderr
+ );
+ codeExecutorContext.addProcessedFileNames([file.name]);
+ const executionResultEvent = await postProcessCodeExecutionResult(
+ invocationContext,
+ codeExecutorContext,
+ codeExecutionResult
+ );
+ yield executionResultEvent;
+ llmRequest.contents.push(structuredClone(executionResultEvent.content));
+ }
+ }
+ async function* runPostProcessor(invocationContext, llmResponse) {
+ const agent = invocationContext.agent;
+ if (!hasCodeExecutor(agent)) {
+ return;
+ }
+ const codeExecutor = agent.codeExecutor;
+ if (!(codeExecutor instanceof BaseCodeExecutor)) {
+ return;
+ }
+ if (!llmResponse || !llmResponse.content) {
+ return;
+ }
+ if (codeExecutor instanceof BuiltInCodeExecutor) {
+ return;
+ }
+ const codeExecutorContext = new CodeExecutorContext(
+ invocationContext.session.state
+ // Type assertion for State compatibility
+ );
+ if (codeExecutorContext.getErrorCount(invocationContext.invocationId) >= codeExecutor.errorRetryAttempts) {
+ return;
+ }
+ const responseContent = llmResponse.content;
+ const codeStr = CodeExecutionUtils.extractCodeAndTruncateContent(
+ responseContent,
+ codeExecutor.codeBlockDelimiters
+ );
+ if (!codeStr) {
+ return;
+ }
+ yield new Event({
+ invocationId: invocationContext.invocationId,
+ author: agent.name,
+ branch: invocationContext.branch,
+ content: responseContent,
+ actions: new EventActions()
+ });
+ const codeExecutionResult = await codeExecutor.executeCode(
+ invocationContext,
+ {
+ code: codeStr,
+ inputFiles: codeExecutorContext.getInputFiles(),
+ executionId: getOrSetExecutionId(invocationContext, codeExecutorContext)
  }
- if (agent.instruction) {
- const [rawInstruction, bypassStateInjection] = await agent.canonicalInstruction(
- new ReadonlyContext(invocationContext)
- );
- let instruction = rawInstruction;
- if (!bypassStateInjection) {
- instruction = await injectSessionState(
- rawInstruction,
- new ReadonlyContext(invocationContext)
- );
+ );
+ codeExecutorContext.updateCodeExecutionResult(
+ invocationContext.invocationId,
+ codeStr,
+ codeExecutionResult.stdout,
+ codeExecutionResult.stderr
+ );
+ yield await postProcessCodeExecutionResult(
+ invocationContext,
+ codeExecutorContext,
+ codeExecutionResult
+ );
+ llmResponse.content = void 0;
+ }
+ function extractAndReplaceInlineFiles(codeExecutorContext, llmRequest) {
+ const allInputFiles = codeExecutorContext.getInputFiles();
+ const savedFileNames = new Set(allInputFiles.map((f) => f.name));
+ for (let i = 0; i < (llmRequest.contents?.length || 0); i++) {
+ const content = llmRequest.contents[i];
+ if (content.role !== "user" || !content.parts) {
+ continue;
+ }
+ for (let j = 0; j < content.parts.length; j++) {
+ const part = content.parts[j];
+ if (!part.inlineData || !(part.inlineData.mimeType in DATA_FILE_UTIL_MAP)) {
+ continue;
+ }
+ const mimeType = part.inlineData.mimeType;
+ const fileName = `data_${i + 1}_${j + 1}${DATA_FILE_UTIL_MAP[mimeType].extension}`;
+ llmRequest.contents[i].parts[j] = {
+ text: `
+ Available file: \`${fileName}\`
+ `
+ };
+ const file = {
+ name: fileName,
+ content: CodeExecutionUtils.getEncodedFileContent(part.inlineData.data),
+ mimeType
+ };
+ if (!savedFileNames.has(fileName)) {
+ codeExecutorContext.addInputFiles([file]);
+ allInputFiles.push(file);
  }
- llmRequest.appendInstructions([instruction]);
  }
- for await (const _ of []) {
- yield _;
+ }
+ return allInputFiles;
+ }
+ function getOrSetExecutionId(invocationContext, codeExecutorContext) {
+ const agent = invocationContext.agent;
+ if (!hasCodeExecutor(agent) || !agent.codeExecutor?.stateful) {
+ return void 0;
+ }
+ let executionId = codeExecutorContext.getExecutionId();
+ if (!executionId) {
+ executionId = invocationContext.session.id;
+ codeExecutorContext.setExecutionId(executionId);
+ }
+ return executionId;
+ }
+ async function postProcessCodeExecutionResult(invocationContext, codeExecutorContext, codeExecutionResult) {
+ if (!invocationContext.artifactService) {
+ throw new Error("Artifact service is not initialized.");
+ }
+ const resultContent = {
+ role: "model",
+ parts: [
+ CodeExecutionUtils.buildCodeExecutionResultPart(codeExecutionResult)
+ ]
+ };
+ const eventActions = new EventActions({
+ stateDelta: codeExecutorContext.getStateDelta()
+ });
+ if (codeExecutionResult.stderr) {
+ codeExecutorContext.incrementErrorCount(invocationContext.invocationId);
+ } else {
+ codeExecutorContext.resetErrorCount(invocationContext.invocationId);
+ }
+ for (const outputFile of codeExecutionResult.outputFiles) {
+ const version = await invocationContext.artifactService.saveArtifact({
+ appName: invocationContext.appName,
+ userId: invocationContext.userId,
+ sessionId: invocationContext.session.id,
+ filename: outputFile.name,
+ artifact: {
+ inlineData: {
+ data: atob(outputFile.content),
+ // Convert from base64
+ mimeType: outputFile.mimeType
+ }
+ }
+ });
+ eventActions.artifactDelta[outputFile.name] = version;
+ }
+ return new Event({
+ invocationId: invocationContext.invocationId,
+ author: invocationContext.agent.name,
+ branch: invocationContext.branch,
+ content: resultContent,
+ actions: eventActions
+ });
+ }
+ function getDataFilePreprocessingCode(file) {
+ function getNormalizedFileName(fileName) {
+ const baseName = fileName.split(".")[0];
+ let varName2 = baseName.replace(/[^a-zA-Z0-9_]/g, "_");
+ if (/^\d/.test(varName2)) {
+ varName2 = `_${varName2}`;
  }
+ return varName2;
  }
- /**
- * Type guard to check if agent is an LlmAgent
- */
- isLlmAgent(agent) {
- return agent && typeof agent === "object" && "canonicalModel" in agent;
+ if (!(file.mimeType in DATA_FILE_UTIL_MAP)) {
+ return void 0;
  }
- };
- var requestProcessor4 = new InstructionsLlmRequestProcessor();
+ const varName = getNormalizedFileName(file.name);
+ const loaderCode = DATA_FILE_UTIL_MAP[file.mimeType].loaderCodeTemplate.replace("{filename}", file.name);
+ return `
+ ${DATA_FILE_HELPER_LIB}
+
+ # Load the dataframe.
+ ${varName} = ${loaderCode}
+
+ # Use \`explore_df\` to guide my analysis.
+ explore_df(${varName})
+ `;
+ }
+ var requestProcessor3 = new CodeExecutionRequestProcessor();
+ var responseProcessor = new CodeExecutionResponseProcessor();
 
  // src/flows/llm-flows/contents.ts
  var ContentLlmRequestProcessor = class extends BaseLlmRequestProcessor {
@@ -6724,7 +7261,7 @@ var ContentLlmRequestProcessor = class extends BaseLlmRequestProcessor {
  return agent && typeof agent === "object" && "canonicalModel" in agent;
  }
  };
- var requestProcessor5 = new ContentLlmRequestProcessor();
+ var requestProcessor4 = new ContentLlmRequestProcessor();
  function rearrangeEventsForAsyncFunctionResponsesInHistory(events) {
  const functionCallIdToResponseEventsIndex = {};
  for (let i = 0; i < events.length; i++) {
@@ -6989,6 +7526,151 @@ function isAuthEvent(event) {
  return false;
  }
 
+ // src/flows/llm-flows/identity.ts
+ var IdentityLlmRequestProcessor = class extends BaseLlmRequestProcessor {
+ async *runAsync(invocationContext, llmRequest) {
+ const agent = invocationContext.agent;
+ const instructions = [
+ `You are an agent. Your internal name is "${agent.name}".`
+ ];
+ if (agent.description) {
+ instructions.push(` The description about you is "${agent.description}"`);
+ }
+ llmRequest.appendInstructions(instructions);
+ for await (const _ of []) {
+ yield _;
+ }
+ }
+ };
+ var requestProcessor5 = new IdentityLlmRequestProcessor();
+
+ // src/utils/instructions-utils.ts
+ async function injectSessionState(template, readonlyContext) {
+ const invocationContext = readonlyContext._invocationContext;
+ async function asyncReplace(pattern, replaceAsyncFn, string) {
+ const result = [];
+ let lastEnd = 0;
+ const matches = Array.from(string.matchAll(pattern));
+ for (const match of matches) {
+ result.push(string.slice(lastEnd, match.index));
+ const replacement = await replaceAsyncFn(match);
+ result.push(replacement);
+ lastEnd = (match.index || 0) + match[0].length;
+ }
+ result.push(string.slice(lastEnd));
+ return result.join("");
+ }
+ async function replaceMatch(match) {
+ let varName = match[0].replace(/[{}]/g, "").trim();
+ let optional = false;
+ if (varName.endsWith("?")) {
+ optional = true;
+ varName = varName.slice(0, -1);
+ }
+ if (varName.startsWith("artifact.")) {
+ varName = varName.replace("artifact.", "");
+ if (!invocationContext.artifactService) {
+ throw new Error("Artifact service is not initialized.");
+ }
+ try {
+ const artifact = await invocationContext.artifactService.loadArtifact({
+ appName: invocationContext.session.appName,
+ userId: invocationContext.session.userId,
+ sessionId: invocationContext.session.id,
+ filename: varName
+ });
+ if (!artifact) {
+ throw new Error(`Artifact ${varName} not found.`);
+ }
+ return String(artifact);
+ } catch (error) {
+ if (optional) {
+ return "";
+ }
+ throw error;
+ }
+ } else {
+ if (!isValidStateName(varName)) {
+ return match[0];
+ }
+ const sessionState = invocationContext.session.state;
+ if (varName in sessionState) {
+ return String(sessionState[varName]);
+ }
+ if (optional) {
+ return "";
+ }
+ throw new Error(`Context variable not found: \`${varName}\`.`);
+ }
+ }
+ return await asyncReplace(/{[^{}]*}/g, replaceMatch, template);
+ }
+ function isValidStateName(varName) {
+ const parts = varName.split(":");
+ if (parts.length === 1) {
+ return isValidIdentifier(varName);
+ }
+ if (parts.length === 2) {
+ const validPrefixes = ["app:", "user:", "temp:"];
+ const prefix = `${parts[0]}:`;
+ if (validPrefixes.includes(prefix)) {
+ return isValidIdentifier(parts[1]);
+ }
+ }
+ return false;
+ }
+ function isValidIdentifier(name) {
+ const identifierRegex = /^[a-zA-Z_$][a-zA-Z0-9_$]*$/;
+ return identifierRegex.test(name);
+ }
+
+ // src/flows/llm-flows/instructions.ts
+ var InstructionsLlmRequestProcessor = class extends BaseLlmRequestProcessor {
+ async *runAsync(invocationContext, llmRequest) {
+ const agent = invocationContext.agent;
+ if (!this.isLlmAgent(agent)) {
+ return;
+ }
+ const rootAgent = agent.rootAgent;
+ if (this.isLlmAgent(rootAgent) && rootAgent.globalInstruction) {
+ const [rawInstruction, bypassStateInjection] = await rootAgent.canonicalGlobalInstruction(
+ new ReadonlyContext(invocationContext)
+ );
+ let instruction = rawInstruction;
+ if (!bypassStateInjection) {
+ instruction = await injectSessionState(
+ rawInstruction,
+ new ReadonlyContext(invocationContext)
+ );
+ }
+ llmRequest.appendInstructions([instruction]);
+ }
+ if (agent.instruction) {
+ const [rawInstruction, bypassStateInjection] = await agent.canonicalInstruction(
+ new ReadonlyContext(invocationContext)
+ );
+ let instruction = rawInstruction;
+ if (!bypassStateInjection) {
+ instruction = await injectSessionState(
+ rawInstruction,
+ new ReadonlyContext(invocationContext)
+ );
+ }
+ llmRequest.appendInstructions([instruction]);
+ }
+ for await (const _ of []) {
+ yield _;
+ }
+ }
+ /**
+ * Type guard to check if agent is an LlmAgent
+ */
+ isLlmAgent(agent) {
+ return agent && typeof agent === "object" && "canonicalModel" in agent;
+ }
+ };
+ var requestProcessor6 = new InstructionsLlmRequestProcessor();
+
  // src/planners/base-planner.ts
  var BasePlanner = class {
  };
@@ -7264,66 +7946,10 @@ function removeThoughtFromRequest(llmRequest) {
  }
  }
  }
- var requestProcessor6 = new NlPlanningRequestProcessor();
- var responseProcessor = new NlPlanningResponseProcessor();
-
- // src/flows/llm-flows/code-execution.ts
- var CodeExecutionRequestProcessor = class extends BaseLlmRequestProcessor {
- async *runAsync(invocationContext, llmRequest) {
- const agent = invocationContext.agent;
- if (!("codeExecutor" in agent) || !agent.codeExecutor) {
- return;
- }
- console.log(
- "Code execution request processing - TODO: Implement when code-executors module is ready"
- );
- for await (const _ of []) {
- yield _;
- }
- }
- /**
- * Placeholder for pre-processor logic
- * TODO: Implement when code-executors are ready
- */
- async *runPreProcessor(invocationContext, llmRequest) {
- console.log("Code execution pre-processor - placeholder");
- for await (const _ of []) {
- yield _;
- }
- }
- };
- var CodeExecutionResponseProcessor = class extends BaseLlmResponseProcessor {
- async *runAsync(invocationContext, llmResponse) {
- if (llmResponse.partial) {
- return;
- }
- const agent = invocationContext.agent;
- if (!("codeExecutor" in agent) || !agent.codeExecutor) {
- return;
- }
- console.log(
- "Code execution response processing - TODO: Implement when code-executors module is ready"
- );
- for await (const _ of []) {
- yield _;
- }
- }
- /**
- * Placeholder for post-processor logic
- * TODO: Implement when code-executors are ready
- */
- async *runPostProcessor(invocationContext, llmResponse) {
- console.log("Code execution post-processor - placeholder");
- for await (const _ of []) {
- yield _;
- }
- }
- };
- var requestProcessor7 = new CodeExecutionRequestProcessor();
- var responseProcessor2 = new CodeExecutionResponseProcessor();
+ var requestProcessor7 = new NlPlanningRequestProcessor();
+ var responseProcessor2 = new NlPlanningResponseProcessor();
 
  // src/flows/llm-flows/single-flow.ts
- var logger7 = new Logger({ name: "SingleFlow" });
  var SingleFlow = class extends BaseLlmFlow {
  /**
  * Constructor for SingleFlow
@@ -7331,35 +7957,32 @@ var SingleFlow = class extends BaseLlmFlow {
  constructor() {
  super();
  this.requestProcessors.push(
- requestProcessor,
  requestProcessor2,
+ requestProcessor,
  // Phase 3: Auth preprocessor
- requestProcessor4,
- requestProcessor3,
+ requestProcessor6,
  requestProcessor5,
+ requestProcessor4,
  // Some implementations of NL Planning mark planning contents as thoughts
  // in the post processor. Since these need to be unmarked, NL Planning
  // should be after contents.
- requestProcessor6,
+ requestProcessor7,
  // Phase 5: NL Planning
  // Code execution should be after the contents as it mutates the contents
  // to optimize data files.
- requestProcessor7
+ requestProcessor3
  // Phase 5: Code Execution (placeholder)
  );
  this.responseProcessors.push(
- responseProcessor,
+ responseProcessor2,
  // Phase 5: NL Planning
- responseProcessor2
+ responseProcessor
  // Phase 5: Code Execution (placeholder)
  );
- logger7.debug("SingleFlow initialized with processors");
+ this.logger.debug("SingleFlow initialized with processors");
  }
  };
 
- // src/flows/llm-flows/auto-flow.ts
- init_logger();
-
  // src/flows/llm-flows/agent-transfer.ts
  var AgentTransferLlmRequestProcessor = class extends BaseLlmRequestProcessor {
  /**
@@ -7449,7 +8072,6 @@ function getTransferTargets(agent) {
  var requestProcessor8 = new AgentTransferLlmRequestProcessor();
 
  // src/flows/llm-flows/auto-flow.ts
- var logger8 = new Logger({ name: "AutoFlow" });
  var AutoFlow = class extends SingleFlow {
  /**
  * Constructor for AutoFlow
@@ -7457,7 +8079,7 @@ var AutoFlow = class extends SingleFlow {
  constructor() {
  super();
  this.requestProcessors.push(requestProcessor8);
- logger8.debug("AutoFlow initialized with agent transfer capability");
+ this.logger.debug("AutoFlow initialized with agent transfer capability");
  }
  };
 
@@ -7482,6 +8104,10 @@ var LlmAgent = class _LlmAgent extends BaseAgent {
  * Tools available to this agent
  */
  tools;
+ /**
+ * Code executor for this agent
+ */
+ codeExecutor;
  /**
  * Disallows LLM-controlled transferring to the parent agent
  */
@@ -7549,6 +8175,7 @@ var LlmAgent = class _LlmAgent extends BaseAgent {
  this.instruction = config.instruction || "";
  this.globalInstruction = config.globalInstruction || "";
  this.tools = config.tools || [];
+ this.codeExecutor = config.codeExecutor;
  this.disallowTransferToParent = config.disallowTransferToParent || false;
  this.disallowTransferToPeers = config.disallowTransferToPeers || false;
  this.includeContents = config.includeContents || "default";
@@ -7568,11 +8195,14 @@ var LlmAgent = class _LlmAgent extends BaseAgent {
  * This method is only for use by Agent Development Kit
  */
  get canonicalModel() {
- if (typeof this.model !== "string") {
+ if (typeof this.model === "string") {
+ if (this.model) {
+ return LLMRegistry.newLLM(this.model);
+ }
+ } else if (this.model instanceof BaseLlm) {
  return this.model;
- }
- if (this.model) {
- return LLMRegistry.newLLM(this.model);
+ } else if (this.model) {
+ return new AiSdkLlm(this.model);
  }
  let ancestorAgent = this.parentAgent;
  while (ancestorAgent !== null) {
@@ -8338,8 +8968,6 @@ var RunConfig = class {
  };
 
  // src/artifacts/in-memory-artifact-service.ts
- init_logger();
- var logger9 = new Logger({ name: "InMemoryArtifactService" });
  var InMemoryArtifactService = class {
  artifacts = /* @__PURE__ */ new Map();
  fileHasUserNamespace(filename) {
@@ -8787,7 +9415,6 @@ var InMemorySessionService = class extends BaseSessionService {
  };
 
  // src/runners.ts
- var logger10 = new Logger({ name: "Runner" });
  function _findFunctionCallEventIfLastEventIsFunctionResponse(session) {
  const events = session.events;
  if (!events || events.length === 0) {
@@ -8832,6 +9459,7 @@ var Runner = class {
  * The memory service for the runner.
  */
  memoryService;
+ logger = new Logger({ name: "Runner" });
  /**
  * Initializes the Runner.
  */
@@ -8934,7 +9562,7 @@ var Runner = class {
  yield event;
  }
  } catch (error) {
- logger10.debug("Error running agent:", error);
+ this.logger.debug("Error running agent:", error);
  span.recordException(error);
  span.setStatus({
  code: SpanStatusCode.ERROR,
@@ -8998,7 +9626,7 @@ var Runner = class {
  }
  const agent = rootAgent.findSubAgent?.(event2.author);
  if (!agent) {
- logger10.debug(
+ this.logger.debug(
  `Event from an unknown agent: ${event2.author}, event id: ${event2.id}`
  );
  continue;
@@ -9277,6 +9905,20 @@ var AgentBuilder = class _AgentBuilder {
  */
  createAgent() {
  switch (this.agentType) {
+ case "llm": {
+ if (!this.config.model) {
+ throw new Error("Model is required for LLM agent");
+ }
+ const model = this.config.model;
+ return new LlmAgent({
+ name: this.config.name,
+ model,
+ description: this.config.description,
+ instruction: this.config.instruction,
+ tools: this.config.tools,
+ planner: this.config.planner
+ });
+ }
  case "sequential":
  if (!this.config.subAgents) {
  throw new Error("Sub-agents required for sequential agent");
@@ -9315,15 +9957,6 @@ var AgentBuilder = class _AgentBuilder {
  nodes: this.config.nodes,
  rootNode: this.config.rootNode
  });
- default:
- return new LlmAgent({
- name: this.config.name,
- model: this.config.model,
- description: this.config.description,
- instruction: this.config.instruction,
- tools: this.config.tools,
- planner: this.config.planner
- });
  }
  }
  };
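
Note: together with the "llm" case added above, removing this catch-all default makes createAgent strict — previously an unrecognized agentType silently produced an LlmAgent, possibly with an undefined model; now an LLM agent without a model fails fast. A sketch of the failure mode; the fluent create()/build() calls are assumptions about the builder's surface, and only the thrown message comes from this diff:

import { AgentBuilder } from "@iqai/adk";

try {
  // Hypothetical fluent usage — no model is ever configured here.
  AgentBuilder.create("helper").build();
} catch (err) {
  console.error(err); // Error: Model is required for LLM agent
}
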
@@ -10061,11 +10694,11 @@ var DatabaseSessionService = class extends BaseSessionService {
  };
 
  // src/sessions/database-factories.ts
- import dedent3 from "dedent";
+ import dedent from "dedent";
  import { Kysely, MysqlDialect, PostgresDialect, SqliteDialect } from "kysely";
  function createDependencyError(packageName, dbType) {
  return new Error(
- dedent3`
+ dedent`
  Missing required peer dependency: ${packageName}
  To use ${dbType} sessions, install the required package:
  npm install ${packageName}
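
Note: only the bundler-assigned alias changes here (dedent3 becomes dedent); the peer-dependency message is untouched. For context, that error surfaces when createDatabaseSessionService is handed a connection string whose driver package is not installed — a sketch, with the URL scheme and driver package name as assumptions:

import { createDatabaseSessionService } from "@iqai/adk";

// If the matching driver (e.g. better-sqlite3 — an assumption) is missing,
// this throws the dedent-formatted "Missing required peer dependency" error
// built by createDependencyError above.
const sessions = createDatabaseSessionService("sqlite://./sessions.db");
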
@@ -10138,11 +10771,9 @@ function createDatabaseSessionService(databaseUrl, options) {
  }
 
  // src/artifacts/gcs-artifact-service.ts
- init_logger();
  import {
  Storage
  } from "@google-cloud/storage";
- var logger11 = new Logger({ name: "GcsArtifactService" });
  var GcsArtifactService = class {
  bucketName;
  storageClient;
@@ -10297,20 +10928,20 @@ __export(flows_exports, {
  REQUEST_EUC_FUNCTION_CALL_NAME: () => REQUEST_EUC_FUNCTION_CALL_NAME,
  SingleFlow: () => SingleFlow,
  agentTransferRequestProcessor: () => requestProcessor8,
- basicRequestProcessor: () => requestProcessor,
- codeExecutionRequestProcessor: () => requestProcessor7,
- codeExecutionResponseProcessor: () => responseProcessor2,
- contentRequestProcessor: () => requestProcessor5,
+ basicRequestProcessor: () => requestProcessor2,
+ codeExecutionRequestProcessor: () => requestProcessor3,
+ codeExecutionResponseProcessor: () => responseProcessor,
+ contentRequestProcessor: () => requestProcessor4,
  generateAuthEvent: () => generateAuthEvent,
  generateClientFunctionCallId: () => generateClientFunctionCallId,
  getLongRunningFunctionCalls: () => getLongRunningFunctionCalls,
  handleFunctionCallsAsync: () => handleFunctionCallsAsync,
  handleFunctionCallsLive: () => handleFunctionCallsLive,
- identityRequestProcessor: () => requestProcessor3,
- instructionsRequestProcessor: () => requestProcessor4,
+ identityRequestProcessor: () => requestProcessor5,
+ instructionsRequestProcessor: () => requestProcessor6,
  mergeParallelFunctionResponseEvents: () => mergeParallelFunctionResponseEvents,
- nlPlanningRequestProcessor: () => requestProcessor6,
- nlPlanningResponseProcessor: () => responseProcessor,
+ nlPlanningRequestProcessor: () => requestProcessor7,
+ nlPlanningResponseProcessor: () => responseProcessor2,
  populateClientFunctionCallId: () => populateClientFunctionCallId,
  removeClientFunctionCallId: () => removeClientFunctionCallId
  });
@@ -10322,6 +10953,7 @@ export {
  LlmAgent as Agent,
  AgentBuilder,
  agents_exports as Agents,
+ AiSdkLlm,
  AnthropicLlm,
  ApiKeyCredential,
  ApiKeyScheme,
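
Note: AiSdkLlm joins the public exports here, so the adapter canonicalModel uses internally can also be constructed directly. A sketch assuming, consistently with new AiSdkLlm(this.model) earlier in the diff, that the constructor takes a single AI SDK model object and that the class extends BaseLlm; the @ai-sdk/anthropic provider is likewise an assumption:

import { anthropic } from "@ai-sdk/anthropic"; // assumed provider package
import { AiSdkLlm, LlmAgent } from "@iqai/adk";

// Wrap an AI SDK model explicitly instead of letting canonicalModel do it.
const llm = new AiSdkLlm(anthropic("claude-3-5-sonnet-latest"));

// As a BaseLlm instance, canonicalModel returns it unchanged.
const agent = new LlmAgent({ name: "direct", model: llm });
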
@@ -10334,6 +10966,7 @@ export {
  AuthTool,
  AutoFlow,
  BaseAgent,
+ BaseCodeExecutor,
  BaseLLMConnection,
  BaseLlm,
  BaseLlmFlow,
@@ -10344,8 +10977,11 @@ export {
  BaseTool,
  BasicAuthCredential,
  BearerTokenCredential,
+ BuiltInCodeExecutor,
  BuiltInPlanner,
  CallbackContext,
+ CodeExecutionUtils,
+ CodeExecutorContext,
  DatabaseSessionService,
  EnhancedAuthConfig,
  Event,
@@ -10378,6 +11014,7 @@ export {
  McpAbi,
  McpAtp,
  McpBamm,
+ McpCoinGecko,
  McpError,
  McpErrorType,
  McpFilesystem,
@@ -10417,11 +11054,11 @@ export {
  VertexAiSessionService,
  adkToMcpToolType,
  requestProcessor8 as agentTransferRequestProcessor,
- requestProcessor as basicRequestProcessor,
+ requestProcessor2 as basicRequestProcessor,
  buildFunctionDeclaration,
- requestProcessor7 as codeExecutionRequestProcessor,
- responseProcessor2 as codeExecutionResponseProcessor,
- requestProcessor5 as contentRequestProcessor,
+ requestProcessor3 as codeExecutionRequestProcessor,
+ responseProcessor as codeExecutionResponseProcessor,
+ requestProcessor4 as contentRequestProcessor,
  createAuthToolArguments,
  createDatabaseSessionService,
  createFunctionTool,
@@ -10435,22 +11072,22 @@ export {
  getMcpTools,
  handleFunctionCallsAsync,
  handleFunctionCallsLive,
- requestProcessor3 as identityRequestProcessor,
+ requestProcessor5 as identityRequestProcessor,
  initializeTelemetry,
  injectSessionState,
- requestProcessor4 as instructionsRequestProcessor,
+ requestProcessor6 as instructionsRequestProcessor,
  isEnhancedAuthConfig,
  jsonSchemaToDeclaration,
  mcpSchemaToParameters,
  mergeParallelFunctionResponseEvents,
  newInvocationContextId,
- requestProcessor6 as nlPlanningRequestProcessor,
- responseProcessor as nlPlanningResponseProcessor,
+ requestProcessor7 as nlPlanningRequestProcessor,
+ responseProcessor2 as nlPlanningResponseProcessor,
  normalizeJsonSchema,
  populateClientFunctionCallId,
  registerProviders,
  removeClientFunctionCallId,
- requestProcessor2 as requestProcessor,
+ requestProcessor,
  shutdownTelemetry,
  telemetryService,
  traceLlmCall,
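
Note: the requestProcessorN/responseProcessorN churn in the last few hunks is esbuild renumbering its internal identifiers after the new code-execution sources were bundled in; every exported processor alias is stable, and the only binding-level change is that requestProcessor is now exported under its own name instead of via requestProcessor2 as requestProcessor. Consumer imports are unaffected:

import {
  basicRequestProcessor,
  codeExecutionRequestProcessor,
  nlPlanningResponseProcessor,
} from "@iqai/adk";

// The internal names moved between 0.1.4 and 0.1.6, but these aliases
// resolve to the same processors as before.
console.log(basicRequestProcessor);
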