@iqai/adk 0.1.3 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -26,8 +26,9 @@ var __copyProps = (to, from, except, desc) => {
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

  // src/helpers/logger.ts
+ import chalk from "chalk";
  function isDebugEnabled() {
- return process.env.NODE_ENV === "development" || process.env.DEBUG === "true";
+ return process.env.NODE_ENV === "development" || process.env.ADK_DEBUG === "true";
  }
  var Logger;
  var init_logger = __esm({
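
Note: the debug gate now reads ADK_DEBUG instead of the generic DEBUG variable, so ADK logging no longer switches on whenever an unrelated tool sets DEBUG. A minimal sketch of opting in from consumer code (illustrative; exporting ADK_DEBUG=true in the shell before launching has the same effect):

// Illustrative consumer snippet, not part of the package:
process.env.ADK_DEBUG = "true"; // NODE_ENV=development also enables debug output
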
@@ -38,34 +39,99 @@ var init_logger = __esm({
  constructor({ name }) {
  this.name = name;
  }
+ colorize(message) {
+ return chalk.blue(message);
+ }
  debug(message, ...args) {
- const time = (/* @__PURE__ */ new Date()).toISOString();
  if (this.isDebugEnabled) {
- console.log(`[${time}] \u{1F41B} [DEBUG] \u2728 [${this.name}] ${message}`, ...args);
+ const time = (/* @__PURE__ */ new Date()).toLocaleTimeString();
+ console.log(
+ this.colorize(`[${time}] \u{1F41B} [${this.name}] ${message}`),
+ ...args
+ );
  }
  }
  info(message, ...args) {
- const time = (/* @__PURE__ */ new Date()).toISOString();
- console.info(`[${time}] \u2139\uFE0F [INFO] \u2728 [${this.name}] ${message}`, ...args);
+ const time = (/* @__PURE__ */ new Date()).toLocaleTimeString();
+ console.info(
+ this.colorize(`[${time}] \u2139\uFE0F [${this.name}] ${message}`),
+ ...args
+ );
  }
  warn(message, ...args) {
- const time = (/* @__PURE__ */ new Date()).toISOString();
- console.warn(`[${time}] \u{1F6A7} [WARN] \u2728 [${this.name}] ${message}`, ...args);
+ const time = (/* @__PURE__ */ new Date()).toLocaleTimeString();
+ console.warn(
+ this.colorize(`[${time}] \u{1F6A7} [${this.name}] ${message}`),
+ ...args
+ );
  }
  error(message, ...args) {
- const time = (/* @__PURE__ */ new Date()).toISOString();
- console.error(`[${time}] \u274C [ERROR] \u2728 [${this.name}] ${message}`, ...args);
+ const time = (/* @__PURE__ */ new Date()).toLocaleTimeString();
+ console.error(
+ this.colorize(`[${time}] \u274C [${this.name}] ${message}`),
+ ...args
+ );
+ }
+ /**
+ * Logs structured data in a visually appealing table format.
+ * Uses vertical layout for better readability and respects debug settings.
+ */
+ debugStructured(title, data) {
+ if (!this.isDebugEnabled) return;
+ const terminalWidth = process.stdout.columns || 60;
+ const width = Math.min(terminalWidth, 100);
+ const contentWidth = width - 4;
+ const topBorder = `\u250C${"\u2500".repeat(width - 2)}\u2510`;
+ const bottomBorder = `\u2514${"\u2500".repeat(width - 2)}\u2518`;
+ const middleBorder = `\u251C${"\u2500".repeat(width - 2)}\u2524`;
+ console.log(this.colorize(topBorder));
+ console.log(this.colorize(`\u2502 ${title.padEnd(contentWidth)} \u2502`));
+ console.log(this.colorize(middleBorder));
+ Object.entries(data).forEach(([key, value]) => {
+ const formattedKey = key.padEnd(20);
+ const formattedValue = String(value);
+ const availableValueSpace = contentWidth - 20 - 2;
+ const truncatedValue = formattedValue.length > availableValueSpace ? `${formattedValue.substring(0, availableValueSpace - 3)}...` : formattedValue;
+ const content = `${formattedKey}: ${truncatedValue}`;
+ const paddedContent = content.padEnd(contentWidth);
+ console.log(this.colorize(`\u2502 ${paddedContent} \u2502`));
+ });
+ console.log(this.colorize(bottomBorder));
+ }
+ /**
+ * Logs array data in a compact, readable format.
+ */
+ debugArray(title, items) {
+ if (!this.isDebugEnabled) return;
+ const terminalWidth = process.stdout.columns || 78;
+ const width = Math.min(terminalWidth, 120);
+ const contentWidth = width - 4;
+ const topBorder = `\u250C${"\u2500".repeat(width - 2)}\u2510`;
+ const bottomBorder = `\u2514${"\u2500".repeat(width - 2)}\u2518`;
+ const middleBorder = `\u251C${"\u2500".repeat(width - 2)}\u2524`;
+ console.log(this.colorize(topBorder));
+ console.log(this.colorize(`\u2502 ${title.padEnd(contentWidth)} \u2502`));
+ console.log(this.colorize(middleBorder));
+ items.forEach((item, index) => {
+ const itemStr = Object.entries(item).map(([k, v]) => `${k}: ${v}`).join(" \u2022 ");
+ const indexPart = `[${index + 1}] `;
+ const availableSpace = contentWidth - indexPart.length;
+ const truncatedItem = itemStr.length > availableSpace ? `${itemStr.substring(0, availableSpace - 3)}...` : itemStr;
+ const content = `${indexPart}${truncatedItem}`;
+ const paddedContent = content.padEnd(contentWidth);
+ console.log(this.colorize(`\u2502 ${paddedContent} \u2502`));
+ });
+ console.log(this.colorize(bottomBorder));
  }
  };
  }
  });

  // src/tools/base/base-tool.ts
- var logger6, BaseTool;
+ var BaseTool;
  var init_base_tool = __esm({
  "src/tools/base/base-tool.ts"() {
  init_logger();
- logger6 = new Logger({ name: "BaseTool" });
  BaseTool = class {
  /**
  * Name of the tool
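
The new colorize, debugStructured, and debugArray helpers print chalk-blue boxed output sized to the terminal, and only when debug logging is enabled. A minimal usage sketch, assuming Logger is importable from the package root (an assumption; this diff only shows the bundled class) and using illustrative values:

// Hypothetical usage of the new table-style debug helpers.
import { Logger } from "@iqai/adk";

const logger = new Logger({ name: "demo" });

// Key/value rows in one box; overlong values are truncated to fit.
logger.debugStructured("Session", {
  Model: "gemini-1.5-flash",
  "Content Items": 3,
});

// One row per item, rendered as "key: value • key: value".
logger.debugArray("Tools", [{ Name: "search", "Long Running": "No" }]);
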
@@ -96,6 +162,7 @@ var init_base_tool = __esm({
  * Maximum delay for retry in ms
  */
  maxRetryDelay = 1e4;
+ logger = new Logger({ name: "BaseTool" });
  /**
  * Constructor for BaseTool
  */
@@ -226,7 +293,7 @@
  while (attempts <= (this.shouldRetryOnFailure ? this.maxRetryAttempts : 0)) {
  try {
  if (attempts > 0) {
- logger6.debug(
+ this.logger.debug(
  `Retrying tool ${this.name} (attempt ${attempts} of ${this.maxRetryAttempts})...`
  );
  const delay = Math.min(
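
The retry log now goes through the per-instance logger. For context, the computed delay is capped by maxRetryDelay (1e4 ms above); the Math.min arguments fall outside this hunk, so the following is only a sketch of the usual capped exponential backoff, with baseRetryDelay as an assumed field name:

// Hypothetical shape of the truncated expression (not verbatim from the package):
const delay = Math.min(
  this.baseRetryDelay * 2 ** (attempts - 1), // assumed: doubles per retry attempt
  this.maxRetryDelay // never waits longer than the cap
);
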
@@ -613,6 +680,7 @@ __export(agents_exports, {
  // src/models/index.ts
  var models_exports = {};
  __export(models_exports, {
+ AiSdkLlm: () => AiSdkLlm,
  AnthropicLlm: () => AnthropicLlm,
  ApiKeyCredential: () => ApiKeyCredential,
  ApiKeyScheme: () => ApiKeyScheme,
@@ -640,8 +708,6 @@ __export(models_exports, {
  });

  // src/models/llm-request.ts
- init_logger();
- var logger = new Logger({ name: "LlmRequest" });
  var LlmRequest = class {
  /**
  * The model name.
@@ -805,6 +871,10 @@ var LlmResponse = class _LlmResponse {
  * Reason why the model finished generating.
  */
  finishReason;
+ /**
+ * Error object if the response is an error.
+ */
+ error;
  /**
  * Creates a new LlmResponse.
  */
@@ -848,6 +918,29 @@ var LlmResponse = class _LlmResponse {
  usageMetadata
  });
  }
+ /**
+ * Creates an LlmResponse from an error.
+ *
+ * @param error The error object or message.
+ * @param options Additional options for the error response.
+ * @param options.errorCode A specific error code for the response.
+ * @param options.model The model that was being used when the error occurred.
+ * @returns The LlmResponse.
+ */
+ static fromError(error, options = {}) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ const errorCode = options.errorCode || "UNKNOWN_ERROR";
+ return new _LlmResponse({
+ errorCode,
+ errorMessage: `LLM call failed for model ${options.model || "unknown"}: ${errorMessage}`,
+ content: {
+ role: "model",
+ parts: [{ text: `Error: ${errorMessage}` }]
+ },
+ finishReason: "STOP",
+ error: error instanceof Error ? error : new Error(errorMessage)
+ });
+ }
  };

  // src/models/base-llm.ts
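
The new static fromError turns a thrown error into an ordinary response carrying errorCode, errorMessage, a plain-text "Error: ..." part, and the original Error object. A minimal sketch of a caller (the wrapper function is hypothetical; the AiSdkLlm catch block later in this diff uses fromError the same way):

// Hypothetical wrapper: convert thrown provider errors into error responses.
async function* safeGenerate(llm, llmRequest) {
  try {
    yield* llm.generateContentAsync(llmRequest, false);
  } catch (error) {
    yield LlmResponse.fromError(error, {
      errorCode: "PROVIDER_ERROR", // optional; defaults to "UNKNOWN_ERROR"
      model: llm.model,
    });
  }
}
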
@@ -1127,12 +1220,12 @@ var traceLlmCall = (invocationContext, eventId, llmRequest, llmResponse) => tele
  );

  // src/models/base-llm.ts
- var logger2 = new Logger({ name: "BaseLlm" });
  var BaseLlm = class {
  /**
  * The name of the LLM, e.g. gemini-1.5-flash or gemini-1.5-flash-001.
  */
  model;
+ logger = new Logger({ name: "BaseLlm" });
  /**
  * Constructor for BaseLlm
  */
@@ -1183,12 +1276,6 @@ var BaseLlm = class {
  }),
  "adk.streaming": stream || false
  });
- logger2.debug("ADK LLM Request:", {
- model: this.model,
- contentCount: llmRequest.contents?.length || 0,
- streaming: stream || false,
- config: llmRequest.config
- });
  let responseCount = 0;
  let totalTokens = 0;
  for await (const response of this.generateContentAsyncImpl(
@@ -1196,14 +1283,6 @@
  stream
  )) {
  responseCount++;
- logger2.debug(`ADK LLM Response ${responseCount}:`, {
- model: this.model,
- parts: response.parts?.map((part) => ({
- text: typeof part.text === "string" ? part.text.substring(0, 200) + (part.text.length > 200 ? "..." : "") : "[non_text_content]"
- })),
- finishReason: response.finish_reason,
- usage: response.usage
- });
  if (response.usage) {
  totalTokens += response.usage.total_tokens || 0;
  span.setAttributes({
@@ -1224,7 +1303,7 @@
  } catch (error) {
  span.recordException(error);
  span.setStatus({ code: 2, message: error.message });
- console.error("\u274C ADK LLM Error:", {
+ this.logger.error("\u274C ADK LLM Error:", {
  model: this.model,
  error: error.message
  });
@@ -1280,13 +1359,10 @@ var BaseLLMConnection = class {
  };

  // src/models/google-llm.ts
- init_logger();
  import {
  FinishReason,
  GoogleGenAI
  } from "@google/genai";
- import dedent from "dedent";
- var NEW_LINE = "\n";
  var AGENT_ENGINE_TELEMETRY_TAG = "remote_reasoning_engine";
  var AGENT_ENGINE_TELEMETRY_ENV_VARIABLE_NAME = "GOOGLE_CLOUD_AGENT_ENGINE_ID";
  var GoogleLlm = class extends BaseLlm {
@@ -1294,7 +1370,6 @@ var GoogleLlm = class extends BaseLlm {
  _liveApiClient;
  _apiBackend;
  _trackingHeaders;
- logger = new Logger({ name: "GoogleLlm" });
  /**
  * Constructor for Gemini
  */
@@ -1318,10 +1393,6 @@ var GoogleLlm = class extends BaseLlm {
  */
  async *generateContentAsyncImpl(llmRequest, stream = false) {
  this.preprocessRequest(llmRequest);
- this.logger.debug(
- `Sending out request, model: ${llmRequest.model || this.model}, backend: ${this.apiBackend}, stream: ${stream}`
- );
- this.logger.debug(this.buildRequestLog(llmRequest));
  const model = llmRequest.model || this.model;
  const contents = this.convertContents(llmRequest.contents || []);
  const config = this.convertConfig(llmRequest.config);
@@ -1337,7 +1408,6 @@ var GoogleLlm = class extends BaseLlm {
  let usageMetadata = null;
  for await (const resp of responses) {
  response = resp;
- this.logger.debug(this.buildResponseLog(resp));
  const llmResponse = LlmResponse.create(resp);
  usageMetadata = llmResponse.usageMetadata;
  if (llmResponse.content?.parts?.[0]?.text) {
@@ -1390,8 +1460,11 @@ var GoogleLlm = class extends BaseLlm {
  contents,
  config
  });
- this.logger.debug(this.buildResponseLog(response));
- yield LlmResponse.create(response);
+ const llmResponse = LlmResponse.create(response);
+ this.logger.debug(
+ `Google response: ${llmResponse.usageMetadata?.candidatesTokenCount || 0} tokens`
+ );
+ yield llmResponse;
  }
  }
  /**
@@ -1466,60 +1539,6 @@ var GoogleLlm = class extends BaseLlm {
  }
  return `${funcDecl.name}: ${paramStr}`;
  }
- /**
- * Builds request log string.
- */
- buildRequestLog(req) {
- const functionDecls = req.config?.tools?.[0]?.functionDeclarations || [];
- const functionLogs = functionDecls.length > 0 ? functionDecls.map(
- (funcDecl) => this.buildFunctionDeclarationLog(funcDecl)
- ) : [];
- const contentsLogs = req.contents?.map(
- (content) => JSON.stringify(content, (key, value) => {
- if (key === "data" && typeof value === "string" && value.length > 100) {
- return "[EXCLUDED]";
- }
- return value;
- })
- ) || [];
- return dedent`
- LLM Request:
- -----------------------------------------------------------
- System Instruction:
- ${req.config?.systemInstruction || ""}
- -----------------------------------------------------------
- Contents:
- ${contentsLogs.join(NEW_LINE)}
- -----------------------------------------------------------
- Functions:
- ${functionLogs.join(NEW_LINE)}
- -----------------------------------------------------------`;
- }
- /**
- * Builds response log string.
- */
- buildResponseLog(resp) {
- const functionCallsText = [];
- if (resp.functionCalls) {
- for (const funcCall of resp.functionCalls) {
- functionCallsText.push(
- `name: ${funcCall.name}, args: ${JSON.stringify(funcCall.args)}`
- );
- }
- }
- return dedent`
- LLM Response:
- -----------------------------------------------------------
- Text:
- ${resp.text || ""}
- -----------------------------------------------------------
- Function calls:
- ${functionCallsText.join(NEW_LINE)}
- -----------------------------------------------------------
- Raw response:
- ${JSON.stringify(resp, null, 2)}
- -----------------------------------------------------------`;
- }
  /**
  * Provides the api client.
  */
@@ -1613,10 +1632,10 @@
  // src/models/anthropic-llm.ts
  init_logger();
  import Anthropic from "@anthropic-ai/sdk";
- var logger3 = new Logger({ name: "AnthropicLlm" });
  var MAX_TOKENS = 1024;
  var AnthropicLlm = class extends BaseLlm {
  _client;
+ logger = new Logger({ name: "AnthropicLlm" });
  /**
  * Constructor for Anthropic LLM
  */
@@ -1633,9 +1652,6 @@ var AnthropicLlm = class extends BaseLlm {
  * Main content generation method - handles both streaming and non-streaming
  */
  async *generateContentAsyncImpl(llmRequest, stream = false) {
- logger3.debug(
- `Sending Anthropic request, model: ${llmRequest.model || this.model}, stream: ${stream}`
- );
  const model = llmRequest.model || this.model;
  const messages = (llmRequest.contents || []).map(
  (content) => this.contentToAnthropicMessage(content)
@@ -1679,7 +1695,9 @@
  * Convert Anthropic Message to ADK LlmResponse
  */
  anthropicMessageToLlmResponse(message) {
- logger3.debug("Anthropic response:", JSON.stringify(message, null, 2));
+ this.logger.debug(
+ `Anthropic response: ${message.usage.output_tokens} tokens, ${message.stop_reason}`
+ );
  return new LlmResponse({
  content: {
  role: "model",
@@ -1836,11 +1854,7 @@
  };

  // src/models/openai-llm.ts
- init_logger();
- import dedent2 from "dedent";
  import OpenAI from "openai";
- var logger4 = new Logger({ name: "OpenAiLlm" });
- var NEW_LINE2 = "\n";
  var OpenAiLlm = class extends BaseLlm {
  _client;
  /**
@@ -1860,10 +1874,6 @@ var OpenAiLlm = class extends BaseLlm {
  */
  async *generateContentAsyncImpl(llmRequest, stream = false) {
  this.preprocessRequest(llmRequest);
- logger4.debug(
- `Sending OpenAI request, model: ${llmRequest.model || this.model}, stream: ${stream}`
- );
- logger4.debug(this.buildRequestLog(llmRequest));
  const model = llmRequest.model || this.model;
  const messages = (llmRequest.contents || []).map(
  (content) => this.contentToOpenAiMessage(content)
@@ -1905,12 +1915,10 @@
  const choice = chunk.choices[0];
  if (!choice) continue;
  const delta = choice.delta;
- logger4.debug("Delta content:", delta.content);
  const llmResponse = this.createChunkResponse(delta, chunk.usage);
  if (chunk.usage) {
  usageMetadata = chunk.usage;
  }
- logger4.debug(this.buildResponseLog(llmResponse));
  if (llmResponse.content?.parts?.[0]?.text) {
  const part0 = llmResponse.content.parts[0];
  if (part0.thought) {
@@ -1993,7 +2001,6 @@
  } : void 0,
  finishReason: this.toAdkFinishReason(choice.finish_reason)
  });
- logger4.debug(this.buildResponseLog(finalResponse));
  yield finalResponse;
  } else {
  yield llmResponse;
@@ -2030,7 +2037,9 @@
  choice,
  response.usage
  );
- logger4.debug(this.buildResponseLog(llmResponse));
+ this.logger.debug(
+ `OpenAI response: ${response.usage?.completion_tokens || 0} tokens`
+ );
  yield llmResponse;
  }
  }
@@ -2084,10 +2093,6 @@
  */
  openAiMessageToLlmResponse(choice, usage) {
  const message = choice.message;
- logger4.debug(
- "OpenAI response:",
- JSON.stringify({ message, usage }, null, 2)
- );
  const parts = [];
  if (message.content) {
  parts.push({ text: message.content });
@@ -2276,67 +2281,6 @@ var OpenAiLlm = class extends BaseLlm {
  const parts = response.content?.parts;
  return parts?.some((part) => part.inlineData) || false;
  }
- /**
- * Build request log string for debugging (similar to Google LLM)
- */
- buildRequestLog(req) {
- const functionDecls = req.config?.tools?.[0]?.functionDeclarations || [];
- const functionLogs = functionDecls.length > 0 ? functionDecls.map(
- (funcDecl) => `${funcDecl.name}: ${JSON.stringify(funcDecl.parameters?.properties || {})}`
- ) : [];
- const contentsLogs = req.contents?.map(
- (content) => JSON.stringify(content, (key, value) => {
- if (key === "data" && typeof value === "string" && value.length > 100) {
- return "[EXCLUDED]";
- }
- return value;
- })
- ) || [];
- return dedent2`
- LLM Request:
- -----------------------------------------------------------
- System Instruction:
- ${req.getSystemInstructionText() || ""}
- -----------------------------------------------------------
- Contents:
- ${contentsLogs.join(NEW_LINE2)}
- -----------------------------------------------------------
- Functions:
- ${functionLogs.join(NEW_LINE2)}
- -----------------------------------------------------------`;
- }
- /**
- * Build response log string for debugging (similar to Google LLM)
- */
- buildResponseLog(response) {
- const functionCallsText = [];
- if (response.content?.parts) {
- for (const part of response.content.parts) {
- if (part.functionCall) {
- const funcCall = part.functionCall;
- functionCallsText.push(
- `name: ${funcCall.name}, args: ${JSON.stringify(funcCall.args)}`
- );
- }
- }
- }
- const text = response.content?.parts?.filter((part) => part.text)?.map((part) => part.text)?.join("") || "";
- return dedent2`
- LLM Response:
- -----------------------------------------------------------
- Text:
- ${text}
- -----------------------------------------------------------
- Function calls:
- ${functionCallsText.join(NEW_LINE2)}
- -----------------------------------------------------------
- Usage:
- ${JSON.stringify(response.usageMetadata, null, 2)}
- -----------------------------------------------------------
- Finish Reason:
- ${response.finishReason}
- -----------------------------------------------------------`;
- }
  /**
  * Gets the OpenAI client
  */
@@ -2356,14 +2300,289 @@ var OpenAiLlm = class extends BaseLlm {
  }
  };

+ // src/models/ai-sdk.ts
+ init_logger();
+ import {
+ generateText,
+ jsonSchema,
+ streamText
+ } from "ai";
+ var AiSdkLlm = class extends BaseLlm {
+ modelInstance;
+ logger = new Logger({ name: "AiSdkLlm" });
+ /**
+ * Constructor accepts a pre-configured LanguageModel instance
+ * @param model - Pre-configured LanguageModel from provider(modelName)
+ */
+ constructor(modelInstance) {
+ super(modelInstance.modelId || "ai-sdk-model");
+ this.modelInstance = modelInstance;
+ }
+ /**
+ * Returns empty array - following Python ADK pattern
+ */
+ static supportedModels() {
+ return [];
+ }
+ async *generateContentAsyncImpl(request, stream = false) {
+ try {
+ const messages = this.convertToAiSdkMessages(request);
+ const systemMessage = request.getSystemInstructionText();
+ const tools = this.convertToAiSdkTools(request);
+ const requestParams = {
+ model: this.modelInstance,
+ messages,
+ system: systemMessage,
+ tools: Object.keys(tools).length > 0 ? tools : void 0,
+ maxTokens: request.config?.maxOutputTokens,
+ temperature: request.config?.temperature,
+ topP: request.config?.topP
+ };
+ if (stream) {
+ const result = streamText(requestParams);
+ let accumulatedText = "";
+ for await (const delta of result.textStream) {
+ accumulatedText += delta;
+ yield new LlmResponse({
+ content: {
+ role: "model",
+ parts: [{ text: accumulatedText }]
+ },
+ partial: true
+ });
+ }
+ const toolCalls = await result.toolCalls;
+ const parts = [];
+ if (accumulatedText) {
+ parts.push({ text: accumulatedText });
+ }
+ if (toolCalls && toolCalls.length > 0) {
+ for (const toolCall of toolCalls) {
+ parts.push({
+ functionCall: {
+ id: toolCall.toolCallId,
+ name: toolCall.toolName,
+ args: toolCall.args
+ }
+ });
+ }
+ }
+ const finalUsage = await result.usage;
+ const finishReason = await result.finishReason;
+ yield new LlmResponse({
+ content: {
+ role: "model",
+ parts: parts.length > 0 ? parts : [{ text: "" }]
+ },
+ usageMetadata: finalUsage ? {
+ promptTokenCount: finalUsage.promptTokens,
+ candidatesTokenCount: finalUsage.completionTokens,
+ totalTokenCount: finalUsage.totalTokens
+ } : void 0,
+ finishReason: this.mapFinishReason(finishReason),
+ turnComplete: true
+ });
+ } else {
+ const result = await generateText(requestParams);
+ const parts = [];
+ if (result.text) {
+ parts.push({ text: result.text });
+ }
+ if (result.toolCalls && result.toolCalls.length > 0) {
+ for (const toolCall of result.toolCalls) {
+ parts.push({
+ functionCall: {
+ id: toolCall.toolCallId,
+ name: toolCall.toolName,
+ args: toolCall.args
+ }
+ });
+ }
+ }
+ yield new LlmResponse({
+ content: {
+ role: "model",
+ parts: parts.length > 0 ? parts : [{ text: "" }]
+ },
+ usageMetadata: result.usage ? {
+ promptTokenCount: result.usage.promptTokens,
+ candidatesTokenCount: result.usage.completionTokens,
+ totalTokenCount: result.usage.totalTokens
+ } : void 0,
+ finishReason: this.mapFinishReason(result.finishReason),
+ turnComplete: true
+ });
+ }
+ } catch (error) {
+ this.logger.error(`AI SDK Error: ${String(error)}`, { error, request });
+ yield LlmResponse.fromError(error, {
+ errorCode: "AI_SDK_ERROR",
+ model: this.model
+ });
+ }
+ }
+ /**
+ * Convert ADK LlmRequest to AI SDK CoreMessage format
+ */
+ convertToAiSdkMessages(llmRequest) {
+ const messages = [];
+ for (const content of llmRequest.contents || []) {
+ const message = this.contentToAiSdkMessage(content);
+ if (message) {
+ messages.push(message);
+ }
+ }
+ return messages;
+ }
+ /**
+ * Convert ADK tools to AI SDK tools format
+ */
+ convertToAiSdkTools(llmRequest) {
+ const tools = {};
+ if (llmRequest.config?.tools) {
+ for (const toolConfig of llmRequest.config.tools) {
+ if ("functionDeclarations" in toolConfig) {
+ for (const funcDecl of toolConfig.functionDeclarations) {
+ tools[funcDecl.name] = {
+ description: funcDecl.description,
+ parameters: jsonSchema(funcDecl.parameters || {})
+ };
+ }
+ }
+ }
+ }
+ return tools;
+ }
+ /**
+ * Convert ADK Content to AI SDK CoreMessage
+ */
+ contentToAiSdkMessage(content) {
+ const role = this.mapRole(content.role);
+ if (!content.parts || content.parts.length === 0) {
+ return null;
+ }
+ if (content.parts.length === 1 && content.parts[0].text) {
+ const textContent = content.parts[0].text;
+ if (role === "system") {
+ return { role: "system", content: textContent };
+ }
+ if (role === "assistant") {
+ return { role: "assistant", content: textContent };
+ }
+ return { role: "user", content: textContent };
+ }
+ if (content.parts?.some((part) => part.functionCall)) {
+ const textParts = content.parts.filter((part) => part.text);
+ const functionCalls = content.parts.filter((part) => part.functionCall);
+ const contentParts2 = [];
+ for (const textPart of textParts) {
+ if (textPart.text) {
+ contentParts2.push({
+ type: "text",
+ text: textPart.text
+ });
+ }
+ }
+ for (const funcPart of functionCalls) {
+ if (funcPart.functionCall) {
+ contentParts2.push({
+ type: "tool-call",
+ toolCallId: funcPart.functionCall.id,
+ toolName: funcPart.functionCall.name,
+ args: funcPart.functionCall.args
+ });
+ }
+ }
+ return {
+ role: "assistant",
+ content: contentParts2
+ };
+ }
+ if (content.parts?.some((part) => part.functionResponse)) {
+ const functionResponses = content.parts.filter(
+ (part) => part.functionResponse
+ );
+ const contentParts2 = functionResponses.map((part) => ({
+ type: "tool-result",
+ toolCallId: part.functionResponse.id,
+ toolName: part.functionResponse.name || "unknown",
+ result: part.functionResponse.response
+ }));
+ return {
+ role: "tool",
+ content: contentParts2
+ };
+ }
+ const contentParts = [];
+ for (const part of content.parts) {
+ if (part.text) {
+ contentParts.push({
+ type: "text",
+ text: part.text
+ });
+ }
+ }
+ if (contentParts.length === 0) {
+ return null;
+ }
+ if (contentParts.length === 1) {
+ const textContent = contentParts[0].text;
+ if (role === "system") {
+ return { role: "system", content: textContent };
+ }
+ if (role === "assistant") {
+ return { role: "assistant", content: textContent };
+ }
+ return { role: "user", content: textContent };
+ }
+ if (role === "system") {
+ const combinedText = contentParts.map((p) => p.text).join("");
+ return { role: "system", content: combinedText };
+ }
+ if (role === "assistant") {
+ return { role: "assistant", content: contentParts };
+ }
+ return { role: "user", content: contentParts };
+ }
+ /**
+ * Map ADK role to AI SDK role
+ */
+ mapRole(role) {
+ switch (role) {
+ case "model":
+ case "assistant":
+ return "assistant";
+ case "system":
+ return "system";
+ default:
+ return "user";
+ }
+ }
+ /**
+ * Map AI SDK finish reason to ADK finish reason
+ */
+ mapFinishReason(finishReason) {
+ switch (finishReason) {
+ case "stop":
+ case "end_of_message":
+ return "STOP";
+ case "length":
+ case "max_tokens":
+ return "MAX_TOKENS";
+ default:
+ return "FINISH_REASON_UNSPECIFIED";
+ }
+ }
+ };
+
  // src/models/llm-registry.ts
  init_logger();
- var logger5 = new Logger({ name: "LLMRegistry" });
  var LLMRegistry = class _LLMRegistry {
  /**
  * Map of model name regex to LLM class
  */
  static llmRegistry = /* @__PURE__ */ new Map();
+ static logger = new Logger({ name: "LLMRegistry" });
  /**
  * Creates a new LLM instance
  *
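
The new AiSdkLlm adapter wraps a pre-built Vercel AI SDK LanguageModel, translating ADK contents and tools into CoreMessage and tool formats for both streaming and non-streaming calls. A minimal usage sketch, assuming AiSdkLlm is re-exported from the package root (suggested, but not proven, by the models_exports entry above) and that @ai-sdk/openai is installed; the model name is illustrative:

// Hypothetical usage of the AI SDK adapter.
import { openai } from "@ai-sdk/openai";
import { AiSdkLlm } from "@iqai/adk";

const llm = new AiSdkLlm(openai("gpt-4o-mini")); // any AI SDK provider model works

// llmRequest is an LlmRequest built elsewhere; false = non-streaming.
async function ask(llmRequest) {
  for await (const response of llm.generateContentAsync(llmRequest, false)) {
    console.log(response.content?.parts?.[0]?.text);
  }
}
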
@@ -2415,7 +2634,7 @@ var LLMRegistry = class _LLMRegistry {
  * Logs all registered models for debugging
  */
  static logRegisteredModels() {
- logger5.debug(
+ _LLMRegistry.logger.debug(
  "Registered LLM models:",
  [..._LLMRegistry.llmRegistry.entries()].map(([regex]) => regex.toString())
  );
@@ -4665,15 +4884,13 @@ var McpClientService = class {
  await connectPromise;
  }
  await this.setupSamplingHandler(client);
- if (this.config.debug) {
- console.log("\u2705 MCP client connected successfully");
- }
+ this.logger.debug("\u2705 MCP client connected successfully");
  this.client = client;
  return client;
  } catch (error) {
  await this.cleanupResources();
  if (!(error instanceof McpError)) {
- console.error("Failed to initialize MCP client:", error);
+ this.logger.error("Failed to initialize MCP client:", error);
  throw new McpError(
  `Failed to initialize MCP client: ${error instanceof Error ? error.message : String(error)}`,
  "connection_error" /* CONNECTION_ERROR */,
@@ -4689,12 +4906,10 @@
  async createTransport() {
  try {
  if (this.config.transport.mode === "sse") {
- if (this.config.debug) {
- console.log(
- "\u{1F680} Initializing MCP client in SSE mode",
- this.config.transport.serverUrl
- );
- }
+ this.logger.debug(
+ "\u{1F680} Initializing MCP client in SSE mode",
+ this.config.transport.serverUrl
+ );
  const headers = {
  ...this.config.transport.headers || {},
  ...this.config.headers || {}
@@ -4709,12 +4924,10 @@
  }
  );
  }
- if (this.config.debug) {
- console.log(
- "\u{1F680} Initializing MCP client in STDIO mode",
- this.config.transport.command
- );
- }
+ this.logger.debug(
+ "\u{1F680} Initializing MCP client in STDIO mode",
+ this.config.transport.command
+ );
  return new StdioClientTransport({
  command: this.config.transport.command,
  args: this.config.transport.args,
@@ -4733,9 +4946,7 @@
  * Used by the retry mechanism.
  */
  async reinitialize() {
- if (this.config.debug) {
- console.log("\u{1F504} Reinitializing MCP client after closed connection");
- }
+ this.logger.debug("\u{1F504} Reinitializing MCP client after closed connection");
  await this.cleanupResources();
  this.client = null;
  this.transport = null;
@@ -4759,11 +4970,9 @@
  if (this.transport && typeof this.transport.close === "function") {
  await this.transport.close();
  }
- if (this.config.debug) {
- console.log("\u{1F9F9} Cleaned up MCP client resources");
- }
+ this.logger.debug("\u{1F9F9} Cleaned up MCP client resources");
  } catch (error) {
- console.error("Error cleaning up MCP resources:", error);
+ this.logger.error("Error cleaning up MCP resources:", error);
  } finally {
  this.client = null;
  this.transport = null;
@@ -4805,9 +5014,7 @@
  * Similar to Python's close() method.
  */
  async close() {
- if (this.config.debug) {
- console.log("\u{1F51A} Closing MCP client service");
- }
+ this.logger.debug("\u{1F51A} Closing MCP client service");
  await this.cleanupResources();
  }
  /**
@@ -4818,11 +5025,9 @@
  }
  async setupSamplingHandler(client) {
  if (!this.mcpSamplingHandler) {
- if (this.config.debug) {
- console.log(
- "\u26A0\uFE0F No sampling handler provided - sampling requests will be rejected"
- );
- }
+ this.logger.debug(
+ "\u26A0\uFE0F No sampling handler provided - sampling requests will be rejected"
+ );
  return;
  }
  try {
@@ -4832,12 +5037,10 @@
  try {
  this.logger.debug("Received sampling request:", request);
  const response = await this.mcpSamplingHandler.handleSamplingRequest(request);
- if (this.config.debug) {
- console.log("\u2705 Sampling request completed successfully");
- }
+ this.logger.debug("\u2705 Sampling request completed successfully");
  return response;
  } catch (error) {
- console.error("\u274C Error handling sampling request:", error);
+ this.logger.error("\u274C Error handling sampling request:", error);
  if (error instanceof McpError) {
  throw error;
  }
@@ -4849,16 +5052,12 @@
  }
  }
  );
- if (this.config.debug) {
- console.log("\u{1F3AF} Sampling handler registered successfully");
- }
+ this.logger.debug("\u{1F3AF} Sampling handler registered successfully");
  } catch (error) {
- console.error("Failed to setup sampling handler:", error);
- if (this.config.debug) {
- console.log(
- "\u26A0\uFE0F Sampling handler registration failed, continuing without sampling support"
- );
- }
+ this.logger.error("Failed to setup sampling handler:", error);
+ this.logger.debug(
+ "\u26A0\uFE0F Sampling handler registration failed, continuing without sampling support"
+ );
  }
  }
  /**
@@ -4868,7 +5067,7 @@
  this.mcpSamplingHandler = new McpSamplingHandler(handler);
  if (this.client) {
  this.setupSamplingHandler(this.client).catch((error) => {
- console.error("Failed to update ADK sampling handler:", error);
+ this.logger.error("Failed to update ADK sampling handler:", error);
  });
  }
  }
@@ -4881,7 +5080,7 @@
  try {
  this.client.removeRequestHandler?.("sampling/createMessage");
  } catch (error) {
- console.error("Failed to remove sampling handler:", error);
+ this.logger.error("Failed to remove sampling handler:", error);
  }
  }
  }
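
Throughout McpClientService, blocks of the form if (this.config.debug) console.log(...) are replaced with this.logger.debug(...), and bare console.error calls with this.logger.error(...). One behavioral consequence, inferred from the isDebugEnabled change at the top of this diff rather than stated in it: setting debug: true in the MCP config no longer surfaces these messages by itself; the logger's own gate must also be open:

// Illustrative: with this build, MCP debug output requires the logger gate.
process.env.ADK_DEBUG = "true"; // or run with NODE_ENV=development
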
@@ -5177,7 +5376,13 @@ var McpToolAdapter = class extends BaseTool {

  // src/tools/mcp/servers.ts
  function createMcpConfig(name, packageName, config = {}) {
- const { debug, description, retryOptions, env: envVars = {} } = config;
+ const {
+ debug,
+ description,
+ retryOptions,
+ env: envVars = {},
+ samplingHandler
+ } = config;
  const env = {};
  for (const [key, value] of Object.entries(envVars)) {
  if (value !== void 0) {
@@ -5197,7 +5402,8 @@ function createMcpConfig(name, packageName, config = {}) {
  command: "npx",
  args: ["-y", packageName],
  env
- }
+ },
+ samplingHandler
  };
  }
  function McpAbi(config = {}) {
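
createMcpConfig now forwards an optional samplingHandler from the helper config into the generated client config, letting servers launched through these helpers route sampling (LLM completion) requests back to the host. A minimal sketch against the McpAbi helper above; the handler signature and return shape are assumptions based on the handleSamplingRequest call earlier in this diff:

// Hypothetical wiring of a sampling handler into an MCP server helper.
const abiConfig = McpAbi({
  env: { EXAMPLE_VAR: "value" }, // illustrative
  samplingHandler: async (request) => {
    // Answer the server's sampling request, e.g. by calling the host's LLM.
    return handleWithHostLlm(request); // handleWithHostLlm is hypothetical
  },
});
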
@@ -5242,7 +5448,7 @@ function McpNearAgent(config = {}) {
  }
  function McpNearIntentSwaps(config = {}) {
  const mcpConfig = createMcpConfig(
- "NEAR Intent Swaps MCP Client",
+ "Near Intents Swaps MCP Client",
  "@iqai/mcp-near-intent-swaps",
  config
  );
@@ -5670,89 +5876,47 @@ var BaseLlmFlow = class {
  responseProcessors = [];
  logger = new Logger({ name: "BaseLlmFlow" });
  async *runAsync(invocationContext) {
- this.logger.debug("\u{1F680} Starting runAsync flow", {
- invocationId: invocationContext.invocationId,
- agentName: invocationContext.agent.name,
- branch: invocationContext.branch
- });
+ this.logger.info(`Agent '${invocationContext.agent.name}' started.`);
  let stepCount = 0;
  while (true) {
  stepCount++;
- this.logger.debug(`\u{1F4CB} Running step ${stepCount}`, {
- invocationId: invocationContext.invocationId
- });
  let lastEvent = null;
- let eventCount = 0;
  for await (const event of this._runOneStepAsync(invocationContext)) {
- eventCount++;
  lastEvent = event;
- this.logger.debug(
- `\u{1F4E4} Yielding event ${eventCount} from step ${stepCount}`,
- {
- eventId: event.id,
- eventType: event.constructor.name,
- hasContent: !!event.content,
- isFinalResponse: event.isFinalResponse(),
- partial: event.partial
- }
- );
  yield event;
  }
  if (!lastEvent || lastEvent.isFinalResponse()) {
- this.logger.debug("\u2705 Flow completed", {
- reason: !lastEvent ? "no_events" : "final_response",
- totalSteps: stepCount
- });
+ this.logger.info(
+ `Agent '${invocationContext.agent.name}' finished after ${stepCount} steps.`
+ );
  break;
  }
  if (lastEvent.partial) {
- this.logger.error("\u274C Flow error: Last event is partial", {
- eventId: lastEvent.id,
- stepCount
- });
+ this.logger.error(
+ "Partial event encountered. LLM max output limit may be reached."
+ );
  throw new Error(
  "Last event shouldn't be partial. LLM max output limit may be reached."
  );
  }
  }
- this.logger.debug("\u{1F3C1} runAsync flow finished", {
- totalSteps: stepCount,
- invocationId: invocationContext.invocationId
- });
  }
  async *runLive(invocationContext) {
- this.logger.debug("\u{1F534} Starting runLive flow", {
- invocationId: invocationContext.invocationId,
- agentName: invocationContext.agent.name
- });
  this.logger.warn("\u26A0\uFE0F runLive not fully implemented, delegating to runAsync");
  yield* this.runAsync(invocationContext);
  }
  async *_runOneStepAsync(invocationContext) {
- this.logger.debug("\u{1F504} Starting one step execution", {
- invocationId: invocationContext.invocationId
- });
  const llmRequest = new LlmRequest();
- this.logger.debug("\u{1F4DD} Created new LlmRequest", {
- requestId: llmRequest.id || "unknown"
- });
- this.logger.debug("\u{1F527} Starting preprocessing phase");
  let preprocessEventCount = 0;
  for await (const event of this._preprocessAsync(
  invocationContext,
  llmRequest
  )) {
  preprocessEventCount++;
- this.logger.debug(`\u{1F4E4} Preprocessing event ${preprocessEventCount}`, {
- eventId: event.id
- });
  yield event;
  }
- this.logger.debug("\u2705 Preprocessing completed", {
- eventCount: preprocessEventCount
- });
  if (invocationContext.endInvocation) {
- this.logger.debug("\u{1F6D1} Invocation ended during preprocessing");
+ this.logger.info("Invocation ended during preprocessing.");
  return;
  }
  const modelResponseEvent = new Event({
@@ -5761,9 +5925,6 @@ var BaseLlmFlow = class {
  author: invocationContext.agent.name,
  branch: invocationContext.branch
  });
- this.logger.debug("\u{1F916} Starting LLM call phase", {
- modelResponseEventId: modelResponseEvent.id
- });
  let llmResponseCount = 0;
  for await (const llmResponse of this._callLlmAsync(
  invocationContext,
@@ -5771,12 +5932,6 @@
  modelResponseEvent
  )) {
  llmResponseCount++;
- this.logger.debug(`\u{1F504} Processing LLM response ${llmResponseCount}`, {
- hasContent: !!llmResponse.content,
- hasError: !!llmResponse.errorCode,
- interrupted: !!llmResponse.interrupted,
- partial: !!llmResponse.partial
- });
  for await (const event of this._postprocessAsync(
  invocationContext,
  llmRequest,
@@ -5784,89 +5939,47 @@
  modelResponseEvent
  )) {
  modelResponseEvent.id = Event.newId();
- this.logger.debug("\u{1F4E4} Yielding postprocessed event", {
- eventId: event.id,
- hasFunctionCalls: !!event.getFunctionCalls()
- });
  yield event;
  }
  }
- this.logger.debug("\u2705 One step execution completed", {
- llmResponseCount
- });
  }
  async *_preprocessAsync(invocationContext, llmRequest) {
- this.logger.debug("\u{1F527} Starting preprocessing", {
- processorCount: this.requestProcessors.length
- });
  const agent = invocationContext.agent;
  if (!("canonicalTools" in agent) || typeof agent.canonicalTools !== "function") {
- this.logger.debug("\u2139\uFE0F Agent has no canonical tools");
  return;
  }
- for (let i = 0; i < this.requestProcessors.length; i++) {
- const processor = this.requestProcessors[i];
- this.logger.debug(`\u{1F504} Running request processor ${i + 1}`, {
- processorName: processor.constructor?.name || "unknown"
- });
- let processorEventCount = 0;
+ for (const processor of this.requestProcessors) {
  for await (const event of processor.runAsync(
  invocationContext,
  llmRequest
  )) {
- processorEventCount++;
- this.logger.debug(
- `\u{1F4E4} Request processor ${i + 1} event ${processorEventCount}`,
- {
- eventId: event.id
- }
- );
  yield event;
  }
- this.logger.debug(`\u2705 Request processor ${i + 1} completed`, {
- eventCount: processorEventCount
- });
  }
  const tools = await agent.canonicalTools(
  new ReadonlyContext(invocationContext)
  );
- this.logger.debug("\u{1F6E0}\uFE0F Processing canonical tools", {
- toolCount: tools.length
- });
- for (let i = 0; i < tools.length; i++) {
- const tool = tools[i];
- this.logger.debug(`\u{1F504} Processing tool ${i + 1}`, {
- toolName: tool.constructor?.name || "unknown"
- });
+ for (const tool of tools) {
  const toolContext = new ToolContext(invocationContext);
  await tool.processLlmRequest(toolContext, llmRequest);
- this.logger.debug(`\u2705 Tool ${i + 1} processed`);
  }
- this.logger.debug("\u2705 Preprocessing completed", {
- totalTools: tools.length
- });
+ if (tools.length > 0) {
+ const toolsData = tools.map((tool) => ({
+ Name: tool.name,
+ Description: tool.description?.substring(0, 50) + (tool.description?.length > 50 ? "..." : ""),
+ "Long Running": tool.isLongRunning ? "Yes" : "No"
+ }));
+ this.logger.debugArray("\u{1F6E0}\uFE0F Available Tools", toolsData);
+ }
  }
  async *_postprocessAsync(invocationContext, llmRequest, llmResponse, modelResponseEvent) {
- this.logger.debug("\u{1F504} Starting postprocessing", {
- hasContent: !!llmResponse.content,
- hasError: !!llmResponse.errorCode,
- interrupted: !!llmResponse.interrupted
- });
- let processorEventCount = 0;
  for await (const event of this._postprocessRunProcessorsAsync(
  invocationContext,
  llmResponse
  )) {
- processorEventCount++;
- this.logger.debug(`\u{1F4E4} Response processor event ${processorEventCount}`, {
- eventId: event.id
- });
  yield event;
  }
  if (!llmResponse.content && !llmResponse.errorCode && !llmResponse.interrupted) {
- this.logger.debug(
- "\u2139\uFE0F Skipping event creation - no content, error, or interruption"
- );
  return;
  }
  const finalizedEvent = this._finalizeModelResponseEvent(
@@ -5874,54 +5987,32 @@
  llmResponse,
  modelResponseEvent
  );
- this.logger.debug("\u{1F4DD} Finalized model response event", {
- eventId: finalizedEvent.id,
- hasContent: !!finalizedEvent.content,
- hasFunctionCalls: !!finalizedEvent.getFunctionCalls(),
- longRunningToolIds: finalizedEvent.longRunningToolIds.entries.length || 0
- });
  yield finalizedEvent;
  const functionCalls = finalizedEvent.getFunctionCalls();
- if (functionCalls) {
- this.logger.debug("\u{1F527} Processing function calls", {
- functionCallCount: functionCalls.length
- });
- let functionEventCount = 0;
+ if (functionCalls && functionCalls.length > 0) {
+ const functionCallsData = functionCalls.map((fc) => ({
+ Name: fc.name,
+ Arguments: JSON.stringify(fc.args).substring(0, 100) + (JSON.stringify(fc.args).length > 100 ? "..." : ""),
+ ID: fc.id || "auto"
+ }));
+ this.logger.debugArray("\u{1F527} Function Calls", functionCallsData);
  for await (const event of this._postprocessHandleFunctionCallsAsync(
  invocationContext,
  finalizedEvent,
  llmRequest
  )) {
- functionEventCount++;
- this.logger.debug(`\u{1F4E4} Function call event ${functionEventCount}`, {
- eventId: event.id
- });
  yield event;
  }
- this.logger.debug("\u2705 Function calls processed", {
- eventCount: functionEventCount
- });
  }
- this.logger.debug("\u2705 Postprocessing completed");
  }
  async *_postprocessLive(invocationContext, llmRequest, llmResponse, modelResponseEvent) {
- this.logger.debug("\u{1F534} Starting live postprocessing", {
- hasContent: !!llmResponse.content,
- turnComplete: !!llmResponse.turnComplete
- });
  for await (const event of this._postprocessRunProcessorsAsync(
  invocationContext,
  llmResponse
  )) {
- this.logger.debug("\u{1F4E4} Live response processor event", {
- eventId: event.id
- });
  yield event;
  }
  if (!llmResponse.content && !llmResponse.errorCode && !llmResponse.interrupted && !llmResponse.turnComplete) {
- this.logger.debug(
- "\u2139\uFE0F Skipping live event - no content or completion signal"
- );
  return;
  }
  const finalizedEvent = this._finalizeModelResponseEvent(
@@ -5929,165 +6020,83 @@
  llmResponse,
  modelResponseEvent
  );
- this.logger.debug("\u{1F4DD} Finalized live model response event", {
- eventId: finalizedEvent.id,
- hasFunctionCalls: !!finalizedEvent.getFunctionCalls()
- });
  yield finalizedEvent;
  if (finalizedEvent.getFunctionCalls()) {
- this.logger.debug("\u{1F527} Processing live function calls");
  const functionResponseEvent = await handleFunctionCallsAsync(
  invocationContext,
  finalizedEvent,
  llmRequest.toolsDict || {}
  );
  if (functionResponseEvent) {
- this.logger.debug("\u{1F4E4} Live function response event", {
- eventId: functionResponseEvent.id,
- hasTransfer: !!functionResponseEvent.actions?.transferToAgent
- });
  yield functionResponseEvent;
  const transferToAgent = functionResponseEvent.actions?.transferToAgent;
  if (transferToAgent) {
- this.logger.debug("\u{1F504} Transferring to agent in live mode", {
- targetAgent: transferToAgent
- });
+ this.logger.info(`\u{1F504} Live transfer to agent '${transferToAgent}'`);
  const agentToRun = this._getAgentToRun(
  invocationContext,
  transferToAgent
  );
- let transferEventCount = 0;
  for await (const event of agentToRun.runLive?.(invocationContext) || agentToRun.runAsync(invocationContext)) {
- transferEventCount++;
- this.logger.debug(`\u{1F4E4} Transfer agent event ${transferEventCount}`, {
- eventId: event.id
- });
  yield event;
  }
- this.logger.debug("\u2705 Agent transfer completed", {
- eventCount: transferEventCount
- });
  }
  }
  }
- this.logger.debug("\u2705 Live postprocessing completed");
  }
  async *_postprocessRunProcessorsAsync(invocationContext, llmResponse) {
- this.logger.debug("\u{1F504} Running response processors", {
- processorCount: this.responseProcessors.length
- });
- for (let i = 0; i < this.responseProcessors.length; i++) {
- const processor = this.responseProcessors[i];
- this.logger.debug(`\u{1F504} Running response processor ${i + 1}`, {
- processorName: processor.constructor?.name || "unknown"
- });
- let processorEventCount = 0;
+ for (const processor of this.responseProcessors) {
  for await (const event of processor.runAsync(
  invocationContext,
  llmResponse
  )) {
- processorEventCount++;
- this.logger.debug(
- `\u{1F4E4} Response processor ${i + 1} event ${processorEventCount}`,
- {
- eventId: event.id
- }
- );
  yield event;
  }
- this.logger;
- this.logger.debug(`\u2705 Response processor ${i + 1} completed`, {
- eventCount: processorEventCount
- });
  }
- this.logger.debug("\u2705 All response processors completed");
  }
  async *_postprocessHandleFunctionCallsAsync(invocationContext, functionCallEvent, llmRequest) {
- this.logger.debug("\u{1F527} Handling function calls", {
- eventId: functionCallEvent.id,
- toolsDictSize: Object.keys(llmRequest.toolsDict || {}).length
- });
  const functionResponseEvent = await handleFunctionCallsAsync(
  invocationContext,
  functionCallEvent,
  llmRequest.toolsDict || {}
  );
  if (functionResponseEvent) {
- this.logger.debug("\u{1F4CB} Function calls executed", {
- responseEventId: functionResponseEvent.id,
- hasActions: !!functionResponseEvent.actions
- });
  const authEvent = generateAuthEvent(
  invocationContext,
  functionResponseEvent
  );
  if (authEvent) {
- this.logger.debug("\u{1F510} Generated auth event", {
- authEventId: authEvent.id
- });
  yield authEvent;
  }
  yield functionResponseEvent;
  const transferToAgent = functionResponseEvent.actions?.transferToAgent;
  if (transferToAgent) {
- this.logger.debug("\u{1F504} Transferring to agent", {
- targetAgent: transferToAgent
- });
+ this.logger.info(`\u{1F504} Transferring to agent '${transferToAgent}'`);
  const agentToRun = this._getAgentToRun(
  invocationContext,
  transferToAgent
  );
- let transferEventCount = 0;
  for await (const event of agentToRun.runAsync(invocationContext)) {
- transferEventCount++;
- this.logger.debug(`\u{1F4E4} Transfer agent event ${transferEventCount}`, {
- eventId: event.id
- });
  yield event;
  }
- this.logger.debug("\u2705 Agent transfer completed", {
- eventCount: transferEventCount
- });
  }
- } else {
- this.logger.debug("\u2139\uFE0F No function response event generated");
  }
  }
  _getAgentToRun(invocationContext, agentName) {
- this.logger.debug("\u{1F50D} Finding agent to run", {
- targetAgent: agentName,
- currentAgent: invocationContext.agent.name
- });
  const rootAgent = invocationContext.agent.rootAgent;
  const agentToRun = rootAgent.findAgent(agentName);
  if (!agentToRun) {
- this.logger.error("\u274C Agent not found", {
- targetAgent: agentName,
- rootAgent: rootAgent.name
- });
+ this.logger.error(`Agent '${agentName}' not found in the agent tree.`);
  throw new Error(`Agent ${agentName} not found in the agent tree.`);
  }
- this.logger.debug("\u2705 Agent found", {
- targetAgent: agentName,
- agentType: agentToRun.constructor.name
- });
  return agentToRun;
  }
  async *_callLlmAsync(invocationContext, llmRequest, modelResponseEvent) {
- this.logger.debug("\u{1F916} Starting LLM call", {
- model: llmRequest.model || "default",
- eventId: modelResponseEvent.id
- });
- this.logger.debug("\u{1F504} Processing before model callbacks");
  const beforeModelCallbackContent = await this._handleBeforeModelCallback(
  invocationContext,
  llmRequest,
  modelResponseEvent
  );
  if (beforeModelCallbackContent) {
- this.logger.debug("\u{1F4CB} Before model callback returned content", {
- hasContent: !!beforeModelCallbackContent.content
- });
  yield beforeModelCallbackContent;
  return;
  }
@@ -6095,27 +6104,38 @@
  llmRequest.config.labels = llmRequest.config.labels || {};
  if (!(_ADK_AGENT_NAME_LABEL_KEY in llmRequest.config.labels)) {
  llmRequest.config.labels[_ADK_AGENT_NAME_LABEL_KEY] = invocationContext.agent.name;
- this.logger.debug("\u{1F3F7}\uFE0F Added agent name label", {
- agentName: invocationContext.agent.name
- });
  }
  const llm = this.__getLlm(invocationContext);
- this.logger.debug("\u{1F527} Retrieved LLM instance", {
- llmModel: llm.model,
- llmType: llm.constructor.name
- });
  const runConfig = invocationContext.runConfig;
  if (runConfig.supportCfc) {
  this.logger.warn(
- "\u26A0\uFE0F CFC (supportCfc) not fully implemented, using standard flow"
+ "CFC (supportCfc) not fully implemented, using standard flow."
  );
  }
  invocationContext.incrementLlmCallCount();
- this.logger.debug("\u{1F4C8} Incremented LLM call count");
  const isStreaming = invocationContext.runConfig.streamingMode === "sse" /* SSE */;
- this.logger.debug("\u{1F30A} LLM generation mode", {
- streaming: isStreaming,
- streamingMode: invocationContext.runConfig.streamingMode
+ const tools = llmRequest.config?.tools || [];
+ const toolNames = tools.map((tool) => {
+ if (tool.functionDeclarations && Array.isArray(tool.functionDeclarations)) {
+ return tool.functionDeclarations.map((fn) => fn.name).join(", ");
+ }
+ if (tool.name) return tool.name;
+ if (tool.function?.name) return tool.function.name;
+ if (tool.function?.function?.name) return tool.function.function.name;
+ return "unknown";
+ }).join(", ");
+ const systemInstruction = llmRequest.getSystemInstructionText() || "";
+ const truncatedSystemInstruction = systemInstruction.length > 100 ? `${systemInstruction.substring(0, 100)}...` : systemInstruction;
+ const contentPreview = llmRequest.contents?.length > 0 ? this._formatContentPreview(llmRequest.contents[0]) : "none";
+ this.logger.debugStructured("\u{1F4E4} LLM Request", {
+ Model: llm.model,
+ Agent: invocationContext.agent.name,
+ "Content Items": llmRequest.contents?.length || 0,
+ "Content Preview": contentPreview,
+ "System Instruction": truncatedSystemInstruction || "none",
+ "Available Tools": toolNames || "none",
+ "Tool Count": llmRequest.config?.tools?.length || 0,
+ Streaming: isStreaming ? "Yes" : "No"
  });
  let responseCount = 0;
  for await (const llmResponse of llm.generateContentAsync(
@@ -6123,59 +6143,46 @@ var BaseLlmFlow = class {
  isStreaming
  )) {
  responseCount++;
- this.logger.debug(`\u{1F4E5} Received LLM response ${responseCount}`, {
- hasContent: !!llmResponse.content,
- hasError: !!llmResponse.errorCode,
- interrupted: !!llmResponse.interrupted,
- partial: !!llmResponse.partial,
- finishReason: llmResponse.finishReason,
- usage: llmResponse.usageMetadata ? {
- promptTokens: llmResponse.usageMetadata.promptTokenCount,
- completionTokens: llmResponse.usageMetadata.candidatesTokenCount,
- totalTokens: llmResponse.usageMetadata.totalTokenCount
- } : null
- });
  traceLlmCall(
  invocationContext,
  modelResponseEvent.id,
  llmRequest,
  llmResponse
  );
- this.logger.debug("\u{1F504} Processing after model callbacks");
+ const tokenCount = llmResponse.usageMetadata?.totalTokenCount || "unknown";
+ const functionCallCount = llmResponse.content?.parts?.filter((part) => part.functionCall).length || 0;
+ const responsePreview = this._formatResponsePreview(llmResponse);
+ this.logger.debugStructured("\u{1F4E5} LLM Response", {
+ Model: llm.model,
+ "Token Count": tokenCount,
+ "Function Calls": functionCallCount,
+ "Response Preview": responsePreview,
+ "Finish Reason": llmResponse.finishReason || "unknown",
+ "Response #": responseCount,
+ Partial: llmResponse.partial ? "Yes" : "No",
+ Error: llmResponse.errorCode || "none"
+ });
  const alteredLlmResponse = await this._handleAfterModelCallback(
  invocationContext,
  llmResponse,
  modelResponseEvent
  );
- if (alteredLlmResponse) {
- this.logger.debug("\u{1F4CB} After model callback altered response");
- }
  yield alteredLlmResponse || llmResponse;
  }
- this.logger.debug("\u2705 LLM call completed", {
- totalResponses: responseCount
- });
  }
  async _handleBeforeModelCallback(invocationContext, llmRequest, modelResponseEvent) {
  const agent = invocationContext.agent;
  if (!("canonicalBeforeModelCallbacks" in agent)) {
- this.logger.debug("\u2139\uFE0F Agent has no before model callbacks");
  return;
  }
  const beforeCallbacks = agent.canonicalBeforeModelCallbacks;
  if (!beforeCallbacks) {
- this.logger.debug("\u2139\uFE0F Before model callbacks is null/undefined");
  return;
  }
- this.logger.debug("\u{1F504} Processing before model callbacks", {
- callbackCount: beforeCallbacks.length
- });
  const callbackContext = new CallbackContext(invocationContext, {
  eventActions: modelResponseEvent.actions
  });
- for (let i = 0; i < beforeCallbacks.length; i++) {
- const callback = beforeCallbacks[i];
- this.logger.debug(`\u{1F504} Running before model callback ${i + 1}`);
+ for (const callback of beforeCallbacks) {
  let beforeModelCallbackContent = callback({
  callbackContext,
  llmRequest
@@ -6184,35 +6191,23 @@ var BaseLlmFlow = class {
  beforeModelCallbackContent = await beforeModelCallbackContent;
  }
  if (beforeModelCallbackContent) {
- this.logger.debug(`\u2705 Before model callback ${i + 1} returned content`);
  return beforeModelCallbackContent;
  }
- this.logger.debug(
- `\u2705 Before model callback ${i + 1} completed (no content)`
- );
  }
- this.logger.debug("\u2705 All before model callbacks completed");
  }
  async _handleAfterModelCallback(invocationContext, llmResponse, modelResponseEvent) {
  const agent = invocationContext.agent;
  if (!("canonicalAfterModelCallbacks" in agent)) {
- this.logger.debug("\u2139\uFE0F Agent has no after model callbacks");
  return;
  }
  const afterCallbacks = agent.canonicalAfterModelCallbacks;
  if (!afterCallbacks) {
- this.logger.debug("\u2139\uFE0F After model callbacks is null/undefined");
  return;
  }
- this.logger.debug("\u{1F504} Processing after model callbacks", {
- callbackCount: afterCallbacks.length
- });
  const callbackContext = new CallbackContext(invocationContext, {
  eventActions: modelResponseEvent.actions
  });
- for (let i = 0; i < afterCallbacks.length; i++) {
- const callback = afterCallbacks[i];
- this.logger.debug(`\u{1F504} Running after model callback ${i + 1}`);
+ for (const callback of afterCallbacks) {
  let afterModelCallbackContent = callback({
  callbackContext,
  llmResponse
@@ -6221,21 +6216,11 @@ var BaseLlmFlow = class {
  afterModelCallbackContent = await afterModelCallbackContent;
  }
  if (afterModelCallbackContent) {
- this.logger.debug(`\u2705 After model callback ${i + 1} returned content`);
  return afterModelCallbackContent;
  }
- this.logger.debug(
- `\u2705 After model callback ${i + 1} completed (no content)`
- );
  }
- this.logger.debug("\u2705 All after model callbacks completed");
  }
  _finalizeModelResponseEvent(llmRequest, llmResponse, modelResponseEvent) {
- this.logger.debug("\u{1F4DD} Finalizing model response event", {
- requestModel: llmRequest.model,
- responseHasContent: !!llmResponse.content,
- eventId: modelResponseEvent.id
- });
  const eventData = { ...modelResponseEvent };
  const responseData = { ...llmResponse };
  Object.keys(responseData).forEach((key) => {
@@ -6247,91 +6232,55 @@ var BaseLlmFlow = class {
  if (event.content) {
  const functionCalls = event.getFunctionCalls();
  if (functionCalls) {
- this.logger.debug("\u{1F527} Processing function calls in event", {
- functionCallCount: functionCalls.length
- });
  populateClientFunctionCallId(event);
  event.longRunningToolIds = getLongRunningFunctionCalls(
  functionCalls,
  llmRequest.toolsDict || {}
  );
- this.logger.debug("\u2705 Function calls processed", {
- longRunningToolCount: event.longRunningToolIds.entries.length || 0
- });
  }
  }
- this.logger.debug("\u2705 Model response event finalized", {
- finalEventId: event.id,
- hasContent: !!event.content,
- hasFunctionCalls: !!event.getFunctionCalls()
- });
  return event;
  }
+ /**
+ * Logs data in a visually appealing format that works well in any terminal size.
+ * Uses vertical layout for better readability and respects debug settings.
+ */
+ _formatContentPreview(content) {
+ if (!content) return "none";
+ if (content.parts && Array.isArray(content.parts)) {
+ const textParts = content.parts.filter((part) => part.text).map((part) => part.text).join(" ");
+ return textParts.length > 80 ? `${textParts.substring(0, 80)}...` : textParts || "no text content";
+ }
+ if (typeof content === "string") {
+ return content.length > 80 ? `${content.substring(0, 80)}...` : content;
+ }
+ const stringified = JSON.stringify(content);
+ return stringified.length > 80 ? `${stringified.substring(0, 80)}...` : stringified;
+ }
+ /**
+ * Formats response content preview for debug logging
+ */
+ _formatResponsePreview(llmResponse) {
+ if (!llmResponse.content) return "none";
+ if (llmResponse.content.parts && Array.isArray(llmResponse.content.parts)) {
+ const textParts = llmResponse.content.parts.filter((part) => part.text).map((part) => part.text).join(" ");
+ return textParts.length > 80 ? `${textParts.substring(0, 80)}...` : textParts || "no text content";
+ }
+ const stringified = JSON.stringify(llmResponse.content);
+ return stringified.length > 80 ? `${stringified.substring(0, 80)}...` : stringified;
+ }
  __getLlm(invocationContext) {
  const llm = invocationContext.agent.canonicalModel;
- this.logger.debug("\u{1F527} Retrieved canonical model", {
- model: llm?.model || "unknown",
- llmType: llm?.constructor?.name || "unknown"
- });
  return llm;
  }
  };

- // src/flows/llm-flows/single-flow.ts
- init_logger();
-
  // src/flows/llm-flows/base-llm-processor.ts
  var BaseLlmRequestProcessor = class {
  };
  var BaseLlmResponseProcessor = class {
  };

- // src/flows/llm-flows/basic.ts
- var BasicLlmRequestProcessor = class extends BaseLlmRequestProcessor {
- async *runAsync(invocationContext, llmRequest) {
- const agent = invocationContext.agent;
- if (!this.isLlmAgent(agent)) {
- return;
- }
- llmRequest.model = typeof agent.canonicalModel === "string" ? agent.canonicalModel : agent.canonicalModel.model;
- if (agent.generateContentConfig) {
- llmRequest.config = JSON.parse(
- JSON.stringify(agent.generateContentConfig)
- );
- } else {
- llmRequest.config = {};
- }
- if (agent.outputSchema) {
- llmRequest.setOutputSchema(agent.outputSchema);
- }
- const runConfig = invocationContext.runConfig;
- if (!llmRequest.liveConnectConfig) {
- llmRequest.liveConnectConfig = {};
- }
- if (runConfig.responseModalities) {
- llmRequest.liveConnectConfig.responseModalities = runConfig.responseModalities;
- }
- llmRequest.liveConnectConfig.speechConfig = runConfig.speechConfig;
- llmRequest.liveConnectConfig.outputAudioTranscription = runConfig.outputAudioTranscription;
- llmRequest.liveConnectConfig.inputAudioTranscription = runConfig.inputAudioTranscription;
- llmRequest.liveConnectConfig.realtimeInputConfig = runConfig.realtimeInputConfig;
- llmRequest.liveConnectConfig.enableAffectiveDialog = runConfig.enableAffectiveDialog;
- llmRequest.liveConnectConfig.proactivity = runConfig.proactivity;
- const tools = await agent.canonicalTools();
- llmRequest.appendTools(tools);
- for await (const _ of []) {
- yield _;
- }
- }
- /**
- * Type guard to check if agent is an LlmAgent
- */
- isLlmAgent(agent) {
- return agent && typeof agent === "object" && "canonicalModel" in agent;
- }
- };
- var requestProcessor = new BasicLlmRequestProcessor();
-
  // src/auth/auth-tool.ts
  var EnhancedAuthConfig = class {
  /**
@@ -6539,152 +6488,738 @@ var AuthLlmRequestProcessor = class extends BaseLlmRequestProcessor {
  }
  }
  };
- var requestProcessor2 = new AuthLlmRequestProcessor();
+ var requestProcessor = new AuthLlmRequestProcessor();

- // src/flows/llm-flows/identity.ts
- var IdentityLlmRequestProcessor = class extends BaseLlmRequestProcessor {
+ // src/flows/llm-flows/basic.ts
+ var BasicLlmRequestProcessor = class extends BaseLlmRequestProcessor {
  async *runAsync(invocationContext, llmRequest) {
  const agent = invocationContext.agent;
- const instructions = [
- `You are an agent. Your internal name is "${agent.name}".`
- ];
- if (agent.description) {
- instructions.push(` The description about you is "${agent.description}"`);
+ if (!this.isLlmAgent(agent)) {
+ return;
  }
- llmRequest.appendInstructions(instructions);
+ llmRequest.model = typeof agent.canonicalModel === "string" ? agent.canonicalModel : agent.canonicalModel.model;
+ if (agent.generateContentConfig) {
+ llmRequest.config = JSON.parse(
+ JSON.stringify(agent.generateContentConfig)
+ );
+ } else {
+ llmRequest.config = {};
+ }
+ if (agent.outputSchema) {
+ llmRequest.setOutputSchema(agent.outputSchema);
+ }
+ const runConfig = invocationContext.runConfig;
+ if (!llmRequest.liveConnectConfig) {
+ llmRequest.liveConnectConfig = {};
+ }
+ if (runConfig.responseModalities) {
+ llmRequest.liveConnectConfig.responseModalities = runConfig.responseModalities;
+ }
+ llmRequest.liveConnectConfig.speechConfig = runConfig.speechConfig;
+ llmRequest.liveConnectConfig.outputAudioTranscription = runConfig.outputAudioTranscription;
+ llmRequest.liveConnectConfig.inputAudioTranscription = runConfig.inputAudioTranscription;
+ llmRequest.liveConnectConfig.realtimeInputConfig = runConfig.realtimeInputConfig;
+ llmRequest.liveConnectConfig.enableAffectiveDialog = runConfig.enableAffectiveDialog;
+ llmRequest.liveConnectConfig.proactivity = runConfig.proactivity;
+ const tools = await agent.canonicalTools();
+ llmRequest.appendTools(tools);
  for await (const _ of []) {
  yield _;
  }
  }
+ /**
+ * Type guard to check if agent is an LlmAgent
+ */
+ isLlmAgent(agent) {
+ return agent && typeof agent === "object" && "canonicalModel" in agent;
+ }
  };
- var requestProcessor3 = new IdentityLlmRequestProcessor();
+ var requestProcessor2 = new BasicLlmRequestProcessor();

- // src/utils/instructions-utils.ts
- async function injectSessionState(template, readonlyContext) {
- const invocationContext = readonlyContext._invocationContext;
- async function asyncReplace(pattern, replaceAsyncFn, string) {
- const result = [];
- let lastEnd = 0;
- const matches = Array.from(string.matchAll(pattern));
- for (const match of matches) {
- result.push(string.slice(lastEnd, match.index));
- const replacement = await replaceAsyncFn(match);
- result.push(replacement);
- lastEnd = (match.index || 0) + match[0].length;
+ // src/code-executors/base-code-executor.ts
+ var BaseCodeExecutor = class {
+ config;
+ constructor(config = {}) {
+ this.config = {
+ optimizeDataFile: config.optimizeDataFile ?? false,
+ stateful: config.stateful ?? false,
+ errorRetryAttempts: config.errorRetryAttempts ?? 2,
+ codeBlockDelimiters: config.codeBlockDelimiters ?? [
+ ["`tool_code\n", "\n`"],
+ ["`python\n", "\n`"]
+ ],
+ executionResultDelimiters: config.executionResultDelimiters ?? [
+ "`tool_output\n",
+ "\n`"
+ ]
+ };
+ }
+ // Getters for configuration
+ get optimizeDataFile() {
+ return this.config.optimizeDataFile;
+ }
+ get stateful() {
+ return this.config.stateful;
+ }
+ get errorRetryAttempts() {
+ return this.config.errorRetryAttempts;
+ }
+ get codeBlockDelimiters() {
+ return this.config.codeBlockDelimiters;
+ }
+ get executionResultDelimiters() {
+ return this.config.executionResultDelimiters;
+ }
+ };
+
+ // src/code-executors/built-in-code-executor.ts
+ var BuiltInCodeExecutor = class extends BaseCodeExecutor {
+ constructor(config = {}) {
+ super(config);
+ }
+ async executeCode(invocationContext, codeExecutionInput) {
+ throw new Error(
+ "BuiltInCodeExecutor.executeCode should not be called directly"
+ );
+ }
+ /**
+ * Pre-process the LLM request for Gemini 2.0+ models to use the code execution tool
+ */
+ processLlmRequest(llmRequest) {
+ if (!llmRequest.model?.startsWith("gemini-2")) {
+ throw new Error(
+ `Gemini code execution tool is not supported for model ${llmRequest.model}`
+ );
  }
- result.push(string.slice(lastEnd));
- return result.join("");
+ if (!llmRequest.config) {
+ llmRequest.config = {};
+ }
+ if (!llmRequest.config.tools) {
+ llmRequest.config.tools = [];
+ }
+ const codeExecutionTool = {
+ codeExecution: {}
+ };
+ llmRequest.config.tools.push(codeExecutionTool);
  }
- async function replaceMatch(match) {
- let varName = match[0].replace(/[{}]/g, "").trim();
- let optional = false;
- if (varName.endsWith("?")) {
- optional = true;
- varName = varName.slice(0, -1);
+ };
+
+ // src/code-executors/code-execution-utils.ts
+ import { Language, Outcome } from "@google/genai";
+ var CodeExecutionUtils = class _CodeExecutionUtils {
+ /**
+ * Gets the file content as a base64-encoded string
+ */
+ static getEncodedFileContent(data) {
+ let decodedData;
+ if (data instanceof ArrayBuffer) {
+ decodedData = new TextDecoder().decode(data);
  }
- if (varName.startsWith("artifact.")) {
- varName = varName.replace("artifact.", "");
- if (!invocationContext.artifactService) {
- throw new Error("Artifact service is not initialized.");
- }
- try {
- const artifact = await invocationContext.artifactService.loadArtifact({
- appName: invocationContext.session.appName,
- userId: invocationContext.session.userId,
- sessionId: invocationContext.session.id,
- filename: varName
- });
- if (!artifact) {
- throw new Error(`Artifact ${varName} not found.`);
- }
- return String(artifact);
- } catch (error) {
- if (optional) {
- return "";
- }
- throw error;
- }
- } else {
- if (!isValidStateName(varName)) {
- return match[0];
+ if (_CodeExecutionUtils.isBase64Encoded(decodedData)) {
+ return decodedData;
+ }
+ return btoa(decodedData);
+ }
+ static isBase64Encoded(str) {
+ try {
+ return btoa(atob(str)) === str;
+ } catch {
+ return false;
+ }
+ }
+ /**
+ * Extracts the first code block from the content and truncates everything after it
+ */
+ static extractCodeAndTruncateContent(content, codeBlockDelimiters) {
+ if (!content?.parts?.length) {
+ return null;
+ }
+ for (let idx = 0; idx < content.parts.length; idx++) {
+ const part = content.parts[idx];
+ if (part.executableCode && (idx === content.parts.length - 1 || !content.parts[idx + 1].codeExecutionResult)) {
+ content.parts = content.parts.slice(0, idx + 1);
+ return part.executableCode.code;
  }
- const sessionState = invocationContext.session.state;
- if (varName in sessionState) {
- return String(sessionState[varName]);
+ }
+ const textParts = content.parts.filter((p) => p.text);
+ if (!textParts.length) {
+ return null;
+ }
+ const responseText = textParts.map((p) => p.text).join("\n");
+ const leadingDelimiterPattern = codeBlockDelimiters.map(([start]) => _CodeExecutionUtils.escapeRegex(start)).join("|");
+ const trailingDelimiterPattern = codeBlockDelimiters.map(([, end]) => _CodeExecutionUtils.escapeRegex(end)).join("|");
+ const pattern = new RegExp(
+ `(.*?)(${leadingDelimiterPattern})(.*?)(${trailingDelimiterPattern})(.*?)$`,
+ "s"
+ );
+ const match = responseText.match(pattern);
+ if (!match) {
+ return null;
+ }
+ const [, prefix, , code, , suffix] = match;
+ if (!code) {
+ return null;
+ }
+ content.parts = [];
+ if (prefix) {
+ content.parts.push({ text: prefix });
+ }
+ content.parts.push(_CodeExecutionUtils.buildExecutableCodePart(code));
+ return code;
+ }
+ static escapeRegex(str) {
+ return str.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
+ }
+ /**
+ * Builds an executable code part with code string
+ */
+ static buildExecutableCodePart(code) {
+ return {
+ executableCode: {
+ code,
+ language: Language.PYTHON
  }
- if (optional) {
- return "";
+ };
+ }
+ /**
+ * Builds the code execution result part from the code execution result
+ */
+ static buildCodeExecutionResultPart(codeExecutionResult) {
+ if (codeExecutionResult.stderr) {
+ return {
+ codeExecutionResult: {
+ outcome: Outcome.OUTCOME_FAILED,
+ output: codeExecutionResult.stderr
+ }
+ };
+ }
+ const finalResult = [];
+ if (codeExecutionResult.stdout || !codeExecutionResult.outputFiles.length) {
+ finalResult.push(
+ `Code execution result:
+ ${codeExecutionResult.stdout}
+ `
+ );
+ }
+ if (codeExecutionResult.outputFiles.length) {
+ const fileNames = codeExecutionResult.outputFiles.map((f) => `\`${f.name}\``).join(",");
+ finalResult.push(`Saved artifacts:
+ ${fileNames}`);
+ }
+ return {
+ codeExecutionResult: {
+ outcome: Outcome.OUTCOME_OK,
+ output: finalResult.join("\n\n")
  }
- throw new Error(`Context variable not found: \`${varName}\`.`);
+ };
+ }
+ /**
+ * Converts the code execution parts to text parts in a Content
+ */
+ static convertCodeExecutionParts(content, codeBlockDelimiter, executionResultDelimiters) {
+ if (!content.parts?.length) {
+ return;
+ }
+ const lastPart = content.parts[content.parts.length - 1];
+ if (lastPart.executableCode) {
+ content.parts[content.parts.length - 1] = {
+ text: `${codeBlockDelimiter[0]}${lastPart.executableCode.code}${codeBlockDelimiter[1]}`
+ };
+ } else if (content.parts.length === 1 && lastPart.codeExecutionResult) {
+ content.parts[content.parts.length - 1] = {
+ text: `${executionResultDelimiters[0]}${lastPart.codeExecutionResult.output}${executionResultDelimiters[1]}`
+ };
+ content.role = "user";
  }
  }
- return await asyncReplace(/{[^{}]*}/g, replaceMatch, template);
- }
- function isValidStateName(varName) {
- const parts = varName.split(":");
- if (parts.length === 1) {
- return isValidIdentifier(varName);
+ };
+
+ // src/code-executors/code-executor-context.ts
+ var CONTEXT_KEY = "_code_execution_context";
+ var SESSION_ID_KEY = "execution_session_id";
+ var PROCESSED_FILE_NAMES_KEY = "processed_input_files";
+ var INPUT_FILE_KEY = "_code_executor_input_files";
+ var ERROR_COUNT_KEY = "_code_executor_error_counts";
+ var CODE_EXECUTION_RESULTS_KEY = "_code_execution_results";
+ var CodeExecutorContext = class {
+ context;
+ sessionState;
+ constructor(sessionState) {
+ this.sessionState = sessionState;
+ this.context = this.getCodeExecutorContext(sessionState);
  }
- if (parts.length === 2) {
- const validPrefixes = ["app:", "user:", "temp:"];
- const prefix = `${parts[0]}:`;
- if (validPrefixes.includes(prefix)) {
- return isValidIdentifier(parts[1]);
+ /**
+ * Gets the state delta to update in the persistent session state.
+ */
+ getStateDelta() {
+ const contextToUpdate = JSON.parse(JSON.stringify(this.context));
+ return { [CONTEXT_KEY]: contextToUpdate };
+ }
+ /**
+ * Gets the session ID for the code executor.
+ */
+ getExecutionId() {
+ if (!(SESSION_ID_KEY in this.context)) {
+ return null;
  }
+ return this.context[SESSION_ID_KEY];
  }
- return false;
- }
- function isValidIdentifier(name) {
- const identifierRegex = /^[a-zA-Z_$][a-zA-Z0-9_$]*$/;
- return identifierRegex.test(name);
- }
+ /**
+ * Sets the session ID for the code executor.
+ */
+ setExecutionId(sessionId) {
+ this.context[SESSION_ID_KEY] = sessionId;
+ }
+ /**
+ * Gets the processed file names from the session state.
+ */
+ getProcessedFileNames() {
+ if (!(PROCESSED_FILE_NAMES_KEY in this.context)) {
+ return [];
+ }
+ return this.context[PROCESSED_FILE_NAMES_KEY];
+ }
+ /**
+ * Adds the processed file names to the session state.
+ */
+ addProcessedFileNames(fileNames) {
+ if (!(PROCESSED_FILE_NAMES_KEY in this.context)) {
+ this.context[PROCESSED_FILE_NAMES_KEY] = [];
+ }
+ this.context[PROCESSED_FILE_NAMES_KEY].push(...fileNames);
+ }
+ /**
+ * Gets the code executor input files from the session state.
+ */
+ getInputFiles() {
+ if (!(INPUT_FILE_KEY in this.sessionState)) {
+ return [];
+ }
+ return this.sessionState[INPUT_FILE_KEY].map(
+ (file) => file
+ );
+ }
+ /**
+ * Adds the input files to the code executor context.
+ */
+ addInputFiles(inputFiles) {
+ if (!(INPUT_FILE_KEY in this.sessionState)) {
+ this.sessionState[INPUT_FILE_KEY] = [];
+ }
+ const fileArray = this.sessionState[INPUT_FILE_KEY];
+ for (const inputFile of inputFiles) {
+ fileArray.push({
+ name: inputFile.name,
+ content: inputFile.content,
+ mimeType: inputFile.mimeType
+ });
+ }
+ }
+ /**
+ * Removes the input files and processed file names from the code executor context.
+ */
+ clearInputFiles() {
+ if (INPUT_FILE_KEY in this.sessionState) {
+ this.sessionState[INPUT_FILE_KEY] = [];
+ }
+ if (PROCESSED_FILE_NAMES_KEY in this.context) {
+ this.context[PROCESSED_FILE_NAMES_KEY] = [];
+ }
+ }
+ /**
+ * Gets the error count from the session state.
+ */
+ getErrorCount(invocationId) {
+ if (!(ERROR_COUNT_KEY in this.sessionState)) {
+ return 0;
+ }
+ const errorCounts = this.sessionState[ERROR_COUNT_KEY];
+ return errorCounts[invocationId] ?? 0;
+ }
+ /**
+ * Increments the error count for the given invocation ID.
+ */
+ incrementErrorCount(invocationId) {
+ if (!(ERROR_COUNT_KEY in this.sessionState)) {
+ this.sessionState[ERROR_COUNT_KEY] = {};
+ }
+ const errorCounts = this.sessionState[ERROR_COUNT_KEY];
+ errorCounts[invocationId] = this.getErrorCount(invocationId) + 1;
+ }
+ /**
+ * Resets the error count for the given invocation ID.
+ */
+ resetErrorCount(invocationId) {
+ if (!(ERROR_COUNT_KEY in this.sessionState)) {
+ return;
+ }
+ const errorCounts = this.sessionState[ERROR_COUNT_KEY];
+ if (invocationId in errorCounts) {
+ delete errorCounts[invocationId];
+ }
+ }
+ /**
+ * Updates the code execution result.
+ */
+ updateCodeExecutionResult(invocationId, code, resultStdout, resultStderr) {
+ if (!(CODE_EXECUTION_RESULTS_KEY in this.sessionState)) {
+ this.sessionState[CODE_EXECUTION_RESULTS_KEY] = {};
+ }
+ const results = this.sessionState[CODE_EXECUTION_RESULTS_KEY];
+ if (!(invocationId in results)) {
+ results[invocationId] = [];
+ }
+ results[invocationId].push({
+ code,
+ resultStdout,
+ resultStderr,
+ timestamp: Math.floor(Date.now() / 1e3)
+ });
+ }
+ /**
+ * Gets the code executor context from the session state.
+ */
+ getCodeExecutorContext(sessionState) {
+ if (!(CONTEXT_KEY in sessionState)) {
+ sessionState[CONTEXT_KEY] = {};
+ }
+ return sessionState[CONTEXT_KEY];
+ }
+ };

- // src/flows/llm-flows/instructions.ts
- var InstructionsLlmRequestProcessor = class extends BaseLlmRequestProcessor {
+ // src/flows/llm-flows/code-execution.ts
+ var DATA_FILE_UTIL_MAP = {
+ "text/csv": {
+ extension: ".csv",
+ loaderCodeTemplate: "pd.read_csv('{filename}')"
+ }
+ };
+ var DATA_FILE_HELPER_LIB = `
+ import pandas as pd
+
+ def explore_df(df: pd.DataFrame) -> None:
+ """Prints some information about a pandas DataFrame."""
+
+ with pd.option_context(
+ 'display.max_columns', None, 'display.expand_frame_repr', False
+ ):
+ # Print the column names to never encounter KeyError when selecting one.
+ df_dtypes = df.dtypes
+
+ # Obtain information about data types and missing values.
+ df_nulls = (len(df) - df.isnull().sum()).apply(
+ lambda x: f'{x} / {df.shape[0]} non-null'
+ )
+
+ # Explore unique total values in columns using \`.unique()\`.
+ df_unique_count = df.apply(lambda x: len(x.unique()))
+
+ # Explore unique values in columns using \`.unique()\`.
+ df_unique = df.apply(lambda x: crop(str(list(x.unique()))))
+
+ df_info = pd.concat(
+ (
+ df_dtypes.rename('Dtype'),
+ df_nulls.rename('Non-Null Count'),
+ df_unique_count.rename('Unique Values Count'),
+ df_unique.rename('Unique Values'),
+ ),
+ axis=1,
+ )
+ df_info.index.name = 'Columns'
+ print(f"""Total rows: {df.shape[0]}
+ Total columns: {df.shape[1]}
+
+ {df_info}""")
+
+ def crop(text: str, max_length: int = 100) -> str:
+ """Crop text to maximum length with ellipsis."""
+ return text if len(text) <= max_length else text[:max_length] + "..."
+ `;
+ function hasCodeExecutor(agent) {
+ return agent && typeof agent === "object" && "codeExecutor" in agent;
+ }
+ var CodeExecutionRequestProcessor = class extends BaseLlmRequestProcessor {
  async *runAsync(invocationContext, llmRequest) {
  const agent = invocationContext.agent;
- if (!this.isLlmAgent(agent)) {
+ if (!hasCodeExecutor(agent)) {
  return;
  }
- const rootAgent = agent.rootAgent;
- if (this.isLlmAgent(rootAgent) && rootAgent.globalInstruction) {
- const [rawInstruction, bypassStateInjection] = await rootAgent.canonicalGlobalInstruction(
- new ReadonlyContext(invocationContext)
+ if (!(agent instanceof LlmAgent) || !agent.codeExecutor) {
+ return;
+ }
+ yield* runPreProcessor(invocationContext, llmRequest);
+ if (!(agent.codeExecutor instanceof BaseCodeExecutor)) {
+ return;
+ }
+ for (const content of llmRequest.contents || []) {
+ CodeExecutionUtils.convertCodeExecutionParts(
+ content,
+ agent.codeExecutor.codeBlockDelimiters[0] || ["", ""],
+ agent.codeExecutor.executionResultDelimiters
  );
- let instruction = rawInstruction;
- if (!bypassStateInjection) {
- instruction = await injectSessionState(
- rawInstruction,
- new ReadonlyContext(invocationContext)
- );
+ }
+ }
+ };
+ var CodeExecutionResponseProcessor = class extends BaseLlmResponseProcessor {
+ async *runAsync(invocationContext, llmResponse) {
+ if (llmResponse.partial) {
+ return;
+ }
+ yield* runPostProcessor(invocationContext, llmResponse);
+ }
+ };
+ async function* runPreProcessor(invocationContext, llmRequest) {
+ const agent = invocationContext.agent;
+ if (!hasCodeExecutor(agent)) {
+ return;
+ }
+ const codeExecutor = agent.codeExecutor;
+ if (!codeExecutor || !(codeExecutor instanceof BaseCodeExecutor)) {
+ return;
+ }
+ if (codeExecutor instanceof BuiltInCodeExecutor) {
+ codeExecutor.processLlmRequest(llmRequest);
+ return;
+ }
+ if (!codeExecutor.optimizeDataFile) {
+ return;
+ }
+ const codeExecutorContext = new CodeExecutorContext(
+ invocationContext.session.state
+ // Type assertion for State compatibility
+ );
+ if (codeExecutorContext.getErrorCount(invocationContext.invocationId) >= codeExecutor.errorRetryAttempts) {
+ return;
+ }
+ const allInputFiles = extractAndReplaceInlineFiles(
+ codeExecutorContext,
+ llmRequest
+ );
+ const processedFileNames = new Set(
+ codeExecutorContext.getProcessedFileNames()
+ );
+ const filesToProcess = allInputFiles.filter(
+ (f) => !processedFileNames.has(f.name)
+ );
+ for (const file of filesToProcess) {
+ const codeStr = getDataFilePreprocessingCode(file);
+ if (!codeStr) {
+ continue;
+ }
+ const codeContent = {
+ role: "model",
+ parts: [
+ { text: `Processing input file: \`${file.name}\`` },
+ CodeExecutionUtils.buildExecutableCodePart(codeStr)
+ ]
+ };
+ llmRequest.contents = llmRequest.contents || [];
+ llmRequest.contents.push(structuredClone(codeContent));
+ yield new Event({
+ invocationId: invocationContext.invocationId,
+ author: agent.name,
+ branch: invocationContext.branch,
+ content: codeContent
+ });
+ const codeExecutionResult = await codeExecutor.executeCode(
+ invocationContext,
+ {
+ code: codeStr,
+ inputFiles: [file],
+ executionId: getOrSetExecutionId(
+ invocationContext,
+ codeExecutorContext
+ )
  }
- llmRequest.appendInstructions([instruction]);
+ );
+ codeExecutorContext.updateCodeExecutionResult(
+ invocationContext.invocationId,
+ codeStr,
+ codeExecutionResult.stdout,
+ codeExecutionResult.stderr
+ );
+ codeExecutorContext.addProcessedFileNames([file.name]);
+ const executionResultEvent = await postProcessCodeExecutionResult(
+ invocationContext,
+ codeExecutorContext,
+ codeExecutionResult
+ );
+ yield executionResultEvent;
+ llmRequest.contents.push(structuredClone(executionResultEvent.content));
+ }
+ }
+ async function* runPostProcessor(invocationContext, llmResponse) {
+ const agent = invocationContext.agent;
+ if (!hasCodeExecutor(agent)) {
+ return;
+ }
+ const codeExecutor = agent.codeExecutor;
+ if (!(codeExecutor instanceof BaseCodeExecutor)) {
+ return;
+ }
+ if (!llmResponse || !llmResponse.content) {
+ return;
+ }
+ if (codeExecutor instanceof BuiltInCodeExecutor) {
+ return;
+ }
+ const codeExecutorContext = new CodeExecutorContext(
+ invocationContext.session.state
+ // Type assertion for State compatibility
+ );
+ if (codeExecutorContext.getErrorCount(invocationContext.invocationId) >= codeExecutor.errorRetryAttempts) {
+ return;
+ }
+ const responseContent = llmResponse.content;
+ const codeStr = CodeExecutionUtils.extractCodeAndTruncateContent(
+ responseContent,
+ codeExecutor.codeBlockDelimiters
+ );
+ if (!codeStr) {
+ return;
+ }
+ yield new Event({
+ invocationId: invocationContext.invocationId,
+ author: agent.name,
+ branch: invocationContext.branch,
+ content: responseContent,
+ actions: new EventActions()
+ });
+ const codeExecutionResult = await codeExecutor.executeCode(
+ invocationContext,
+ {
+ code: codeStr,
+ inputFiles: codeExecutorContext.getInputFiles(),
+ executionId: getOrSetExecutionId(invocationContext, codeExecutorContext)
  }
- if (agent.instruction) {
- const [rawInstruction, bypassStateInjection] = await agent.canonicalInstruction(
- new ReadonlyContext(invocationContext)
- );
- let instruction = rawInstruction;
- if (!bypassStateInjection) {
- instruction = await injectSessionState(
- rawInstruction,
- new ReadonlyContext(invocationContext)
- );
+ );
+ codeExecutorContext.updateCodeExecutionResult(
+ invocationContext.invocationId,
+ codeStr,
+ codeExecutionResult.stdout,
+ codeExecutionResult.stderr
+ );
+ yield await postProcessCodeExecutionResult(
+ invocationContext,
+ codeExecutorContext,
+ codeExecutionResult
+ );
+ llmResponse.content = void 0;
+ }
+ function extractAndReplaceInlineFiles(codeExecutorContext, llmRequest) {
+ const allInputFiles = codeExecutorContext.getInputFiles();
+ const savedFileNames = new Set(allInputFiles.map((f) => f.name));
+ for (let i = 0; i < (llmRequest.contents?.length || 0); i++) {
+ const content = llmRequest.contents[i];
+ if (content.role !== "user" || !content.parts) {
+ continue;
+ }
+ for (let j = 0; j < content.parts.length; j++) {
+ const part = content.parts[j];
+ if (!part.inlineData || !(part.inlineData.mimeType in DATA_FILE_UTIL_MAP)) {
+ continue;
+ }
+ const mimeType = part.inlineData.mimeType;
+ const fileName = `data_${i + 1}_${j + 1}${DATA_FILE_UTIL_MAP[mimeType].extension}`;
+ llmRequest.contents[i].parts[j] = {
+ text: `
+ Available file: \`${fileName}\`
+ `
+ };
+ const file = {
+ name: fileName,
+ content: CodeExecutionUtils.getEncodedFileContent(part.inlineData.data),
+ mimeType
+ };
+ if (!savedFileNames.has(fileName)) {
+ codeExecutorContext.addInputFiles([file]);
+ allInputFiles.push(file);
  }
- llmRequest.appendInstructions([instruction]);
  }
- for await (const _ of []) {
- yield _;
+ }
+ return allInputFiles;
+ }
+ function getOrSetExecutionId(invocationContext, codeExecutorContext) {
+ const agent = invocationContext.agent;
+ if (!hasCodeExecutor(agent) || !agent.codeExecutor?.stateful) {
+ return void 0;
+ }
+ let executionId = codeExecutorContext.getExecutionId();
+ if (!executionId) {
+ executionId = invocationContext.session.id;
+ codeExecutorContext.setExecutionId(executionId);
+ }
+ return executionId;
+ }
+ async function postProcessCodeExecutionResult(invocationContext, codeExecutorContext, codeExecutionResult) {
+ if (!invocationContext.artifactService) {
+ throw new Error("Artifact service is not initialized.");
+ }
+ const resultContent = {
+ role: "model",
+ parts: [
+ CodeExecutionUtils.buildCodeExecutionResultPart(codeExecutionResult)
+ ]
+ };
+ const eventActions = new EventActions({
+ stateDelta: codeExecutorContext.getStateDelta()
+ });
+ if (codeExecutionResult.stderr) {
+ codeExecutorContext.incrementErrorCount(invocationContext.invocationId);
+ } else {
+ codeExecutorContext.resetErrorCount(invocationContext.invocationId);
+ }
+ for (const outputFile of codeExecutionResult.outputFiles) {
+ const version = await invocationContext.artifactService.saveArtifact({
+ appName: invocationContext.appName,
+ userId: invocationContext.userId,
+ sessionId: invocationContext.session.id,
+ filename: outputFile.name,
+ artifact: {
+ inlineData: {
+ data: atob(outputFile.content),
+ // Convert from base64
+ mimeType: outputFile.mimeType
+ }
+ }
+ });
+ eventActions.artifactDelta[outputFile.name] = version;
+ }
+ return new Event({
+ invocationId: invocationContext.invocationId,
+ author: invocationContext.agent.name,
+ branch: invocationContext.branch,
+ content: resultContent,
+ actions: eventActions
+ });
+ }
+ function getDataFilePreprocessingCode(file) {
+ function getNormalizedFileName(fileName) {
+ const baseName = fileName.split(".")[0];
+ let varName2 = baseName.replace(/[^a-zA-Z0-9_]/g, "_");
+ if (/^\d/.test(varName2)) {
+ varName2 = `_${varName2}`;
  }
+ return varName2;
  }
- /**
- * Type guard to check if agent is an LlmAgent
- */
- isLlmAgent(agent) {
- return agent && typeof agent === "object" && "canonicalModel" in agent;
+ if (!(file.mimeType in DATA_FILE_UTIL_MAP)) {
+ return void 0;
  }
- };
- var requestProcessor4 = new InstructionsLlmRequestProcessor();
+ const varName = getNormalizedFileName(file.name);
+ const loaderCode = DATA_FILE_UTIL_MAP[file.mimeType].loaderCodeTemplate.replace("{filename}", file.name);
+ return `
+ ${DATA_FILE_HELPER_LIB}
+
+ # Load the dataframe.
+ ${varName} = ${loaderCode}
+
+ # Use \`explore_df\` to guide my analysis.
+ explore_df(${varName})
+ `;
+ }
+ var requestProcessor3 = new CodeExecutionRequestProcessor();
+ var responseProcessor = new CodeExecutionResponseProcessor();

  // src/flows/llm-flows/contents.ts
  var ContentLlmRequestProcessor = class extends BaseLlmRequestProcessor {
@@ -6717,7 +7252,7 @@ var ContentLlmRequestProcessor = class extends BaseLlmRequestProcessor {
  return agent && typeof agent === "object" && "canonicalModel" in agent;
  }
  };
- var requestProcessor5 = new ContentLlmRequestProcessor();
+ var requestProcessor4 = new ContentLlmRequestProcessor();
  function rearrangeEventsForAsyncFunctionResponsesInHistory(events) {
  const functionCallIdToResponseEventsIndex = {};
  for (let i = 0; i < events.length; i++) {
@@ -6982,6 +7517,151 @@ function isAuthEvent(event) {
  return false;
  }

+ // src/flows/llm-flows/identity.ts
+ var IdentityLlmRequestProcessor = class extends BaseLlmRequestProcessor {
+ async *runAsync(invocationContext, llmRequest) {
+ const agent = invocationContext.agent;
+ const instructions = [
+ `You are an agent. Your internal name is "${agent.name}".`
+ ];
+ if (agent.description) {
+ instructions.push(` The description about you is "${agent.description}"`);
+ }
+ llmRequest.appendInstructions(instructions);
+ for await (const _ of []) {
+ yield _;
+ }
+ }
+ };
+ var requestProcessor5 = new IdentityLlmRequestProcessor();
+
+ // src/utils/instructions-utils.ts
+ async function injectSessionState(template, readonlyContext) {
+ const invocationContext = readonlyContext._invocationContext;
+ async function asyncReplace(pattern, replaceAsyncFn, string) {
+ const result = [];
+ let lastEnd = 0;
+ const matches = Array.from(string.matchAll(pattern));
+ for (const match of matches) {
+ result.push(string.slice(lastEnd, match.index));
+ const replacement = await replaceAsyncFn(match);
+ result.push(replacement);
+ lastEnd = (match.index || 0) + match[0].length;
+ }
+ result.push(string.slice(lastEnd));
+ return result.join("");
+ }
+ async function replaceMatch(match) {
+ let varName = match[0].replace(/[{}]/g, "").trim();
+ let optional = false;
+ if (varName.endsWith("?")) {
+ optional = true;
+ varName = varName.slice(0, -1);
+ }
+ if (varName.startsWith("artifact.")) {
+ varName = varName.replace("artifact.", "");
+ if (!invocationContext.artifactService) {
+ throw new Error("Artifact service is not initialized.");
+ }
+ try {
+ const artifact = await invocationContext.artifactService.loadArtifact({
+ appName: invocationContext.session.appName,
+ userId: invocationContext.session.userId,
+ sessionId: invocationContext.session.id,
+ filename: varName
+ });
+ if (!artifact) {
+ throw new Error(`Artifact ${varName} not found.`);
+ }
+ return String(artifact);
+ } catch (error) {
+ if (optional) {
+ return "";
+ }
+ throw error;
+ }
+ } else {
+ if (!isValidStateName(varName)) {
+ return match[0];
+ }
+ const sessionState = invocationContext.session.state;
+ if (varName in sessionState) {
+ return String(sessionState[varName]);
+ }
+ if (optional) {
+ return "";
+ }
+ throw new Error(`Context variable not found: \`${varName}\`.`);
+ }
+ }
+ return await asyncReplace(/{[^{}]*}/g, replaceMatch, template);
+ }
+ function isValidStateName(varName) {
+ const parts = varName.split(":");
+ if (parts.length === 1) {
+ return isValidIdentifier(varName);
+ }
+ if (parts.length === 2) {
+ const validPrefixes = ["app:", "user:", "temp:"];
+ const prefix = `${parts[0]}:`;
+ if (validPrefixes.includes(prefix)) {
+ return isValidIdentifier(parts[1]);
+ }
+ }
+ return false;
+ }
+ function isValidIdentifier(name) {
+ const identifierRegex = /^[a-zA-Z_$][a-zA-Z0-9_$]*$/;
+ return identifierRegex.test(name);
+ }
+
+ // src/flows/llm-flows/instructions.ts
+ var InstructionsLlmRequestProcessor = class extends BaseLlmRequestProcessor {
+ async *runAsync(invocationContext, llmRequest) {
+ const agent = invocationContext.agent;
+ if (!this.isLlmAgent(agent)) {
+ return;
+ }
+ const rootAgent = agent.rootAgent;
+ if (this.isLlmAgent(rootAgent) && rootAgent.globalInstruction) {
+ const [rawInstruction, bypassStateInjection] = await rootAgent.canonicalGlobalInstruction(
+ new ReadonlyContext(invocationContext)
+ );
+ let instruction = rawInstruction;
+ if (!bypassStateInjection) {
+ instruction = await injectSessionState(
+ rawInstruction,
+ new ReadonlyContext(invocationContext)
+ );
+ }
+ llmRequest.appendInstructions([instruction]);
+ }
+ if (agent.instruction) {
+ const [rawInstruction, bypassStateInjection] = await agent.canonicalInstruction(
+ new ReadonlyContext(invocationContext)
+ );
+ let instruction = rawInstruction;
+ if (!bypassStateInjection) {
+ instruction = await injectSessionState(
+ rawInstruction,
+ new ReadonlyContext(invocationContext)
+ );
+ }
+ llmRequest.appendInstructions([instruction]);
+ }
+ for await (const _ of []) {
+ yield _;
+ }
+ }
+ /**
+ * Type guard to check if agent is an LlmAgent
+ */
+ isLlmAgent(agent) {
+ return agent && typeof agent === "object" && "canonicalModel" in agent;
+ }
+ };
+ var requestProcessor6 = new InstructionsLlmRequestProcessor();
+
  // src/planners/base-planner.ts
  var BasePlanner = class {
  };
@@ -7257,66 +7937,10 @@ function removeThoughtFromRequest(llmRequest) {
  }
  }
  }
- var requestProcessor6 = new NlPlanningRequestProcessor();
- var responseProcessor = new NlPlanningResponseProcessor();
-
- // src/flows/llm-flows/code-execution.ts
- var CodeExecutionRequestProcessor = class extends BaseLlmRequestProcessor {
- async *runAsync(invocationContext, llmRequest) {
- const agent = invocationContext.agent;
- if (!("codeExecutor" in agent) || !agent.codeExecutor) {
- return;
- }
- console.log(
- "Code execution request processing - TODO: Implement when code-executors module is ready"
- );
- for await (const _ of []) {
- yield _;
- }
- }
- /**
- * Placeholder for pre-processor logic
- * TODO: Implement when code-executors are ready
- */
- async *runPreProcessor(invocationContext, llmRequest) {
- console.log("Code execution pre-processor - placeholder");
- for await (const _ of []) {
- yield _;
- }
- }
- };
- var CodeExecutionResponseProcessor = class extends BaseLlmResponseProcessor {
- async *runAsync(invocationContext, llmResponse) {
- if (llmResponse.partial) {
- return;
- }
- const agent = invocationContext.agent;
- if (!("codeExecutor" in agent) || !agent.codeExecutor) {
- return;
- }
- console.log(
- "Code execution response processing - TODO: Implement when code-executors module is ready"
- );
- for await (const _ of []) {
- yield _;
- }
- }
- /**
- * Placeholder for post-processor logic
- * TODO: Implement when code-executors are ready
- */
- async *runPostProcessor(invocationContext, llmResponse) {
- console.log("Code execution post-processor - placeholder");
- for await (const _ of []) {
- yield _;
- }
- }
- };
- var requestProcessor7 = new CodeExecutionRequestProcessor();
- var responseProcessor2 = new CodeExecutionResponseProcessor();
+ var requestProcessor7 = new NlPlanningRequestProcessor();
+ var responseProcessor2 = new NlPlanningResponseProcessor();

  // src/flows/llm-flows/single-flow.ts
- var logger7 = new Logger({ name: "SingleFlow" });
  var SingleFlow = class extends BaseLlmFlow {
  /**
  * Constructor for SingleFlow
@@ -7324,35 +7948,32 @@ var SingleFlow = class extends BaseLlmFlow {
  constructor() {
  super();
  this.requestProcessors.push(
- requestProcessor,
  requestProcessor2,
+ requestProcessor,
  // Phase 3: Auth preprocessor
- requestProcessor4,
- requestProcessor3,
+ requestProcessor6,
  requestProcessor5,
+ requestProcessor4,
  // Some implementations of NL Planning mark planning contents as thoughts
  // in the post processor. Since these need to be unmarked, NL Planning
  // should be after contents.
- requestProcessor6,
+ requestProcessor7,
  // Phase 5: NL Planning
  // Code execution should be after the contents as it mutates the contents
  // to optimize data files.
- requestProcessor7
+ requestProcessor3
  // Phase 5: Code Execution (placeholder)
  );
  this.responseProcessors.push(
- responseProcessor,
+ responseProcessor2,
  // Phase 5: NL Planning
- responseProcessor2
+ responseProcessor
  // Phase 5: Code Execution (placeholder)
  );
- logger7.debug("SingleFlow initialized with processors");
+ this.logger.debug("SingleFlow initialized with processors");
  }
  };

- // src/flows/llm-flows/auto-flow.ts
- init_logger();
-
  // src/flows/llm-flows/agent-transfer.ts
  var AgentTransferLlmRequestProcessor = class extends BaseLlmRequestProcessor {
  /**
@@ -7442,7 +8063,6 @@ function getTransferTargets(agent) {
  var requestProcessor8 = new AgentTransferLlmRequestProcessor();

  // src/flows/llm-flows/auto-flow.ts
- var logger8 = new Logger({ name: "AutoFlow" });
  var AutoFlow = class extends SingleFlow {
  /**
  * Constructor for AutoFlow
@@ -7450,7 +8070,7 @@ var AutoFlow = class extends SingleFlow {
  constructor() {
  super();
  this.requestProcessors.push(requestProcessor8);
- logger8.debug("AutoFlow initialized with agent transfer capability");
+ this.logger.debug("AutoFlow initialized with agent transfer capability");
  }
  };

@@ -7475,6 +8095,10 @@ var LlmAgent = class _LlmAgent extends BaseAgent {
  * Tools available to this agent
  */
  tools;
+ /**
+ * Code executor for this agent
+ */
+ codeExecutor;
  /**
  * Disallows LLM-controlled transferring to the parent agent
  */
@@ -7542,6 +8166,7 @@ var LlmAgent = class _LlmAgent extends BaseAgent {
  this.instruction = config.instruction || "";
  this.globalInstruction = config.globalInstruction || "";
  this.tools = config.tools || [];
+ this.codeExecutor = config.codeExecutor;
  this.disallowTransferToParent = config.disallowTransferToParent || false;
  this.disallowTransferToPeers = config.disallowTransferToPeers || false;
  this.includeContents = config.includeContents || "default";
@@ -7561,11 +8186,14 @@ var LlmAgent = class _LlmAgent extends BaseAgent {
  * This method is only for use by Agent Development Kit
  */
  get canonicalModel() {
- if (typeof this.model !== "string") {
+ if (typeof this.model === "string") {
+ if (this.model) {
+ return LLMRegistry.newLLM(this.model);
+ }
+ } else if (this.model instanceof BaseLlm) {
  return this.model;
- }
- if (this.model) {
- return LLMRegistry.newLLM(this.model);
+ } else if (this.model) {
+ return new AiSdkLlm(this.model);
  }
  let ancestorAgent = this.parentAgent;
  while (ancestorAgent !== null) {
@@ -8331,8 +8959,6 @@ var RunConfig = class {
  };

  // src/artifacts/in-memory-artifact-service.ts
- init_logger();
- var logger9 = new Logger({ name: "InMemoryArtifactService" });
  var InMemoryArtifactService = class {
  artifacts = /* @__PURE__ */ new Map();
  fileHasUserNamespace(filename) {
@@ -8780,7 +9406,6 @@ var InMemorySessionService = class extends BaseSessionService {
  };

  // src/runners.ts
- var logger10 = new Logger({ name: "Runner" });
  function _findFunctionCallEventIfLastEventIsFunctionResponse(session) {
  const events = session.events;
  if (!events || events.length === 0) {
@@ -8825,6 +9450,7 @@ var Runner = class {
  * The memory service for the runner.
  */
  memoryService;
+ logger = new Logger({ name: "Runner" });
  /**
  * Initializes the Runner.
  */
@@ -8927,7 +9553,7 @@ var Runner = class {
  yield event;
  }
  } catch (error) {
- logger10.debug("Error running agent:", error);
+ this.logger.debug("Error running agent:", error);
  span.recordException(error);
  span.setStatus({
  code: SpanStatusCode.ERROR,
@@ -8991,7 +9617,7 @@ var Runner = class {
  }
  const agent = rootAgent.findSubAgent?.(event2.author);
  if (!agent) {
- logger10.debug(
+ this.logger.debug(
  `Event from an unknown agent: ${event2.author}, event id: ${event2.id}`
  );
  continue;
@@ -9270,6 +9896,20 @@ var AgentBuilder = class _AgentBuilder {
  */
  createAgent() {
  switch (this.agentType) {
+ case "llm": {
+ if (!this.config.model) {
+ throw new Error("Model is required for LLM agent");
+ }
+ const model = this.config.model;
+ return new LlmAgent({
+ name: this.config.name,
+ model,
+ description: this.config.description,
+ instruction: this.config.instruction,
+ tools: this.config.tools,
+ planner: this.config.planner
+ });
+ }
  case "sequential":
  if (!this.config.subAgents) {
  throw new Error("Sub-agents required for sequential agent");
@@ -9308,15 +9948,6 @@ var AgentBuilder = class _AgentBuilder {
  nodes: this.config.nodes,
  rootNode: this.config.rootNode
  });
- default:
- return new LlmAgent({
- name: this.config.name,
- model: this.config.model,
- description: this.config.description,
- instruction: this.config.instruction,
- tools: this.config.tools,
- planner: this.config.planner
- });
  }
  }
  };
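
These two hunks replace the permissive `default:` fallthrough with an explicit `"llm"` case, so `createAgent()` now fails fast when no model is configured instead of building an `LlmAgent` with `model: undefined` that would only error later during model resolution. A condensed restatement of the guard (drawn from the hunks above; not the library's code verbatim):

```ts
// Sketch of createAgent()'s new behavior for the "llm" branch.
function createAgentSketch(
  agentType: string,
  config: { model?: unknown }
): string | undefined {
  switch (agentType) {
    case "llm":
      if (!config.model) {
        // 0.1.3 fell through to `default:` and constructed an LlmAgent
        // with an undefined model; 0.1.5 throws immediately instead.
        throw new Error("Model is required for LLM agent");
      }
      return "LlmAgent with an explicit model";
    default:
      // The silent LlmAgent fallback is gone; other agent types have
      // their own cases (sequential, parallel, ...).
      return undefined;
  }
}
```
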
@@ -10054,11 +10685,11 @@ var DatabaseSessionService = class extends BaseSessionService {
  };

  // src/sessions/database-factories.ts
- import dedent3 from "dedent";
+ import dedent from "dedent";
  import { Kysely, MysqlDialect, PostgresDialect, SqliteDialect } from "kysely";
  function createDependencyError(packageName, dbType) {
  return new Error(
- dedent3`
+ dedent`
  Missing required peer dependency: ${packageName}
  To use ${dbType} sessions, install the required package:
  npm install ${packageName}
@@ -10131,11 +10762,9 @@ function createDatabaseSessionService(databaseUrl, options) {
  }

  // src/artifacts/gcs-artifact-service.ts
- init_logger();
  import {
  Storage
  } from "@google-cloud/storage";
- var logger11 = new Logger({ name: "GcsArtifactService" });
  var GcsArtifactService = class {
  bucketName;
  storageClient;
@@ -10290,20 +10919,20 @@ __export(flows_exports, {
  REQUEST_EUC_FUNCTION_CALL_NAME: () => REQUEST_EUC_FUNCTION_CALL_NAME,
  SingleFlow: () => SingleFlow,
  agentTransferRequestProcessor: () => requestProcessor8,
- basicRequestProcessor: () => requestProcessor,
- codeExecutionRequestProcessor: () => requestProcessor7,
- codeExecutionResponseProcessor: () => responseProcessor2,
- contentRequestProcessor: () => requestProcessor5,
+ basicRequestProcessor: () => requestProcessor2,
+ codeExecutionRequestProcessor: () => requestProcessor3,
+ codeExecutionResponseProcessor: () => responseProcessor,
+ contentRequestProcessor: () => requestProcessor4,
  generateAuthEvent: () => generateAuthEvent,
  generateClientFunctionCallId: () => generateClientFunctionCallId,
  getLongRunningFunctionCalls: () => getLongRunningFunctionCalls,
  handleFunctionCallsAsync: () => handleFunctionCallsAsync,
  handleFunctionCallsLive: () => handleFunctionCallsLive,
- identityRequestProcessor: () => requestProcessor3,
- instructionsRequestProcessor: () => requestProcessor4,
+ identityRequestProcessor: () => requestProcessor5,
+ instructionsRequestProcessor: () => requestProcessor6,
  mergeParallelFunctionResponseEvents: () => mergeParallelFunctionResponseEvents,
- nlPlanningRequestProcessor: () => requestProcessor6,
- nlPlanningResponseProcessor: () => responseProcessor,
+ nlPlanningRequestProcessor: () => requestProcessor7,
+ nlPlanningResponseProcessor: () => responseProcessor2,
  populateClientFunctionCallId: () => populateClientFunctionCallId,
  removeClientFunctionCallId: () => removeClientFunctionCallId
  });
@@ -10315,6 +10944,7 @@ export {
  LlmAgent as Agent,
  AgentBuilder,
  agents_exports as Agents,
+ AiSdkLlm,
  AnthropicLlm,
  ApiKeyCredential,
  ApiKeyScheme,
@@ -10327,6 +10957,7 @@ export {
  AuthTool,
  AutoFlow,
  BaseAgent,
+ BaseCodeExecutor,
  BaseLLMConnection,
  BaseLlm,
  BaseLlmFlow,
@@ -10337,8 +10968,11 @@ export {
  BaseTool,
  BasicAuthCredential,
  BearerTokenCredential,
+ BuiltInCodeExecutor,
  BuiltInPlanner,
  CallbackContext,
+ CodeExecutionUtils,
+ CodeExecutorContext,
  DatabaseSessionService,
  EnhancedAuthConfig,
  Event,
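
Five new public names are exported in this and the two preceding hunks: `AiSdkLlm`, `BaseCodeExecutor`, `BuiltInCodeExecutor`, `CodeExecutionUtils`, and `CodeExecutorContext`. The `requestProcessorN`/`responseProcessorN` churn above and in the hunks that follow is internal bundler renumbering; every public alias (`basicRequestProcessor`, `nlPlanningResponseProcessor`, and so on) is unchanged. Only the export names are visible in this diff, not their signatures, so the import itself is the only certain part:

```ts
// The new code-execution surface, importable as of 0.1.5.
import {
  AiSdkLlm,
  BaseCodeExecutor,
  BuiltInCodeExecutor,
  CodeExecutionUtils,
  CodeExecutorContext,
} from "@iqai/adk";
```
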
@@ -10410,11 +11044,11 @@ export {
  VertexAiSessionService,
  adkToMcpToolType,
  requestProcessor8 as agentTransferRequestProcessor,
- requestProcessor as basicRequestProcessor,
+ requestProcessor2 as basicRequestProcessor,
  buildFunctionDeclaration,
- requestProcessor7 as codeExecutionRequestProcessor,
- responseProcessor2 as codeExecutionResponseProcessor,
- requestProcessor5 as contentRequestProcessor,
+ requestProcessor3 as codeExecutionRequestProcessor,
+ responseProcessor as codeExecutionResponseProcessor,
+ requestProcessor4 as contentRequestProcessor,
  createAuthToolArguments,
  createDatabaseSessionService,
  createFunctionTool,
@@ -10428,22 +11062,22 @@ export {
  getMcpTools,
  handleFunctionCallsAsync,
  handleFunctionCallsLive,
- requestProcessor3 as identityRequestProcessor,
+ requestProcessor5 as identityRequestProcessor,
  initializeTelemetry,
  injectSessionState,
- requestProcessor4 as instructionsRequestProcessor,
+ requestProcessor6 as instructionsRequestProcessor,
  isEnhancedAuthConfig,
  jsonSchemaToDeclaration,
  mcpSchemaToParameters,
  mergeParallelFunctionResponseEvents,
  newInvocationContextId,
- requestProcessor6 as nlPlanningRequestProcessor,
- responseProcessor as nlPlanningResponseProcessor,
+ requestProcessor7 as nlPlanningRequestProcessor,
+ responseProcessor2 as nlPlanningResponseProcessor,
  normalizeJsonSchema,
  populateClientFunctionCallId,
  registerProviders,
  removeClientFunctionCallId,
- requestProcessor2 as requestProcessor,
+ requestProcessor,
  shutdownTelemetry,
  telemetryService,
  traceLlmCall,