@inkeep/agents-run-api 0.38.0 → 0.38.1

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (3)
  1. package/dist/index.cjs +1371 -322
  2. package/dist/index.js +1439 -395
  3. package/package.json +2 -5
package/dist/index.cjs CHANGED
@@ -33,6 +33,7 @@ var streaming = require('hono/streaming');
  var ai = require('ai');
  var jmespath = require('jmespath');
  var Ajv = require('ajv');
+ var zod = require('zod');
  var destr = require('destr');
  var traverse = require('traverse');
  var mcp_js = require('@alcyone-labs/modelcontextprotocol-sdk/server/mcp.js');
@@ -1240,7 +1241,7 @@ var init_chunk_VBDAOXYI = __esm({
  }], ["modifyTime", 12, 136, function(r, e) {
  return k4(r[e[0]], e[1]);
  }, function(r, e, t2) {
- return z13(r.slice(e, e + t2[1]));
+ return z14(r.slice(e, e + t2[1]));
  }], ["checksum", 8, 148, function(r, e) {
  return " ";
  }, function(r, e, t2) {
@@ -1282,11 +1283,11 @@ var init_chunk_VBDAOXYI = __esm({
  }], ["accessTime", 12, 476, function(r, e) {
  return k4(r[e[0]], e[1]);
  }, function(r, e, t2) {
- return z13(r.slice(e, e + t2[1]));
+ return z14(r.slice(e, e + t2[1]));
  }], ["createTime", 12, 488, function(r, e) {
  return k4(r[e[0]], e[1]);
  }, function(r, e, t2) {
- return z13(r.slice(e, e + t2[1]));
+ return z14(r.slice(e, e + t2[1]));
  }]], $4 = (function(r) {
  var e = r[r.length - 1];
  return e[2] + e[1];
@@ -1323,7 +1324,7 @@ var init_chunk_VBDAOXYI = __esm({
  var e = String.fromCharCode.apply(null, r);
  return parseInt(e.replace(/^0+$/g, ""), 8) || 0;
  }
- function z13(r) {
+ function z14(r) {
  return r.length == 0 || r[0] == 0 ? null : new Date(1e3 * S3(r));
  }
  function Tr2(r, e, t2) {
@@ -1349,7 +1350,7 @@ var init_chunk_VBDAOXYI = __esm({
  f3.exports.formatTarDateTime = k4;
  f3.exports.parseTarString = A2;
  f3.exports.parseTarNumber = S3;
- f3.exports.parseTarDateTime = z13;
+ f3.exports.parseTarDateTime = z14;
  });
  er = D((ne3, rr) => {
  u();
@@ -6519,19 +6520,19 @@ async function getConversationScopedArtifacts(params) {
  if (visibleMessageIds.length === 0) {
  return [];
  }
- const { getLedgerArtifacts: getLedgerArtifacts3 } = await import('@inkeep/agents-core');
+ const { getLedgerArtifacts: getLedgerArtifacts4 } = await import('@inkeep/agents-core');
  const dbClient2 = (await Promise.resolve().then(() => (init_dbClient(), dbClient_exports))).default;
  const visibleTaskIds = visibleMessages.map((msg) => msg.taskId).filter((taskId) => Boolean(taskId));
  const referenceArtifacts = [];
  for (const taskId of visibleTaskIds) {
- const artifacts = await getLedgerArtifacts3(dbClient2)({
+ const artifacts = await getLedgerArtifacts4(dbClient2)({
  scopes: { tenantId, projectId },
  taskId
  });
  referenceArtifacts.push(...artifacts);
  }
- const logger28 = (await Promise.resolve().then(() => (init_logger(), logger_exports))).getLogger("conversations");
- logger28.debug(
+ const logger30 = (await Promise.resolve().then(() => (init_logger(), logger_exports))).getLogger("conversations");
+ logger30.debug(
  {
  conversationId,
  visibleMessages: visibleMessages.length,
@@ -6543,8 +6544,8 @@ async function getConversationScopedArtifacts(params) {
  );
  return referenceArtifacts;
  } catch (error) {
- const logger28 = (await Promise.resolve().then(() => (init_logger(), logger_exports))).getLogger("conversations");
- logger28.error(
+ const logger30 = (await Promise.resolve().then(() => (init_logger(), logger_exports))).getLogger("conversations");
+ logger30.error(
  {
  error: error instanceof Error ? error.message : "Unknown error",
  conversationId
@@ -6586,14 +6587,14 @@ const execute = ${executeCode}
  })();
  `;
  }
- function parseExecutionResult(stdout, functionId, logger28) {
+ function parseExecutionResult(stdout, functionId, logger30) {
  try {
  const outputLines = stdout.split("\n").filter((line) => line.trim());
  const resultLine = outputLines[outputLines.length - 1];
  return JSON.parse(resultLine);
  } catch (parseError) {
- if (logger28) {
- logger28.warn(
+ if (logger30) {
+ logger30.warn(
  {
  functionId,
  stdout,
@@ -6609,13 +6610,13 @@ var init_sandbox_utils = __esm({
  "src/tools/sandbox-utils.ts"() {
  }
  });
- var logger15, ExecutionSemaphore, NativeSandboxExecutor;
+ var logger17, ExecutionSemaphore, NativeSandboxExecutor;
  var init_NativeSandboxExecutor = __esm({
  "src/tools/NativeSandboxExecutor.ts"() {
  init_execution_limits();
  init_logger();
  init_sandbox_utils();
- logger15 = agentsCore.getLogger("native-sandbox-executor");
+ logger17 = agentsCore.getLogger("native-sandbox-executor");
  ExecutionSemaphore = class {
  permits;
  waitQueue = [];
@@ -6687,7 +6688,7 @@ var init_NativeSandboxExecutor = __esm({
  getSemaphore(vcpus) {
  const effectiveVcpus = Math.max(1, vcpus || 1);
  if (!this.executionSemaphores.has(effectiveVcpus)) {
- logger15.debug({ vcpus: effectiveVcpus }, "Creating new execution semaphore");
+ logger17.debug({ vcpus: effectiveVcpus }, "Creating new execution semaphore");
  this.executionSemaphores.set(effectiveVcpus, new ExecutionSemaphore(effectiveVcpus));
  }
  const semaphore = this.executionSemaphores.get(effectiveVcpus);
@@ -6724,7 +6725,7 @@ var init_NativeSandboxExecutor = __esm({
  if (now - sandbox.lastUsed < FUNCTION_TOOL_SANDBOX_POOL_TTL_MS && sandbox.useCount < FUNCTION_TOOL_SANDBOX_MAX_USE_COUNT) {
  sandbox.lastUsed = now;
  sandbox.useCount++;
- logger15.debug(
+ logger17.debug(
  {
  poolKey,
  useCount: sandbox.useCount,
@@ -6751,14 +6752,14 @@ var init_NativeSandboxExecutor = __esm({
  useCount: 1,
  dependencies
  };
- logger15.debug({ poolKey, sandboxDir }, "Added sandbox to pool");
+ logger17.debug({ poolKey, sandboxDir }, "Added sandbox to pool");
  }
  cleanupSandbox(sandboxDir) {
  try {
  s3.rmSync(sandboxDir, { recursive: true, force: true });
- logger15.debug({ sandboxDir }, "Cleaned up sandbox");
+ logger17.debug({ sandboxDir }, "Cleaned up sandbox");
  } catch (error) {
- logger15.warn({ sandboxDir, error }, "Failed to clean up sandbox");
+ logger17.warn({ sandboxDir, error }, "Failed to clean up sandbox");
  }
  }
  startPoolCleanup() {
@@ -6775,7 +6776,7 @@ var init_NativeSandboxExecutor = __esm({
  delete this.sandboxPool[key];
  });
  if (keysToDelete.length > 0) {
- logger15.debug({ cleanedCount: keysToDelete.length }, "Cleaned up expired sandboxes");
+ logger17.debug({ cleanedCount: keysToDelete.length }, "Cleaned up expired sandboxes");
  }
  }, FUNCTION_TOOL_SANDBOX_CLEANUP_INTERVAL_MS);
  }
@@ -6804,7 +6805,7 @@ var init_NativeSandboxExecutor = __esm({
  return hasCjsSyntax ? "cjs" : "esm";
  }
  if (hasEsmSyntax && hasCjsSyntax) {
- logger15.warn(
+ logger17.warn(
  { executeCode: `${executeCode.substring(0, 100)}...` },
  "Both ESM and CommonJS syntax detected, defaulting to ESM"
  );
@@ -6821,7 +6822,7 @@ var init_NativeSandboxExecutor = __esm({
  async executeFunctionTool(toolId, args2, config) {
  const vcpus = config.sandboxConfig?.vcpus || 1;
  const semaphore = this.getSemaphore(vcpus);
- logger15.debug(
+ logger17.debug(
  {
  toolId,
  vcpus,
@@ -6839,7 +6840,7 @@ var init_NativeSandboxExecutor = __esm({
  async executeInSandbox_Internal(toolId, args2, config) {
  const dependencies = config.dependencies || {};
  const dependencyHash = this.generateDependencyHash(dependencies);
- logger15.debug(
+ logger17.debug(
  {
  toolId,
  dependencies,
@@ -6855,7 +6856,7 @@ var init_NativeSandboxExecutor = __esm({
  sandboxDir = o3.join(this.tempDir, `sandbox-${dependencyHash}-${Date.now()}`);
  s3.mkdirSync(sandboxDir, { recursive: true });
  isNewSandbox = true;
- logger15.debug(
+ logger17.debug(
  {
  toolId,
  dependencyHash,
@@ -6926,15 +6927,15 @@ var init_NativeSandboxExecutor = __esm({
  });
  npm.on("close", (code) => {
  if (code === 0) {
- logger15.debug({ sandboxDir }, "Dependencies installed successfully");
+ logger17.debug({ sandboxDir }, "Dependencies installed successfully");
  resolve2();
  } else {
- logger15.error({ sandboxDir, code, stderr }, "Failed to install dependencies");
+ logger17.error({ sandboxDir, code, stderr }, "Failed to install dependencies");
  reject(new Error(`npm install failed with code ${code}: ${stderr}`));
  }
  });
  npm.on("error", (err2) => {
- logger15.error({ sandboxDir, error: err2 }, "Failed to spawn npm install");
+ logger17.error({ sandboxDir, error: err2 }, "Failed to spawn npm install");
  reject(err2);
  });
  });
@@ -6981,7 +6982,7 @@ var init_NativeSandboxExecutor = __esm({
  stderr += dataStr;
  });
  const timeoutId = setTimeout(() => {
- logger15.warn({ sandboxDir, timeout }, "Function execution timed out, killing process");
+ logger17.warn({ sandboxDir, timeout }, "Function execution timed out, killing process");
  node.kill("SIGTERM");
  const forceKillTimeout = Math.min(Math.max(timeout / 10, 2e3), 5e3);
  setTimeout(() => {
@@ -6996,7 +6997,7 @@ var init_NativeSandboxExecutor = __esm({
  clearTimeout(timeoutId);
  if (code === 0) {
  try {
- const result = parseExecutionResult(stdout, "function", logger15);
+ const result = parseExecutionResult(stdout, "function", logger17);
  if (typeof result === "object" && result !== null && "success" in result) {
  const parsed = result;
  if (parsed.success) {
@@ -7008,18 +7009,18 @@ var init_NativeSandboxExecutor = __esm({
  resolve2(result);
  }
  } catch (parseError) {
- logger15.error({ stdout, stderr, parseError }, "Failed to parse function result");
+ logger17.error({ stdout, stderr, parseError }, "Failed to parse function result");
  reject(new Error(`Invalid function result: ${stdout}`));
  }
  } else {
  const errorMsg = signal ? `Function execution killed by signal ${signal}: ${stderr}` : `Function execution failed with code ${code}: ${stderr}`;
- logger15.error({ code, signal, stderr }, "Function execution failed");
+ logger17.error({ code, signal, stderr }, "Function execution failed");
  reject(new Error(errorMsg));
  }
  });
  node.on("error", (error) => {
  clearTimeout(timeoutId);
- logger15.error({ sandboxDir, error }, "Failed to spawn node process");
+ logger17.error({ sandboxDir, error }, "Failed to spawn node process");
  reject(error);
  });
  });
@@ -7027,13 +7028,13 @@ var init_NativeSandboxExecutor = __esm({
  };
  }
  });
- var logger16, VercelSandboxExecutor;
+ var logger18, VercelSandboxExecutor;
  var init_VercelSandboxExecutor = __esm({
  "src/tools/VercelSandboxExecutor.ts"() {
  init_execution_limits();
  init_logger();
  init_sandbox_utils();
- logger16 = agentsCore.getLogger("VercelSandboxExecutor");
+ logger18 = agentsCore.getLogger("VercelSandboxExecutor");
  VercelSandboxExecutor = class _VercelSandboxExecutor {
  static instance;
  config;
@@ -7041,7 +7042,7 @@ var init_VercelSandboxExecutor = __esm({
  cleanupInterval = null;
  constructor(config) {
  this.config = config;
- logger16.info(
+ logger18.info(
  {
  teamId: config.teamId,
  projectId: config.projectId,
@@ -7078,7 +7079,7 @@ var init_VercelSandboxExecutor = __esm({
  const now = Date.now();
  const age = now - cached.createdAt;
  if (age > FUNCTION_TOOL_SANDBOX_POOL_TTL_MS || cached.useCount >= FUNCTION_TOOL_SANDBOX_MAX_USE_COUNT) {
- logger16.debug(
+ logger18.debug(
  {
  dependencyHash,
  age,
@@ -7091,7 +7092,7 @@ var init_VercelSandboxExecutor = __esm({
  this.removeSandbox(dependencyHash);
  return null;
  }
- logger16.debug(
+ logger18.debug(
  {
  dependencyHash,
  useCount: cached.useCount,
@@ -7111,7 +7112,7 @@ var init_VercelSandboxExecutor = __esm({
  useCount: 0,
  dependencies
  });
- logger16.debug(
+ logger18.debug(
  {
  dependencyHash,
  poolSize: this.sandboxPool.size
@@ -7136,9 +7137,9 @@ var init_VercelSandboxExecutor = __esm({
  if (cached) {
  try {
  await cached.sandbox.stop();
- logger16.debug({ dependencyHash }, "Sandbox stopped");
+ logger18.debug({ dependencyHash }, "Sandbox stopped");
  } catch (error) {
- logger16.warn({ error, dependencyHash }, "Error stopping sandbox");
+ logger18.warn({ error, dependencyHash }, "Error stopping sandbox");
  }
  this.sandboxPool.delete(dependencyHash);
  }
@@ -7157,7 +7158,7 @@ var init_VercelSandboxExecutor = __esm({
  }
  }
  if (toRemove.length > 0) {
- logger16.info(
+ logger18.info(
  {
  count: toRemove.length,
  poolSize: this.sandboxPool.size
@@ -7178,7 +7179,7 @@ var init_VercelSandboxExecutor = __esm({
  clearInterval(this.cleanupInterval);
  this.cleanupInterval = null;
  }
- logger16.info(
+ logger18.info(
  {
  poolSize: this.sandboxPool.size
  },
@@ -7215,7 +7216,7 @@ var init_VercelSandboxExecutor = __esm({
  const envLines = [];
  for (const varName of envVarNames) {
  envLines.push(`${varName}=""`);
- logger16.debug({ varName }, "Adding environment variable placeholder to sandbox");
+ logger18.debug({ varName }, "Adding environment variable placeholder to sandbox");
  }
  return envLines.join("\n");
  }
@@ -7228,7 +7229,7 @@ var init_VercelSandboxExecutor = __esm({
  const dependencies = toolConfig.dependencies || {};
  const dependencyHash = this.generateDependencyHash(dependencies);
  try {
- logger16.info(
+ logger18.info(
  {
  functionId,
  functionName: toolConfig.name,
@@ -7251,7 +7252,7 @@ var init_VercelSandboxExecutor = __esm({
  },
  runtime: this.config.runtime
  });
- logger16.info(
+ logger18.info(
  {
  functionId,
  sandboxId: sandbox$1.sandboxId,
@@ -7261,7 +7262,7 @@ var init_VercelSandboxExecutor = __esm({
  );
  this.addToPool(dependencyHash, sandbox$1, dependencies);
  } else {
- logger16.info(
+ logger18.info(
  {
  functionId,
  sandboxId: sandbox$1.sandboxId,
@@ -7273,7 +7274,7 @@ var init_VercelSandboxExecutor = __esm({
7273
7274
  this.incrementUseCount(dependencyHash);
7274
7275
  try {
7275
7276
  if (isNewSandbox && toolConfig.dependencies && Object.keys(toolConfig.dependencies).length > 0) {
7276
- logger16.debug(
7277
+ logger18.debug(
7277
7278
  {
7278
7279
  functionId,
7279
7280
  functionName: toolConfig.name,
@@ -7306,7 +7307,7 @@ var init_VercelSandboxExecutor = __esm({
  if (installCmd.exitCode !== 0) {
  throw new Error(`Failed to install dependencies: ${installStderr}`);
  }
- logger16.info(
+ logger18.info(
  {
  functionId,
  dependencyHash
@@ -7329,7 +7330,7 @@ var init_VercelSandboxExecutor = __esm({
  path: ".env",
  content: Buffer.from(envFileContent, "utf-8")
  });
- logger16.info(
+ logger18.info(
  {
  functionId,
  envVarCount: envVars.size,
@@ -7340,7 +7341,7 @@ var init_VercelSandboxExecutor = __esm({
  }
  }
  await sandbox$1.writeFiles(filesToWrite);
- logger16.info(
+ logger18.info(
  {
  functionId,
  runtime: this.config.runtime === "typescript" ? "tsx" : "node",
@@ -7371,7 +7372,7 @@ var init_VercelSandboxExecutor = __esm({
  }
  const executionTime = Date.now() - startTime;
  if (executeCmd.exitCode !== 0) {
- logger16.error(
+ logger18.error(
  {
  functionId,
  exitCode: executeCmd.exitCode,
@@ -7386,8 +7387,8 @@ var init_VercelSandboxExecutor = __esm({
  executionTime
  };
  }
- const result = parseExecutionResult(executeStdout, functionId, logger16);
- logger16.info(
+ const result = parseExecutionResult(executeStdout, functionId, logger18);
+ logger18.info(
  {
  functionId,
  executionTime
@@ -7407,7 +7408,7 @@ var init_VercelSandboxExecutor = __esm({
  } catch (error) {
  const executionTime = Date.now() - startTime;
  const errorMessage = error instanceof Error ? error.message : String(error);
- logger16.error(
+ logger18.error(
  {
  functionId,
  error: errorMessage,
@@ -7432,19 +7433,19 @@ var SandboxExecutorFactory_exports = {};
  __export(SandboxExecutorFactory_exports, {
  SandboxExecutorFactory: () => SandboxExecutorFactory
  });
- var logger17, SandboxExecutorFactory;
+ var logger19, SandboxExecutorFactory;
  var init_SandboxExecutorFactory = __esm({
  "src/tools/SandboxExecutorFactory.ts"() {
  init_logger();
  init_NativeSandboxExecutor();
  init_VercelSandboxExecutor();
- logger17 = agentsCore.getLogger("SandboxExecutorFactory");
+ logger19 = agentsCore.getLogger("SandboxExecutorFactory");
  SandboxExecutorFactory = class _SandboxExecutorFactory {
  static instance;
  nativeExecutor = null;
  vercelExecutors = /* @__PURE__ */ new Map();
  constructor() {
- logger17.info({}, "SandboxExecutorFactory initialized");
+ logger19.info({}, "SandboxExecutorFactory initialized");
  }
  /**
  * Get singleton instance of SandboxExecutorFactory
@@ -7477,7 +7478,7 @@ var init_SandboxExecutorFactory = __esm({
  async executeInNativeSandbox(functionId, args2, config) {
  if (!this.nativeExecutor) {
  this.nativeExecutor = NativeSandboxExecutor.getInstance();
- logger17.info({}, "Native sandbox executor created");
+ logger19.info({}, "Native sandbox executor created");
  }
  return this.nativeExecutor.executeFunctionTool(functionId, args2, config);
  }
@@ -7490,7 +7491,7 @@ var init_SandboxExecutorFactory = __esm({
  if (!this.vercelExecutors.has(configKey)) {
  const executor2 = VercelSandboxExecutor.getInstance(vercelConfig);
  this.vercelExecutors.set(configKey, executor2);
- logger17.info(
+ logger19.info(
  {
  teamId: vercelConfig.teamId,
  projectId: vercelConfig.projectId
@@ -7512,13 +7513,13 @@ var init_SandboxExecutorFactory = __esm({
  * Clean up all sandbox executors
  */
  async cleanup() {
- logger17.info({}, "Cleaning up sandbox executors");
+ logger19.info({}, "Cleaning up sandbox executors");
  this.nativeExecutor = null;
  for (const [key, executor] of this.vercelExecutors.entries()) {
  await executor.cleanup();
  this.vercelExecutors.delete(key);
  }
- logger17.info({}, "Sandbox executor cleanup completed");
+ logger19.info({}, "Sandbox executor cleanup completed");
  }
  };
  }
@@ -9236,7 +9237,7 @@ var ArtifactService = class _ArtifactService {
9236
9237
  const summaryValidation = validateAgainstSchema(summaryData, previewSchema);
9237
9238
  const fullValidation = validateAgainstSchema(fullData, fullSchema);
9238
9239
  if (!summaryValidation.hasRequiredFields) {
9239
- const error = new Error(
9240
+ new Error(
9240
9241
  `Cannot save artifact: Missing required fields [${summaryValidation.missingRequired.join(", ")}] for '${artifactType}' schema. Required: [${summaryValidation.missingRequired.join(", ")}]. Found: [${summaryValidation.actualFields.join(", ")}]. Consider using a different artifact component type that matches your data structure.`
9241
9242
  );
9242
9243
  logger6.error(
@@ -9247,9 +9248,13 @@ var ArtifactService = class _ArtifactService {
9247
9248
  actualFields: summaryValidation.actualFields,
9248
9249
  schemaExpected: previewSchema?.properties ? Object.keys(previewSchema.properties) : []
9249
9250
  },
9250
- "Blocking artifact save due to missing required fields"
9251
+ "Artifact creation failed due to missing required fields - continuing with generation"
9251
9252
  );
9252
- throw error;
9253
+ return {
9254
+ summary: summaryValidation,
9255
+ full: fullValidation,
9256
+ schemaFound: !!previewSchema
9257
+ };
9253
9258
  }
9254
9259
  if (!summaryValidation.hasExpectedFields || summaryValidation.extraFields.length > 0) {
9255
9260
  logger6.warn(
@@ -9399,7 +9404,7 @@ var ArtifactService = class _ArtifactService {
9399
9404
  * Used by AgentSession to save artifacts after name/description generation
9400
9405
  */
9401
9406
  async saveArtifact(artifact) {
9402
- let summaryData = artifact.data;
9407
+ let summaryData = artifact.summaryData || artifact.data;
9403
9408
  let fullData = artifact.data;
9404
9409
  if (this.context.artifactComponents) {
9405
9410
  const artifactComponent = this.context.artifactComponents.find(
@@ -9987,7 +9992,6 @@ var AgentSession = class {
9987
9992
  * Send data operation to stream when emit operations is enabled
9988
9993
  */
9989
9994
  async sendDataOperation(event) {
9990
- console.log("sendDataOperation called with event", Date.now());
9991
9995
  try {
9992
9996
  const streamHelper = getStreamHelper(this.sessionId);
9993
9997
  if (streamHelper) {
@@ -10038,6 +10042,8 @@ var AgentSession = class {
10038
10042
  return `Task completed: ${event.data.targetSubAgent} \u2192 ${event.data.fromSubAgent}`;
10039
10043
  case "artifact_saved":
10040
10044
  return `Artifact saved: ${event.data.artifactType || "unknown type"}`;
10045
+ case "compression":
10046
+ return `Compressed ${event.data.messageCount} messages and ${event.data.artifactCount} artifacts (${event.data.reason})`;
10041
10047
  default:
10042
10048
  return `${event.eventType} event`;
10043
10049
  }
@@ -10920,17 +10926,59 @@ ${this.statusUpdateState?.config.prompt?.trim() || ""}`;
10920
10926
  (event) => event.eventType === "tool_result" && event.data && "toolCallId" in event.data && event.data.toolCallId === artifactData.metadata?.toolCallId
10921
10927
  );
10922
10928
  const toolContext = toolCallEvent ? {
10923
- toolName: toolCallEvent.data.toolName,
10924
10929
  args: toolCallEvent.data.args
10925
10930
  } : null;
10926
- const prompt = `Name this artifact (max 50 chars) and describe it (max 150 chars).
10931
+ let existingNames = [];
10932
+ try {
10933
+ if (artifactData.tenantId && artifactData.projectId && artifactData.taskId) {
10934
+ const existingArtifacts = await agentsCore.getLedgerArtifacts(dbClient_default)({
10935
+ scopes: { tenantId: artifactData.tenantId, projectId: artifactData.projectId },
10936
+ taskId: artifactData.taskId
10937
+ });
10938
+ existingNames = existingArtifacts.map((a2) => a2.name).filter(Boolean);
10939
+ }
10940
+ } catch (error) {
10941
+ logger8.warn(
10942
+ {
10943
+ sessionId: this.sessionId,
10944
+ artifactId: artifactData.artifactId,
10945
+ error: error instanceof Error ? error.message : "Unknown error"
10946
+ },
10947
+ "Failed to fetch existing artifact names for context"
10948
+ );
10949
+ }
10950
+ const toolName = artifactData.metadata?.toolName || "unknown";
10951
+ const toolCallId = artifactData.metadata?.toolCallId || "unknown";
10952
+ const prompt = `Create a unique name and description for this tool result artifact.
10953
+
10954
+ CRITICAL: Your name must be different from these existing artifacts: ${existingNames.length > 0 ? existingNames.join(", ") : "None yet"}
10927
10955
 
10928
- Tool Context: ${toolContext ? JSON.stringify(toolContext, null, 2) : "No tool context"}
10929
- Context: ${conversationHistory?.slice(-200) || "Processing"}
10956
+ Tool Context: ${toolContext ? JSON.stringify(toolContext.args, null, 2) : "No args"}
10957
+ Context: ${conversationHistory?.slice(-200) || "No context"}
10930
10958
  Type: ${artifactData.artifactType || "data"}
10931
- Data: ${JSON.stringify(artifactData.data || artifactData.summaryData, null, 2)}
10959
+ Data: ${JSON.stringify(artifactData.data || artifactData.summaryData || {}, null, 2)}
10960
+
10961
+ Requirements:
10962
+ - Name: Max 50 chars, be extremely specific to THIS EXACT tool execution
10963
+ - Description: Max 150 chars, describe what THIS SPECIFIC tool call returned
10964
+ - Focus on the unique aspects of this particular tool execution result
10965
+ - Be descriptive about the actual content returned, not just the tool type
10932
10966
 
10933
- Make it specific and relevant.`;
10967
+ BAD Examples (too generic):
10968
+ - "Search Results"
10969
+ - "Tool Results"
10970
+ - "${toolName} Results"
10971
+ - "Data from ${toolName}"
10972
+ - "Tool Output"
10973
+ - "Search Data"
10974
+
10975
+ GOOD Examples:
10976
+ - "GitHub API Rate Limits & Auth Methods"
10977
+ - "React Component Props Documentation"
10978
+ - "Database Schema for User Tables"
10979
+ - "Pricing Tiers with Enterprise Features"
10980
+
10981
+ Make the name extremely specific to what this tool call actually returned, not generic.`;
10934
10982
  let modelToUse = this.statusUpdateState?.summarizerModel;
10935
10983
  if (!modelToUse?.model?.trim()) {
10936
10984
  if (!this.statusUpdateState?.baseModel?.model?.trim()) {
@@ -10984,9 +11032,10 @@ Make it specific and relevant.`;
10984
11032
  }
10985
11033
  let result;
10986
11034
  if (!modelToUse) {
11035
+ const toolCallSuffix = artifactData.metadata?.toolCallId?.slice(-8) || Date.now().toString().slice(-8);
10987
11036
  result = {
10988
- name: `Artifact ${artifactData.artifactId.substring(0, 8)}`,
10989
- description: `${artifactData.artifactType || "Data"} from ${artifactData.metadata?.toolCallId || "tool results"}`
11037
+ name: `${artifactData.artifactType || "Artifact"} ${toolCallSuffix}`,
11038
+ description: `${artifactData.artifactType || "Data"} from ${artifactData.metadata?.toolName || "tool"} (${artifactData.metadata?.toolCallId || "tool results"})`
10990
11039
  };
10991
11040
  } else {
10992
11041
  const model = agentsCore.ModelFactory.createModel(modelToUse);
@@ -11081,6 +11130,21 @@ Make it specific and relevant.`;
11081
11130
  );
11082
11131
  result = object;
11083
11132
  }
11133
+ if (existingNames.includes(result.name)) {
11134
+ const toolCallSuffix = toolCallId.slice(-8);
11135
+ const originalName = result.name;
11136
+ result.name = result.name.length + toolCallSuffix.length + 1 <= 50 ? `${result.name} ${toolCallSuffix}` : `${result.name.substring(0, 50 - toolCallSuffix.length - 1)} ${toolCallSuffix}`;
11137
+ logger8.info(
11138
+ {
11139
+ sessionId: this.sessionId,
11140
+ artifactId: artifactData.artifactId,
11141
+ originalName,
11142
+ uniqueName: result.name,
11143
+ reason: "Name conflict resolved with toolCallId suffix"
11144
+ },
11145
+ "Updated artifact name for uniqueness"
11146
+ );
11147
+ }
11084
11148
  try {
11085
11149
  if (!this.artifactService) {
11086
11150
  throw new Error("ArtifactService is not initialized");
@@ -11091,6 +11155,7 @@ Make it specific and relevant.`;
11091
11155
  description: result.description,
11092
11156
  type: artifactData.artifactType || "source",
11093
11157
  data: artifactData.data || {},
11158
+ summaryData: artifactData.summaryData,
11094
11159
  metadata: artifactData.metadata || {},
11095
11160
  toolCallId: artifactData.toolCallId
11096
11161
  });
@@ -11106,7 +11171,13 @@ Make it specific and relevant.`;
11106
11171
  {
11107
11172
  sessionId: this.sessionId,
11108
11173
  artifactId: artifactData.artifactId,
11109
- error: saveError instanceof Error ? saveError.message : "Unknown error"
11174
+ error: saveError instanceof Error ? saveError.message : "Unknown error",
11175
+ errorName: saveError instanceof Error ? saveError.name : void 0,
11176
+ errorCause: saveError instanceof Error ? saveError.cause : void 0,
11177
+ errorCode: saveError?.code || saveError?.errno || void 0,
11178
+ artifactType: artifactData.artifactType,
11179
+ dataKeys: artifactData.data ? Object.keys(artifactData.data) : [],
11180
+ metadataKeys: artifactData.metadata ? Object.keys(artifactData.metadata) : []
11110
11181
  },
11111
11182
  "Main artifact save failed, will attempt fallback"
11112
11183
  );
@@ -11127,6 +11198,7 @@ Make it specific and relevant.`;
11127
11198
  description: `${artifactData.artifactType || "Data"} from ${artifactData.metadata?.toolName || "tool results"}`,
11128
11199
  type: artifactData.artifactType || "source",
11129
11200
  data: artifactData.data || {},
11201
+ summaryData: artifactData.summaryData,
11130
11202
  metadata: artifactData.metadata || {},
11131
11203
  toolCallId: artifactData.toolCallId
11132
11204
  });
@@ -11145,7 +11217,13 @@ Make it specific and relevant.`;
11145
11217
  {
11146
11218
  sessionId: this.sessionId,
11147
11219
  artifactId: artifactData.artifactId,
11148
- error: fallbackError instanceof Error ? fallbackError.message : "Unknown error"
11220
+ error: fallbackError instanceof Error ? fallbackError.message : "Unknown error",
11221
+ errorName: fallbackError instanceof Error ? fallbackError.name : void 0,
11222
+ errorCause: fallbackError instanceof Error ? fallbackError.cause : void 0,
11223
+ errorCode: fallbackError?.code || fallbackError?.errno || void 0,
11224
+ artifactType: artifactData.artifactType,
11225
+ dataKeys: artifactData.data ? Object.keys(artifactData.data) : [],
11226
+ metadataKeys: artifactData.metadata ? Object.keys(artifactData.metadata) : []
11149
11227
  },
11150
11228
  "Failed to save artifact even with fallback"
11151
11229
  );
@@ -11791,9 +11869,660 @@ ${chunk}`;
11791
11869
  }
11792
11870
  };
11793
11871
 
11872
+ // src/services/MidGenerationCompressor.ts
11873
+ init_logger();
11874
+
11875
+ // src/tools/distill-conversation-tool.ts
11876
+ init_logger();
11877
+ var logger10 = agentsCore.getLogger("distill-conversation-tool");
11878
+ var ConversationSummarySchema = zod.z.object({
11879
+ type: zod.z.literal("conversation_summary_v1"),
11880
+ session_id: zod.z.string().nullable().optional(),
11881
+ high_level: zod.z.string().describe("1-3 sentences capturing what was discovered and learned"),
11882
+ user_intent: zod.z.string().describe("Current main goal or what the user wants to accomplish"),
11883
+ decisions: zod.z.array(zod.z.string()).describe("Concrete decisions made about approach or implementation (\u22645 items)"),
11884
+ open_questions: zod.z.array(zod.z.string()).describe("Unresolved questions about the subject matter (\u22645 items)"),
11885
+ next_steps: zod.z.object({
11886
+ for_agent: zod.z.array(zod.z.string()).describe(
11887
+ "Content-focused actions: what to discover, analyze, or present. Don't get trapped in an infinite loop of tool calls. You have already done a lot of work that is why you are being compressed. Don't encourage too much more work."
11888
+ ),
11889
+ for_user: zod.z.array(zod.z.string()).describe("Actions for user based on discovered content")
11890
+ }),
11891
+ related_artifacts: zod.z.array(
11892
+ zod.z.object({
11893
+ id: zod.z.string().describe("Artifact ID"),
11894
+ name: zod.z.string().describe("Human-readable name describing the content"),
11895
+ tool_name: zod.z.string().describe("Tool that generated this artifact (e.g. search-inkeep-docs)"),
11896
+ tool_call_id: zod.z.string().describe("Specific tool call ID for precise referencing"),
11897
+ content_type: zod.z.string().describe("Type of content (e.g. search_results, api_response, documentation)"),
11898
+ key_findings: zod.z.array(zod.z.string()).describe("2-3 most important findings from this specific artifact")
11899
+ })
11900
+ ).optional().describe("Artifacts containing detailed findings with citation info")
11901
+ });
11902
+ async function distillConversation(params) {
11903
+ const { messages, conversationId, currentSummary, summarizerModel, toolCallToArtifactMap } = params;
11904
+ try {
11905
+ const modelToUse = summarizerModel;
11906
+ if (!modelToUse?.model?.trim()) {
11907
+ throw new Error("Summarizer model is required");
11908
+ }
11909
+ const model = agentsCore.ModelFactory.createModel(modelToUse);
11910
+ const existingSummaryContext = currentSummary ? `**Current summary:**
11911
+
11912
+ \`\`\`json
11913
+ ${JSON.stringify(currentSummary, null, 2)}
11914
+ \`\`\`` : "**Current summary:** None (first distillation)";
11915
+ const formattedMessages = messages.map((msg) => {
11916
+ const parts2 = [];
11917
+ if (typeof msg.content === "string") {
11918
+ parts2.push(msg.content);
11919
+ } else if (Array.isArray(msg.content)) {
11920
+ for (const block of msg.content) {
11921
+ if (block.type === "text") {
11922
+ parts2.push(block.text);
11923
+ } else if (block.type === "tool-call") {
11924
+ parts2.push(
11925
+ `[TOOL CALL] ${block.toolName}(${JSON.stringify(block.input)}) [ID: ${block.toolCallId}]`
11926
+ );
11927
+ } else if (block.type === "tool-result") {
11928
+ const artifactId = toolCallToArtifactMap?.[block.toolCallId];
11929
+ const artifactInfo = artifactId ? `
11930
+ [ARTIFACT CREATED: ${artifactId}]` : "";
11931
+ parts2.push(
11932
+ `[TOOL RESULT] ${block.toolName} [ID: ${block.toolCallId}]${artifactInfo}
11933
+ Result: ${JSON.stringify(block.result)}`
11934
+ );
11935
+ }
11936
+ }
11937
+ } else if (msg.content?.text) {
11938
+ parts2.push(msg.content.text);
11939
+ }
11940
+ return parts2.length > 0 ? `${msg.role || "system"}: ${parts2.join("\n")}` : "";
11941
+ }).filter((line) => line.trim().length > 0).join("\n\n");
11942
+ logger10.debug(
11943
+ {
11944
+ conversationId,
11945
+ messageCount: messages.length,
11946
+ formattedLength: formattedMessages.length,
11947
+ sampleMessages: messages.slice(0, 2).map((m4) => ({ role: m4.role, contentType: typeof m4.content, hasContent: !!m4.content }))
11948
+ },
11949
+ "Formatting messages for distillation"
11950
+ );
11951
+ const prompt = `You are a conversation summarization assistant. Your job is to create or update a compact, structured summary that captures VALUABLE CONTENT and FINDINGS, not just operational details.
11952
+
11953
+ ${existingSummaryContext}
11954
+
11955
+ **Messages to summarize:**
11956
+
11957
+ \`\`\`text
11958
+ ${formattedMessages}
11959
+ \`\`\`
11960
+
11961
+ Create/update a summary using this exact JSON schema:
11962
+
11963
+ \`\`\`json
11964
+ {
11965
+ "type": "conversation_summary_v1",
11966
+ "session_id": "<conversationId>",
11967
+ "high_level": "<1\u20133 sentences capturing what was discovered and learned>",
11968
+ "user_intent": "<current main goal>",
11969
+ "decisions": ["<concrete decisions made>"],
11970
+ "open_questions": ["<unresolved issues>"],
11971
+ "next_steps": {
11972
+ "for_agent": ["<what agent should do>"],
11973
+ "for_user": ["<what user should do>"]
11974
+ },
11975
+ "related_artifacts": [
11976
+ {
11977
+ "id": "<artifact_id>",
11978
+ "name": "<descriptive name>",
11979
+ "tool_name": "<tool_name>",
11980
+ "tool_call_id": "<tool_call_id>",
11981
+ "content_type": "<search_results|api_response|documentation>",
11982
+ "key_findings": ["<important finding 1>", "<important finding 2>"]
11983
+ }
11984
+ ]
11985
+ }
11986
+ \`\`\`
11987
+
11988
+ **CRITICAL RULES - FOCUS ON CONTENT NOT OPERATIONS:**
11989
+ \u{1F3AF} **EXTRACT VALUABLE FINDINGS**: Capture the actual information discovered, data retrieved, insights gained
11990
+ \u{1F3AF} **IGNORE OPERATIONAL DETAILS**: Don't mention "tool was used", "artifact was created", "messages were compressed"
11991
+ \u{1F3AF} **PRESERVE SUBSTANCE**: Include specific facts, features, capabilities, configurations, results found
11992
+ \u{1F3AF} **BUILD KNOWLEDGE**: When updating existing summary, ADD new discoveries to existing knowledge
11993
+ \u{1F3AF} **BE CONCRETE**: Use specific details from tool results, not generic descriptions
11994
+ \u{1F3AF} **BE CONCISE**: Keep ALL fields brief - you are compressing to save context, not writing a report
11995
+ \u{1F3AF} **LIMIT NEXT STEPS**: Agent has already done substantial work - suggest minimal follow-up actions only
11996
+
11997
+ **Examples:**
11998
+ \u274C BAD: "Assistant used search tool and created artifacts"
11999
+ \u2705 GOOD: "Inkeep supports streaming structured objects, OpenAI-compatible APIs, analytics logging, and Zendesk integration"
12000
+
12001
+ \u274C BAD: "Tool calls were made to gather information"
12002
+ \u2705 GOOD: "Platform includes 10 feature categories: chat widgets, knowledge base, analytics, integrations, theming options"
12003
+
12004
+ **Focus on WHAT WAS LEARNED, not HOW IT WAS LEARNED**
12005
+
12006
+ Return **only** valid JSON.`;
12007
+ const { object: summary } = await ai.generateObject({
12008
+ model,
12009
+ prompt,
12010
+ schema: ConversationSummarySchema
12011
+ });
12012
+ summary.session_id = conversationId;
12013
+ logger10.info(
12014
+ {
12015
+ conversationId,
12016
+ messageCount: messages.length,
12017
+ artifactsCount: summary.related_artifacts?.length || 0,
12018
+ decisionsCount: summary.decisions.length
12019
+ },
12020
+ "Successfully distilled conversation"
12021
+ );
12022
+ return summary;
12023
+ } catch (error) {
12024
+ logger10.error(
12025
+ {
12026
+ conversationId,
12027
+ messageCount: messages.length,
12028
+ error: error instanceof Error ? error.message : "Unknown error"
12029
+ },
12030
+ "Failed to distill conversation"
12031
+ );
12032
+ return {
12033
+ type: "conversation_summary_v1",
12034
+ session_id: conversationId,
12035
+ high_level: "Ongoing conversation session",
12036
+ user_intent: "Continue working on current task",
12037
+ related_artifacts: [],
12038
+ decisions: [],
12039
+ open_questions: ["Review recent work and determine next steps"],
12040
+ next_steps: {
12041
+ for_agent: ["Continue with current task"],
12042
+ for_user: ["Provide additional guidance if needed"]
12043
+ }
12044
+ };
12045
+ }
12046
+ }
12047
+
12048
+ // src/services/MidGenerationCompressor.ts
12049
+ var logger11 = agentsCore.getLogger("MidGenerationCompressor");
12050
+ function getCompressionConfigFromEnv() {
12051
+ return {
12052
+ hardLimit: parseInt(process.env.AGENTS_COMPRESSION_HARD_LIMIT || "120000"),
12053
+ safetyBuffer: parseInt(process.env.AGENTS_COMPRESSION_SAFETY_BUFFER || "20000"),
12054
+ enabled: process.env.AGENTS_COMPRESSION_ENABLED !== "false"
12055
+ // Default enabled
12056
+ };
12057
+ }
12058
+ var MidGenerationCompressor = class {
12059
+ // Track cumulative summary across compression cycles
12060
+ constructor(sessionId, conversationId, tenantId, projectId, config, summarizerModel, baseModel) {
12061
+ this.sessionId = sessionId;
12062
+ this.conversationId = conversationId;
12063
+ this.tenantId = tenantId;
12064
+ this.projectId = projectId;
12065
+ this.config = config;
12066
+ this.summarizerModel = summarizerModel;
12067
+ this.baseModel = baseModel;
12068
+ }
12069
+ shouldCompress = false;
12070
+ processedToolCalls = /* @__PURE__ */ new Set();
12071
+ // Track already compressed tool call IDs
12072
+ lastProcessedMessageIndex = 0;
12073
+ // Track where we left off in message processing
12074
+ cumulativeSummary = null;
12075
+ /**
12076
+ * Get the hard limit for compression decisions
12077
+ */
12078
+ getHardLimit() {
12079
+ return this.config.hardLimit;
12080
+ }
12081
+ /**
12082
+ * Estimate tokens (4 chars = 1 token)
12083
+ */
12084
+ estimateTokens(content) {
12085
+ const text = typeof content === "string" ? content : JSON.stringify(content);
12086
+ return Math.ceil(text.length / 4);
12087
+ }
12088
+ /**
12089
+ * Calculate total context size
12090
+ */
12091
+ calculateContextSize(messages) {
12092
+ const messageTokens = messages.reduce((total, msg) => {
12093
+ let msgTokens = 0;
12094
+ if (Array.isArray(msg.content)) {
12095
+ for (const block of msg.content) {
12096
+ if (block.type === "text") {
12097
+ msgTokens += this.estimateTokens(block.text || "");
12098
+ } else if (block.type === "tool-call") {
12099
+ msgTokens += this.estimateTokens(
12100
+ JSON.stringify({
12101
+ toolCallId: block.toolCallId,
12102
+ toolName: block.toolName,
12103
+ input: block.input
12104
+ })
12105
+ );
12106
+ } else if (block.type === "tool-result") {
12107
+ msgTokens += this.estimateTokens(
12108
+ JSON.stringify({
12109
+ toolCallId: block.toolCallId,
12110
+ toolName: block.toolName,
12111
+ output: block.output
12112
+ })
12113
+ );
12114
+ }
12115
+ }
12116
+ } else if (typeof msg.content === "string") {
12117
+ msgTokens += this.estimateTokens(msg.content);
12118
+ } else if (msg.content) {
12119
+ msgTokens += this.estimateTokens(JSON.stringify(msg.content));
12120
+ }
12121
+ return total + msgTokens;
12122
+ }, 0);
12123
+ return messageTokens;
12124
+ }
12125
+ /**
12126
+ * Manual compression request from LLM tool
12127
+ */
12128
+ requestManualCompression(reason) {
12129
+ this.shouldCompress = true;
12130
+ logger11.info(
12131
+ {
12132
+ sessionId: this.sessionId,
12133
+ reason: reason || "Manual request from LLM"
12134
+ },
12135
+ "Manual compression requested"
12136
+ );
12137
+ }
12138
+ /**
12139
+ * Check if compression is needed (either automatic or manual)
12140
+ */
12141
+ isCompressionNeeded(messages) {
12142
+ if (this.shouldCompress) return true;
12143
+ const contextSize = this.calculateContextSize(messages);
12144
+ const remaining = this.config.hardLimit - contextSize;
12145
+ return remaining <= this.config.safetyBuffer;
12146
+ }
12147
+ /**
12148
+ * Perform compression: save all tool results as artifacts and create summary
12149
+ */
12150
+ async compress(messages) {
12151
+ const contextSizeBefore = this.calculateContextSize(messages);
12152
+ logger11.info(
12153
+ {
12154
+ sessionId: this.sessionId,
12155
+ messageCount: messages.length,
12156
+ contextSize: contextSizeBefore
12157
+ },
12158
+ "COMPRESSION: Starting compression"
12159
+ );
12160
+ const toolResultCount = messages.reduce((count, msg) => {
12161
+ if (Array.isArray(msg.content)) {
12162
+ return count + msg.content.filter((block) => block.type === "tool-result").length;
12163
+ }
12164
+ return count;
12165
+ }, 0);
12166
+ logger11.debug({ toolResultCount }, "Tool results found for compression");
12167
+ const toolCallToArtifactMap = await this.saveToolResultsAsArtifacts(messages);
12168
+ const summary = await this.createConversationSummary(messages, toolCallToArtifactMap);
12169
+ const contextSizeAfter = this.estimateTokens(JSON.stringify(summary));
12170
+ const session = agentSessionManager.getSession(this.sessionId);
12171
+ if (session) {
12172
+ const wasManualRequest = this.shouldCompress;
12173
+ session.recordEvent("compression", this.sessionId, {
12174
+ reason: wasManualRequest ? "manual" : "automatic",
12175
+ messageCount: messages.length,
12176
+ artifactCount: Object.keys(toolCallToArtifactMap).length,
12177
+ contextSizeBefore,
12178
+ contextSizeAfter,
12179
+ compressionType: "mid_generation"
12180
+ });
12181
+ }
12182
+ this.shouldCompress = false;
12183
+ logger11.info(
12184
+ {
12185
+ sessionId: this.sessionId,
12186
+ artifactsCreated: Object.keys(toolCallToArtifactMap).length,
12187
+ messageCount: messages.length,
12188
+ contextSizeBefore,
12189
+ contextSizeAfter,
12190
+ artifactIds: Object.values(toolCallToArtifactMap)
12191
+ },
12192
+ "COMPRESSION: Compression completed successfully"
12193
+ );
12194
+ return { artifactIds: Object.values(toolCallToArtifactMap), summary };
12195
+ }
12196
+ /**
12197
+ * 1. Save NEW tool results as artifacts (only process messages since last compression)
12198
+ */
12199
+ async saveToolResultsAsArtifacts(messages) {
12200
+ const session = agentSessionManager.getSession(this.sessionId);
12201
+ if (!session) {
12202
+ throw new Error(`No session found: ${this.sessionId}`);
12203
+ }
12204
+ const toolCallToArtifactMap = {};
12205
+ const newMessages = messages.slice(this.lastProcessedMessageIndex);
12206
+ logger11.debug(
12207
+ {
12208
+ totalMessages: messages.length,
12209
+ newMessages: newMessages.length,
12210
+ startIndex: this.lastProcessedMessageIndex
12211
+ },
12212
+ "Starting compression artifact processing"
12213
+ );
12214
+ for (const message of newMessages) {
12215
+ if (Array.isArray(message.content)) {
12216
+ for (const block of message.content) {
12217
+ if (block.type === "tool-result") {
12218
+ if (block.toolName === "get_reference_artifact" || block.toolName === "thinking_complete") {
12219
+ logger11.debug(
12220
+ {
12221
+ toolCallId: block.toolCallId,
12222
+ toolName: block.toolName
12223
+ },
12224
+ "Skipping special tool - not creating artifacts"
12225
+ );
12226
+ this.processedToolCalls.add(block.toolCallId);
12227
+ continue;
12228
+ }
12229
+ if (this.processedToolCalls.has(block.toolCallId)) {
12230
+ logger11.debug(
12231
+ {
12232
+ toolCallId: block.toolCallId,
12233
+ toolName: block.toolName
12234
+ },
12235
+ "Skipping already processed tool call"
12236
+ );
12237
+ continue;
12238
+ }
12239
+ const artifactId = `compress_${block.toolName || "tool"}_${block.toolCallId || Date.now()}_${crypto2.randomUUID().slice(0, 8)}`;
12240
+ logger11.debug(
12241
+ {
12242
+ artifactId,
12243
+ toolName: block.toolName,
12244
+ toolCallId: block.toolCallId
12245
+ },
12246
+ "Saving compression artifact"
12247
+ );
12248
+ let toolInput = null;
12249
+ if (Array.isArray(message.content)) {
12250
+ const toolCall = message.content.find(
12251
+ (b3) => b3.type === "tool-call" && b3.toolCallId === block.toolCallId
12252
+ );
12253
+ toolInput = toolCall?.input;
12254
+ }
12255
+ const cleanToolResult = this.removeStructureHints(block.output);
12256
+ const toolResultData = {
12257
+ toolName: block.toolName,
12258
+ toolInput,
12259
+ toolResult: cleanToolResult,
12260
+ compressedAt: (/* @__PURE__ */ new Date()).toISOString()
12261
+ };
12262
+ if (this.isEmpty(toolResultData)) {
12263
+ logger11.debug(
12264
+ {
12265
+ toolName: block.toolName,
12266
+ toolCallId: block.toolCallId
12267
+ },
12268
+ "Skipping empty tool result"
12269
+ );
12270
+ continue;
12271
+ }
12272
+ const artifactData = {
12273
+ artifactId,
12274
+ taskId: `task_${this.conversationId}-${this.sessionId}`,
12275
+ toolCallId: block.toolCallId,
12276
+ artifactType: "tool_result",
12277
+ pendingGeneration: true,
12278
+ // Triggers LLM-generated name/description
12279
+ tenantId: this.tenantId,
12280
+ projectId: this.projectId,
12281
+ contextId: this.conversationId,
12282
+ subAgentId: this.sessionId,
12283
+ metadata: {
12284
+ toolCallId: block.toolCallId,
12285
+ toolName: block.toolName,
12286
+ compressionReason: "mid_generation_context_limit"
12287
+ },
12288
+ // Pass data in the format expected by ArtifactSavedData interface
12289
+ summaryData: {
12290
+ toolName: block.toolName,
12291
+ note: "Compressed tool result - see full data for details"
12292
+ },
12293
+ data: toolResultData
12294
+ // Full tool result data
12295
+ };
12296
+ const fullData = artifactData.data;
12297
+ const hasFullData = fullData && typeof fullData === "object" && Object.keys(fullData).length > 0 && // Check if toolResult specifically has content
12298
+ fullData.toolResult && (typeof fullData.toolResult !== "object" || Object.keys(fullData.toolResult).length > 0);
12299
+ if (!hasFullData) {
12300
+ logger11.debug(
12301
+ {
12302
+ artifactId,
12303
+ toolName: block.toolName,
12304
+ toolCallId: block.toolCallId
12305
+ },
12306
+ "Skipping empty compression artifact"
12307
+ );
12308
+ continue;
12309
+ }
12310
+ session.recordEvent("artifact_saved", this.sessionId, artifactData);
12311
+ this.processedToolCalls.add(block.toolCallId);
12312
+ toolCallToArtifactMap[block.toolCallId] = artifactId;
12313
+ }
12314
+ }
12315
+ }
12316
+ }
12317
+ this.lastProcessedMessageIndex = messages.length;
12318
+ logger11.debug(
12319
+ {
12320
+ totalArtifactsCreated: Object.keys(toolCallToArtifactMap).length,
12321
+ newMessageIndex: this.lastProcessedMessageIndex
12322
+ },
12323
+ "Compression artifact processing completed"
12324
+ );
12325
+ return toolCallToArtifactMap;
12326
+ }
12327
+ /**
12328
+ * 3. Create conversation summary with artifact references
12329
+ */
12330
+ async createConversationSummary(messages, toolCallToArtifactMap) {
12331
+ const textMessages = this.extractTextMessages(messages, toolCallToArtifactMap);
12332
+ logger11.debug(
12333
+ {
12334
+ sessionId: this.sessionId,
12335
+ messageCount: messages.length,
12336
+ textMessageCount: textMessages.length,
12337
+ artifactCount: Object.keys(toolCallToArtifactMap).length,
12338
+ sampleMessages: messages.slice(0, 2).map((m4) => ({
12339
+ role: m4.role,
12340
+ contentType: typeof m4.content,
12341
+ contentPreview: typeof m4.content === "string" ? m4.content.substring(0, 100) : "array/object"
12342
+ }))
12343
+ },
12344
+ "Starting distillation with debug info"
12345
+ );
12346
+ const summary = await distillConversation({
12347
+ messages,
12348
+ conversationId: this.conversationId,
12349
+ currentSummary: this.cumulativeSummary,
12350
+ // Pass existing summary for cumulative building
12351
+ summarizerModel: this.summarizerModel,
12352
+ toolCallToArtifactMap
12353
+ // Pass mapping for message formatting
12354
+ });
12355
+ this.cumulativeSummary = summary;
12356
+ logger11.debug(
12357
+ {
12358
+ sessionId: this.sessionId,
12359
+ summaryGenerated: !!summary,
12360
+ summaryHighLevel: summary?.high_level,
12361
+ artifactsCount: summary?.related_artifacts?.length || 0
12362
+ },
12363
+ "Distillation completed"
12364
+ );
12365
+ return {
12366
+ text_messages: textMessages,
12367
+ summary
12368
+ };
12369
+ }
12370
+ /**
12371
+ * Extract text messages and convert tool calls to descriptive text
12372
+ * Avoids API tool-call/tool-result pairing issues while preserving context
12373
+ */
12374
+ extractTextMessages(messages, toolCallToArtifactMap) {
12375
+ const textMessages = [];
12376
+ const toolCallPairs = /* @__PURE__ */ new Map();
12377
+ for (const message of messages) {
12378
+ if (Array.isArray(message.content)) {
12379
+ for (const block of message.content) {
12380
+ if (block.type === "tool-call") {
12381
+ if (!toolCallPairs.has(block.toolCallId)) {
12382
+ toolCallPairs.set(block.toolCallId, { call: block, result: null });
12383
+ } else {
12384
+ toolCallPairs.get(block.toolCallId).call = block;
12385
+ }
12386
+ } else if (block.type === "tool-result") {
12387
+ if (!toolCallPairs.has(block.toolCallId)) {
12388
+ toolCallPairs.set(block.toolCallId, { call: null, result: block });
12389
+ } else {
12390
+ toolCallPairs.get(block.toolCallId).result = block;
12391
+ }
12392
+ }
12393
+ }
12394
+ }
12395
+ }
12396
+ for (const message of messages) {
12397
+ if (message.role === "assistant" && typeof message.content === "string") {
12398
+ textMessages.push({
12399
+ role: message.role,
12400
+ content: message.content
12401
+ });
12402
+ } else if (message.role === "assistant" && Array.isArray(message.content)) {
12403
+ const textParts = [];
12404
+ const toolCallsInMessage = /* @__PURE__ */ new Set();
12405
+ const preservedBlocks = [];
12406
+ for (const block of message.content) {
12407
+ if (block.type === "text") {
12408
+ textParts.push(block.text);
12409
+ } else if (block.type === "tool-call" && block.toolName === "thinking_complete") ; else if (block.type === "tool-call") {
12410
+ toolCallsInMessage.add(block.toolCallId);
12411
+ }
12412
+ }
12413
+ for (const toolCallId of toolCallsInMessage) {
12414
+ const pair = toolCallPairs.get(toolCallId);
12415
+ const artifactId = toolCallToArtifactMap[toolCallId];
12416
+ if (pair?.call) {
12417
+ const args2 = JSON.stringify(pair.call.input);
12418
+ const artifactText = artifactId ? ` Results compressed into artifact: ${artifactId}.` : " Results were compressed but not saved.";
12419
+ textParts.push(`I called ${pair.call.toolName}(${args2}).${artifactText}`);
12420
+ }
12421
+ }
12422
+ if (preservedBlocks.length > 0 && textParts.length > 0) {
12423
+ const content = [...preservedBlocks];
12424
+ if (textParts.length > 0) {
12425
+ content.push({ type: "text", text: textParts.join("\n\n") });
12426
+ }
12427
+ textMessages.push({
12428
+ role: message.role,
12429
+ content
12430
+ });
12431
+ } else if (preservedBlocks.length > 0) {
12432
+ textMessages.push({
12433
+ role: message.role,
12434
+ content: preservedBlocks
12435
+ });
12436
+ } else if (textParts.length > 0) {
12437
+ textMessages.push({
12438
+ role: message.role,
12439
+ content: textParts.join("\n\n")
12440
+ });
12441
+ }
12442
+ }
12443
+ }
12444
+ return textMessages;
12445
+ }
12446
+ // Removed focus hint helper methods - no longer needed since tool results are in formatted messages
12447
+ /**
12448
+ * Check if tool result data is effectively empty
12449
+ */
12450
+ isEmpty(toolResultData) {
12451
+ if (!toolResultData || typeof toolResultData !== "object") {
12452
+ return true;
12453
+ }
12454
+ const { toolResult } = toolResultData;
12455
+ if (!toolResult) {
12456
+ return true;
12457
+ }
12458
+ if (typeof toolResult === "object" && !Array.isArray(toolResult)) {
12459
+ const keys = Object.keys(toolResult);
12460
+ if (keys.length === 0) {
12461
+ return true;
12462
+ }
12463
+ return keys.every((key) => {
12464
+ const value = toolResult[key];
12465
+ if (value === null || value === void 0 || value === "") {
12466
+ return true;
12467
+ }
12468
+ if (Array.isArray(value) && value.length === 0) {
12469
+ return true;
12470
+ }
12471
+ if (typeof value === "object" && Object.keys(value).length === 0) {
12472
+ return true;
12473
+ }
12474
+ return false;
12475
+ });
12476
+ }
12477
+ if (Array.isArray(toolResult) && toolResult.length === 0) {
12478
+ return true;
12479
+ }
12480
+ if (typeof toolResult === "string" && toolResult.trim() === "") {
12481
+ return true;
12482
+ }
12483
+ return false;
12484
+ }
12485
+ /**
12486
+ * Recursively remove _structureHints from an object
12487
+ */
12488
+ removeStructureHints(obj) {
12489
+ if (obj === null || obj === void 0) {
12490
+ return obj;
12491
+ }
12492
+ if (Array.isArray(obj)) {
12493
+ return obj.map((item) => this.removeStructureHints(item));
12494
+ }
12495
+ if (typeof obj === "object") {
12496
+ const cleaned = {};
12497
+ for (const [key, value] of Object.entries(obj)) {
12498
+ if (key !== "_structureHints") {
12499
+ cleaned[key] = this.removeStructureHints(value);
12500
+ }
12501
+ }
12502
+ return cleaned;
12503
+ }
12504
+ return obj;
12505
+ }
12506
+ /**
12507
+ * Get current state for debugging
12508
+ */
12509
+ getState() {
12510
+ return {
12511
+ shouldCompress: this.shouldCompress,
12512
+ config: this.config
12513
+ };
12514
+ }
12515
+ /**
12516
+ * Get the current compression summary
12517
+ */
12518
+ getCompressionSummary() {
12519
+ return this.cumulativeSummary;
12520
+ }
12521
+ };
12522
+
11794
12523
  // src/services/PendingToolApprovalManager.ts
11795
12524
  init_logger();
11796
- var logger10 = agentsCore.getLogger("PendingToolApprovalManager");
12525
+ var logger12 = agentsCore.getLogger("PendingToolApprovalManager");
11797
12526
  var APPROVAL_CLEANUP_INTERVAL_MS = 2 * 60 * 1e3;
11798
12527
  var APPROVAL_TIMEOUT_MS = 10 * 60 * 1e3;
11799
12528
  var PendingToolApprovalManager = class _PendingToolApprovalManager {
@@ -11832,7 +12561,7 @@ var PendingToolApprovalManager = class _PendingToolApprovalManager {
11832
12561
  timeoutId
11833
12562
  };
11834
12563
  this.pendingApprovals.set(toolCallId, approval);
11835
- logger10.info(
12564
+ logger12.info(
11836
12565
  {
11837
12566
  toolCallId,
11838
12567
  toolName,
@@ -11849,10 +12578,10 @@ var PendingToolApprovalManager = class _PendingToolApprovalManager {
11849
12578
  approveToolCall(toolCallId) {
11850
12579
  const approval = this.pendingApprovals.get(toolCallId);
11851
12580
  if (!approval) {
11852
- logger10.warn({ toolCallId }, "Tool approval not found or already processed");
12581
+ logger12.warn({ toolCallId }, "Tool approval not found or already processed");
11853
12582
  return false;
11854
12583
  }
11855
- logger10.info(
12584
+ logger12.info(
11856
12585
  {
11857
12586
  toolCallId,
11858
12587
  toolName: approval.toolName,
@@ -11871,10 +12600,10 @@ var PendingToolApprovalManager = class _PendingToolApprovalManager {
11871
12600
  denyToolCall(toolCallId, reason) {
11872
12601
  const approval = this.pendingApprovals.get(toolCallId);
11873
12602
  if (!approval) {
11874
- logger10.warn({ toolCallId }, "Tool approval not found or already processed");
12603
+ logger12.warn({ toolCallId }, "Tool approval not found or already processed");
11875
12604
  return false;
11876
12605
  }
11877
- logger10.info(
12606
+ logger12.info(
11878
12607
  {
11879
12608
  toolCallId,
11880
12609
  toolName: approval.toolName,
@@ -11906,7 +12635,7 @@ var PendingToolApprovalManager = class _PendingToolApprovalManager {
11906
12635
  }
11907
12636
  }
11908
12637
  if (cleanedUp > 0) {
11909
- logger10.info({ cleanedUp }, "Cleaned up expired tool approvals");
12638
+ logger12.info({ cleanedUp }, "Cleaned up expired tool approvals");
11910
12639
  }
11911
12640
  }
11912
12641
  /**
@@ -11930,7 +12659,7 @@ var pendingToolApprovalManager = PendingToolApprovalManager.getInstance();
11930
12659
 
11931
12660
  // src/services/ResponseFormatter.ts
11932
12661
  init_logger();
11933
- var logger11 = agentsCore.getLogger("ResponseFormatter");
12662
+ var logger13 = agentsCore.getLogger("ResponseFormatter");
11934
12663
  var ResponseFormatter = class {
11935
12664
  artifactParser;
11936
12665
  subAgentId;
@@ -11989,7 +12718,7 @@ var ResponseFormatter = class {
11989
12718
  return { parts: parts2 };
11990
12719
  } catch (error) {
11991
12720
  agentsCore.setSpanWithError(span, error instanceof Error ? error : new Error(String(error)));
11992
- logger11.error({ error, responseObject }, "Error formatting object response");
12721
+ logger13.error({ error, responseObject }, "Error formatting object response");
11993
12722
  return {
11994
12723
  parts: [{ kind: "data", data: responseObject }]
11995
12724
  };
@@ -12045,7 +12774,7 @@ var ResponseFormatter = class {
12045
12774
  return { parts: parts2 };
12046
12775
  } catch (error) {
12047
12776
  agentsCore.setSpanWithError(span, error instanceof Error ? error : new Error(String(error)));
12048
- logger11.error({ error, responseText }, "Error formatting response");
12777
+ logger13.error({ error, responseText }, "Error formatting response");
12049
12778
  return { text: responseText };
12050
12779
  } finally {
12051
12780
  span.end();
@@ -12484,7 +13213,7 @@ function parseEmbeddedJson(data) {
12484
13213
 
12485
13214
  // src/a2a/client.ts
12486
13215
  init_logger();
12487
- var logger12 = agentsCore.getLogger("a2aClient");
13216
+ var logger14 = agentsCore.getLogger("a2aClient");
12488
13217
  var DEFAULT_BACKOFF = {
12489
13218
  initialInterval: 500,
12490
13219
  maxInterval: 6e4,
@@ -12691,7 +13420,7 @@ var A2AClient = class {
12691
13420
  try {
12692
13421
  const res = await fn2();
12693
13422
  if (attempt > 0) {
12694
- logger12.info(
13423
+ logger14.info(
12695
13424
  {
12696
13425
  attempts: attempt + 1,
12697
13426
  elapsedTime: Date.now() - start2
@@ -12706,7 +13435,7 @@ var A2AClient = class {
12706
13435
  }
12707
13436
  const elapsed = Date.now() - start2;
12708
13437
  if (elapsed > maxElapsedTime) {
12709
- logger12.warn(
13438
+ logger14.warn(
12710
13439
  {
12711
13440
  attempts: attempt + 1,
12712
13441
  elapsedTime: elapsed,
@@ -12727,7 +13456,7 @@ var A2AClient = class {
12727
13456
  retryInterval = initialInterval * attempt ** exponent + Math.random() * 1e3;
12728
13457
  }
12729
13458
  const delayMs = Math.min(retryInterval, maxInterval);
12730
- logger12.info(
13459
+ logger14.info(
12731
13460
  {
12732
13461
  attempt: attempt + 1,
12733
13462
  delayMs,
@@ -12818,7 +13547,7 @@ var A2AClient = class {
12818
13547
  });
12819
13548
  }
12820
13549
  if (rpcResponse.id !== requestId2) {
12821
- logger12.warn(
13550
+ logger14.warn(
12822
13551
  {
12823
13552
  method,
12824
13553
  expectedId: requestId2,
@@ -13015,7 +13744,7 @@ var A2AClient = class {
13015
13744
  try {
13016
13745
  while (true) {
13017
13746
  const { done, value } = await reader.read();
13018
- logger12.info({ done, value }, "parseA2ASseStream");
13747
+ logger14.info({ done, value }, "parseA2ASseStream");
13019
13748
  if (done) {
13020
13749
  if (eventDataBuffer.trim()) {
13021
13750
  const result = this._processSseEventData(
@@ -13106,7 +13835,7 @@ init_execution_limits();
13106
13835
  init_conversations();
13107
13836
  init_dbClient();
13108
13837
  init_logger();
13109
- var logger13 = agentsCore.getLogger("relationships Tools");
13838
+ var logger15 = agentsCore.getLogger("relationships Tools");
13110
13839
  var A2A_RETRY_STATUS_CODES = ["429", "500", "502", "503", "504"];
13111
13840
  var generateTransferToolDescription = (config) => {
13112
13841
  let toolsSection = "";
@@ -13131,9 +13860,9 @@ Can Delegate To:
13131
13860
  ${delegateList}`;
13132
13861
  }
13133
13862
  if (config.tools && config.tools.length > 0) {
13134
- const toolDescriptions = config.tools.map((tool3) => {
13135
- const toolsList = tool3.availableTools?.map((t2) => ` - ${t2.name}: ${t2.description || "No description available"}`).join("\n") || "";
13136
- return `MCP Server: ${tool3.name}
13863
+ const toolDescriptions = config.tools.map((tool4) => {
13864
+ const toolsList = tool4.availableTools?.map((t2) => ` - ${t2.name}: ${t2.description || "No description available"}`).join("\n") || "";
13865
+ return `MCP Server: ${tool4.name}
13137
13866
  ${toolsList}`;
13138
13867
  }).join("\n\n");
13139
13868
  toolsSection = `
@@ -13159,9 +13888,9 @@ var generateDelegateToolDescription = (delegateRelation) => {
13159
13888
  if (delegateRelation.type === "internal" && "tools" in config) {
13160
13889
  const agentConfig = config;
13161
13890
  if (agentConfig.tools && agentConfig.tools.length > 0) {
13162
- const toolDescriptions = agentConfig.tools.map((tool3) => {
13163
- const toolsList = tool3.availableTools?.map((t2) => ` - ${t2.name}: ${t2.description || "No description available"}`).join("\n") || "";
13164
- return `MCP Server: ${tool3.name}
13891
+ const toolDescriptions = agentConfig.tools.map((tool4) => {
13892
+ const toolsList = tool4.availableTools?.map((t2) => ` - ${t2.name}: ${t2.description || "No description available"}`).join("\n") || "";
13893
+ return `MCP Server: ${tool4.name}
13165
13894
  ${toolsList}`;
13166
13895
  }).join("\n\n");
13167
13896
  toolsSection = `
@@ -13217,7 +13946,7 @@ var createTransferToAgentTool = ({
13217
13946
  [agentsCore.SPAN_KEYS.TRANSFER_TO_SUB_AGENT_ID]: transferConfig.id ?? "unknown"
13218
13947
  });
13219
13948
  }
13220
- logger13.info(
13949
+ logger15.info(
13221
13950
  {
13222
13951
  transferTo: transferConfig.id ?? "unknown",
13223
13952
  fromSubAgent: callingAgentId
@@ -13238,7 +13967,7 @@ var createTransferToAgentTool = ({
13238
13967
  fromSubAgentId: callingAgentId
13239
13968
  // Include the calling agent ID for tracking
13240
13969
  };
13241
- logger13.info(
13970
+ logger15.info(
13242
13971
  {
13243
13972
  transferResult,
13244
13973
  transferResultKeys: Object.keys(transferResult)
@@ -13385,7 +14114,7 @@ function createDelegateToAgentTool({
13385
14114
  ...isInternal ? { fromSubAgentId: callingAgentId } : { fromExternalAgentId: callingAgentId }
13386
14115
  }
13387
14116
  };
13388
- logger13.info({ messageToSend }, "messageToSend");
14117
+ logger15.info({ messageToSend }, "messageToSend");
13389
14118
  await agentsCore.createMessage(dbClient_default)({
13390
14119
  id: agentsCore.generateId(),
13391
14120
  tenantId,
@@ -13448,7 +14177,7 @@ function createDelegateToAgentTool({
13448
14177
 
13449
14178
  // src/agents/SystemPromptBuilder.ts
13450
14179
  init_logger();
13451
- var logger14 = agentsCore.getLogger("SystemPromptBuilder");
14180
+ var logger16 = agentsCore.getLogger("SystemPromptBuilder");
13452
14181
  var SystemPromptBuilder = class {
13453
14182
  constructor(version, versionConfig) {
13454
14183
  this.version = version;
@@ -13464,12 +14193,12 @@ var SystemPromptBuilder = class {
13464
14193
  this.templates.set(name2, content);
13465
14194
  }
13466
14195
  this.loaded = true;
13467
- logger14.debug(
14196
+ logger16.debug(
13468
14197
  { templateCount: this.templates.size, version: this.version },
13469
14198
  `Loaded ${this.templates.size} templates for version ${this.version}`
13470
14199
  );
13471
14200
  } catch (error) {
13472
- logger14.error({ error }, `Failed to load templates for version ${this.version}`);
14201
+ logger16.error({ error }, `Failed to load templates for version ${this.version}`);
13473
14202
  throw new Error(`Template loading failed: ${error}`);
13474
14203
  }
13475
14204
  }
@@ -13860,6 +14589,8 @@ CREATING ARTIFACTS (SERVES AS CITATION):
13860
14589
  Use the artifact:create annotation to extract data from tool results. The creation itself serves as a citation.
13861
14590
  Format: <artifact:create id="unique-id" tool="tool_call_id" type="TypeName" base="selector.path" details='{"key":"jmespath_selector"}' />
13862
14591
 
14592
+ \u26A0\uFE0F IMPORTANT: Do not create artifacts from get_reference_artifact tool results - these are already compressed artifacts being retrieved. Only create artifacts from original research and analysis tools.
14593
+
13863
14594
  \u{1F6A8} CRITICAL: DETAILS PROPS USE JMESPATH SELECTORS, NOT LITERAL VALUES! \u{1F6A8}
13864
14595
 
13865
14596
  \u274C WRONG - Using literal values:
@@ -14078,27 +14809,27 @@ ${creationInstructions}
14078
14809
  if (tools.length === 0) {
14079
14810
  return '<available_tools description="No tools are currently available"></available_tools>';
14080
14811
  }
14081
- const toolsXml = tools.map((tool3) => this.generateToolXml(templates, tool3)).join("\n ");
14812
+ const toolsXml = tools.map((tool4) => this.generateToolXml(templates, tool4)).join("\n ");
14082
14813
  return `<available_tools description="These are the tools available for you to use to accomplish tasks">
14083
14814
  ${toolsXml}
14084
14815
  </available_tools>`;
14085
14816
  }
14086
- generateToolXml(templates, tool3) {
14817
+ generateToolXml(templates, tool4) {
14087
14818
  const toolTemplate = templates.get("tool");
14088
14819
  if (!toolTemplate) {
14089
14820
  throw new Error("Tool template not loaded");
14090
14821
  }
14091
14822
  let toolXml = toolTemplate;
14092
- toolXml = toolXml.replace("{{TOOL_NAME}}", tool3.name);
14823
+ toolXml = toolXml.replace("{{TOOL_NAME}}", tool4.name);
14093
14824
  toolXml = toolXml.replace(
14094
14825
  "{{TOOL_DESCRIPTION}}",
14095
- tool3.description || "No description available"
14826
+ tool4.description || "No description available"
14096
14827
  );
14097
14828
  toolXml = toolXml.replace(
14098
14829
  "{{TOOL_USAGE_GUIDELINES}}",
14099
- tool3.usageGuidelines || "Use this tool when appropriate."
14830
+ tool4.usageGuidelines || "Use this tool when appropriate."
14100
14831
  );
14101
- const parametersXml = this.generateParametersXml(tool3.inputSchema);
14832
+ const parametersXml = this.generateParametersXml(tool4.inputSchema);
14102
14833
  toolXml = toolXml.replace("{{TOOL_PARAMETERS_SCHEMA}}", parametersXml);
14103
14834
  return toolXml;
14104
14835
  }
@@ -14281,6 +15012,8 @@ CREATING ARTIFACTS (SERVES AS CITATION):
14281
15012
  Use the appropriate ArtifactCreate_[Type] component to extract and structure data from tool results.
14282
15013
  The creation itself serves as a citation - no additional reference needed.
14283
15014
 
15015
+ \u26A0\uFE0F IMPORTANT: Do not create artifacts from get_reference_artifact tool results - these are already compressed artifacts being retrieved. Only create artifacts from original research and analysis tools.
15016
+
14284
15017
  \u{1F6AB} FORBIDDEN JMESPATH PATTERNS:
14285
15018
  \u274C NEVER: [?title~'.*text.*'] (regex patterns with ~ operator)
14286
15019
  \u274C NEVER: [?field~'pattern.*'] (any ~ operator usage)
@@ -14560,7 +15293,7 @@ function hasToolCallWithPrefix(prefix) {
14560
15293
  return false;
14561
15294
  };
14562
15295
  }
14563
- var logger18 = agentsCore.getLogger("Agent");
15296
+ var logger20 = agentsCore.getLogger("Agent");
14564
15297
  function validateModel(modelString, modelType) {
14565
15298
  if (!modelString?.trim()) {
14566
15299
  throw new Error(
@@ -14569,8 +15302,8 @@ function validateModel(modelString, modelType) {
14569
15302
  }
14570
15303
  return modelString.trim();
14571
15304
  }
14572
- function isValidTool(tool3) {
14573
- return tool3 && typeof tool3 === "object" && typeof tool3.description === "string" && tool3.inputSchema && typeof tool3.execute === "function";
15305
+ function isValidTool(tool4) {
15306
+ return tool4 && typeof tool4 === "object" && typeof tool4.description === "string" && tool4.inputSchema && typeof tool4.execute === "function";
14574
15307
  }
14575
15308
  var Agent = class {
14576
15309
  config;
@@ -14586,6 +15319,7 @@ var Agent = class {
14586
15319
  credentialStoreRegistry;
14587
15320
  mcpClientCache = /* @__PURE__ */ new Map();
14588
15321
  mcpConnectionLocks = /* @__PURE__ */ new Map();
15322
+ currentCompressor = null;
14589
15323
  constructor(config, credentialStoreRegistry) {
14590
15324
  this.artifactComponents = config.artifactComponents || [];
14591
15325
  let processedDataComponents = config.dataComponents || [];
@@ -14671,17 +15405,17 @@ var Agent = class {
14671
15405
  }
14672
15406
  #getRelationshipIdForTool(toolName, toolType) {
14673
15407
  if (toolType === "mcp") {
14674
- const matchingTool = this.config.tools?.find((tool3) => {
14675
- if (tool3.config?.type !== "mcp") {
15408
+ const matchingTool = this.config.tools?.find((tool4) => {
15409
+ if (tool4.config?.type !== "mcp") {
14676
15410
  return false;
14677
15411
  }
14678
- if (tool3.availableTools?.some((available) => available.name === toolName)) {
15412
+ if (tool4.availableTools?.some((available) => available.name === toolName)) {
14679
15413
  return true;
14680
15414
  }
14681
- if (tool3.config.mcp.activeTools?.includes(toolName)) {
15415
+ if (tool4.config.mcp.activeTools?.includes(toolName)) {
14682
15416
  return true;
14683
15417
  }
14684
- return tool3.name === toolName;
15418
+ return tool4.name === toolName;
14685
15419
  });
14686
15420
  return matchingTool?.relationshipId;
14687
15421
  }
@@ -14735,9 +15469,59 @@ var Agent = class {
14735
15469
  providerOptions: baseConfig.providerOptions
14736
15470
  };
14737
15471
  }
15472
+ /**
15473
+ * Get the model settings for summarization/distillation
15474
+ * Falls back to base model if summarizer not configured
15475
+ */
15476
+ getSummarizerModel() {
15477
+ if (!this.config.models) {
15478
+ throw new Error(
15479
+ "Model configuration is required. Please configure models at the project level."
15480
+ );
15481
+ }
15482
+ const summarizerConfig = this.config.models.summarizer;
15483
+ const baseConfig = this.config.models.base;
15484
+ if (summarizerConfig) {
15485
+ return {
15486
+ model: validateModel(summarizerConfig.model, "Summarizer"),
15487
+ providerOptions: summarizerConfig.providerOptions
15488
+ };
15489
+ }
15490
+ if (!baseConfig) {
15491
+ throw new Error(
15492
+ "Base model configuration is required for summarizer fallback. Please configure models at the project level."
15493
+ );
15494
+ }
15495
+ return {
15496
+ model: validateModel(baseConfig.model, "Base (fallback for summarizer)"),
15497
+ providerOptions: baseConfig.providerOptions
15498
+ };
15499
+ }
14738
15500
  setConversationId(conversationId) {
14739
15501
  this.conversationId = conversationId;
14740
15502
  }
15503
+ /**
15504
+ * Simple compression fallback: drop oldest messages to fit under token limit
15505
+ */
15506
+ simpleCompression(messages, targetTokens) {
15507
+ if (messages.length === 0) return messages;
15508
+ const estimateTokens = (msg) => {
15509
+ const content = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
15510
+ return Math.ceil(content.length / 4);
15511
+ };
15512
+ let totalTokens = messages.reduce((sum, msg) => sum + estimateTokens(msg), 0);
15513
+ if (totalTokens <= targetTokens) {
15514
+ return messages;
15515
+ }
15516
+ const result = [...messages];
15517
+ while (totalTokens > targetTokens && result.length > 1) {
15518
+ const dropped = result.shift();
15519
+ if (dropped) {
15520
+ totalTokens -= estimateTokens(dropped);
15521
+ }
15522
+ }
15523
+ return result;
15524
+ }
14741
15525
  /**
14742
15526
  * Set delegation status for this agent instance
14743
15527
  */
@@ -14773,14 +15557,21 @@ var Agent = class {
14773
15557
  const toolCallId = context?.toolCallId || generateToolId();
14774
15558
  const activeSpan = api.trace.getActiveSpan();
14775
15559
  if (activeSpan) {
14776
- activeSpan.setAttributes({
15560
+ const attributes = {
14777
15561
  "conversation.id": this.conversationId,
14778
15562
  "tool.purpose": toolDefinition.description || "No description provided",
14779
15563
  "ai.toolType": toolType || "unknown",
14780
15564
  "subAgent.name": this.config.name || "unknown",
14781
15565
  "subAgent.id": this.config.id || "unknown",
14782
15566
  "agent.id": this.config.agentId || "unknown"
14783
- });
15567
+ };
15568
+ if (options?.mcpServerId) {
15569
+ attributes["ai.toolCall.mcpServerId"] = options.mcpServerId;
15570
+ }
15571
+ if (options?.mcpServerName) {
15572
+ attributes["ai.toolCall.mcpServerName"] = options.mcpServerName;
15573
+ }
15574
+ activeSpan.setAttributes(attributes);
14784
15575
  }
14785
15576
  const isInternalTool = toolName.includes("save_tool_result") || toolName.includes("thinking_complete") || toolName.startsWith("transfer_to_");
14786
15577
  const needsApproval = options?.needsApproval || false;
@@ -14833,7 +15624,7 @@ var Agent = class {
14833
15624
  };
14834
15625
  await agentsCore.createMessage(dbClient_default)(messagePayload);
14835
15626
  } catch (error) {
14836
- logger18.warn(
15627
+ logger20.warn(
14837
15628
  { error, toolName, toolCallId, conversationId: toolResultConversationId },
14838
15629
  "Failed to store tool result in conversation history"
14839
15630
  );
@@ -14921,10 +15712,10 @@ var Agent = class {
14921
15712
  ]);
14922
15713
  }
14923
15714
  async getMcpTools(sessionId, streamRequestId) {
14924
- const mcpTools = this.config.tools?.filter((tool3) => {
14925
- return tool3.config?.type === "mcp";
15715
+ const mcpTools = this.config.tools?.filter((tool4) => {
15716
+ return tool4.config?.type === "mcp";
14926
15717
  }) || [];
14927
- const tools = await Promise.all(mcpTools.map((tool3) => this.getMcpTool(tool3)) || []) || [];
15718
+ const tools = await Promise.all(mcpTools.map((tool4) => this.getMcpTool(tool4)) || []) || [];
14928
15719
  if (!sessionId) {
14929
15720
  const wrappedTools2 = {};
14930
15721
  for (const toolSet of tools) {
@@ -14939,7 +15730,11 @@ var Agent = class {
14939
15730
  enhancedTool,
14940
15731
  streamRequestId,
14941
15732
  "mcp",
14942
- { needsApproval }
15733
+ {
15734
+ needsApproval,
15735
+ mcpServerId: toolSet.mcpServerId,
15736
+ mcpServerName: toolSet.mcpServerName
15737
+ }
14943
15738
  );
14944
15739
  }
14945
15740
  }
@@ -14949,11 +15744,11 @@ var Agent = class {
14949
15744
  for (const toolResult of tools) {
14950
15745
  for (const [toolName, originalTool] of Object.entries(toolResult.tools)) {
14951
15746
  if (!isValidTool(originalTool)) {
14952
- logger18.error({ toolName }, "Invalid MCP tool structure - missing required properties");
15747
+ logger20.error({ toolName }, "Invalid MCP tool structure - missing required properties");
14953
15748
  continue;
14954
15749
  }
14955
15750
  const needsApproval = toolResult.toolPolicies?.[toolName]?.needsApproval || false;
14956
- logger18.debug(
15751
+ logger20.debug(
14957
15752
  {
14958
15753
  toolName,
14959
15754
  toolPolicies: toolResult.toolPolicies,
@@ -14967,7 +15762,7 @@ var Agent = class {
14967
15762
  inputSchema: originalTool.inputSchema,
14968
15763
  execute: async (args2, { toolCallId }) => {
14969
15764
  if (needsApproval) {
14970
- logger18.info(
15765
+ logger20.info(
14971
15766
  { toolName, toolCallId, args: args2 },
14972
15767
  "Tool requires approval - waiting for user response"
14973
15768
  );
@@ -15013,7 +15808,7 @@ var Agent = class {
15013
15808
  }
15014
15809
  },
15015
15810
  (denialSpan) => {
15016
- logger18.info(
15811
+ logger20.info(
15017
15812
  { toolName, toolCallId, reason: approvalResult.reason },
15018
15813
  "Tool execution denied by user"
15019
15814
  );
@@ -15034,18 +15829,18 @@ var Agent = class {
15034
15829
  }
15035
15830
  },
15036
15831
  (approvedSpan) => {
15037
- logger18.info({ toolName, toolCallId }, "Tool approved, continuing with execution");
15832
+ logger20.info({ toolName, toolCallId }, "Tool approved, continuing with execution");
15038
15833
  approvedSpan.setStatus({ code: api.SpanStatusCode.OK });
15039
15834
  approvedSpan.end();
15040
15835
  }
15041
15836
  );
15042
15837
  }
15043
- logger18.debug({ toolName, toolCallId }, "MCP Tool Called");
15838
+ logger20.debug({ toolName, toolCallId }, "MCP Tool Called");
15044
15839
  try {
15045
15840
  const rawResult = await originalTool.execute(args2, { toolCallId });
15046
15841
  if (rawResult && typeof rawResult === "object" && rawResult.isError) {
15047
15842
  const errorMessage = rawResult.content?.[0]?.text || "MCP tool returned an error";
15048
- logger18.error(
15843
+ logger20.error(
15049
15844
  { toolName, toolCallId, errorMessage, rawResult },
15050
15845
  "MCP tool returned error status"
15051
15846
  );
@@ -15096,7 +15891,7 @@ var Agent = class {
15096
15891
  });
15097
15892
  return { result: enhancedResult, toolCallId };
15098
15893
  } catch (error) {
15099
- logger18.error({ toolName, toolCallId, error }, "MCP tool execution failed");
15894
+ logger20.error({ toolName, toolCallId, error }, "MCP tool execution failed");
15100
15895
  throw error;
15101
15896
  }
15102
15897
  }
@@ -15106,7 +15901,11 @@ var Agent = class {
15106
15901
  sessionWrappedTool,
15107
15902
  streamRequestId,
15108
15903
  "mcp",
15109
- { needsApproval }
15904
+ {
15905
+ needsApproval,
15906
+ mcpServerId: toolResult.mcpServerId,
15907
+ mcpServerName: toolResult.mcpServerName
15908
+ }
15110
15909
  );
15111
15910
  }
15112
15911
  }
@@ -15115,28 +15914,28 @@ var Agent = class {
15115
15914
  /**
15116
15915
  * Convert database McpTool to builder MCPToolConfig format
15117
15916
  */
15118
- convertToMCPToolConfig(tool3, agentToolRelationHeaders) {
15119
- if (tool3.config.type !== "mcp") {
15120
- throw new Error(`Cannot convert non-MCP tool to MCP config: ${tool3.id}`);
15917
+ convertToMCPToolConfig(tool4, agentToolRelationHeaders) {
15918
+ if (tool4.config.type !== "mcp") {
15919
+ throw new Error(`Cannot convert non-MCP tool to MCP config: ${tool4.id}`);
15121
15920
  }
15122
15921
  return {
15123
- id: tool3.id,
15124
- name: tool3.name,
15125
- description: tool3.name,
15922
+ id: tool4.id,
15923
+ name: tool4.name,
15924
+ description: tool4.name,
15126
15925
  // Use name as description fallback
15127
- serverUrl: tool3.config.mcp.server.url,
15128
- activeTools: tool3.config.mcp.activeTools,
15129
- mcpType: tool3.config.mcp.server.url.includes("api.nango.dev") ? agentsCore.MCPServerType.nango : agentsCore.MCPServerType.generic,
15130
- transport: tool3.config.mcp.transport,
15926
+ serverUrl: tool4.config.mcp.server.url,
15927
+ activeTools: tool4.config.mcp.activeTools,
15928
+ mcpType: tool4.config.mcp.server.url.includes("api.nango.dev") ? agentsCore.MCPServerType.nango : agentsCore.MCPServerType.generic,
15929
+ transport: tool4.config.mcp.transport,
15131
15930
  headers: {
15132
- ...tool3.headers,
15931
+ ...tool4.headers,
15133
15932
  ...agentToolRelationHeaders
15134
15933
  }
15135
15934
  };
15136
15935
  }
15137
- async getMcpTool(tool3) {
15138
- const cacheKey = `${this.config.tenantId}-${this.config.projectId}-${tool3.id}-${tool3.credentialReferenceId || "no-cred"}`;
15139
- const credentialReferenceId = tool3.credentialReferenceId;
15936
+ async getMcpTool(tool4) {
15937
+ const cacheKey = `${this.config.tenantId}-${this.config.projectId}-${tool4.id}-${tool4.credentialReferenceId || "no-cred"}`;
15938
+ const credentialReferenceId = tool4.credentialReferenceId;
15140
15939
  const toolsForAgent = await agentsCore.getToolsForAgent(dbClient_default)({
15141
15940
  scopes: {
15142
15941
  tenantId: this.config.tenantId,
@@ -15145,12 +15944,12 @@ var Agent = class {
15145
15944
  subAgentId: this.config.id
15146
15945
  }
15147
15946
  });
15148
- const toolRelation = toolsForAgent.data.find((t2) => t2.toolId === tool3.id);
15947
+ const toolRelation = toolsForAgent.data.find((t2) => t2.toolId === tool4.id);
15149
15948
  const agentToolRelationHeaders = toolRelation?.headers || void 0;
15150
15949
  const selectedTools = toolRelation?.selectedTools || void 0;
15151
15950
  const toolPolicies = toolRelation?.toolPolicies || {};
15152
15951
  let serverConfig;
15153
- const isUserScoped = tool3.credentialScope === "user";
15952
+ const isUserScoped = tool4.credentialScope === "user";
15154
15953
  const userId = this.config.userId;
15155
15954
  if (isUserScoped && userId && this.credentialStuffer) {
15156
15955
  const userCredentialReference = await agentsCore.getUserScopedCredentialReference(dbClient_default)({
@@ -15158,7 +15957,7 @@ var Agent = class {
15158
15957
  tenantId: this.config.tenantId,
15159
15958
  projectId: this.config.projectId
15160
15959
  },
15161
- toolId: tool3.id,
15960
+ toolId: tool4.id,
15162
15961
  userId
15163
15962
  });
15164
15963
  if (userCredentialReference) {
@@ -15173,13 +15972,13 @@ var Agent = class {
15173
15972
  contextConfigId: this.config.contextConfigId || void 0,
15174
15973
  conversationId: this.conversationId || void 0
15175
15974
  },
15176
- this.convertToMCPToolConfig(tool3, agentToolRelationHeaders),
15975
+ this.convertToMCPToolConfig(tool4, agentToolRelationHeaders),
15177
15976
  storeReference,
15178
15977
  selectedTools
15179
15978
  );
15180
15979
  } else {
15181
- logger18.warn(
15182
- { toolId: tool3.id, userId },
15980
+ logger20.warn(
15981
+ { toolId: tool4.id, userId },
15183
15982
  "User-scoped tool has no credential connected for this user"
15184
15983
  );
15185
15984
  serverConfig = await this.credentialStuffer.buildMcpServerConfig(
@@ -15189,7 +15988,7 @@ var Agent = class {
15189
15988
  contextConfigId: this.config.contextConfigId || void 0,
15190
15989
  conversationId: this.conversationId || void 0
15191
15990
  },
15192
- this.convertToMCPToolConfig(tool3, agentToolRelationHeaders),
15991
+ this.convertToMCPToolConfig(tool4, agentToolRelationHeaders),
15193
15992
  void 0,
15194
15993
  selectedTools
15195
15994
  );
@@ -15216,7 +16015,7 @@ var Agent = class {
15216
16015
  contextConfigId: this.config.contextConfigId || void 0,
15217
16016
  conversationId: this.conversationId || void 0
15218
16017
  },
15219
- this.convertToMCPToolConfig(tool3, agentToolRelationHeaders),
16018
+ this.convertToMCPToolConfig(tool4, agentToolRelationHeaders),
15220
16019
  storeReference,
15221
16020
  selectedTools
15222
16021
  );
@@ -15228,28 +16027,41 @@ var Agent = class {
15228
16027
  contextConfigId: this.config.contextConfigId || void 0,
15229
16028
  conversationId: this.conversationId || void 0
15230
16029
  },
15231
- this.convertToMCPToolConfig(tool3, agentToolRelationHeaders),
16030
+ this.convertToMCPToolConfig(tool4, agentToolRelationHeaders),
15232
16031
  void 0,
15233
16032
  selectedTools
15234
16033
  );
15235
16034
  } else {
15236
- if (tool3.config.type !== "mcp") {
15237
- throw new Error(`Cannot build server config for non-MCP tool: ${tool3.id}`);
16035
+ if (tool4.config.type !== "mcp") {
16036
+ throw new Error(`Cannot build server config for non-MCP tool: ${tool4.id}`);
15238
16037
  }
15239
16038
  serverConfig = {
15240
- type: tool3.config.mcp.transport?.type || agentsCore.MCPTransportType.streamableHttp,
15241
- url: tool3.config.mcp.server.url,
15242
- activeTools: tool3.config.mcp.activeTools,
16039
+ type: tool4.config.mcp.transport?.type || agentsCore.MCPTransportType.streamableHttp,
16040
+ url: tool4.config.mcp.server.url,
16041
+ activeTools: tool4.config.mcp.activeTools,
15243
16042
  selectedTools,
15244
16043
  headers: agentToolRelationHeaders
15245
16044
  };
15246
16045
  }
15247
- logger18.info(
16046
+ if (serverConfig.url?.toString().includes("composio.dev")) {
16047
+ const urlObj = new URL(serverConfig.url.toString());
16048
+ if (isUserScoped && userId) {
16049
+ urlObj.searchParams.set("user_id", userId);
16050
+ } else {
16051
+ const SEPARATOR = "||";
16052
+ urlObj.searchParams.set(
16053
+ "user_id",
16054
+ `${this.config.tenantId}${SEPARATOR}${this.config.projectId}`
16055
+ );
16056
+ }
16057
+ serverConfig.url = urlObj.toString();
16058
+ }
16059
+ logger20.info(
15248
16060
  {
15249
- toolName: tool3.name,
16061
+ toolName: tool4.name,
15250
16062
  credentialReferenceId,
15251
16063
  transportType: serverConfig.type,
15252
- headers: tool3.headers
16064
+ headers: tool4.headers
15253
16065
  },
15254
16066
  "Built MCP server config with credentials"
15255
16067
  );
@@ -15261,7 +16073,7 @@ var Agent = class {
15261
16073
  if (!client) {
15262
16074
  let connectionPromise = this.mcpConnectionLocks.get(cacheKey);
15263
16075
  if (!connectionPromise) {
15264
- connectionPromise = this.createMcpConnection(tool3, serverConfig);
16076
+ connectionPromise = this.createMcpConnection(tool4, serverConfig);
15265
16077
  this.mcpConnectionLocks.set(cacheKey, connectionPromise);
15266
16078
  }
15267
16079
  try {
@@ -15269,9 +16081,9 @@ var Agent = class {
15269
16081
  this.mcpClientCache.set(cacheKey, client);
15270
16082
  } catch (error) {
15271
16083
  this.mcpConnectionLocks.delete(cacheKey);
15272
- logger18.error(
16084
+ logger20.error(
15273
16085
  {
15274
- toolName: tool3.name,
16086
+ toolName: tool4.name,
15275
16087
  subAgentId: this.config.id,
15276
16088
  cacheKey,
15277
16089
  error: error instanceof Error ? error.message : String(error)
@@ -15289,13 +16101,13 @@ var Agent = class {
15289
16101
  "ai.toolCall",
15290
16102
  {
15291
16103
  attributes: {
15292
- "ai.toolCall.name": tool3.name,
16104
+ "ai.toolCall.name": tool4.name,
15293
16105
  "ai.toolCall.args": JSON.stringify({ operation: "mcp_tool_discovery" }),
15294
16106
  "ai.toolCall.result": JSON.stringify({
15295
16107
  status: "no_tools_available",
15296
16108
  message: `MCP server has 0 effective tools. Double check the selected tools in your agent and the active tools in the MCP server configuration.`,
15297
- serverUrl: tool3.config.type === "mcp" ? tool3.config.mcp.server.url : "unknown",
15298
- originalToolName: tool3.name
16109
+ serverUrl: tool4.config.type === "mcp" ? tool4.config.mcp.server.url : "unknown",
16110
+ originalToolName: tool4.name
15299
16111
  }),
15300
16112
  "ai.toolType": "mcp",
15301
16113
  "subAgent.name": this.config.name || "unknown",
@@ -15307,14 +16119,14 @@ var Agent = class {
15307
16119
  }
15308
16120
  },
15309
16121
  (span) => {
15310
- agentsCore.setSpanWithError(span, new Error(`0 effective tools available for ${tool3.name}`));
16122
+ agentsCore.setSpanWithError(span, new Error(`0 effective tools available for ${tool4.name}`));
15311
16123
  agentSessionManager.recordEvent(streamRequestId, "error", this.config.id, {
15312
16124
  message: `MCP server has 0 effective tools. Double check the selected tools in your graph and the active tools in the MCP server configuration.`,
15313
16125
  code: "no_tools_available",
15314
16126
  severity: "error",
15315
16127
  context: {
15316
- toolName: tool3.name,
15317
- serverUrl: tool3.config.type === "mcp" ? tool3.config.mcp.server.url : "unknown",
16128
+ toolName: tool4.name,
16129
+ serverUrl: tool4.config.type === "mcp" ? tool4.config.mcp.server.url : "unknown",
15318
16130
  operation: "mcp_tool_discovery"
15319
16131
  }
15320
16132
  });
@@ -15323,20 +16135,20 @@ var Agent = class {
15323
16135
  );
15324
16136
  }
15325
16137
  }
15326
- return { tools, toolPolicies };
16138
+ return { tools, toolPolicies, mcpServerId: tool4.id, mcpServerName: tool4.name };
15327
16139
  }
15328
- async createMcpConnection(tool3, serverConfig) {
16140
+ async createMcpConnection(tool4, serverConfig) {
15329
16141
  const client = new agentsCore.McpClient({
15330
- name: tool3.name,
16142
+ name: tool4.name,
15331
16143
  server: serverConfig
15332
16144
  });
15333
16145
  try {
15334
16146
  await client.connect();
15335
16147
  return client;
15336
16148
  } catch (error) {
15337
- logger18.error(
16149
+ logger20.error(
15338
16150
  {
15339
- toolName: tool3.name,
16151
+ toolName: tool4.name,
15340
16152
  subAgentId: this.config.id,
15341
16153
  error: error instanceof Error ? error.message : String(error)
15342
16154
  },
@@ -15376,7 +16188,7 @@ var Agent = class {
15376
16188
  for (const functionToolDef of functionToolsData) {
15377
16189
  const functionId = functionToolDef.functionId;
15378
16190
  if (!functionId) {
15379
- logger18.warn(
16191
+ logger20.warn(
15380
16192
  { functionToolId: functionToolDef.id },
15381
16193
  "Function tool missing functionId reference"
15382
16194
  );
@@ -15390,7 +16202,7 @@ var Agent = class {
15390
16202
  }
15391
16203
  });
15392
16204
  if (!functionData) {
15393
- logger18.warn(
16205
+ logger20.warn(
15394
16206
  { functionId, functionToolId: functionToolDef.id },
15395
16207
  "Function not found in functions table"
15396
16208
  );
@@ -15401,7 +16213,7 @@ var Agent = class {
15401
16213
  description: functionToolDef.description || functionToolDef.name,
15402
16214
  inputSchema: zodSchema,
15403
16215
  execute: async (args2, { toolCallId }) => {
15404
- logger18.debug(
16216
+ logger20.debug(
15405
16217
  { toolName: functionToolDef.name, toolCallId, args: args2 },
15406
16218
  "Function Tool Called"
15407
16219
  );
@@ -15428,7 +16240,7 @@ var Agent = class {
15428
16240
  });
15429
16241
  return { result, toolCallId };
15430
16242
  } catch (error) {
15431
- logger18.error(
16243
+ logger20.error(
15432
16244
  {
15433
16245
  toolName: functionToolDef.name,
15434
16246
  toolCallId,
@@ -15448,7 +16260,7 @@ var Agent = class {
15448
16260
  );
15449
16261
  }
15450
16262
  } catch (error) {
15451
- logger18.error({ error }, "Failed to load function tools from database");
16263
+ logger20.error({ error }, "Failed to load function tools from database");
15452
16264
  }
15453
16265
  return functionTools;
15454
16266
  }
@@ -15458,7 +16270,7 @@ var Agent = class {
15458
16270
  async getResolvedContext(conversationId, headers2) {
15459
16271
  try {
15460
16272
  if (!this.config.contextConfigId) {
15461
- logger18.debug({ agentId: this.config.agentId }, "No context config found for agent");
16273
+ logger20.debug({ agentId: this.config.agentId }, "No context config found for agent");
15462
16274
  return null;
15463
16275
  }
15464
16276
  const contextConfig = await agentsCore.getContextConfigById(dbClient_default)({
@@ -15470,7 +16282,7 @@ var Agent = class {
15470
16282
  id: this.config.contextConfigId
15471
16283
  });
15472
16284
  if (!contextConfig) {
15473
- logger18.warn({ contextConfigId: this.config.contextConfigId }, "Context config not found");
16285
+ logger20.warn({ contextConfigId: this.config.contextConfigId }, "Context config not found");
15474
16286
  return null;
15475
16287
  }
15476
16288
  if (!this.contextResolver) {
@@ -15486,7 +16298,7 @@ var Agent = class {
15486
16298
  ...result.resolvedContext,
15487
16299
  $env: process.env
15488
16300
  };
15489
- logger18.debug(
16301
+ logger20.debug(
15490
16302
  {
15491
16303
  conversationId,
15492
16304
  contextConfigId: contextConfig.id,
@@ -15500,7 +16312,7 @@ var Agent = class {
15500
16312
  );
15501
16313
  return contextWithBuiltins;
15502
16314
  } catch (error) {
15503
- logger18.error(
16315
+ logger20.error(
15504
16316
  {
15505
16317
  conversationId,
15506
16318
  error: error instanceof Error ? error.message : "Unknown error"
@@ -15524,7 +16336,7 @@ var Agent = class {
15524
16336
  });
15525
16337
  return agentDefinition?.prompt || void 0;
15526
16338
  } catch (error) {
15527
- logger18.warn(
16339
+ logger20.warn(
15528
16340
  {
15529
16341
  agentId: this.config.agentId,
15530
16342
  error: error instanceof Error ? error.message : "Unknown error"
@@ -15553,7 +16365,7 @@ var Agent = class {
15553
16365
  (subAgent) => "artifactComponents" in subAgent && subAgent.artifactComponents && subAgent.artifactComponents.length > 0
15554
16366
  );
15555
16367
  } catch (error) {
15556
- logger18.warn(
16368
+ logger20.warn(
15557
16369
  {
15558
16370
  agentId: this.config.agentId,
15559
16371
  tenantId: this.config.tenantId,
@@ -15571,7 +16383,8 @@ var Agent = class {
15571
16383
  */
15572
16384
  async buildPhase2SystemPrompt(runtimeContext) {
15573
16385
  const phase2Config = new Phase2Config();
15574
- const hasAgentArtifactComponents = await this.hasAgentArtifactComponents();
16386
+ const compressionConfig = getCompressionConfigFromEnv();
16387
+ const hasAgentArtifactComponents = await this.hasAgentArtifactComponents() || compressionConfig.enabled;
15575
16388
  const conversationId = runtimeContext?.metadata?.conversationId || runtimeContext?.contextId;
15576
16389
  const resolvedContext = conversationId ? await this.getResolvedContext(conversationId) : null;
15577
16390
  let processedPrompt = this.config.prompt || "";
@@ -15582,7 +16395,7 @@ var Agent = class {
15582
16395
  preserveUnresolved: false
15583
16396
  });
15584
16397
  } catch (error) {
15585
- logger18.error(
16398
+ logger20.error(
15586
16399
  {
15587
16400
  conversationId,
15588
16401
  error: error instanceof Error ? error.message : "Unknown error"
@@ -15629,7 +16442,7 @@ var Agent = class {
15629
16442
  preserveUnresolved: false
15630
16443
  });
15631
16444
  } catch (error) {
15632
- logger18.error(
16445
+ logger20.error(
15633
16446
  {
15634
16447
  conversationId,
15635
16448
  error: error instanceof Error ? error.message : "Unknown error"
@@ -15644,25 +16457,25 @@ var Agent = class {
15644
16457
  const functionTools = await this.getFunctionTools(streamRequestId || "");
15645
16458
  const relationTools = this.getRelationTools(runtimeContext);
15646
16459
  const allTools = { ...mcpTools, ...functionTools, ...relationTools };
15647
- logger18.info(
16460
+ logger20.info(
15648
16461
  {
15649
16462
  mcpTools: Object.keys(mcpTools),
15650
16463
  functionTools: Object.keys(functionTools),
15651
16464
  relationTools: Object.keys(relationTools),
15652
16465
  allTools: Object.keys(allTools),
15653
- functionToolsDetails: Object.entries(functionTools).map(([name2, tool3]) => ({
16466
+ functionToolsDetails: Object.entries(functionTools).map(([name2, tool4]) => ({
15654
16467
  name: name2,
15655
- hasExecute: typeof tool3.execute === "function",
15656
- hasDescription: !!tool3.description,
15657
- hasInputSchema: !!tool3.inputSchema
16468
+ hasExecute: typeof tool4.execute === "function",
16469
+ hasDescription: !!tool4.description,
16470
+ hasInputSchema: !!tool4.inputSchema
15658
16471
  }))
15659
16472
  },
15660
16473
  "Tools loaded for agent"
15661
16474
  );
15662
- const toolDefinitions = Object.entries(allTools).map(([name2, tool3]) => ({
16475
+ const toolDefinitions = Object.entries(allTools).map(([name2, tool4]) => ({
15663
16476
  name: name2,
15664
- description: tool3.description || "",
15665
- inputSchema: tool3.inputSchema || tool3.parameters || {},
16477
+ description: tool4.description || "",
16478
+ inputSchema: tool4.inputSchema || tool4.parameters || {},
15666
16479
  usageGuidelines: name2.startsWith("transfer_to_") || name2.startsWith("delegate_to_") ? `Use this tool to ${name2.startsWith("transfer_to_") ? "transfer" : "delegate"} to another agent when appropriate.` : "Use this tool when appropriate for the task at hand."
15667
16480
  }));
15668
16481
  const { getConversationScopedArtifacts: getConversationScopedArtifacts2 } = await Promise.resolve().then(() => (init_conversations(), conversations_exports));
@@ -15683,7 +16496,7 @@ var Agent = class {
15683
16496
  preserveUnresolved: false
15684
16497
  });
15685
16498
  } catch (error) {
15686
- logger18.error(
16499
+ logger20.error(
15687
16500
  {
15688
16501
  conversationId,
15689
16502
  error: error instanceof Error ? error.message : "Unknown error"
@@ -15693,7 +16506,8 @@ var Agent = class {
15693
16506
  }
15694
16507
  }
15695
16508
  const shouldIncludeArtifactComponents = !excludeDataComponents;
15696
- const hasAgentArtifactComponents = await this.hasAgentArtifactComponents();
16509
+ const compressionConfig = getCompressionConfigFromEnv();
16510
+ const hasAgentArtifactComponents = await this.hasAgentArtifactComponents() || compressionConfig.enabled;
15697
16511
  const config = {
15698
16512
  corePrompt: processedPrompt,
15699
16513
  prompt,
@@ -15716,7 +16530,7 @@ var Agent = class {
15716
16530
  toolCallId: zodOpenapi.z.string().describe("The tool call ID associated with this artifact.")
15717
16531
  }),
15718
16532
  execute: async ({ artifactId, toolCallId }) => {
15719
- logger18.info({ artifactId, toolCallId }, "get_artifact_full executed");
16533
+ logger20.info({ artifactId, toolCallId }, "get_artifact_full executed");
15720
16534
  const streamRequestId = this.getStreamRequestId();
15721
16535
  const artifactService = agentSessionManager.getArtifactService(streamRequestId);
15722
16536
  if (!artifactService) {
@@ -15752,7 +16566,8 @@ var Agent = class {
15752
16566
  // Provide a default tool set that is always available to the agent.
15753
16567
  async getDefaultTools(streamRequestId) {
15754
16568
  const defaultTools = {};
15755
- if (await this.agentHasArtifactComponents()) {
16569
+ const compressionConfig = getCompressionConfigFromEnv();
16570
+ if (await this.agentHasArtifactComponents() || compressionConfig.enabled) {
15756
16571
  defaultTools.get_reference_artifact = this.getArtifactTools();
15757
16572
  }
15758
16573
  const hasStructuredOutput = this.config.dataComponents && this.config.dataComponents.length > 0;
@@ -15767,6 +16582,37 @@ var Agent = class {
15767
16582
  );
15768
16583
  }
15769
16584
  }
16585
+ logger20.info(
16586
+ { agentId: this.config.id, streamRequestId },
16587
+ "Adding compress_context tool to defaultTools"
16588
+ );
16589
+ defaultTools.compress_context = ai.tool({
16590
+ description: "Manually compress the current conversation context to save space. Use when shifting topics, completing major tasks, or when context feels cluttered.",
16591
+ inputSchema: zodOpenapi.z.object({
16592
+ reason: zodOpenapi.z.string().describe(
16593
+ 'Why you are requesting compression (e.g., "shifting from research to coding", "completed analysis phase")'
16594
+ )
16595
+ }),
16596
+ execute: async ({ reason }) => {
16597
+ logger20.info(
16598
+ {
16599
+ agentId: this.config.id,
16600
+ streamRequestId,
16601
+ reason
16602
+ },
16603
+ "Manual compression requested by LLM"
16604
+ );
16605
+ if (this.currentCompressor) {
16606
+ this.currentCompressor.requestManualCompression(reason);
16607
+ }
16608
+ return {
16609
+ status: "compression_requested",
16610
+ reason,
16611
+ message: "Context compression will be applied on the next generation step. Previous work has been summarized and saved as artifacts."
16612
+ };
16613
+ }
16614
+ });
16615
+ logger20.info("getDefaultTools returning tools:", Object.keys(defaultTools).join(", "));
15770
16616
  return defaultTools;
15771
16617
  }
15772
16618
  getStreamRequestId() {
@@ -16012,7 +16858,7 @@ ${output}`;
16012
16858
  };
16013
16859
  return enhanced;
16014
16860
  } catch (error) {
16015
- logger18.warn({ error }, "Failed to enhance tool result with structure hints");
16861
+ logger20.warn({ error }, "Failed to enhance tool result with structure hints");
16016
16862
  return result;
16017
16863
  }
16018
16864
  }
@@ -16027,7 +16873,7 @@ ${output}`;
16027
16873
  }
16028
16874
  });
16029
16875
  } catch (error) {
16030
- logger18.error(
16876
+ logger20.error(
16031
16877
  { error, agentId: this.config.agentId },
16032
16878
  "Failed to check agent artifact components"
16033
16879
  );
@@ -16144,7 +16990,7 @@ ${output}`;
16144
16990
  const configuredTimeout = modelSettings.maxDuration ? Math.min(modelSettings.maxDuration * 1e3, LLM_GENERATION_MAX_ALLOWED_TIMEOUT_MS) : shouldStreamPhase1 ? LLM_GENERATION_FIRST_CALL_TIMEOUT_MS_STREAMING : LLM_GENERATION_FIRST_CALL_TIMEOUT_MS_NON_STREAMING;
16145
16991
  const timeoutMs = Math.min(configuredTimeout, LLM_GENERATION_MAX_ALLOWED_TIMEOUT_MS);
16146
16992
  if (modelSettings.maxDuration && modelSettings.maxDuration * 1e3 > LLM_GENERATION_MAX_ALLOWED_TIMEOUT_MS) {
16147
- logger18.warn(
16993
+ logger20.warn(
16148
16994
  {
16149
16995
  requestedTimeout: modelSettings.maxDuration * 1e3,
16150
16996
  appliedTimeout: timeoutMs,
@@ -16163,6 +17009,18 @@ ${output}`;
16163
17009
  role: "user",
16164
17010
  content: userMessage
16165
17011
  });
17012
+ const originalMessageCount = messages.length;
17013
+ const compressionConfig = getCompressionConfigFromEnv();
17014
+ const compressor = compressionConfig.enabled ? new MidGenerationCompressor(
17015
+ sessionId,
17016
+ contextId,
17017
+ this.config.tenantId,
17018
+ this.config.projectId,
17019
+ compressionConfig,
17020
+ this.getSummarizerModel(),
17021
+ primaryModelSettings
17022
+ ) : null;
17023
+ this.currentCompressor = compressor;
16166
17024
  if (shouldStreamPhase1) {
16167
17025
  const streamConfig = {
16168
17026
  ...modelSettings,
@@ -16173,6 +17031,87 @@ ${output}`;
16173
17031
  ...streamConfig,
16174
17032
  messages,
16175
17033
  tools: sanitizedTools,
17034
+ prepareStep: async ({ messages: stepMessages }) => {
17035
+ if (!compressor) {
17036
+ return {};
17037
+ }
17038
+ const compressionNeeded = compressor.isCompressionNeeded(stepMessages);
17039
+ if (compressionNeeded) {
17040
+ logger20.info(
17041
+ {
17042
+ compressorState: compressor.getState()
17043
+ },
17044
+ "Triggering layered mid-generation compression"
17045
+ );
17046
+ try {
17047
+ const originalMessages = stepMessages.slice(0, originalMessageCount);
17048
+ const generatedMessages = stepMessages.slice(originalMessageCount);
17049
+ if (generatedMessages.length > 0) {
17050
+ const compressionResult = await compressor.compress(generatedMessages);
17051
+ const finalMessages = [...originalMessages];
17052
+ if (compressionResult.summary.text_messages && compressionResult.summary.text_messages.length > 0) {
17053
+ finalMessages.push(...compressionResult.summary.text_messages);
17054
+ }
17055
+ const summaryMessage = JSON.stringify({
17056
+ high_level: compressionResult.summary?.summary?.high_level,
17057
+ user_intent: compressionResult.summary?.summary?.user_intent,
17058
+ decisions: compressionResult.summary?.summary?.decisions,
17059
+ open_questions: compressionResult.summary?.summary?.open_questions,
17060
+ next_steps: compressionResult.summary?.summary?.next_steps,
17061
+ related_artifacts: compressionResult?.summary?.summary?.related_artifacts
17062
+ });
17063
+ finalMessages.push({
17064
+ role: "user",
17065
+ content: `Based on your research, here's what you've discovered: ${summaryMessage}
17066
+
17067
+ Now please provide your answer to my original question using this context.`
17068
+ });
17069
+ logger20.info(
17070
+ {
17071
+ originalTotal: stepMessages.length,
17072
+ compressed: finalMessages.length,
17073
+ originalKept: originalMessages.length,
17074
+ generatedCompressed: generatedMessages.length
17075
+ },
17076
+ "Generated content compression completed"
17077
+ );
17078
+ logger20.info({ summaryMessage }, "Summary message");
17079
+ return { messages: finalMessages };
17080
+ }
17081
+ return {};
17082
+ } catch (error) {
17083
+ logger20.error(
17084
+ {
17085
+ error: error instanceof Error ? error.message : String(error),
17086
+ stack: error instanceof Error ? error.stack : void 0
17087
+ },
17088
+ "Smart compression failed, falling back to simple compression"
17089
+ );
17090
+ try {
17091
+ const targetSize = Math.floor(compressor.getHardLimit() * 0.5);
17092
+ const fallbackMessages = this.simpleCompression(stepMessages, targetSize);
17093
+ logger20.info(
17094
+ {
17095
+ originalCount: stepMessages.length,
17096
+ compressedCount: fallbackMessages.length,
17097
+ compressionType: "simple_fallback"
17098
+ },
17099
+ "Simple compression fallback completed"
17100
+ );
17101
+ return { messages: fallbackMessages };
17102
+ } catch (fallbackError) {
17103
+ logger20.error(
17104
+ {
17105
+ error: fallbackError instanceof Error ? fallbackError.message : String(fallbackError)
17106
+ },
17107
+ "Fallback compression also failed, continuing without compression"
17108
+ );
17109
+ return {};
17110
+ }
17111
+ }
17112
+ }
17113
+ return {};
17114
+ },
16176
17115
  stopWhen: async ({ steps }) => {
16177
17116
  const last = steps.at(-1);
16178
17117
  if (last && "text" in last && last.text) {
@@ -16186,7 +17125,7 @@ ${output}`;
16186
17125
  }
16187
17126
  );
16188
17127
  } catch (error) {
16189
- logger18.debug({ error }, "Failed to track agent reasoning");
17128
+ logger20.debug({ error }, "Failed to track agent reasoning");
16190
17129
  }
16191
17130
  }
16192
17131
  if (last && last["content"] && last["content"].length > 0) {
@@ -16308,6 +17247,87 @@ ${output}`;
16308
17247
  ...genConfig,
16309
17248
  messages,
16310
17249
  tools: sanitizedTools,
17250
+ prepareStep: async ({ messages: stepMessages }) => {
17251
+ if (!compressor) {
17252
+ return {};
17253
+ }
17254
+ const compressionNeeded = compressor.isCompressionNeeded(stepMessages);
17255
+ if (compressionNeeded) {
17256
+ logger20.info(
17257
+ {
17258
+ compressorState: compressor.getState()
17259
+ },
17260
+ "Triggering layered mid-generation compression"
17261
+ );
17262
+ try {
17263
+ const originalMessages = stepMessages.slice(0, originalMessageCount);
17264
+ const generatedMessages = stepMessages.slice(originalMessageCount);
17265
+ if (generatedMessages.length > 0) {
17266
+ const compressionResult = await compressor.compress(generatedMessages);
17267
+ const finalMessages = [...originalMessages];
17268
+ if (compressionResult.summary.text_messages && compressionResult.summary.text_messages.length > 0) {
17269
+ finalMessages.push(...compressionResult.summary.text_messages);
17270
+ }
17271
+ const summaryMessage = JSON.stringify({
17272
+ high_level: compressionResult.summary?.summary?.high_level,
17273
+ user_intent: compressionResult.summary?.summary?.user_intent,
17274
+ decisions: compressionResult.summary?.summary?.decisions,
17275
+ open_questions: compressionResult.summary?.summary?.open_questions,
17276
+ next_steps: compressionResult.summary?.summary?.next_steps,
17277
+ related_artifacts: compressionResult?.summary?.summary?.related_artifacts
17278
+ });
17279
+ finalMessages.push({
17280
+ role: "user",
17281
+ content: `Based on your research, here's what you've discovered: ${summaryMessage}
17282
+
17283
+ Now please provide your answer to my original question using this context.`
17284
+ });
17285
+ logger20.info(
17286
+ {
17287
+ originalTotal: stepMessages.length,
17288
+ compressed: finalMessages.length,
17289
+ originalKept: originalMessages.length,
17290
+ generatedCompressed: generatedMessages.length
17291
+ },
17292
+ "Generated content compression completed"
17293
+ );
17294
+ logger20.info({ summaryMessage }, "Summary message");
17295
+ return { messages: finalMessages };
17296
+ }
17297
+ return {};
17298
+ } catch (error) {
17299
+ logger20.error(
17300
+ {
17301
+ error: error instanceof Error ? error.message : String(error),
17302
+ stack: error instanceof Error ? error.stack : void 0
17303
+ },
17304
+ "Smart compression failed, falling back to simple compression"
17305
+ );
17306
+ try {
17307
+ const targetSize = Math.floor(compressor.getHardLimit() * 0.5);
17308
+ const fallbackMessages = this.simpleCompression(stepMessages, targetSize);
17309
+ logger20.info(
17310
+ {
17311
+ originalCount: stepMessages.length,
17312
+ compressedCount: fallbackMessages.length,
17313
+ compressionType: "simple_fallback"
17314
+ },
17315
+ "Simple compression fallback completed"
17316
+ );
17317
+ return { messages: fallbackMessages };
17318
+ } catch (fallbackError) {
17319
+ logger20.error(
17320
+ {
17321
+ error: fallbackError instanceof Error ? fallbackError.message : String(fallbackError)
17322
+ },
17323
+ "Fallback compression also failed, continuing without compression"
17324
+ );
17325
+ return {};
17326
+ }
17327
+ }
17328
+ }
17329
+ return {};
17330
+ },
16311
17331
  stopWhen: async ({ steps }) => {
16312
17332
  const last = steps.at(-1);
16313
17333
  if (last && "text" in last && last.text) {
@@ -16321,7 +17341,7 @@ ${output}`;
16321
17341
  }
16322
17342
  );
16323
17343
  } catch (error) {
16324
- logger18.debug({ error }, "Failed to track agent reasoning");
17344
+ logger20.debug({ error }, "Failed to track agent reasoning");
16325
17345
  }
16326
17346
  }
16327
17347
  if (steps.length >= 2) {
@@ -16359,7 +17379,22 @@ ${output}`;
16359
17379
  const thinkingCompleteCall = response.steps?.flatMap((s4) => s4.toolCalls || [])?.find((tc) => tc.toolName === "thinking_complete");
16360
17380
  if (thinkingCompleteCall) {
16361
17381
  const reasoningFlow = [];
16362
- if (response.steps) {
17382
+ const compressionSummary = this.currentCompressor?.getCompressionSummary();
17383
+ if (compressionSummary) {
17384
+ const summaryContent = JSON.stringify(compressionSummary, null, 2);
17385
+ reasoningFlow.push({
17386
+ role: "assistant",
17387
+ content: `## Research Summary (Compressed)
17388
+
17389
+ Based on tool executions, here's the comprehensive summary:
17390
+
17391
+ \`\`\`json
17392
+ ${summaryContent}
17393
+ \`\`\`
17394
+
17395
+ This summary represents all tool execution results in compressed form. Full details are preserved in artifacts.`
17396
+ });
17397
+ } else if (response.steps) {
16363
17398
  response.steps.forEach((step) => {
16364
17399
  if (step.toolCalls && step.toolResults) {
16365
17400
  step.toolCalls.forEach((call, index) => {
@@ -16468,7 +17503,7 @@ ${output}${structureHintsFormatted}`;
16468
17503
  LLM_GENERATION_MAX_ALLOWED_TIMEOUT_MS
16469
17504
  );
16470
17505
  if (structuredModelSettings.maxDuration && structuredModelSettings.maxDuration * 1e3 > LLM_GENERATION_MAX_ALLOWED_TIMEOUT_MS) {
16471
- logger18.warn(
17506
+ logger20.warn(
16472
17507
  {
16473
17508
  requestedTimeout: structuredModelSettings.maxDuration * 1e3,
16474
17509
  appliedTimeout: phase2TimeoutMs,
@@ -16491,6 +17526,12 @@ ${output}${structureHintsFormatted}`;
16491
17526
  }
16492
17527
  phase2Messages.push({ role: "user", content: userMessage });
16493
17528
  phase2Messages.push(...reasoningFlow);
17529
+ if (reasoningFlow.length > 0 && reasoningFlow[reasoningFlow.length - 1]?.role === "assistant") {
17530
+ phase2Messages.push({
17531
+ role: "user",
17532
+ content: "Continue with the structured response."
17533
+ });
17534
+ }
16494
17535
  const streamResult = ai.streamObject({
16495
17536
  ...structuredModelSettings,
16496
17537
  messages: phase2Messages,
@@ -16561,6 +17602,12 @@ ${output}${structureHintsFormatted}`;
16561
17602
  }
16562
17603
  phase2Messages.push({ role: "user", content: userMessage });
16563
17604
  phase2Messages.push(...reasoningFlow);
17605
+ if (reasoningFlow.length > 0 && reasoningFlow[reasoningFlow.length - 1]?.role === "assistant") {
17606
+ phase2Messages.push({
17607
+ role: "user",
17608
+ content: "Continue with the structured response."
17609
+ });
17610
+ }
16564
17611
  const structuredResponse = await ai.generateObject(
16565
17612
  withJsonPostProcessing2({
16566
17613
  ...structuredModelSettings,
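Note on the two hunks above: both structured-output paths (streaming via ai.streamObject and non-streaming via ai.generateObject) now append a short user turn, "Continue with the structured response.", whenever the reasoning flow ends on an assistant message — presumably so the prompt passed to the structured generation call does not terminate with an assistant turn. A minimal sketch of that guard, with a hypothetical helper name:

// Illustrative sketch only -- hypothetical helper, not the package's API.
type ChatMsg = { role: "system" | "user" | "assistant"; content: string };

function appendContinuationIfNeeded(phase2Messages: ChatMsg[], reasoningFlow: ChatMsg[]): void {
  const last = reasoningFlow[reasoningFlow.length - 1];
  // Only nudge when the replayed flow ends with an assistant message.
  if (reasoningFlow.length > 0 && last?.role === "assistant") {
    phase2Messages.push({ role: "user", content: "Continue with the structured response." });
  }
}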
@@ -16631,8 +17678,10 @@ ${output}${structureHintsFormatted}`;
16631
17678
  generationType
16632
17679
  });
16633
17680
  }
17681
+ this.currentCompressor = null;
16634
17682
  return formattedResponse;
16635
17683
  } catch (error) {
17684
+ this.currentCompressor = null;
16636
17685
  const errorToThrow = error instanceof Error ? error : new Error(String(error));
16637
17686
  agentsCore.setSpanWithError(span, errorToThrow);
16638
17687
  span.end();
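Note on the hunk above: this.currentCompressor is now reset to null on both the success path (just before returning the formatted response) and the error path (at the top of the catch block), so compressor state from one generation cannot leak into the next. A minimal sketch of the same pattern in a hypothetical wrapper class; a finally block would be an equivalent way to express it, but the diff uses two explicit assignments:

// Illustrative sketch only -- hypothetical class, not the package's API.
class GenerationRunner {
  private currentCompressor: object | null = null;

  async run(generate: () => Promise<string>): Promise<string> {
    this.currentCompressor = {}; // per-request compressor state
    try {
      const formattedResponse = await generate();
      this.currentCompressor = null; // success path
      return formattedResponse;
    } catch (error) {
      this.currentCompressor = null; // error path, before rethrowing
      throw error instanceof Error ? error : new Error(String(error));
    }
  }
}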
@@ -16644,7 +17693,7 @@ ${output}${structureHintsFormatted}`;
16644
17693
  };
16645
17694
 
16646
17695
  // src/agents/generateTaskHandler.ts
16647
- var logger19 = agentsCore.getLogger("generateTaskHandler");
17696
+ var logger21 = agentsCore.getLogger("generateTaskHandler");
16648
17697
  var createTaskHandler = (config, credentialStoreRegistry) => {
16649
17698
  return async (task) => {
16650
17699
  try {
@@ -16762,7 +17811,7 @@ var createTaskHandler = (config, credentialStoreRegistry) => {
16762
17811
  return { ...relation, description: enhancedDescription };
16763
17812
  }
16764
17813
  } catch (error) {
16765
- logger19.warn({ subAgentId: relation.id, error }, "Failed to enhance agent description");
17814
+ logger21.warn({ subAgentId: relation.id, error }, "Failed to enhance agent description");
16766
17815
  }
16767
17816
  return relation;
16768
17817
  })
@@ -16820,7 +17869,7 @@ var createTaskHandler = (config, credentialStoreRegistry) => {
16820
17869
  };
16821
17870
  }
16822
17871
  } catch (error) {
16823
- logger19.warn(
17872
+ logger21.warn(
16824
17873
  { targetAgentId: relation.targetAgentId, error },
16825
17874
  "Failed to enhance team agent description"
16826
17875
  );
@@ -16842,7 +17891,7 @@ var createTaskHandler = (config, credentialStoreRegistry) => {
16842
17891
  );
16843
17892
  if (item.selectedTools && item.selectedTools.length > 0) {
16844
17893
  const selectedToolsSet = new Set(item.selectedTools);
16845
- mcpTool.availableTools = mcpTool.availableTools?.filter((tool3) => selectedToolsSet.has(tool3.name)) || [];
17894
+ mcpTool.availableTools = mcpTool.availableTools?.filter((tool4) => selectedToolsSet.has(tool4.name)) || [];
16846
17895
  }
16847
17896
  return mcpTool;
16848
17897
  })
@@ -16909,7 +17958,7 @@ var createTaskHandler = (config, credentialStoreRegistry) => {
16909
17958
  targetTransferRelations = transferRel;
16910
17959
  targetDelegateRelations = delegateRel;
16911
17960
  } catch (err2) {
16912
- logger19.info(
17961
+ logger21.info(
16913
17962
  {
16914
17963
  agentId: relation.id,
16915
17964
  error: err2?.message || "Unknown error"
@@ -16929,7 +17978,7 @@ var createTaskHandler = (config, credentialStoreRegistry) => {
16929
17978
  if (item.selectedTools && item.selectedTools.length > 0) {
16930
17979
  const selectedToolsSet = new Set(item.selectedTools);
16931
17980
  mcpTool.availableTools = mcpTool.availableTools?.filter(
16932
- (tool3) => selectedToolsSet.has(tool3.name)
17981
+ (tool4) => selectedToolsSet.has(tool4.name)
16933
17982
  ) || [];
16934
17983
  }
16935
17984
  return mcpTool;
@@ -17050,7 +18099,7 @@ var createTaskHandler = (config, credentialStoreRegistry) => {
17050
18099
  const taskIdMatch = task.id.match(/^task_([^-]+-[^-]+-\d+)-/);
17051
18100
  if (taskIdMatch) {
17052
18101
  contextId = taskIdMatch[1];
17053
- logger19.info(
18102
+ logger21.info(
17054
18103
  {
17055
18104
  taskId: task.id,
17056
18105
  extractedContextId: contextId,
@@ -17068,7 +18117,7 @@ var createTaskHandler = (config, credentialStoreRegistry) => {
17068
18117
  agent.setDelegationStatus(isDelegation);
17069
18118
  agent.setDelegationId(delegationId);
17070
18119
  if (isDelegation) {
17071
- logger19.info(
18120
+ logger21.info(
17072
18121
  { subAgentId: config.subAgentId, taskId: task.id, delegationId },
17073
18122
  "Delegated agent - streaming disabled"
17074
18123
  );
@@ -17105,7 +18154,7 @@ var createTaskHandler = (config, credentialStoreRegistry) => {
17105
18154
  const toolResult = allToolResults.find(
17106
18155
  (result) => result.toolCallId === toolCall.toolCallId
17107
18156
  );
17108
- logger19.info(
18157
+ logger21.info(
17109
18158
  {
17110
18159
  toolCallName: toolCall.toolName,
17111
18160
  toolCallId: toolCall.toolCallId,
@@ -17122,7 +18171,7 @@ var createTaskHandler = (config, credentialStoreRegistry) => {
17122
18171
  const transferReason = responseText || allThoughts[allThoughts.length - 1]?.text || "Agent requested transfer. No reason provided.";
17123
18172
  if (toolResult?.output && isValidTransferResult(toolResult.output)) {
17124
18173
  const transferResult = toolResult.output;
17125
- logger19.info(
18174
+ logger21.info(
17126
18175
  {
17127
18176
  validationPassed: true,
17128
18177
  transferResult,
@@ -17139,7 +18188,7 @@ var createTaskHandler = (config, credentialStoreRegistry) => {
17139
18188
  reason: transferReason,
17140
18189
  original_message: userMessage
17141
18190
  };
17142
- logger19.info(
18191
+ logger21.info(
17143
18192
  {
17144
18193
  artifactData,
17145
18194
  artifactDataKeys: Object.keys(artifactData)
@@ -17164,7 +18213,7 @@ var createTaskHandler = (config, credentialStoreRegistry) => {
17164
18213
  ]
17165
18214
  };
17166
18215
  }
17167
- logger19.warn(
18216
+ logger21.warn(
17168
18217
  {
17169
18218
  hasToolResult: !!toolResult,
17170
18219
  hasOutput: !!toolResult?.output,
@@ -17258,7 +18307,7 @@ var createTaskHandlerConfig = async (params) => {
17258
18307
  // src/data/agents.ts
17259
18308
  init_logger();
17260
18309
  init_dbClient();
17261
- var logger20 = agentsCore.getLogger("agents");
18310
+ var logger22 = agentsCore.getLogger("agents");
17262
18311
  function createAgentCard({
17263
18312
  dbAgent,
17264
18313
  baseUrl
@@ -17369,7 +18418,7 @@ async function getRegisteredAgent(params) {
17369
18418
  const agent = await agentsCore.getAgentWithDefaultSubAgent(dbClient_default)({
17370
18419
  scopes: { tenantId, projectId, agentId }
17371
18420
  });
17372
- logger20.info({ agent }, "agent with default sub agent");
18421
+ logger22.info({ agent }, "agent with default sub agent");
17373
18422
  if (!agent || !agent.defaultSubAgent) {
17374
18423
  return null;
17375
18424
  }
@@ -17403,7 +18452,7 @@ async function getRegisteredAgent(params) {
17403
18452
  init_dbClient();
17404
18453
  init_logger();
17405
18454
  var app = new zodOpenapi.OpenAPIHono();
17406
- var logger21 = agentsCore.getLogger("agents");
18455
+ var logger23 = agentsCore.getLogger("agents");
17407
18456
  app.openapi(
17408
18457
  zodOpenapi.createRoute({
17409
18458
  method: "get",
@@ -17441,7 +18490,7 @@ app.openapi(
17441
18490
  tracestate: c2.req.header("tracestate"),
17442
18491
  baggage: c2.req.header("baggage")
17443
18492
  };
17444
- logger21.info(
18493
+ logger23.info(
17445
18494
  {
17446
18495
  otelHeaders,
17447
18496
  path: c2.req.path,
@@ -17451,8 +18500,8 @@ app.openapi(
17451
18500
  );
17452
18501
  const executionContext = agentsCore.getRequestExecutionContext(c2);
17453
18502
  const { tenantId, projectId, agentId, subAgentId } = executionContext;
17454
- logger21.info({ executionContext }, "executionContext");
17455
- logger21.info(
18503
+ logger23.info({ executionContext }, "executionContext");
18504
+ logger23.info(
17456
18505
  {
17457
18506
  message: "getRegisteredAgent (agent-level)",
17458
18507
  tenantId,
@@ -17469,7 +18518,7 @@ app.openapi(
17469
18518
  credentialStoreRegistry: credentialStores,
17470
18519
  sandboxConfig
17471
18520
  });
17472
- logger21.info({ agent }, "agent registered: well-known agent.json");
18521
+ logger23.info({ agent }, "agent registered: well-known agent.json");
17473
18522
  if (!agent) {
17474
18523
  throw agentsCore.createApiError({
17475
18524
  code: "not_found",
@@ -17485,7 +18534,7 @@ app.post("/a2a", async (c2) => {
17485
18534
  tracestate: c2.req.header("tracestate"),
17486
18535
  baggage: c2.req.header("baggage")
17487
18536
  };
17488
- logger21.info(
18537
+ logger23.info(
17489
18538
  {
17490
18539
  otelHeaders,
17491
18540
  path: c2.req.path,
@@ -17496,7 +18545,7 @@ app.post("/a2a", async (c2) => {
17496
18545
  const executionContext = agentsCore.getRequestExecutionContext(c2);
17497
18546
  const { tenantId, projectId, agentId, subAgentId } = executionContext;
17498
18547
  if (subAgentId) {
17499
- logger21.info(
18548
+ logger23.info(
17500
18549
  {
17501
18550
  message: "a2a (agent-level)",
17502
18551
  tenantId,
@@ -17525,7 +18574,7 @@ app.post("/a2a", async (c2) => {
17525
18574
  }
17526
18575
  return a2aHandler(c2, agent2);
17527
18576
  }
17528
- logger21.info(
18577
+ logger23.info(
17529
18578
  {
17530
18579
  message: "a2a (agent-level)",
17531
18580
  tenantId,
@@ -17610,14 +18659,14 @@ function extractTransferData(task) {
17610
18659
  }
17611
18660
 
17612
18661
  // src/a2a/transfer.ts
17613
- var logger22 = agentsCore.getLogger("Transfer");
18662
+ var logger24 = agentsCore.getLogger("Transfer");
17614
18663
  async function executeTransfer({
17615
18664
  tenantId,
17616
18665
  threadId,
17617
18666
  projectId,
17618
18667
  targetSubAgentId
17619
18668
  }) {
17620
- logger22.info(
18669
+ logger24.info(
17621
18670
  {
17622
18671
  targetAgent: targetSubAgentId,
17623
18672
  threadId,
@@ -17632,12 +18681,12 @@ async function executeTransfer({
17632
18681
  threadId,
17633
18682
  subAgentId: targetSubAgentId
17634
18683
  });
17635
- logger22.info(
18684
+ logger24.info(
17636
18685
  { targetAgent: targetSubAgentId, threadId },
17637
18686
  "Successfully updated active_sub_agent_id in database"
17638
18687
  );
17639
18688
  } catch (error) {
17640
- logger22.error(
18689
+ logger24.error(
17641
18690
  { error, targetAgent: targetSubAgentId, threadId },
17642
18691
  "Failed to update active_sub_agent_id"
17643
18692
  );
@@ -18209,7 +19258,7 @@ function createBufferingStreamHelper() {
18209
19258
  var createMCPStreamHelper = createBufferingStreamHelper;
18210
19259
 
18211
19260
  // src/handlers/executionHandler.ts
18212
- var logger23 = agentsCore.getLogger("ExecutionHandler");
19261
+ var logger25 = agentsCore.getLogger("ExecutionHandler");
18213
19262
  var ExecutionHandler = class {
18214
19263
  MAX_ERRORS = AGENT_EXECUTION_MAX_CONSECUTIVE_ERRORS;
18215
19264
  /**
@@ -18242,7 +19291,7 @@ var ExecutionHandler = class {
18242
19291
  if (emitOperations) {
18243
19292
  agentSessionManager.enableEmitOperations(requestId2);
18244
19293
  }
18245
- logger23.info(
19294
+ logger25.info(
18246
19295
  { sessionId: requestId2, agentId, conversationId, emitOperations },
18247
19296
  "Created AgentSession for message execution"
18248
19297
  );
@@ -18275,7 +19324,7 @@ var ExecutionHandler = class {
18275
19324
  );
18276
19325
  }
18277
19326
  } catch (modelError) {
18278
- logger23.warn(
19327
+ logger25.warn(
18279
19328
  {
18280
19329
  error: modelError instanceof Error ? modelError.message : "Unknown error",
18281
19330
  agentId
@@ -18290,7 +19339,7 @@ var ExecutionHandler = class {
18290
19339
  }
18291
19340
  }
18292
19341
  } catch (error) {
18293
- logger23.error(
19342
+ logger25.error(
18294
19343
  {
18295
19344
  error: error instanceof Error ? error.message : "Unknown error",
18296
19345
  stack: error instanceof Error ? error.stack : void 0
@@ -18306,7 +19355,7 @@ var ExecutionHandler = class {
18306
19355
  try {
18307
19356
  await sseHelper.writeOperation(agentInitializingOp(requestId2, agentId));
18308
19357
  const taskId = `task_${conversationId}-${requestId2}`;
18309
- logger23.info(
19358
+ logger25.info(
18310
19359
  { taskId, currentAgentId, conversationId, requestId: requestId2 },
18311
19360
  "Attempting to create or reuse existing task"
18312
19361
  );
@@ -18330,7 +19379,7 @@ var ExecutionHandler = class {
18330
19379
  sub_agent_id: currentAgentId
18331
19380
  }
18332
19381
  });
18333
- logger23.info(
19382
+ logger25.info(
18334
19383
  {
18335
19384
  taskId,
18336
19385
  createdTaskMetadata: Array.isArray(task) ? task[0]?.metadata : task?.metadata
@@ -18339,27 +19388,27 @@ var ExecutionHandler = class {
18339
19388
  );
18340
19389
  } catch (error) {
18341
19390
  if (error?.cause?.code === "23505") {
18342
- logger23.info(
19391
+ logger25.info(
18343
19392
  { taskId, error: error.message },
18344
19393
  "Task already exists, fetching existing task"
18345
19394
  );
18346
19395
  const existingTask = await agentsCore.getTask(dbClient_default)({ id: taskId });
18347
19396
  if (existingTask) {
18348
19397
  task = existingTask;
18349
- logger23.info(
19398
+ logger25.info(
18350
19399
  { taskId, existingTask },
18351
19400
  "Successfully reused existing task from race condition"
18352
19401
  );
18353
19402
  } else {
18354
- logger23.error({ taskId, error }, "Task constraint failed but task not found");
19403
+ logger25.error({ taskId, error }, "Task constraint failed but task not found");
18355
19404
  throw error;
18356
19405
  }
18357
19406
  } else {
18358
- logger23.error({ taskId, error }, "Failed to create task due to non-constraint error");
19407
+ logger25.error({ taskId, error }, "Failed to create task due to non-constraint error");
18359
19408
  throw error;
18360
19409
  }
18361
19410
  }
18362
- logger23.debug(
19411
+ logger25.debug(
18363
19412
  {
18364
19413
  timestamp: /* @__PURE__ */ new Date(),
18365
19414
  executionType: "create_initial_task",
@@ -18378,7 +19427,7 @@ var ExecutionHandler = class {
18378
19427
  const maxTransfers = agentConfig?.stopWhen?.transferCountIs ?? agentsCore.AGENT_EXECUTION_TRANSFER_COUNT_DEFAULT;
18379
19428
  while (iterations < maxTransfers) {
18380
19429
  iterations++;
18381
- logger23.info(
19430
+ logger25.info(
18382
19431
  { iterations, currentAgentId, agentId, conversationId, fromSubAgentId },
18383
19432
  `Execution loop iteration ${iterations} with agent ${currentAgentId}, transfer from: ${fromSubAgentId || "none"}`
18384
19433
  );
@@ -18386,10 +19435,10 @@ var ExecutionHandler = class {
18386
19435
  scopes: { tenantId, projectId },
18387
19436
  conversationId
18388
19437
  });
18389
- logger23.info({ activeAgent }, "activeAgent");
19438
+ logger25.info({ activeAgent }, "activeAgent");
18390
19439
  if (activeAgent && activeAgent.activeSubAgentId !== currentAgentId) {
18391
19440
  currentAgentId = activeAgent.activeSubAgentId;
18392
- logger23.info({ currentAgentId }, `Updated current agent to: ${currentAgentId}`);
19441
+ logger25.info({ currentAgentId }, `Updated current agent to: ${currentAgentId}`);
18393
19442
  }
18394
19443
  const agentBaseUrl = `${baseUrl}/agents`;
18395
19444
  const a2aClient = new A2AClient(agentBaseUrl, {
@@ -18430,13 +19479,13 @@ var ExecutionHandler = class {
18430
19479
  });
18431
19480
  if (!messageResponse?.result) {
18432
19481
  errorCount++;
18433
- logger23.error(
19482
+ logger25.error(
18434
19483
  { currentAgentId, iterations, errorCount },
18435
19484
  `No response from agent ${currentAgentId} on iteration ${iterations} (error ${errorCount}/${this.MAX_ERRORS})`
18436
19485
  );
18437
19486
  if (errorCount >= this.MAX_ERRORS) {
18438
19487
  const errorMessage2 = `Maximum error limit (${this.MAX_ERRORS}) reached`;
18439
- logger23.error({ maxErrors: this.MAX_ERRORS, errorCount }, errorMessage2);
19488
+ logger25.error({ maxErrors: this.MAX_ERRORS, errorCount }, errorMessage2);
18440
19489
  await sseHelper.writeOperation(errorOp(errorMessage2, currentAgentId || "system"));
18441
19490
  if (task) {
18442
19491
  await agentsCore.updateTask(dbClient_default)({
@@ -18460,7 +19509,7 @@ var ExecutionHandler = class {
18460
19509
  if (isTransferTask(messageResponse.result)) {
18461
19510
  const transferData = extractTransferData(messageResponse.result);
18462
19511
  if (!transferData) {
18463
- logger23.error(
19512
+ logger25.error(
18464
19513
  { result: messageResponse.result },
18465
19514
  "Transfer detected but no transfer data found"
18466
19515
  );
@@ -18469,7 +19518,7 @@ var ExecutionHandler = class {
18469
19518
  const { targetSubAgentId, fromSubAgentId: transferFromAgent } = transferData;
18470
19519
  const firstArtifact = messageResponse.result.artifacts[0];
18471
19520
  const transferReason = firstArtifact?.parts[1]?.kind === "text" ? firstArtifact.parts[1].text : "Transfer initiated";
18472
- logger23.info({ targetSubAgentId, transferReason, transferFromAgent }, "Transfer response");
19521
+ logger25.info({ targetSubAgentId, transferReason, transferFromAgent }, "Transfer response");
18473
19522
  await agentsCore.createMessage(dbClient_default)({
18474
19523
  id: agentsCore.generateId(),
18475
19524
  tenantId,
@@ -18500,7 +19549,7 @@ var ExecutionHandler = class {
18500
19549
  if (success) {
18501
19550
  fromSubAgentId = currentAgentId;
18502
19551
  currentAgentId = newAgentId;
18503
- logger23.info(
19552
+ logger25.info(
18504
19553
  {
18505
19554
  transferFrom: fromSubAgentId,
18506
19555
  transferTo: currentAgentId,
@@ -18514,7 +19563,7 @@ var ExecutionHandler = class {
18514
19563
  let responseParts = [];
18515
19564
  if (messageResponse.result.streamedContent?.parts) {
18516
19565
  responseParts = messageResponse.result.streamedContent.parts;
18517
- logger23.info(
19566
+ logger25.info(
18518
19567
  { partsCount: responseParts.length },
18519
19568
  "Using streamed content for conversation history"
18520
19569
  );
@@ -18522,7 +19571,7 @@ var ExecutionHandler = class {
18522
19571
  responseParts = messageResponse.result.artifacts?.flatMap(
18523
19572
  (artifact) => artifact.parts || []
18524
19573
  ) || [];
18525
- logger23.info(
19574
+ logger25.info(
18526
19575
  { partsCount: responseParts.length },
18527
19576
  "Using artifacts for conversation history (fallback)"
18528
19577
  );
@@ -18531,7 +19580,7 @@ var ExecutionHandler = class {
18531
19580
  const agentSessionData = agentSessionManager.getSession(requestId2);
18532
19581
  if (agentSessionData) {
18533
19582
  const sessionSummary = agentSessionData.getSummary();
18534
- logger23.info(sessionSummary, "AgentSession data after completion");
19583
+ logger25.info(sessionSummary, "AgentSession data after completion");
18535
19584
  }
18536
19585
  let textContent = "";
18537
19586
  for (const part of responseParts) {
@@ -18585,22 +19634,22 @@ var ExecutionHandler = class {
18585
19634
  }
18586
19635
  });
18587
19636
  const updateTaskEnd = Date.now();
18588
- logger23.info(
19637
+ logger25.info(
18589
19638
  { duration: updateTaskEnd - updateTaskStart },
18590
19639
  "Completed updateTask operation"
18591
19640
  );
18592
19641
  await sseHelper.writeOperation(completionOp(currentAgentId, iterations));
18593
19642
  await sseHelper.complete();
18594
- logger23.info({}, "Ending AgentSession and cleaning up");
19643
+ logger25.info({}, "Ending AgentSession and cleaning up");
18595
19644
  await agentSessionManager.endSession(requestId2);
18596
- logger23.info({}, "Cleaning up streamHelper");
19645
+ logger25.info({}, "Cleaning up streamHelper");
18597
19646
  unregisterStreamHelper(requestId2);
18598
19647
  let response;
18599
19648
  if (sseHelper instanceof BufferingStreamHelper) {
18600
19649
  const captured = sseHelper.getCapturedResponse();
18601
19650
  response = captured.text || "No response content";
18602
19651
  }
18603
- logger23.info({}, "ExecutionHandler returning success");
19652
+ logger25.info({}, "ExecutionHandler returning success");
18604
19653
  return { success: true, iterations, response };
18605
19654
  } catch (error) {
18606
19655
  agentsCore.setSpanWithError(span, error instanceof Error ? error : new Error(String(error)));
@@ -18611,13 +19660,13 @@ var ExecutionHandler = class {
18611
19660
  });
18612
19661
  }
18613
19662
  errorCount++;
18614
- logger23.warn(
19663
+ logger25.warn(
18615
19664
  { iterations, errorCount },
18616
19665
  `No valid response or transfer on iteration ${iterations} (error ${errorCount}/${this.MAX_ERRORS})`
18617
19666
  );
18618
19667
  if (errorCount >= this.MAX_ERRORS) {
18619
19668
  const errorMessage2 = `Maximum error limit (${this.MAX_ERRORS}) reached`;
18620
- logger23.error({ maxErrors: this.MAX_ERRORS, errorCount }, errorMessage2);
19669
+ logger25.error({ maxErrors: this.MAX_ERRORS, errorCount }, errorMessage2);
18621
19670
  await sseHelper.writeOperation(errorOp(errorMessage2, currentAgentId || "system"));
18622
19671
  if (task) {
18623
19672
  await agentsCore.updateTask(dbClient_default)({
@@ -18638,7 +19687,7 @@ var ExecutionHandler = class {
18638
19687
  }
18639
19688
  }
18640
19689
  const errorMessage = `Maximum transfer limit (${maxTransfers}) reached without completion`;
18641
- logger23.error({ maxTransfers, iterations }, errorMessage);
19690
+ logger25.error({ maxTransfers, iterations }, errorMessage);
18642
19691
  await sseHelper.writeOperation(errorOp(errorMessage, currentAgentId || "system"));
18643
19692
  if (task) {
18644
19693
  await agentsCore.updateTask(dbClient_default)({
@@ -18657,7 +19706,7 @@ var ExecutionHandler = class {
18657
19706
  unregisterStreamHelper(requestId2);
18658
19707
  return { success: false, error: errorMessage, iterations };
18659
19708
  } catch (error) {
18660
- logger23.error({ error }, "Error in execution handler");
19709
+ logger25.error({ error }, "Error in execution handler");
18661
19710
  const errorMessage = error instanceof Error ? error.message : "Unknown execution error";
18662
19711
  await sseHelper.writeOperation(
18663
19712
  errorOp(`Execution error: ${errorMessage}`, currentAgentId || "system")
@@ -18685,7 +19734,7 @@ var ExecutionHandler = class {
18685
19734
  // src/routes/chat.ts
18686
19735
  init_logger();
18687
19736
  var app2 = new zodOpenapi.OpenAPIHono();
18688
- var logger24 = agentsCore.getLogger("completionsHandler");
19737
+ var logger26 = agentsCore.getLogger("completionsHandler");
18689
19738
  var chatCompletionsRoute = zodOpenapi.createRoute({
18690
19739
  method: "post",
18691
19740
  path: "/completions",
@@ -18803,7 +19852,7 @@ app2.openapi(chatCompletionsRoute, async (c2) => {
18803
19852
  tracestate: c2.req.header("tracestate"),
18804
19853
  baggage: c2.req.header("baggage")
18805
19854
  };
18806
- logger24.info(
19855
+ logger26.info(
18807
19856
  {
18808
19857
  otelHeaders,
18809
19858
  path: c2.req.path,
@@ -18912,7 +19961,7 @@ app2.openapi(chatCompletionsRoute, async (c2) => {
18912
19961
  dbClient: dbClient_default,
18913
19962
  credentialStores
18914
19963
  });
18915
- logger24.info(
19964
+ logger26.info(
18916
19965
  {
18917
19966
  tenantId,
18918
19967
  projectId,
@@ -18960,7 +20009,7 @@ app2.openapi(chatCompletionsRoute, async (c2) => {
18960
20009
  try {
18961
20010
  const sseHelper = createSSEStreamHelper(stream2, requestId2, timestamp);
18962
20011
  await sseHelper.writeRole();
18963
- logger24.info({ subAgentId }, "Starting execution");
20012
+ logger26.info({ subAgentId }, "Starting execution");
18964
20013
  const emitOperationsHeader = c2.req.header("x-emit-operations");
18965
20014
  const emitOperations = emitOperationsHeader === "true";
18966
20015
  const executionHandler = new ExecutionHandler();
@@ -18973,7 +20022,7 @@ app2.openapi(chatCompletionsRoute, async (c2) => {
18973
20022
  sseHelper,
18974
20023
  emitOperations
18975
20024
  });
18976
- logger24.info(
20025
+ logger26.info(
18977
20026
  { result },
18978
20027
  `Execution completed: ${result.success ? "success" : "failed"} after ${result.iterations} iterations`
18979
20028
  );
@@ -18987,7 +20036,7 @@ app2.openapi(chatCompletionsRoute, async (c2) => {
18987
20036
  }
18988
20037
  await sseHelper.complete();
18989
20038
  } catch (error) {
18990
- logger24.error(
20039
+ logger26.error(
18991
20040
  {
18992
20041
  error: error instanceof Error ? error.message : error,
18993
20042
  stack: error instanceof Error ? error.stack : void 0
@@ -19004,13 +20053,13 @@ app2.openapi(chatCompletionsRoute, async (c2) => {
19004
20053
  );
19005
20054
  await sseHelper.complete();
19006
20055
  } catch (streamError) {
19007
- logger24.error({ streamError }, "Failed to write error to stream");
20056
+ logger26.error({ streamError }, "Failed to write error to stream");
19008
20057
  }
19009
20058
  }
19010
20059
  });
19011
20060
  });
19012
20061
  } catch (error) {
19013
- logger24.error(
20062
+ logger26.error(
19014
20063
  {
19015
20064
  error: error instanceof Error ? error.message : error,
19016
20065
  stack: error instanceof Error ? error.stack : void 0
@@ -19038,7 +20087,7 @@ var chat_default = app2;
19038
20087
  init_dbClient();
19039
20088
  init_logger();
19040
20089
  var app3 = new zodOpenapi.OpenAPIHono();
19041
- var logger25 = agentsCore.getLogger("chatDataStream");
20090
+ var logger27 = agentsCore.getLogger("chatDataStream");
19042
20091
  var chatDataStreamRoute = zodOpenapi.createRoute({
19043
20092
  method: "post",
19044
20093
  path: "/chat",
@@ -19165,7 +20214,7 @@ app3.openapi(chatDataStreamRoute, async (c2) => {
19165
20214
  });
19166
20215
  const lastUserMessage = body2.messages.filter((m4) => m4.role === "user").slice(-1)[0];
19167
20216
  const userText = typeof lastUserMessage?.content === "string" ? lastUserMessage.content : lastUserMessage?.parts?.map((p4) => p4.text).join("") || "";
19168
- logger25.info({ userText, lastUserMessage }, "userText");
20217
+ logger27.info({ userText, lastUserMessage }, "userText");
19169
20218
  const messageSpan = api.trace.getActiveSpan();
19170
20219
  if (messageSpan) {
19171
20220
  messageSpan.setAttributes({
@@ -19248,7 +20297,7 @@ app3.openapi(chatDataStreamRoute, async (c2) => {
19248
20297
  await streamHelper.writeOperation(errorOp("Unable to process request", "system"));
19249
20298
  }
19250
20299
  } catch (err2) {
19251
- logger25.error({ err: err2 }, "Streaming error");
20300
+ logger27.error({ err: err2 }, "Streaming error");
19252
20301
  await streamHelper.writeOperation(errorOp("Internal server error", "system"));
19253
20302
  } finally {
19254
20303
  if ("cleanup" in streamHelper && typeof streamHelper.cleanup === "function") {
@@ -19270,7 +20319,7 @@ app3.openapi(chatDataStreamRoute, async (c2) => {
19270
20319
  );
19271
20320
  });
19272
20321
  } catch (error) {
19273
- logger25.error(
20322
+ logger27.error(
19274
20323
  {
19275
20324
  error,
19276
20325
  errorMessage: error instanceof Error ? error.message : String(error),
@@ -19359,7 +20408,7 @@ app3.openapi(toolApprovalRoute, async (c2) => {
19359
20408
  const { tenantId, projectId } = executionContext;
19360
20409
  const requestBody = await c2.req.json();
19361
20410
  const { conversationId, toolCallId, approved, reason } = requestBody;
19362
- logger25.info(
20411
+ logger27.info(
19363
20412
  {
19364
20413
  conversationId,
19365
20414
  toolCallId,
@@ -19388,7 +20437,7 @@ app3.openapi(toolApprovalRoute, async (c2) => {
19388
20437
  span.setStatus({ code: 1, message: "Tool call not found" });
19389
20438
  return c2.json({ error: "Tool call not found or already processed" }, 404);
19390
20439
  }
19391
- logger25.info({ conversationId, toolCallId, approved }, "Tool approval processed successfully");
20440
+ logger27.info({ conversationId, toolCallId, approved }, "Tool approval processed successfully");
19392
20441
  span.setStatus({ code: 1, message: "Success" });
19393
20442
  return c2.json({
19394
20443
  success: true,
@@ -19396,7 +20445,7 @@ app3.openapi(toolApprovalRoute, async (c2) => {
19396
20445
  });
19397
20446
  } catch (error) {
19398
20447
  const errorMessage = error instanceof Error ? error.message : "Unknown error";
19399
- logger25.error(
20448
+ logger27.error(
19400
20449
  {
19401
20450
  error: errorMessage,
19402
20451
  stack: error instanceof Error ? error.stack : void 0
@@ -19421,7 +20470,7 @@ var chatDataStream_default = app3;
19421
20470
  // src/routes/mcp.ts
19422
20471
  init_dbClient();
19423
20472
  init_logger();
19424
- var logger26 = agentsCore.getLogger("mcp");
20473
+ var logger28 = agentsCore.getLogger("mcp");
19425
20474
  var MockResponseSingleton = class _MockResponseSingleton {
19426
20475
  static instance;
19427
20476
  mockRes;
@@ -19475,21 +20524,21 @@ var createSpoofInitMessage = (mcpProtocolVersion) => ({
19475
20524
  id: 0
19476
20525
  });
19477
20526
  var spoofTransportInitialization = async (transport, req, sessionId, mcpProtocolVersion) => {
19478
- logger26.info({ sessionId }, "Spoofing initialization message to set transport state");
20527
+ logger28.info({ sessionId }, "Spoofing initialization message to set transport state");
19479
20528
  const spoofInitMessage = createSpoofInitMessage(mcpProtocolVersion);
19480
20529
  const mockRes = MockResponseSingleton.getInstance().getMockResponse();
19481
20530
  try {
19482
20531
  await transport.handleRequest(req, mockRes, spoofInitMessage);
19483
- logger26.info({ sessionId }, "Successfully spoofed initialization");
20532
+ logger28.info({ sessionId }, "Successfully spoofed initialization");
19484
20533
  } catch (spoofError) {
19485
- logger26.warn({ sessionId, error: spoofError }, "Spoof initialization failed, continuing anyway");
20534
+ logger28.warn({ sessionId, error: spoofError }, "Spoof initialization failed, continuing anyway");
19486
20535
  }
19487
20536
  };
19488
20537
  var validateSession = async (req, res, body2, tenantId, projectId, agentId) => {
19489
20538
  const sessionId = req.headers["mcp-session-id"];
19490
- logger26.info({ sessionId }, "Received MCP session ID");
20539
+ logger28.info({ sessionId }, "Received MCP session ID");
19491
20540
  if (!sessionId) {
19492
- logger26.info({ body: body2 }, "Missing session ID");
20541
+ logger28.info({ body: body2 }, "Missing session ID");
19493
20542
  res.writeHead(400).end(
19494
20543
  JSON.stringify({
19495
20544
  jsonrpc: "2.0",
@@ -19516,7 +20565,7 @@ var validateSession = async (req, res, body2, tenantId, projectId, agentId) => {
19516
20565
  scopes: { tenantId, projectId },
19517
20566
  conversationId: sessionId
19518
20567
  });
19519
- logger26.info(
20568
+ logger28.info(
19520
20569
  {
19521
20570
  sessionId,
19522
20571
  conversationFound: !!conversation,
@@ -19527,7 +20576,7 @@ var validateSession = async (req, res, body2, tenantId, projectId, agentId) => {
19527
20576
  "Conversation lookup result"
19528
20577
  );
19529
20578
  if (!conversation || conversation.metadata?.sessionData?.sessionType !== "mcp" || conversation.metadata?.sessionData?.agentId !== agentId) {
19530
- logger26.info(
20579
+ logger28.info(
19531
20580
  { sessionId, conversationId: conversation?.id },
19532
20581
  "MCP session not found or invalid"
19533
20582
  );
@@ -19588,7 +20637,7 @@ var executeAgentQuery = async (executionContext, conversationId, query, defaultS
19588
20637
  requestId: requestId2,
19589
20638
  sseHelper: mcpStreamHelper
19590
20639
  });
19591
- logger26.info(
20640
+ logger28.info(
19592
20641
  { result },
19593
20642
  `Execution completed: ${result.success ? "success" : "failed"} after ${result.iterations} iterations`
19594
20643
  );
@@ -19672,7 +20721,7 @@ var getServer = async (headers2, executionContext, conversationId, credentialSto
19672
20721
  dbClient: dbClient_default,
19673
20722
  credentialStores
19674
20723
  });
19675
- logger26.info(
20724
+ logger28.info(
19676
20725
  {
19677
20726
  tenantId,
19678
20727
  projectId,
@@ -19734,7 +20783,7 @@ var validateRequestParameters = (c2) => {
19734
20783
  };
19735
20784
  var handleInitializationRequest = async (body2, executionContext, validatedContext, req, res, c2, credentialStores) => {
19736
20785
  const { tenantId, projectId, agentId } = executionContext;
19737
- logger26.info({ body: body2 }, "Received initialization request");
20786
+ logger28.info({ body: body2 }, "Received initialization request");
19738
20787
  const sessionId = agentsCore.getConversationId();
19739
20788
  const activeSpan = api.trace.getActiveSpan();
19740
20789
  if (activeSpan) {
@@ -19790,7 +20839,7 @@ var handleInitializationRequest = async (body2, executionContext, validatedConte
19790
20839
  }
19791
20840
  }
19792
20841
  });
19793
- logger26.info(
20842
+ logger28.info(
19794
20843
  { sessionId, conversationId: conversation.id },
19795
20844
  "Created MCP session as conversation"
19796
20845
  );
@@ -19799,9 +20848,9 @@ var handleInitializationRequest = async (body2, executionContext, validatedConte
19799
20848
  });
19800
20849
  const server = await getServer(validatedContext, executionContext, sessionId, credentialStores);
19801
20850
  await server.connect(transport);
19802
- logger26.info({ sessionId }, "Server connected for initialization");
20851
+ logger28.info({ sessionId }, "Server connected for initialization");
19803
20852
  res.setHeader("Mcp-Session-Id", sessionId);
19804
- logger26.info(
20853
+ logger28.info(
19805
20854
  {
19806
20855
  sessionId,
19807
20856
  bodyMethod: body2?.method,
@@ -19810,7 +20859,7 @@ var handleInitializationRequest = async (body2, executionContext, validatedConte
19810
20859
  "About to handle initialization request"
19811
20860
  );
19812
20861
  await transport.handleRequest(req, res, body2);
19813
- logger26.info({ sessionId }, "Successfully handled initialization request");
20862
+ logger28.info({ sessionId }, "Successfully handled initialization request");
19814
20863
  return fetchToNode.toFetchResponse(res);
19815
20864
  });
19816
20865
  };
@@ -19837,8 +20886,8 @@ var handleExistingSessionRequest = async (body2, executionContext, validatedCont
19837
20886
  sessionId,
19838
20887
  conversation.metadata?.session_data?.mcpProtocolVersion
19839
20888
  );
19840
- logger26.info({ sessionId }, "Server connected and transport initialized");
19841
- logger26.info(
20889
+ logger28.info({ sessionId }, "Server connected and transport initialized");
20890
+ logger28.info(
19842
20891
  {
19843
20892
  sessionId,
19844
20893
  bodyKeys: Object.keys(body2 || {}),
@@ -19852,9 +20901,9 @@ var handleExistingSessionRequest = async (body2, executionContext, validatedCont
19852
20901
  );
19853
20902
  try {
19854
20903
  await transport.handleRequest(req, res, body2);
19855
- logger26.info({ sessionId }, "Successfully handled MCP request");
20904
+ logger28.info({ sessionId }, "Successfully handled MCP request");
19856
20905
  } catch (transportError) {
19857
- logger26.error(
20906
+ logger28.error(
19858
20907
  {
19859
20908
  sessionId,
19860
20909
  error: transportError,
@@ -19905,13 +20954,13 @@ app4.openapi(
19905
20954
  }
19906
20955
  const { executionContext } = paramValidation;
19907
20956
  const body2 = c2.get("requestBody") || {};
19908
- logger26.info({ body: body2, bodyKeys: Object.keys(body2 || {}) }, "Parsed request body");
20957
+ logger28.info({ body: body2, bodyKeys: Object.keys(body2 || {}) }, "Parsed request body");
19909
20958
  const isInitRequest = body2.method === "initialize";
19910
20959
  const { req, res } = fetchToNode.toReqRes(c2.req.raw);
19911
20960
  const validatedContext = c2.get("validatedContext") || {};
19912
20961
  const credentialStores = c2.get("credentialStores");
19913
- logger26.info({ validatedContext }, "Validated context");
19914
- logger26.info({ req }, "request");
20962
+ logger28.info({ validatedContext }, "Validated context");
20963
+ logger28.info({ req }, "request");
19915
20964
  if (isInitRequest) {
19916
20965
  return await handleInitializationRequest(
19917
20966
  body2,
@@ -19932,7 +20981,7 @@ app4.openapi(
19932
20981
  credentialStores
19933
20982
  );
19934
20983
  } catch (e) {
19935
- logger26.error(
20984
+ logger28.error(
19936
20985
  {
19937
20986
  error: e instanceof Error ? e.message : e,
19938
20987
  stack: e instanceof Error ? e.stack : void 0
@@ -19944,7 +20993,7 @@ app4.openapi(
19944
20993
  }
19945
20994
  );
19946
20995
  app4.get("/", async (c2) => {
19947
- logger26.info({}, "Received GET MCP request");
20996
+ logger28.info({}, "Received GET MCP request");
19948
20997
  return c2.json(
19949
20998
  {
19950
20999
  jsonrpc: "2.0",
@@ -19958,7 +21007,7 @@ app4.get("/", async (c2) => {
19958
21007
  );
19959
21008
  });
19960
21009
  app4.delete("/", async (c2) => {
19961
- logger26.info({}, "Received DELETE MCP request");
21010
+ logger28.info({}, "Received DELETE MCP request");
19962
21011
  return c2.json(
19963
21012
  {
19964
21013
  jsonrpc: "2.0",
@@ -19971,7 +21020,7 @@ app4.delete("/", async (c2) => {
19971
21020
  var mcp_default = app4;
19972
21021
 
19973
21022
  // src/app.ts
19974
- var logger27 = agentsCore.getLogger("agents-run-api");
21023
+ var logger29 = agentsCore.getLogger("agents-run-api");
19975
21024
  function createExecutionHono(serverConfig, credentialStores, sandboxConfig) {
19976
21025
  const app6 = new zodOpenapi.OpenAPIHono();
19977
21026
  app6.use("*", otel.otel());
@@ -19990,7 +21039,7 @@ function createExecutionHono(serverConfig, credentialStores, sandboxConfig) {
19990
21039
  const body2 = await c2.req.json();
19991
21040
  c2.set("requestBody", body2);
19992
21041
  } catch (error) {
19993
- logger27.debug({ error }, "Failed to parse JSON body, continuing without parsed body");
21042
+ logger29.debug({ error }, "Failed to parse JSON body, continuing without parsed body");
19994
21043
  }
19995
21044
  }
19996
21045
  return next();
@@ -20041,8 +21090,8 @@ function createExecutionHono(serverConfig, credentialStores, sandboxConfig) {
20041
21090
  if (!isExpectedError) {
20042
21091
  const errorMessage = err2 instanceof Error ? err2.message : String(err2);
20043
21092
  const errorStack = err2 instanceof Error ? err2.stack : void 0;
20044
- if (logger27) {
20045
- logger27.error(
21093
+ if (logger29) {
21094
+ logger29.error(
20046
21095
  {
20047
21096
  error: err2,
20048
21097
  message: errorMessage,
@@ -20054,8 +21103,8 @@ function createExecutionHono(serverConfig, credentialStores, sandboxConfig) {
20054
21103
  );
20055
21104
  }
20056
21105
  } else {
20057
- if (logger27) {
20058
- logger27.error(
21106
+ if (logger29) {
21107
+ logger29.error(
20059
21108
  {
20060
21109
  error: err2,
20061
21110
  path: c2.req.path,
@@ -20072,8 +21121,8 @@ function createExecutionHono(serverConfig, credentialStores, sandboxConfig) {
20072
21121
  const response = err2.getResponse();
20073
21122
  return response;
20074
21123
  } catch (responseError) {
20075
- if (logger27) {
20076
- logger27.error({ error: responseError }, "Error while handling HTTPException response");
21124
+ if (logger29) {
21125
+ logger29.error({ error: responseError }, "Error while handling HTTPException response");
20077
21126
  }
20078
21127
  }
20079
21128
  }
@@ -20107,7 +21156,7 @@ function createExecutionHono(serverConfig, credentialStores, sandboxConfig) {
20107
21156
  app6.use("*", async (c2, next) => {
20108
21157
  const executionContext = c2.get("executionContext");
20109
21158
  if (!executionContext) {
20110
- logger27.debug({}, "Empty execution context");
21159
+ logger29.debug({}, "Empty execution context");
20111
21160
  return next();
20112
21161
  }
20113
21162
  const { tenantId, projectId, agentId } = executionContext;
@@ -20116,7 +21165,7 @@ function createExecutionHono(serverConfig, credentialStores, sandboxConfig) {
20116
21165
  if (requestBody) {
20117
21166
  conversationId = requestBody.conversationId;
20118
21167
  if (!conversationId) {
20119
- logger27.debug({ requestBody }, "No conversation ID found in request body");
21168
+ logger29.debug({ requestBody }, "No conversation ID found in request body");
20120
21169
  }
20121
21170
  }
20122
21171
  const entries = Object.fromEntries(
@@ -20131,7 +21180,7 @@ function createExecutionHono(serverConfig, credentialStores, sandboxConfig) {
20131
21180
  })
20132
21181
  );
20133
21182
  if (!Object.keys(entries).length) {
20134
- logger27.debug({}, "Empty entries for baggage");
21183
+ logger29.debug({}, "Empty entries for baggage");
20135
21184
  return next();
20136
21185
  }
20137
21186
  const bag = Object.entries(entries).reduce(