@inkeep/agents-run-api 0.2.0 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/dist/index.cjs +538 -458
  2. package/dist/index.js +538 -458
  3. package/package.json +3 -2
package/dist/index.cjs CHANGED
@@ -30,6 +30,7 @@ var destr = require('destr');
  var traverse = require('traverse');
  var ai = require('ai');
  var anthropic = require('@ai-sdk/anthropic');
+ var google = require('@ai-sdk/google');
  var openai = require('@ai-sdk/openai');
  var jmespath = require('jmespath');
  var mcp_js = require('@modelcontextprotocol/sdk/server/mcp.js');
@@ -461,6 +462,7 @@ var apiKeyAuth = () => factory.createMiddleware(async (c, next) => {
  return;
  } else if (apiKey) {
  const executionContext = await extractContextFromApiKey(apiKey);
+ executionContext.agentId = agentId;
  c.set("executionContext", executionContext);
  logger.info({}, "API key authenticated successfully");
  await next();
@@ -478,12 +480,14 @@ var apiKeyAuth = () => factory.createMiddleware(async (c, next) => {
  }
  try {
  const executionContext = await extractContextFromApiKey(apiKey);
+ executionContext.agentId = agentId;
  c.set("executionContext", executionContext);
  logger.debug(
  {
  tenantId: executionContext.tenantId,
  projectId: executionContext.projectId,
- graphId: executionContext.graphId
+ graphId: executionContext.graphId,
+ agentId: executionContext.agentId
  },
  "API key authenticated successfully"
  );
@@ -1156,6 +1160,128 @@ async function handleTasksResubscribe(c, agent, request) {
  });
  }
  }
+ init_dbClient();
+ agentsCore.getLogger("agents");
+ function createAgentCard({
+ dbAgent,
+ baseUrl
+ }) {
+ const description = dbAgent.description || "AI Agent";
+ return {
+ name: dbAgent.name,
+ description,
+ url: baseUrl ? `${baseUrl}/a2a` : "",
+ version: "1.0.0",
+ capabilities: {
+ streaming: true,
+ // Enable streaming for A2A compliance
+ pushNotifications: false,
+ stateTransitionHistory: false
+ },
+ defaultInputModes: ["text", "text/plain"],
+ defaultOutputModes: ["text", "text/plain"],
+ skills: [],
+ // Add provider info if available
+ ...baseUrl && {
+ provider: {
+ organization: "Inkeep",
+ url: baseUrl
+ }
+ }
+ };
+ }
+ function generateDescriptionWithTransfers(baseDescription, internalRelations, externalRelations) {
+ const transfers = [
+ ...internalRelations.filter((rel) => rel.relationType === "transfer"),
+ ...externalRelations.filter((rel) => rel.relationType === "transfer")
+ ];
+ const delegates = [
+ ...internalRelations.filter((rel) => rel.relationType === "delegate"),
+ ...externalRelations.filter((rel) => rel.relationType === "delegate")
+ ];
+ if (transfers.length === 0 && delegates.length === 0) {
+ return baseDescription;
+ }
+ let enhancedDescription = baseDescription;
+ if (transfers.length > 0) {
+ const transferList = transfers.map((rel) => {
+ const name = rel.externalAgent?.name || rel.name;
+ const desc = rel.externalAgent?.description || rel.description || "";
+ return `- ${name}: ${desc}`;
+ }).join("\n");
+ enhancedDescription += `
+
+ Can transfer to:
+ ${transferList}`;
+ }
+ if (delegates.length > 0) {
+ const delegateList = delegates.map((rel) => {
+ const name = rel.externalAgent?.name || rel.name;
+ const desc = rel.externalAgent?.description || rel.description || "";
+ return `- ${name}: ${desc}`;
+ }).join("\n");
+ enhancedDescription += `
+
+ Can delegate to:
+ ${delegateList}`;
+ }
+ return enhancedDescription;
+ }
+ async function hydrateAgent({
+ dbAgent,
+ graphId,
+ baseUrl,
+ apiKey,
+ credentialStoreRegistry
+ }) {
+ try {
+ const taskHandlerConfig = await createTaskHandlerConfig({
+ tenantId: dbAgent.tenantId,
+ projectId: dbAgent.projectId,
+ graphId,
+ agentId: dbAgent.id,
+ baseUrl,
+ apiKey
+ });
+ const taskHandler = createTaskHandler(taskHandlerConfig, credentialStoreRegistry);
+ const agentCard = createAgentCard({
+ dbAgent,
+ baseUrl
+ });
+ return {
+ agentId: dbAgent.id,
+ tenantId: dbAgent.tenantId,
+ projectId: dbAgent.projectId,
+ graphId,
+ agentCard,
+ taskHandler
+ };
+ } catch (error) {
+ console.error(`\u274C Failed to hydrate agent ${dbAgent.id}:`, error);
+ throw error;
+ }
+ }
+ async function getRegisteredAgent(executionContext, credentialStoreRegistry) {
+ const { tenantId, projectId, graphId, agentId, baseUrl, apiKey } = executionContext;
+ if (!agentId) {
+ throw new Error("Agent ID is required");
+ }
+ const dbAgent = await agentsCore.getAgentById(dbClient_default)({
+ scopes: { tenantId, projectId },
+ agentId
+ });
+ if (!dbAgent) {
+ return null;
+ }
+ const agentFrameworkBaseUrl = `${baseUrl}/agents`;
+ return hydrateAgent({
+ dbAgent,
+ graphId,
+ baseUrl: agentFrameworkBaseUrl,
+ credentialStoreRegistry,
+ apiKey
+ });
+ }
 
  // src/agents/generateTaskHandler.ts
  init_dbClient();
@@ -1199,10 +1325,10 @@ function statusUpdateOp(ctx) {
  ctx
  };
  }
- var logger3 = agentsCore.getLogger("DataComponentSchema");
+ var logger4 = agentsCore.getLogger("DataComponentSchema");
  function jsonSchemaToZod(jsonSchema) {
  if (!jsonSchema || typeof jsonSchema !== "object") {
- logger3.warn({ jsonSchema }, "Invalid JSON schema provided, using string fallback");
+ logger4.warn({ jsonSchema }, "Invalid JSON schema provided, using string fallback");
  return z5.z.string();
  }
  switch (jsonSchema.type) {
@@ -1229,7 +1355,7 @@ function jsonSchemaToZod(jsonSchema) {
  case "null":
  return z5.z.null();
  default:
- logger3.warn(
+ logger4.warn(
  {
  unsupportedType: jsonSchema.type,
  schema: jsonSchema
@@ -1283,8 +1409,40 @@ __publicField(_ArtifactReferenceSchema, "ARTIFACT_PROPS_SCHEMA", {
  required: ["artifact_id", "task_id"]
  });
  var ArtifactReferenceSchema = _ArtifactReferenceSchema;
- var logger4 = agentsCore.getLogger("ModelFactory");
+ var logger5 = agentsCore.getLogger("ModelFactory");
  var _ModelFactory = class _ModelFactory {
+ /**
+ * Create a provider instance with custom configuration
+ */
+ static createProvider(provider, config2) {
+ switch (provider) {
+ case "anthropic":
+ return anthropic.createAnthropic(config2);
+ case "openai":
+ return openai.createOpenAI(config2);
+ case "google":
+ return google.createGoogleGenerativeAI(config2);
+ default:
+ throw new Error(`Unsupported provider: ${provider}`);
+ }
+ }
+ /**
+ * Extract provider configuration from providerOptions
+ * Only includes settings that go to the provider constructor (baseURL, apiKey, etc.)
+ */
+ static extractProviderConfig(providerOptions) {
+ if (!providerOptions) {
+ return {};
+ }
+ const providerConfig = {};
+ if (providerOptions.baseUrl || providerOptions.baseURL) {
+ providerConfig.baseURL = providerOptions.baseUrl || providerOptions.baseURL;
+ }
+ if (providerOptions.gateway) {
+ Object.assign(providerConfig, providerOptions.gateway);
+ }
+ return providerConfig;
+ }
  /**
  * Create a language model instance from configuration
  * Throws error if no config provided - models must be configured at project level
@@ -1298,7 +1456,7 @@ var _ModelFactory = class _ModelFactory {
  const modelSettings = config2;
  const modelString = modelSettings.model.trim();
  const { provider, modelName } = _ModelFactory.parseModelString(modelString);
- logger4.debug(
+ logger5.debug(
  {
  provider,
  model: modelName,
@@ -1307,49 +1465,40 @@ var _ModelFactory = class _ModelFactory {
  },
  "Creating language model from config"
  );
- try {
- switch (provider) {
- case "anthropic":
- return _ModelFactory.createAnthropicModel(modelName, modelSettings.providerOptions);
- case "openai":
- return _ModelFactory.createOpenAIModel(modelName, modelSettings.providerOptions);
- default:
- throw new Error(
- `Unsupported provider: ${provider}. Supported providers are: ${_ModelFactory.SUPPORTED_PROVIDERS.join(", ")}`
- );
- }
- } catch (error) {
- logger4.error(
- {
- provider,
- model: modelName,
- error: error instanceof Error ? error.message : "Unknown error"
- },
- "Failed to create model"
- );
- throw new Error(
- `Failed to create model ${modelString}: ${error instanceof Error ? error.message : "Unknown error"}`
- );
+ const providerConfig = _ModelFactory.extractProviderConfig(modelSettings.providerOptions);
+ if (Object.keys(providerConfig).length > 0) {
+ logger5.info({ config: providerConfig }, `Applying custom ${provider} provider configuration`);
+ const customProvider = _ModelFactory.createProvider(provider, providerConfig);
+ return customProvider.languageModel(modelName);
+ }
+ switch (provider) {
+ case "anthropic":
+ return anthropic.anthropic(modelName);
+ case "openai":
+ return openai.openai(modelName);
+ case "google":
+ return google.google(modelName);
+ default:
+ throw new Error(`Unsupported provider: ${provider}`);
  }
  }
  /**
  * Parse model string to extract provider and model name
- * Examples: "anthropic/claude-4-sonnet" -> { provider: "anthropic", modelName: "claude-4-sonnet" }
- * "claude-4-sonnet" -> { provider: "anthropic", modelName: "claude-4-sonnet" } (default to anthropic)
+ * Examples: "anthropic/claude-sonnet-4" -> { provider: "anthropic", modelName: "claude-sonnet-4" }
+ * "claude-sonnet-4" -> { provider: "anthropic", modelName: "claude-sonnet-4" } (default to anthropic)
  */
  static parseModelString(modelString) {
  if (modelString.includes("/")) {
  const [provider, ...modelParts] = modelString.split("/");
  const normalizedProvider = provider.toLowerCase();
  if (!_ModelFactory.SUPPORTED_PROVIDERS.includes(normalizedProvider)) {
- logger4.warn(
+ logger5.error(
  { provider: normalizedProvider, modelName: modelParts.join("/") },
  "Unsupported provider detected, falling back to anthropic"
  );
- return {
- provider: "anthropic",
- modelName: modelParts.join("/")
- };
+ throw new Error(
+ `Unsupported provider: ${normalizedProvider}. Please provide a model in the format of provider/model-name.`
+ );
  }
  return {
  provider: normalizedProvider,
@@ -1357,51 +1506,9 @@ var _ModelFactory = class _ModelFactory {
  // In case model name has slashes
  };
  }
- return {
- provider: "anthropic",
- modelName: modelString
- };
- }
- /**
- * Create an Anthropic model instance
- */
- static createAnthropicModel(modelName, providerOptions) {
- const anthropicConfig = {};
- if (providerOptions?.baseUrl || providerOptions?.baseURL) {
- anthropicConfig.baseURL = providerOptions.baseUrl || providerOptions.baseURL;
- }
- if (providerOptions?.gateway) {
- logger4.info(
- { gateway: providerOptions.gateway },
- "Setting up AI Gateway for Anthropic model"
- );
- Object.assign(anthropicConfig, providerOptions.gateway);
- }
- if (Object.keys(anthropicConfig).length > 0) {
- logger4.info({ config: anthropicConfig }, "Applying custom Anthropic provider configuration");
- const provider = anthropic.createAnthropic(anthropicConfig);
- return provider(modelName);
- }
- return anthropic.anthropic(modelName);
- }
- /**
- * Create an OpenAI model instance
- */
- static createOpenAIModel(modelName, providerOptions) {
- const openaiConfig = {};
- if (providerOptions?.baseUrl || providerOptions?.baseURL) {
- openaiConfig.baseURL = providerOptions.baseUrl || providerOptions.baseURL;
- }
- if (providerOptions?.gateway) {
- logger4.info({ gateway: providerOptions.gateway }, "Setting up AI Gateway for OpenAI model");
- Object.assign(openaiConfig, providerOptions.gateway);
- }
- if (Object.keys(openaiConfig).length > 0) {
- logger4.info({ config: openaiConfig }, "Applying custom OpenAI provider configuration");
- const provider = openai.createOpenAI(openaiConfig);
- return provider(modelName);
- }
- return openai.openai(modelName);
+ throw new Error(
+ `Invalid model provided: ${modelString}. Please provide a model in the format of provider/model-name.`
+ );
  }
  /**
  * Get generation parameters from provider options
@@ -1426,7 +1533,7 @@ var _ModelFactory = class _ModelFactory {
  * Includes maxDuration if specified in provider options (in seconds, following Vercel standard)
  */
  static prepareGenerationConfig(modelSettings) {
- const modelString = modelSettings?.model?.trim() || "anthropic/claude-4-sonnet-20250514";
+ const modelString = modelSettings?.model?.trim();
  const model = _ModelFactory.createModel({
  model: modelString,
  providerOptions: modelSettings?.providerOptions
@@ -1467,7 +1574,7 @@ var _ModelFactory = class _ModelFactory {
  /**
  * Supported providers for security validation
  */
- __publicField(_ModelFactory, "SUPPORTED_PROVIDERS", ["anthropic", "openai"]);
+ __publicField(_ModelFactory, "SUPPORTED_PROVIDERS", ["anthropic", "openai", "google"]);
  var ModelFactory = _ModelFactory;
 
  // src/utils/graph-session.ts
@@ -1491,7 +1598,7 @@ function unregisterStreamHelper(requestId2) {
1491
1598
  }
1492
1599
 
1493
1600
  // src/utils/graph-session.ts
1494
- var logger5 = agentsCore.getLogger("GraphSession");
1601
+ var logger6 = agentsCore.getLogger("GraphSession");
1495
1602
  var GraphSession = class {
1496
1603
  // Track scheduled timeouts for cleanup
1497
1604
  constructor(sessionId, messageId, graphId, tenantId, projectId) {
@@ -1515,7 +1622,7 @@ var GraphSession = class {
1515
1622
  __publicField(this, "MAX_PENDING_ARTIFACTS", 100);
1516
1623
  // Prevent unbounded growth
1517
1624
  __publicField(this, "scheduledTimeouts");
1518
- logger5.debug({ sessionId, messageId, graphId }, "GraphSession created");
1625
+ logger6.debug({ sessionId, messageId, graphId }, "GraphSession created");
1519
1626
  }
1520
1627
  /**
1521
1628
  * Initialize status updates for this session
@@ -1529,15 +1636,15 @@ var GraphSession = class {
  summarizerModel,
  baseModel,
  config: {
- numEvents: config2.numEvents || 10,
- timeInSeconds: config2.timeInSeconds || 30,
+ numEvents: config2.numEvents || 1,
+ timeInSeconds: config2.timeInSeconds || 2,
  ...config2
  }
  };
  if (this.statusUpdateState.config.timeInSeconds) {
  this.statusUpdateTimer = setInterval(async () => {
  if (!this.statusUpdateState || this.isEnded) {
- logger5.debug(
+ logger6.debug(
  { sessionId: this.sessionId },
  "Timer triggered but session already cleaned up or ended"
  );
@@ -1549,7 +1656,7 @@ var GraphSession = class {
1549
1656
  }
1550
1657
  await this.checkAndSendTimeBasedUpdate();
1551
1658
  }, this.statusUpdateState.config.timeInSeconds * 1e3);
1552
- logger5.info(
1659
+ logger6.info(
1553
1660
  {
1554
1661
  sessionId: this.sessionId,
1555
1662
  intervalMs: this.statusUpdateState.config.timeInSeconds * 1e3
@@ -1563,7 +1670,7 @@ var GraphSession = class {
1563
1670
  */
1564
1671
  recordEvent(eventType, agentId, data) {
1565
1672
  if (this.isEnded) {
1566
- logger5.debug(
1673
+ logger6.debug(
1567
1674
  {
1568
1675
  sessionId: this.sessionId,
1569
1676
  eventType,
@@ -1583,7 +1690,7 @@ var GraphSession = class {
1583
1690
  if (eventType === "artifact_saved" && data.pendingGeneration) {
1584
1691
  const artifactId = data.artifactId;
1585
1692
  if (this.pendingArtifacts.size >= this.MAX_PENDING_ARTIFACTS) {
1586
- logger5.warn(
1693
+ logger6.warn(
1587
1694
  {
1588
1695
  sessionId: this.sessionId,
1589
1696
  artifactId,
@@ -1604,7 +1711,7 @@ var GraphSession = class {
1604
1711
  this.artifactProcessingErrors.set(artifactId, errorCount);
1605
1712
  if (errorCount >= this.MAX_ARTIFACT_RETRIES) {
1606
1713
  this.pendingArtifacts.delete(artifactId);
1607
- logger5.error(
1714
+ logger6.error(
1608
1715
  {
1609
1716
  sessionId: this.sessionId,
1610
1717
  artifactId,
@@ -1616,7 +1723,7 @@ var GraphSession = class {
1616
1723
  "Artifact processing failed after max retries, giving up"
1617
1724
  );
1618
1725
  } else {
1619
- logger5.warn(
1726
+ logger6.warn(
1620
1727
  {
1621
1728
  sessionId: this.sessionId,
1622
1729
  artifactId,
@@ -1638,14 +1745,14 @@ var GraphSession = class {
1638
1745
  */
1639
1746
  checkStatusUpdates() {
1640
1747
  if (this.isEnded) {
1641
- logger5.debug(
1748
+ logger6.debug(
1642
1749
  { sessionId: this.sessionId },
1643
1750
  "Session has ended - skipping status update check"
1644
1751
  );
1645
1752
  return;
1646
1753
  }
1647
1754
  if (!this.statusUpdateState) {
1648
- logger5.debug({ sessionId: this.sessionId }, "No status update state - skipping check");
1755
+ logger6.debug({ sessionId: this.sessionId }, "No status update state - skipping check");
1649
1756
  return;
1650
1757
  }
1651
1758
  const statusUpdateState = this.statusUpdateState;
@@ -1656,11 +1763,11 @@ var GraphSession = class {
1656
1763
  */
1657
1764
  async checkAndSendTimeBasedUpdate() {
1658
1765
  if (this.isEnded) {
1659
- logger5.debug({ sessionId: this.sessionId }, "Session has ended - skipping time-based update");
1766
+ logger6.debug({ sessionId: this.sessionId }, "Session has ended - skipping time-based update");
1660
1767
  return;
1661
1768
  }
1662
1769
  if (!this.statusUpdateState) {
1663
- logger5.debug(
1770
+ logger6.debug(
1664
1771
  { sessionId: this.sessionId },
1665
1772
  "No status updates configured for time-based check"
1666
1773
  );
@@ -1673,7 +1780,7 @@ var GraphSession = class {
1673
1780
  try {
1674
1781
  await this.generateAndSendUpdate();
1675
1782
  } catch (error) {
1676
- logger5.error(
1783
+ logger6.error(
1677
1784
  {
1678
1785
  sessionId: this.sessionId,
1679
1786
  error: error instanceof Error ? error.message : "Unknown error"
@@ -1766,29 +1873,29 @@ var GraphSession = class {
1766
1873
  */
1767
1874
  async generateAndSendUpdate() {
1768
1875
  if (this.isEnded) {
1769
- logger5.debug({ sessionId: this.sessionId }, "Session has ended - not generating update");
1876
+ logger6.debug({ sessionId: this.sessionId }, "Session has ended - not generating update");
1770
1877
  return;
1771
1878
  }
1772
1879
  if (this.isTextStreaming) {
1773
- logger5.debug(
1880
+ logger6.debug(
1774
1881
  { sessionId: this.sessionId },
1775
1882
  "Text is currently streaming - skipping status update"
1776
1883
  );
1777
1884
  return;
1778
1885
  }
1779
1886
  if (this.isGeneratingUpdate) {
1780
- logger5.debug(
1887
+ logger6.debug(
1781
1888
  { sessionId: this.sessionId },
1782
1889
  "Update already in progress - skipping duplicate generation"
1783
1890
  );
1784
1891
  return;
1785
1892
  }
1786
1893
  if (!this.statusUpdateState) {
1787
- logger5.warn({ sessionId: this.sessionId }, "No status update state - cannot generate update");
1894
+ logger6.warn({ sessionId: this.sessionId }, "No status update state - cannot generate update");
1788
1895
  return;
1789
1896
  }
1790
1897
  if (!this.graphId) {
1791
- logger5.warn({ sessionId: this.sessionId }, "No graph ID - cannot generate update");
1898
+ logger6.warn({ sessionId: this.sessionId }, "No graph ID - cannot generate update");
1792
1899
  return;
1793
1900
  }
1794
1901
  const newEventCount = this.events.length - this.statusUpdateState.lastEventCount;
@@ -1801,7 +1908,7 @@ var GraphSession = class {
1801
1908
  try {
1802
1909
  const streamHelper = getStreamHelper(this.sessionId);
1803
1910
  if (!streamHelper) {
1804
- logger5.warn(
1911
+ logger6.warn(
1805
1912
  { sessionId: this.sessionId },
1806
1913
  "No stream helper found - cannot send status update"
1807
1914
  );
@@ -1822,7 +1929,7 @@ var GraphSession = class {
1822
1929
  if (result.operations && result.operations.length > 0) {
1823
1930
  for (const op of result.operations) {
1824
1931
  if (!op || !op.type || !op.data || Object.keys(op.data).length === 0) {
1825
- logger5.warn(
1932
+ logger6.warn(
1826
1933
  {
1827
1934
  sessionId: this.sessionId,
1828
1935
  operation: op
@@ -1875,7 +1982,7 @@ var GraphSession = class {
1875
1982
  this.previousSummaries.shift();
1876
1983
  }
1877
1984
  if (!operation || !operation.type || !operation.ctx) {
1878
- logger5.warn(
1985
+ logger6.warn(
1879
1986
  {
1880
1987
  sessionId: this.sessionId,
1881
1988
  operation
@@ -1890,7 +1997,7 @@ var GraphSession = class {
1890
1997
  this.statusUpdateState.lastEventCount = this.events.length;
1891
1998
  }
1892
1999
  } catch (error) {
1893
- logger5.error(
2000
+ logger6.error(
1894
2001
  {
1895
2002
  sessionId: this.sessionId,
1896
2003
  error: error instanceof Error ? error.message : "Unknown error",
@@ -1928,7 +2035,7 @@ var GraphSession = class {
1928
2035
  this.releaseUpdateLock();
1929
2036
  }
1930
2037
  } catch (error) {
1931
- logger5.error(
2038
+ logger6.error(
1932
2039
  {
1933
2040
  sessionId: this.sessionId,
1934
2041
  error: error instanceof Error ? error.message : "Unknown error"
@@ -2005,7 +2112,7 @@ User's Question/Context:
2005
2112
  ${conversationHistory}
2006
2113
  ` : "";
2007
2114
  } catch (error) {
2008
- logger5.warn(
2115
+ logger6.warn(
2009
2116
  { sessionId: this.sessionId, error },
2010
2117
  "Failed to fetch conversation history for status update"
2011
2118
  );
@@ -2057,7 +2164,7 @@ ${this.statusUpdateState?.config.prompt?.trim() || ""}`;
2057
2164
  return text.trim();
2058
2165
  } catch (error) {
2059
2166
  agentsCore.setSpanWithError(span, error);
2060
- logger5.error({ error }, "Failed to generate summary, using fallback");
2167
+ logger6.error({ error }, "Failed to generate summary, using fallback");
2061
2168
  return this.generateFallbackSummary(newEvents, elapsedTime);
2062
2169
  } finally {
2063
2170
  span.end();
@@ -2103,7 +2210,7 @@ User's Question/Context:
2103
2210
  ${conversationHistory}
2104
2211
  ` : "";
2105
2212
  } catch (error) {
2106
- logger5.warn(
2213
+ logger6.warn(
2107
2214
  { sessionId: this.sessionId, error },
2108
2215
  "Failed to fetch conversation history for structured status update"
2109
2216
  );
@@ -2146,9 +2253,11 @@ Rules:
2146
2253
  - Labels MUST contain the ACTUAL information discovered ("Found X", "Learned Y", "Discovered Z requires A")
2147
2254
  - DO NOT use action words like "Searching", "Processing", "Analyzing" - state what was FOUND
2148
2255
  - Include specific details, numbers, requirements, or insights discovered
2149
- - You are ONE AI (no agents/delegations)
2150
- - Anonymize all internal operations so that the information appears descriptive and USER FRIENDLY. HIDE INTERNAL OPERATIONS!
2151
- - Bad examples: "Searching docs", "Processing request", "Status update", or not using the no_relevant_updates: e.g. "No New Updates", "No new info to report"
2256
+ - You are ONE unified AI system - NEVER mention agents, transfers, delegations, or routing
2257
+ - CRITICAL: NEVER use the words "transfer", "delegation", "agent", "routing", or any internal system terminology in labels
2258
+ - Present all operations as seamless actions by a single system
2259
+ - Anonymize all internal operations so that the information appears descriptive and USER FRIENDLY. HIDE ALL INTERNAL OPERATIONS!
2260
+ - Bad examples: "Transferring to search agent", "Delegating task", "Routing request", "Processing request", or not using the no_relevant_updates
2152
2261
  - Good examples: "Slack bot needs admin privileges", "Found 3-step OAuth flow required", "Channel limit is 500 per workspace", or use the no_relevant_updates component if nothing new to report.
2153
2262
 
2154
2263
  REMEMBER YOU CAN ONLY USE 'no_relevant_updates' ALONE! IT CANNOT BE CONCATENATED WITH OTHER STATUS UPDATES!
@@ -2202,7 +2311,7 @@ ${this.statusUpdateState?.config.prompt?.trim() || ""}`;
2202
2311
  return { operations };
2203
2312
  } catch (error) {
2204
2313
  agentsCore.setSpanWithError(span, error);
2205
- logger5.error({ error }, "Failed to generate structured update, using fallback");
2314
+ logger6.error({ error }, "Failed to generate structured update, using fallback");
2206
2315
  return { operations: [] };
2207
2316
  } finally {
2208
2317
  span.end();
@@ -2309,8 +2418,7 @@ ${this.statusUpdateState?.config.prompt?.trim() || ""}`;
2309
2418
  case "transfer": {
2310
2419
  const data = event.data;
2311
2420
  activities.push(
2312
- `\u{1F504} **Transfer**: ${data.fromAgent} \u2192 ${data.targetAgent}
2313
- ${data.reason ? `Reason: ${data.reason}` : "Control transfer"}
2421
+ `\u{1F504} **Continuing**: ${data.reason || "Processing request"}
2314
2422
  ${data.context ? `Context: ${JSON.stringify(data.context, null, 2)}` : ""}`
2315
2423
  );
2316
2424
  break;
@@ -2318,8 +2426,7 @@ ${this.statusUpdateState?.config.prompt?.trim() || ""}`;
2318
2426
  case "delegation_sent": {
2319
2427
  const data = event.data;
2320
2428
  activities.push(
2321
- `\u{1F4E4} **Delegation Sent** [${data.delegationId}]: ${data.fromAgent} \u2192 ${data.targetAgent}
2322
- Task: ${data.taskDescription}
2429
+ `\u{1F4E4} **Processing**: ${data.taskDescription}
2323
2430
  ${data.context ? `Context: ${JSON.stringify(data.context, null, 2)}` : ""}`
2324
2431
  );
2325
2432
  break;
@@ -2327,7 +2434,7 @@ ${this.statusUpdateState?.config.prompt?.trim() || ""}`;
2327
2434
  case "delegation_returned": {
2328
2435
  const data = event.data;
2329
2436
  activities.push(
2330
- `\u{1F4E5} **Delegation Returned** [${data.delegationId}]: ${data.fromAgent} \u2190 ${data.targetAgent}
2437
+ `\u{1F4E5} **Completed subtask**
2331
2438
  Result: ${JSON.stringify(data.result, null, 2)}`
2332
2439
  );
2333
2440
  break;
@@ -2346,16 +2453,16 @@ ${this.statusUpdateState?.config.prompt?.trim() || ""}`;
2346
2453
  case "agent_reasoning": {
2347
2454
  const data = event.data;
2348
2455
  activities.push(
2349
- `\u2699\uFE0F **Reasoning**: reasoning
2350
- Full Details: ${JSON.stringify(data.parts, null, 2)}`
2456
+ `\u2699\uFE0F **Analyzing request**
2457
+ Details: ${JSON.stringify(data.parts, null, 2)}`
2351
2458
  );
2352
2459
  break;
2353
2460
  }
2354
2461
  case "agent_generate": {
2355
2462
  const data = event.data;
2356
2463
  activities.push(
2357
- `\u2699\uFE0F **Generation**: ${data.generationType}
2358
- Full Details: ${JSON.stringify(data.parts, null, 2)}`
2464
+ `\u2699\uFE0F **Preparing response**
2465
+ Details: ${JSON.stringify(data.parts, null, 2)}`
2359
2466
  );
2360
2467
  break;
2361
2468
  }
@@ -2529,7 +2636,7 @@ Make it specific and relevant.`;
2529
2636
  taskId: artifactData.taskId,
2530
2637
  artifacts: [artifactToSave]
2531
2638
  });
2532
- logger5.info(
2639
+ logger6.info(
2533
2640
  {
2534
2641
  sessionId: this.sessionId,
2535
2642
  artifactId: artifactData.artifactId,
@@ -2546,7 +2653,7 @@ Make it specific and relevant.`;
2546
2653
  span.setStatus({ code: api.SpanStatusCode.OK });
2547
2654
  } catch (error) {
2548
2655
  agentsCore.setSpanWithError(span, error);
2549
- logger5.error(
2656
+ logger6.error(
2550
2657
  {
2551
2658
  sessionId: this.sessionId,
2552
2659
  artifactId: artifactData.artifactId,
@@ -2582,7 +2689,7 @@ Make it specific and relevant.`;
2582
2689
  taskId: artifactData.taskId,
2583
2690
  artifacts: [fallbackArtifact]
2584
2691
  });
2585
- logger5.info(
2692
+ logger6.info(
2586
2693
  {
2587
2694
  sessionId: this.sessionId,
2588
2695
  artifactId: artifactData.artifactId
@@ -2591,7 +2698,7 @@ Make it specific and relevant.`;
2591
2698
  );
2592
2699
  }
2593
2700
  } catch (fallbackError) {
2594
- logger5.error(
2701
+ logger6.error(
2595
2702
  {
2596
2703
  sessionId: this.sessionId,
2597
2704
  artifactId: artifactData.artifactId,
@@ -2618,7 +2725,7 @@ var GraphSessionManager = class {
2618
2725
  const sessionId = messageId;
2619
2726
  const session = new GraphSession(sessionId, messageId, graphId, tenantId, projectId);
2620
2727
  this.sessions.set(sessionId, session);
2621
- logger5.info({ sessionId, messageId, graphId, tenantId, projectId }, "GraphSession created");
2728
+ logger6.info({ sessionId, messageId, graphId, tenantId, projectId }, "GraphSession created");
2622
2729
  return sessionId;
2623
2730
  }
2624
2731
  /**
@@ -2629,7 +2736,7 @@ var GraphSessionManager = class {
2629
2736
  if (session) {
2630
2737
  session.initializeStatusUpdates(config2, summarizerModel);
2631
2738
  } else {
2632
- logger5.error(
2739
+ logger6.error(
2633
2740
  {
2634
2741
  sessionId,
2635
2742
  availableSessions: Array.from(this.sessions.keys())
@@ -2650,7 +2757,7 @@ var GraphSessionManager = class {
2650
2757
  recordEvent(sessionId, eventType, agentId, data) {
2651
2758
  const session = this.sessions.get(sessionId);
2652
2759
  if (!session) {
2653
- logger5.warn({ sessionId }, "Attempted to record event in non-existent session");
2760
+ logger6.warn({ sessionId }, "Attempted to record event in non-existent session");
2654
2761
  return;
2655
2762
  }
2656
2763
  session.recordEvent(eventType, agentId, data);
@@ -2661,12 +2768,12 @@ var GraphSessionManager = class {
2661
2768
  endSession(sessionId) {
2662
2769
  const session = this.sessions.get(sessionId);
2663
2770
  if (!session) {
2664
- logger5.warn({ sessionId }, "Attempted to end non-existent session");
2771
+ logger6.warn({ sessionId }, "Attempted to end non-existent session");
2665
2772
  return [];
2666
2773
  }
2667
2774
  const events = session.getEvents();
2668
2775
  const summary = session.getSummary();
2669
- logger5.info({ sessionId, summary }, "GraphSession ended");
2776
+ logger6.info({ sessionId, summary }, "GraphSession ended");
2670
2777
  session.cleanup();
2671
2778
  this.sessions.delete(sessionId);
2672
2779
  return events;
@@ -2695,7 +2802,7 @@ var graphSessionManager = new GraphSessionManager();
2695
2802
 
2696
2803
  // src/utils/artifact-parser.ts
2697
2804
  init_dbClient();
2698
- var logger6 = agentsCore.getLogger("ArtifactParser");
2805
+ var logger7 = agentsCore.getLogger("ArtifactParser");
2699
2806
  var _ArtifactParser = class _ArtifactParser {
2700
2807
  constructor(tenantId) {
2701
2808
  this.tenantId = tenantId;
@@ -2711,9 +2818,7 @@ var _ArtifactParser = class _ArtifactParser {
2711
2818
  * More robust detection that handles streaming fragments
2712
2819
  */
2713
2820
  hasIncompleteArtifact(text) {
2714
- return /^.*<(?:artifact(?::ref)?|a(?:r(?:t(?:i(?:f(?:a(?:c(?:t(?::(?:r(?:e(?:f)?)?)?)?)?)?)?)?)?)?)?)?$/.test(
2715
- text
2716
- ) || /^.*<artifact:ref(?:[^>]*)$/.test(text) || // Incomplete artifact:ref at end
2821
+ return /<(a(r(t(i(f(a(c(t(:?(r(e(f)?)?)?)?)?)?)?)?)?)?)?)?$/.test(text) || /<artifact:ref[^>]+$/.test(text) || // Incomplete artifact ref at end
2717
2822
  this.findSafeTextBoundary(text) < text.length;
2718
2823
  }
2719
2824
  /**
@@ -2722,10 +2827,10 @@ var _ArtifactParser = class _ArtifactParser {
2722
2827
  */
2723
2828
  findSafeTextBoundary(text) {
2724
2829
  const endPatterns = [
2725
- /^.*<artifact:ref(?:[^/>]+(?:[^>]*[^/])?)?$/,
2830
+ /<artifact:ref(?![^>]*\/>).*$/,
2726
2831
  // artifact:ref that doesn't end with />
2727
- /^.*<(?:artifact(?::ref)?|a(?:r(?:t(?:i(?:f(?:a(?:c(?:t(?::(?:r(?:e(?:f)?)?)?)?)?)?)?)?)?)?)?)?$/
2728
- // Safe partial artifact pattern
2832
+ /<(a(r(t(i(f(a(c(t(:?(r(e(f)?)?)?)?)?)?)?)?)?)?)?)?$/
2833
+ // Any partial artifact pattern at end
2729
2834
  ];
2730
2835
  for (const pattern of endPatterns) {
2731
2836
  const match = text.match(pattern);
@@ -2761,7 +2866,7 @@ var _ArtifactParser = class _ArtifactParser {
2761
2866
  id: taskId
2762
2867
  });
2763
2868
  if (!task) {
2764
- logger6.warn({ taskId }, "Task not found when fetching artifacts");
2869
+ logger7.warn({ taskId }, "Task not found when fetching artifacts");
2765
2870
  continue;
2766
2871
  }
2767
2872
  const taskArtifacts = await agentsCore.getLedgerArtifacts(dbClient_default)({
@@ -2773,9 +2878,9 @@ var _ArtifactParser = class _ArtifactParser {
2773
2878
  artifacts.set(key, artifact);
2774
2879
  }
2775
2880
  }
2776
- logger6.debug({ contextId, count: artifacts.size }, "Loaded context artifacts");
2881
+ logger7.debug({ contextId, count: artifacts.size }, "Loaded context artifacts");
2777
2882
  } catch (error) {
2778
- logger6.error({ error, contextId }, "Error loading context artifacts");
2883
+ logger7.error({ error, contextId }, "Error loading context artifacts");
2779
2884
  }
2780
2885
  return artifacts;
2781
2886
  }
@@ -2878,7 +2983,7 @@ var _ArtifactParser = class _ArtifactParser {
2878
2983
  id: taskId
2879
2984
  });
2880
2985
  if (!task) {
2881
- logger6.warn({ taskId }, "Task not found when fetching artifact");
2986
+ logger7.warn({ taskId }, "Task not found when fetching artifact");
2882
2987
  return null;
2883
2988
  }
2884
2989
  const artifacts = await agentsCore.getLedgerArtifacts(dbClient_default)({
@@ -2890,7 +2995,7 @@ var _ArtifactParser = class _ArtifactParser {
2890
2995
  return this.formatArtifactData(artifacts[0], artifactId, taskId);
2891
2996
  }
2892
2997
  } catch (error) {
2893
- logger6.warn({ artifactId, taskId, error }, "Failed to fetch artifact");
2998
+ logger7.warn({ artifactId, taskId, error }, "Failed to fetch artifact");
2894
2999
  }
2895
3000
  return null;
2896
3001
  }
@@ -2926,11 +3031,11 @@ var _ArtifactParser = class _ArtifactParser {
2926
3031
  __publicField(_ArtifactParser, "ARTIFACT_REGEX", /<artifact:ref\s+id="([^"]*?)"\s+task="([^"]*?)"\s*\/>/gs);
2927
3032
  __publicField(_ArtifactParser, "ARTIFACT_CHECK_REGEX", /<artifact:ref\s+(?=.*id="[^"]+")(?=.*task="[^"]+")[^>]*\/>/);
2928
3033
  // Regex for catching any partial artifact pattern (< + any prefix of "artifact:ref")
2929
- __publicField(_ArtifactParser, "INCOMPLETE_ARTIFACT_REGEX", /<(a(r(t(i(f(a(c(t(:(r(e(f?)?)?)?)?)?)?)?)?)?)?)?)?$/g);
3034
+ __publicField(_ArtifactParser, "INCOMPLETE_ARTIFACT_REGEX", /<(a(r(t(i(f(a(c(t(:?(r(e(f)?)?)?)?)?)?)?)?)?)?)?)?$/g);
2930
3035
  var ArtifactParser = _ArtifactParser;
2931
3036
 
2932
3037
  // src/utils/incremental-stream-parser.ts
2933
- var logger7 = agentsCore.getLogger("IncrementalStreamParser");
3038
+ var logger8 = agentsCore.getLogger("IncrementalStreamParser");
2934
3039
  var IncrementalStreamParser = class {
2935
3040
  constructor(streamHelper, tenantId, contextId) {
2936
3041
  __publicField(this, "buffer", "");
@@ -2990,13 +3095,19 @@ var IncrementalStreamParser = class {
2990
3095
  if (part.type === "tool-call-delta" && part.toolName === targetToolName) {
2991
3096
  const delta = part.argsTextDelta || "";
2992
3097
  if (jsonBuffer.length + delta.length > MAX_BUFFER_SIZE) {
2993
- logger7.warn({ bufferSize: jsonBuffer.length + delta.length, maxSize: MAX_BUFFER_SIZE }, "JSON buffer exceeded maximum size, truncating");
3098
+ logger8.warn(
3099
+ { bufferSize: jsonBuffer.length + delta.length, maxSize: MAX_BUFFER_SIZE },
3100
+ "JSON buffer exceeded maximum size, truncating"
3101
+ );
2994
3102
  jsonBuffer = jsonBuffer.slice(-MAX_BUFFER_SIZE / 2);
2995
3103
  }
2996
3104
  jsonBuffer += delta;
2997
3105
  for (const char of delta) {
2998
3106
  if (componentBuffer.length > MAX_BUFFER_SIZE) {
2999
- logger7.warn({ bufferSize: componentBuffer.length, maxSize: MAX_BUFFER_SIZE }, "Component buffer exceeded maximum size, resetting");
3107
+ logger8.warn(
3108
+ { bufferSize: componentBuffer.length, maxSize: MAX_BUFFER_SIZE },
3109
+ "Component buffer exceeded maximum size, resetting"
3110
+ );
3000
3111
  componentBuffer = "";
3001
3112
  depth = 0;
3002
3113
  continue;
@@ -3011,7 +3122,7 @@ var IncrementalStreamParser = class {
3011
3122
  if (componentMatch) {
3012
3123
  const MAX_COMPONENT_SIZE = 1024 * 1024;
3013
3124
  if (componentMatch[0].length > MAX_COMPONENT_SIZE) {
3014
- logger7.warn(
3125
+ logger8.warn(
3015
3126
  {
3016
3127
  size: componentMatch[0].length,
3017
3128
  maxSize: MAX_COMPONENT_SIZE
@@ -3024,7 +3135,7 @@ var IncrementalStreamParser = class {
3024
3135
  try {
3025
3136
  const component = JSON.parse(componentMatch[0]);
3026
3137
  if (typeof component !== "object" || !component.id) {
3027
- logger7.warn({ component }, "Invalid component structure, skipping");
3138
+ logger8.warn({ component }, "Invalid component structure, skipping");
3028
3139
  componentBuffer = "";
3029
3140
  continue;
3030
3141
  }
@@ -3037,7 +3148,7 @@ var IncrementalStreamParser = class {
3037
3148
  componentsStreamed++;
3038
3149
  componentBuffer = "";
3039
3150
  } catch (e) {
3040
- logger7.debug({ error: e }, "Failed to parse component, continuing to accumulate");
3151
+ logger8.debug({ error: e }, "Failed to parse component, continuing to accumulate");
3041
3152
  }
3042
3153
  }
3043
3154
  }
@@ -3054,7 +3165,7 @@ var IncrementalStreamParser = class {
3054
3165
  break;
3055
3166
  }
3056
3167
  }
3057
- logger7.debug({ componentsStreamed }, "Finished streaming components");
3168
+ logger8.debug({ componentsStreamed }, "Finished streaming components");
3058
3169
  }
3059
3170
  /**
3060
3171
  * Legacy method for backward compatibility - defaults to text processing
@@ -3198,7 +3309,7 @@ var IncrementalStreamParser = class {
3198
3309
  };
3199
3310
 
3200
3311
  // src/utils/response-formatter.ts
3201
- var logger8 = agentsCore.getLogger("ResponseFormatter");
3312
+ var logger9 = agentsCore.getLogger("ResponseFormatter");
3202
3313
  var ResponseFormatter = class {
3203
3314
  constructor(tenantId) {
3204
3315
  __publicField(this, "artifactParser");
@@ -3229,7 +3340,7 @@ var ResponseFormatter = class {
3229
3340
  return { parts };
3230
3341
  } catch (error) {
3231
3342
  agentsCore.setSpanWithError(span, error);
3232
- logger8.error({ error, responseObject }, "Error formatting object response");
3343
+ logger9.error({ error, responseObject }, "Error formatting object response");
3233
3344
  return {
3234
3345
  parts: [{ kind: "data", data: responseObject }]
3235
3346
  };
@@ -3280,7 +3391,7 @@ var ResponseFormatter = class {
3280
3391
  return { parts };
3281
3392
  } catch (error) {
3282
3393
  agentsCore.setSpanWithError(span, error);
3283
- logger8.error({ error, responseText }, "Error formatting response");
3394
+ logger9.error({ error, responseText }, "Error formatting response");
3284
3395
  return { text: responseText };
3285
3396
  } finally {
3286
3397
  span.end();
@@ -3325,7 +3436,7 @@ var ResponseFormatter = class {
3325
3436
  }
3326
3437
  }
3327
3438
  };
3328
- var logger9 = agentsCore.getLogger("ToolSessionManager");
3439
+ var logger10 = agentsCore.getLogger("ToolSessionManager");
3329
3440
  var _ToolSessionManager = class _ToolSessionManager {
3330
3441
  // 5 minutes
3331
3442
  constructor() {
@@ -3354,7 +3465,7 @@ var _ToolSessionManager = class _ToolSessionManager {
3354
3465
  createdAt: Date.now()
3355
3466
  };
3356
3467
  this.sessions.set(sessionId, session);
3357
- logger9.debug({ sessionId, tenantId, contextId, taskId }, "Created tool session");
3468
+ logger10.debug({ sessionId, tenantId, contextId, taskId }, "Created tool session");
3358
3469
  return sessionId;
3359
3470
  }
3360
3471
  /**
@@ -3363,7 +3474,7 @@ var _ToolSessionManager = class _ToolSessionManager {
3363
3474
  recordToolResult(sessionId, toolResult) {
3364
3475
  const session = this.sessions.get(sessionId);
3365
3476
  if (!session) {
3366
- logger9.warn(
3477
+ logger10.warn(
3367
3478
  { sessionId, toolCallId: toolResult.toolCallId },
3368
3479
  "Tool result recorded for unknown session"
3369
3480
  );
@@ -3377,12 +3488,12 @@ var _ToolSessionManager = class _ToolSessionManager {
3377
3488
  getToolResult(sessionId, toolCallId) {
3378
3489
  const session = this.sessions.get(sessionId);
3379
3490
  if (!session) {
3380
- logger9.warn({ sessionId, toolCallId }, "Requested tool result for unknown session");
3491
+ logger10.warn({ sessionId, toolCallId }, "Requested tool result for unknown session");
3381
3492
  return void 0;
3382
3493
  }
3383
3494
  const result = session.toolResults.get(toolCallId);
3384
3495
  if (!result) {
3385
- logger9.warn(
3496
+ logger10.warn(
3386
3497
  {
3387
3498
  sessionId,
3388
3499
  toolCallId,
@@ -3421,10 +3532,10 @@ var _ToolSessionManager = class _ToolSessionManager {
3421
3532
  }
3422
3533
  for (const sessionId of expiredSessions) {
3423
3534
  this.sessions.delete(sessionId);
3424
- logger9.debug({ sessionId }, "Cleaned up expired tool session");
3535
+ logger10.debug({ sessionId }, "Cleaned up expired tool session");
3425
3536
  }
3426
3537
  if (expiredSessions.length > 0) {
3427
- logger9.info({ expiredCount: expiredSessions.length }, "Cleaned up expired tool sessions");
3538
+ logger10.info({ expiredCount: expiredSessions.length }, "Cleaned up expired tool sessions");
3428
3539
  }
3429
3540
  }
3430
3541
  };
@@ -3433,7 +3544,7 @@ var ToolSessionManager = _ToolSessionManager;
3433
3544
  var toolSessionManager = ToolSessionManager.getInstance();
3434
3545
 
3435
3546
  // src/agents/artifactTools.ts
3436
- var logger10 = agentsCore.getLogger("artifactTools");
3547
+ var logger11 = agentsCore.getLogger("artifactTools");
3437
3548
  function buildKeyNestingMap(data, prefix = "", map = /* @__PURE__ */ new Map()) {
3438
3549
  if (typeof data === "object" && data !== null) {
3439
3550
  if (Array.isArray(data)) {
@@ -3536,7 +3647,7 @@ function createPropSelectorsSchema(artifactComponents) {
3536
3647
  Object.entries(summaryProps.properties).forEach(([propName, propDef]) => {
3537
3648
  const propDescription = propDef?.description || propDef?.title || `${propName} property`;
3538
3649
  propSchema[propName] = z5__default.default.string().describe(
3539
- `JMESPath selector for ${propName} (${propDescription}) - summary version, relative to base selector`
3650
+ `JMESPath selector for ${propName} (${propDescription}) - summary version, MUST be relative to your baseSelector target level. Access fields WITHIN the items your baseSelector returns.`
3540
3651
  );
3541
3652
  });
3542
3653
  }
@@ -3548,7 +3659,7 @@ function createPropSelectorsSchema(artifactComponents) {
3548
3659
  if (!propSchema[propName]) {
3549
3660
  const propDescription = propDef?.description || propDef?.title || `${propName} property`;
3550
3661
  propSchema[propName] = z5__default.default.string().describe(
3551
- `JMESPath selector for ${propName} (${propDescription}) - full version, relative to base selector`
3662
+ `JMESPath selector for ${propName} (${propDescription}) - MUST be relative to your baseSelector target level. If baseSelector stops at a document, this accesses fields WITHIN that document. Examples: "title", "content.body", "metadata.author"`
3552
3663
  );
3553
3664
  }
3554
3665
  });
@@ -3562,7 +3673,26 @@ function createPropSelectorsSchema(artifactComponents) {
3562
3673
  return z5__default.default.union(propSelectorSchemas);
3563
3674
  }
3564
3675
  return z5__default.default.record(z5__default.default.string(), z5__default.default.string()).describe(
3565
- "Prop selectors mapping schema properties to JMESPath expressions relative to base selector"
3676
+ `Prop selectors mapping schema properties to JMESPath expressions relative to base selector. Each path is relative to the item(s) your baseSelector returns.
3677
+
3678
+ \u{1F3AF} CRITICAL: PropSelectors work ONLY on the data your baseSelector returns!
3679
+ If baseSelector = "result.docs[0]" \u2192 propSelectors access fields INSIDE that doc
3680
+ If baseSelector = "result.docs[0].content[0]" \u2192 propSelectors access fields INSIDE that content item
3681
+
3682
+ \u2705 CORRECT EXAMPLES (paths relative to baseSelector target):
3683
+ \u2022 baseSelector: "result.documents[?type=='article']" \u2192 propSelectors: {"title": "title", "url": "url"}
3684
+ \u2022 baseSelector: "result.content[0].text" \u2192 propSelectors: {"content": "content[0].text", "source": "content[0].source"}
3685
+ \u2022 baseSelector: "result.items" \u2192 propSelectors: {"name": "profile.name", "email": "contact.email"}
3686
+
3687
+ \u274C WRONG EXAMPLES (accessing data not at baseSelector level):
3688
+ \u2022 baseSelector: "result.docs[0].content[0]" \u2192 propSelectors: {"title": "title"} \u2190 title is at doc level, not content level!
3689
+ \u2022 baseSelector: "result.source.content" \u2192 propSelectors: {"title": "content[4].text"} \u2190 baseSelector ends at array, can't index into it!
3690
+ \u2022 baseSelector: "result.items" \u2192 propSelectors: {"title": "documents[0].title"} \u2190 going deeper when baseSelector should handle depth
3691
+
3692
+ \u274C NEVER USE LITERAL VALUES:
3693
+ {"title": "Robert Tran", "url": "https://linkedin.com/..."}
3694
+
3695
+ \u{1F4A1} TIP: Match your baseSelector depth to where the properties you need actually exist!`
3566
3696
  );
3567
3697
  }
3568
3698
  function createInputSchema(artifactComponents) {
@@ -3571,7 +3701,18 @@ function createInputSchema(artifactComponents) {
3571
3701
  "EXACT toolCallId from a previous tool execution - copy it exactly from the tool call result. NEVER invent or make up tool call IDs."
3572
3702
  ),
3573
3703
  baseSelector: z5__default.default.string().describe(
3574
- `JMESPath selector to get to the main data array/object. ALWAYS start with "result." Example: "result.content[?type=='text']"`
3704
+ `JMESPath selector to get to the main data array/object. ALWAYS start with "result." That is a mandatory prefix.
3705
+
3706
+ Data structures are COMPLEX and NESTED. Examples:
3707
+ \u2022 "result.content[0].text.content[2]" - parsed JSON in text field
3708
+ \u2022 "result.structuredContent.content[1]" - direct structured data
3709
+ \u2022 "result.data.items[?type=='doc']" - filtered array
3710
+
3711
+ \u{1F6A8} CRITICAL: If you need data from array[4], your baseSelector must END at array[4], NOT at the array itself!
3712
+ \u2705 CORRECT: "result.source.content[4]" \u2192 propSelectors can access fields in that item
3713
+ \u274C WRONG: "result.source.content" \u2192 propSelectors can't use content[4] because baseSelector already selected the array
3714
+
3715
+ \u{1F525} IF YOUR PATH FAILS: READ THE ERROR MESSAGE! It tells you the correct path! \u{1F525}`
3575
3716
  ),
3576
3717
  propSelectors: createPropSelectorsSchema(artifactComponents)
3577
3718
  });
@@ -3590,6 +3731,9 @@ function createSaveToolResultTool(sessionId, streamRequestId, agentId, artifactC
3590
3731
  return ai.tool({
3591
3732
  description: `Save tool results as structured artifacts. Each artifact should represent ONE SPECIFIC, IMPORTANT, and UNIQUE document or data item.
3592
3733
 
3734
+ \u26A1 CRITICAL: JSON-like text content in tool results is AUTOMATICALLY PARSED into proper JSON objects - treat all data as structured, not text strings.
3735
+ \u{1F6A8} CRITICAL: Data structures are deeply nested. When your path fails, READ THE ERROR MESSAGE - it shows the correct path!
3736
+
3593
3737
  AVAILABLE ARTIFACT TYPES:
3594
3738
  ${availableTypesWithDescriptions}
3595
3739
 
@@ -3601,26 +3745,6 @@ Each artifact you save becomes a SEPARATE DATA COMPONENT in the structured respo
3601
3745
  \u2705 UNIQUE with distinct value from other artifacts
3602
3746
  \u2705 RENDERED AS INDIVIDUAL DATA COMPONENT in the UI
3603
3747
 
3604
- \u274C DO NOT save multiple different items in one artifact unless they are EXTREMELY SIMILAR
3605
- \u274C DO NOT batch unrelated items together - each item becomes its own data component
3606
- \u274C DO NOT save generic collections - break them into individual data components
3607
-
3608
- \u{1F3AF} STRUCTURED DATA COMPONENT PRINCIPLE:
3609
- Each artifact save creates ONE data component that will be rendered separately in the UI. If you have 5 important items, save them as 5 separate artifacts to create 5 separate data components for better user experience.
3610
-
3611
- THINK: "What is the ONE most important piece of information here that deserves its own data component?"
3612
-
3613
- EXAMPLES OF GOOD INDIVIDUAL ARTIFACTS (SEPARATE DATA COMPONENTS):
3614
- - Nick Gomez's founder profile (specific person) \u2192 Individual data component
3615
- - The /users/create API endpoint documentation (specific endpoint) \u2192 Individual data component
3616
- - Error message for authentication failure (specific error type) \u2192 Individual data component
3617
- - Configuration for Redis caching (specific config topic) \u2192 Individual data component
3618
-
3619
- EXAMPLES OF BAD BATCHING:
3620
- \u274C "All team members" \u2192 Should be separate artifacts for each important member (separate data components)
3621
- \u274C "All API endpoints" \u2192 Should be separate artifacts for each distinct endpoint (separate data components)
3622
- \u274C "All error types" \u2192 Should be separate artifacts for each error category (separate data components)
3623
-
3624
3748
  USAGE PATTERN:
3625
3749
  1. baseSelector: Navigate through nested structures to target ONE SPECIFIC item
3626
3750
  - Navigate through all necessary levels: "result.data.items.nested[?condition]"
@@ -3630,9 +3754,11 @@ USAGE PATTERN:
3630
3754
  - NOT: "result.items[*]" (too broad, gets everything)
3631
3755
 
3632
3756
  2. propSelectors: Extract properties relative to your selected item
3633
- - Always relative to the single item that baseSelector returns
3634
- - Simple paths from that item: { prop1: "field_x", prop2: "nested.field_y", prop3: "deep.nested.field_z" }
3635
- - The tool handles array iteration - your selectors work on individual items
3757
+ - \u{1F3AF} CRITICAL: Always relative to the single item that baseSelector returns
3758
+ - If baseSelector ends at a document \u2192 propSelectors access document fields
3759
+ - If baseSelector ends at content[0] \u2192 propSelectors access content[0] fields
3760
+ - Simple paths from that exact level: { prop1: "field_x", prop2: "nested.field_y" }
3761
+ - \u274C DON'T try to go back up or deeper - adjust your baseSelector instead!
3636
3762
 
3637
3763
  3. Result: ONE artifact representing ONE important, unique item \u2192 ONE data component
3638
3764
 
@@ -3641,20 +3767,12 @@ USAGE PATTERN:
3641
3767
  - Focus on getting to the right level with baseSelector, then keep propSelectors simple
3642
3768
  - Test your baseSelector: Does it return exactly the items you want?
3643
3769
 
3644
- \u26A0\uFE0F STRICT SELECTIVITY RULES FOR DATA COMPONENTS:
3645
- - ALWAYS ask: "Is this ONE specific, important thing that deserves its own data component?"
3646
- - If the answer is no, don't save it or find a more specific selector
3647
- - Multiple similar items = Multiple separate artifact saves (use the tool multiple times) \u2192 Multiple data components
3648
- - Each artifact should be independently valuable and uniquely identifiable \u2192 Each data component stands alone
3649
- - BETTER to save 3 individual, specific artifacts (3 data components) than 1 generic collection (1 data component)
3650
-
3651
- \u{1F504} MULTIPLE ARTIFACTS = MULTIPLE DATA COMPONENTS:
3652
- Remember: Each time you call this tool, you create a separate data component. Call it multiple times for multiple items to create a rich, structured response with individual data components for each important piece of information.`,
3770
+ Please use Error Messages to Debug when there is an error in the tool call.`,
3653
3771
  inputSchema,
3654
3772
  execute: async ({ toolCallId, baseSelector, propSelectors, ...rest }, _context) => {
3655
3773
  const artifactType = "artifactType" in rest ? rest.artifactType : void 0;
3656
3774
  if (!sessionId) {
3657
- logger10.warn({ toolCallId }, "No session ID provided to save_tool_result");
3775
+ logger11.warn({ toolCallId }, "No session ID provided to save_tool_result");
3658
3776
  return {
3659
3777
  saved: false,
3660
3778
  error: `[toolCallId: ${toolCallId}] No session context available`,
@@ -3664,7 +3782,7 @@ Remember: Each time you call this tool, you create a separate data component. Ca
3664
3782
  }
3665
3783
  const toolResult = toolSessionManager.getToolResult(sessionId, toolCallId);
3666
3784
  if (!toolResult) {
3667
- logger10.warn({ toolCallId, sessionId }, "Tool result not found in session");
3785
+ logger11.warn({ toolCallId, sessionId }, "Tool result not found in session");
3668
3786
  return {
3669
3787
  saved: false,
3670
3788
  error: `[toolCallId: ${toolCallId}] Tool result not found`,
@@ -3677,7 +3795,7 @@ Remember: Each time you call this tool, you create a separate data component. Ca
3677
3795
  const baseData = jmespath__default.default.search(parsedResult, baseSelector);
3678
3796
  if (!baseData || Array.isArray(baseData) && baseData.length === 0) {
3679
3797
  const debugInfo = analyzeSelectorFailure(parsedResult, baseSelector);
3680
- logger10.warn(
3798
+ logger11.warn(
3681
3799
  {
3682
3800
  baseSelector,
3683
3801
  toolCallId,
@@ -3720,7 +3838,7 @@ Remember: Each time you call this tool, you create a separate data component. Ca
3720
3838
  const fallbackValue = item[propName];
3721
3839
  if (fallbackValue !== null && fallbackValue !== void 0) {
3722
3840
  extractedItem[propName] = fallbackValue;
3723
- logger10.info(
3841
+ logger11.info(
3724
3842
  { propName, propSelector, context },
3725
3843
  `PropSelector failed, used fallback direct property access`
3726
3844
  );
@@ -3732,7 +3850,7 @@ Remember: Each time you call this tool, you create a separate data component. Ca
3732
3850
  const fallbackValue = item[propName];
3733
3851
  if (fallbackValue !== null && fallbackValue !== void 0) {
3734
3852
  extractedItem[propName] = fallbackValue;
3735
- logger10.warn(
3853
+ logger11.warn(
3736
3854
  { propName, propSelector, context, error: error.message },
3737
3855
  `PropSelector syntax error, used fallback direct property access`
3738
3856
  );
@@ -3845,7 +3963,7 @@ Remember: Each time you call this tool, you create a separate data component. Ca
3845
3963
  warnings
3846
3964
  };
3847
3965
  } catch (error) {
3848
- logger10.error({ error, toolCallId, sessionId }, "Error processing save_tool_result");
3966
+ logger11.error({ error, toolCallId, sessionId }, "Error processing save_tool_result");
3849
3967
  return {
3850
3968
  saved: false,
3851
3969
  error: `[toolCallId: ${toolCallId}] ${error instanceof Error ? error.message : "Unknown error"}`,
@@ -3857,7 +3975,7 @@ Remember: Each time you call this tool, you create a separate data component. Ca
3857
3975
  }
3858
3976
 
3859
3977
  // src/a2a/client.ts
3860
- var logger11 = agentsCore.getLogger("a2aClient");
3978
+ var logger12 = agentsCore.getLogger("a2aClient");
3861
3979
  var DEFAULT_BACKOFF = {
3862
3980
  initialInterval: 500,
3863
3981
  maxInterval: 6e4,
@@ -4063,7 +4181,7 @@ var A2AClient = class {
4063
4181
  try {
4064
4182
  const res = await fn();
4065
4183
  if (attempt > 0) {
4066
- logger11.info(
4184
+ logger12.info(
4067
4185
  {
4068
4186
  attempts: attempt + 1,
4069
4187
  elapsedTime: Date.now() - start
@@ -4078,7 +4196,7 @@ var A2AClient = class {
4078
4196
  }
4079
4197
  const elapsed = Date.now() - start;
4080
4198
  if (elapsed > maxElapsedTime) {
4081
- logger11.warn(
4199
+ logger12.warn(
4082
4200
  {
4083
4201
  attempts: attempt + 1,
4084
4202
  elapsedTime: elapsed,
@@ -4099,7 +4217,7 @@ var A2AClient = class {
4099
4217
  retryInterval = initialInterval * attempt ** exponent + Math.random() * 1e3;
4100
4218
  }
4101
4219
  const delayMs = Math.min(retryInterval, maxInterval);
4102
- logger11.info(
4220
+ logger12.info(
4103
4221
  {
4104
4222
  attempt: attempt + 1,
4105
4223
  delayMs,
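These A2AClient hunks surround the client's retry policy: exponential backoff with random jitter, a per-attempt cap at maxInterval, and an overall elapsed-time budget. A standalone sketch of that policy follows; the helper name, the exponent value, and the maxElapsedTime default are assumptions, while initialInterval and maxInterval match the DEFAULT_BACKOFF values visible above.

```js
// Retry with exponential backoff + jitter, capped per attempt and by a total
// elapsed-time budget (mirrors the retryInterval/delayMs computation above).
async function retryWithBackoff(fn, {
  initialInterval = 500,   // ms, as in DEFAULT_BACKOFF above
  maxInterval = 60000,     // ms, as in DEFAULT_BACKOFF above (6e4)
  exponent = 1.5,          // assumed
  maxElapsedTime = 300000, // assumed total budget, ms
} = {}) {
  const start = Date.now();
  for (let attempt = 0; ; attempt++) {
    try {
      return await fn();
    } catch (err) {
      if (Date.now() - start > maxElapsedTime) throw err; // budget exhausted, give up
      // Grow the wait with the attempt number, add up to 1s of jitter, then cap it.
      const retryInterval = initialInterval * attempt ** exponent + Math.random() * 1000;
      const delayMs = Math.min(retryInterval, maxInterval);
      await new Promise((resolve) => setTimeout(resolve, delayMs));
    }
  }
}
```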
@@ -4184,7 +4302,7 @@ var A2AClient = class {
4184
4302
  }
4185
4303
  const rpcResponse = await httpResponse.json();
4186
4304
  if (rpcResponse.id !== requestId2) {
4187
- logger11.warn(
4305
+ logger12.warn(
4188
4306
  {
4189
4307
  method,
4190
4308
  expectedId: requestId2,
@@ -4383,7 +4501,7 @@ var A2AClient = class {
4383
4501
  try {
4384
4502
  while (true) {
4385
4503
  const { done, value } = await reader.read();
4386
- logger11.info({ done, value }, "parseA2ASseStream");
4504
+ logger12.info({ done, value }, "parseA2ASseStream");
4387
4505
  if (done) {
4388
4506
  if (eventDataBuffer.trim()) {
4389
4507
  const result = this._processSseEventData(
@@ -4472,7 +4590,7 @@ var A2AClient = class {
4472
4590
  // src/agents/relationTools.ts
4473
4591
  init_conversations();
4474
4592
  init_dbClient();
4475
- var logger12 = agentsCore.getLogger("relationships Tools");
4593
+ var logger13 = agentsCore.getLogger("relationships Tools");
4476
4594
  var generateTransferToolDescription = (config2) => {
4477
4595
  return `Hand off the conversation to agent ${config2.id}.
4478
4596
 
@@ -4510,7 +4628,7 @@ var createTransferToAgentTool = ({
4510
4628
  "transfer.to_agent_id": transferConfig.id ?? "unknown"
4511
4629
  });
4512
4630
  }
4513
- logger12.info(
4631
+ logger13.info(
4514
4632
  {
4515
4633
  transferTo: transferConfig.id ?? "unknown",
4516
4634
  fromAgent: callingAgentId
@@ -4658,7 +4776,7 @@ function createDelegateToAgentTool({
4658
4776
  ...isInternal ? { fromAgentId: callingAgentId } : { fromExternalAgentId: callingAgentId }
4659
4777
  }
4660
4778
  };
4661
- logger12.info({ messageToSend }, "messageToSend");
4779
+ logger13.info({ messageToSend }, "messageToSend");
4662
4780
  await agentsCore.createMessage(dbClient_default)({
4663
4781
  id: nanoid.nanoid(),
4664
4782
  tenantId,
@@ -4720,7 +4838,7 @@ function createDelegateToAgentTool({
4720
4838
  }
4721
4839
 
4722
4840
  // src/agents/SystemPromptBuilder.ts
4723
- var logger13 = agentsCore.getLogger("SystemPromptBuilder");
4841
+ var logger14 = agentsCore.getLogger("SystemPromptBuilder");
4724
4842
  var SystemPromptBuilder = class {
4725
4843
  constructor(version, versionConfig) {
4726
4844
  this.version = version;
@@ -4736,9 +4854,12 @@ var SystemPromptBuilder = class {
4736
4854
  this.templates.set(name, content);
4737
4855
  }
4738
4856
  this.loaded = true;
4739
- logger13.debug({ templateCount: this.templates.size, version: this.version }, `Loaded ${this.templates.size} templates for version ${this.version}`);
4857
+ logger14.debug(
4858
+ { templateCount: this.templates.size, version: this.version },
4859
+ `Loaded ${this.templates.size} templates for version ${this.version}`
4860
+ );
4740
4861
  } catch (error) {
4741
- logger13.error({ error }, `Failed to load templates for version ${this.version}`);
4862
+ logger14.error({ error }, `Failed to load templates for version ${this.version}`);
4742
4863
  throw new Error(`Template loading failed: ${error}`);
4743
4864
  }
4744
4865
  }
@@ -5140,7 +5261,7 @@ function hasToolCallWithPrefix(prefix) {
5140
5261
  return false;
5141
5262
  };
5142
5263
  }
5143
- var logger14 = agentsCore.getLogger("Agent");
5264
+ var logger15 = agentsCore.getLogger("Agent");
5144
5265
  var CONSTANTS = {
5145
5266
  MAX_GENERATION_STEPS: 12,
5146
5267
  PHASE_1_TIMEOUT_MS: 27e4,
@@ -5393,14 +5514,14 @@ var Agent = class {
5393
5514
  for (const toolSet of tools) {
5394
5515
  for (const [toolName, originalTool] of Object.entries(toolSet)) {
5395
5516
  if (!isValidTool(originalTool)) {
5396
- logger14.error({ toolName }, "Invalid MCP tool structure - missing required properties");
5517
+ logger15.error({ toolName }, "Invalid MCP tool structure - missing required properties");
5397
5518
  continue;
5398
5519
  }
5399
5520
  const sessionWrappedTool = ai.tool({
5400
5521
  description: originalTool.description,
5401
5522
  inputSchema: originalTool.inputSchema,
5402
5523
  execute: async (args, { toolCallId }) => {
5403
- logger14.debug({ toolName, toolCallId }, "MCP Tool Called");
5524
+ logger15.debug({ toolName, toolCallId }, "MCP Tool Called");
5404
5525
  try {
5405
5526
  const result = await originalTool.execute(args, { toolCallId });
5406
5527
  toolSessionManager.recordToolResult(sessionId, {
@@ -5412,7 +5533,7 @@ var Agent = class {
5412
5533
  });
5413
5534
  return { result, toolCallId };
5414
5535
  } catch (error) {
5415
- logger14.error({ toolName, toolCallId, error }, "MCP tool execution failed");
5536
+ logger15.error({ toolName, toolCallId, error }, "MCP tool execution failed");
5416
5537
  throw error;
5417
5538
  }
5418
5539
  }
@@ -5497,7 +5618,7 @@ var Agent = class {
5497
5618
  selectedTools
5498
5619
  };
5499
5620
  }
5500
- logger14.info(
5621
+ logger15.info(
5501
5622
  {
5502
5623
  toolName: tool4.name,
5503
5624
  credentialReferenceId,
@@ -5537,7 +5658,7 @@ var Agent = class {
5537
5658
  async getResolvedContext(conversationId, requestContext) {
5538
5659
  try {
5539
5660
  if (!this.config.contextConfigId) {
5540
- logger14.debug({ graphId: this.config.graphId }, "No context config found for graph");
5661
+ logger15.debug({ graphId: this.config.graphId }, "No context config found for graph");
5541
5662
  return null;
5542
5663
  }
5543
5664
  const contextConfig = await agentsCore.getContextConfigById(dbClient_default)({
@@ -5545,7 +5666,7 @@ var Agent = class {
5545
5666
  id: this.config.contextConfigId
5546
5667
  });
5547
5668
  if (!contextConfig) {
5548
- logger14.warn({ contextConfigId: this.config.contextConfigId }, "Context config not found");
5669
+ logger15.warn({ contextConfigId: this.config.contextConfigId }, "Context config not found");
5549
5670
  return null;
5550
5671
  }
5551
5672
  if (!this.contextResolver) {
@@ -5562,7 +5683,7 @@ var Agent = class {
5562
5683
  $now: (/* @__PURE__ */ new Date()).toISOString(),
5563
5684
  $env: process.env
5564
5685
  };
5565
- logger14.debug(
5686
+ logger15.debug(
5566
5687
  {
5567
5688
  conversationId,
5568
5689
  contextConfigId: contextConfig.id,
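The context lines just above show the resolved request context being extended with built-in values before template resolution. A small sketch of that merge; the exact shape of the incoming request context is not shown in this diff.

```js
// Extend the validated request context with built-ins so context templates can
// reference the current timestamp ($now) and environment variables ($env).
function withBuiltins(requestContext) {
  return {
    ...requestContext,
    $now: new Date().toISOString(),
    $env: process.env,
  };
}
```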
@@ -5576,7 +5697,7 @@ var Agent = class {
5576
5697
  );
5577
5698
  return contextWithBuiltins;
5578
5699
  } catch (error) {
5579
- logger14.error(
5700
+ logger15.error(
5580
5701
  {
5581
5702
  conversationId,
5582
5703
  error: error instanceof Error ? error.message : "Unknown error"
@@ -5600,7 +5721,7 @@ var Agent = class {
5600
5721
  });
5601
5722
  return graphDefinition?.graphPrompt || void 0;
5602
5723
  } catch (error) {
5603
- logger14.warn(
5724
+ logger15.warn(
5604
5725
  {
5605
5726
  graphId: this.config.graphId,
5606
5727
  error: error instanceof Error ? error.message : "Unknown error"
@@ -5627,7 +5748,7 @@ var Agent = class {
5627
5748
  }
5628
5749
  return !!(graphDefinition.artifactComponents && Object.keys(graphDefinition.artifactComponents).length > 0);
5629
5750
  } catch (error) {
5630
- logger14.warn(
5751
+ logger15.warn(
5631
5752
  {
5632
5753
  graphId: this.config.graphId,
5633
5754
  tenantId: this.config.tenantId,
@@ -5687,7 +5808,7 @@ Key requirements:
5687
5808
  preserveUnresolved: false
5688
5809
  });
5689
5810
  } catch (error) {
5690
- logger14.error(
5811
+ logger15.error(
5691
5812
  {
5692
5813
  conversationId,
5693
5814
  error: error instanceof Error ? error.message : "Unknown error"
@@ -5732,7 +5853,7 @@ Key requirements:
5732
5853
  preserveUnresolved: false
5733
5854
  });
5734
5855
  } catch (error) {
5735
- logger14.error(
5856
+ logger15.error(
5736
5857
  {
5737
5858
  conversationId,
5738
5859
  error: error instanceof Error ? error.message : "Unknown error"
@@ -5760,7 +5881,7 @@ Key requirements:
5760
5881
  artifactId: z5.z.string().describe("The unique identifier of the artifact to get.")
5761
5882
  }),
5762
5883
  execute: async ({ artifactId }) => {
5763
- logger14.info({ artifactId }, "get_artifact executed");
5884
+ logger15.info({ artifactId }, "get_artifact executed");
5764
5885
  const artifact = await agentsCore.getLedgerArtifacts(dbClient_default)({
5765
5886
  scopes: {
5766
5887
  tenantId: this.config.tenantId,
@@ -5827,7 +5948,7 @@ Key requirements:
5827
5948
  graphId: this.config.graphId
5828
5949
  });
5829
5950
  } catch (error) {
5830
- logger14.error(
5951
+ logger15.error(
5831
5952
  { error, graphId: this.config.graphId },
5832
5953
  "Failed to check graph artifact components"
5833
5954
  );
@@ -5931,7 +6052,7 @@ Key requirements:
5931
6052
  const configuredTimeout = modelSettings.maxDuration ? Math.min(modelSettings.maxDuration * 1e3, MAX_ALLOWED_TIMEOUT_MS) : shouldStreamPhase1 ? CONSTANTS.PHASE_1_TIMEOUT_MS : CONSTANTS.NON_STREAMING_PHASE_1_TIMEOUT_MS;
5932
6053
  const timeoutMs = Math.min(configuredTimeout, MAX_ALLOWED_TIMEOUT_MS);
5933
6054
  if (modelSettings.maxDuration && modelSettings.maxDuration * 1e3 > MAX_ALLOWED_TIMEOUT_MS) {
5934
- logger14.warn(
6055
+ logger15.warn(
5935
6056
  {
5936
6057
  requestedTimeout: modelSettings.maxDuration * 1e3,
5937
6058
  appliedTimeout: timeoutMs,
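This hunk only renumbers the logger, but the surrounding context shows the timeout resolution it warns about: a caller-supplied maxDuration (seconds) is converted to milliseconds and clamped to MAX_ALLOWED_TIMEOUT_MS, falling back to a phase default otherwise. A sketch under the assumption that the ceiling is 300000 ms; 270000 matches PHASE_1_TIMEOUT_MS (27e4) from the CONSTANTS block earlier in this file.

```js
// Clamp a requested duration (seconds) to a hard server-side ceiling, or use
// the phase default when nothing is configured (mirrors the logic above).
function resolveTimeoutMs(maxDurationSec, phaseDefaultMs, maxAllowedMs = 300000 /* assumed */) {
  const configured = maxDurationSec
    ? Math.min(maxDurationSec * 1000, maxAllowedMs)
    : phaseDefaultMs; // e.g. 270000 for streaming phase 1
  return Math.min(configured, maxAllowedMs);
}

// resolveTimeoutMs(600, 270000) -> 300000, and the code above logs a warning
// because the requested 600000 ms exceeds the ceiling.
```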
@@ -5973,7 +6094,7 @@ Key requirements:
5973
6094
  }
5974
6095
  );
5975
6096
  } catch (error) {
5976
- logger14.debug({ error }, "Failed to track agent reasoning");
6097
+ logger15.debug({ error }, "Failed to track agent reasoning");
5977
6098
  }
5978
6099
  }
5979
6100
  if (last && "toolCalls" in last && last.toolCalls) {
@@ -6056,7 +6177,7 @@ Key requirements:
6056
6177
  }
6057
6178
  );
6058
6179
  } catch (error) {
6059
- logger14.debug({ error }, "Failed to track agent reasoning");
6180
+ logger15.debug({ error }, "Failed to track agent reasoning");
6060
6181
  }
6061
6182
  }
6062
6183
  if (last && "toolCalls" in last && last.toolCalls) {
@@ -6101,7 +6222,7 @@ Key requirements:
6101
6222
  return;
6102
6223
  }
6103
6224
  if (toolName === "save_artifact_tool" || toolName === "save_tool_result") {
6104
- logger14.info({ result }, "save_artifact_tool or save_tool_result");
6225
+ logger15.info({ result }, "save_artifact_tool or save_tool_result");
6105
6226
  if (result.output.artifacts) {
6106
6227
  for (const artifact of result.output.artifacts) {
6107
6228
  const artifactId = artifact?.artifactId || "N/A";
@@ -6176,7 +6297,7 @@ ${output}`;
6176
6297
  { role: "user", content: userMessage },
6177
6298
  ...reasoningFlow,
6178
6299
  {
6179
- role: "system",
6300
+ role: "user",
6180
6301
  content: await this.buildPhase2SystemPrompt()
6181
6302
  }
6182
6303
  ],
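The one-line change in this hunk switches the trailing phase-2 structuring prompt from a system message to a user message. A sketch of the resulting message array; the variable names mirror the hunk, and their contents are not shown in this diff.

```js
// Phase-2 message array after this change: the structuring prompt rides along
// as a user message instead of a system message.
function buildPhase2Messages(userMessage, reasoningFlow, phase2Prompt) {
  return [
    { role: "user", content: userMessage },
    ...reasoningFlow,                        // assistant/tool steps carried over from phase 1
    { role: "user", content: phase2Prompt }, // was role: "system" in 0.2.0
  ];
}
```

Some providers reject or down-weight system messages that appear after assistant or tool turns, which is a plausible motivation for the switch.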
@@ -6275,7 +6396,9 @@ async function resolveModelConfig(graphId, agent) {
6275
6396
  summarizer: agent.models?.summarizer || project.models.summarizer || project.models.base
6276
6397
  };
6277
6398
  }
6278
- throw new Error("Base model configuration is required. Please configure models at the project level.");
6399
+ throw new Error(
6400
+ "Base model configuration is required. Please configure models at the project level."
6401
+ );
6279
6402
  }
6280
6403
 
6281
6404
  // src/agents/generateTaskHandler.ts
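Before the generateTaskHandler hunks below, the resolveModelConfig hunk just above reformats the error thrown when no base model exists; its context also shows the summarizer falling back from agent to project settings. A sketch of that cascade, assuming agent-level settings override project-level ones (only the summarizer line and the thrown error are visible here).

```js
// Fallback cascade for model settings: agent-level first, then project-level,
// then the project's base model; reject configuration with no base model.
function resolveModels(agentModels, projectModels) {
  const base = agentModels?.base || projectModels?.base;
  if (!base) {
    throw new Error(
      "Base model configuration is required. Please configure models at the project level."
    );
  }
  return {
    base,
    summarizer: agentModels?.summarizer || projectModels?.summarizer || base,
  };
}
```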
@@ -6289,7 +6412,7 @@ function parseEmbeddedJson(data) {
6289
6412
  }
6290
6413
  });
6291
6414
  }
6292
- var logger15 = agentsCore.getLogger("generateTaskHandler");
6415
+ var logger16 = agentsCore.getLogger("generateTaskHandler");
6293
6416
  var createTaskHandler = (config2, credentialStoreRegistry) => {
6294
6417
  return async (task) => {
6295
6418
  try {
@@ -6339,7 +6462,33 @@ var createTaskHandler = (config2, credentialStoreRegistry) => {
6339
6462
  agentId: config2.agentId
6340
6463
  })
6341
6464
  ]);
6342
- logger15.info({ toolsForAgent, internalRelations, externalRelations }, "agent stuff");
6465
+ logger16.info({ toolsForAgent, internalRelations, externalRelations }, "agent stuff");
6466
+ const enhancedInternalRelations = await Promise.all(
6467
+ internalRelations.map(async (relation) => {
6468
+ try {
6469
+ const relatedAgent = await agentsCore.getAgentById(dbClient_default)({
6470
+ scopes: { tenantId: config2.tenantId, projectId: config2.projectId },
6471
+ agentId: relation.id
6472
+ });
6473
+ if (relatedAgent) {
6474
+ const relatedAgentRelations = await agentsCore.getRelatedAgentsForGraph(dbClient_default)({
6475
+ scopes: { tenantId: config2.tenantId, projectId: config2.projectId },
6476
+ graphId: config2.graphId,
6477
+ agentId: relation.id
6478
+ });
6479
+ const enhancedDescription = generateDescriptionWithTransfers(
6480
+ relation.description || "",
6481
+ relatedAgentRelations.internalRelations,
6482
+ relatedAgentRelations.externalRelations
6483
+ );
6484
+ return { ...relation, description: enhancedDescription };
6485
+ }
6486
+ } catch (error) {
6487
+ logger16.warn({ agentId: relation.id, error }, "Failed to enhance agent description");
6488
+ }
6489
+ return relation;
6490
+ })
6491
+ );
6343
6492
  const agentPrompt = "prompt" in config2.agentSchema ? config2.agentSchema.prompt : "";
6344
6493
  const models = "models" in config2.agentSchema ? config2.agentSchema.models : void 0;
6345
6494
  const stopWhen = "stopWhen" in config2.agentSchema ? config2.agentSchema.stopWhen : void 0;
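The block added above enriches each internal relation's description with that agent's own transfer and delegate targets before the relations are wired into agentRelations, transferRelations, and delegateRelations below. A reduced sketch of the pattern; `fetchAgent` and `fetchGraphRelations` are illustrative stand-ins for the getAgentById / getRelatedAgentsForGraph calls, and `describe` stands in for generateDescriptionWithTransfers.

```js
// Enrich each relation concurrently; a failure for one relation falls back to
// the original relation instead of failing task-handler construction.
async function enhanceRelations(relations, { fetchAgent, fetchGraphRelations, describe }) {
  return Promise.all(
    relations.map(async (relation) => {
      try {
        const related = await fetchAgent(relation.id);
        if (!related) return relation;
        const { internalRelations, externalRelations } = await fetchGraphRelations(relation.id);
        return {
          ...relation,
          description: describe(relation.description || "", internalRelations, externalRelations),
        };
      } catch {
        return relation; // best-effort: keep the original description on error
      }
    })
  );
}
```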
@@ -6356,7 +6505,7 @@ var createTaskHandler = (config2, credentialStoreRegistry) => {
6356
6505
  agentPrompt,
6357
6506
  models: models || void 0,
6358
6507
  stopWhen: stopWhen || void 0,
6359
- agentRelations: internalRelations.map((relation) => ({
6508
+ agentRelations: enhancedInternalRelations.map((relation) => ({
6360
6509
  id: relation.id,
6361
6510
  tenantId: config2.tenantId,
6362
6511
  projectId: config2.projectId,
@@ -6370,7 +6519,7 @@ var createTaskHandler = (config2, credentialStoreRegistry) => {
6370
6519
  agentRelations: [],
6371
6520
  transferRelations: []
6372
6521
  })),
6373
- transferRelations: internalRelations.filter((relation) => relation.relationType === "transfer").map((relation) => ({
6522
+ transferRelations: enhancedInternalRelations.filter((relation) => relation.relationType === "transfer").map((relation) => ({
6374
6523
  baseUrl: config2.baseUrl,
6375
6524
  apiKey: config2.apiKey,
6376
6525
  id: relation.id,
@@ -6386,7 +6535,7 @@ var createTaskHandler = (config2, credentialStoreRegistry) => {
6386
6535
  })),
6387
6536
  delegateRelations: [
6388
6537
  // Internal delegate relations
6389
- ...internalRelations.filter((relation) => relation.relationType === "delegate").map((relation) => ({
6538
+ ...enhancedInternalRelations.filter((relation) => relation.relationType === "delegate").map((relation) => ({
6390
6539
  type: "internal",
6391
6540
  config: {
6392
6541
  id: relation.id,
@@ -6439,7 +6588,7 @@ var createTaskHandler = (config2, credentialStoreRegistry) => {
6439
6588
  const taskIdMatch = task.id.match(/^task_([^-]+-[^-]+-\d+)-/);
6440
6589
  if (taskIdMatch) {
6441
6590
  contextId = taskIdMatch[1];
6442
- logger15.info(
6591
+ logger16.info(
6443
6592
  {
6444
6593
  taskId: task.id,
6445
6594
  extractedContextId: contextId,
@@ -6455,7 +6604,7 @@ var createTaskHandler = (config2, credentialStoreRegistry) => {
6455
6604
  const isDelegation = task.context?.metadata?.isDelegation === true;
6456
6605
  agent.setDelegationStatus(isDelegation);
6457
6606
  if (isDelegation) {
6458
- logger15.info(
6607
+ logger16.info(
6459
6608
  { agentId: config2.agentId, taskId: task.id },
6460
6609
  "Delegated agent - streaming disabled"
6461
6610
  );
@@ -6661,86 +6810,11 @@ async function getRegisteredGraph(executionContext) {
6661
6810
  const agentFrameworkBaseUrl = `${baseUrl}/agents`;
6662
6811
  return hydrateGraph({ dbGraph, baseUrl: agentFrameworkBaseUrl, apiKey });
6663
6812
  }
6664
- init_dbClient();
6665
- agentsCore.getLogger("agents");
6666
- async function hydrateAgent({
6667
- dbAgent,
6668
- graphId,
6669
- baseUrl,
6670
- apiKey,
6671
- credentialStoreRegistry
6672
- }) {
6673
- try {
6674
- const taskHandlerConfig = await createTaskHandlerConfig({
6675
- tenantId: dbAgent.tenantId,
6676
- projectId: dbAgent.projectId,
6677
- graphId,
6678
- agentId: dbAgent.id,
6679
- baseUrl,
6680
- apiKey
6681
- });
6682
- const taskHandler = createTaskHandler(taskHandlerConfig, credentialStoreRegistry);
6683
- const agentCard = {
6684
- name: dbAgent.name,
6685
- description: dbAgent.description || "AI Agent",
6686
- url: baseUrl ? `${baseUrl}/a2a` : "",
6687
- version: "1.0.0",
6688
- capabilities: {
6689
- streaming: true,
6690
- // Enable streaming for A2A compliance
6691
- pushNotifications: false,
6692
- stateTransitionHistory: false
6693
- },
6694
- defaultInputModes: ["text", "text/plain"],
6695
- defaultOutputModes: ["text", "text/plain"],
6696
- skills: [],
6697
- // Add provider info if available
6698
- ...baseUrl && {
6699
- provider: {
6700
- organization: "Inkeep",
6701
- url: baseUrl
6702
- }
6703
- }
6704
- };
6705
- return {
6706
- agentId: dbAgent.id,
6707
- tenantId: dbAgent.tenantId,
6708
- projectId: dbAgent.projectId,
6709
- graphId,
6710
- agentCard,
6711
- taskHandler
6712
- };
6713
- } catch (error) {
6714
- console.error(`\u274C Failed to hydrate agent ${dbAgent.id}:`, error);
6715
- throw error;
6716
- }
6717
- }
6718
- async function getRegisteredAgent(executionContext, credentialStoreRegistry) {
6719
- const { tenantId, projectId, graphId, agentId, baseUrl, apiKey } = executionContext;
6720
- if (!agentId) {
6721
- throw new Error("Agent ID is required");
6722
- }
6723
- const dbAgent = await agentsCore.getAgentById(dbClient_default)({
6724
- scopes: { tenantId, projectId },
6725
- agentId
6726
- });
6727
- if (!dbAgent) {
6728
- return null;
6729
- }
6730
- const agentFrameworkBaseUrl = `${baseUrl}/agents`;
6731
- return hydrateAgent({
6732
- dbAgent,
6733
- graphId,
6734
- baseUrl: agentFrameworkBaseUrl,
6735
- credentialStoreRegistry,
6736
- apiKey
6737
- });
6738
- }
6739
6813
 
6740
6814
  // src/routes/agents.ts
6741
6815
  init_dbClient();
6742
6816
  var app = new zodOpenapi.OpenAPIHono();
6743
- var logger16 = agentsCore.getLogger("agents");
6817
+ var logger17 = agentsCore.getLogger("agents");
6744
6818
  app.openapi(
6745
6819
  zodOpenapi.createRoute({
6746
6820
  method: "get",
@@ -6778,7 +6852,7 @@ app.openapi(
6778
6852
  tracestate: c.req.header("tracestate"),
6779
6853
  baggage: c.req.header("baggage")
6780
6854
  };
6781
- logger16.info(
6855
+ logger17.info(
6782
6856
  {
6783
6857
  otelHeaders,
6784
6858
  path: c.req.path,
@@ -6789,7 +6863,7 @@ app.openapi(
6789
6863
  const executionContext = agentsCore.getRequestExecutionContext(c);
6790
6864
  const { tenantId, projectId, graphId, agentId } = executionContext;
6791
6865
  if (agentId) {
6792
- logger16.info(
6866
+ logger17.info(
6793
6867
  {
6794
6868
  message: "getRegisteredAgent (agent-level)",
6795
6869
  tenantId,
@@ -6801,13 +6875,13 @@ app.openapi(
6801
6875
  );
6802
6876
  const credentialStores = c.get("credentialStores");
6803
6877
  const agent = await getRegisteredAgent(executionContext, credentialStores);
6804
- logger16.info({ agent }, "agent registered: well-known agent.json");
6878
+ logger17.info({ agent }, "agent registered: well-known agent.json");
6805
6879
  if (!agent) {
6806
6880
  return c.json({ error: "Agent not found" }, 404);
6807
6881
  }
6808
6882
  return c.json(agent.agentCard);
6809
6883
  } else {
6810
- logger16.info(
6884
+ logger17.info(
6811
6885
  {
6812
6886
  message: "getRegisteredGraph (graph-level)",
6813
6887
  tenantId,
@@ -6830,7 +6904,7 @@ app.post("/a2a", async (c) => {
6830
6904
  tracestate: c.req.header("tracestate"),
6831
6905
  baggage: c.req.header("baggage")
6832
6906
  };
6833
- logger16.info(
6907
+ logger17.info(
6834
6908
  {
6835
6909
  otelHeaders,
6836
6910
  path: c.req.path,
@@ -6841,7 +6915,7 @@ app.post("/a2a", async (c) => {
6841
6915
  const executionContext = agentsCore.getRequestExecutionContext(c);
6842
6916
  const { tenantId, projectId, graphId, agentId } = executionContext;
6843
6917
  if (agentId) {
6844
- logger16.info(
6918
+ logger17.info(
6845
6919
  {
6846
6920
  message: "a2a (agent-level)",
6847
6921
  tenantId,
@@ -6865,7 +6939,7 @@ app.post("/a2a", async (c) => {
6865
6939
  }
6866
6940
  return a2aHandler(c, agent);
6867
6941
  } else {
6868
- logger16.info(
6942
+ logger17.info(
6869
6943
  {
6870
6944
  message: "a2a (graph-level)",
6871
6945
  tenantId,
@@ -6911,14 +6985,14 @@ init_dbClient();
6911
6985
 
6912
6986
  // src/a2a/transfer.ts
6913
6987
  init_dbClient();
6914
- var logger17 = agentsCore.getLogger("Transfer");
6988
+ var logger18 = agentsCore.getLogger("Transfer");
6915
6989
  async function executeTransfer({
6916
6990
  tenantId,
6917
6991
  threadId,
6918
6992
  projectId,
6919
6993
  targetAgentId
6920
6994
  }) {
6921
- logger17.info({ targetAgent: targetAgentId }, "Executing transfer to agent");
6995
+ logger18.info({ targetAgent: targetAgentId }, "Executing transfer to agent");
6922
6996
  await agentsCore.setActiveAgentForThread(dbClient_default)({
6923
6997
  scopes: { tenantId, projectId },
6924
6998
  threadId,
@@ -7113,7 +7187,7 @@ var _VercelDataStreamHelper = class _VercelDataStreamHelper {
7113
7187
  __publicField(this, "queuedOperations", []);
7114
7188
  // Timing tracking for text sequences (text-end to text-start gap)
7115
7189
  __publicField(this, "lastTextEndTimestamp", 0);
7116
- __publicField(this, "TEXT_GAP_THRESHOLD", 1e3);
7190
+ __publicField(this, "TEXT_GAP_THRESHOLD", 50);
7117
7191
  // milliseconds - if gap between text sequences is less than this, queue operations
7118
7192
  // Connection management and forced cleanup
7119
7193
  __publicField(this, "connectionDropTimer");
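The only functional change in this hunk drops TEXT_GAP_THRESHOLD from 1000 ms to 50 ms. A sketch of how such a threshold typically gates queuing between text sequences; the class and method names here are illustrative, not the stream helper's real internals.

```js
// If a new text sequence starts within thresholdMs of the previous one ending,
// interleaved operations are queued rather than flushed immediately.
class TextGapGate {
  constructor(thresholdMs = 50) {
    this.thresholdMs = thresholdMs;
    this.lastTextEndTimestamp = 0;
  }
  onTextEnd(now = Date.now()) {
    this.lastTextEndTimestamp = now;
  }
  shouldQueue(now = Date.now()) {
    // 50ms only treats near-back-to-back sequences as one run; the old 1000ms
    // threshold grouped far more aggressively.
    return now - this.lastTextEndTimestamp < this.thresholdMs;
  }
}
```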
@@ -7462,7 +7536,7 @@ function createMCPStreamHelper() {
7462
7536
 
7463
7537
  // src/handlers/executionHandler.ts
7464
7538
  init_dbClient();
7465
- var logger18 = agentsCore.getLogger("ExecutionHandler");
7539
+ var logger19 = agentsCore.getLogger("ExecutionHandler");
7466
7540
  var ExecutionHandler = class {
7467
7541
  constructor() {
7468
7542
  // Hardcoded error limit - separate from configurable stopWhen
@@ -7487,7 +7561,7 @@ var ExecutionHandler = class {
7487
7561
  const { tenantId, projectId, graphId, apiKey, baseUrl } = executionContext;
7488
7562
  registerStreamHelper(requestId2, sseHelper);
7489
7563
  graphSessionManager.createSession(requestId2, graphId, tenantId, projectId);
7490
- logger18.info({ sessionId: requestId2, graphId }, "Created GraphSession for message execution");
7564
+ logger19.info({ sessionId: requestId2, graphId }, "Created GraphSession for message execution");
7491
7565
  let graphConfig = null;
7492
7566
  try {
7493
7567
  graphConfig = await agentsCore.getFullGraph(dbClient_default)({ scopes: { tenantId, projectId }, graphId });
@@ -7499,7 +7573,7 @@ var ExecutionHandler = class {
7499
7573
  );
7500
7574
  }
7501
7575
  } catch (error) {
7502
- logger18.error(
7576
+ logger19.error(
7503
7577
  {
7504
7578
  error: error instanceof Error ? error.message : "Unknown error",
7505
7579
  stack: error instanceof Error ? error.stack : void 0
@@ -7515,7 +7589,7 @@ var ExecutionHandler = class {
7515
7589
  try {
7516
7590
  await sseHelper.writeOperation(agentInitializingOp(requestId2, graphId));
7517
7591
  const taskId = `task_${conversationId}-${requestId2}`;
7518
- logger18.info(
7592
+ logger19.info(
7519
7593
  { taskId, currentAgentId, conversationId, requestId: requestId2 },
7520
7594
  "Attempting to create or reuse existing task"
7521
7595
  );
@@ -7538,7 +7612,7 @@ var ExecutionHandler = class {
7538
7612
  agent_id: currentAgentId
7539
7613
  }
7540
7614
  });
7541
- logger18.info(
7615
+ logger19.info(
7542
7616
  {
7543
7617
  taskId,
7544
7618
  createdTaskMetadata: Array.isArray(task) ? task[0]?.metadata : task?.metadata
@@ -7547,27 +7621,27 @@ var ExecutionHandler = class {
7547
7621
  );
7548
7622
  } catch (error) {
7549
7623
  if (error?.message?.includes("UNIQUE constraint failed") || error?.message?.includes("PRIMARY KEY constraint failed") || error?.code === "SQLITE_CONSTRAINT_PRIMARYKEY") {
7550
- logger18.info(
7624
+ logger19.info(
7551
7625
  { taskId, error: error.message },
7552
7626
  "Task already exists, fetching existing task"
7553
7627
  );
7554
7628
  const existingTask = await agentsCore.getTask(dbClient_default)({ id: taskId });
7555
7629
  if (existingTask) {
7556
7630
  task = existingTask;
7557
- logger18.info(
7631
+ logger19.info(
7558
7632
  { taskId, existingTask },
7559
7633
  "Successfully reused existing task from race condition"
7560
7634
  );
7561
7635
  } else {
7562
- logger18.error({ taskId, error }, "Task constraint failed but task not found");
7636
+ logger19.error({ taskId, error }, "Task constraint failed but task not found");
7563
7637
  throw error;
7564
7638
  }
7565
7639
  } else {
7566
- logger18.error({ taskId, error }, "Failed to create task due to non-constraint error");
7640
+ logger19.error({ taskId, error }, "Failed to create task due to non-constraint error");
7567
7641
  throw error;
7568
7642
  }
7569
7643
  }
7570
- logger18.debug(
7644
+ logger19.debug(
7571
7645
  {
7572
7646
  timestamp: (/* @__PURE__ */ new Date()).toISOString(),
7573
7647
  executionType: "create_initial_task",
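The hunks above renumber the logger around the task-creation race handling: a concurrent request may insert the same task id first, so a primary-key violation is treated as "task already exists" and the existing row is fetched. A sketch of that create-or-reuse pattern; `insertTask` and `getTask` stand in for the agentsCore calls.

```js
// Create a task, or reuse the existing one when a concurrent insert won the race.
async function createOrReuseTask(taskId, data, { insertTask, getTask }) {
  try {
    return await insertTask({ id: taskId, ...data });
  } catch (error) {
    const isConstraint =
      error?.message?.includes("UNIQUE constraint failed") ||
      error?.message?.includes("PRIMARY KEY constraint failed") ||
      error?.code === "SQLITE_CONSTRAINT_PRIMARYKEY";
    if (!isConstraint) throw error;
    const existing = await getTask(taskId);
    if (!existing) throw error; // constraint fired but the row is missing: rethrow
    return existing;
  }
}
```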
@@ -7585,7 +7659,7 @@ var ExecutionHandler = class {
7585
7659
  const maxTransfers = graphConfig?.stopWhen?.transferCountIs ?? 10;
7586
7660
  while (iterations < maxTransfers) {
7587
7661
  iterations++;
7588
- logger18.info(
7662
+ logger19.info(
7589
7663
  { iterations, currentAgentId, graphId, conversationId, fromAgentId },
7590
7664
  `Execution loop iteration ${iterations} with agent ${currentAgentId}, transfer from: ${fromAgentId || "none"}`
7591
7665
  );
@@ -7593,10 +7667,10 @@ var ExecutionHandler = class {
7593
7667
  scopes: { tenantId, projectId },
7594
7668
  conversationId
7595
7669
  });
7596
- logger18.info({ activeAgent }, "activeAgent");
7670
+ logger19.info({ activeAgent }, "activeAgent");
7597
7671
  if (activeAgent && activeAgent.activeAgentId !== currentAgentId) {
7598
7672
  currentAgentId = activeAgent.activeAgentId;
7599
- logger18.info({ currentAgentId }, `Updated current agent to: ${currentAgentId}`);
7673
+ logger19.info({ currentAgentId }, `Updated current agent to: ${currentAgentId}`);
7600
7674
  }
7601
7675
  const agentBaseUrl = `${baseUrl}/agents`;
7602
7676
  const a2aClient = new A2AClient(agentBaseUrl, {
@@ -7637,13 +7711,13 @@ var ExecutionHandler = class {
7637
7711
  });
7638
7712
  if (!messageResponse?.result) {
7639
7713
  errorCount++;
7640
- logger18.error(
7714
+ logger19.error(
7641
7715
  { currentAgentId, iterations, errorCount },
7642
7716
  `No response from agent ${currentAgentId} on iteration ${iterations} (error ${errorCount}/${this.MAX_ERRORS})`
7643
7717
  );
7644
7718
  if (errorCount >= this.MAX_ERRORS) {
7645
7719
  const errorMessage2 = `Maximum error limit (${this.MAX_ERRORS}) reached`;
7646
- logger18.error({ maxErrors: this.MAX_ERRORS, errorCount }, errorMessage2);
7720
+ logger19.error({ maxErrors: this.MAX_ERRORS, errorCount }, errorMessage2);
7647
7721
  await sseHelper.writeError(errorMessage2);
7648
7722
  await sseHelper.writeOperation(errorOp(errorMessage2, currentAgentId || "system"));
7649
7723
  if (task) {
@@ -7669,7 +7743,7 @@ var ExecutionHandler = class {
7669
7743
  const transferResponse = messageResponse.result;
7670
7744
  const targetAgentId = transferResponse.artifacts?.[0]?.parts?.[0]?.data?.targetAgentId;
7671
7745
  const transferReason = transferResponse.artifacts?.[0]?.parts?.[1]?.text;
7672
- logger18.info({ targetAgentId, transferReason }, "transfer response");
7746
+ logger19.info({ targetAgentId, transferReason }, "transfer response");
7673
7747
  currentMessage = `<transfer_context> ${transferReason} </transfer_context>`;
7674
7748
  const { success, targetAgentId: newAgentId } = await executeTransfer({
7675
7749
  projectId,
@@ -7680,7 +7754,7 @@ var ExecutionHandler = class {
7680
7754
  if (success) {
7681
7755
  fromAgentId = currentAgentId;
7682
7756
  currentAgentId = newAgentId;
7683
- logger18.info(
7757
+ logger19.info(
7684
7758
  {
7685
7759
  transferFrom: fromAgentId,
7686
7760
  transferTo: currentAgentId,
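These hunks touch the transfer loop: each iteration sends the current message to the active agent, and a transfer artifact switches agents and feeds the transfer reason back in as context, bounded by maxTransfers (graphConfig?.stopWhen?.transferCountIs ?? 10 above). A skeleton of the loop; the helper names are illustrative.

```js
// Transfer loop skeleton: run until an agent answers without transferring, or
// until the transfer limit is hit.
async function runTransferLoop({ startAgentId, maxTransfers = 10, sendToAgent, readTransfer, switchAgent }) {
  let currentAgentId = startAgentId;
  let fromAgentId;
  let currentMessage;
  for (let iterations = 1; iterations <= maxTransfers; iterations++) {
    const response = await sendToAgent(currentAgentId, currentMessage, fromAgentId);
    const transfer = readTransfer(response); // { targetAgentId, reason } or null
    if (!transfer) {
      return { success: true, iterations, response };
    }
    await switchAgent(transfer.targetAgentId); // persist the active agent for the thread
    fromAgentId = currentAgentId;
    currentAgentId = transfer.targetAgentId;
    currentMessage = `<transfer_context> ${transfer.reason} </transfer_context>`;
  }
  return {
    success: false,
    iterations: maxTransfers,
    error: `Maximum transfer limit (${maxTransfers}) reached without completion`,
  };
}
```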
@@ -7698,7 +7772,7 @@ var ExecutionHandler = class {
7698
7772
  const graphSessionData = graphSessionManager.getSession(requestId2);
7699
7773
  if (graphSessionData) {
7700
7774
  const sessionSummary = graphSessionData.getSummary();
7701
- logger18.info(sessionSummary, "GraphSession data after completion");
7775
+ logger19.info(sessionSummary, "GraphSession data after completion");
7702
7776
  }
7703
7777
  let textContent = "";
7704
7778
  for (const part of responseParts) {
@@ -7707,78 +7781,84 @@ var ExecutionHandler = class {
7707
7781
  textContent += part.text;
7708
7782
  }
7709
7783
  }
7710
- const activeSpan = api.trace.getActiveSpan();
7711
- if (activeSpan) {
7712
- activeSpan.setAttributes({
7713
- "ai.response.content": textContent || "No response content",
7714
- "ai.response.timestamp": (/* @__PURE__ */ new Date()).toISOString(),
7715
- "ai.agent.name": currentAgentId
7716
- });
7717
- }
7718
- await agentsCore.createMessage(dbClient_default)({
7719
- id: nanoid.nanoid(),
7720
- tenantId,
7721
- projectId,
7722
- conversationId,
7723
- role: "agent",
7724
- content: {
7725
- text: textContent || void 0,
7726
- parts: responseParts.map((part) => ({
7727
- type: part.kind === "text" ? "text" : "data",
7728
- text: part.kind === "text" ? part.text : void 0,
7729
- data: part.kind === "data" ? JSON.stringify(part.data) : void 0
7730
- }))
7731
- },
7732
- visibility: "user-facing",
7733
- messageType: "chat",
7734
- agentId: currentAgentId,
7735
- fromAgentId: currentAgentId,
7736
- taskId: task.id
7737
- });
7738
- const updateTaskStart = Date.now();
7739
- await agentsCore.updateTask(dbClient_default)({
7740
- taskId: task.id,
7741
- data: {
7742
- status: "completed",
7743
- metadata: {
7744
- ...task.metadata,
7745
- completed_at: (/* @__PURE__ */ new Date()).toISOString(),
7746
- response: {
7747
- text: textContent,
7748
- parts: responseParts,
7749
- hasText: !!textContent,
7750
- hasData: responseParts.some((p) => p.kind === "data")
7784
+ return tracer.startActiveSpan("execution_handler.execute", {}, async (span) => {
7785
+ try {
7786
+ span.setAttributes({
7787
+ "ai.response.content": textContent || "No response content",
7788
+ "ai.response.timestamp": (/* @__PURE__ */ new Date()).toISOString(),
7789
+ "ai.agent.name": currentAgentId
7790
+ });
7791
+ await agentsCore.createMessage(dbClient_default)({
7792
+ id: nanoid.nanoid(),
7793
+ tenantId,
7794
+ projectId,
7795
+ conversationId,
7796
+ role: "agent",
7797
+ content: {
7798
+ text: textContent || void 0,
7799
+ parts: responseParts.map((part) => ({
7800
+ type: part.kind === "text" ? "text" : "data",
7801
+ text: part.kind === "text" ? part.text : void 0,
7802
+ data: part.kind === "data" ? JSON.stringify(part.data) : void 0
7803
+ }))
7804
+ },
7805
+ visibility: "user-facing",
7806
+ messageType: "chat",
7807
+ agentId: currentAgentId,
7808
+ fromAgentId: currentAgentId,
7809
+ taskId: task.id
7810
+ });
7811
+ const updateTaskStart = Date.now();
7812
+ await agentsCore.updateTask(dbClient_default)({
7813
+ taskId: task.id,
7814
+ data: {
7815
+ status: "completed",
7816
+ metadata: {
7817
+ ...task.metadata,
7818
+ completed_at: (/* @__PURE__ */ new Date()).toISOString(),
7819
+ response: {
7820
+ text: textContent,
7821
+ parts: responseParts,
7822
+ hasText: !!textContent,
7823
+ hasData: responseParts.some((p) => p.kind === "data")
7824
+ }
7825
+ }
7751
7826
  }
7827
+ });
7828
+ const updateTaskEnd = Date.now();
7829
+ logger19.info(
7830
+ { duration: updateTaskEnd - updateTaskStart },
7831
+ "Completed updateTask operation"
7832
+ );
7833
+ await sseHelper.writeOperation(completionOp(currentAgentId, iterations));
7834
+ await sseHelper.complete();
7835
+ logger19.info({}, "Ending GraphSession and cleaning up");
7836
+ graphSessionManager.endSession(requestId2);
7837
+ logger19.info({}, "Cleaning up streamHelper");
7838
+ unregisterStreamHelper(requestId2);
7839
+ let response;
7840
+ if (sseHelper instanceof MCPStreamHelper) {
7841
+ const captured = sseHelper.getCapturedResponse();
7842
+ response = captured.text || "No response content";
7752
7843
  }
7844
+ logger19.info({}, "ExecutionHandler returning success");
7845
+ return { success: true, iterations, response };
7846
+ } catch (error) {
7847
+ agentsCore.setSpanWithError(span, error);
7848
+ throw error;
7849
+ } finally {
7850
+ span.end();
7753
7851
  }
7754
7852
  });
7755
- const updateTaskEnd = Date.now();
7756
- logger18.info(
7757
- { duration: updateTaskEnd - updateTaskStart },
7758
- "Completed updateTask operation"
7759
- );
7760
- await sseHelper.writeOperation(completionOp(currentAgentId, iterations));
7761
- await sseHelper.complete();
7762
- logger18.info({}, "Ending GraphSession and cleaning up");
7763
- graphSessionManager.endSession(requestId2);
7764
- logger18.info({}, "Cleaning up streamHelper");
7765
- unregisterStreamHelper(requestId2);
7766
- let response;
7767
- if (sseHelper instanceof MCPStreamHelper) {
7768
- const captured = sseHelper.getCapturedResponse();
7769
- response = captured.text || "No response content";
7770
- }
7771
- logger18.info({}, "ExecutionHandler returning success");
7772
- return { success: true, iterations, response };
7773
7853
  }
7774
7854
  errorCount++;
7775
- logger18.warn(
7855
+ logger19.warn(
7776
7856
  { iterations, errorCount },
7777
7857
  `No valid response or transfer on iteration ${iterations} (error ${errorCount}/${this.MAX_ERRORS})`
7778
7858
  );
7779
7859
  if (errorCount >= this.MAX_ERRORS) {
7780
7860
  const errorMessage2 = `Maximum error limit (${this.MAX_ERRORS}) reached`;
7781
- logger18.error({ maxErrors: this.MAX_ERRORS, errorCount }, errorMessage2);
7861
+ logger19.error({ maxErrors: this.MAX_ERRORS, errorCount }, errorMessage2);
7782
7862
  await sseHelper.writeError(errorMessage2);
7783
7863
  await sseHelper.writeOperation(errorOp(errorMessage2, currentAgentId || "system"));
7784
7864
  if (task) {
@@ -7800,7 +7880,7 @@ var ExecutionHandler = class {
7800
7880
  }
7801
7881
  }
7802
7882
  const errorMessage = `Maximum transfer limit (${maxTransfers}) reached without completion`;
7803
- logger18.error({ maxTransfers, iterations }, errorMessage);
7883
+ logger19.error({ maxTransfers, iterations }, errorMessage);
7804
7884
  await sseHelper.writeError(errorMessage);
7805
7885
  await sseHelper.writeOperation(errorOp(errorMessage, currentAgentId || "system"));
7806
7886
  if (task) {
@@ -7820,7 +7900,7 @@ var ExecutionHandler = class {
7820
7900
  unregisterStreamHelper(requestId2);
7821
7901
  return { success: false, error: errorMessage, iterations };
7822
7902
  } catch (error) {
7823
- logger18.error({ error }, "Error in execution handler");
7903
+ logger19.error({ error }, "Error in execution handler");
7824
7904
  const errorMessage = error instanceof Error ? error.message : "Unknown execution error";
7825
7905
  await sseHelper.writeError(`Execution error: ${errorMessage}`);
7826
7906
  await sseHelper.writeOperation(errorOp(errorMessage, currentAgentId || "system"));
@@ -7846,7 +7926,7 @@ var ExecutionHandler = class {
7846
7926
 
7847
7927
  // src/routes/chat.ts
7848
7928
  var app2 = new zodOpenapi.OpenAPIHono();
7849
- var logger19 = agentsCore.getLogger("completionsHandler");
7929
+ var logger20 = agentsCore.getLogger("completionsHandler");
7850
7930
  var chatCompletionsRoute = zodOpenapi.createRoute({
7851
7931
  method: "post",
7852
7932
  path: "/completions",
@@ -7964,7 +8044,7 @@ app2.openapi(chatCompletionsRoute, async (c) => {
7964
8044
  tracestate: c.req.header("tracestate"),
7965
8045
  baggage: c.req.header("baggage")
7966
8046
  };
7967
- logger19.info(
8047
+ logger20.info(
7968
8048
  {
7969
8049
  otelHeaders,
7970
8050
  path: c.req.path,
@@ -8050,7 +8130,7 @@ app2.openapi(chatCompletionsRoute, async (c) => {
8050
8130
  dbClient_default,
8051
8131
  credentialStores
8052
8132
  );
8053
- logger19.info(
8133
+ logger20.info(
8054
8134
  {
8055
8135
  tenantId,
8056
8136
  graphId,
@@ -8096,7 +8176,7 @@ app2.openapi(chatCompletionsRoute, async (c) => {
8096
8176
  return streaming.streamSSE(c, async (stream2) => {
8097
8177
  const sseHelper = createSSEStreamHelper(stream2, requestId2, timestamp);
8098
8178
  await sseHelper.writeRole();
8099
- logger19.info({ agentId }, "Starting execution");
8179
+ logger20.info({ agentId }, "Starting execution");
8100
8180
  const executionHandler = new ExecutionHandler();
8101
8181
  const result = await executionHandler.execute({
8102
8182
  executionContext,
@@ -8106,7 +8186,7 @@ app2.openapi(chatCompletionsRoute, async (c) => {
8106
8186
  requestId: requestId2,
8107
8187
  sseHelper
8108
8188
  });
8109
- logger19.info(
8189
+ logger20.info(
8110
8190
  { result },
8111
8191
  `Execution completed: ${result.success ? "success" : "failed"} after ${result.iterations} iterations`
8112
8192
  );
@@ -8142,7 +8222,7 @@ var chat_default = app2;
8142
8222
  // src/routes/chatDataStream.ts
8143
8223
  init_dbClient();
8144
8224
  var app3 = new zodOpenapi.OpenAPIHono();
8145
- var logger20 = agentsCore.getLogger("chatDataStream");
8225
+ var logger21 = agentsCore.getLogger("chatDataStream");
8146
8226
  var chatDataStreamRoute = zodOpenapi.createRoute({
8147
8227
  method: "post",
8148
8228
  path: "/chat",
@@ -8247,7 +8327,7 @@ app3.openapi(chatDataStreamRoute, async (c) => {
8247
8327
  );
8248
8328
  const lastUserMessage = body.messages.filter((m) => m.role === "user").slice(-1)[0];
8249
8329
  const userText = typeof lastUserMessage?.content === "string" ? lastUserMessage.content : lastUserMessage?.parts?.map((p) => p.text).join("") || "";
8250
- logger20.info({ userText, lastUserMessage }, "userText");
8330
+ logger21.info({ userText, lastUserMessage }, "userText");
8251
8331
  const messageSpan = api.trace.getActiveSpan();
8252
8332
  if (messageSpan) {
8253
8333
  messageSpan.setAttributes({
@@ -8289,7 +8369,7 @@ app3.openapi(chatDataStreamRoute, async (c) => {
8289
8369
  await streamHelper.writeError("Unable to process request");
8290
8370
  }
8291
8371
  } catch (err) {
8292
- logger20.error({ err }, "Streaming error");
8372
+ logger21.error({ err }, "Streaming error");
8293
8373
  await streamHelper.writeError("Internal server error");
8294
8374
  } finally {
8295
8375
  if ("cleanup" in streamHelper && typeof streamHelper.cleanup === "function") {
@@ -8310,7 +8390,7 @@ app3.openapi(chatDataStreamRoute, async (c) => {
8310
8390
  )
8311
8391
  );
8312
8392
  } catch (error) {
8313
- logger20.error({ error }, "chatDataStream error");
8393
+ logger21.error({ error }, "chatDataStream error");
8314
8394
  return c.json({ error: "Failed to process chat completion" }, 500);
8315
8395
  }
8316
8396
  });
@@ -8321,7 +8401,7 @@ init_dbClient();
8321
8401
  function createMCPSchema(schema) {
8322
8402
  return schema;
8323
8403
  }
8324
- var logger21 = agentsCore.getLogger("mcp");
8404
+ var logger22 = agentsCore.getLogger("mcp");
8325
8405
  var _MockResponseSingleton = class _MockResponseSingleton {
8326
8406
  constructor() {
8327
8407
  __publicField(this, "mockRes");
@@ -8376,21 +8456,21 @@ var createSpoofInitMessage = (mcpProtocolVersion) => ({
8376
8456
  id: 0
8377
8457
  });
8378
8458
  var spoofTransportInitialization = async (transport, req, sessionId, mcpProtocolVersion) => {
8379
- logger21.info({ sessionId }, "Spoofing initialization message to set transport state");
8459
+ logger22.info({ sessionId }, "Spoofing initialization message to set transport state");
8380
8460
  const spoofInitMessage = createSpoofInitMessage(mcpProtocolVersion);
8381
8461
  const mockRes = MockResponseSingleton.getInstance().getMockResponse();
8382
8462
  try {
8383
8463
  await transport.handleRequest(req, mockRes, spoofInitMessage);
8384
- logger21.info({ sessionId }, "Successfully spoofed initialization");
8464
+ logger22.info({ sessionId }, "Successfully spoofed initialization");
8385
8465
  } catch (spoofError) {
8386
- logger21.warn({ sessionId, error: spoofError }, "Spoof initialization failed, continuing anyway");
8466
+ logger22.warn({ sessionId, error: spoofError }, "Spoof initialization failed, continuing anyway");
8387
8467
  }
8388
8468
  };
8389
8469
  var validateSession = async (req, res, body, tenantId, projectId, graphId) => {
8390
8470
  const sessionId = req.headers["mcp-session-id"];
8391
- logger21.info({ sessionId }, "Received MCP session ID");
8471
+ logger22.info({ sessionId }, "Received MCP session ID");
8392
8472
  if (!sessionId) {
8393
- logger21.info({ body }, "Missing session ID");
8473
+ logger22.info({ body }, "Missing session ID");
8394
8474
  res.writeHead(400).end(
8395
8475
  JSON.stringify({
8396
8476
  jsonrpc: "2.0",
@@ -8416,7 +8496,7 @@ var validateSession = async (req, res, body, tenantId, projectId, graphId) => {
8416
8496
  scopes: { tenantId, projectId },
8417
8497
  conversationId: sessionId
8418
8498
  });
8419
- logger21.info(
8499
+ logger22.info(
8420
8500
  {
8421
8501
  sessionId,
8422
8502
  conversationFound: !!conversation,
@@ -8427,7 +8507,7 @@ var validateSession = async (req, res, body, tenantId, projectId, graphId) => {
8427
8507
  "Conversation lookup result"
8428
8508
  );
8429
8509
  if (!conversation || conversation.metadata?.sessionData?.sessionType !== "mcp" || conversation.metadata?.sessionData?.graphId !== graphId) {
8430
- logger21.info(
8510
+ logger22.info(
8431
8511
  { sessionId, conversationId: conversation?.id },
8432
8512
  "MCP session not found or invalid"
8433
8513
  );
@@ -8488,7 +8568,7 @@ var executeAgentQuery = async (executionContext, conversationId, query, defaultA
8488
8568
  requestId: requestId2,
8489
8569
  sseHelper: mcpStreamHelper
8490
8570
  });
8491
- logger21.info(
8571
+ logger22.info(
8492
8572
  { result },
8493
8573
  `Execution completed: ${result.success ? "success" : "failed"} after ${result.iterations} iterations`
8494
8574
  );
@@ -8562,7 +8642,7 @@ var getServer = async (requestContext, executionContext, conversationId, credent
8562
8642
  dbClient_default,
8563
8643
  credentialStores
8564
8644
  );
8565
- logger21.info(
8645
+ logger22.info(
8566
8646
  {
8567
8647
  tenantId,
8568
8648
  graphId,
@@ -8623,7 +8703,7 @@ var validateRequestParameters = (c) => {
8623
8703
  };
8624
8704
  var handleInitializationRequest = async (body, executionContext, validatedContext, req, res, c, credentialStores) => {
8625
8705
  const { tenantId, projectId, graphId } = executionContext;
8626
- logger21.info({ body }, "Received initialization request");
8706
+ logger22.info({ body }, "Received initialization request");
8627
8707
  const sessionId = nanoid.nanoid();
8628
8708
  const agentGraph = await agentsCore.getAgentGraphWithDefaultAgent(dbClient_default)({
8629
8709
  scopes: { tenantId, projectId },
@@ -8654,7 +8734,7 @@ var handleInitializationRequest = async (body, executionContext, validatedContex
8654
8734
  }
8655
8735
  }
8656
8736
  });
8657
- logger21.info(
8737
+ logger22.info(
8658
8738
  { sessionId, conversationId: conversation.id },
8659
8739
  "Created MCP session as conversation"
8660
8740
  );
@@ -8663,9 +8743,9 @@ var handleInitializationRequest = async (body, executionContext, validatedContex
8663
8743
  });
8664
8744
  const server = await getServer(validatedContext, executionContext, sessionId, credentialStores);
8665
8745
  await server.connect(transport);
8666
- logger21.info({ sessionId }, "Server connected for initialization");
8746
+ logger22.info({ sessionId }, "Server connected for initialization");
8667
8747
  res.setHeader("Mcp-Session-Id", sessionId);
8668
- logger21.info(
8748
+ logger22.info(
8669
8749
  {
8670
8750
  sessionId,
8671
8751
  bodyMethod: body?.method,
@@ -8674,7 +8754,7 @@ var handleInitializationRequest = async (body, executionContext, validatedContex
8674
8754
  "About to handle initialization request"
8675
8755
  );
8676
8756
  await transport.handleRequest(req, res, body);
8677
- logger21.info({ sessionId }, "Successfully handled initialization request");
8757
+ logger22.info({ sessionId }, "Successfully handled initialization request");
8678
8758
  return fetchToNode.toFetchResponse(res);
8679
8759
  };
8680
8760
  var handleExistingSessionRequest = async (body, executionContext, validatedContext, req, res, credentialStores) => {
@@ -8702,8 +8782,8 @@ var handleExistingSessionRequest = async (body, executionContext, validatedConte
8702
8782
  sessionId,
8703
8783
  conversation.metadata?.session_data?.mcpProtocolVersion
8704
8784
  );
8705
- logger21.info({ sessionId }, "Server connected and transport initialized");
8706
- logger21.info(
8785
+ logger22.info({ sessionId }, "Server connected and transport initialized");
8786
+ logger22.info(
8707
8787
  {
8708
8788
  sessionId,
8709
8789
  bodyKeys: Object.keys(body || {}),
@@ -8717,9 +8797,9 @@ var handleExistingSessionRequest = async (body, executionContext, validatedConte
8717
8797
  );
8718
8798
  try {
8719
8799
  await transport.handleRequest(req, res, body);
8720
- logger21.info({ sessionId }, "Successfully handled MCP request");
8800
+ logger22.info({ sessionId }, "Successfully handled MCP request");
8721
8801
  } catch (transportError) {
8722
- logger21.error(
8802
+ logger22.error(
8723
8803
  {
8724
8804
  sessionId,
8725
8805
  error: transportError,
@@ -8770,13 +8850,13 @@ app4.openapi(
8770
8850
  }
8771
8851
  const { executionContext } = paramValidation;
8772
8852
  const body = c.get("requestBody") || {};
8773
- logger21.info({ body, bodyKeys: Object.keys(body || {}) }, "Parsed request body");
8853
+ logger22.info({ body, bodyKeys: Object.keys(body || {}) }, "Parsed request body");
8774
8854
  const isInitRequest = body.method === "initialize";
8775
8855
  const { req, res } = fetchToNode.toReqRes(c.req.raw);
8776
8856
  const validatedContext = c.get("validatedContext") || {};
8777
8857
  const credentialStores = c.get("credentialStores");
8778
- logger21.info({ validatedContext }, "Validated context");
8779
- logger21.info({ req }, "request");
8858
+ logger22.info({ validatedContext }, "Validated context");
8859
+ logger22.info({ req }, "request");
8780
8860
  if (isInitRequest) {
8781
8861
  return await handleInitializationRequest(
8782
8862
  body,
@@ -8798,7 +8878,7 @@ app4.openapi(
8798
8878
  );
8799
8879
  }
8800
8880
  } catch (e) {
8801
- logger21.error(
8881
+ logger22.error(
8802
8882
  {
8803
8883
  error: e instanceof Error ? e.message : e,
8804
8884
  stack: e instanceof Error ? e.stack : void 0
@@ -8810,7 +8890,7 @@ app4.openapi(
8810
8890
  }
8811
8891
  );
8812
8892
  app4.get("/", async (c) => {
8813
- logger21.info({}, "Received GET MCP request");
8893
+ logger22.info({}, "Received GET MCP request");
8814
8894
  return c.json(
8815
8895
  {
8816
8896
  jsonrpc: "2.0",
@@ -8824,7 +8904,7 @@ app4.get("/", async (c) => {
8824
8904
  );
8825
8905
  });
8826
8906
  app4.delete("/", async (c) => {
8827
- logger21.info({}, "Received DELETE MCP request");
8907
+ logger22.info({}, "Received DELETE MCP request");
8828
8908
  return c.json(
8829
8909
  {
8830
8910
  jsonrpc: "2.0",
@@ -8835,7 +8915,7 @@ app4.delete("/", async (c) => {
8835
8915
  );
8836
8916
  });
8837
8917
  var mcp_default = app4;
8838
- var logger22 = agentsCore.getLogger("agents-run-api");
8918
+ var logger23 = agentsCore.getLogger("agents-run-api");
8839
8919
  function createExecutionHono(serverConfig, credentialStores) {
8840
8920
  const app6 = new zodOpenapi.OpenAPIHono();
8841
8921
  app6.use("*", otel.otel());
@@ -8851,7 +8931,7 @@ function createExecutionHono(serverConfig, credentialStores) {
8851
8931
  const body = await c.req.json();
8852
8932
  c.set("requestBody", body);
8853
8933
  } catch (error) {
8854
- logger22.debug({ error }, "Failed to parse JSON body, continuing without parsed body");
8934
+ logger23.debug({ error }, "Failed to parse JSON body, continuing without parsed body");
8855
8935
  }
8856
8936
  }
8857
8937
  return next();
@@ -8902,8 +8982,8 @@ function createExecutionHono(serverConfig, credentialStores) {
8902
8982
  if (!isExpectedError) {
8903
8983
  const errorMessage = err instanceof Error ? err.message : String(err);
8904
8984
  const errorStack = err instanceof Error ? err.stack : void 0;
8905
- if (logger22) {
8906
- logger22.error(
8985
+ if (logger23) {
8986
+ logger23.error(
8907
8987
  {
8908
8988
  error: err,
8909
8989
  message: errorMessage,
@@ -8915,8 +8995,8 @@ function createExecutionHono(serverConfig, credentialStores) {
8915
8995
  );
8916
8996
  }
8917
8997
  } else {
8918
- if (logger22) {
8919
- logger22.error(
8998
+ if (logger23) {
8999
+ logger23.error(
8920
9000
  {
8921
9001
  error: err,
8922
9002
  path: c.req.path,
@@ -8933,8 +9013,8 @@ function createExecutionHono(serverConfig, credentialStores) {
8933
9013
  const response = err.getResponse();
8934
9014
  return response;
8935
9015
  } catch (responseError) {
8936
- if (logger22) {
8937
- logger22.error({ error: responseError }, "Error while handling HTTPException response");
9016
+ if (logger23) {
9017
+ logger23.error({ error: responseError }, "Error while handling HTTPException response");
8938
9018
  }
8939
9019
  }
8940
9020
  }
@@ -8968,7 +9048,7 @@ function createExecutionHono(serverConfig, credentialStores) {
8968
9048
  app6.use("*", async (c, next) => {
8969
9049
  const executionContext = c.get("executionContext");
8970
9050
  if (!executionContext) {
8971
- logger22.debug({}, "Empty execution context");
9051
+ logger23.debug({}, "Empty execution context");
8972
9052
  return next();
8973
9053
  }
8974
9054
  const { tenantId, projectId, graphId } = executionContext;
@@ -8977,7 +9057,7 @@ function createExecutionHono(serverConfig, credentialStores) {
8977
9057
  if (requestBody) {
8978
9058
  conversationId = requestBody.conversationId;
8979
9059
  if (!conversationId) {
8980
- logger22.debug({ requestBody }, "No conversation ID found in request body");
9060
+ logger23.debug({ requestBody }, "No conversation ID found in request body");
8981
9061
  }
8982
9062
  }
8983
9063
  const entries = Object.fromEntries(
@@ -8992,7 +9072,7 @@ function createExecutionHono(serverConfig, credentialStores) {
8992
9072
  })
8993
9073
  );
8994
9074
  if (!Object.keys(entries).length) {
8995
- logger22.debug({}, "Empty entries for baggage");
9075
+ logger23.debug({}, "Empty entries for baggage");
8996
9076
  return next();
8997
9077
  }
8998
9078
  const bag = Object.entries(entries).reduce(