@iqai/adk 0.0.4 → 0.0.6

This diff compares publicly available package versions as published to their public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
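The headline change in this release is a new debug helper, src/helpers/debug.ts, whose compiled form appears at the top of the diff below. Logging is now gated behind a single isDebugEnabled() check (NODE_ENV set to "development" or DEBUG set to "true") and routed through debugLog(), which replaces the inline if (process.env.DEBUG === "true") console.log blocks previously scattered across the tools, agents, and model adapters. The other recurring change is mechanical: class members (in BaseTool, BaseAgent, ToolContext, RunConfig, the LLM wrappers, and others) are now emitted as class field declarations instead of constructor assignments, and the per-module "use strict" directives are gone from the ESM bundle. As a rough sketch of how the helper behaves (the TypeScript annotations and the call site below are illustrative only, not part of the package's public API):

// Sketch of src/helpers/debug.ts as it appears compiled in this diff;
// type annotations and the example call are illustrative additions.
const isDebugEnabled = (): boolean =>
  process.env.NODE_ENV === "development" || process.env.DEBUG === "true";

const debugLog = (message: string, ...args: unknown[]): void => {
  const time = new Date().toLocaleTimeString();
  if (isDebugEnabled()) {
    console.log(`[DEBUG] ${time}: ${message}`, ...args);
  }
};

// Hypothetical call site, mirroring how the library now logs a tool retry:
debugLog(`[BaseTool] Retrying tool my_tool (attempt 1 of 3)...`);

With neither environment variable set, debugLog prints nothing (it only computes a timestamp and returns), so the library stays as quiet by default as the old DEBUG === "true" checks; setting NODE_ENV=development now also enables the output, and every message gains a [DEBUG] prefix and a timestamp.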
package/dist/index.mjs CHANGED
@@ -19,24 +19,60 @@ var __copyProps = (to, from, except, desc) => {
  };
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
 
+ // src/helpers/debug.ts
+ var isDebugEnabled, debugLog;
+ var init_debug = __esm({
+ "src/helpers/debug.ts"() {
+ isDebugEnabled = () => {
+ return process.env.NODE_ENV === "development" || process.env.DEBUG === "true";
+ };
+ debugLog = (message, ...args) => {
+ const time = (/* @__PURE__ */ new Date()).toLocaleTimeString();
+ if (isDebugEnabled()) {
+ console.log(`[DEBUG] ${time}: ${message}`, ...args);
+ }
+ };
+ }
+ });
+
  // src/tools/base/base-tool.ts
  var BaseTool;
  var init_base_tool = __esm({
  "src/tools/base/base-tool.ts"() {
- "use strict";
+ init_debug();
  BaseTool = class {
+ /**
+ * Name of the tool
+ */
+ name;
+ /**
+ * Description of the tool
+ */
+ description;
+ /**
+ * Whether the tool is a long running operation
+ */
+ isLongRunning;
+ /**
+ * Whether the tool execution should be retried on failure
+ */
+ shouldRetryOnFailure;
+ /**
+ * Maximum retry attempts
+ */
+ maxRetryAttempts;
+ /**
+ * Base delay for retry in ms (will be used with exponential backoff)
+ */
+ baseRetryDelay = 1e3;
+ /**
+ * Maximum delay for retry in ms
+ */
+ maxRetryDelay = 1e4;
  /**
  * Constructor for BaseTool
  */
  constructor(config) {
- /**
- * Base delay for retry in ms (will be used with exponential backoff)
- */
- this.baseRetryDelay = 1e3;
- /**
- * Maximum delay for retry in ms
- */
- this.maxRetryDelay = 1e4;
  this.name = config.name;
  this.description = config.description;
  this.isLongRunning = config.isLongRunning || false;
@@ -93,11 +129,9 @@ var init_base_tool = __esm({
  while (attempts <= (this.shouldRetryOnFailure ? this.maxRetryAttempts : 0)) {
  try {
  if (attempts > 0) {
- if (process.env.DEBUG === "true") {
- console.log(
- `Retrying tool ${this.name} (attempt ${attempts} of ${this.maxRetryAttempts})...`
- );
- }
+ debugLog(
+ `[BaseTool] Retrying tool ${this.name} (attempt ${attempts} of ${this.maxRetryAttempts})...`
+ );
  const delay = Math.min(
  this.baseRetryDelay * 2 ** (attempts - 1) + Math.random() * 1e3,
  this.maxRetryDelay
@@ -231,7 +265,6 @@ function extractJSDocParams(funcStr) {
  }
  var init_function_utils = __esm({
  "src/tools/function/function-utils.ts"() {
- "use strict";
  }
  });
 
@@ -243,10 +276,11 @@ __export(function_tool_exports, {
  var FunctionTool;
  var init_function_tool = __esm({
  "src/tools/function/function-tool.ts"() {
- "use strict";
  init_base_tool();
  init_function_utils();
  FunctionTool = class extends BaseTool {
+ func;
+ mandatoryArgs = [];
  /**
  * Creates a new FunctionTool wrapping the provided function.
  *
@@ -263,7 +297,6 @@ var init_function_tool = __esm({
  shouldRetryOnFailure: options?.shouldRetryOnFailure || false,
  maxRetryAttempts: options?.maxRetryAttempts || 3
  });
- this.mandatoryArgs = [];
  this.func = func;
  this.mandatoryArgs = this.getMandatoryArgs(func);
  }
@@ -359,6 +392,25 @@ __export(agents_exports, {
 
  // src/agents/base-agent.ts
  var BaseAgent = class {
+ /**
+ * The agent's name
+ * Agent name must be a unique identifier within the agent tree
+ */
+ name;
+ /**
+ * Description about the agent's capability
+ * The LLM uses this to determine whether to delegate control to the agent
+ */
+ description;
+ /**
+ * The parent agent of this agent
+ * Note that an agent can ONLY be added as sub-agent once
+ */
+ parentAgent;
+ /**
+ * The sub-agents of this agent
+ */
+ subAgents;
  /**
  * Constructs a new BaseAgent
  */
@@ -422,8 +474,16 @@ var BaseAgent = class {
  }
  };
 
+ // src/agents/llm-agent.ts
+ init_debug();
+
  // src/models/llm-registry.ts
- var _LLMRegistry = class _LLMRegistry {
+ init_debug();
+ var LLMRegistry = class _LLMRegistry {
+ /**
+ * Map of model name regex to LLM class
+ */
+ static llmRegistry = /* @__PURE__ */ new Map();
  /**
  * Creates a new LLM instance
  *
@@ -475,22 +535,23 @@ var _LLMRegistry = class _LLMRegistry {
  * Logs all registered models for debugging
  */
  static logRegisteredModels() {
- if (process.env.DEBUG === "true") {
- console.log("Registered LLM models:");
- for (const [regex, llmClass] of _LLMRegistry.llmRegistry.entries()) {
- console.log(` - Pattern: ${regex.toString()}`);
- }
- }
+ debugLog(
+ "Registered LLM models:",
+ [..._LLMRegistry.llmRegistry.entries()].map(([regex]) => regex.toString())
+ );
  }
  };
- /**
- * Map of model name regex to LLM class
- */
- _LLMRegistry.llmRegistry = /* @__PURE__ */ new Map();
- var LLMRegistry = _LLMRegistry;
 
  // src/models/llm-request.ts
  var LLMRequest = class {
+ /**
+ * The conversation history
+ */
+ messages;
+ /**
+ * LLM configuration parameters
+ */
+ config;
  constructor(data) {
  this.messages = data.messages;
  this.config = data.config || {};
@@ -499,18 +560,34 @@ var LLMRequest = class {
 
  // src/tools/tool-context.ts
  var ToolContext = class {
+ /**
+ * The parent invocation context
+ */
+ invocationContext;
+ /**
+ * Authentication handler for the tool
+ */
+ auth;
+ /**
+ * Additional parameters for the tool
+ */
+ parameters;
+ /**
+ * Tool name
+ */
+ toolName = "";
+ /**
+ * Tool ID
+ */
+ toolId = "";
+ /**
+ * Variables stored in the context
+ */
+ _variables;
  /**
  * Constructor for ToolContext
  */
  constructor(options) {
- /**
- * Tool name
- */
- this.toolName = "";
- /**
- * Tool ID
- */
- this.toolId = "";
  this.invocationContext = options.invocationContext;
  this.auth = options.auth;
  this.parameters = options.parameters || {};
@@ -586,6 +663,36 @@ var StreamingMode = /* @__PURE__ */ ((StreamingMode2) => {
586
663
  return StreamingMode2;
587
664
  })(StreamingMode || {});
588
665
  var RunConfig = class {
666
+ /**
667
+ * Speech configuration for the live agent
668
+ */
669
+ speechConfig;
670
+ /**
671
+ * The output modalities
672
+ */
673
+ responseModalities;
674
+ /**
675
+ * Whether to save input blobs as artifacts
676
+ */
677
+ saveInputBlobsAsArtifacts;
678
+ /**
679
+ * Whether to support Compositional Function Calling
680
+ * Only applicable for StreamingMode.SSE
681
+ */
682
+ supportCFC;
683
+ /**
684
+ * Streaming mode
685
+ */
686
+ streamingMode;
687
+ /**
688
+ * Whether to stream the response
689
+ * This is derived from streamingMode and used by LLM implementations
690
+ */
691
+ stream;
692
+ /**
693
+ * Output audio transcription configuration
694
+ */
695
+ outputAudioTranscription;
589
696
  constructor(config) {
590
697
  this.speechConfig = config?.speechConfig;
591
698
  this.responseModalities = config?.responseModalities;
@@ -599,14 +706,50 @@ var RunConfig = class {
599
706
 
600
707
  // src/agents/invocation-context.ts
601
708
  var InvocationContext = class _InvocationContext {
709
+ /**
710
+ * Unique session ID for the current conversation
711
+ */
712
+ sessionId;
713
+ /**
714
+ * Current conversation history
715
+ */
716
+ messages;
717
+ /**
718
+ * Run configuration
719
+ */
720
+ config;
721
+ /**
722
+ * User identifier associated with the session
723
+ */
724
+ userId;
725
+ /**
726
+ * Application name (for multi-app environments)
727
+ */
728
+ appName;
729
+ /**
730
+ * Memory service for long-term storage
731
+ */
732
+ memoryService;
733
+ /**
734
+ * Session service for session management
735
+ */
736
+ sessionService;
737
+ /**
738
+ * Additional context metadata
739
+ */
740
+ metadata;
741
+ /**
742
+ * Variables stored in the context
743
+ */
744
+ variables;
745
+ /**
746
+ * In-memory storage for node execution results
747
+ */
748
+ memory = /* @__PURE__ */ new Map();
602
749
  /**
603
750
  * Constructor for InvocationContext
604
751
  */
605
752
  constructor(options = {}) {
606
- /**
607
- * In-memory storage for node execution results
608
- */
609
- this.memory = /* @__PURE__ */ new Map();
610
753
  this.sessionId = options.sessionId || this.generateSessionId();
611
754
  this.messages = options.messages || [];
612
755
  this.config = options.config || new RunConfig();
@@ -713,6 +856,54 @@ var InvocationContext = class _InvocationContext {
713
856
 
714
857
  // src/agents/llm-agent.ts
715
858
  var Agent = class extends BaseAgent {
859
+ /**
860
+ * The LLM model to use
861
+ */
862
+ model;
863
+ /**
864
+ * The LLM instance
865
+ */
866
+ llm;
867
+ /**
868
+ * Instructions for the agent
869
+ */
870
+ instructions;
871
+ /**
872
+ * Tools available to the agent
873
+ */
874
+ tools;
875
+ /**
876
+ * Maximum number of tool execution steps to prevent infinite loops
877
+ */
878
+ maxToolExecutionSteps;
879
+ /**
880
+ * Memory service for long-term storage and retrieval
881
+ */
882
+ memoryService;
883
+ /**
884
+ * Session service for managing conversations
885
+ */
886
+ sessionService;
887
+ /**
888
+ * User ID for the session
889
+ */
890
+ userId;
891
+ /**
892
+ * Application name
893
+ */
894
+ appName;
895
+ /**
896
+ * Whether to automatically augment prompts with relevant memory
897
+ */
898
+ useMemoryAugmentation;
899
+ /**
900
+ * The maximum number of memory items to include in augmentation
901
+ */
902
+ maxMemoryItems;
903
+ /**
904
+ * The minimum relevance score for memory augmentation (0-1)
905
+ */
906
+ memoryRelevanceThreshold;
716
907
  /**
717
908
  * Constructor for Agent
718
909
  */
@@ -745,9 +936,7 @@ var Agent = class extends BaseAgent {
745
936
  */
746
937
  async executeTool(toolCall, context) {
747
938
  const { name, arguments: argsString } = toolCall.function;
748
- if (process.env.DEBUG === "true") {
749
- console.log(`Executing tool: ${name}`);
750
- }
939
+ debugLog(`Executing tool: ${name}`);
751
940
  const tool = this.findTool(name);
752
941
  if (!tool) {
753
942
  console.warn(`Tool '${name}' not found`);
@@ -765,9 +954,7 @@ var Agent = class extends BaseAgent {
765
954
  toolContext.toolName = name;
766
955
  toolContext.toolId = toolCall.id;
767
956
  const result = await tool.runAsync(args, toolContext);
768
- if (process.env.DEBUG === "true") {
769
- console.log(`Tool ${name} execution complete`);
770
- }
957
+ debugLog(`Tool ${name} execution complete`);
771
958
  return {
772
959
  name,
773
960
  result: typeof result === "string" ? result : JSON.stringify(result)
@@ -936,10 +1123,7 @@ ${relevantInfo.join("\n\n")}`
936
1123
  let stepCount = 0;
937
1124
  while (stepCount < this.maxToolExecutionSteps) {
938
1125
  stepCount++;
939
- if (process.env.DEBUG === "true") {
940
- console.log(`
941
- [Agent] Step ${stepCount}: Thinking...`);
942
- }
1126
+ debugLog(`Step ${stepCount}: Thinking...`);
943
1127
  const llmRequest = new LLMRequest({
944
1128
  messages: context.messages,
945
1129
  config: {
@@ -956,9 +1140,7 @@ ${relevantInfo.join("\n\n")}`
956
1140
  throw new Error("No response from LLM");
957
1141
  }
958
1142
  if (currentResponse.tool_calls && currentResponse.tool_calls.length > 0) {
959
- if (process.env.DEBUG === "true") {
960
- console.log("[Agent] Executing tools...");
961
- }
1143
+ debugLog(`Tool calls: ${JSON.stringify(currentResponse.tool_calls)}`);
962
1144
  context.addMessage({
963
1145
  role: "assistant",
964
1146
  content: currentResponse.content || "",
@@ -977,9 +1159,7 @@ ${relevantInfo.join("\n\n")}`
977
1159
  });
978
1160
  }
979
1161
  } else {
980
- if (process.env.DEBUG === "true") {
981
- console.log("[Agent] No tool calls, finishing...");
982
- }
1162
+ debugLog("[Agent] No tool calls, finishing...");
983
1163
  context.addMessage({
984
1164
  role: "assistant",
985
1165
  content: currentResponse.content || ""
@@ -1017,10 +1197,7 @@ ${relevantInfo.join("\n\n")}`
1017
1197
  let stepCount = 0;
1018
1198
  let hadToolCalls = false;
1019
1199
  while (stepCount < this.maxToolExecutionSteps) {
1020
- if (process.env.DEBUG === "true") {
1021
- console.log(`
1022
- [Agent] Step ${stepCount + 1}: Thinking...`);
1023
- }
1200
+ debugLog(`[Agent] Step ${stepCount}: Thinking...`);
1024
1201
  const toolDeclarations = this.tools.map((tool) => tool.getDeclaration()).filter((declaration) => declaration !== null);
1025
1202
  const request = {
1026
1203
  messages: context.messages,
@@ -1049,14 +1226,10 @@ ${relevantInfo.join("\n\n")}`
1049
1226
  function_call: finalResponse.function_call
1050
1227
  });
1051
1228
  if (!hadToolCalls) {
1052
- if (process.env.DEBUG === "true") {
1053
- console.log("[Agent] No tool calls, finishing...");
1054
- }
1229
+ debugLog("[Agent] No tool calls, finishing...");
1055
1230
  break;
1056
1231
  }
1057
- if (process.env.DEBUG === "true") {
1058
- console.log("[Agent] Executing tools...");
1059
- }
1232
+ debugLog(`[Agent] Step ${stepCount + 1}: Executing tools...`);
1060
1233
  stepCount++;
1061
1234
  if (finalResponse.function_call) {
1062
1235
  const toolCall = {
@@ -1073,11 +1246,9 @@ ${relevantInfo.join("\n\n")}`
1073
1246
  content: JSON.stringify(result.result)
1074
1247
  });
1075
1248
  } else if (finalResponse.tool_calls && finalResponse.tool_calls.length > 0) {
1076
- if (process.env.DEBUG === "true") {
1077
- console.log(
1078
- `[Agent] Executing ${finalResponse.tool_calls.length} tool(s)...`
1079
- );
1080
- }
1249
+ debugLog(
1250
+ `[Agent] Step ${stepCount + 1}: Executing ${finalResponse.tool_calls.length} tool(s)...`
1251
+ );
1081
1252
  context.messages.pop();
1082
1253
  context.addMessage({
1083
1254
  role: "assistant",
@@ -1102,6 +1273,7 @@ ${relevantInfo.join("\n\n")}`
1102
1273
  };
1103
1274
 
1104
1275
  // src/agents/sequential-agent.ts
1276
+ init_debug();
1105
1277
  var SequentialAgent = class extends BaseAgent {
1106
1278
  /**
1107
1279
  * Constructor for SequentialAgent
@@ -1122,11 +1294,9 @@ var SequentialAgent = class extends BaseAgent {
1122
1294
  * Executes sub-agents sequentially, passing output from one to the next
1123
1295
  */
1124
1296
  async run(options) {
1125
- if (process.env.DEBUG === "true") {
1126
- console.log(
1127
- `[SequentialAgent] Running ${this.subAgents.length} sub-agents in sequence`
1128
- );
1129
- }
1297
+ debugLog(
1298
+ `[SequentialAgent] Running ${this.subAgents.length} sub-agents in sequence`
1299
+ );
1130
1300
  if (this.subAgents.length === 0) {
1131
1301
  return {
1132
1302
  content: "No sub-agents defined for sequential execution.",
@@ -1142,11 +1312,9 @@ var SequentialAgent = class extends BaseAgent {
1142
1312
  let finalResponse = null;
1143
1313
  for (let i = 0; i < this.subAgents.length; i++) {
1144
1314
  const agent = this.subAgents[i];
1145
- if (process.env.DEBUG === "true") {
1146
- console.log(
1147
- `[SequentialAgent] Running sub-agent ${i + 1}/${this.subAgents.length}: ${agent.name}`
1148
- );
1149
- }
1315
+ debugLog(
1316
+ `[SequentialAgent] Running sub-agent ${i + 1}/${this.subAgents.length}: ${agent.name}`
1317
+ );
1150
1318
  try {
1151
1319
  const response = await agent.run({
1152
1320
  messages: currentMessages,
@@ -1202,11 +1370,9 @@ var SequentialAgent = class extends BaseAgent {
1202
1370
  * Streams responses from each sub-agent in sequence
1203
1371
  */
1204
1372
  async *runStreaming(options) {
1205
- if (process.env.DEBUG === "true") {
1206
- console.log(
1207
- `[SequentialAgent] Streaming ${this.subAgents.length} sub-agents in sequence`
1208
- );
1209
- }
1373
+ debugLog(
1374
+ `[SequentialAgent] Streaming ${this.subAgents.length} sub-agents in sequence`
1375
+ );
1210
1376
  if (this.subAgents.length === 0) {
1211
1377
  yield {
1212
1378
  content: "No sub-agents defined for sequential execution.",
@@ -1222,11 +1388,9 @@ var SequentialAgent = class extends BaseAgent {
1222
1388
  const currentMessages = [...options.messages];
1223
1389
  for (let i = 0; i < this.subAgents.length; i++) {
1224
1390
  const agent = this.subAgents[i];
1225
- if (process.env.DEBUG === "true") {
1226
- console.log(
1227
- `[SequentialAgent] Streaming sub-agent ${i + 1}/${this.subAgents.length}: ${agent.name}`
1228
- );
1229
- }
1391
+ debugLog(
1392
+ `[SequentialAgent] Streaming sub-agent ${i + 1}/${this.subAgents.length}: ${agent.name}`
1393
+ );
1230
1394
  try {
1231
1395
  const streamGenerator = agent.runStreaming({
1232
1396
  messages: currentMessages,
@@ -1279,6 +1443,7 @@ var SequentialAgent = class extends BaseAgent {
1279
1443
  };
1280
1444
 
1281
1445
  // src/agents/parallel-agent.ts
1446
+ init_debug();
1282
1447
  var ParallelAgent = class extends BaseAgent {
1283
1448
  /**
1284
1449
  * Constructor for ParallelAgent
@@ -1299,11 +1464,9 @@ var ParallelAgent = class extends BaseAgent {
1299
1464
  * Executes all sub-agents in parallel
1300
1465
  */
1301
1466
  async run(options) {
1302
- if (process.env.DEBUG === "true") {
1303
- console.log(
1304
- `[ParallelAgent] Running ${this.subAgents.length} sub-agents in parallel`
1305
- );
1306
- }
1467
+ debugLog(
1468
+ `[ParallelAgent] Running ${this.subAgents.length} sub-agents in parallel`
1469
+ );
1307
1470
  if (this.subAgents.length === 0) {
1308
1471
  return {
1309
1472
  content: "No sub-agents defined for parallel execution.",
@@ -1346,11 +1509,9 @@ ${result.content || "No content"}
1346
1509
  * Collects streaming responses from all sub-agents
1347
1510
  */
1348
1511
  async *runStreaming(options) {
1349
- if (process.env.DEBUG === "true") {
1350
- console.log(
1351
- `[ParallelAgent] Streaming ${this.subAgents.length} sub-agents in parallel`
1352
- );
1353
- }
1512
+ debugLog(
1513
+ `[ParallelAgent] Streaming ${this.subAgents.length} sub-agents in parallel`
1514
+ );
1354
1515
  if (this.subAgents.length === 0) {
1355
1516
  yield {
1356
1517
  content: "No sub-agents defined for parallel execution.",
@@ -1416,7 +1577,20 @@ ${response.content || "No content"}
1416
1577
  };
1417
1578
 
1418
1579
  // src/agents/loop-agent.ts
1580
+ init_debug();
1419
1581
  var LoopAgent = class extends BaseAgent {
1582
+ /**
1583
+ * Maximum number of iterations to prevent infinite loops
1584
+ */
1585
+ maxIterations;
1586
+ /**
1587
+ * Agent that decides whether to continue the loop
1588
+ */
1589
+ conditionAgent;
1590
+ /**
1591
+ * Custom condition check function
1592
+ */
1593
+ conditionCheck;
1420
1594
  /**
1421
1595
  * Constructor for LoopAgent
1422
1596
  */
@@ -1444,28 +1618,20 @@ var LoopAgent = class extends BaseAgent {
1444
1618
  */
1445
1619
  async shouldContinue(response, iterationCount, messages, config) {
1446
1620
  if (iterationCount >= this.maxIterations) {
1447
- if (process.env.DEBUG === "true") {
1448
- console.log(
1449
- `[LoopAgent] Maximum iterations (${this.maxIterations}) reached. Stopping loop.`
1450
- );
1451
- }
1621
+ debugLog(
1622
+ `[LoopAgent] Maximum iterations (${this.maxIterations}) reached. Stopping loop.`
1623
+ );
1452
1624
  return false;
1453
1625
  }
1454
1626
  if (this.conditionCheck) {
1455
1627
  const shouldContinue = await this.conditionCheck(response);
1456
- if (process.env.DEBUG === "true") {
1457
- console.log(
1458
- `[LoopAgent] Custom condition check result: ${shouldContinue}`
1459
- );
1460
- }
1628
+ debugLog(`[LoopAgent] Custom condition check result: ${shouldContinue}`);
1461
1629
  return shouldContinue;
1462
1630
  }
1463
1631
  if (this.conditionAgent) {
1464
- if (process.env.DEBUG === "true") {
1465
- console.log(
1466
- `[LoopAgent] Using condition agent ${this.conditionAgent.name} to check loop condition`
1467
- );
1468
- }
1632
+ debugLog(
1633
+ `[LoopAgent] Using condition agent ${this.conditionAgent.name} to check loop condition`
1634
+ );
1469
1635
  const conditionMessages = [
1470
1636
  ...messages,
1471
1637
  {
@@ -1484,11 +1650,9 @@ var LoopAgent = class extends BaseAgent {
1484
1650
  });
1485
1651
  const content = conditionResponse.content?.toLowerCase() || "";
1486
1652
  const shouldContinue = content.includes("yes") && !content.includes("no");
1487
- if (process.env.DEBUG === "true") {
1488
- console.log(
1489
- `[LoopAgent] Condition agent result: ${shouldContinue ? "Continue loop" : "Stop loop"}`
1490
- );
1491
- }
1653
+ debugLog(
1654
+ `[LoopAgent] Condition agent result: ${shouldContinue ? "Continue loop" : "Stop loop"}`
1655
+ );
1492
1656
  return shouldContinue;
1493
1657
  } catch (error) {
1494
1658
  console.error("[LoopAgent] Error in condition agent:", error);
@@ -1502,11 +1666,9 @@ var LoopAgent = class extends BaseAgent {
1502
1666
  * Executes the sub-agent in a loop until the condition is met
1503
1667
  */
1504
1668
  async run(options) {
1505
- if (process.env.DEBUG === "true") {
1506
- console.log(
1507
- `[LoopAgent] Starting loop with max ${this.maxIterations} iterations`
1508
- );
1509
- }
1669
+ debugLog(
1670
+ `[LoopAgent] Starting loop with max ${this.maxIterations} iterations`
1671
+ );
1510
1672
  if (this.subAgents.length === 0) {
1511
1673
  return {
1512
1674
  content: "No sub-agent defined for loop execution.",
@@ -1520,11 +1682,9 @@ var LoopAgent = class extends BaseAgent {
1520
1682
  let shouldContinueLoop = true;
1521
1683
  while (shouldContinueLoop && iterationCount < this.maxIterations) {
1522
1684
  iterationCount++;
1523
- if (process.env.DEBUG === "true") {
1524
- console.log(
1525
- `[LoopAgent] Running iteration ${iterationCount}/${this.maxIterations}`
1526
- );
1527
- }
1685
+ debugLog(
1686
+ `[LoopAgent] Running iteration ${iterationCount}/${this.maxIterations}`
1687
+ );
1528
1688
  try {
1529
1689
  const response = await subAgent.run({
1530
1690
  messages: currentMessages,
@@ -1572,11 +1732,9 @@ ${lastResponse.content || ""}`,
1572
1732
  * Runs the agent with streaming support
1573
1733
  */
1574
1734
  async *runStreaming(options) {
1575
- if (process.env.DEBUG === "true") {
1576
- console.log(
1577
- `[LoopAgent] Starting loop with max ${this.maxIterations} iterations (streaming)`
1578
- );
1579
- }
1735
+ debugLog(
1736
+ `[LoopAgent] Starting loop with max ${this.maxIterations} iterations (streaming)`
1737
+ );
1580
1738
  if (this.subAgents.length === 0) {
1581
1739
  yield {
1582
1740
  content: "No sub-agent defined for loop execution.",
@@ -1595,11 +1753,9 @@ ${lastResponse.content || ""}`,
1595
1753
  };
1596
1754
  while (shouldContinueLoop && iterationCount < this.maxIterations) {
1597
1755
  iterationCount++;
1598
- if (process.env.DEBUG === "true") {
1599
- console.log(
1600
- `[LoopAgent] Running iteration ${iterationCount}/${this.maxIterations} (streaming)`
1601
- );
1602
- }
1756
+ debugLog(
1757
+ `[LoopAgent] Running iteration ${iterationCount}/${this.maxIterations} (streaming)`
1758
+ );
1603
1759
  yield {
1604
1760
  content: `Running iteration ${iterationCount}/${this.maxIterations}...`,
1605
1761
  role: "assistant",
@@ -1623,11 +1779,9 @@ ${lastResponse.content || ""}`,
1623
1779
  }
1624
1780
  }
1625
1781
  if (!lastChunk) {
1626
- if (process.env.DEBUG === "true") {
1627
- console.warn(
1628
- `[LoopAgent] No complete chunk received from iteration ${iterationCount}`
1629
- );
1630
- }
1782
+ debugLog(
1783
+ `[LoopAgent] No complete chunk received from iteration ${iterationCount}`
1784
+ );
1631
1785
  shouldContinueLoop = false;
1632
1786
  continue;
1633
1787
  }
@@ -1653,9 +1807,8 @@ ${lastResponse.content || ""}`,
1653
1807
  };
1654
1808
  }
1655
1809
  } catch (error) {
1656
- console.error(
1657
- `[LoopAgent] Error in loop iteration ${iterationCount}:`,
1658
- error
1810
+ debugLog(
1811
+ `[LoopAgent] Error in loop iteration ${iterationCount}: ${error instanceof Error ? error.message : String(error)}`
1659
1812
  );
1660
1813
  yield {
1661
1814
  content: `Error in loop iteration ${iterationCount}: ${error instanceof Error ? error.message : String(error)}`,
@@ -1672,7 +1825,24 @@ ${lastResponse.content || ""}`,
1672
1825
  };
1673
1826
 
1674
1827
  // src/agents/lang-graph-agent.ts
1828
+ init_debug();
1675
1829
  var LangGraphAgent = class extends BaseAgent {
1830
+ /**
1831
+ * Graph nodes (agents and their connections)
1832
+ */
1833
+ nodes;
1834
+ /**
1835
+ * Root node to start execution from
1836
+ */
1837
+ rootNode;
1838
+ /**
1839
+ * Maximum number of steps to prevent infinite loops
1840
+ */
1841
+ maxSteps;
1842
+ /**
1843
+ * Results from node executions
1844
+ */
1845
+ results = [];
1676
1846
  /**
1677
1847
  * Constructor for LangGraphAgent
1678
1848
  */
@@ -1681,10 +1851,6 @@ var LangGraphAgent = class extends BaseAgent {
1681
1851
  name: config.name,
1682
1852
  description: config.description
1683
1853
  });
1684
- /**
1685
- * Results from node executions
1686
- */
1687
- this.results = [];
1688
1854
  this.nodes = /* @__PURE__ */ new Map();
1689
1855
  for (const node of config.nodes) {
1690
1856
  if (this.nodes.has(node.name)) {
@@ -1757,11 +1923,9 @@ var LangGraphAgent = class extends BaseAgent {
1757
1923
  if (targetNode.condition) {
1758
1924
  const shouldExecute = await targetNode.condition(result, context);
1759
1925
  if (!shouldExecute) {
1760
- if (process.env.DEBUG === "true") {
1761
- console.log(
1762
- `[LangGraphAgent] Skipping node "${targetName}" due to condition`
1763
- );
1764
- }
1926
+ debugLog(
1927
+ `[LangGraphAgent] Skipping node "${targetName}" due to condition`
1928
+ );
1765
1929
  continue;
1766
1930
  }
1767
1931
  }
@@ -1786,11 +1950,9 @@ var LangGraphAgent = class extends BaseAgent {
1786
1950
  };
1787
1951
  const shouldExecute = await node.condition(mockResponse, mockContext);
1788
1952
  if (!shouldExecute) {
1789
- if (process.env.DEBUG === "true") {
1790
- console.log(
1791
- `[LangGraphAgent] Skipping node "${targetName}" due to condition`
1792
- );
1793
- }
1953
+ debugLog(
1954
+ `[LangGraphAgent] Skipping node "${targetName}" due to condition`
1955
+ );
1794
1956
  }
1795
1957
  return { shouldExecute };
1796
1958
  }
@@ -1803,11 +1965,9 @@ var LangGraphAgent = class extends BaseAgent {
1803
1965
  messages: options.messages,
1804
1966
  config: options.config
1805
1967
  });
1806
- if (process.env.DEBUG === "true") {
1807
- console.log(
1808
- `[LangGraphAgent] Starting graph execution from root node "${this.rootNode}"`
1809
- );
1810
- }
1968
+ debugLog(
1969
+ `[LangGraphAgent] Starting graph execution from root node "${this.rootNode}"`
1970
+ );
1811
1971
  if (this.nodes.size === 0) {
1812
1972
  return {
1813
1973
  content: "No nodes defined in the graph.",
@@ -1827,11 +1987,9 @@ var LangGraphAgent = class extends BaseAgent {
1827
1987
  while (nodesToExecute.length > 0 && stepCount < this.maxSteps) {
1828
1988
  stepCount++;
1829
1989
  const { node, messages } = nodesToExecute.shift();
1830
- if (process.env.DEBUG === "true") {
1831
- console.log(
1832
- `[LangGraphAgent] Step ${stepCount}: Executing node "${node.name}"`
1833
- );
1834
- }
1990
+ debugLog(
1991
+ `[LangGraphAgent] Step ${stepCount}: Executing node "${node.name}"`
1992
+ );
1835
1993
  executedNodes.push(node.name);
1836
1994
  try {
1837
1995
  const result = await node.agent.run({
@@ -1903,11 +2061,9 @@ var LangGraphAgent = class extends BaseAgent {
1903
2061
  messages: options.messages,
1904
2062
  config: options.config
1905
2063
  });
1906
- if (process.env.DEBUG === "true") {
1907
- console.log(
1908
- `[LangGraphAgent] Starting graph execution from root node "${this.rootNode}" (streaming)`
1909
- );
1910
- }
2064
+ debugLog(
2065
+ `[LangGraphAgent] Starting graph execution from root node "${this.rootNode}" (streaming)`
2066
+ );
1911
2067
  if (this.nodes.size === 0) {
1912
2068
  yield {
1913
2069
  content: "No nodes defined in the graph.",
@@ -1934,11 +2090,9 @@ var LangGraphAgent = class extends BaseAgent {
1934
2090
  while (nodesToExecute.length > 0 && stepCount < this.maxSteps) {
1935
2091
  stepCount++;
1936
2092
  const { node, messages } = nodesToExecute.shift();
1937
- if (process.env.DEBUG === "true") {
1938
- console.log(
1939
- `[LangGraphAgent] Step ${stepCount}: Executing node "${node.name}" (streaming)`
1940
- );
1941
- }
2093
+ debugLog(
2094
+ `[LangGraphAgent] Step ${stepCount}: Executing node "${node.name}" (streaming)`
2095
+ );
1942
2096
  executedNodes.push(node.name);
1943
2097
  try {
1944
2098
  const result = await node.agent.run({
@@ -2052,6 +2206,7 @@ function createFunctionTool(func, options) {
2052
2206
  init_function_utils();
2053
2207
 
2054
2208
  // src/tools/common/google-search.ts
2209
+ init_debug();
2055
2210
  init_base_tool();
2056
2211
  var GoogleSearch = class extends BaseTool {
2057
2212
  /**
@@ -2092,9 +2247,7 @@ var GoogleSearch = class extends BaseTool {
2092
2247
  * This is a simplified implementation that doesn't actually search, just returns mock results
2093
2248
  */
2094
2249
  async runAsync(args, _context) {
2095
- if (process.env.DEBUG === "true") {
2096
- console.log(`Executing Google search for: ${args.query}`);
2097
- }
2250
+ debugLog(`[GoogleSearch] Executing Google search for: ${args.query}`);
2098
2251
  return {
2099
2252
  results: [
2100
2253
  {
@@ -2236,6 +2389,7 @@ init_base_tool();
2236
2389
  import fs from "fs/promises";
2237
2390
  import path from "path";
2238
2391
  var FileOperationsTool = class extends BaseTool {
2392
+ basePath;
2239
2393
  constructor(options) {
2240
2394
  super({
2241
2395
  name: "file_operations",
@@ -2556,6 +2710,7 @@ var UserInteractionTool = class extends BaseTool {
2556
2710
  };
2557
2711
 
2558
2712
  // src/tools/common/exit-loop-tool.ts
2713
+ init_debug();
2559
2714
  init_base_tool();
2560
2715
  var ExitLoopTool = class extends BaseTool {
2561
2716
  /**
@@ -2585,9 +2740,7 @@ var ExitLoopTool = class extends BaseTool {
2585
2740
  * Execute the exit loop action
2586
2741
  */
2587
2742
  async runAsync(_args, context) {
2588
- if (process.env.DEBUG === "true") {
2589
- console.log("Executing exit loop tool");
2590
- }
2743
+ debugLog("[ExitLoopTool] Executing exit loop tool");
2591
2744
  if (context.actions) {
2592
2745
  context.actions.escalate = true;
2593
2746
  } else {
@@ -2602,6 +2755,7 @@ var ExitLoopTool = class extends BaseTool {
2602
2755
  };
2603
2756
 
2604
2757
  // src/tools/common/get-user-choice-tool.ts
2758
+ init_debug();
2605
2759
  init_base_tool();
2606
2760
  var GetUserChoiceTool = class extends BaseTool {
2607
2761
  /**
@@ -2646,13 +2800,13 @@ var GetUserChoiceTool = class extends BaseTool {
2646
2800
  * and the actual choice will be provided asynchronously
2647
2801
  */
2648
2802
  async runAsync(args, context) {
2649
- if (process.env.DEBUG === "true") {
2650
- console.log(
2651
- `Executing get_user_choice with options: ${args.options.join(", ")}`
2652
- );
2653
- if (args.question) {
2654
- console.log(`Question: ${args.question}`);
2655
- }
2803
+ debugLog(
2804
+ `[GetUserChoiceTool] Executing get_user_choice with options: ${args.options.join(
2805
+ ", "
2806
+ )}`
2807
+ );
2808
+ if (args.question) {
2809
+ debugLog(`[GetUserChoiceTool] Question: ${args.question}`);
2656
2810
  }
2657
2811
  if (context.actions) {
2658
2812
  context.actions.skip_summarization = true;
@@ -2666,6 +2820,7 @@ var GetUserChoiceTool = class extends BaseTool {
2666
2820
  };
2667
2821
 
2668
2822
  // src/tools/common/transfer-to-agent-tool.ts
2823
+ init_debug();
2669
2824
  init_base_tool();
2670
2825
  var TransferToAgentTool = class extends BaseTool {
2671
2826
  /**
@@ -2700,9 +2855,9 @@ var TransferToAgentTool = class extends BaseTool {
2700
2855
  * Execute the transfer to agent action
2701
2856
  */
2702
2857
  async runAsync(args, context) {
2703
- if (process.env.DEBUG === "true") {
2704
- console.log(`Executing transfer to agent: ${args.agent_name}`);
2705
- }
2858
+ debugLog(
2859
+ `[TransferToAgentTool] Executing transfer to agent: ${args.agent_name}`
2860
+ );
2706
2861
  if (context.actions) {
2707
2862
  context.actions.transfer_to_agent = args.agent_name;
2708
2863
  } else {
@@ -2717,6 +2872,7 @@ var TransferToAgentTool = class extends BaseTool {
2717
2872
  };
2718
2873
 
2719
2874
  // src/tools/common/load-memory-tool.ts
2875
+ init_debug();
2720
2876
  init_base_tool();
2721
2877
  var LoadMemoryTool = class extends BaseTool {
2722
2878
  /**
@@ -2751,9 +2907,9 @@ var LoadMemoryTool = class extends BaseTool {
2751
2907
  * Execute the memory loading action
2752
2908
  */
2753
2909
  async runAsync(args, context) {
2754
- if (process.env.DEBUG === "true") {
2755
- console.log(`Executing load_memory with query: ${args.query}`);
2756
- }
2910
+ debugLog(
2911
+ `[LoadMemoryTool] Executing load_memory with query: ${args.query}`
2912
+ );
2757
2913
  if (!context.memoryService) {
2758
2914
  return {
2759
2915
  error: "Memory service is not available",
@@ -2791,6 +2947,8 @@ var McpErrorType = /* @__PURE__ */ ((McpErrorType2) => {
2791
2947
  return McpErrorType2;
2792
2948
  })(McpErrorType || {});
2793
2949
  var McpError = class extends Error {
2950
+ type;
2951
+ originalError;
2794
2952
  constructor(message, type, originalError) {
2795
2953
  super(message);
2796
2954
  this.name = "McpError";
@@ -2829,10 +2987,11 @@ function withRetry(fn, instance, reinitMethod, maxRetries = 1) {
2829
2987
 
2830
2988
  // src/tools/mcp/client.ts
2831
2989
  var McpClientService = class {
2990
+ config;
2991
+ client = null;
2992
+ transport = null;
2993
+ isClosing = false;
2832
2994
  constructor(config) {
2833
- this.client = null;
2834
- this.transport = null;
2835
- this.isClosing = false;
2836
2995
  this.config = config;
2837
2996
  }
2838
2997
  /**
@@ -3036,6 +3195,7 @@ var McpClientService = class {
3036
3195
  };
3037
3196
 
3038
3197
  // src/tools/mcp/create-tool.ts
3198
+ init_debug();
3039
3199
  init_base_tool();
3040
3200
 
3041
3201
  // src/tools/mcp/schema-conversion.ts
@@ -3235,6 +3395,9 @@ async function createTool(mcpTool, client) {
3235
3395
  }
3236
3396
  }
3237
3397
  var McpToolAdapter = class extends BaseTool {
3398
+ mcpTool;
3399
+ client;
3400
+ clientService = null;
3238
3401
  constructor(mcpTool, client) {
3239
3402
  const metadata = mcpTool.metadata || {};
3240
3403
  super({
@@ -3244,7 +3407,6 @@ var McpToolAdapter = class extends BaseTool {
3244
3407
  shouldRetryOnFailure: metadata.shouldRetryOnFailure ?? false,
3245
3408
  maxRetryAttempts: metadata.maxRetryAttempts ?? 3
3246
3409
  });
3247
- this.clientService = null;
3248
3410
  this.mcpTool = mcpTool;
3249
3411
  this.client = client;
3250
3412
  if (client.reinitialize && typeof client.reinitialize === "function") {
@@ -3268,9 +3430,10 @@ var McpToolAdapter = class extends BaseTool {
3268
3430
  }
3269
3431
  }
3270
3432
  async runAsync(args, _context) {
3271
- if (process.env.DEBUG === "true") {
3272
- console.log(`Executing MCP tool ${this.name} with args:`, args);
3273
- }
3433
+ debugLog(
3434
+ `[McpToolAdapter] Executing MCP tool ${this.name} with args:`,
3435
+ args
3436
+ );
3274
3437
  try {
3275
3438
  if (typeof this.mcpTool.execute === "function") {
3276
3439
  return await this.mcpTool.execute(args);
@@ -3323,11 +3486,12 @@ var McpToolAdapter = class extends BaseTool {
3323
3486
 
3324
3487
  // src/tools/mcp/index.ts
3325
3488
  var McpToolset = class {
3489
+ config;
3490
+ clientService = null;
3491
+ toolFilter = null;
3492
+ tools = [];
3493
+ isClosing = false;
3326
3494
  constructor(config, toolFilter = null) {
3327
- this.clientService = null;
3328
- this.toolFilter = null;
3329
- this.tools = [];
3330
- this.isClosing = false;
3331
3495
  this.config = config;
3332
3496
  this.toolFilter = toolFilter;
3333
3497
  this.clientService = new McpClientService(config);
@@ -3505,6 +3669,30 @@ __export(models_exports, {
3505
3669
 
3506
3670
  // src/models/llm-response.ts
3507
3671
  var LLMResponse = class {
3672
+ /**
3673
+ * Content of the response
3674
+ */
3675
+ content;
3676
+ /**
3677
+ * Function calls in the response
3678
+ */
3679
+ function_call;
3680
+ /**
3681
+ * Tool calls in the response
3682
+ */
3683
+ tool_calls;
3684
+ /**
3685
+ * Role of the message (usually 'assistant')
3686
+ */
3687
+ role;
3688
+ /**
3689
+ * Whether this is a partial response in a stream
3690
+ */
3691
+ is_partial;
3692
+ /**
3693
+ * Raw provider response
3694
+ */
3695
+ raw_response;
3508
3696
  constructor(data) {
3509
3697
  this.content = data.content;
3510
3698
  this.function_call = data.function_call;
@@ -3517,6 +3705,10 @@ var LLMResponse = class {
3517
3705
 
3518
3706
  // src/models/base-llm.ts
3519
3707
  var BaseLLM = class {
3708
+ /**
3709
+ * The name of the LLM model
3710
+ */
3711
+ model;
3520
3712
  /**
3521
3713
  * Constructor for BaseLLM
3522
3714
  */
@@ -3542,12 +3734,10 @@ var BaseLLM = class {
3542
3734
 
3543
3735
  // src/models/base-llm-connection.ts
3544
3736
  var BaseLLMConnection = class {
3545
- constructor() {
3546
- /**
3547
- * Whether the connection is active
3548
- */
3549
- this._isActive = true;
3550
- }
3737
+ /**
3738
+ * Whether the connection is active
3739
+ */
3740
+ _isActive = true;
3551
3741
  /**
3552
3742
  * Gets whether the connection is active
3553
3743
  */
@@ -3563,22 +3753,50 @@ var BaseLLMConnection = class {
3563
3753
  };
3564
3754
 
3565
3755
  // src/models/anthropic-llm.ts
3756
+ init_debug();
3566
3757
  import axios from "axios";
3567
3758
 
3568
3759
  // src/models/anthropic-llm-connection.ts
3760
+ init_debug();
3569
3761
  var AnthropicLLMConnection = class extends BaseLLMConnection {
3570
3762
  /**
3571
- * Constructor
3763
+ * Axios instance for API calls
3572
3764
  */
3573
- constructor(client, model, initialRequest, defaultParams) {
3574
- super();
3575
- this.client = client;
3576
- this.model = model;
3577
- this.defaultParams = defaultParams;
3578
- this.systemMessage = this.extractSystemMessage(initialRequest.messages);
3579
- this.messages = this.convertMessages(
3580
- this.filterSystemMessages(initialRequest.messages)
3581
- );
3765
+ client;
3766
+ /**
3767
+ * Current model to use
3768
+ */
3769
+ model;
3770
+ /**
3771
+ * Current messages in the conversation
3772
+ */
3773
+ messages;
3774
+ /**
3775
+ * System message if present
3776
+ */
3777
+ systemMessage;
3778
+ /**
3779
+ * Default parameters for requests
3780
+ */
3781
+ defaultParams;
3782
+ /**
3783
+ * Callbacks for handling responses, errors, and connection end
3784
+ */
3785
+ responseCallback;
3786
+ errorCallback;
3787
+ endCallback;
3788
+ /**
3789
+ * Constructor
3790
+ */
3791
+ constructor(client, model, initialRequest, defaultParams) {
3792
+ super();
3793
+ this.client = client;
3794
+ this.model = model;
3795
+ this.defaultParams = defaultParams;
3796
+ this.systemMessage = this.extractSystemMessage(initialRequest.messages);
3797
+ this.messages = this.convertMessages(
3798
+ this.filterSystemMessages(initialRequest.messages)
3799
+ );
3582
3800
  }
3583
3801
  /**
3584
3802
  * Extract the system message from messages array
@@ -3669,19 +3887,14 @@ ${typeof message.content === "string" ? message.content : JSON.stringify(message
3669
3887
  if (!content?.length) return [];
3670
3888
  const toolUses = [];
3671
3889
  for (const block of content) {
3672
- if (process.env.DEBUG === "true") {
3673
- console.log(
3674
- "Connection - Processing content block of type:",
3675
- block.type
3676
- );
3677
- }
3890
+ debugLog(
3891
+ `[AnthropicLLMConnection] Processing content block of type: ${block.type}`
3892
+ );
3678
3893
  if (block.type === "tool_use") {
3679
- if (process.env.DEBUG === "true") {
3680
- console.log(
3681
- "Connection - Found tool_use block:",
3682
- JSON.stringify(block, null, 2)
3683
- );
3684
- }
3894
+ debugLog(
3895
+ "[AnthropicLLMConnection] Found tool_use block:",
3896
+ JSON.stringify(block, null, 2)
3897
+ );
3685
3898
  toolUses.push({
3686
3899
  id: block.id || "unknown-id",
3687
3900
  name: block.name || "unknown-name",
@@ -3689,14 +3902,14 @@ ${typeof message.content === "string" ? message.content : JSON.stringify(message
3689
3902
  });
3690
3903
  }
3691
3904
  }
3692
- if (process.env.DEBUG === "true") {
3693
- console.log(`Connection - Found ${toolUses.length} tool uses in content`);
3694
- if (toolUses.length > 0) {
3695
- console.log(
3696
- "Connection - Extracted tool uses:",
3697
- JSON.stringify(toolUses, null, 2)
3698
- );
3699
- }
3905
+ debugLog(
3906
+ `[AnthropicLLMConnection] Found ${toolUses.length} tool uses in content`
3907
+ );
3908
+ if (toolUses.length > 0) {
3909
+ debugLog(
3910
+ "[AnthropicLLMConnection] Extracted tool uses:",
3911
+ JSON.stringify(toolUses, null, 2)
3912
+ );
3700
3913
  }
3701
3914
  return toolUses;
3702
3915
  }
@@ -3790,43 +4003,30 @@ ${typeof message.content === "string" ? message.content : JSON.stringify(message
3790
4003
  }
3791
4004
  const toolUses = this.extractToolUses(apiResponse.content);
3792
4005
  const toolCalls = this.convertToolCalls(toolUses);
3793
- if (process.env.DEBUG === "true") {
3794
- if (toolUses.length > 0) {
3795
- console.log(
3796
- "Connection - Extracted Tool Uses:",
3797
- JSON.stringify(toolUses, null, 2)
3798
- );
3799
- console.log(
3800
- "Connection - Converted Tool Calls:",
3801
- JSON.stringify(toolCalls, null, 2)
3802
- );
3803
- }
3804
- }
4006
+ debugLog(
4007
+ `[AnthropicLLMConnection] - Extracted ${toolUses.length} tool uses in content and converted ${toolCalls?.length || 0} tool calls`
4008
+ );
3805
4009
  const llmResponse = new LLMResponse({
3806
4010
  role: "assistant",
3807
4011
  content,
3808
4012
  tool_calls: toolCalls?.length ? toolCalls : void 0,
3809
4013
  raw_response: apiResponse
3810
4014
  });
3811
- if (process.env.DEBUG === "true") {
3812
- console.log(
3813
- "Connection - Final LLMResponse object:",
3814
- JSON.stringify(
3815
- {
3816
- role: llmResponse.role,
3817
- content: llmResponse.content?.substring(0, 50) + (llmResponse.content && llmResponse.content.length > 50 ? "..." : ""),
3818
- tool_calls: llmResponse.tool_calls ? `[${llmResponse.tool_calls.length} calls]` : "undefined"
3819
- },
3820
- null,
3821
- 2
3822
- )
3823
- );
3824
- }
4015
+ const logObject = {
4016
+ role: llmResponse.role,
4017
+ content: llmResponse.content?.substring(0, 50) + (llmResponse.content && llmResponse.content.length > 50 ? "..." : ""),
4018
+ tool_calls: llmResponse.tool_calls ? `[${llmResponse.tool_calls.length} calls]` : "undefined"
4019
+ };
4020
+ debugLog(
4021
+ "[AnthropicLLMConnection] Final LLMResponse object:",
4022
+ JSON.stringify(logObject, null, 2)
4023
+ );
3825
4024
  return llmResponse;
3826
4025
  } catch (error) {
3827
- if (process.env.DEBUG === "true") {
3828
- console.error("Error sending message to Anthropic:", error);
3829
- }
4026
+ debugLog(
4027
+ "[AnthropicLLMConnection] Error sending message to Anthropic:",
4028
+ error
4029
+ );
3830
4030
  throw error;
3831
4031
  }
3832
4032
  }
@@ -3834,6 +4034,18 @@ ${typeof message.content === "string" ? message.content : JSON.stringify(message
3834
4034
 
3835
4035
  // src/models/anthropic-llm.ts
3836
4036
  var AnthropicLLM = class extends BaseLLM {
4037
+ /**
4038
+ * Anthropic API key
4039
+ */
4040
+ apiKey;
4041
+ /**
4042
+ * Anthropic API base URL
4043
+ */
4044
+ baseURL;
4045
+ /**
4046
+ * Default parameters for requests
4047
+ */
4048
+ defaultParams;
3837
4049
  /**
3838
4050
  * Constructor for AnthropicLLM
3839
4051
  */
@@ -3966,13 +4178,13 @@ ${typeof message.content === "string" ? message.content : JSON.stringify(message
3966
4178
  if (!content?.length) return [];
3967
4179
  const toolUses = [];
3968
4180
  for (const block of content) {
3969
- if (process.env.DEBUG === "true") {
3970
- console.log("Processing content block of type:", block.type);
3971
- }
4181
+ debugLog(
4182
+ `[AnthropicLLM] Processing content block of type: ${block.type}`
4183
+ );
3972
4184
  if (block.type === "tool_use") {
3973
- if (process.env.DEBUG === "true") {
3974
- console.log("Found tool_use block:", JSON.stringify(block, null, 2));
3975
- }
4185
+ debugLog(
4186
+ `[AnthropicLLM] Found tool_use block: ${JSON.stringify(block, null, 2)}`
4187
+ );
3976
4188
  toolUses.push({
3977
4189
  id: block.id || "unknown-id",
3978
4190
  name: block.name || "unknown-name",
@@ -3980,12 +4192,10 @@ ${typeof message.content === "string" ? message.content : JSON.stringify(message
3980
4192
  });
3981
4193
  }
3982
4194
  }
3983
- if (process.env.DEBUG === "true") {
3984
- console.log(`Found ${toolUses.length} tool uses in content`);
3985
- if (toolUses.length > 0) {
3986
- console.log("Extracted tool uses:", JSON.stringify(toolUses, null, 2));
3987
- }
3988
- }
4195
+ debugLog(
4196
+ `[AnthropicLLM] Found ${toolUses.length} tool uses in content`,
4197
+ toolUses
4198
+ );
3989
4199
  return toolUses;
3990
4200
  }
3991
4201
  /**
@@ -4007,16 +4217,14 @@ ${typeof message.content === "string" ? message.content : JSON.stringify(message
4007
4217
  },
4008
4218
  responseType: stream ? "stream" : "json"
4009
4219
  });
4010
- if (process.env.DEBUG === "true") {
4011
- console.log("Anthropic API Response Status:", response.status);
4012
- if (!stream) {
4013
- console.log("Response Data Structure:", Object.keys(response.data));
4014
- console.log(
4015
- "Response Content Structure:",
4016
- response.data.content.map((block) => ({ type: block.type }))
4017
- );
4018
- }
4019
- }
4220
+ debugLog(
4221
+ `[AnthropicLLM] API Response done with ${response.status}:`,
4222
+ response.data
4223
+ );
4224
+ debugLog(
4225
+ "[AnthropicLLM] API Response content:",
4226
+ response.data.content.map((block) => ({ type: block.type }))
4227
+ );
4020
4228
  return response.data;
4021
4229
  } catch (error) {
4022
4230
  console.error("Error calling Anthropic API:", error);
@@ -4042,24 +4250,17 @@ ${typeof message.content === "string" ? message.content : JSON.stringify(message
4042
4250
  top_p: llmRequest.config.top_p ?? this.defaultParams.top_p,
4043
4251
  tools: tools?.length ? tools : void 0
4044
4252
  };
4045
- if (process.env.DEBUG === "true") {
4046
- console.log("Anthropic API Request:", {
4047
- model: params.model,
4048
- messageCount: params.messages.length,
4049
- systemMessage: params.system ? "present" : "none",
4050
- tools: params.tools ? params.tools.map((t) => t.name) : "none"
4051
- });
4052
- }
4253
+ debugLog("[AnthropicLLM] API Request:", {
4254
+ model: params.model,
4255
+ messageCount: params.messages.length,
4256
+ systemMessage: params.system ? "present" : "none",
4257
+ tools: params.tools ? params.tools.map((t) => t.name) : "none"
4258
+ });
4053
4259
  if (stream) {
4054
4260
  throw new Error("Streaming is not supported in this implementation");
4055
4261
  }
4056
4262
  const response = await this.callAnthropicAPI(params);
4057
- if (process.env.DEBUG === "true") {
4058
- console.log(
4059
- "Full Response Content:",
4060
- JSON.stringify(response.content, null, 2)
4061
- );
4062
- }
4263
+ debugLog("[AnthropicLLM] Full Response Content:", response.content);
4063
4264
  let content = "";
4064
4265
  for (const block of response.content) {
4065
4266
  if (block.type === "text") {
@@ -4068,43 +4269,26 @@ ${typeof message.content === "string" ? message.content : JSON.stringify(message
4068
4269
  }
4069
4270
  const toolUses = this.extractToolUses(response.content);
4070
4271
  const toolCalls = this.convertToolUses(toolUses);
4071
- if (process.env.DEBUG === "true") {
4072
- if (toolUses.length > 0) {
4073
- console.log(
4074
- "Extracted Tool Uses:",
4075
- JSON.stringify(toolUses, null, 2)
4076
- );
4077
- console.log(
4078
- "Converted Tool Calls:",
4079
- JSON.stringify(toolCalls, null, 2)
4080
- );
4081
- }
4082
- }
4272
+ debugLog("[AnthropicLLM] Extracted Tool Uses:", toolUses);
4273
+ debugLog("[AnthropicLLM] Converted Tool Calls:", toolCalls);
4083
4274
  const llmResponse = new LLMResponse({
4084
4275
  role: "assistant",
4085
4276
  content,
4086
4277
  tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
4087
4278
  raw_response: response
4088
4279
  });
4089
- if (process.env.DEBUG === "true") {
4090
- console.log(
4091
- "Final LLMResponse object:",
4092
- JSON.stringify(
4093
- {
4094
- role: llmResponse.role,
4095
- content: llmResponse.content?.substring(0, 50) + (llmResponse.content && llmResponse.content.length > 50 ? "..." : ""),
4096
- tool_calls: llmResponse.tool_calls ? `[${llmResponse.tool_calls.length} calls]` : "undefined"
4097
- },
4098
- null,
4099
- 2
4100
- )
4101
- );
4102
- }
4280
+ const logObject = {
4281
+ role: llmResponse.role,
4282
+ content: llmResponse.content?.substring(0, 50) + (llmResponse.content && llmResponse.content.length > 50 ? "..." : ""),
4283
+ tool_calls: llmResponse.tool_calls ? `[${llmResponse.tool_calls.length} calls]` : "undefined"
4284
+ };
4285
+ debugLog(
4286
+ "[AnthropicLLM] Final LLMResponse object:",
4287
+ JSON.stringify(logObject, null, 2)
4288
+ );
4103
4289
  yield llmResponse;
4104
4290
  } catch (error) {
4105
- if (process.env.DEBUG === "true") {
4106
- console.error("Error calling Anthropic:", error);
4107
- }
4291
+ debugLog("[AnthropicLLM] Error:", error);
4108
4292
  throw error;
4109
4293
  }
4110
4294
  }
@@ -4134,6 +4318,14 @@ import {
4134
4318
  GoogleGenAI
4135
4319
  } from "@google/genai";
4136
4320
  var GoogleLLM = class extends BaseLLM {
4321
+ /**
4322
+ * Generative model instance
4323
+ */
4324
+ ai;
4325
+ /**
4326
+ * Default parameters for requests
4327
+ */
4328
+ defaultParams;
4137
4329
  /**
4138
4330
  * Constructor for GoogleLLM
4139
4331
  */
@@ -4491,19 +4683,48 @@ var GoogleLLM = class extends BaseLLM {
4491
4683
  };
4492
4684
 
4493
4685
  // src/models/openai-llm.ts
4686
+ init_debug();
4494
4687
  import OpenAI from "openai";
4495
4688
 
4496
4689
  // src/models/openai-llm-connection.ts
4497
4690
  var OpenAILLMConnection = class extends BaseLLMConnection {
4691
+ /**
4692
+ * OpenAI client
4693
+ */
4694
+ client;
4695
+ /**
4696
+ * The model name
4697
+ */
4698
+ model;
4699
+ /**
4700
+ * The initial request
4701
+ */
4702
+ initialRequest;
4703
+ /**
4704
+ * Default parameters
4705
+ */
4706
+ defaultParams;
4707
+ /**
4708
+ * Response callback
4709
+ */
4710
+ responseCallback;
4711
+ /**
4712
+ * Error callback
4713
+ */
4714
+ errorCallback;
4715
+ /**
4716
+ * End callback
4717
+ */
4718
+ endCallback;
4719
+ /**
4720
+ * Ongoing chat history
4721
+ */
4722
+ messages = [];
4498
4723
  /**
4499
4724
  * Constructor for OpenAILLMConnection
4500
4725
  */
4501
4726
  constructor(client, model, initialRequest, defaultParams) {
4502
4727
  super();
4503
- /**
4504
- * Ongoing chat history
4505
- */
4506
- this.messages = [];
4507
4728
  this.client = client;
4508
4729
  this.model = model;
4509
4730
  this.initialRequest = initialRequest;
@@ -4714,6 +4935,14 @@ var OpenAILLMConnection = class extends BaseLLMConnection {
4714
4935
 
4715
4936
  // src/models/openai-llm.ts
4716
4937
  var OpenAILLM = class extends BaseLLM {
4938
+ /**
4939
+ * OpenAI client instance
4940
+ */
4941
+ client;
4942
+ /**
4943
+ * Default parameters for requests
4944
+ */
4945
+ defaultParams;
4717
4946
  /**
4718
4947
  * Constructor for OpenAILLM
4719
4948
  */
@@ -4863,11 +5092,9 @@ var OpenAILLM = class extends BaseLLM {
4863
5092
  * Convert OpenAI streaming chunk to LLMResponse
4864
5093
  */
4865
5094
  convertChunk(chunk) {
4866
- if (process.env.DEBUG === "true") {
4867
- console.log(
4868
- `OpenAI: Converting chunk - delta: ${JSON.stringify(chunk.delta || {})}`
4869
- );
4870
- }
5095
+ debugLog(
5096
+ `[OpenAILLM]: Converting chunk - delta: ${JSON.stringify(chunk.delta || {})}`
5097
+ );
4871
5098
  const content = chunk.delta?.content;
4872
5099
  const result = new LLMResponse({
4873
5100
  content: content !== void 0 ? content : null,
@@ -4908,32 +5135,24 @@ var OpenAILLM = class extends BaseLLM {
4908
5135
  presence_penalty: llmRequest.config.presence_penalty ?? this.defaultParams.presence_penalty,
4909
5136
  stream: shouldStream
4910
5137
  };
4911
- if (process.env.DEBUG === "true") {
4912
- console.log(
4913
- `OpenAI: Streaming mode ${shouldStream ? "enabled" : "disabled"}`
4914
- );
4915
- }
5138
+ debugLog(
5139
+ `[OpenAILLM] Request parameters - model: ${params.model}, messages: ${params.messages.length}, functions: ${params.tools ? params.tools.length : 0}, streaming: ${shouldStream}`
5140
+ );
4916
5141
  if (tools && tools.length > 0) {
4917
5142
  params.tools = tools;
4918
5143
  }
4919
5144
  try {
4920
5145
  if (shouldStream) {
4921
- if (process.env.DEBUG === "true") {
4922
- console.log("OpenAI: Starting streaming request");
4923
- }
5146
+ debugLog("[OpenAILLM] Starting streaming request");
4924
5147
  const streamResponse = await this.client.chat.completions.create(params);
4925
5148
  let partialFunctionCall;
4926
5149
  const partialToolCalls = /* @__PURE__ */ new Map();
4927
5150
  let accumulatedContent = "";
4928
5151
  const asyncIterable = streamResponse;
4929
- if (process.env.DEBUG === "true") {
4930
- console.log("OpenAI: Stream response received, processing chunks");
4931
- }
5152
+ debugLog("[OpenAILLM] Stream response received, processing chunks");
4932
5153
  for await (const chunk of asyncIterable) {
4933
5154
  if (!chunk.choices || chunk.choices.length === 0) {
4934
- if (process.env.DEBUG === "true") {
4935
- console.log("OpenAI: Empty chunk received, skipping");
4936
- }
5155
+ debugLog("[OpenAILLM] Empty chunk received, skipping");
4937
5156
  continue;
4938
5157
  }
4939
5158
  const choice = chunk.choices[0];
@@ -4941,14 +5160,12 @@ var OpenAILLM = class extends BaseLLM {
4941
5160
  if (responseChunk.content !== null) {
4942
5161
  accumulatedContent += responseChunk.content;
4943
5162
  }
4944
- if (process.env.DEBUG === "true") {
4945
- console.log(
4946
- `OpenAI: Chunk received - delta: "${choice.delta?.content || ""}"`,
4947
- `responseChunk content: "${responseChunk.content || ""}"`,
4948
- `is_partial: ${responseChunk.is_partial}`,
4949
- `accumulated: "${accumulatedContent.substring(0, 30)}${accumulatedContent.length > 30 ? "..." : ""}"`
4950
- );
4951
- }
5163
+ debugLog(
5164
+ `[OpenAILLM] Chunk received - delta: "${choice.delta?.content || ""}"`,
5165
+ `responseChunk content: "${responseChunk.content || ""}"`,
5166
+ `is_partial: ${responseChunk.is_partial}`,
5167
+ `accumulated: "${accumulatedContent.substring(0, 30)}${accumulatedContent.length > 30 ? "..." : ""}"`
5168
+ );
4952
5169
  if (responseChunk.function_call) {
4953
5170
  if (!partialFunctionCall) {
4954
5171
  partialFunctionCall = {
@@ -4973,37 +5190,27 @@ var OpenAILLM = class extends BaseLLM {
4973
5190
  }
4974
5191
  responseChunk.tool_calls = Array.from(partialToolCalls.values());
4975
5192
  }
4976
- if (process.env.DEBUG === "true") {
4977
- console.log("OpenAI: Yielding chunk to caller");
4978
- }
5193
+ debugLog("[OpenAILLM] Yielding chunk to caller");
4979
5194
  yield responseChunk;
4980
5195
  }
4981
5196
  if (accumulatedContent.length > 0) {
4982
- if (process.env.DEBUG === "true") {
4983
- console.log(
4984
- `OpenAI: Yielding final accumulated content: "${accumulatedContent.substring(0, 30)}${accumulatedContent.length > 30 ? "..." : ""}"`
4985
- );
4986
- }
5197
+ debugLog(
5198
+ `[OpenAILLM] Yielding final accumulated content: "${accumulatedContent.substring(0, 30)}${accumulatedContent.length > 30 ? "..." : ""}"`
5199
+ );
4987
5200
  yield new LLMResponse({
4988
5201
  content: accumulatedContent,
4989
5202
  role: "assistant",
4990
5203
  is_partial: false
4991
5204
  });
4992
5205
  }
4993
- if (process.env.DEBUG === "true") {
4994
- console.log("OpenAI: Finished processing all stream chunks");
4995
- }
5206
+ debugLog("[OpenAILLM] Finished processing all stream chunks");
4996
5207
  } else {
4997
- if (process.env.DEBUG === "true") {
4998
- console.log("OpenAI: Making non-streaming request");
4999
- }
5208
+ debugLog("[OpenAILLM] Making non-streaming request");
5000
5209
  const response = await this.client.chat.completions.create(params);
5001
5210
  if (!response.choices || response.choices.length === 0) {
5002
5211
  throw new Error("No response from OpenAI");
5003
5212
  }
5004
- if (process.env.DEBUG === "true") {
5005
- console.log("OpenAI: Non-streaming response received");
5006
- }
5213
+ debugLog("[OpenAILLM] Non-streaming response received");
5007
5214
  yield this.convertResponse(response.choices[0]);
5008
5215
  }
5009
5216
  } catch (error) {
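The hunk above routes the streaming path's tracing through debugLog instead of ad-hoc process.env.DEBUG === "true" checks, so the per-chunk output only appears when debug logging is enabled. Callers still receive the same sequence the generator yields: partial chunks while text accumulates, followed (when any text was accumulated) by one final non-partial LLMResponse carrying the full content. A minimal consumer-side sketch of that contract follows; the handler name and its chunk type are illustrative only, and the streaming entry point itself is not shown in this hunk:

    // Illustrative handler; `chunk` mirrors the LLMResponse fields yielded above.
    function handleChunk(chunk: { content: string | null; is_partial?: boolean }): void {
      if (chunk.is_partial) {
        process.stdout.write(chunk.content ?? ""); // incremental delta while streaming
      } else {
        console.log(`\n[final] ${chunk.content ?? ""}`); // accumulated, non-partial response
      }
    }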
@@ -5042,6 +5249,10 @@ var AuthCredentialType = /* @__PURE__ */ ((AuthCredentialType2) => {
5042
5249
  return AuthCredentialType2;
5043
5250
  })(AuthCredentialType || {});
5044
5251
  var AuthCredential = class {
5252
+ /**
5253
+ * Type of credential
5254
+ */
5255
+ type;
5045
5256
  /**
5046
5257
  * Constructor for AuthCredential
5047
5258
  */
@@ -5062,6 +5273,10 @@ var AuthCredential = class {
5062
5273
  }
5063
5274
  };
5064
5275
  var ApiKeyCredential = class extends AuthCredential {
5276
+ /**
5277
+ * The API key
5278
+ */
5279
+ apiKey;
5065
5280
  /**
5066
5281
  * Constructor for ApiKeyCredential
5067
5282
  */
@@ -5087,6 +5302,14 @@ var ApiKeyCredential = class extends AuthCredential {
5087
5302
  }
5088
5303
  };
5089
5304
  var BasicAuthCredential = class extends AuthCredential {
5305
+ /**
5306
+ * The username
5307
+ */
5308
+ username;
5309
+ /**
5310
+ * The password
5311
+ */
5312
+ password;
5090
5313
  /**
5091
5314
  * Constructor for BasicAuthCredential
5092
5315
  */
@@ -5111,6 +5334,10 @@ var BasicAuthCredential = class extends AuthCredential {
5111
5334
  }
5112
5335
  };
5113
5336
  var BearerTokenCredential = class extends AuthCredential {
5337
+ /**
5338
+ * The bearer token
5339
+ */
5340
+ token;
5114
5341
  /**
5115
5342
  * Constructor for BearerTokenCredential
5116
5343
  */
@@ -5134,6 +5361,22 @@ var BearerTokenCredential = class extends AuthCredential {
5134
5361
  }
5135
5362
  };
5136
5363
  var OAuth2Credential = class extends AuthCredential {
5364
+ /**
5365
+ * The access token
5366
+ */
5367
+ accessToken;
5368
+ /**
5369
+ * The refresh token
5370
+ */
5371
+ refreshToken;
5372
+ /**
5373
+ * When the token expires
5374
+ */
5375
+ expiresAt;
5376
+ /**
5377
+ * Function to refresh the token
5378
+ */
5379
+ refreshFunction;
5137
5380
  /**
5138
5381
  * Constructor for OAuth2Credential
5139
5382
  */
@@ -5200,6 +5443,14 @@ var OAuth2Credential = class extends AuthCredential {
5200
5443
 
5201
5444
  // src/auth/auth-config.ts
5202
5445
  var AuthConfig = class {
5446
+ /**
5447
+ * The authentication scheme
5448
+ */
5449
+ authScheme;
5450
+ /**
5451
+ * Additional context properties
5452
+ */
5453
+ context;
5203
5454
  /**
5204
5455
  * Constructor for AuthConfig
5205
5456
  */
@@ -5211,6 +5462,14 @@ var AuthConfig = class {
5211
5462
 
5212
5463
  // src/auth/auth-handler.ts
5213
5464
  var AuthHandler = class {
5465
+ /**
5466
+ * The authentication configuration
5467
+ */
5468
+ authConfig;
5469
+ /**
5470
+ * The authentication credential
5471
+ */
5472
+ credential;
5214
5473
  /**
5215
5474
  * Constructor for AuthHandler
5216
5475
  */
@@ -5252,11 +5511,27 @@ var AuthSchemeType = /* @__PURE__ */ ((AuthSchemeType2) => {
5252
5511
  return AuthSchemeType2;
5253
5512
  })(AuthSchemeType || {});
5254
5513
  var AuthScheme = class {
5514
+ /**
5515
+ * The type of authentication scheme
5516
+ */
5517
+ type;
5255
5518
  constructor(type) {
5256
5519
  this.type = type;
5257
5520
  }
5258
5521
  };
5259
5522
  var ApiKeyScheme = class extends AuthScheme {
5523
+ /**
5524
+ * Where the API key is sent
5525
+ */
5526
+ in;
5527
+ /**
5528
+ * Name of the parameter
5529
+ */
5530
+ name;
5531
+ /**
5532
+ * Description of the API key
5533
+ */
5534
+ description;
5260
5535
  /**
5261
5536
  * Constructor for ApiKeyScheme
5262
5537
  */
@@ -5268,6 +5543,18 @@ var ApiKeyScheme = class extends AuthScheme {
5268
5543
  }
5269
5544
  };
5270
5545
  var HttpScheme = class extends AuthScheme {
5546
+ /**
5547
+ * The HTTP authentication scheme
5548
+ */
5549
+ scheme;
5550
+ /**
5551
+ * Bearer format when scheme is 'bearer'
5552
+ */
5553
+ bearerFormat;
5554
+ /**
5555
+ * Description of the scheme
5556
+ */
5557
+ description;
5271
5558
  /**
5272
5559
  * Constructor for HttpScheme
5273
5560
  */
@@ -5279,6 +5566,14 @@ var HttpScheme = class extends AuthScheme {
5279
5566
  }
5280
5567
  };
5281
5568
  var OAuth2Scheme = class extends AuthScheme {
5569
+ /**
5570
+ * OAuth flows
5571
+ */
5572
+ flows;
5573
+ /**
5574
+ * Description of the scheme
5575
+ */
5576
+ description;
5282
5577
  /**
5283
5578
  * Constructor for OAuth2Scheme
5284
5579
  */
@@ -5289,6 +5584,14 @@ var OAuth2Scheme = class extends AuthScheme {
5289
5584
  }
5290
5585
  };
5291
5586
  var OpenIdConnectScheme = class extends AuthScheme {
5587
+ /**
5588
+ * OpenID Connect URL
5589
+ */
5590
+ openIdConnectUrl;
5591
+ /**
5592
+ * Description of the scheme
5593
+ */
5594
+ description;
5292
5595
  /**
5293
5596
  * Constructor for OpenIdConnectScheme
5294
5597
  */
@@ -5301,6 +5604,8 @@ var OpenIdConnectScheme = class extends AuthScheme {
5301
5604
 
5302
5605
  // src/sessions/state.ts
5303
5606
  var SessionState = class _SessionState {
5607
+ state;
5608
+ dirty;
5304
5609
  constructor() {
5305
5610
  this.state = /* @__PURE__ */ new Map();
5306
5611
  this.dirty = /* @__PURE__ */ new Set();
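SessionState now declares its state map and dirty set as class fields. Its set(), toObject() and fromObject() members are exercised elsewhere in this diff (appendEvent applies state deltas via set(); the session services persist with toObject() and rehydrate with fromObject()), so a round-trip sketch based only on those calls could look like this — a sketch under those assumptions, not an excerpt from the package:

    import { SessionState } from "@iqai/adk";

    const state = new SessionState();
    state.set("lastTopic", "billing");                    // same call appendEvent uses for state deltas
    const persisted = state.toObject();                   // plain object, as stored in the sessions tables
    const restored = SessionState.fromObject(persisted);  // used when reading a session back
    console.log(restored);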
@@ -5388,6 +5693,10 @@ __export(memory_exports, {
5388
5693
 
5389
5694
  // src/memory/in-memory-memory-service.ts
5390
5695
  var InMemoryMemoryService = class {
5696
+ /**
5697
+ * Map of sessions by ID
5698
+ */
5699
+ sessions;
5391
5700
  /**
5392
5701
  * Constructor for InMemoryMemoryService
5393
5702
  */
@@ -5488,9 +5797,22 @@ var InMemoryMemoryService = class {
5488
5797
  };
5489
5798
 
5490
5799
  // src/memory/persistent-memory-service.ts
5800
+ init_debug();
5491
5801
  import fs2 from "fs";
5492
5802
  import path2 from "path";
5493
5803
  var PersistentMemoryService = class {
5804
+ /**
5805
+ * In-memory service used for search operations
5806
+ */
5807
+ inMemoryService;
5808
+ /**
5809
+ * Directory where memory files will be stored
5810
+ */
5811
+ storageDir;
5812
+ /**
5813
+ * File prefix for memory files
5814
+ */
5815
+ filePrefix;
5494
5816
  /**
5495
5817
  * Constructor for PersistentMemoryService
5496
5818
  */
@@ -5579,11 +5901,9 @@ var PersistentMemoryService = class {
5579
5901
  }
5580
5902
  }
5581
5903
  }
5582
- if (process.env.DEBUG === "true") {
5583
- console.log(
5584
- `Loaded ${this.inMemoryService.getAllSessions().length} sessions from persistent storage`
5585
- );
5586
- }
5904
+ debugLog(
5905
+ `Loaded ${this.inMemoryService.getAllSessions().length} sessions from persistent storage`
5906
+ );
5587
5907
  } catch (error) {
5588
5908
  console.error("Error loading memory files:", error);
5589
5909
  }
@@ -5647,6 +5967,7 @@ __export(sessions_exports, {
5647
5967
  PgLiteSessionService: () => PgLiteSessionService,
5648
5968
  PostgresSessionService: () => PostgresSessionService,
5649
5969
  SessionState: () => SessionState,
5970
+ SqliteSessionService: () => SqliteSessionService,
5650
5971
  cloneSession: () => cloneSession,
5651
5972
  generateSessionId: () => generateSessionId,
5652
5973
  validateSession: () => validateSession
@@ -5654,6 +5975,10 @@ __export(sessions_exports, {
5654
5975
 
5655
5976
  // src/sessions/in-memory-session-service.ts
5656
5977
  var InMemorySessionService = class {
5978
+ /**
5979
+ * Map of sessions by ID
5980
+ */
5981
+ sessions;
5657
5982
  /**
5658
5983
  * Constructor for InMemorySessionService
5659
5984
  */
@@ -5799,6 +6124,8 @@ var sessionsSchema = pgTable("sessions", {
5799
6124
  // Store serialized SessionState as JSONB
5800
6125
  });
5801
6126
  var PostgresSessionService = class {
6127
+ db;
6128
+ sessionsTable;
5802
6129
  constructor(config) {
5803
6130
  this.db = config.db;
5804
6131
  this.sessionsTable = config.sessionsTable || sessionsSchema;
@@ -5921,22 +6248,25 @@ var PostgresSessionService = class {
5921
6248
  // src/sessions/pglite-session-service.ts
5922
6249
  import { eq as eq2 } from "drizzle-orm";
5923
6250
  import { jsonb as jsonb2, pgTable as pgTable2, timestamp as timestamp2, varchar as varchar2 } from "drizzle-orm/pg-core";
6251
+ import { drizzle } from "drizzle-orm/pglite";
5924
6252
  var sessionsSchema2 = pgTable2("sessions", {
5925
6253
  id: varchar2("id", { length: 255 }).primaryKey(),
5926
6254
  userId: varchar2("user_id", { length: 255 }).notNull(),
5927
6255
  messages: jsonb2("messages").default("[]").$type(),
5928
- // Store Message array as JSONB
5929
6256
  metadata: jsonb2("metadata").default("{}").$type(),
5930
6257
  createdAt: timestamp2("created_at", { withTimezone: true }).defaultNow().notNull(),
5931
6258
  updatedAt: timestamp2("updated_at", { withTimezone: true }).defaultNow().notNull(),
5932
6259
  state: jsonb2("state").default("{}").$type()
5933
- // Store serialized SessionState as JSONB
5934
6260
  });
5935
6261
  var PgLiteSessionService = class {
6262
+ db;
6263
+ sessionsTable;
6264
+ initialized = false;
5936
6265
  constructor(config) {
5937
- this.initialized = false;
5938
- this.db = config.db;
5939
- this.sessionsTable = config.sessionsTable || sessionsSchema2;
6266
+ this.db = drizzle(config.pglite, {
6267
+ schema: { sessions: sessionsSchema2 }
6268
+ });
6269
+ this.sessionsTable = sessionsSchema2;
5940
6270
  if (!config.skipTableCreation) {
5941
6271
  this.initializeDatabase().catch((error) => {
5942
6272
  console.error("Failed to initialize PgLite database:", error);
@@ -5991,9 +6321,7 @@ var PgLiteSessionService = class {
5991
6321
  metadata,
5992
6322
  createdAt: now,
5993
6323
  updatedAt: now,
5994
- // Drizzle's defaultNow() on schema handles this, but explicit is fine
5995
6324
  state: sessionState.toObject()
5996
- // Serialize SessionState
5997
6325
  };
5998
6326
  const results = await this.db.insert(this.sessionsTable).values(newSessionData).returning();
5999
6327
  const result = results[0];
@@ -6008,7 +6336,6 @@ var PgLiteSessionService = class {
6008
6336
  messages: Array.isArray(result.messages) ? result.messages : [],
6009
6337
  metadata: result.metadata || {},
6010
6338
  state: SessionState.fromObject(result.state || {}),
6011
- // Ensure dates are Date objects if Drizzle returns strings for some drivers/configs
6012
6339
  createdAt: new Date(result.createdAt),
6013
6340
  updatedAt: new Date(result.updatedAt)
6014
6341
  };
@@ -6036,7 +6363,6 @@ var PgLiteSessionService = class {
6036
6363
  userId: session.userId,
6037
6364
  messages: session.messages,
6038
6365
  metadata: session.metadata,
6039
- // createdAt should typically not be updated after creation
6040
6366
  updatedAt: /* @__PURE__ */ new Date(),
6041
6367
  state: session.state.toObject()
6042
6368
  };
@@ -6063,12 +6389,191 @@ var PgLiteSessionService = class {
6063
6389
  await this.ensureInitialized();
6064
6390
  await this.db.delete(this.sessionsTable).where(eq2(this.sessionsTable.id, sessionId));
6065
6391
  }
6392
+ async appendEvent(session, event) {
6393
+ await this.ensureInitialized();
6394
+ if (event.is_partial) {
6395
+ return event;
6396
+ }
6397
+ if (event.actions?.stateDelta) {
6398
+ for (const [key, value] of Object.entries(event.actions.stateDelta)) {
6399
+ if (key.startsWith("_temp_")) {
6400
+ continue;
6401
+ }
6402
+ session.state?.set(key, value);
6403
+ }
6404
+ }
6405
+ if (!session.events) {
6406
+ session.events = [];
6407
+ }
6408
+ session.events.push(event);
6409
+ session.updatedAt = /* @__PURE__ */ new Date();
6410
+ await this.updateSession(session);
6411
+ return event;
6412
+ }
6413
+ };
6414
+
6415
+ // src/sessions/sqlite-session-service.ts
6416
+ import * as fs3 from "fs";
6417
+ import * as path3 from "path";
6418
+ import { eq as eq3 } from "drizzle-orm";
6419
+ import {
6420
+ drizzle as drizzle2
6421
+ } from "drizzle-orm/better-sqlite3";
6422
+ import { integer, text } from "drizzle-orm/sqlite-core";
6423
+ import { sqliteTable } from "drizzle-orm/sqlite-core";
6424
+ var sessionsSchema3 = sqliteTable("sessions", {
6425
+ id: text("id").primaryKey(),
6426
+ userId: text("user_id").notNull(),
6427
+ messages: text("messages", { mode: "json" }).default("[]").$type(),
6428
+ metadata: text("metadata", { mode: "json" }).default("{}").$type(),
6429
+ createdAt: integer("created_at", { mode: "timestamp" }).notNull(),
6430
+ updatedAt: integer("updated_at", { mode: "timestamp" }).notNull(),
6431
+ state: text("state", { mode: "json" }).default("{}").$type()
6432
+ });
6433
+ var SqliteSessionService = class {
6434
+ db;
6435
+ sessionsTable;
6436
+ initialized = false;
6437
+ sqliteInstance;
6438
+ constructor(config) {
6439
+ this.sqliteInstance = config.sqlite;
6440
+ const dbPath = this.sqliteInstance.name;
6441
+ if (dbPath && dbPath !== ":memory:") {
6442
+ const dbDir = path3.dirname(dbPath);
6443
+ if (!fs3.existsSync(dbDir)) {
6444
+ fs3.mkdirSync(dbDir, { recursive: true });
6445
+ }
6446
+ }
6447
+ this.db = drizzle2(config.sqlite, {
6448
+ schema: { sessions: sessionsSchema3 }
6449
+ });
6450
+ this.sessionsTable = sessionsSchema3;
6451
+ if (!config.skipTableCreation) {
6452
+ this.initializeDatabase().catch((error) => {
6453
+ console.error("Failed to initialize SQLite database:", error);
6454
+ });
6455
+ }
6456
+ }
6066
6457
  /**
6067
- * Appends an event to a session object
6068
- * @param session The session to append the event to
6069
- * @param event The event to append
6070
- * @returns The appended event
6458
+ * Initialize the database by creating required tables if they don't exist
6459
+ */
6460
+ async initializeDatabase() {
6461
+ if (this.initialized) {
6462
+ return;
6463
+ }
6464
+ try {
6465
+ this.sqliteInstance.pragma("journal_mode = WAL");
6466
+ this.sqliteInstance.exec(`
6467
+ CREATE TABLE IF NOT EXISTS sessions (
6468
+ id TEXT PRIMARY KEY,
6469
+ user_id TEXT NOT NULL,
6470
+ messages TEXT DEFAULT '[]',
6471
+ metadata TEXT DEFAULT '{}',
6472
+ created_at INTEGER NOT NULL,
6473
+ updated_at INTEGER NOT NULL,
6474
+ state TEXT DEFAULT '{}'
6475
+ );
6476
+ `);
6477
+ this.sqliteInstance.exec(`
6478
+ CREATE INDEX IF NOT EXISTS idx_sessions_user_id ON sessions(user_id);
6479
+ `);
6480
+ this.initialized = true;
6481
+ } catch (error) {
6482
+ console.error("Error initializing SQLite database:", error);
6483
+ throw error;
6484
+ }
6485
+ }
6486
+ /**
6487
+ * Ensure database is initialized before any operation
6071
6488
  */
6489
+ async ensureInitialized() {
6490
+ if (!this.initialized) {
6491
+ await this.initializeDatabase();
6492
+ }
6493
+ }
6494
+ generateSessionId() {
6495
+ return `session-${Date.now()}-${Math.random().toString(36).substring(2, 9)}`;
6496
+ }
6497
+ async createSession(userId, metadata = {}) {
6498
+ await this.ensureInitialized();
6499
+ const sessionId = this.generateSessionId();
6500
+ const now = /* @__PURE__ */ new Date();
6501
+ const sessionState = new SessionState();
6502
+ const newSessionData = {
6503
+ id: sessionId,
6504
+ userId,
6505
+ messages: [],
6506
+ metadata,
6507
+ createdAt: now,
6508
+ updatedAt: now,
6509
+ state: sessionState.toObject()
6510
+ };
6511
+ const results = await this.db.insert(this.sessionsTable).values(newSessionData).returning();
6512
+ const result = results[0];
6513
+ if (!result) {
6514
+ throw new Error(
6515
+ "Failed to create session, no data returned from insert."
6516
+ );
6517
+ }
6518
+ return {
6519
+ id: result.id,
6520
+ userId: result.userId,
6521
+ messages: Array.isArray(result.messages) ? result.messages : [],
6522
+ metadata: result.metadata || {},
6523
+ state: SessionState.fromObject(result.state || {}),
6524
+ createdAt: result.createdAt,
6525
+ updatedAt: result.updatedAt
6526
+ };
6527
+ }
6528
+ async getSession(sessionId) {
6529
+ await this.ensureInitialized();
6530
+ const results = await this.db.select().from(this.sessionsTable).where(eq3(this.sessionsTable.id, sessionId)).limit(1);
6531
+ const sessionData = results[0];
6532
+ if (!sessionData) {
6533
+ return void 0;
6534
+ }
6535
+ return {
6536
+ id: sessionData.id,
6537
+ userId: sessionData.userId,
6538
+ messages: Array.isArray(sessionData.messages) ? sessionData.messages : [],
6539
+ metadata: sessionData.metadata || {},
6540
+ state: SessionState.fromObject(sessionData.state || {}),
6541
+ createdAt: sessionData.createdAt,
6542
+ updatedAt: sessionData.updatedAt
6543
+ };
6544
+ }
6545
+ async updateSession(session) {
6546
+ await this.ensureInitialized();
6547
+ const updateData = {
6548
+ userId: session.userId,
6549
+ messages: session.messages,
6550
+ metadata: session.metadata,
6551
+ updatedAt: /* @__PURE__ */ new Date(),
6552
+ state: session.state.toObject()
6553
+ };
6554
+ await this.db.update(this.sessionsTable).set(updateData).where(eq3(this.sessionsTable.id, session.id));
6555
+ }
6556
+ async listSessions(userId, options) {
6557
+ await this.ensureInitialized();
6558
+ let query = this.db.select().from(this.sessionsTable).where(eq3(this.sessionsTable.userId, userId));
6559
+ if (options?.limit !== void 0 && options.limit > 0) {
6560
+ query = query.limit(options.limit);
6561
+ }
6562
+ const results = await query;
6563
+ return results.map((sessionData) => ({
6564
+ id: sessionData.id,
6565
+ userId: sessionData.userId,
6566
+ messages: Array.isArray(sessionData.messages) ? sessionData.messages : [],
6567
+ metadata: sessionData.metadata || {},
6568
+ state: SessionState.fromObject(sessionData.state || {}),
6569
+ createdAt: sessionData.createdAt,
6570
+ updatedAt: sessionData.updatedAt
6571
+ }));
6572
+ }
6573
+ async deleteSession(sessionId) {
6574
+ await this.ensureInitialized();
6575
+ await this.db.delete(this.sessionsTable).where(eq3(this.sessionsTable.id, sessionId));
6576
+ }
6072
6577
  async appendEvent(session, event) {
6073
6578
  await this.ensureInitialized();
6074
6579
  if (event.is_partial) {
@@ -6090,11 +6595,6 @@ var PgLiteSessionService = class {
6090
6595
  await this.updateSession(session);
6091
6596
  return event;
6092
6597
  }
6093
- // TODO: Consider if table creation/migration logic is needed here or handled externally (e.g., drizzle-kit migrations)
6094
- // TODO: Implement methods corresponding to Python's append_event, list_events,
6095
- // get_app_state, update_app_state, get_user_state, update_user_state
6096
- // if full parity with Python's DatabaseSessionService is desired.
6097
- // This would require defining corresponding Drizzle schemas for Events, AppState, UserState.
6098
6598
  };
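The block above closes out the session services, including the new SqliteSessionService, which takes a better-sqlite3 Database via config.sqlite, creates the sessions table (WAL mode plus a user_id index) on first use, and mirrors the PgLite service's API. A usage sketch, assuming better-sqlite3 is installed alongside the package (SqliteSessionService itself is re-exported at the package root, as the export hunks below show):

    import Database from "better-sqlite3";
    import { SqliteSessionService } from "@iqai/adk";

    const service = new SqliteSessionService({
      sqlite: new Database("./data/sessions.db"), // parent directory is created if missing; ":memory:" also works
      // skipTableCreation: true,                 // skip the automatic CREATE TABLE / index
    });

    async function demo() {
      const session = await service.createSession("user-123", { channel: "cli" });
      session.state.set("step", 1);               // SessionState, persisted via toObject()
      await service.updateSession(session);
      const recent = await service.listSessions("user-123", { limit: 10 });
      console.log(recent.length, "session(s) for user-123");
    }

    demo().catch(console.error);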
6099
6599
 
6100
6600
  // src/sessions/session-util.ts
@@ -6132,19 +6632,32 @@ import { v4 as uuidv4 } from "uuid";
6132
6632
 
6133
6633
  // src/events/event-actions.ts
6134
6634
  var EventActions = class {
6635
+ /**
6636
+ * If true, it won't call model to summarize function response.
6637
+ * Only used for function_response event.
6638
+ */
6639
+ skipSummarization;
6640
+ /**
6641
+ * Indicates that the event is updating the state with the given delta.
6642
+ */
6643
+ stateDelta = {};
6644
+ /**
6645
+ * Indicates that the event is updating an artifact. key is the filename,
6646
+ * value is the version.
6647
+ */
6648
+ artifactDelta = {};
6649
+ /**
6650
+ * If set, the event transfers to the specified agent.
6651
+ */
6652
+ transferToAgent;
6653
+ /**
6654
+ * The agent is escalating to a higher level agent.
6655
+ */
6656
+ escalate;
6135
6657
  /**
6136
6658
  * Constructor for EventActions
6137
6659
  */
6138
6660
  constructor(options = {}) {
6139
- /**
6140
- * Indicates that the event is updating the state with the given delta.
6141
- */
6142
- this.stateDelta = {};
6143
- /**
6144
- * Indicates that the event is updating an artifact. key is the filename,
6145
- * value is the version.
6146
- */
6147
- this.artifactDelta = {};
6148
6661
  this.skipSummarization = options.skipSummarization;
6149
6662
  this.stateDelta = options.stateDelta || {};
6150
6663
  this.artifactDelta = options.artifactDelta || {};
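EventActions now declares its fields up front while the constructor keeps reading the same options bag. This interacts with the appendEvent implementations earlier in the diff: stateDelta entries are copied into the session's state, except keys prefixed with "_temp_", which are silently skipped. A small sketch of that convention, assuming EventActions is importable from the package root (the truncated export list shown here does not confirm it):

    import { EventActions } from "@iqai/adk"; // assumed root export

    const actions = new EventActions({
      skipSummarization: true,                 // only meaningful for function_response events
      stateDelta: {
        lastTopic: "billing",                  // persisted by appendEvent
        _temp_scratch: "draft reply",          // dropped: keys starting with "_temp_" are skipped
      },
    });
    console.log(actions.stateDelta);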
@@ -6155,6 +6668,40 @@ var EventActions = class {
6155
6668
 
6156
6669
  // src/events/event.ts
6157
6670
  var Event = class _Event extends LLMResponse {
6671
+ /**
6672
+ * The invocation ID of the event.
6673
+ */
6674
+ invocationId = "";
6675
+ /**
6676
+ * 'user' or the name of the agent, indicating who appended the event to the session.
6677
+ */
6678
+ author;
6679
+ /**
6680
+ * The actions taken by the agent.
6681
+ */
6682
+ actions = new EventActions();
6683
+ /**
6684
+ * Set of ids of the long running function calls.
6685
+ * Agent client will know from this field about which function call is long running.
6686
+ * Only valid for function call event.
6687
+ */
6688
+ longRunningToolIds;
6689
+ /**
6690
+ * The branch of the event.
6691
+ * The format is like agent_1.agent_2.agent_3, where agent_1 is the parent of
6692
+ * agent_2, and agent_2 is the parent of agent_3.
6693
+ * Branch is used when multiple sub-agent shouldn't see their peer agents'
6694
+ * conversation history.
6695
+ */
6696
+ branch;
6697
+ /**
6698
+ * The unique identifier of the event.
6699
+ */
6700
+ id = "";
6701
+ /**
6702
+ * The timestamp of the event.
6703
+ */
6704
+ timestamp;
6158
6705
  /**
6159
6706
  * Constructor for Event
6160
6707
  */
@@ -6181,18 +6728,6 @@ var Event = class _Event extends LLMResponse {
6181
6728
  is_partial: partial,
6182
6729
  raw_response
6183
6730
  });
6184
- /**
6185
- * The invocation ID of the event.
6186
- */
6187
- this.invocationId = "";
6188
- /**
6189
- * The actions taken by the agent.
6190
- */
6191
- this.actions = new EventActions();
6192
- /**
6193
- * The unique identifier of the event.
6194
- */
6195
- this.id = "";
6196
6731
  this.invocationId = invocationId;
6197
6732
  this.author = author;
6198
6733
  this.actions = actions;
@@ -6230,6 +6765,22 @@ var Event = class _Event extends LLMResponse {
6230
6765
 
6231
6766
  // src/runners.ts
6232
6767
  var Runner = class {
6768
+ /**
6769
+ * The app name of the runner.
6770
+ */
6771
+ appName;
6772
+ /**
6773
+ * The root agent to run.
6774
+ */
6775
+ agent;
6776
+ /**
6777
+ * The session service for the runner.
6778
+ */
6779
+ sessionService;
6780
+ /**
6781
+ * The memory service for the runner.
6782
+ */
6783
+ memoryService;
6233
6784
  /**
6234
6785
  * Initializes the Runner.
6235
6786
  */
@@ -6413,6 +6964,7 @@ export {
6413
6964
  SequentialAgent,
6414
6965
  SessionState,
6415
6966
  sessions_exports as Sessions,
6967
+ SqliteSessionService,
6416
6968
  StreamingMode,
6417
6969
  ToolContext,
6418
6970
  tools_exports as Tools,