@xalia/agent 0.6.7 → 0.6.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. package/.env.development +1 -0
  2. package/dist/agent/src/agent/agent.js +100 -77
  3. package/dist/agent/src/agent/agentUtils.js +21 -16
  4. package/dist/agent/src/agent/compressingContextManager.js +10 -14
  5. package/dist/agent/src/agent/context.js +101 -127
  6. package/dist/agent/src/agent/contextWithWorkspace.js +133 -0
  7. package/dist/agent/src/agent/imageGenLLM.js +0 -6
  8. package/dist/agent/src/agent/imageGenerator.js +2 -10
  9. package/dist/agent/src/agent/openAILLMStreaming.js +5 -2
  10. package/dist/agent/src/agent/sudoMcpServerManager.js +21 -9
  11. package/dist/agent/src/chat/client/chatClient.js +35 -2
  12. package/dist/agent/src/chat/client/connection.js +6 -1
  13. package/dist/agent/src/chat/client/sessionClient.js +0 -7
  14. package/dist/agent/src/chat/data/dbSessionMessages.js +11 -0
  15. package/dist/agent/src/chat/protocol/messages.js +4 -0
  16. package/dist/agent/src/chat/server/chatContextManager.js +149 -139
  17. package/dist/agent/src/chat/server/imageGeneratorTools.js +19 -8
  18. package/dist/agent/src/chat/server/openAIRouterLLM.js +114 -0
  19. package/dist/agent/src/chat/server/openSession.js +57 -58
  20. package/dist/agent/src/chat/server/server.js +6 -2
  21. package/dist/agent/src/chat/server/sessionRegistry.js +65 -6
  22. package/dist/agent/src/chat/server/sessionRegistry.test.js +1 -1
  23. package/dist/agent/src/chat/server/tools.js +52 -17
  24. package/dist/agent/src/test/chatContextManager.test.js +31 -29
  25. package/dist/agent/src/test/clientServerConnection.test.js +1 -2
  26. package/dist/agent/src/test/compressingContextManager.test.js +22 -36
  27. package/dist/agent/src/test/context.test.js +55 -17
  28. package/dist/agent/src/test/contextTestTools.js +87 -0
  29. package/dist/agent/src/tool/chatMain.js +22 -8
  30. package/package.json +1 -1
  31. package/scripts/test_chat +3 -0
  32. package/src/agent/agent.ts +170 -125
  33. package/src/agent/agentUtils.ts +31 -20
  34. package/src/agent/compressingContextManager.ts +13 -44
  35. package/src/agent/context.ts +165 -159
  36. package/src/agent/contextWithWorkspace.ts +162 -0
  37. package/src/agent/imageGenLLM.ts +0 -8
  38. package/src/agent/imageGenerator.ts +3 -18
  39. package/src/agent/openAILLMStreaming.ts +20 -3
  40. package/src/agent/sudoMcpServerManager.ts +41 -20
  41. package/src/chat/client/chatClient.ts +47 -3
  42. package/src/chat/client/connection.ts +11 -1
  43. package/src/chat/client/sessionClient.ts +0 -8
  44. package/src/chat/data/dataModels.ts +6 -0
  45. package/src/chat/data/dbSessionMessages.ts +34 -0
  46. package/src/chat/protocol/messages.ts +35 -8
  47. package/src/chat/server/chatContextManager.ts +210 -197
  48. package/src/chat/server/connectionManager.ts +1 -1
  49. package/src/chat/server/imageGeneratorTools.ts +31 -18
  50. package/src/chat/server/openAIRouterLLM.ts +171 -0
  51. package/src/chat/server/openSession.ts +87 -100
  52. package/src/chat/server/server.ts +6 -2
  53. package/src/chat/server/sessionFileManager.ts +5 -5
  54. package/src/chat/server/sessionRegistry.test.ts +0 -1
  55. package/src/chat/server/sessionRegistry.ts +100 -4
  56. package/src/chat/server/tools.ts +73 -35
  57. package/src/test/agent.test.ts +8 -7
  58. package/src/test/chatContextManager.test.ts +42 -37
  59. package/src/test/clientServerConnection.test.ts +0 -2
  60. package/src/test/compressingContextManager.test.ts +29 -34
  61. package/src/test/context.test.ts +59 -15
  62. package/src/test/contextTestTools.ts +95 -0
  63. package/src/tool/chatMain.ts +26 -12
  64. package/test_data/dummyllm_script_image_gen.json +13 -23
  65. package/test_data/dummyllm_script_image_gen_fe.json +29 -0
@@ -0,0 +1 @@
1
+ LLM_API_KEY_MAP={"openrouter":"<REDACTED>","together":"<REDACTED>","openai":"<REDACTED>"}
SECURITY NOTE (review): this published version adds live API keys (OpenRouter, Together, and OpenAI) to `.env.development` inside the package tarball. The original key values have been redacted above; they are publicly exposed and must be rotated immediately, and `.env*` files should be excluded from publication (e.g. via the package.json `files` field or `.npmignore`).
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.Agent = exports.DEFAULT_LLM_URL = exports.AgentProfile = void 0;
3
+ exports.Agent = exports.AgentEx = exports.DEFAULT_LLM_URL = exports.AgentProfile = void 0;
4
4
  exports.createUserMessage = createUserMessage;
5
5
  exports.createUserMessageEnsure = createUserMessageEnsure;
6
6
  exports.completionToAssistantMessageParam = completionToAssistantMessageParam;
@@ -12,49 +12,25 @@ const mcpServerManager_1 = require("./mcpServerManager");
12
12
  exports.DEFAULT_LLM_URL = "http://localhost:5001/v1";
13
13
  const MAX_TOOL_CALL_RESPONSE_LENGTH = 4000;
14
14
  const logger = (0, sdk_2.getLogger)();
15
- class Agent {
16
- constructor(eventHandler, mcpServerManager, llm, contextManager) {
15
+ class AgentEx {
16
+ constructor(mcpServerManager, llm) {
17
17
  /// The full list of tools, ready to pass to the LLM
18
18
  this.tools = [];
19
19
  /// Handlers for "agent" (or "built-in") tools. These do not require
20
20
  /// approval from the user.
21
21
  this.agentTools = new Map();
22
- this.eventHandler = eventHandler;
23
22
  this.mcpServerManager = mcpServerManager;
24
23
  this.llm = llm;
25
- this.contextManager = contextManager;
26
- }
27
- static initializeWithLLM(eventHandler, llm, contextManager, mcpServerManager) {
28
- return new Agent(eventHandler, mcpServerManager ?? new mcpServerManager_1.McpServerManager(), llm, contextManager);
29
24
  }
30
25
  async shutdown() {
31
26
  return this.mcpServerManager.shutdown();
32
27
  }
33
- getAgentProfile() {
34
- return new sdk_2.AgentProfile(this.llm.getModel(), this.getSystemPrompt(), this.mcpServerManager.getMcpServerSettings());
35
- }
36
- getConversation() {
37
- const llmMessages = this.contextManager.getLLMContext();
38
- (0, assert_1.strict)(llmMessages[0].role === "system", "first message must have system role");
39
- return [...llmMessages.slice(1)];
40
- }
41
28
  getMcpServerManager() {
42
29
  return this.mcpServerManager;
43
30
  }
44
- /**
45
- * Like `userMessage`, but can be awaited, and accepts the user name.
46
- */
47
- async userMessageEx(msg, imageB64, name) {
48
- const userMessage = createUserMessage(msg, imageB64, name);
49
- if (!userMessage) {
50
- return undefined;
51
- }
52
- return this.userMessageRaw(userMessage);
53
- }
54
- async userMessageRaw(userMessage) {
55
- return this.userMessagesRaw([userMessage]);
56
- }
57
- async userMessagesRaw(userMessages) {
31
+ // TODO: rename
32
+ async userMessagesRaw(contextTx, eventHandler) {
33
+ // New user messages have already been added to the `contextTx`.
58
34
  // Image and audio handling
59
35
  //
60
36
  // `ChatCompletions` (responses from the LLM) can contain `audio` and
@@ -63,17 +39,12 @@ class Agent {
63
39
  //
64
40
  // As such, our current approach is to extract all assistant-generated
65
41
  // media and return it separately.
66
- // Note: `getLLMContext` returns a copy to we can mutate this array
67
- const context = this.contextManager.getLLMContext();
68
- const newMessagesIdx = context.length;
69
- // Add the new user messages
70
- context.push(...userMessages);
71
42
  const images = [];
72
43
  // We convert the `ChatCompletionsMessage` into a
73
44
  // `ChatCompletionAssistantMessageParam` and extract image data.
74
- let completion = await this.chatCompletion(context);
75
- let message = this.processCompletion(completion, images);
76
- context.push(message);
45
+ let completion = await this.chatCompletion(contextTx.getLLMContext(), eventHandler);
46
+ let message = this.processCompletion(completion, images, eventHandler);
47
+ contextTx.addMessage(message);
77
48
  // While there are tool calls to make, invoke them and loop
78
49
  while (message.tool_calls && message.tool_calls.length > 0) {
79
50
  // TODO: Execute all tool calls in parallel
@@ -83,8 +54,7 @@ class Agent {
83
54
  // Execute the tool call, add the result to the context as an LLM
84
55
  // message, and record the index of the message alongside the result in
85
56
  // `toolCallResults`.
86
- const result = await this.doToolCall(toolCall);
87
- toolCallResults.push([context.length, result]);
57
+ const result = await this.doToolCall(toolCall, eventHandler);
88
58
  const toolResult = {
89
59
  role: "tool",
90
60
  tool_call_id: toolCall.id,
@@ -94,7 +64,8 @@ class Agent {
94
64
  ? { structuredContent: result.structuredContent }
95
65
  : {}),
96
66
  };
97
- context.push(toolResult);
67
+ const toolResultHandle = contextTx.addMessage(toolResult);
68
+ toolCallResults.push([toolResultHandle, result]);
98
69
  // If the tool call requested that its args be redacted, this can be
99
70
  // done now - before the next LLM invocation.
100
71
  if (result.overwriteArgs) {
@@ -105,16 +76,16 @@ class Agent {
105
76
  }
106
77
  // Now that any args have been overwritten, signal the event handler of
107
78
  // the previous completion.
108
- this.eventHandler.onCompletion(message);
79
+ eventHandler.onCompletion(message);
109
80
  // Get a new completion using the untouched tool call results. Note
110
81
  // that, since we are deferring the `onToolCallResult` calls (so they
111
82
  // can be redacted), we must take care that the errors in
112
83
  // `chatCompletion` do not disrupt this, so the caller has a consistent
113
84
  // view of the conversation state.
114
85
  try {
115
- completion = await this.chatCompletion(context); // CAN THROW
116
- message = this.processCompletion(completion, images);
117
- context.push(message);
86
+ completion = await this.chatCompletion(contextTx.getLLMContext(), eventHandler);
87
+ message = this.processCompletion(completion, images, eventHandler);
88
+ contextTx.addMessage(message);
118
89
  }
119
90
  finally {
120
91
  // Now that the tool call results have been passed to the LLM, perform
@@ -122,44 +93,23 @@ class Agent {
122
93
  // messages to the event handler - note, we want to do this even if an
123
94
  // error occurred, so that the caller has an up-to-date picture of the
124
95
  // context state when the error occurred.
125
- toolCallResults.forEach(([indexInContext, tcr]) => {
126
- const ctxMsg = context[indexInContext];
96
+ toolCallResults.forEach(([handle, tcr]) => {
97
+ const ctxMsg = contextTx.getMessage(handle);
127
98
  if (tcr.overwriteResponse) {
128
99
  ctxMsg.content = tcr.overwriteResponse;
129
100
  }
130
101
  (0, assert_1.strict)(ctxMsg.role === "tool");
131
- this.eventHandler.onToolCallResult(ctxMsg);
102
+ eventHandler.onToolCallResult(ctxMsg);
132
103
  });
133
104
  // Note, if an error DID occur, the ContextManager does not see any of
134
105
  // the new context.
135
106
  }
136
107
  }
137
108
  // Signal the event handler of the final completion.
138
- this.eventHandler.onCompletion(message);
139
- // Add all new new messages to the context
140
- this.contextManager.addMessages(context.slice(newMessagesIdx));
109
+ eventHandler.onCompletion(message);
141
110
  return { message, images: images.length === 0 ? undefined : images };
142
111
  }
143
- userMessage(msg, imageB64) {
144
- void this.userMessageEx(msg, imageB64);
145
- }
146
- getModel() {
147
- return this.llm.getModel();
148
- }
149
- setModel(model) {
150
- logger.debug(`Set model ${model}`);
151
- this.llm.setModel(model);
152
- }
153
- getSystemPrompt() {
154
- return this.contextManager.getAgentPrompt();
155
- }
156
- /**
157
- * Set the system prompt
158
- */
159
- setSystemPrompt(systemMsg) {
160
- this.contextManager.setAgentPrompt(systemMsg);
161
- }
162
- async chatCompletion(context) {
112
+ async chatCompletion(context, eventHandler) {
163
113
  // Compute the full list of available tools
164
114
  let tools;
165
115
  const mcpTools = this.mcpServerManager.getOpenAITools();
@@ -169,7 +119,7 @@ class Agent {
169
119
  tools = enabledTools;
170
120
  }
171
121
  logger.debug(`[chatCompletion] tools: ${JSON.stringify(tools)}`);
172
- const completion = await this.llm.getConversationResponse(context, tools, this.eventHandler.onAgentMessage.bind(this.eventHandler), this.eventHandler.onReasoning.bind(this.eventHandler));
122
+ const completion = await this.llm.getConversationResponse(context, tools, eventHandler.onAgentMessage.bind(eventHandler), eventHandler.onReasoning.bind(eventHandler));
173
123
  logger.debug(`Received chat completion ${JSON.stringify(completion)}`);
174
124
  return completion;
175
125
  }
@@ -210,7 +160,7 @@ class Agent {
210
160
  * handler, informing the IAgentEventHandler of the result, and returns the
211
161
  * ChatCompletionToolMessageParam to be used in the conversation.
212
162
  */
213
- async doToolCall(toolCall) {
163
+ async doToolCall(toolCall, eventHandler) {
214
164
  // If the tool is an "agent" (internal) tool, we can just execute it.
215
165
  // Otherwise, call the event handler to get permission and invoke the
216
166
  // external tool handler.
@@ -221,7 +171,7 @@ class Agent {
221
171
  const isAgentTool = !!agentTool;
222
172
  if (isAgentTool) {
223
173
  // Internal (agent) tool
224
- if (!(await this.eventHandler.onToolCall(toolCall, true))) {
174
+ if (!(await eventHandler.onToolCall(toolCall, true))) {
225
175
  result = { response: "User denied tool request." };
226
176
  }
227
177
  else {
@@ -234,7 +184,7 @@ class Agent {
234
184
  // tool call data, get approval, and then invoke.
235
185
  const args = JSON.parse(toolCall.function.arguments || "{}");
236
186
  const tc = this.mcpServerManager.verifyToolCall(toolName, args);
237
- if (!(await this.eventHandler.onToolCall(toolCall, false))) {
187
+ if (!(await eventHandler.onToolCall(toolCall, false))) {
238
188
  result = { response: "User denied tool request." };
239
189
  }
240
190
  else {
@@ -269,18 +219,91 @@ class Agent {
269
219
  }
270
220
  return result;
271
221
  }
272
- processCompletion(completion, images) {
222
+ processCompletion(completion, images, eventHandler) {
273
223
  // Add any images into the list, and call the event handler
274
224
  const compMessage = completion.choices[0].message;
275
225
  if (compMessage.images) {
276
226
  for (const image of compMessage.images) {
277
- this.eventHandler.onImage(image);
227
+ eventHandler.onImage(image);
278
228
  images.push(image);
279
229
  }
280
230
  }
281
231
  return completionToAssistantMessageParam(compMessage);
282
232
  }
283
233
  }
234
+ exports.AgentEx = AgentEx;
235
+ /**
236
+ * Higher-level abstraction over AgentEx, which abstracts out the transactions
237
+ * to the context manager.
238
+ */
239
+ class Agent {
240
+ constructor(eventHandler, mcpServerManager, llm, contextManager) {
241
+ this.eventHandler = eventHandler;
242
+ this.contextManager = contextManager;
243
+ this.agentEx = new AgentEx(mcpServerManager, llm);
244
+ }
245
+ static initializeWithLLM(eventHandler, llm, contextManager, mcpServerManager) {
246
+ return new Agent(eventHandler, mcpServerManager ?? new mcpServerManager_1.McpServerManager(), llm, contextManager);
247
+ }
248
+ async shutdown() {
249
+ return this.agentEx.shutdown();
250
+ }
251
+ getAgentProfile() {
252
+ return new sdk_2.AgentProfile(this.agentEx.llm.getModel(), this.getSystemPrompt(), this.agentEx.mcpServerManager.getMcpServerSettings());
253
+ }
254
+ getConversation() {
255
+ const llmMessages = this.contextManager.getLLMContext();
256
+ (0, assert_1.strict)(llmMessages[0].role === "system", "first message must have system role");
257
+ return [...llmMessages.slice(1)];
258
+ }
259
+ getMcpServerManager() {
260
+ return this.agentEx.mcpServerManager;
261
+ }
262
+ /**
263
+ * Like `userMessage`, but can be awaited, and accepts the user name.
264
+ */
265
+ async userMessageEx(msg, imageB64, name) {
266
+ const userMessage = createUserMessage(msg, imageB64, name);
267
+ if (!userMessage) {
268
+ return undefined;
269
+ }
270
+ return this.userMessageRaw(userMessage);
271
+ }
272
+ async userMessageRaw(userMessage) {
273
+ return this.userMessagesRaw([userMessage]);
274
+ }
275
+ async userMessagesRaw(userMessages) {
276
+ const tx = await this.contextManager.startTx(userMessages);
277
+ const result = await this.agentEx.userMessagesRaw(tx, this.eventHandler);
278
+ await this.contextManager.commit(tx);
279
+ return result;
280
+ }
281
+ userMessage(msg, imageB64) {
282
+ void this.userMessageEx(msg, imageB64);
283
+ }
284
+ getModel() {
285
+ return this.agentEx.llm.getModel();
286
+ }
287
+ setModel(model) {
288
+ logger.debug(`Set model ${model}`);
289
+ this.agentEx.llm.setModel(model);
290
+ }
291
+ getSystemPrompt() {
292
+ return this.contextManager.getAgentPrompt();
293
+ }
294
+ /**
295
+ * Set the system prompt
296
+ */
297
+ setSystemPrompt(systemMsg) {
298
+ this.contextManager.setAgentPrompt(systemMsg);
299
+ }
300
+ addAgentToolProvider(toolProvider) {
301
+ return this.agentEx.addAgentToolProvider(toolProvider);
302
+ }
303
+ addAgentTool(tool, handler) {
304
+ this.agentEx.addAgentTool(tool, handler);
305
+ }
306
+ }
284
307
  exports.Agent = Agent;
285
308
  /**
286
309
  * Returns the ChatCompletionMessageParam constructed from (optional) text and
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.createAgentWithoutSkills = createAgentWithoutSkills;
4
4
  exports.createAgentWithSkills = createAgentWithSkills;
5
5
  exports.createAgentFromSkillManager = createAgentFromSkillManager;
6
+ exports.createSpecializedLLM = createSpecializedLLM;
6
7
  exports.createLLM = createLLM;
7
8
  exports.createNonInteractiveAgent = createNonInteractiveAgent;
8
9
  exports.runOneShot = runOneShot;
@@ -15,7 +16,6 @@ const dummyLLM_1 = require("./dummyLLM");
15
16
  const assert_1 = require("assert");
16
17
  const repeatLLM_1 = require("./repeatLLM");
17
18
  const context_1 = require("./context");
18
- const imageGenLLM_1 = require("./imageGenLLM");
19
19
  const logger = (0, sdk_1.getLogger)();
20
20
  async function createAgentWithoutSkills(llmUrl, model, eventHandler, platform, contextManager, llmApiKey, sudomcpConfig, authorizedUrl, stream = false) {
21
21
  // Init SudoMcpServerManager
@@ -46,7 +46,11 @@ async function createAgentFromSkillManager(llmUrl, model, eventHandler, platform
46
46
  logger.debug("[createAgentFromSkillManager] done");
47
47
  return agent;
48
48
  }
49
- async function createLLM(llmUrl, llmApiKey, model, stream = false, platform) {
49
+ /**
50
+ * Interpret the `model` string to create a specialized LLM (dummy, repeat,
51
+ * etc) or return undefined if a specialized LLM has not been requested.
52
+ */
53
+ async function createSpecializedLLM(model, platform) {
50
54
  let llm;
51
55
  if (model && model.startsWith("dummy:")) {
52
56
  llm = await dummyLLM_1.DummyLLM.initFromModelUrl(model, platform);
@@ -54,22 +58,23 @@ async function createLLM(llmUrl, llmApiKey, model, stream = false, platform) {
54
58
  else if (model === "repeat") {
55
59
  llm = new repeatLLM_1.RepeatLLM();
56
60
  }
57
- else if (model == imageGenLLM_1.DEFAULT_IMAGE_GEN_MODEL) {
58
- logger.info("ImageGenLLM");
59
- llm = new imageGenLLM_1.ImageGenLLM(llmApiKey, llmUrl, model);
61
+ return llm;
62
+ }
63
+ async function createLLM(llmUrl, llmApiKey, model, stream = false, platform) {
64
+ let llm = await createSpecializedLLM(model, platform);
65
+ if (llm) {
66
+ return llm;
67
+ }
68
+ // Regular Agent
69
+ if (!llmApiKey) {
70
+ throw new Error("Missing OpenAI API Key");
71
+ }
72
+ logger.debug(`Initializing Agent: ${llmUrl ?? "unknown"} - ${model}`);
73
+ if (stream) {
74
+ llm = new openAILLMStreaming_1.OpenAILLMStreaming(llmApiKey, llmUrl, model);
60
75
  }
61
76
  else {
62
- // Regular Agent
63
- if (!llmApiKey) {
64
- throw new Error("Missing OpenAI API Key");
65
- }
66
- logger.debug(`Initializing Agent: ${llmUrl ?? "unknown"} - ${model}`);
67
- if (stream) {
68
- llm = new openAILLMStreaming_1.OpenAILLMStreaming(llmApiKey, llmUrl, model);
69
- }
70
- else {
71
- llm = new openAILLM_1.OpenAILLM(llmApiKey, llmUrl, model);
72
- }
77
+ llm = new openAILLM_1.OpenAILLM(llmApiKey, llmUrl, model);
73
78
  }
74
79
  (0, assert_1.strict)(llm);
75
80
  return llm;
@@ -7,10 +7,9 @@ exports.createSummary = createSummary;
7
7
  const assert_1 = require("assert");
8
8
  const sdk_1 = require("@xalia/xmcp/sdk");
9
9
  const agent_1 = require("./agent");
10
- const nullPlatform_1 = require("./nullPlatform");
11
- const agentUtils_1 = require("./agentUtils");
12
10
  const context_1 = require("./context");
13
11
  const nullAgentEventHandler_1 = require("./nullAgentEventHandler");
12
+ const contextWithWorkspace_1 = require("./contextWithWorkspace");
14
13
  const logger = (0, sdk_1.getLogger)();
15
14
  /**
16
15
  * System prompt used to generate a conversation summary.
@@ -30,12 +29,11 @@ function createCheckpointMessage(summary) {
30
29
  content: CHECKPOINT_MESSAGE_PREFIX + summary,
31
30
  };
32
31
  }
33
- async function createCompressionAgent(compressionAgentUrl, compressionAgentModel, compressionAgentApiKey) {
34
- const llm = await (0, agentUtils_1.createLLM)(compressionAgentUrl, compressionAgentApiKey, compressionAgentModel, false /* stream */, nullPlatform_1.NULL_PLATFORM);
32
+ function createCompressionAgent(llm) {
35
33
  return agent_1.Agent.initializeWithLLM(nullAgentEventHandler_1.NULL_AGENT_EVENT_HANDLER, llm, new context_1.ContextManager(COMPRESSION_SYSTEM_PROMPT, []));
36
34
  }
37
- async function createSummary(compressionAgentUrl, compressionAgentModel, compressionAgentApiKey, conversation) {
38
- const agent = await createCompressionAgent(compressionAgentUrl, compressionAgentModel, compressionAgentApiKey);
35
+ async function createSummary(llm, conversation) {
36
+ const agent = createCompressionAgent(llm);
39
37
  const agentResp = await agent.userMessageEx(JSON.stringify(conversation));
40
38
  if (!agentResp) {
41
39
  throw new Error("compression agent returned null");
@@ -50,12 +48,10 @@ async function createSummary(compressionAgentUrl, compressionAgentModel, compres
50
48
  * the Agent) is responsible for committing the conversation and triggering
51
49
  * compression.
52
50
  */
53
- class CompressingContextManager extends context_1.ContextManagerWithCommit {
54
- constructor(systemPrompt, messages, compressionAgentUrl, compressionAgentModel, compressionAgentApiKey) {
51
+ class CompressingContextManager extends contextWithWorkspace_1.ContextManagerWithWorkspace {
52
+ constructor(systemPrompt, messages, getLLM) {
55
53
  super(systemPrompt, messages);
56
- this.compressionAgentUrl = compressionAgentUrl;
57
- this.compressionAgentModel = compressionAgentModel;
58
- this.compressionAgentApiKey = compressionAgentApiKey;
54
+ this.getLLM = getLLM;
59
55
  this.compressingMessages = undefined;
60
56
  // Sanity check the conversation form.
61
57
  //
@@ -77,15 +73,15 @@ class CompressingContextManager extends context_1.ContextManagerWithCommit {
77
73
  }
78
74
  }
79
75
  async compress() {
80
- // Only select messages for compression if they have been committed.
81
- const numToCompress = this.getCommittedLength();
76
+ const numToCompress = super.numMessages();
82
77
  const messagesToCompress = this.leadingMessages(numToCompress);
83
78
  (0, assert_1.strict)(messagesToCompress.length === numToCompress);
84
79
  this.compressingMessages = numToCompress;
85
80
  (0, assert_1.strict)(this.compressingMessages > 1, "<2 messages commited in the context");
86
81
  logger.debug(`[CompressingContextManager] start (${String(this.compressingMessages)})`);
87
82
  try {
88
- const summary = await createSummary(this.compressionAgentUrl, this.compressionAgentModel, this.compressionAgentApiKey, messagesToCompress);
83
+ const llm = await this.getLLM();
84
+ const summary = await createSummary(llm, messagesToCompress);
89
85
  logger.debug(`[CompressingContextManager] summary: ${summary}`);
90
86
  // Replace the context `messages` and update `lastCommittedMessage`
91
87
  // index.