@tyvm/knowhow 0.0.35 → 0.0.37
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/agents/tools/aiClient.ts +36 -0
- package/src/agents/tools/lintFile.ts +1 -1
- package/src/agents/tools/list.ts +34 -0
- package/src/agents/tools/ycmd/tools/diagnostics.ts +2 -0
- package/src/ai.ts +5 -4
- package/src/auth/browserLogin.ts +283 -0
- package/src/auth/errors.ts +6 -0
- package/src/auth/spinner.ts +23 -0
- package/src/chat/CliChatService.ts +25 -6
- package/src/chat/modules/AgentModule.ts +1 -2
- package/src/chat/modules/AskModule.ts +1 -2
- package/src/chat/types.ts +14 -4
- package/src/chat-old.ts +446 -0
- package/src/chat.ts +48 -433
- package/src/cli.ts +5 -12
- package/src/embeddings.ts +1 -1
- package/src/index.ts +0 -8
- package/src/login.ts +14 -1
- package/src/microphone.ts +0 -1
- package/src/plugins/downloader/downloader.ts +34 -122
- package/src/services/KnowhowClient.ts +3 -0
- package/src/services/index.ts +1 -2
- package/tests/manual/browser-login/README.md +189 -0
- package/tests/manual/browser-login/test_browser_login_basic.ts +115 -0
- package/tests/manual/browser-login/test_cli_integration.ts +169 -0
- package/tests/manual/browser-login/test_cross_platform_browser.ts +186 -0
- package/tests/manual/browser-login/test_error_scenarios.ts +223 -0
- package/tests/manual/cli/no-env.sh +256 -0
- package/ts_build/src/agents/tools/aiClient.d.ts +2 -0
- package/ts_build/src/agents/tools/aiClient.js +21 -1
- package/ts_build/src/agents/tools/aiClient.js.map +1 -1
- package/ts_build/src/agents/tools/lintFile.js +1 -1
- package/ts_build/src/agents/tools/lintFile.js.map +1 -1
- package/ts_build/src/agents/tools/list.js +32 -0
- package/ts_build/src/agents/tools/list.js.map +1 -1
- package/ts_build/src/agents/tools/ycmd/tools/diagnostics.js +1 -0
- package/ts_build/src/agents/tools/ycmd/tools/diagnostics.js.map +1 -1
- package/ts_build/src/ai.d.ts +1 -1
- package/ts_build/src/ai.js +2 -1
- package/ts_build/src/ai.js.map +1 -1
- package/ts_build/src/auth/browserLogin.d.ts +11 -0
- package/ts_build/src/auth/browserLogin.js +197 -0
- package/ts_build/src/auth/browserLogin.js.map +1 -0
- package/ts_build/src/auth/errors.d.ts +4 -0
- package/ts_build/src/auth/errors.js +13 -0
- package/ts_build/src/auth/errors.js.map +1 -0
- package/ts_build/src/auth/spinner.d.ts +7 -0
- package/ts_build/src/auth/spinner.js +23 -0
- package/ts_build/src/auth/spinner.js.map +1 -0
- package/ts_build/src/chat/CliChatService.d.ts +4 -3
- package/ts_build/src/chat/CliChatService.js +18 -4
- package/ts_build/src/chat/CliChatService.js.map +1 -1
- package/ts_build/src/chat/modules/AgentModule.d.ts +1 -1
- package/ts_build/src/chat/modules/AgentModule.js +1 -2
- package/ts_build/src/chat/modules/AgentModule.js.map +1 -1
- package/ts_build/src/chat/modules/AskModule.js +1 -2
- package/ts_build/src/chat/modules/AskModule.js.map +1 -1
- package/ts_build/src/chat/types.d.ts +5 -3
- package/ts_build/src/chat-old.d.ts +13 -0
- package/ts_build/src/chat-old.js +340 -0
- package/ts_build/src/chat-old.js.map +1 -0
- package/ts_build/src/chat.d.ts +3 -13
- package/ts_build/src/chat.js +38 -331
- package/ts_build/src/chat.js.map +1 -1
- package/ts_build/src/chat2.d.ts +1 -1
- package/ts_build/src/chat2.js +2 -2
- package/ts_build/src/chat2.js.map +1 -1
- package/ts_build/src/cli.js +3 -9
- package/ts_build/src/cli.js.map +1 -1
- package/ts_build/src/embeddings.js.map +1 -1
- package/ts_build/src/index.d.ts +0 -2
- package/ts_build/src/index.js +1 -9
- package/ts_build/src/index.js.map +1 -1
- package/ts_build/src/login.d.ts +1 -1
- package/ts_build/src/login.js +14 -0
- package/ts_build/src/login.js.map +1 -1
- package/ts_build/src/microphone.js.map +1 -1
- package/ts_build/src/plugins/downloader/downloader.d.ts +1 -6
- package/ts_build/src/plugins/downloader/downloader.js +26 -97
- package/ts_build/src/plugins/downloader/downloader.js.map +1 -1
- package/ts_build/src/services/KnowhowClient.js +3 -0
- package/ts_build/src/services/KnowhowClient.js.map +1 -1
- package/ts_build/src/services/index.js +1 -2
- package/ts_build/src/services/index.js.map +1 -1
- package/ts_build/tests/manual/browser-login/test_browser_login_basic.d.ts +2 -0
- package/ts_build/tests/manual/browser-login/test_browser_login_basic.js +108 -0
- package/ts_build/tests/manual/browser-login/test_browser_login_basic.js.map +1 -0
- package/ts_build/tests/manual/browser-login/test_cli_integration.d.ts +2 -0
- package/ts_build/tests/manual/browser-login/test_cli_integration.js +153 -0
- package/ts_build/tests/manual/browser-login/test_cli_integration.js.map +1 -0
- package/ts_build/tests/manual/browser-login/test_cross_platform_browser.d.ts +2 -0
- package/ts_build/tests/manual/browser-login/test_cross_platform_browser.js +159 -0
- package/ts_build/tests/manual/browser-login/test_cross_platform_browser.js.map +1 -0
- package/ts_build/tests/manual/browser-login/test_error_scenarios.d.ts +2 -0
- package/ts_build/tests/manual/browser-login/test_error_scenarios.js +197 -0
- package/ts_build/tests/manual/browser-login/test_error_scenarios.js.map +1 -0
- package/src/agents/vim/vim.ts +0 -152
- package/src/chat2.ts +0 -62
package/src/chat-old.ts
ADDED
@@ -0,0 +1,446 @@
import { ChatCompletionMessageParam } from "openai/resources/chat";
import Ora from "ora";
import editor from "@inquirer/editor";
import { cosineSimilarity } from "./utils";
import {
  EmbeddingBase,
  GptQuestionEmbedding,
  Embeddable,
  ChatInteraction,
} from "./types";
import { Marked } from "./utils";
import { ask } from "./utils";
import { Plugins } from "./plugins/plugins";
import { queryEmbedding, getConfiguredEmbeddingMap } from "./embeddings";
import { services } from "./services/";
import { FlagsService } from "./services/flags";
import { IAgent } from "./agents/interface";
import { Message } from "./clients";
import { recordAudio, voiceToText } from "./microphone";
import { Models } from "./ai";
import { BaseAgent } from "./agents";
import { getConfig } from "./config";
import { TokenCompressor } from "./processors/TokenCompressor";
import { ToolResponseCache } from "./processors/ToolResponseCache";
import { CustomVariables, XmlToolCallProcessor, HarmonyToolProcessor } from "./processors";

enum ChatFlags {
  agent = "agent",
  agents = "agents",
  debug = "debug",
  multi = "multi",
  model = "model",
  search = "search",
  clear = "clear",
  provider = "provider",
  voice = "voice",
}

const Flags = new FlagsService(
  [ChatFlags.agent, ChatFlags.debug, ChatFlags.multi, ChatFlags.voice],
  true
);

const taskRegistry = new Map<string, BaseAgent>();

export async function askEmbedding<E>(promptText: string) {
  const options = ["next", "exit", "embeddings", "use"];
  console.log(`Commands: ${options.join(", ")}`);
  let input = await ask(promptText + ": ", options);
  let answer: EmbeddingBase<any> | undefined;
  let results = new Array<EmbeddingBase>();
  let embedMap = await getConfiguredEmbeddingMap();
  const config = await getConfig();
  const files = Object.keys(embedMap);

  while (input !== "exit") {
    const embeddings = Object.values(embedMap).flat();

    switch (input) {
      case "next":
        answer = results.shift();
        break;
      case "embeddings":
        console.log(files);
        break;
      case "use":
        const searchOptions = ["all", ...files];
        console.log(searchOptions);
        const embeddingName = await ask("Embedding to search: ", searchOptions);
        if (embeddingName === "all") {
          embedMap = await getConfiguredEmbeddingMap();
          break;
        }

        embedMap = { ...{ [embeddingName]: embedMap[embeddingName] } };
        break;
      default:
        results = await queryEmbedding(
          input,
          embeddings,
          config.embeddingModel
        );
        answer = results.shift();
        break;
    }
    if (answer) {
      console.log(
        Marked.parse(
          "### TEXT \n" +
            answer.text +
            "\n### METADATA \n" +
            JSON.stringify(answer.metadata, null, 2)
        )
      );
    }

    input = await ask(promptText + ": ");
  }
}

const ChatModelDefaults = {
  openai: Models.openai.GPT_4o,
  anthropic: Models.anthropic.Sonnet4,
  google: Models.google.Gemini_25_Flash_Preview,
  xai: Models.xai.Grok3Beta,
};
export async function askAI<E extends EmbeddingBase>(
  query: string,
  provider = "openai",
  model = ChatModelDefaults[provider]
) {
  const gptPrompt = `

The user has asked:
${query}

Output Format in Markdown
`;
  if (Flags.enabled("debugger")) {
    console.log(gptPrompt);
  }

  const thread = [
    {
      role: "system",
      content:
        "Helpful Codebase assistant. Answer users questions using the embedding data that is provided with the user's question. You have limited access to the codebase based off of how similar the codebase is to the user's question. You may reference file paths by using the IDs present in the embedding data, but be sure to remove the chunk from the end of the filepaths.",
    },
    { role: "user", content: gptPrompt },
  ] as Message[];

  const { Clients } = services();
  const response = await Clients.createCompletion(provider, {
    messages: thread,
    model,
  });

  return response.choices[0].message.content;
}

export async function getInput(
  question: string,
  options = [],
  chatHistory: ChatInteraction[] = []
): Promise<string> {
  const multiLine = Flags.enabled(ChatFlags.multi);
  const voice = Flags.enabled(ChatFlags.voice);

  let value = "";
  if (voice) {
    value = await voiceToText();
  } else if (multiLine) {
    value = await editor({ message: question });
    Flags.disable(ChatFlags.multi);
  } else {
    const history = chatHistory.map((c) => c.input).reverse();
    value = await ask(question, options, history);
  }

  return value.trim();
}

export async function formatChatInput(
  input: string,
  plugins: string[] = [],
  chatHistory: ChatInteraction[] = []
) {
  const pluginText = await Plugins.callMany(plugins, input);
  const historyMessage = `<PreviousChats>
  This information is provided as historical context and is likely not related to the current task:
  ${JSON.stringify(chatHistory)}
  </PreviousChats>`;
  const fullPrompt = `
  ${historyMessage} \n
  <PluginContext> ${pluginText} </PluginContext>
  <CurrentTask>${input}</CurrentTask>
  `;
  return fullPrompt;
}

export async function chatLoop<E extends GptQuestionEmbedding>(
  aiName: string,
  embeddings: Embeddable<E>[],
  plugins: string[] = []
) {
  const { Agents, Clients } = services();
  let activeAgent = Agents.getAgent("Developer") as BaseAgent;
  let provider = "openai" as keyof typeof Clients.clients;
  let model = ChatModelDefaults[provider];
  const providers = Object.keys(Clients.clients);
  const commands = [
    "agent",
    "agents",
    "clear",
    "debugger",
    "exit",
    "model",
    "multi",
    "provider",
    "search",
    "voice",
  ];
  console.log("Commands: ", commands.join(", "));
  const promptText = () =>
    Flags.enabled(ChatFlags.agent)
      ? `\nAsk ${aiName} ${activeAgent.name}: `
      : `\nAsk ${aiName}: `;

  let chatHistory = new Array<ChatInteraction>();
  let input = await getInput(promptText(), commands, chatHistory);

  let results = "";
  while (input !== "exit") {
    try {
      switch (input) {
        case ChatFlags.agents:
          Flags.enable(ChatFlags.agent);
          const agents = Agents.listAgents();
          console.log(agents);
          const selected = await ask(
            "Which agent would you like to use: ",
            agents
          );
          activeAgent = Agents.getAgent(selected) as BaseAgent;
          model = activeAgent.getModel();
          provider = activeAgent.getProvider() as keyof typeof Clients.clients;
          break;
        case ChatFlags.agent:
          Flags.flip(ChatFlags.agent);
          break;
        case ChatFlags.debug:
          Flags.flip(ChatFlags.debug);
          break;
        case ChatFlags.multi:
          Flags.flip(ChatFlags.multi);
          break;
        case ChatFlags.voice:
          Flags.flip(ChatFlags.voice);
          break;
        case ChatFlags.search:
          await askEmbedding("searching");
          break;
        case ChatFlags.clear:
          chatHistory = [];
          break;
        case ChatFlags.provider:
          console.log(providers);
          provider = await ask(
            `\n\nCurrent Provider: ${provider}\nCurrent Model: ${model}\n\nWhich provider would you like to use: `,
            providers
          );
          model =
            ChatModelDefaults[provider] ||
            (await Clients.getRegisteredModels(provider))[0];

          if (Flags.enabled("agent")) {
            activeAgent.setProvider(provider);
            activeAgent.setModel(model);
          }

          break;
        case ChatFlags.model:
          const models = Clients.getRegisteredModels(provider);
          console.log(models);
          const selectedModel = await ask(
            `\n\nCurrent Provider: ${provider}\nCurrent Model: ${model}\n\nWhich model would you like to use: `,
            models
          );
          model = selectedModel;

          if (Flags.enabled("agent")) {
            activeAgent.setProvider(provider);
            activeAgent.setModel(model);
          }
          break;
        case "attach":
          if (taskRegistry.size > 0) {
            const options = Array.from(taskRegistry.keys());
            const selectedInitialMessage = await ask(
              "Select an agent to attach to:",
              options
            );
            activeAgent = taskRegistry.get(selectedInitialMessage)!;
            console.log(
              `Attached to agent with task: "${selectedInitialMessage}"`
            );
            await startAgent(activeAgent, null, true);
          } else {
            console.log("No detached agents available.");
          }
        case "":
          break;
        default:
          console.log("Thinking...");
          console.log(input);
          const interaction = {
            input,
            output: "",
          } as ChatInteraction;
          if (Flags.enabled("agent")) {
            taskRegistry.set(input, activeAgent);
            await startAgent(activeAgent, {
              initialInput: input,
              plugins,
              chatHistory,
              interaction,
            });
          } else {
            const formattedPrompt = await formatChatInput(
              input,
              plugins,
              chatHistory
            );
            results = await askAI(formattedPrompt, provider, model);
            interaction.output = results;
            console.log(Marked.parse(results || "No response from the AI"));
          }
          console.log("\n\n");
          chatHistory.push(interaction);
          break;
      }
    } catch (e) {
      console.log(e);
    } finally {
      input = await getInput(promptText(), commands, chatHistory);
    }
  }
}

export async function startAgent(
  activeAgent: BaseAgent,
  newTask?: {
    initialInput: string;
    plugins: string[];
    chatHistory: ChatInteraction[];
    interaction: ChatInteraction;
  },
  attach = false
) {
  let done = false;
  let output = "Done";

  if (newTask) {
    const { initialInput, plugins, chatHistory, interaction } = newTask;
    await activeAgent.newTask();
    const formattedPrompt = await formatChatInput(
      initialInput,
      plugins,
      chatHistory
    );
    activeAgent.call(formattedPrompt);

    // Compress tokens of tool responses
    activeAgent.messageProcessor.setProcessors("pre_call", [
      new ToolResponseCache(activeAgent.tools).createProcessor(),

      new TokenCompressor(activeAgent.tools).createProcessor((msg) =>
        Boolean(msg.role === "tool" && msg.tool_call_id)
      ),
      new CustomVariables(activeAgent.tools).createProcessor(),
    ]);

    // Process XML and Harmony tool calls in assistant responses
    activeAgent.messageProcessor.setProcessors("post_call", [
      new XmlToolCallProcessor().createProcessor(),
      new HarmonyToolProcessor().createProcessor(),
    ]);

    if (
      !activeAgent.agentEvents.listenerCount(activeAgent.eventTypes.toolUsed)
    ) {
      activeAgent.agentEvents.on(
        activeAgent.eventTypes.toolUsed,
        (responseMsg) => {
          console.log(` 🔨 Tool used: ${JSON.stringify(responseMsg, null, 2)}`);
        }
      );
    }

    activeAgent.agentEvents.once(activeAgent.eventTypes.done, (doneMsg) => {
      console.log("Agent has finished.");
      done = true;
      taskRegistry.delete(initialInput);
      output = doneMsg || "No response from the AI";
      interaction.output = output;
      console.log(Marked.parse(output));
    });
  }

  // Define available commands
  const commands = ["pause", "unpause", "kill", "detach"];
  const history = [];

  let input = await getInput(
    `Enter command or message for ${activeAgent.name}: `,
    commands,
    history
  );

  history.push(input);

  const donePromise = new Promise<string>((resolve) => {
    activeAgent.agentEvents.on(activeAgent.eventTypes.done, () => {
      done = true;
      resolve("done");
    });
  });

  while (!done) {
    switch (input) {
      case "":
        break;
      case "done":
        output = "Exited agent interaction.";
        break;
      case "pause":
        await activeAgent.pause();
        console.log("Agent paused.");
        break;
      case "unpause":
        await activeAgent.unpause();
        console.log("Agent unpaused.");
        break;
      case "kill":
        await activeAgent.kill();
        console.log("Agent terminated.");
        break;
      case "detach":
        return "Detached from agent";
        break;
      default:
        activeAgent.addPendingUserMessage({ role: "user", content: input });
    }

    input = await Promise.race([
      getInput(
        `Enter command or message for ${activeAgent.name}: `,
        commands,
        history
      ),
      donePromise,
    ]);
  }

  return output;
}
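
For orientation, the sketch below shows one way the restored chat-old.ts entry point might be driven from a caller. It is illustrative only and not part of the package diff: the "./chat-old" import path, the entry-file name, and the empty embeddings/plugins arguments are assumptions; real callers populate these from the configured embedding map and plugin list.

// Hypothetical caller for the chatLoop export reconstructed above.
// Assumptions: this file sits next to chat-old.ts, and an empty embedding
// set / plugin list is acceptable for a bare interactive session.
import { chatLoop } from "./chat-old";

async function main() {
  // aiName labels the prompt; embeddings seed similarity search; plugins add context.
  await chatLoop("knowhow", [], []);
}

main().catch(console.error);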