@kishlay42/moth-ai 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +0 -0
- package/dist/agent/orchestrator.js +97 -0
- package/dist/agent/types.js +1 -0
- package/dist/config/configManager.js +62 -0
- package/dist/config/keychain.js +20 -0
- package/dist/context/ignore.js +27 -0
- package/dist/context/manager.js +62 -0
- package/dist/context/scanner.js +41 -0
- package/dist/context/types.js +1 -0
- package/dist/editing/patcher.js +37 -0
- package/dist/index.js +390 -0
- package/dist/llm/claudeAdapter.js +47 -0
- package/dist/llm/cohereAdapter.js +42 -0
- package/dist/llm/factory.js +30 -0
- package/dist/llm/geminiAdapter.js +55 -0
- package/dist/llm/openAIAdapter.js +45 -0
- package/dist/llm/types.js +1 -0
- package/dist/planning/todoManager.js +23 -0
- package/dist/tools/definitions.js +187 -0
- package/dist/tools/factory.js +196 -0
- package/dist/tools/registry.js +21 -0
- package/dist/tools/types.js +1 -0
- package/dist/ui/App.js +182 -0
- package/dist/ui/ProfileManager.js +51 -0
- package/dist/ui/components/FlameLogo.js +40 -0
- package/dist/ui/components/WordFlame.js +10 -0
- package/dist/ui/components/WordMoth.js +10 -0
- package/dist/ui/wizards/LLMRemover.js +68 -0
- package/dist/ui/wizards/LLMWizard.js +149 -0
- package/dist/utils/paths.js +22 -0
- package/dist/utils/text.js +49 -0
- package/docs/architecture.md +63 -0
- package/docs/core_logic.md +53 -0
- package/docs/index.md +30 -0
- package/docs/llm_integration.md +49 -0
- package/docs/ui_components.md +44 -0
- package/package.json +70 -0
package/dist/index.js
ADDED
|
@@ -0,0 +1,390 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import { Command } from 'commander';
|
|
3
|
+
import React from 'react';
|
|
4
|
+
import { render } from 'ink';
|
|
5
|
+
import { App } from './ui/App.js';
|
|
6
|
+
import { findProjectRoot } from './utils/paths.js';
|
|
7
|
+
import { createLLMClient } from './llm/factory.js';
|
|
8
|
+
import { loadConfig, saveConfig, addProfile, setActiveProfile, removeProfile, setUsername } from './config/configManager.js';
|
|
9
|
+
import { setApiKey, deleteApiKey } from './config/keychain.js';
|
|
10
|
+
// Strict Mode: Prompts must come from UI
|
|
11
|
+
import { ProjectScanner } from './context/scanner.js';
|
|
12
|
+
import { ContextManager } from './context/manager.js';
|
|
13
|
+
// Root commander instance. Running `moth` with no subcommand drops straight
// into an interactive chat session.
const program = new Command();
program
    .name('moth')
    .description('Local, LLM-agnostic code intelligence CLI')
    // Fix: keep the reported version in sync with the published package (1.0.1);
    // it previously reported 1.0.0.
    .version('1.0.1', '-v, --version')
    // No arguments allowed on top level.
    .action(() => {
        // Fire-and-forget is acceptable here: startChatSession handles its own
        // fatal errors via process.exit. `void` marks the promise as deliberately
        // unawaited.
        void startChatSession();
    });
|
|
22
|
+
import { TodoManager } from './planning/todoManager.js';
|
|
23
|
+
// Helper that mounts the Ink UI for a given command.
// `args.username` is lifted out and passed as its own prop; everything else is
// forwarded under `args`.
const renderUI = (command, args) => {
    const { username, ...cmdArgs } = args;
    // Fresh per-invocation todo list; a persistent app would load saved state here.
    const todoManager = new TodoManager();
    render(React.createElement(App, { command, args: cmdArgs, todoManager, username }));
};
|
|
29
|
+
/**
 * Entry point for an interactive chat session.
 *
 * Flow:
 *   1. No profiles at all        -> run the first-time setup wizard.
 *   2. Profiles but none active  -> run the profile selector.
 *   3. Build the LLM client from the active profile and mount the chat UI.
 *
 * Exits the process on cancellation or unrecoverable initialization errors.
 */
const startChatSession = async () => {
    // No prompt handling here.
    const prompt = "";
    let config = loadConfig();
    let activeProfileName = config.activeProfile;
    // 1. If no profiles at all -> Force Wizard
    if (!activeProfileName || config.profiles.length === 0) {
        if (config.profiles.length === 0) {
            console.log("No profiles found. Setting up your first LLM profile...");
            // Wrap the Ink wizard in a promise so we can await user completion.
            await new Promise((resolve) => {
                const { unmount } = render(React.createElement(LLMWizard, {
                    onComplete: async (resultConfig) => {
                        // Reload from disk rather than reusing the stale outer
                        // `config` captured before the wizard ran.
                        let newConfig = loadConfig();
                        newConfig = addProfile(newConfig, resultConfig);
                        if (resultConfig.apiKey) {
                            // Key is stored separately (keychain), not in the config file.
                            await setApiKey(resultConfig.name, resultConfig.apiKey);
                        }
                        newConfig = setActiveProfile(newConfig, resultConfig.name);
                        saveConfig(newConfig);
                        console.log(`Profile '${resultConfig.name}' created.`);
                        setTimeout(() => {
                            unmount();
                            resolve();
                        }, 500); // Brief pause to see success
                    },
                    onCancel: () => {
                        console.log('Setup cancelled.');
                        process.exit(0);
                    }
                }));
            });
            // Reload config and proceed
            config = loadConfig();
            activeProfileName = config.activeProfile;
        }
        else if (!activeProfileName) {
            // 2. If profiles exist but none active -> Force Selector
            console.log("No active profile selected. Please select one:");
            await new Promise((resolve) => {
                const { unmount } = render(React.createElement(ProfileManager, {
                    config,
                    onSelect: (selected) => {
                        // already saved active in ProfileManager logic, but let's just resolve
                        // NOTE(review): assumes ProfileManager persists the selection
                        // itself — confirm against ProfileManager.js.
                        unmount();
                        resolve();
                    }
                }));
            });
            // Reload
            config = loadConfig();
            activeProfileName = config.activeProfile;
        }
    }
    // 3. Main Chat Flow
    const activeProfile = config.profiles.find(p => p.name === activeProfileName);
    if (!activeProfile) {
        console.error(`Active profile '${activeProfileName}' not found (or data corruption).`);
        process.exit(1);
    }
    try {
        const client = await createLLMClient(activeProfile);
        renderUI('run', {
            prompt,
            client,
            profile: activeProfile,
            username: config.username
        });
    }
    catch (error) {
        console.error('Failed to initialize LLM:', error.message);
        process.exit(1);
    }
};
|
|
102
|
+
// Explicit run command is still good to keep
program
    .command('run')
    .description('Run the main loop')
    .argument('[prompt...]', 'Initial prompt')
    // NOTE(review): the [prompt...] argument is accepted by commander but
    // startChatSession ignores its arguments (prompt is hard-coded to "") —
    // confirm whether the initial prompt should be forwarded.
    .action(startChatSession);
// Parent command for the profile-management subcommands registered below
// (list / add / use / remove).
const llm = program.command('llm').description('Manage LLM profiles');
|
|
109
|
+
// Top-level aliases for convenience and error prevention
// `moth use [name]` — behaves like `moth llm use`, but falls back to the
// interactive selector when no name is given.
program.command('use')
    .description('Alias for "llm use"')
    .argument('[name]', 'Profile name')
    .action((name) => {
        const current = loadConfig();
        // Interactive selection when no profile name was supplied.
        if (!name) {
            console.log("No active profile selected. Please select one:");
            render(React.createElement(ProfileManager, {
                config: current,
                onSelect: () => {
                    // onSelect saves internally
                    process.exit(0);
                }
            }));
            return;
        }
        saveConfig(setActiveProfile(current, name));
        console.log(`Active profile set to '${name}'.`);
    });
|
|
130
|
+
// `moth list` — same UI as `moth llm list`.
program.command('list')
    .description('Alias for "llm list"')
    .action(() => {
        render(React.createElement(ProfileManager, { config: loadConfig() }));
    });
|
|
136
|
+
// `moth remove [name]` — deletes a profile; interactive picker when no name.
program.command('remove')
    .description('Alias for "llm remove"')
    .argument('[name]', 'Profile name')
    .action(async (name) => {
        const current = loadConfig();
        if (!name) {
            // Interactive removal UI; exits the process when done.
            render(React.createElement(LLMRemover, {
                config: current,
                onExit: () => process.exit(0)
            }));
            return;
        }
        saveConfig(removeProfile(current, name));
        // Also drop the stored credential for this profile.
        await deleteApiKey(name);
        console.log(`Profile '${name}' removed.`);
    });
|
|
153
|
+
// Alias for "llm add". As an alias its behavior must mirror that command.
program.command('add')
    .description('Alias for "llm add"')
    .option('-n, --name <name>', 'Profile name')
    .option('-p, --provider <provider>', 'Adapter type')
    .option('-m, --model <model>', 'Model name')
    .option('-u, --url <url>', 'Base URL')
    .option('-k, --key <key>', 'API Key')
    .action(async (options) => {
        // Wizard mode when no identifying flags were passed.
        if (!options.name && !options.provider && !options.model) {
            render(React.createElement(LLMWizard, {
                onComplete: async (resultConfig) => {
                    let config = loadConfig();
                    config = addProfile(config, resultConfig);
                    if (resultConfig.apiKey) {
                        await setApiKey(resultConfig.name, resultConfig.apiKey);
                    }
                    // Consistency fix: "llm add" only activates the new profile
                    // when it is the first one; this alias previously called
                    // setActiveProfile unconditionally, silently switching the
                    // user's active profile on every add.
                    if (config.profiles.length === 1) {
                        config = setActiveProfile(config, resultConfig.name);
                    }
                    saveConfig(config);
                    console.log(`Profile '${resultConfig.name}' created.`);
                    // Chain to main chat
                    process.exit(0); // For now, just exit as per current wizard
                },
                onCancel: () => {
                    console.log('Setup cancelled.');
                    process.exit(0);
                }
            }));
            return;
        }
        // Flag mode — strict validation, mirroring "llm add".
        if (!options.name || !options.provider || !options.model) {
            console.error('Error: When using CLI flags, --name, --provider, and --model are required.');
            process.exit(1);
        }
        let config = loadConfig();
        const validProviders = ['openai-compatible', 'ollama', 'claude-native', 'gemini-native', 'cohere-native'];
        if (!validProviders.includes(options.provider)) {
            console.error(`Invalid provider. Must be one of: ${validProviders.join(', ')}`);
            process.exit(1);
        }
        if (options.provider === 'openai-compatible' && !options.url) {
            console.error('Base URL (--url) is required for openai-compatible adapter.');
            process.exit(1);
        }
        config = addProfile(config, {
            name: options.name,
            provider: options.provider,
            model: options.model,
            baseUrl: options.url
        });
        if (options.key) {
            await setApiKey(options.name, options.key);
        }
        // First profile automatically becomes the active one.
        if (config.profiles.length === 1) {
            config = setActiveProfile(config, options.name);
        }
        saveConfig(config);
        console.log(`Profile '${options.name}' added.`);
    });
|
|
213
|
+
import { ProfileManager } from './ui/ProfileManager.js';
|
|
214
|
+
import { LLMWizard } from './ui/wizards/LLMWizard.js';
|
|
215
|
+
// `moth llm list` — show all configured profiles in the Ink profile manager.
llm.command('list')
    .description('List all profiles')
    .action(() => {
        render(React.createElement(ProfileManager, { config: loadConfig() }));
    });
|
|
221
|
+
// `moth llm add` — create a profile, either interactively (wizard) or via flags.
llm.command('add')
    .description('Add a new profile')
    .option('-n, --name <name>', 'Profile name')
    .option('-p, --provider <provider>', 'Adapter type')
    .option('-m, --model <model>', 'Model name')
    .option('-u, --url <url>', 'Base URL')
    .option('-k, --key <key>', 'API Key')
    .action(async (options) => {
        // If no options provided, launch wizard
        if (!options.name && !options.provider && !options.model) {
            render(React.createElement(LLMWizard, {
                onComplete: async (resultConfig) => {
                    let config = loadConfig();
                    config = addProfile(config, resultConfig);
                    if (resultConfig.apiKey) {
                        // API key goes to the OS keychain, not the config file.
                        await setApiKey(resultConfig.name, resultConfig.apiKey);
                    }
                    // The very first profile automatically becomes the active one.
                    if (config.profiles.length === 1) {
                        config = setActiveProfile(config, resultConfig.name);
                    }
                    saveConfig(config);
                    console.log(`Profile '${resultConfig.name}' added successfully!`);
                    process.exit(0);
                },
                onCancel: () => {
                    console.log('Setup cancelled.');
                    process.exit(0);
                }
            }));
            return;
        }
        // Legacy CLI mode - strict validation
        if (!options.name || !options.provider || !options.model) {
            console.error('Error: When using CLI flags, --name, --provider, and --model are required.');
            console.error('Run "moth llm add" without arguments for the interactive wizard.');
            process.exit(1);
        }
        let config = loadConfig();
        // strict validation
        const validProviders = ['openai-compatible', 'ollama', 'claude-native', 'gemini-native', 'cohere-native'];
        if (!validProviders.includes(options.provider)) {
            console.error(`Invalid provider. Must be one of: ${validProviders.join(', ')}`);
            process.exit(1);
        }
        if (options.provider === 'openai-compatible' && !options.url) {
            console.error('Base URL (--url) is required for openai-compatible adapter.');
            process.exit(1);
        }
        config = addProfile(config, {
            name: options.name,
            provider: options.provider,
            model: options.model,
            baseUrl: options.url
        });
        if (options.key) {
            await setApiKey(options.name, options.key);
        }
        // Set as active if it's the first one
        if (config.profiles.length === 1) {
            config = setActiveProfile(config, options.name);
        }
        saveConfig(config);
        console.log(`Profile '${options.name}' added.`);
    });
|
|
285
|
+
// `moth llm use <name>` — persist the named profile as the active one.
llm.command('use')
    .description('Set active profile')
    .argument('<name>', 'Profile name')
    .action((name) => {
        const updated = setActiveProfile(loadConfig(), name);
        saveConfig(updated);
        console.log(`Active profile set to '${name}'.`);
    });
|
|
294
|
+
import { LLMRemover } from './ui/wizards/LLMRemover.js';
|
|
295
|
+
// `moth llm remove [name]` — delete a profile; interactive picker when no name.
llm.command('remove')
    .description('Remove a profile')
    .argument('[name]', 'Profile name')
    .action(async (name) => {
        const current = loadConfig();
        if (!name) {
            // Interactive removal UI; exits the process when done.
            render(React.createElement(LLMRemover, {
                config: current,
                onExit: () => process.exit(0)
            }));
            return;
        }
        saveConfig(removeProfile(current, name));
        // Also drop the stored credential for this profile.
        await deleteApiKey(name);
        console.log(`Profile '${name}' removed.`);
    });
|
|
312
|
+
// `moth config name <name>` — persist the user's display name.
program
    .command('config')
    .description('Manage configuration')
    .command('name')
    .description('Set your display name')
    .argument('<name>', 'Display name')
    .action((name) => {
        saveConfig(setUsername(loadConfig(), name));
        console.log(`Display name set to '${name}'.`);
    });
|
|
324
|
+
const context = program.command('context').description('Manage context');
// `moth context scan` — list the files the scanner would include.
context.command('scan')
    .description('Scan project files respecting .gitignore')
    .action(async () => {
        const root = findProjectRoot();
        if (!root) {
            console.error('Project root not found.');
            process.exit(1);
        }
        console.log(`Scanning project at: ${root}`);
        const files = await new ProjectScanner(root).scan();
        console.log(`Found ${files.length} files:`);
        // Only preview the first 20 entries to keep output readable.
        for (const f of files.slice(0, 20)) {
            console.log(` - ${f}`);
        }
        if (files.length > 20) {
            console.log(`... and ${files.length - 20} more.`);
        }
    });
|
|
342
|
+
// `moth context gather <query...>` — debug view of context relevance scoring.
context.command('gather')
    .description('Gather context for a query (debug)')
    .argument('<query...>', 'Query to score files against')
    .action(async (queryParts) => {
        const query = queryParts.join(' ');
        const root = findProjectRoot();
        if (!root) {
            console.error('Project root not found.');
            process.exit(1);
        }
        console.log(`Gathering context for: "${query}"`);
        const result = await new ContextManager(root).gather({ query });
        console.log(`Top 10 Relevant Files:`);
        for (const f of result.files.slice(0, 10)) {
            console.log(`[${f.relevance.toFixed(1)}] ${f.tier.toUpperCase()} - ${f.path}`);
        }
    });
|
|
360
|
+
import { Patcher } from './editing/patcher.js';
|
|
361
|
+
import * as fs from 'fs/promises';
|
|
362
|
+
const patchCmd = program.command('patch').description('Manage patches');
// `moth patch apply <target> <patchFile>` — apply a unified diff to a file.
patchCmd.command('apply')
    .description('Apply a diff file to a target file')
    .argument('<target>', 'Target file path')
    .argument('<patchFile>', 'Path to file containing unified diff')
    .action(async (target, patchFile) => {
        const root = findProjectRoot();
        if (!root) {
            console.error('Project root not found.');
            process.exit(1);
        }
        try {
            const patchContent = await fs.readFile(patchFile, 'utf8');
            const patcher = new Patcher(root);
            const success = await patcher.applyPatch(target, patchContent);
            if (success) {
                console.log(`Successfully patched ${target}`);
            }
            else {
                // Fix: report failure on stderr (was console.log), consistent
                // with every other error path in this CLI.
                console.error(`Failed to patch ${target}`);
                process.exit(1);
            }
        }
        catch (e) {
            console.error('Error:', e.message);
            process.exit(1);
        }
    });
// Kick off argument parsing; commander dispatches to the matching action above.
program.parse(process.argv);
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
/**
 * Minimal Anthropic Messages API client implementing the unified
 * `chat(messages) -> string` interface.
 */
export class ClaudeClient {
    apiKey;
    model;
    maxTokens;
    /**
     * @param {string} apiKey - Anthropic API key (sent via the x-api-key header).
     * @param {string} model - Anthropic model identifier.
     * @param {number} [maxTokens=1024] - Response token cap. Previously a
     *   hard-coded 1024; now parameterized (backward-compatible default).
     */
    constructor(apiKey, model, maxTokens = 1024) {
        this.apiKey = apiKey;
        this.model = model;
        this.maxTokens = maxTokens;
    }
    /**
     * Send a conversation and return the assistant's text reply.
     * Anthropic accepts only 'user'/'assistant' roles in `messages`; a system
     * prompt must go in the separate top-level `system` field, so it is
     * extracted here.
     * @param {{role: string, content: string}[]} messages
     * @returns {Promise<string>} reply text ("" if the response had no text block)
     * @throws {Error} on any non-2xx HTTP response
     */
    async chat(messages) {
        const url = 'https://api.anthropic.com/v1/messages';
        const systemMessage = messages.find(m => m.role === 'system');
        const conversationMessages = messages.filter(m => m.role !== 'system').map(m => ({
            role: m.role,
            content: m.content
        }));
        const body = {
            model: this.model,
            messages: conversationMessages,
            max_tokens: this.maxTokens,
        };
        if (systemMessage) {
            body.system = systemMessage.content;
        }
        const response = await fetch(url, {
            method: 'POST',
            headers: {
                'x-api-key': this.apiKey,
                'anthropic-version': '2023-06-01',
                'content-type': 'application/json'
            },
            body: JSON.stringify(body)
        });
        if (!response.ok) {
            const errorText = await response.text();
            throw new Error(`Anthropic Provider Error (${response.status}): ${errorText}`);
        }
        const data = await response.json();
        return data.content?.[0]?.text || "";
    }
    /**
     * Degenerate stream: performs one blocking chat() call and yields the whole
     * reply as a single chunk. Real SSE streaming is not implemented yet.
     */
    async *chatStream(messages) {
        yield await this.chat(messages);
    }
}
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
/**
 * Minimal Cohere v1 /chat client implementing the unified
 * `chat(messages) -> string` interface.
 */
export class CohereClient {
    apiKey;
    model;
    constructor(apiKey, model) {
        this.apiKey = apiKey;
        this.model = model;
    }
    /**
     * Send a conversation and return the model's text reply.
     *
     * Cohere's v1 /chat endpoint takes the latest turn as `message`, prior
     * turns as `chat_history` (roles USER/CHATBOT), and system instructions as
     * `preamble`. Fix: system messages were previously left in the history and
     * mapped to the CHATBOT role (anything non-'user' became CHATBOT); they are
     * now routed to `preamble` instead.
     * @param {{role: string, content: string}[]} messages
     * @returns {Promise<string>} reply text ("" if absent)
     * @throws {Error} on any non-2xx HTTP response, or if no non-system message exists
     */
    async chat(messages) {
        const url = 'https://api.cohere.ai/v1/chat';
        const systemMessage = messages.find(m => m.role === 'system');
        const conversation = messages.filter(m => m.role !== 'system');
        const lastMessage = conversation[conversation.length - 1];
        if (!lastMessage) {
            throw new Error('Cohere chat requires at least one non-system message.');
        }
        const history = conversation.slice(0, -1).map(m => ({
            role: m.role === 'user' ? 'USER' : 'CHATBOT',
            message: m.content
        }));
        const body = {
            model: this.model,
            message: lastMessage.content,
            chat_history: history.length > 0 ? history : undefined
        };
        if (systemMessage) {
            body.preamble = systemMessage.content;
        }
        const response = await fetch(url, {
            method: 'POST',
            headers: {
                'Authorization': `Bearer ${this.apiKey}`,
                'Content-Type': 'application/json',
                'Request-Source': 'moth-cli'
            },
            body: JSON.stringify(body)
        });
        if (!response.ok) {
            const errorText = await response.text();
            throw new Error(`Cohere Provider Error (${response.status}): ${errorText}`);
        }
        const data = await response.json();
        return data.text || "";
    }
    /**
     * Degenerate stream: one blocking chat() call yielded as a single chunk.
     */
    async *chatStream(messages) {
        yield await this.chat(messages);
    }
}
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
import { GeminiClient } from './geminiAdapter.js';
|
|
2
|
+
import { OpenAICompatibleAdapter } from './openAIAdapter.js';
|
|
3
|
+
import { ClaudeClient } from './claudeAdapter.js';
|
|
4
|
+
import { CohereClient } from './cohereAdapter.js';
|
|
5
|
+
import { getApiKey } from '../config/keychain.js';
|
|
6
|
+
/**
 * Build the concrete LLM client for a profile.
 * Looks up the profile's API key in the keychain, then dispatches on the
 * provider string. Key-requiring providers throw if no key is stored.
 * @throws {Error} for missing keys or unknown providers
 */
export async function createLLMClient(profile) {
    const apiKey = await getApiKey(profile.name);
    // Guard for providers that cannot operate without a stored key.
    const requireKey = (label) => {
        if (!apiKey) {
            throw new Error(`API Key required for ${label}`);
        }
        return apiKey;
    };
    switch (profile.provider) {
        case 'gemini-native':
        case 'gemini':
            return new GeminiClient(requireKey('Gemini Native'), profile.model);
        case 'openai-compatible':
            // API Key is optional for some local providers
            return new OpenAICompatibleAdapter(profile.model, profile.baseUrl, apiKey || undefined);
        case 'ollama':
            // Default to the local Ollama OpenAI-compatible endpoint.
            return new OpenAICompatibleAdapter(profile.model, profile.baseUrl || 'http://127.0.0.1:11434/v1', apiKey || undefined);
        case 'claude-native':
            return new ClaudeClient(requireKey('Claude Native'), profile.model);
        case 'cohere-native':
            return new CohereClient(requireKey('Cohere Native'), profile.model);
        default:
            throw new Error(`Unsupported adapter: ${profile.provider}`);
    }
}
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from '@google/generative-ai';
|
|
2
|
+
/**
 * Google Gemini client implementing the unified chat interface.
 * All safety-filter categories are explicitly set to BLOCK_NONE.
 */
export class GeminiClient {
    model;
    constructor(apiKey, modelName) {
        const genAI = new GoogleGenerativeAI(apiKey);
        this.model = genAI.getGenerativeModel({
            model: modelName,
            safetySettings: [
                {
                    category: HarmCategory.HARM_CATEGORY_HARASSMENT,
                    threshold: HarmBlockThreshold.BLOCK_NONE,
                },
                {
                    category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
                    threshold: HarmBlockThreshold.BLOCK_NONE,
                },
                {
                    category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
                    threshold: HarmBlockThreshold.BLOCK_NONE,
                },
                {
                    category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
                    threshold: HarmBlockThreshold.BLOCK_NONE,
                },
            ],
        });
    }
    // Shared setup for chat() and chatStream(): seed a session with every
    // message except the last, and hand back the session plus the final
    // message to send. (Was duplicated verbatim in both methods.)
    #prepareChat(messages) {
        const chat = this.model.startChat({
            history: messages.slice(0, -1).map(m => ({
                role: m.role === 'assistant' ? 'model' : 'user', // Gemini uses 'model'
                parts: [{ text: m.content }],
            })),
        });
        return { chat, lastMessage: messages[messages.length - 1] };
    }
    /**
     * Send a conversation and return the full text reply.
     * @param {{role: string, content: string}[]} messages - must be non-empty
     */
    async chat(messages) {
        const { chat, lastMessage } = this.#prepareChat(messages);
        const result = await chat.sendMessage(lastMessage.content);
        return result.response.text();
    }
    /** True streaming: yields each non-empty text chunk as it arrives. */
    async *chatStream(messages) {
        const { chat, lastMessage } = this.#prepareChat(messages);
        const result = await chat.sendMessageStream(lastMessage.content);
        for await (const chunk of result.stream) {
            const text = chunk.text();
            if (text) {
                yield text;
            }
        }
    }
}
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
/**
 * Client for any OpenAI-compatible /chat/completions endpoint (OpenAI itself,
 * Ollama, LM Studio, etc.). Implements the unified chat(messages) -> string
 * interface.
 */
export class OpenAICompatibleAdapter {
    baseUrl;
    apiKey;
    model;
    /**
     * @param {string} model - Model name to request.
     * @param {string} [baseUrl] - API base; defaults to the official OpenAI API.
     * @param {string} [apiKey] - Optional; some local providers need no key.
     */
    constructor(model, baseUrl, apiKey) {
        this.model = model;
        this.baseUrl = baseUrl || 'https://api.openai.com/v1';
        this.apiKey = apiKey;
    }
    /**
     * Send a conversation and return the first choice's message content.
     * @throws {Error} on any non-2xx HTTP response
     */
    async chat(messages) {
        const endpoint = `${this.baseUrl}/chat/completions`;
        const headers = { 'Content-Type': 'application/json' };
        // Bearer auth only when a key was configured.
        if (this.apiKey) {
            headers['Authorization'] = `Bearer ${this.apiKey}`;
        }
        const payload = JSON.stringify({
            model: this.model,
            // Strip any extra fields; the API only needs role + content.
            messages: messages.map(({ role, content }) => ({ role, content })),
        });
        const response = await fetch(endpoint, {
            method: 'POST',
            headers,
            body: payload,
        });
        if (!response.ok) {
            const errorText = await response.text();
            throw new Error(`OpenAI Provider Error (${response.status}): ${errorText}`);
        }
        const data = await response.json();
        return data.choices?.[0]?.message?.content || "";
    }
    /**
     * Degenerate stream: one blocking chat() call yielded as a single chunk
     * (SSE streaming not implemented in v1).
     */
    async *chatStream(messages) {
        yield await this.chat(messages);
    }
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
// Intentionally empty runtime module: presumably this file only carried type
// declarations in the original source, which were erased when compiled to
// dist/ — TODO confirm against the TypeScript sources.
export {};
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
/**
 * In-memory todo list used by the agent's planning layer.
 * Items are plain objects: { id, text, status }.
 */
export class TodoManager {
    todos = [];
    // Per-instance monotonic counter mixed into IDs so rapid adds cannot collide.
    #nextSeq = 0;
    /**
     * Create a new todo in 'pending' state and return it.
     * Fix: the previous `Math.random().toString(36).substring(7)` ID was only
     * ~5 random characters and could collide; IDs are now prefixed with a
     * sequence number that guarantees uniqueness within this manager.
     * @param {string} text - Human-readable task description.
     * @returns {{id: string, text: string, status: string}}
     */
    add(text) {
        const todo = {
            id: `${(this.#nextSeq++).toString(36)}-${Math.random().toString(36).slice(2, 8)}`,
            text,
            status: 'pending'
        };
        this.todos.push(todo);
        return todo;
    }
    /**
     * Set the status of the todo with the given id.
     * @returns {boolean} true if a matching todo was found and updated.
     */
    updateStatus(id, status) {
        const todo = this.todos.find(t => t.id === id);
        if (todo) {
            todo.status = status;
            return true;
        }
        return false;
    }
    /**
     * @returns a shallow copy of the todo list (the item objects are shared,
     * so mutating a returned item still affects the manager's state).
     */
    list() {
        return [...this.todos];
    }
}
|