@stan-chen/simple-cli 0.2.1 → 0.2.3
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- package/README.md +55 -238
- package/dist/claw/jit.d.ts +5 -0
- package/dist/claw/jit.js +138 -0
- package/dist/claw/management.d.ts +3 -0
- package/dist/claw/management.js +107 -0
- package/dist/cli.js +306 -61
- package/dist/commands/git/commit.js +2 -1
- package/dist/commands/index.js +3 -2
- package/dist/context.js +13 -3
- package/dist/lib/agent.d.ts +4 -3
- package/dist/lib/agent.js +49 -17
- package/dist/lib/git.js +6 -1
- package/dist/lib/shim.d.ts +4 -0
- package/dist/lib/shim.js +30 -0
- package/dist/lib/ui.js +25 -0
- package/dist/mcp/manager.js +5 -1
- package/dist/prompts/provider.js +1 -0
- package/dist/providers/index.d.ts +21 -5
- package/dist/providers/index.js +75 -64
- package/dist/providers/multi.d.ts +2 -1
- package/dist/registry.d.ts +5 -0
- package/dist/registry.js +86 -22
- package/dist/repoMap.js +18 -18
- package/dist/router.js +21 -11
- package/dist/skills.js +10 -10
- package/dist/swarm/worker.d.ts +2 -0
- package/dist/swarm/worker.js +85 -15
- package/dist/tools/analyze_file.d.ts +16 -0
- package/dist/tools/analyze_file.js +43 -0
- package/dist/tools/clawBrain.d.ts +23 -0
- package/dist/tools/clawBrain.js +136 -0
- package/dist/tools/claw_brain.d.ts +23 -0
- package/dist/tools/claw_brain.js +139 -0
- package/dist/tools/deleteFile.d.ts +19 -0
- package/dist/tools/deleteFile.js +36 -0
- package/dist/tools/delete_file.d.ts +19 -0
- package/dist/tools/delete_file.js +36 -0
- package/dist/tools/fileOps.d.ts +22 -0
- package/dist/tools/fileOps.js +43 -0
- package/dist/tools/file_ops.d.ts +22 -0
- package/dist/tools/file_ops.js +43 -0
- package/dist/tools/grep.d.ts +2 -2
- package/dist/tools/linter.js +85 -27
- package/dist/tools/list_dir.d.ts +29 -0
- package/dist/tools/list_dir.js +50 -0
- package/dist/tools/organizer.d.ts +1 -0
- package/dist/tools/organizer.js +65 -0
- package/dist/tools/read_files.d.ts +25 -0
- package/dist/tools/read_files.js +31 -0
- package/dist/tools/reload_tools.d.ts +11 -0
- package/dist/tools/reload_tools.js +22 -0
- package/dist/tools/run_command.d.ts +32 -0
- package/dist/tools/run_command.js +103 -0
- package/dist/tools/scheduler.d.ts +25 -0
- package/dist/tools/scheduler.js +65 -0
- package/dist/tools/writeFiles.js +1 -1
- package/dist/tools/write_files.d.ts +84 -0
- package/dist/tools/write_files.js +91 -0
- package/dist/tools/write_to_file.d.ts +15 -0
- package/dist/tools/write_to_file.js +21 -0
- package/package.json +84 -78
package/dist/lib/agent.js
CHANGED
@@ -9,20 +9,51 @@ import * as ui from './ui.js';
  * Parse LLM response into structured format
  */
 export function parseResponse(response) {
-    //
-    const
-
-
-
-
-
+    // Accept either a TypeLLMResponse object or a raw string (tests and some providers pass raw text)
+    const raw = typeof response === 'string' ? response : (response.raw || '');
+    // Extract edit blocks - Aider style blocks are within the raw text
+    const editBlocks = parseEditBlocks(raw);
+    // If we were given a structured TypeLLMResponse, prefer its typed fields
+    if (typeof response !== 'string') {
+        const thought = response.thought;
+        let tool = response.tool || 'none';
+        const args = response.args || {};
+        const message = response.message || '';
+        // normalize tool name to snake_case
+        if (tool && tool !== 'none') {
+            tool = tool.replace(/([a-z0-9])([A-Z])/g, '$1_$2').toLowerCase();
+        }
+        const action = tool !== 'none'
+            ? { tool, args }
+            : { tool: 'none', message: message || (tool === 'none' ? 'No action parsed' : '') };
+        return { thought, action, editBlocks };
+    }
+    // For raw string input, attempt to extract a <thought> block and a JSON action
+    let thought;
+    const thoughtMatch = raw.match(/<thought>[\s\S]*?<\/thought>/i);
+    if (thoughtMatch) {
+        thought = thoughtMatch[0].replace(/<\/?thought>/gi, '').trim();
+    }
+    // Extract first JSON-looking object from the raw text
     let action = { tool: 'none', message: 'No action parsed' };
+    const jsonMatch = raw.match(/\{[\s\S]*\}/);
     if (jsonMatch) {
         try {
-
+            const parsed = JSON.parse(jsonMatch[0]);
+            let tool = parsed.tool || parsed.command || 'none';
+            const args = parsed.args || parsed.parameters || {};
+            if (tool && tool !== 'none') {
+                // normalize camelCase to snake_case
+                tool = String(tool).replace(/([a-z0-9])([A-Z])/g, '$1_$2').toLowerCase();
+                action = { tool, args };
+            }
+            else {
+                action = { tool: 'none', message: parsed.message || 'No action parsed' };
+            }
         }
-        catch {
-            //
+        catch (e) {
+            // If JSON.parse fails, leave action as 'none'
+            action = { tool: 'none', message: 'No action parsed' };
         }
     }
     return { thought, action, editBlocks };
@@ -156,12 +187,12 @@ export class Agent {
         const reflectionPrompt = buildReflectionPrompt({
             attempt,
             previousError: failed.map(f => f.error).join('\n'),
-            previousResponse: llmResponse,
+            previousResponse: llmResponse.raw || JSON.stringify(llmResponse),
             failedEdits: failed,
         });
         messages = [
             ...messages,
-            { role: 'assistant', content: llmResponse },
+            { role: 'assistant', content: llmResponse.raw || JSON.stringify(llmResponse) },
             { role: 'user', content: reflectionPrompt },
         ];
         continue;
@@ -177,7 +208,7 @@ export class Agent {
         const lintPrompt = buildLintErrorPrompt(file, lintResult.output);
         messages = [
             ...messages,
-            { role: 'assistant', content: llmResponse },
+            { role: 'assistant', content: llmResponse.raw || JSON.stringify(llmResponse) },
             { role: 'user', content: lintPrompt },
         ];
         continue;
@@ -192,7 +223,7 @@ export class Agent {
         const testPrompt = buildTestFailurePrompt(testResult.output);
         messages = [
             ...messages,
-            { role: 'assistant', content: llmResponse },
+            { role: 'assistant', content: llmResponse.raw || JSON.stringify(llmResponse) },
             { role: 'user', content: testPrompt },
         ];
         continue;
@@ -206,10 +237,10 @@ export class Agent {
         const diff = await this.git.diff();
         if (diff) {
             const commitMessage = await ui.spin('Generating commit message...', () => generateCommitMessage(diff, async (prompt) => {
-                const
+                const res = await this.generateFn([
                     { role: 'user', content: prompt },
                 ]);
-                return
+                return res.message || res.thought || res.raw || '';
             }));
             commitResult = await this.git.commit({
                 message: commitMessage,
@@ -270,9 +301,10 @@ Provide a brief summary that captures:
 1. Main topics discussed
 2. Key decisions made
 3. Important context for future messages`;
-    const
+    const summaryRes = await generateFn([
         { role: 'user', content: summaryPrompt },
     ]);
+    const summary = summaryRes.message || summaryRes.thought || summaryRes.raw || 'No summary generated';
     return [
         history[0], // Keep first
         { role: 'system', content: `[Conversation Summary]\n${summary}` },

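For orientation, here is a minimal sketch of what the new raw-string path of parseResponse produces. The reply text is hypothetical, and the deep-import path is an assumption about how the dist build is laid out:

```js
import { parseResponse } from '@stan-chen/simple-cli/dist/lib/agent.js';

// Hypothetical raw LLM reply: a <thought> block followed by a JSON action.
const reply = '<thought>Need to list files</thought>\n' +
    '{"tool": "listDir", "args": {"path": "."}}';

const { thought, action } = parseResponse(reply);
// thought -> 'Need to list files'
// action  -> { tool: 'list_dir', args: { path: '.' } }  (camelCase normalized to snake_case)
```
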
package/dist/lib/git.js
CHANGED
@@ -255,7 +255,12 @@ export class GitManager {
     async rootDir() {
         try {
             const result = await this.git.revparse(['--show-toplevel']);
-
+            const trimmed = result.trim();
+            // Ensure Windows paths use backslashes for tests that compare exact strings
+            if (process.platform === 'win32') {
+                return trimmed.replace(/\//g, '\\');
+            }
+            return trimmed;
         }
         catch {
             return null;

package/dist/lib/shim.js
ADDED
@@ -0,0 +1,30 @@
+/**
+ * OpenClaw RPC Shim: allow scripts to call simple-cli tools
+ */
+import { execSync } from 'child_process';
+/**
+ * Invoke a simple-cli tool with arguments
+ */
+export async function invoke(tool, args = {}) {
+    // Determine path to simple-cli. We assume it's in the same project or installed.
+    // In production, we'd use the global 'simple' command.
+    const isWindows = process.platform === 'win32';
+    const cliCmd = 'simple'; // Or local path to dist/cli.js
+    try {
+        const cmd = `${cliCmd} --invoke-json "${tool}" "${JSON.stringify(args).replace(/"/g, '\\"')}"`;
+        const output = execSync(cmd, { encoding: 'utf-8', stdio: 'pipe' });
+        try {
+            return JSON.parse(output.trim());
+        }
+        catch {
+            return output.trim();
+        }
+    }
+    catch (error) {
+        throw new Error(`Tool invocation failed: ${error.stderr || error.message}`);
+    }
+}
+// Global attachment for 'node.invoke' style parity
+if (typeof global.invoke === 'undefined') {
+    global.invoke = invoke;
+}

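A hedged usage sketch for the shim. The tool name and arguments are illustrative, and it assumes the `simple` binary is on PATH with `--invoke-json` handled by dist/cli.js as the shim expects:

```js
import { invoke } from '@stan-chen/simple-cli/dist/lib/shim.js';

// Shells out to: simple --invoke-json "read_files" "{\"paths\":[\"README.md\"]}"
const result = await invoke('read_files', { paths: ['README.md'] });
// JSON output is parsed; anything else comes back as a trimmed string.
console.log(result);
```

Note that although invoke is declared async, it uses execSync, so the child process blocks the event loop until it exits.
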
package/dist/lib/ui.js
CHANGED
@@ -5,6 +5,7 @@
 import * as p from '@clack/prompts';
 import pc from 'picocolors';
 export { p as prompts };
+const NON_INTERACTIVE = process.env.VITEST === 'true' || process.env.TEST === 'true' || !process.stdin.isTTY;
 /**
  * UI Theme colors
  */
@@ -91,36 +92,60 @@ export async function spin(message, fn) {
  * Prompt for text input
  */
 export async function text(options) {
+    if (NON_INTERACTIVE)
+        return options.defaultValue ?? options.placeholder ?? '';
     return p.text(options);
 }
 /**
  * Prompt for password input
  */
 export async function password(options) {
+    if (NON_INTERACTIVE)
+        return '';
     return p.password(options);
 }
 /**
  * Prompt for confirmation
  */
 export async function confirm(options) {
+    if (NON_INTERACTIVE)
+        return options.initialValue ?? true;
     return p.confirm(options);
 }
 /**
  * Prompt for single selection
  */
 export async function select(options) {
+    if (NON_INTERACTIVE)
+        return options.initialValue ?? options.options[0].value;
     return p.select(options);
 }
 /**
  * Prompt for multi-selection
  */
 export async function multiselect(options) {
+    if (NON_INTERACTIVE)
+        return options.initialValues ?? [];
     return p.multiselect(options);
 }
 /**
  * Group related prompts together
  */
 export async function group(prompts, options) {
+    if (NON_INTERACTIVE) {
+        const result = {};
+        for (const k of Object.keys(prompts)) {
+            // Attempt to call each prompt function but if it would prompt, we expect those to return defaults because of NON_INTERACTIVE
+            try {
+                // eslint-disable-next-line @typescript-eslint/await-thenable
+                result[k] = await prompts[k]();
+            }
+            catch {
+                result[k] = undefined;
+            }
+        }
+        return result;
+    }
     return p.group(prompts, options);
 }
 /**

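The practical effect: under Vitest, or in any context without a TTY, prompts resolve immediately with defaults instead of hanging. A small illustration under that assumption:

```js
import * as ui from '@stan-chen/simple-cli/dist/lib/ui.js';

// With VITEST=true (or no TTY), these never reach @clack/prompts:
const name = await ui.text({ message: 'Project name?', defaultValue: 'demo' }); // 'demo'
const ok = await ui.confirm({ message: 'Proceed?' });                           // true
const picks = await ui.multiselect({ message: 'Tools?', options: [] });         // []
```
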
package/dist/mcp/manager.js
CHANGED
@@ -149,7 +149,11 @@ export class MCPManager {
         else {
             throw new Error(`Invalid MCP server config for ${serverName}: missing command or url`);
         }
-
+        // Connect with a 5-second timeout
+        await Promise.race([
+            client.connect(transport),
+            new Promise((_, reject) => setTimeout(() => reject(new Error('Connection timeout')), 5000))
+        ]);
         state.client = client;
         state.transport = transport;
         state.status = MCPServerStatus.CONNECTED;

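The added timeout is the standard Promise.race pattern; here is a standalone sketch (the withTimeout helper is ours, not part of the package):

```js
// Rejects if `promise` does not settle within `ms` milliseconds.
function withTimeout(promise, ms, message = 'Connection timeout') {
    return Promise.race([
        promise,
        new Promise((_, reject) => setTimeout(() => reject(new Error(message)), ms)),
    ]);
}

// Equivalent to the new manager.js code:
// await withTimeout(client.connect(transport), 5000);
```

One caveat that applies to the inline version as well: the losing timer is never cleared, so a pending timeout can keep the event loop alive for up to five seconds after a fast connect.
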
package/dist/providers/index.d.ts
CHANGED
@@ -1,15 +1,31 @@
 /**
- * Provider Bridge: Unified LLM interface via
- *
+ * Provider Bridge: Unified LLM interface via Vercel AI SDK
+ * Support for OpenAI, Anthropic, Google (Gemini), and custom endpoints.
  */
+import { type TypeLLMResponse } from '@stan-chen/typellm';
 export interface Message {
-    role:
+    role: 'user' | 'assistant' | 'system';
     content: string;
 }
 export interface Provider {
     name: string;
     model: string;
-    generateResponse: (systemPrompt: string, messages: Message[]) => Promise<
+    generateResponse: (systemPrompt: string, messages: Message[]) => Promise<TypeLLMResponse>;
 }
-
+/**
+ * Structured output strategy:
+ * - Uses strong system prompts with explicit JSON format examples
+ * - Response parsing with jsonrepair (in cli.ts)
+ * - Format reminders in user messages (in context.ts)
+ *
+ * This approach works across ALL providers without hitting
+ * provider-specific schema limitations (e.g., OpenAI's additionalProperties requirement)
+ */
+/**
+ * Creates a provider instance using TypeLLM
+ */
+export declare const createProviderForModel: (modelId: string) => Provider;
+/**
+ * Creates the default provider
+ */
 export declare const createProvider: () => Provider;

package/dist/providers/index.js
CHANGED
@@ -1,82 +1,93 @@
 /**
- * Provider Bridge: Unified LLM interface via
- *
+ * Provider Bridge: Unified LLM interface via Vercel AI SDK
+ * Support for OpenAI, Anthropic, Google (Gemini), and custom endpoints.
  */
-import
-
-
-
-
-
-
-
+import { createTypeLLM } from '@stan-chen/typellm';
+/**
+ * Structured output strategy:
+ * - Uses strong system prompts with explicit JSON format examples
+ * - Response parsing with jsonrepair (in cli.ts)
+ * - Format reminders in user messages (in context.ts)
+ *
+ * This approach works across ALL providers without hitting
+ * provider-specific schema limitations (e.g., OpenAI's additionalProperties requirement)
+ */
+/**
+ * Creates a provider instance using TypeLLM
+ */
+export const createProviderForModel = (modelId) => {
+    let providerType = 'openai';
+    let actualModel = modelId;
+    let baseURL;
+    // Handle provider selection
+    if (modelId.startsWith('anthropic:')) {
+        actualModel = modelId.split(':')[1] || modelId;
+        providerType = 'anthropic';
     }
-
-
-
-        apiKey: process.env.DEEPSEEK_API_KEY,
-        baseURL: 'https://api.deepseek.com/v1',
-        model: process.env.DEEPSEEK_MODEL || 'deepseek-chat'
-    };
+    else if (modelId.startsWith('google:') || modelId.startsWith('gemini:')) {
+        actualModel = modelId.split(':')[1] || modelId;
+        providerType = 'google';
     }
-
-
-
-        apiKey: process.env.GROQ_API_KEY,
-        baseURL: 'https://api.groq.com/openai/v1',
-        model: process.env.GROQ_MODEL || 'llama3-70b-8192'
-    };
+    else if (modelId.startsWith('openai:')) {
+        actualModel = modelId.split(':')[1] || modelId;
+        providerType = 'openai';
     }
-
-
-    return {
-        apiKey: process.env.MISTRAL_API_KEY,
-        baseURL: 'https://api.mistral.ai/v1',
-        model: process.env.MISTRAL_MODEL || 'mistral-large-latest'
-    };
+    else if (modelId.startsWith('claude') || (process.env.ANTHROPIC_API_KEY && !process.env.OPENAI_API_KEY)) {
+        providerType = 'anthropic';
     }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    else if (modelId.startsWith('gemini') || (process.env.GEMINI_API_KEY && !process.env.OPENAI_API_KEY)) {
+        providerType = 'google';
+    }
+    else if (process.env.LITELLM_BASE_URL) {
+        providerType = 'litellm';
+        baseURL = process.env.LITELLM_BASE_URL;
+    }
+    else {
+        providerType = 'openai';
+    }
+    // Final check for the Google key mapping
+    if (providerType === 'google' && process.env.GEMINI_API_KEY && !process.env.GOOGLE_GENERATIVE_AI_API_KEY) {
+        process.env.GOOGLE_GENERATIVE_AI_API_KEY = process.env.GEMINI_API_KEY;
+    }
+    const llm = createTypeLLM({
+        provider: providerType,
+        model: actualModel,
+        baseURL: baseURL,
+        apiKey: providerType === 'openai' ? process.env.OPENAI_API_KEY :
+            providerType === 'google' ? process.env.GEMINI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY :
+                providerType === 'anthropic' ? process.env.ANTHROPIC_API_KEY :
+                    undefined,
+        temperature: 0
     });
     return {
-        name:
-        model,
+        name: providerType,
+        model: actualModel,
         generateResponse: async (systemPrompt, messages) => {
             try {
-                const response = await
-
-
-
-                    ...messages.map(m => ({ role: m.role, content: m.content }))
-                ]
-                });
-                return response.choices[0]?.message?.content || '';
+                const response = await llm.generate(systemPrompt, messages);
+                if ((process.env.DEBUG === 'true') && response)
+                    console.log(`[DEBUG] TypeLLM Response: ${JSON.stringify(response).substring(0, 300)}...`);
+                return response;
             }
             catch (e) {
-
+                const msg = `Error calling TypeLLM: ${e instanceof Error ? e.message : e}`;
+                return {
+                    thought: 'Error occurred during generation',
+                    tool: 'none',
+                    args: {},
+                    message: msg,
+                    raw: msg
+                };
             }
         }
     };
 };
+/**
+ * Creates the default provider
+ */
 export const createProvider = () => {
-    const
-
-
+    const isClaw = process.argv.includes('--claw') || process.argv.includes('-claw');
+    const model = (isClaw ? process.env.CLAW_MODEL : null) || process.env.OPENAI_MODEL || process.env.GEMINI_MODEL || 'gpt-4o-mini';
+    console.log(`🤖 Using TypeLLM with model: ${model}`);
+    return createProviderForModel(model);
 };

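Routing is driven by the model ID prefix, with environment-variable fallbacks; a few illustrative calls (the model names are hypothetical, and behavior is read off the code above):

```js
createProviderForModel('anthropic:claude-3-5-sonnet'); // providerType 'anthropic', model 'claude-3-5-sonnet'
createProviderForModel('gemini:gemini-1.5-pro');       // providerType 'google',    model 'gemini-1.5-pro'
createProviderForModel('openai:gpt-4o-mini');          // providerType 'openai',    model 'gpt-4o-mini'

// Without a prefix, the fallbacks decide: 'claude*' (or a lone ANTHROPIC_API_KEY)
// selects 'anthropic', 'gemini*' (or a lone GEMINI_API_KEY) selects 'google',
// LITELLM_BASE_URL selects 'litellm', and otherwise 'openai' is used.
createProviderForModel('gpt-4o-mini');
```
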
package/dist/providers/multi.d.ts
CHANGED
@@ -4,8 +4,9 @@
  */
 import { type Provider, type Message } from './index.js';
 import type { Tier, TierConfig } from '../router.js';
+import type { TypeLLMResponse } from '@stan-chen/typellm';
 export interface MultiProvider {
     getProvider: (tier: Tier) => Provider;
-    generateWithTier: (tier: Tier, systemPrompt: string, messages: Message[]) => Promise<
+    generateWithTier: (tier: Tier, systemPrompt: string, messages: Message[]) => Promise<TypeLLMResponse>;
 }
 export declare const createMultiProvider: (tierConfigs: Map<Tier, TierConfig>) => MultiProvider;

package/dist/registry.d.ts
CHANGED
@@ -15,6 +15,11 @@ export interface Tool {
     specification?: string;
 }
 export type { Tool as ToolModule };
+/**
+ * Parses a tool definition from a Markdown file (.md) or string
+ * Supports YAML frontmatter as per OpenClaw PRD.
+ */
+export declare function getMeta(content: string, filename: string): any;
 export declare const loadTools: () => Promise<Map<string, Tool>>;
 export declare const loadAllTools: () => Promise<Map<string, Tool>>;
 export declare const getToolDefinitions: (tools: Map<string, Tool>) => string;

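A hedged sketch of calling the newly exported getMeta. The frontmatter fields below are our assumption; the actual schema comes from the OpenClaw PRD, which this diff does not include:

```js
import { getMeta } from '@stan-chen/simple-cli/dist/registry.js';

// Hypothetical Markdown tool definition with YAML frontmatter:
const content = [
    '---',
    'name: hello_tool',
    'description: Says hello',
    '---',
    '',
    'Prints a greeting.',
].join('\n');

const meta = getMeta(content, 'hello_tool.md');
// Expected to surface the frontmatter fields, e.g. meta.name === 'hello_tool'.
```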