@stan-chen/simple-cli 0.2.1 → 0.2.3

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (61)
  1. package/README.md +55 -238
  2. package/dist/claw/jit.d.ts +5 -0
  3. package/dist/claw/jit.js +138 -0
  4. package/dist/claw/management.d.ts +3 -0
  5. package/dist/claw/management.js +107 -0
  6. package/dist/cli.js +306 -61
  7. package/dist/commands/git/commit.js +2 -1
  8. package/dist/commands/index.js +3 -2
  9. package/dist/context.js +13 -3
  10. package/dist/lib/agent.d.ts +4 -3
  11. package/dist/lib/agent.js +49 -17
  12. package/dist/lib/git.js +6 -1
  13. package/dist/lib/shim.d.ts +4 -0
  14. package/dist/lib/shim.js +30 -0
  15. package/dist/lib/ui.js +25 -0
  16. package/dist/mcp/manager.js +5 -1
  17. package/dist/prompts/provider.js +1 -0
  18. package/dist/providers/index.d.ts +21 -5
  19. package/dist/providers/index.js +75 -64
  20. package/dist/providers/multi.d.ts +2 -1
  21. package/dist/registry.d.ts +5 -0
  22. package/dist/registry.js +86 -22
  23. package/dist/repoMap.js +18 -18
  24. package/dist/router.js +21 -11
  25. package/dist/skills.js +10 -10
  26. package/dist/swarm/worker.d.ts +2 -0
  27. package/dist/swarm/worker.js +85 -15
  28. package/dist/tools/analyze_file.d.ts +16 -0
  29. package/dist/tools/analyze_file.js +43 -0
  30. package/dist/tools/clawBrain.d.ts +23 -0
  31. package/dist/tools/clawBrain.js +136 -0
  32. package/dist/tools/claw_brain.d.ts +23 -0
  33. package/dist/tools/claw_brain.js +139 -0
  34. package/dist/tools/deleteFile.d.ts +19 -0
  35. package/dist/tools/deleteFile.js +36 -0
  36. package/dist/tools/delete_file.d.ts +19 -0
  37. package/dist/tools/delete_file.js +36 -0
  38. package/dist/tools/fileOps.d.ts +22 -0
  39. package/dist/tools/fileOps.js +43 -0
  40. package/dist/tools/file_ops.d.ts +22 -0
  41. package/dist/tools/file_ops.js +43 -0
  42. package/dist/tools/grep.d.ts +2 -2
  43. package/dist/tools/linter.js +85 -27
  44. package/dist/tools/list_dir.d.ts +29 -0
  45. package/dist/tools/list_dir.js +50 -0
  46. package/dist/tools/organizer.d.ts +1 -0
  47. package/dist/tools/organizer.js +65 -0
  48. package/dist/tools/read_files.d.ts +25 -0
  49. package/dist/tools/read_files.js +31 -0
  50. package/dist/tools/reload_tools.d.ts +11 -0
  51. package/dist/tools/reload_tools.js +22 -0
  52. package/dist/tools/run_command.d.ts +32 -0
  53. package/dist/tools/run_command.js +103 -0
  54. package/dist/tools/scheduler.d.ts +25 -0
  55. package/dist/tools/scheduler.js +65 -0
  56. package/dist/tools/writeFiles.js +1 -1
  57. package/dist/tools/write_files.d.ts +84 -0
  58. package/dist/tools/write_files.js +91 -0
  59. package/dist/tools/write_to_file.d.ts +15 -0
  60. package/dist/tools/write_to_file.js +21 -0
  61. package/package.json +84 -78
package/dist/lib/agent.js CHANGED
@@ -9,20 +9,51 @@ import * as ui from './ui.js';
  * Parse LLM response into structured format
  */
 export function parseResponse(response) {
-    // Extract thought/reasoning
-    const thoughtMatch = response.match(/<thought>([\s\S]*?)<\/thought>/i);
-    const thought = thoughtMatch?.[1]?.trim();
-    // Extract edit blocks
-    const editBlocks = parseEditBlocks(response);
-    // Extract tool action
-    const jsonMatch = response.match(/\{[\s\S]*"tool"[\s\S]*\}/);
+    // Accept either a TypeLLMResponse object or a raw string (tests and some providers pass raw text)
+    const raw = typeof response === 'string' ? response : (response.raw || '');
+    // Extract edit blocks - Aider style blocks are within the raw text
+    const editBlocks = parseEditBlocks(raw);
+    // If we were given a structured TypeLLMResponse, prefer its typed fields
+    if (typeof response !== 'string') {
+        const thought = response.thought;
+        let tool = response.tool || 'none';
+        const args = response.args || {};
+        const message = response.message || '';
+        // normalize tool name to snake_case
+        if (tool && tool !== 'none') {
+            tool = tool.replace(/([a-z0-9])([A-Z])/g, '$1_$2').toLowerCase();
+        }
+        const action = tool !== 'none'
+            ? { tool, args }
+            : { tool: 'none', message: message || 'No action parsed' };
+        return { thought, action, editBlocks };
+    }
+    // For raw string input, attempt to extract a <thought> block and a JSON action
+    let thought;
+    const thoughtMatch = raw.match(/<thought>[\s\S]*?<\/thought>/i);
+    if (thoughtMatch) {
+        thought = thoughtMatch[0].replace(/<\/?thought>/gi, '').trim();
+    }
+    // Extract first JSON-looking object from the raw text
     let action = { tool: 'none', message: 'No action parsed' };
+    const jsonMatch = raw.match(/\{[\s\S]*\}/);
     if (jsonMatch) {
         try {
-            action = JSON.parse(jsonMatch[0]);
+            const parsed = JSON.parse(jsonMatch[0]);
+            let tool = parsed.tool || parsed.command || 'none';
+            const args = parsed.args || parsed.parameters || {};
+            if (tool && tool !== 'none') {
+                // normalize camelCase to snake_case
+                tool = String(tool).replace(/([a-z0-9])([A-Z])/g, '$1_$2').toLowerCase();
+                action = { tool, args };
+            }
+            else {
+                action = { tool: 'none', message: parsed.message || 'No action parsed' };
+            }
         }
-        catch {
-            // Keep default
+        catch (e) {
+            // If JSON.parse fails, leave action as 'none'
+            action = { tool: 'none', message: 'No action parsed' };
         }
     }
     return { thought, action, editBlocks };
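
For orientation, parseResponse now accepts either a structured TypeLLMResponse or a raw string. A minimal sketch of both paths, assuming parseEditBlocks returns an empty array when no edit blocks are present:

parseResponse('<thought>rename it</thought> {"tool": "writeFiles", "args": {"path": "a.ts"}}');
// raw-string path -> { thought: 'rename it', action: { tool: 'write_files', args: { path: 'a.ts' } }, editBlocks: [] }

parseResponse({ thought: 'done', tool: 'none', args: {}, message: 'All set', raw: '' });
// structured path -> { thought: 'done', action: { tool: 'none', message: 'All set' }, editBlocks: [] }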
@@ -156,12 +187,12 @@ export class Agent {
         const reflectionPrompt = buildReflectionPrompt({
             attempt,
             previousError: failed.map(f => f.error).join('\n'),
-            previousResponse: llmResponse,
+            previousResponse: llmResponse.raw || JSON.stringify(llmResponse),
             failedEdits: failed,
         });
         messages = [
             ...messages,
-            { role: 'assistant', content: llmResponse },
+            { role: 'assistant', content: llmResponse.raw || JSON.stringify(llmResponse) },
             { role: 'user', content: reflectionPrompt },
         ];
         continue;
@@ -177,7 +208,7 @@ export class Agent {
         const lintPrompt = buildLintErrorPrompt(file, lintResult.output);
         messages = [
             ...messages,
-            { role: 'assistant', content: llmResponse },
+            { role: 'assistant', content: llmResponse.raw || JSON.stringify(llmResponse) },
             { role: 'user', content: lintPrompt },
         ];
         continue;
@@ -192,7 +223,7 @@ export class Agent {
         const testPrompt = buildTestFailurePrompt(testResult.output);
         messages = [
             ...messages,
-            { role: 'assistant', content: llmResponse },
+            { role: 'assistant', content: llmResponse.raw || JSON.stringify(llmResponse) },
             { role: 'user', content: testPrompt },
         ];
         continue;
@@ -206,10 +237,10 @@ export class Agent {
         const diff = await this.git.diff();
         if (diff) {
             const commitMessage = await ui.spin('Generating commit message...', () => generateCommitMessage(diff, async (prompt) => {
-                const result = await this.generateFn([
+                const res = await this.generateFn([
                     { role: 'user', content: prompt },
                 ]);
-                return result;
+                return res.message || res.thought || res.raw || '';
             }));
             commitResult = await this.git.commit({
                 message: commitMessage,
@@ -270,9 +301,10 @@ Provide a brief summary that captures:
 1. Main topics discussed
 2. Key decisions made
 3. Important context for future messages`;
-    const summary = await generateFn([
+    const summaryRes = await generateFn([
         { role: 'user', content: summaryPrompt },
     ]);
+    const summary = summaryRes.message || summaryRes.thought || summaryRes.raw || 'No summary generated';
     return [
         history[0], // Keep first
         { role: 'system', content: `[Conversation Summary]\n${summary}` },
package/dist/lib/git.js CHANGED
@@ -255,7 +255,12 @@ export class GitManager {
     async rootDir() {
         try {
             const result = await this.git.revparse(['--show-toplevel']);
-            return result.trim();
+            const trimmed = result.trim();
+            // Ensure Windows paths use backslashes for tests that compare exact strings
+            if (process.platform === 'win32') {
+                return trimmed.replace(/\//g, '\\');
+            }
+            return trimmed;
         }
         catch {
             return null;
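
For context, git rev-parse --show-toplevel reports forward slashes even on Windows; the win32 branch above simply rewrites them:

// Sketch of the win32 branch's effect
'C:/Users/dev/repo'.replace(/\//g, '\\'); // => 'C:\\Users\\dev\\repo'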
package/dist/lib/shim.d.ts ADDED
@@ -0,0 +1,4 @@
+/**
+ * Invoke a simple-cli tool with arguments
+ */
+export declare function invoke(tool: string, args?: Record<string, any>): Promise<any>;
package/dist/lib/shim.js ADDED
@@ -0,0 +1,30 @@
+/**
+ * OpenClaw RPC Shim: allow scripts to call simple-cli tools
+ */
+import { execSync } from 'child_process';
+/**
+ * Invoke a simple-cli tool with arguments
+ */
+export async function invoke(tool, args = {}) {
+    // Determine path to simple-cli. We assume it's in the same project or installed.
+    // In production, we'd use the global 'simple' command.
+    const isWindows = process.platform === 'win32';
+    const cliCmd = 'simple'; // Or local path to dist/cli.js
+    try {
+        const cmd = `${cliCmd} --invoke-json "${tool}" "${JSON.stringify(args).replace(/"/g, '\\"')}"`;
+        const output = execSync(cmd, { encoding: 'utf-8', stdio: 'pipe' });
+        try {
+            return JSON.parse(output.trim());
+        }
+        catch {
+            return output.trim();
+        }
+    }
+    catch (error) {
+        throw new Error(`Tool invocation failed: ${error.stderr || error.message}`);
+    }
+}
+// Global attachment for 'node.invoke' style parity
+if (typeof global.invoke === 'undefined') {
+    global.invoke = invoke;
+}
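
A caller would use the shim roughly as follows; the tool name and argument shape here are illustrative assumptions, not confirmed by this diff:

import { invoke } from './shim.js';

// Shells out to: simple --invoke-json "grep" "{\"pattern\":\"TODO\",\"path\":\"src\"}"
const result = await invoke('grep', { pattern: 'TODO', path: 'src' });
console.log(result); // parsed JSON if the tool printed JSON, otherwise the raw text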
package/dist/lib/ui.js CHANGED
@@ -5,6 +5,7 @@
 import * as p from '@clack/prompts';
 import pc from 'picocolors';
 export { p as prompts };
+const NON_INTERACTIVE = process.env.VITEST === 'true' || process.env.TEST === 'true' || !process.stdin.isTTY;
 /**
  * UI Theme colors
  */
@@ -91,36 +92,60 @@ export async function spin(message, fn) {
  * Prompt for text input
  */
 export async function text(options) {
+    if (NON_INTERACTIVE)
+        return options.defaultValue ?? options.placeholder ?? '';
     return p.text(options);
 }
 /**
  * Prompt for password input
  */
 export async function password(options) {
+    if (NON_INTERACTIVE)
+        return '';
     return p.password(options);
 }
 /**
  * Prompt for confirmation
  */
 export async function confirm(options) {
+    if (NON_INTERACTIVE)
+        return options.initialValue ?? true;
     return p.confirm(options);
 }
 /**
  * Prompt for single selection
  */
 export async function select(options) {
+    if (NON_INTERACTIVE)
+        return options.initialValue ?? options.options[0].value;
     return p.select(options);
 }
 /**
  * Prompt for multi-selection
  */
 export async function multiselect(options) {
+    if (NON_INTERACTIVE)
+        return options.initialValues ?? [];
     return p.multiselect(options);
 }
 /**
  * Group related prompts together
  */
 export async function group(prompts, options) {
+    if (NON_INTERACTIVE) {
+        const result = {};
+        for (const k of Object.keys(prompts)) {
+            // Attempt to call each prompt function; because of NON_INTERACTIVE, each should return its default instead of prompting
+            try {
+                // eslint-disable-next-line @typescript-eslint/await-thenable
+                result[k] = await prompts[k]();
+            }
+            catch {
+                result[k] = undefined;
+            }
+        }
+        return result;
+    }
     return p.group(prompts, options);
 }
 /**
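
Because NON_INTERACTIVE is computed once at module load, a test has to set the environment before importing the module. A minimal sketch of the intended test behavior:

// Set env first, then load the module dynamically
process.env.TEST = 'true';
const ui = await import('./ui.js');
const ok = await ui.confirm({ message: 'Apply edits?', initialValue: true }); // -> true
const branch = await ui.text({ message: 'Branch name', defaultValue: 'main' }); // -> 'main'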
package/dist/mcp/manager.js CHANGED
@@ -149,7 +149,11 @@ export class MCPManager {
         else {
             throw new Error(`Invalid MCP server config for ${serverName}: missing command or url`);
         }
-        await client.connect(transport);
+        // Connect with a 5-second timeout
+        await Promise.race([
+            client.connect(transport),
+            new Promise((_, reject) => setTimeout(() => reject(new Error('Connection timeout')), 5000))
+        ]);
         state.client = client;
         state.transport = transport;
         state.status = MCPServerStatus.CONNECTED;
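
Note that Promise.race as written leaves the 5-second timer pending even after a successful connect. A reusable wrapper that clears it might look like this (a sketch; withTimeout is hypothetical, not part of this package):

function withTimeout(promise, ms, label = 'operation') {
    let timer;
    const timeout = new Promise((_, reject) => {
        timer = setTimeout(() => reject(new Error(`${label} timed out after ${ms}ms`)), ms);
    });
    // Whichever branch settles first, stop the timer
    return Promise.race([promise, timeout]).finally(() => clearTimeout(timer));
}
// usage: await withTimeout(client.connect(transport), 5000, 'MCP connect');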
package/dist/prompts/provider.js CHANGED
@@ -53,6 +53,7 @@ export class PromptProvider {
     }
     loadProjectRules(cwd) {
         const commonPaths = [
+            '.simple/workdir/AGENT.md',
             '.simple/AGENT.md',
             '.agent/AGENT.md',
             'AGENT.md',
package/dist/providers/index.d.ts CHANGED
@@ -1,15 +1,31 @@
 /**
- * Provider Bridge: Unified LLM interface via OpenAI SDK
- * Supports OpenAI, DeepSeek, Groq, and other OpenAI-compatible endpoints.
+ * Provider Bridge: Unified LLM interface via Vercel AI SDK
+ * Support for OpenAI, Anthropic, Google (Gemini), and custom endpoints.
  */
+import { type TypeLLMResponse } from '@stan-chen/typellm';
 export interface Message {
-    role: string;
+    role: 'user' | 'assistant' | 'system';
     content: string;
 }
 export interface Provider {
     name: string;
     model: string;
-    generateResponse: (systemPrompt: string, messages: Message[]) => Promise<string>;
+    generateResponse: (systemPrompt: string, messages: Message[]) => Promise<TypeLLMResponse>;
 }
-export declare const createProviderForModel: (model: string) => Provider;
+/**
+ * Structured output strategy:
+ * - Uses strong system prompts with explicit JSON format examples
+ * - Response parsing with jsonrepair (in cli.ts)
+ * - Format reminders in user messages (in context.ts)
+ *
+ * This approach works across ALL providers without hitting
+ * provider-specific schema limitations (e.g., OpenAI's additionalProperties requirement)
+ */
+/**
+ * Creates a provider instance using TypeLLM
+ */
+export declare const createProviderForModel: (modelId: string) => Provider;
+/**
+ * Creates the default provider
+ */
 export declare const createProvider: () => Provider;
package/dist/providers/index.js CHANGED
@@ -1,82 +1,93 @@
 /**
- * Provider Bridge: Unified LLM interface via OpenAI SDK
- * Supports OpenAI, DeepSeek, Groq, and other OpenAI-compatible endpoints.
+ * Provider Bridge: Unified LLM interface via Vercel AI SDK
+ * Support for OpenAI, Anthropic, Google (Gemini), and custom endpoints.
  */
-import OpenAI from 'openai';
-const getProviderConfig = () => {
-    // 1. OpenAI (Default)
-    if (process.env.OPENAI_API_KEY) {
-        return {
-            apiKey: process.env.OPENAI_API_KEY,
-            model: process.env.OPENAI_MODEL || 'gpt-4o'
-        };
+import { createTypeLLM } from '@stan-chen/typellm';
+/**
+ * Structured output strategy:
+ * - Uses strong system prompts with explicit JSON format examples
+ * - Response parsing with jsonrepair (in cli.ts)
+ * - Format reminders in user messages (in context.ts)
+ *
+ * This approach works across ALL providers without hitting
+ * provider-specific schema limitations (e.g., OpenAI's additionalProperties requirement)
+ */
+/**
+ * Creates a provider instance using TypeLLM
+ */
+export const createProviderForModel = (modelId) => {
+    let providerType = 'openai';
+    let actualModel = modelId;
+    let baseURL;
+    // Handle provider selection
+    if (modelId.startsWith('anthropic:')) {
+        actualModel = modelId.split(':')[1] || modelId;
+        providerType = 'anthropic';
     }
-    // 2. DeepSeek
-    if (process.env.DEEPSEEK_API_KEY) {
-        return {
-            apiKey: process.env.DEEPSEEK_API_KEY,
-            baseURL: 'https://api.deepseek.com/v1',
-            model: process.env.DEEPSEEK_MODEL || 'deepseek-chat'
-        };
+    else if (modelId.startsWith('google:') || modelId.startsWith('gemini:')) {
+        actualModel = modelId.split(':')[1] || modelId;
+        providerType = 'google';
     }
-    // 3. Groq
-    if (process.env.GROQ_API_KEY) {
-        return {
-            apiKey: process.env.GROQ_API_KEY,
-            baseURL: 'https://api.groq.com/openai/v1',
-            model: process.env.GROQ_MODEL || 'llama3-70b-8192'
-        };
+    else if (modelId.startsWith('openai:')) {
+        actualModel = modelId.split(':')[1] || modelId;
+        providerType = 'openai';
     }
-    // 4. Mistral
-    if (process.env.MISTRAL_API_KEY) {
-        return {
-            apiKey: process.env.MISTRAL_API_KEY,
-            baseURL: 'https://api.mistral.ai/v1',
-            model: process.env.MISTRAL_MODEL || 'mistral-large-latest'
-        };
+    else if (modelId.startsWith('claude') || (process.env.ANTHROPIC_API_KEY && !process.env.OPENAI_API_KEY)) {
+        providerType = 'anthropic';
     }
-    throw new Error('No supported API key found (OPENAI_API_KEY, DEEPSEEK_API_KEY, GROQ_API_KEY, MISTRAL_API_KEY)');
-};
-export const createProviderForModel = (model) => {
-    // Quick heuristic to determine provider for specific model overrides
-    // logic can be improved, but this assumes the environment variables set the *default* linkage
-    // If a specific model is requested (e.g. for MoE), we try to route it.
-    let config = getProviderConfig();
-    // Override config if model implies a different provider?
-    // For the sake of "Simple-CLI", we assume the default connected provider serves the requested model
-    // or we just use OpenAI SDK's flexibility.
-    if (model.includes('gpt'))
-        config = { ...config, apiKey: process.env.OPENAI_API_KEY, baseURL: undefined };
-    else if (model.includes('deepseek'))
-        config = { ...config, apiKey: process.env.DEEPSEEK_API_KEY, baseURL: 'https://api.deepseek.com/v1' };
-    if (!config.apiKey)
-        throw new Error(`Cannot route for model ${model} - missing API key`);
-    const client = new OpenAI({
-        apiKey: config.apiKey,
-        baseURL: config.baseURL
+    else if (modelId.startsWith('gemini') || (process.env.GEMINI_API_KEY && !process.env.OPENAI_API_KEY)) {
+        providerType = 'google';
+    }
+    else if (process.env.LITELLM_BASE_URL) {
+        providerType = 'litellm';
+        baseURL = process.env.LITELLM_BASE_URL;
+    }
+    else {
+        providerType = 'openai';
+    }
+    // Final check for the Google key mapping
+    if (providerType === 'google' && process.env.GEMINI_API_KEY && !process.env.GOOGLE_GENERATIVE_AI_API_KEY) {
+        process.env.GOOGLE_GENERATIVE_AI_API_KEY = process.env.GEMINI_API_KEY;
+    }
+    const llm = createTypeLLM({
+        provider: providerType,
+        model: actualModel,
+        baseURL: baseURL,
+        apiKey: providerType === 'openai' ? process.env.OPENAI_API_KEY :
+            providerType === 'google' ? process.env.GEMINI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY :
+                providerType === 'anthropic' ? process.env.ANTHROPIC_API_KEY :
+                    undefined,
+        temperature: 0
     });
     return {
-        name: 'openai-compatible',
-        model,
+        name: providerType,
+        model: actualModel,
         generateResponse: async (systemPrompt, messages) => {
             try {
-                const response = await client.chat.completions.create({
-                    model: model,
-                    messages: [
-                        { role: 'system', content: systemPrompt },
-                        ...messages.map(m => ({ role: m.role, content: m.content }))
-                    ]
-                });
-                return response.choices[0]?.message?.content || '';
+                const response = await llm.generate(systemPrompt, messages);
+                if ((process.env.DEBUG === 'true') && response)
+                    console.log(`[DEBUG] TypeLLM Response: ${JSON.stringify(response).substring(0, 300)}...`);
+                return response;
             }
             catch (e) {
-                return `Error calling LLM: ${e instanceof Error ? e.message : e}`; // Fail gracefully
+                const msg = `Error calling TypeLLM: ${e instanceof Error ? e.message : e}`;
+                return {
+                    thought: 'Error occurred during generation',
+                    tool: 'none',
+                    args: {},
+                    message: msg,
+                    raw: msg
+                };
             }
         }
     };
 };
+/**
+ * Creates the default provider
+ */
 export const createProvider = () => {
-    const config = getProviderConfig();
-    console.log(`🤖 Using model: ${config.model}`);
-    return createProviderForModel(config.model);
+    const isClaw = process.argv.includes('--claw') || process.argv.includes('-claw');
+    const model = (isClaw ? process.env.CLAW_MODEL : null) || process.env.OPENAI_MODEL || process.env.GEMINI_MODEL || 'gpt-4o-mini';
+    console.log(`🤖 Using TypeLLM with model: ${model}`);
    return createProviderForModel(model);
 };
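
Explicit provider:model prefixes win; otherwise name heuristics and environment keys decide. Expected mappings per the code above (a sketch of behavior, not additional API):

createProviderForModel('anthropic:claude-3-5-sonnet'); // name: 'anthropic', model: 'claude-3-5-sonnet'
createProviderForModel('google:gemini-1.5-pro');       // name: 'google', model: 'gemini-1.5-pro'
createProviderForModel('claude-3-opus');               // 'claude' prefix -> anthropic, model id kept as-is
createProviderForModel('gpt-4o-mini');                 // no prefix: env keys decide (ANTHROPIC/GEMINI keys, LITELLM_BASE_URL), default openai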
package/dist/providers/multi.d.ts CHANGED
@@ -4,8 +4,9 @@
  */
 import { type Provider, type Message } from './index.js';
 import type { Tier, TierConfig } from '../router.js';
+import type { TypeLLMResponse } from '@stan-chen/typellm';
 export interface MultiProvider {
     getProvider: (tier: Tier) => Provider;
-    generateWithTier: (tier: Tier, systemPrompt: string, messages: Message[]) => Promise<string>;
+    generateWithTier: (tier: Tier, systemPrompt: string, messages: Message[]) => Promise<TypeLLMResponse>;
 }
 export declare const createMultiProvider: (tierConfigs: Map<Tier, TierConfig>) => MultiProvider;
package/dist/registry.d.ts CHANGED
@@ -15,6 +15,11 @@ export interface Tool {
     specification?: string;
 }
 export type { Tool as ToolModule };
+/**
+ * Parses a tool definition from a Markdown file (.md) or string
+ * Supports YAML frontmatter as per OpenClaw PRD.
+ */
+export declare function getMeta(content: string, filename: string): any;
 export declare const loadTools: () => Promise<Map<string, Tool>>;
 export declare const loadAllTools: () => Promise<Map<string, Tool>>;
 export declare const getToolDefinitions: (tools: Map<string, Tool>) => string;
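
The new getMeta points at Markdown-defined tools with YAML frontmatter. A hypothetical definition it might parse (the frontmatter field names are assumptions; the OpenClaw PRD format is not shown in this diff):

// Hypothetical tool file, e.g. tools/word_count.md
const fileContent = [
    '---',
    'name: word_count',
    'description: Count words in a file',
    '---',
    'Counts whitespace-separated words in the given file.',
].join('\n');
const meta = getMeta(fileContent, 'word_count.md'); // declared to return any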