@stan-chen/simple-cli 0.2.5 → 0.2.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/engine.js CHANGED
@@ -3,7 +3,7 @@ import { existsSync } from 'fs';
 import { join, relative } from 'path';
 import { pathToFileURL } from 'url';
 import pc from 'picocolors';
-import { text, isCancel } from '@clack/prompts';
+import { text, isCancel, log, spinner } from '@clack/prompts';
 import { LearningManager } from './learnings.js';
 async function getRepoMap(cwd) {
     const files = [];
@@ -36,7 +36,14 @@ export class Context {
     }
     async buildPrompt(tools) {
         const repoMap = await getRepoMap(this.cwd);
-        const toolDefs = Array.from(tools.values()).map(t => `- ${t.name}: ${t.description}`).join('\n');
+        const toolDefs = Array.from(tools.values()).map(t => {
+            const schema = t.inputSchema;
+            if (schema && schema.shape) {
+                const args = Object.keys(schema.shape).join(', ');
+                return `- ${t.name}(${args}): ${t.description}`;
+            }
+            return `- ${t.name}: ${t.description}`;
+        }).join('\n');
         return `${this.skill.systemPrompt}\n\n## Tools\n${toolDefs}\n\n## Repository\n${repoMap}\n\n## Active Files\n${Array.from(this.activeFiles).map(f => relative(this.cwd, f)).join(', ')}`;
     }
 }
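Note: buildPrompt now surfaces argument names when a tool carries a zod-style schema (anything exposing inputSchema.shape). A minimal sketch of the resulting rendering, using a hypothetical tool object:

// Hypothetical tool shape; only the zod-style `inputSchema.shape` lookup is assumed.
const tool = {
    name: 'read_file',
    description: 'Read a file from the repo',
    inputSchema: { shape: { path: {}, encoding: {} } } // e.g. z.object({ ... }).shape
};
const args = Object.keys(tool.inputSchema.shape).join(', ');
console.log(`- ${tool.name}(${args}): ${tool.description}`);
// -> - read_file(path, encoding): Read a file from the repo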
@@ -76,7 +83,7 @@ export class Engine {
         await this.registry.loadProjectTools(ctx.cwd);
         while (true) {
             if (!input) {
-                if (!options.interactive)
+                if (!options.interactive || !process.stdout.isTTY)
                     break;
                 const res = await text({ message: pc.cyan('Chat') });
                 if (isCancel(res))
@@ -94,43 +101,81 @@ export class Engine {
                 prompt += `\n\n## Past Learnings\n${learnings.map(l => `- ${l}`).join('\n')}`;
             }
             const response = await this.llm.generate(prompt, ctx.history);
-            const { thought, tool, args, message } = response;
+            const { thought, tool, args, message, tools } = response;
             if (thought)
-                console.log(pc.dim(`💭 ${thought}`));
-            if (tool && tool !== 'none') {
-                const t = this.registry.tools.get(tool);
-                if (t) {
-                    console.log(pc.yellow(`⚙ Executing ${tool}...`));
-                    try {
-                        const result = await t.execute(args);
-                        // Reload tools if create_tool was used
-                        if (tool === 'create_tool') {
-                            await this.registry.loadProjectTools(ctx.cwd);
-                            console.log(pc.magenta('🔄 Tools reloaded.'));
+                log.info(pc.dim(thought));
+            // Determine execution list
+            const executionList = tools && tools.length > 0
+                ? tools
+                : (tool && tool !== 'none' ? [{ tool, args }] : []);
+            if (executionList.length > 0) {
+                let allExecuted = true;
+                for (const item of executionList) {
+                    const tName = item.tool;
+                    const tArgs = item.args;
+                    const t = this.registry.tools.get(tName);
+                    if (t) {
+                        const s = spinner();
+                        s.start(`Executing ${tName}...`);
+                        let toolExecuted = false;
+                        try {
+                            const result = await t.execute(tArgs);
+                            s.stop(`Executed ${tName}`);
+                            toolExecuted = true;
+                            // Reload tools if create_tool was used
+                            if (tName === 'create_tool') {
+                                await this.registry.loadProjectTools(ctx.cwd);
+                                log.success('Tools reloaded.');
+                            }
+                            // Add individual tool execution to history to keep context updated
+                            // We mock a single tool response for history consistency
+                            ctx.history.push({
+                                role: 'assistant',
+                                content: JSON.stringify({ thought: '', tool: tName, args: tArgs })
+                            });
+                            ctx.history.push({ role: 'user', content: `Result: ${JSON.stringify(result)}` });
+                            // --- Supervisor Loop (QA & Reflection) ---
+                            log.step(`[Supervisor] Verifying work from ${tName}...`);
+                            let qaPrompt = `Analyze the result of the tool execution: ${JSON.stringify(result)}. Did it satisfy the user's request: "${input || userHistory.pop()?.content}"? If specific files were mentioned (like flask app), check if they exist or look correct based on the tool output.`;
+                            if (tName === 'delegate_cli') {
+                                qaPrompt += " Since this was delegated to an external CLI, be extra critical. Does the output explicitly confirm file creation?";
+                            }
+                            const qaCheck = await this.llm.generate(qaPrompt, [...ctx.history, { role: 'user', content: qaPrompt }]);
+                            log.step(`[Supervisor] ${qaCheck.message || qaCheck.thought}`);
+                            if (qaCheck.message && qaCheck.message.toLowerCase().includes('fail')) {
+                                log.error(`[Supervisor] QA FAILED for ${tName}. Asking for retry...`);
+                                input = "The previous attempt failed. Please retry or fix the issue.";
+                                allExecuted = false;
+                                break; // Stop batch execution on failure
+                            }
+                            else {
+                                log.success('[Supervisor] QA PASSED. Work verified.');
+                                // Optional: Learnings can be aggregated or skipped for batch to save tokens/time
+                            }
                         }
-                        ctx.history.push({ role: 'assistant', content: JSON.stringify(response) });
-                        ctx.history.push({ role: 'user', content: `Result: ${JSON.stringify(result)}` });
-                        // Reflection: Learning loop
-                        const reflectPrompt = "Analyze the previous tool execution. What went well? What failed? Summarize as a concise learning point for future reference.";
-                        const reflection = await this.llm.generate(reflectPrompt, [...ctx.history, { role: 'user', content: reflectPrompt }]);
-                        if (reflection.message) {
-                            // Find the relevant task description
-                            const userHistory = ctx.history.filter(m => m.role === 'user' && !['Continue.', 'Fix the error.'].includes(m.content));
-                            const task = (input && !['Continue.', 'Fix the error.'].includes(input)) ? input : (userHistory[userHistory.length - 1]?.content || 'Task');
-                            await this.learningManager.add(task, reflection.message);
-                            console.log(pc.blue(`📝 Learning stored: ${reflection.message}`));
+                        catch (e) {
+                            if (!toolExecuted)
+                                s.stop(`Error executing ${tName}`);
+                            else
+                                log.error(`Error during verification: ${e.message}`);
+                            ctx.history.push({ role: 'user', content: `Error executing ${tName}: ${e.message}` });
+                            input = 'Fix the error.';
+                            allExecuted = false;
+                            break;
                         }
-                        input = 'The previous tool execution was successful. Proceed with the next step.';
-                        continue;
-                    }
-                    catch (e) {
-                        ctx.history.push({ role: 'user', content: `Error: ${e.message}` });
-                        input = 'Fix the error.';
-                        continue;
                     }
                 }
+                if (allExecuted) {
+                    input = 'The tool executions were verified. Proceed.';
+                }
+                continue;
+            }
+            if (message || response.raw) {
+                console.log();
+                console.log(pc.blue('Agent:'));
+                console.log(message || response.raw);
+                console.log();
             }
-            console.log(`\n${pc.green('🤖')} ${message || response.raw}\n`);
             ctx.history.push({ role: 'assistant', content: message || response.raw });
             input = undefined;
         }
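Note: the run loop now derives a batch of tool calls from either the new `tools` array or the legacy single `tool`/`args` pair, so older single-call responses keep working. A minimal standalone sketch of that derivation (the response objects are illustrative):

// Field names follow LLMResponse in llm.d.ts; the values are made up.
const legacy = { thought: '', tool: 'read_file', args: { path: 'a.txt' }, message: '', raw: '' };
const batched = { ...legacy, tools: [
    { tool: 'read_file', args: { path: 'a.txt' } },
    { tool: 'write_file', args: { path: 'b.txt' } }
] };
const toExecutionList = (r) => r.tools && r.tools.length > 0
    ? r.tools
    : (r.tool && r.tool !== 'none' ? [{ tool: r.tool, args: r.args }] : []);
console.log(toExecutionList(legacy));  // [{ tool: 'read_file', args: { path: 'a.txt' } }]
console.log(toExecutionList(batched)); // both calls, executed in order with per-call QA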
package/dist/llm.d.ts CHANGED
@@ -4,15 +4,21 @@ export interface LLMResponse {
     args: any;
     message?: string;
     raw: string;
+    tools?: {
+        tool: string;
+        args: any;
+    }[];
 }
+export type LLMConfig = {
+    provider: string;
+    model: string;
+    apiKey?: string;
+};
 export declare class LLM {
-    private config;
-    constructor(config: {
-        provider: string;
-        model: string;
-        apiKey?: string;
-    });
+    private configs;
+    constructor(config: LLMConfig | LLMConfig[]);
     generate(system: string, history: any[]): Promise<LLMResponse>;
+    private getEnvKey;
     private parse;
 }
 export declare const createLLM: (model?: string) => LLM;
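Note: with LLMConfig extracted into a named type, the constructor accepts either one config or an ordered failover chain. A minimal usage sketch against the declared signature (the import path and model names are assumptions):

import { LLM } from './llm.js'; // path assumed from the dist layout
const single = new LLM({ provider: 'openai', model: 'gpt-4o' });
const chain = new LLM([
    { provider: 'openai', model: 'gpt-4o' },
    { provider: 'anthropic', model: 'claude-3-7-sonnet-latest', apiKey: process.env.ANTHROPIC_API_KEY }
]);
const res = await chain.generate('You are a coding agent.', [{ role: 'user', content: 'hi' }]);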
package/dist/llm.js CHANGED
@@ -1,73 +1,179 @@
-import { spawn } from 'child_process';
-import { dirname, join } from 'path';
-import { fileURLToPath } from 'url';
+import { generateText } from 'ai';
+import { createOpenAI } from '@ai-sdk/openai';
+import { createAnthropic } from '@ai-sdk/anthropic';
+import { createGoogleGenerativeAI } from '@ai-sdk/google';
 import { jsonrepair } from 'jsonrepair';
-import fs from 'fs';
-const __dirname = dirname(fileURLToPath(import.meta.url));
 export class LLM {
-    config;
+    configs;
     constructor(config) {
-        this.config = config;
+        this.configs = Array.isArray(config) ? config : [config];
     }
     async generate(system, history) {
-        const payload = {
-            ...this.config,
-            messages: [{ role: 'system', content: system }, ...history],
-            api_key: this.config.apiKey || process.env.OPENAI_API_KEY || process.env.GEMINI_API_KEY || process.env.ANTHROPIC_API_KEY
-        };
-        return new Promise((resolve, reject) => {
-            // Find python bridge
-            let py = join(__dirname, 'anyllm.py');
-            if (!fs.existsSync(py))
-                py = join(process.cwd(), 'src/lib/anyllm.py'); // Fallback
-            // Try to find a working python command
-            const getPyCmd = () => {
-                const isWin = process.platform === 'win32';
-                // On Windows, preferred is 'python' or 'py' if they aren't store aliases
-                // On Linux/Mac, preferred is 'python3'
-                return isWin ? 'python' : 'python3';
-            };
-            const child = spawn(getPyCmd(), [py]);
-            let out = '';
-            let err = '';
-            child.stdout.on('data', d => out += d);
-            child.stderr.on('data', d => err += d);
-            child.on('close', code => {
-                if (code !== 0)
-                    return reject(new Error(err));
-                try {
-                    const res = JSON.parse(out);
-                    if (res.error)
-                        return reject(new Error(res.error));
-                    resolve(this.parse(res.content));
+        let lastError = null;
+        const lastUserMessage = history.filter(m => m.role === 'user').pop()?.content || '';
+        for (const config of this.configs) {
+            try {
+                const providerName = config.provider.toLowerCase();
+                // --- Meta-Orchestrator Delegation (Ralph Mode) ---
+                if (['codex', 'gemini', 'claude'].includes(providerName)) {
+                    console.log(`\n[Ralph] I found an expert for this! Delegating to specialized agent: ${providerName} CLI...`);
+                    // We return a TOOL call. This ensures the Engine sees an action was taken,
+                    // triggers the execution (which we will define in builtins), and then
+                    // triggers the QA/Reflection loop.
+                    return {
+                        thought: `Task is complex. Delegating to specialized agent: ${providerName}.`,
+                        tool: 'delegate_cli',
+                        args: {
+                            cli: providerName,
+                            task: lastUserMessage
+                        },
+                        message: '', // No message yet, the tool output will provide it
+                        raw: ''
+                    };
+                }
+                // --- Fallback: Internal API Logic ---
+                const modelName = config.model;
+                let model;
+                const apiKey = config.apiKey || this.getEnvKey(providerName);
+                if (providerName === 'openai') {
+                    model = createOpenAI({ apiKey })(modelName);
+                }
+                else if (providerName === 'anthropic') {
+                    model = createAnthropic({ apiKey });
+                    model = model(modelName);
+                }
+                else if (providerName === 'google' || providerName === 'gemini') { // This handles the API fallback when the CLI route isn't intended
+                    model = createGoogleGenerativeAI({ apiKey });
+                    model = model(modelName);
                 }
-                catch (e) {
-                    reject(e);
+                else {
+                    continue; // Skip unsupported
                 }
-            });
-            child.stdin.write(JSON.stringify(payload));
-            child.stdin.end();
-        });
+                const { text } = await generateText({
+                    model,
+                    system,
+                    messages: history,
+                });
+                return this.parse(text);
+            }
+            catch (e) {
+                lastError = e;
+                console.warn(`[LLM] Provider ${config.provider}:${config.model} failed, trying next...`);
+            }
+        }
+        throw new Error(`All LLM providers failed. Last error: ${lastError?.message}`);
+    }
+    getEnvKey(providerName) {
+        if (providerName === 'openai')
+            return process.env.OPENAI_API_KEY;
+        if (providerName === 'anthropic')
+            return process.env.ANTHROPIC_API_KEY;
+        if (providerName === 'google' || providerName === 'gemini')
+            return process.env.GOOGLE_API_KEY || process.env.GEMINI_API_KEY;
+        return undefined;
+    }
     parse(raw) {
-        try {
-            const repaired = jsonrepair(raw.trim().match(/\{[\s\S]*\}/)?.[0] || raw);
-            const p = JSON.parse(repaired);
-            return {
-                thought: p.thought || '',
-                tool: (p.tool || p.command || 'none').toLowerCase(),
-                args: p.args || p.parameters || {},
-                message: p.message || '',
-                raw
-            };
+        const tools = [];
+        let thought = '';
+        let message = '';
+        const rawTrimmed = raw.trim();
+        // 1. Attempt to extract multiple JSON objects
+        let braceBalance = 0;
+        let startIndex = -1;
+        let inString = false;
+        let escape = false;
+        for (let i = 0; i < rawTrimmed.length; i++) {
+            const char = rawTrimmed[i];
+            if (inString) {
+                if (escape) {
+                    escape = false;
+                }
+                else if (char === '\\') {
+                    escape = true;
+                }
+                else if (char === '"') {
+                    inString = false;
+                }
+                continue;
+            }
+            if (char === '"') {
+                inString = true;
+            }
+            else if (char === '{') {
+                if (braceBalance === 0) {
+                    startIndex = i;
+                }
+                braceBalance++;
+            }
+            else if (char === '}') {
+                braceBalance--;
+                if (braceBalance === 0 && startIndex !== -1) {
+                    const jsonStr = rawTrimmed.substring(startIndex, i + 1);
+                    try {
+                        const repaired = jsonrepair(jsonStr);
+                        const obj = JSON.parse(repaired);
+                        // Extract tool call
+                        if (obj.tool && obj.tool !== 'none') {
+                            tools.push({ tool: obj.tool.toLowerCase(), args: obj.args || obj.parameters || {} });
+                        }
+                        // Aggregate thought and message
+                        if (obj.thought)
+                            thought += (thought ? '\n' : '') + obj.thought;
+                        if (obj.message)
+                            message += (message ? '\n' : '') + obj.message;
+                    }
+                    catch (e) {
+                        // Ignore malformed blocks inside mixed content
+                    }
+                    startIndex = -1;
+                }
+            }
         }
-        catch {
-            return { thought: '', tool: 'none', args: {}, message: raw, raw };
+        // 2. Fallback: If no tools found via loop, try single block extraction (legacy behavior)
+        if (tools.length === 0) {
+            try {
+                const jsonPart = rawTrimmed.match(/\{[\s\S]*\}/)?.[0] || rawTrimmed;
+                const repaired = jsonrepair(jsonPart);
+                const p = JSON.parse(repaired);
+                return {
+                    thought: p.thought || '',
+                    tool: (p.tool || p.command || 'none').toLowerCase(),
+                    args: p.args || p.parameters || {},
+                    message: p.message || '',
+                    raw
+                };
+            }
+            catch {
+                return { thought: '', tool: 'none', args: {}, message: raw, raw };
+            }
         }
+        return {
+            thought: thought.trim(),
+            tool: tools[0]?.tool || 'none',
+            args: tools[0]?.args || {},
+            message: message.trim(),
+            raw,
+            tools
+        };
     }
 }
 export const createLLM = (model) => {
+    // Primary model
     const m = model || process.env.MODEL || 'openai:gpt-5.2-codex';
     const [p, n] = m.includes(':') ? m.split(':') : ['openai', m];
-    return new LLM({ provider: p, model: n });
+    // Define Failover Chain
+    const configs = [{ provider: p, model: n }];
+    // Add fallbacks if they aren't the primary
+    const fallbacks = [
+        { provider: 'anthropic', model: 'claude-3-7-sonnet-latest' },
+        { provider: 'google', model: 'gemini-2.0-flash-001' },
+        { provider: 'openai', model: 'gpt-4o' }
+    ];
+    for (const f of fallbacks) {
+        // Prevent duplicate provider/model combinations
+        if (!(f.provider === p && f.model === n)) {
+            configs.push(f);
+        }
+    }
+    return new LLM(configs);
 };
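Note: parse() now walks the completion with a string-aware brace balancer, so several JSON objects in one reply become a `tools` batch while `thought` and `message` fields are concatenated; the first call is mirrored into the legacy `tool`/`args` fields. A sketch of the expected mapping (the completion text is illustrative):

// Illustrative completion mixing prose with two tool-call objects.
const raw = `First inspect the file.
{"thought": "inspect", "tool": "read_file", "args": {"path": "a.txt"}}
then write the result:
{"tool": "write_file", "args": {"path": "b.txt"}, "message": "done"}`;
// Expected result of parse(raw):
// {
//   thought: 'inspect',
//   tool: 'read_file',               // first call, kept for single-tool callers
//   args: { path: 'a.txt' },
//   message: 'done',
//   tools: [
//     { tool: 'read_file', args: { path: 'a.txt' } },
//     { tool: 'write_file', args: { path: 'b.txt' } }
//   ],
//   raw
// }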
package/dist/mcp.js CHANGED
@@ -2,6 +2,7 @@ import { Client } from '@modelcontextprotocol/sdk/client/index.js';
 import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
 import { existsSync, readFileSync } from 'fs';
 import { join } from 'path';
+import { log } from '@clack/prompts';
 export class MCP {
     clients = new Map();
     async init() {
@@ -20,10 +21,10 @@ export class MCP {
             });
             await client.connect(transport);
             this.clients.set(name, client);
-            console.log(`✓ Connected to MCP: ${name}`);
+            log.success(`Connected to MCP: ${name}`);
         }
         catch (e) {
-            console.error(`✗ Failed to connect to MCP ${name}:`, e);
+            log.error(`Failed to connect to MCP ${name}: ${e}`);
         }
     }
 }
@@ -0,0 +1,23 @@
+export interface ScheduledTask {
+    id: string;
+    cron: string;
+    prompt: string;
+    description: string;
+    lastRun: number;
+    enabled: boolean;
+    failureCount: number;
+}
+export declare class Scheduler {
+    private static instance;
+    private tasks;
+    private filePath;
+    private constructor();
+    static getInstance(cwd?: string): Scheduler;
+    load(): Promise<void>;
+    save(): Promise<void>;
+    scheduleTask(cron: string, prompt: string, description: string): Promise<string>;
+    getDueTasks(): Promise<ScheduledTask[]>;
+    private shouldRun;
+    private matchCron;
+    markTaskRun(id: string, success: boolean): Promise<void>;
+}
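Note: this new declaration (the file header is missing from the diff) describes a singleton persisted under `.agent/scheduler.json`; the implementation follows below. A minimal usage sketch against the declared API (the import path, cron string, and prompts are illustrative):

import { Scheduler } from './scheduler.js'; // path assumed from the class name
const scheduler = Scheduler.getInstance(process.cwd());
const id = await scheduler.scheduleTask('0 9 * * 1-5', 'Summarize open issues', 'Weekday digest');
for (const task of await scheduler.getDueTasks()) {
    // ...run task.prompt through the engine, then record the outcome;
    // three consecutive failures auto-disable the task.
    await scheduler.markTaskRun(task.id, true);
}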
@@ -0,0 +1,145 @@
+import { readFile, writeFile, mkdir } from 'fs/promises';
+import { join, dirname } from 'path';
+import { existsSync } from 'fs';
+export class Scheduler {
+    static instance;
+    tasks = [];
+    filePath;
+    constructor(cwd) {
+        this.filePath = join(cwd, '.agent', 'scheduler.json');
+    }
+    static getInstance(cwd = process.cwd()) {
+        if (!Scheduler.instance) {
+            Scheduler.instance = new Scheduler(cwd);
+        }
+        return Scheduler.instance;
+    }
+    async load() {
+        if (existsSync(this.filePath)) {
+            try {
+                const content = await readFile(this.filePath, 'utf-8');
+                this.tasks = JSON.parse(content);
+            }
+            catch (e) {
+                console.error('Failed to load scheduler config:', e);
+                this.tasks = [];
+            }
+        }
+        else {
+            this.tasks = [];
+        }
+    }
+    async save() {
+        try {
+            const dir = dirname(this.filePath);
+            if (!existsSync(dir)) {
+                await mkdir(dir, { recursive: true });
+            }
+            await writeFile(this.filePath, JSON.stringify(this.tasks, null, 2));
+        }
+        catch (e) {
+            console.error('Failed to save scheduler config:', e);
+        }
+    }
+    async scheduleTask(cron, prompt, description) {
+        await this.load();
+        const id = Math.random().toString(36).substring(2, 9);
+        this.tasks.push({
+            id,
+            cron,
+            prompt,
+            description,
+            lastRun: Date.now(), // Set to now to avoid immediate catch-up
+            enabled: true,
+            failureCount: 0
+        });
+        await this.save();
+        return id;
+    }
+    async getDueTasks() {
+        await this.load();
+        const now = new Date();
+        const due = [];
+        for (const task of this.tasks) {
+            if (!task.enabled)
+                continue;
+            // Check catch-up (max 24h back)
+            if (this.shouldRun(task, now)) {
+                due.push(task);
+            }
+        }
+        return due;
+    }
+    shouldRun(task, now) {
+        // Start checking from lastRun + 1 minute
+        let cursor = new Date(task.lastRun + 60000);
+        // Safety: don't check more than 24 hours back
+        const cutoff = new Date(now.getTime() - 24 * 60 * 60 * 1000);
+        if (cursor < cutoff)
+            cursor = cutoff;
+        // Align cursor to second 0
+        cursor.setSeconds(0, 0);
+        // Iterate minute by minute up to now
+        while (cursor <= now) {
+            if (this.matchCron(task.cron, cursor)) {
+                return true;
+            }
+            cursor = new Date(cursor.getTime() + 60000);
+        }
+        return false;
+    }
+    matchCron(cron, date) {
+        const parts = cron.split(/\s+/);
+        if (parts.length < 5)
+            return false;
+        const [min, hour, day, month, dayWeek] = parts;
+        const check = (val, current) => {
+            if (val === '*')
+                return true;
+            // Handle lists: 1,2,3
+            if (val.includes(',')) {
+                return val.split(',').some(v => check(v, current));
+            }
+            // Handle steps: */5 or 1-10/2
+            if (val.includes('/')) {
+                const [base, step] = val.split('/');
+                const stepNum = parseInt(step);
+                if (isNaN(stepNum))
+                    return false;
+                if (base === '*')
+                    return current % stepNum === 0;
+                // Ranges with steps could be refined later if needed; simple */n is the common case
+                return check(base, current) && (current % stepNum === 0);
+            }
+            // Handle ranges: 1-5
+            if (val.includes('-')) {
+                const [start, end] = val.split('-').map(Number);
+                return current >= start && current <= end;
+            }
+            return parseInt(val) === current;
+        };
+        return check(min, date.getMinutes()) &&
+            check(hour, date.getHours()) &&
+            check(day, date.getDate()) &&
+            check(month, date.getMonth() + 1) &&
+            check(dayWeek, date.getDay());
+    }
+    async markTaskRun(id, success) {
+        await this.load();
+        const task = this.tasks.find(t => t.id === id);
+        if (task) {
+            task.lastRun = Date.now();
+            if (!success) {
+                task.failureCount++;
+                if (task.failureCount >= 3) {
+                    task.enabled = false;
+                    console.warn(`[Scheduler] Task '${task.description}' (${task.id}) has failed 3 times in a row. Auto-disabling (forgetting) it.`);
+                }
+            }
+            else {
+                task.failureCount = 0;
+            }
+            await this.save();
+        }
+    }
+}
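Note: matchCron supports a five-field subset: `*`, lists, ranges, and `/n` steps (a range with a step falls back to range-match plus `value % step === 0`). A small sketch of the semantics against a fixed date (expected results follow from the check() logic above):

// Standard 5-field order: minute hour day-of-month month day-of-week.
const date = new Date(2025, 0, 6, 9, 30); // Mon 2025-01-06 09:30
console.log(date.getMinutes(), date.getHours(), date.getDate(), date.getMonth() + 1, date.getDay());
// -> 30 9 6 1 1, so against the matcher:
//    '30 9 * * 1'   matches (minute, hour, and weekday all hit)
//    '*/15 * * * *' matches (30 % 15 === 0)
//    '0-29 9 * * *' does not match (30 falls outside the 0-29 range)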
@@ -0,0 +1,14 @@
+import { Server } from './server.js';
+export declare class RemoteWorker {
+    private server;
+    constructor(server: Server);
+    run(prompt: string): Promise<{
+        status: string;
+        message: string;
+        tool: string;
+    } | {
+        status: string;
+        message: any;
+        tool?: undefined;
+    }>;
+}
@@ -0,0 +1,9 @@
+export class RemoteWorker {
+    server;
+    constructor(server) {
+        this.server = server;
+    }
+    async run(prompt) {
+        return this.server.handle(prompt);
+    }
+}
@@ -0,0 +1,17 @@
+import { Engine } from '../engine.js';
+export declare class Server {
+    private engine;
+    constructor(engine?: Engine);
+    handle(input: string | {
+        tool_name: string;
+        args: any;
+    }): Promise<{
+        status: string;
+        message: string;
+        tool: string;
+    } | {
+        status: string;
+        message: any;
+        tool?: undefined;
+    }>;
+}
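Note: the new remote layer is deliberately thin; RemoteWorker forwards a prompt to Server.handle, which also accepts a direct `{ tool_name, args }` invocation per the declared union type. A minimal usage sketch (import paths are assumptions, and Server's behavior beyond its declaration is not shown in this diff):

import { Server } from './server.js';       // path assumed
import { RemoteWorker } from './worker.js'; // path assumed
const server = new Server();                // the engine argument is optional per the declaration
const worker = new RemoteWorker(server);
const viaPrompt = await worker.run('List the active files'); // identical to server.handle(prompt)
const viaTool = await server.handle({ tool_name: 'read_file', args: { path: 'a.txt' } });
console.log(viaPrompt.status, viaTool.status);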