ai-control-center 1.15.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +584 -0
- package/bin/aicc.js +772 -0
- package/lib/actions/approve.js +71 -0
- package/lib/actions/assign-project.js +132 -0
- package/lib/actions/browser-test.js +64 -0
- package/lib/actions/cleanup.js +174 -0
- package/lib/actions/debug.js +298 -0
- package/lib/actions/deploy.js +1229 -0
- package/lib/actions/fix-bug.js +134 -0
- package/lib/actions/new-feature.js +255 -0
- package/lib/actions/reject.js +307 -0
- package/lib/actions/review.js +706 -0
- package/lib/actions/status.js +47 -0
- package/lib/agents/browser-qa-agent.js +611 -0
- package/lib/agents/payment-agent.js +116 -0
- package/lib/agents/suggestion-agent.js +88 -0
- package/lib/cli.js +303 -0
- package/lib/config.js +243 -0
- package/lib/hub/hub-server.js +440 -0
- package/lib/hub/project-poller.js +75 -0
- package/lib/hub/skill-registry.js +89 -0
- package/lib/hub/state-aggregator.js +204 -0
- package/lib/index.js +471 -0
- package/lib/init/doctor.js +523 -0
- package/lib/init/presets.js +222 -0
- package/lib/init/skill-fetcher.js +77 -0
- package/lib/init/wizard.js +973 -0
- package/lib/integrations/codex-runner.js +128 -0
- package/lib/integrations/github-actions.js +248 -0
- package/lib/integrations/github-reporter.js +229 -0
- package/lib/integrations/screenshot-store.js +102 -0
- package/lib/openclaw/bridge.js +650 -0
- package/lib/openclaw/generate-skill.js +235 -0
- package/lib/openclaw/openclaw.json +64 -0
- package/lib/orchestrator/autonomous-loop.js +429 -0
- package/lib/orchestrator/thread-triggers.js +63 -0
- package/lib/roleplay/agent-messenger.js +75 -0
- package/lib/roleplay/discussion-threads.js +303 -0
- package/lib/roleplay/health-monitor.js +121 -0
- package/lib/roleplay/pm-agent.js +513 -0
- package/lib/roleplay/roleplay-config.js +25 -0
- package/lib/roleplay/room.js +164 -0
- package/lib/shared/action-runner.js +2330 -0
- package/lib/shared/event-bus.js +185 -0
- package/lib/slack/bot.js +378 -0
- package/lib/telegram/bot.js +416 -0
- package/lib/telegram/commands.js +1267 -0
- package/lib/telegram/keyboards.js +113 -0
- package/lib/telegram/notifications.js +247 -0
- package/lib/twitch/bot.js +354 -0
- package/lib/twitch/commands.js +302 -0
- package/lib/twitch/notifications.js +63 -0
- package/lib/utils/achievements.js +191 -0
- package/lib/utils/activity-log.js +182 -0
- package/lib/utils/agent-leaderboard.js +119 -0
- package/lib/utils/audit-logger.js +232 -0
- package/lib/utils/codebase-context.js +288 -0
- package/lib/utils/codebase-indexer.js +381 -0
- package/lib/utils/config-schema.js +230 -0
- package/lib/utils/context-compressor.js +172 -0
- package/lib/utils/correlation.js +63 -0
- package/lib/utils/cost-tracker.js +423 -0
- package/lib/utils/cron-scheduler.js +53 -0
- package/lib/utils/db-adapter.js +293 -0
- package/lib/utils/display.js +272 -0
- package/lib/utils/errors.js +116 -0
- package/lib/utils/format.js +134 -0
- package/lib/utils/intent-engine.js +464 -0
- package/lib/utils/mcp-client.js +238 -0
- package/lib/utils/model-ab-test.js +164 -0
- package/lib/utils/notify.js +122 -0
- package/lib/utils/persona-loader.js +80 -0
- package/lib/utils/pipeline-lock.js +73 -0
- package/lib/utils/pipeline.js +214 -0
- package/lib/utils/plugin-runner.js +234 -0
- package/lib/utils/rate-limiter.js +84 -0
- package/lib/utils/rbac.js +74 -0
- package/lib/utils/runner.js +1809 -0
- package/lib/utils/security.js +191 -0
- package/lib/utils/self-healer.js +144 -0
- package/lib/utils/skill-loader.js +255 -0
- package/lib/utils/spinner.js +132 -0
- package/lib/utils/stage-queue.js +50 -0
- package/lib/utils/state-machine.js +89 -0
- package/lib/utils/status-bar.js +327 -0
- package/lib/utils/token-estimator.js +101 -0
- package/lib/utils/ux-analyzer.js +101 -0
- package/lib/utils/webhook-emitter.js +83 -0
- package/lib/web/public/css/styles.css +417 -0
- package/lib/web/public/dark-mode.js +44 -0
- package/lib/web/public/hub/kanban.html +206 -0
- package/lib/web/public/index.html +45 -0
- package/lib/web/public/js/app.js +71 -0
- package/lib/web/public/js/ask.js +110 -0
- package/lib/web/public/js/dashboard.js +165 -0
- package/lib/web/public/js/deploy.js +72 -0
- package/lib/web/public/js/feature.js +79 -0
- package/lib/web/public/js/health.js +65 -0
- package/lib/web/public/js/logs.js +93 -0
- package/lib/web/public/js/review.js +123 -0
- package/lib/web/public/js/ws-client.js +82 -0
- package/lib/web/public/office/css/office.css +678 -0
- package/lib/web/public/office/index.html +148 -0
- package/lib/web/public/office/js/achievements-ui.js +117 -0
- package/lib/web/public/office/js/character.js +1056 -0
- package/lib/web/public/office/js/chat-bubbles.js +177 -0
- package/lib/web/public/office/js/cost-overlay.js +123 -0
- package/lib/web/public/office/js/day-night.js +68 -0
- package/lib/web/public/office/js/effects.js +632 -0
- package/lib/web/public/office/js/engine.js +146 -0
- package/lib/web/public/office/js/feature-ticket.js +216 -0
- package/lib/web/public/office/js/hub-client.js +60 -0
- package/lib/web/public/office/js/main.js +1757 -0
- package/lib/web/public/office/js/office-layout.js +1524 -0
- package/lib/web/public/office/js/pathfinding.js +144 -0
- package/lib/web/public/office/js/pixel-sprites.js +1454 -0
- package/lib/web/public/office/js/progress-bars.js +117 -0
- package/lib/web/public/office/js/replay.js +191 -0
- package/lib/web/public/office/js/sound-effects.js +91 -0
- package/lib/web/public/office/js/sprite-renderer.js +211 -0
- package/lib/web/public/office/js/stamina-system.js +89 -0
- package/lib/web/public/office/js/ui.js +107 -0
- package/lib/web/public/onboarding/index.html +243 -0
- package/lib/web/public/timeline/index.html +195 -0
- package/lib/web/routes/api.js +499 -0
- package/lib/web/routes/logs.js +20 -0
- package/lib/web/routes/metrics.js +99 -0
- package/lib/web/server.js +183 -0
- package/lib/web/ws/handler.js +65 -0
- package/package.json +67 -0
- package/templates/agent-architect.md +69 -0
- package/templates/agent-gemini-pm.md +49 -0
- package/templates/agent-gemini-reviewer.md +52 -0
- package/templates/copilot-instructions.md +36 -0
- package/templates/pipelines/mobile.json +27 -0
- package/templates/pipelines/nodejs-api.json +27 -0
- package/templates/pipelines/python.json +27 -0
- package/templates/pipelines/react.json +27 -0
- package/templates/pipelines/salesforce.json +27 -0
- package/templates/role-gemini.md +97 -0
- package/templates/skill-architect.md +114 -0
- package/templates/skill-browser-qa.md +50 -0
- package/templates/skill-bug-from-qa.md +58 -0
- package/templates/skill-chatbot.md +93 -0
- package/templates/skill-implement.md +78 -0
- package/templates/skill-openclaw.md +174 -0
- package/templates/skill-payment.md +110 -0
- package/templates/skill-pm-spec.md +77 -0
- package/templates/skill-requirement-capture.md +97 -0
- package/templates/skill-review.md +108 -0
- package/templates/skill-reviewer-qa.md +44 -0
- package/templates/skill-suggestion.md +45 -0
- package/templates/skill-template.md +142 -0
|
@@ -0,0 +1,1809 @@
|
|
|
1
|
+
import { spawn } from 'child_process';
|
|
2
|
+
import { createHash } from 'crypto';
|
|
3
|
+
import { existsSync, mkdirSync, readFileSync, unlinkSync, writeFileSync } from 'fs';
|
|
4
|
+
import { homedir, tmpdir } from 'os';
|
|
5
|
+
import { resolve } from 'path';
|
|
6
|
+
import { env, getPipelineStage, getOllamaConfig } from '../config.js';
|
|
7
|
+
import { bus } from '../shared/event-bus.js';
|
|
8
|
+
import { logActivityLine, logRawBlock } from './activity-log.js';
|
|
9
|
+
import { trackUsage } from './cost-tracker.js';
|
|
10
|
+
import { AITimeoutError, CircuitOpenError, ErrorCodes, ResponseValidationError } from './errors.js';
|
|
11
|
+
import { atomicWriteSync, getRootDir, getWorkflowDir } from './pipeline.js';
|
|
12
|
+
import { compressForStage } from './context-compressor.js';
|
|
13
|
+
import { statusBar } from './status-bar.js';
|
|
14
|
+
|
|
15
|
+
// ─── Default Timeouts Per Provider ────────────────────────────────────────────

// Wall-clock subprocess timeouts (ms) per provider, used when the caller does
// not supply an explicit timeout. Values reflect how long each CLI typically
// needs before it is considered hung.
const DEFAULT_TIMEOUTS = {
  gemini: 180_000, // 180s
  claude: 300_000, // 300s
  copilot: 600_000, // 600s
  ollama: 120_000, // 120s
};
|
|
23
|
+
|
|
24
|
+
// ─── Token Limits Per Stage ───────────────────────────────────────────────────
|
|
25
|
+
|
|
26
|
+
// Built-in max-token defaults for each pipeline stage; used when the loaded
// config does not provide a `tokenLimits` override.
const TOKEN_LIMITS = {
  pm: 4000,
  explore: 2000,
  arch: 6000,
  impl: 8000,
  review: 4000,
  chat: 1000,
};

/**
 * Resolve the token limit for a pipeline stage.
 * Precedence: config `models.tokenLimits[stage]` → TOKEN_LIMITS[stage] → 8000.
 * @param {string} stage - Pipeline stage name (e.g. 'pm', 'impl').
 * @returns {number} Token limit for the stage.
 */
function getTokenLimit(stage) {
  let fromConfig;
  try {
    fromConfig = _modelsConfig()?.tokenLimits?.[stage];
  } catch { /* config not loaded yet — fall through to built-in defaults */ }
  if (fromConfig) return fromConfig;
  return TOKEN_LIMITS[stage] || 8000;
}
|
|
42
|
+
|
|
43
|
+
// ─── Subprocess Timeout Wrapper ───────────────────────────────────────────────
|
|
44
|
+
|
|
45
|
+
/**
 * Run a command with an independent timeout watchdog.
 * If the timeout fires: SIGTERM → wait 5s → SIGKILL.
 * For GEMINI runs, an additional output watchdog kills the process when it
 * produces no output for `models.gemini.outputTimeout` ms (default 60s).
 * Throws AITimeoutError on either timeout.
 *
 * @param {string} cmd - Command to run
 * @param {string[]} args - Command arguments
 * @param {object} opts - spawn options (cwd, env, shell, stdin, etc.)
 * @param {number} timeoutMs - Timeout in milliseconds
 * @param {string} agent - Agent label for logging (e.g. 'GEMINI')
 * @returns {Promise<{stdout: string, stderr: string, code: number}>}
 */
export function runWithTimeout(cmd, args, opts, timeoutMs, agent = 'UNKNOWN') {
  return new Promise((resolvePromise, reject) => {
    const proc = spawn(cmd, args, {
      cwd: opts.cwd || getRootDir(),
      shell: opts.shell !== false,
      stdio: ['pipe', 'pipe', 'pipe'],
      env: opts.env || process.env,
    });

    let stdout = '';
    let stderr = '';
    let settled = false;
    let lastOutputTime = Date.now();

    if (proc.stdout) proc.stdout.on('data', d => { stdout += d.toString(); lastOutputTime = Date.now(); });
    if (proc.stderr) proc.stderr.on('data', d => { stderr += d.toString(); lastOutputTime = Date.now(); });

    // Write stdin if provided; always close it so the child sees EOF.
    if (opts.stdin && proc.stdin) {
      proc.stdin.write(opts.stdin);
      proc.stdin.end();
    } else if (proc.stdin) {
      proc.stdin.end();
    }

    // Escalate SIGTERM → SIGKILL after 5s. unref() the pending kill timer so
    // it does not keep the Node event loop alive after the promise settles
    // (previously these timers pinned the process for up to 5 extra seconds).
    const escalateToSigkill = () => {
      const t = setTimeout(() => {
        try { proc.kill('SIGKILL'); } catch { /* already dead */ }
      }, 5000);
      if (typeof t.unref === 'function') t.unref();
    };

    const settle = (fn, val) => {
      if (!settled) {
        settled = true;
        clearTimeout(timer);
        if (outputWatchdog) clearInterval(outputWatchdog);
        fn(val);
      }
    };

    // Main timeout watchdog
    const timer = setTimeout(() => {
      if (settled) return;
      logActivityLine(agent, `TIMEOUT after ${Math.round(timeoutMs / 1000)}s — killing process`);
      try { proc.kill('SIGTERM'); } catch { /* */ }
      escalateToSigkill();
      settle(reject, new AITimeoutError(agent, timeoutMs, cmd));
    }, timeoutMs);

    // Gemini-specific output watchdog: kill if no output for 60s (configurable
    // via models.gemini.outputTimeout).
    const outputTimeoutMs = (agent === 'GEMINI')
      ? (() => { try { return _modelsConfig()?.gemini?.outputTimeout || 60000; } catch { return 60000; } })()
      : 0;
    const outputWatchdog = outputTimeoutMs > 0
      ? setInterval(() => {
          if (settled) return;
          if (Date.now() - lastOutputTime > outputTimeoutMs) {
            logActivityLine(agent, `No output for ${Math.round(outputTimeoutMs / 1000)}s — process appears hung, killing`);
            try { proc.kill('SIGTERM'); } catch { /* */ }
            escalateToSigkill();
            // Report the watchdog threshold that actually fired, not the main
            // timeout (previously this misreported timeoutMs).
            settle(reject, new AITimeoutError(agent, outputTimeoutMs, `${cmd} (output watchdog)`));
          }
        }, 5000)
      : null;

    proc.on('close', code => {
      settle(resolvePromise, { stdout: stdout.trim(), stderr: stderr.trim(), code: code ?? 0 });
    });
    proc.on('error', err => settle(reject, err));
  });
}
|
|
125
|
+
|
|
126
|
+
// ─── Circuit Breaker Per Provider ─────────────────────────────────────────────
|
|
127
|
+
|
|
128
|
+
// States of the standard closed/open/half-open circuit-breaker pattern.
const CIRCUIT_STATES = { CLOSED: 'CLOSED', OPEN: 'OPEN', HALF_OPEN: 'HALF_OPEN' };
// Trip the breaker after this many failures inside the rolling window.
const CIRCUIT_FAILURE_THRESHOLD = 3;
const CIRCUIT_WINDOW_MS = 10 * 60 * 1000; // 10 minutes
const CIRCUIT_OPEN_DURATION_MS = 5 * 60 * 1000; // 5 minutes
const CIRCUIT_REOPEN_DURATION_MS = 10 * 60 * 1000; // 10 minutes on second open
|
|
133
|
+
|
|
134
|
+
/**
 * Per-provider circuit breaker with on-disk persistence.
 *
 * Standard CLOSED → OPEN → HALF_OPEN pattern: after CIRCUIT_FAILURE_THRESHOLD
 * failures within CIRCUIT_WINDOW_MS the circuit opens and requests are skipped
 * for a cooldown. After the cooldown, one probe request is allowed (HALF_OPEN);
 * success closes the circuit, a failed probe re-opens it with the longer
 * CIRCUIT_REOPEN_DURATION_MS cooldown. State is persisted to
 * circuit-breakers.json in the workflow dir so it survives restarts.
 */
class CircuitBreaker {
  constructor() {
    // provider name → { state, failures: timestamp[], openedAt, openDurationMs }
    this._breakers = new Map();
    this._loaded = false; // lazy-load guard for the persisted state file
  }

  // Path of the persistence file, or null when the workflow dir is unavailable.
  _filePath() {
    try { return resolve(getWorkflowDir(), 'circuit-breakers.json'); }
    catch { return null; }
  }

  // Load persisted breaker state once per process; corrupt/missing files ignored.
  _load() {
    if (this._loaded) return;
    this._loaded = true;
    const fp = this._filePath();
    if (!fp || !existsSync(fp)) return;
    try {
      const data = JSON.parse(readFileSync(fp, 'utf8'));
      for (const [provider, state] of Object.entries(data)) {
        this._breakers.set(provider, state);
      }
    } catch { /* non-fatal */ }
  }

  // Write all breaker state to disk atomically; persistence is best-effort.
  _persist() {
    const fp = this._filePath();
    if (!fp) return;
    try {
      const data = {};
      for (const [provider, state] of this._breakers) data[provider] = state;
      atomicWriteSync(fp, JSON.stringify(data, null, 2));
    } catch { /* non-fatal */ }
  }

  // Get (lazily creating) the mutable state record for a provider.
  _getState(provider) {
    this._load();
    if (!this._breakers.has(provider)) {
      this._breakers.set(provider, {
        state: CIRCUIT_STATES.CLOSED,
        failures: [],
        openedAt: null,
        openDurationMs: CIRCUIT_OPEN_DURATION_MS,
      });
    }
    return this._breakers.get(provider);
  }

  /**
   * Whether a request to `provider` may proceed now.
   * CLOSED → yes. OPEN → no, unless the cooldown elapsed, in which case the
   * breaker transitions to HALF_OPEN and allows one probe. HALF_OPEN → yes.
   * @param {string} provider
   * @returns {boolean}
   */
  canExecute(provider) {
    const cb = this._getState(provider);
    if (cb.state === CIRCUIT_STATES.CLOSED) return true;
    if (cb.state === CIRCUIT_STATES.OPEN) {
      const elapsed = Date.now() - cb.openedAt;
      if (elapsed >= cb.openDurationMs) {
        cb.state = CIRCUIT_STATES.HALF_OPEN;
        logActivityLine('SYSTEM', `Circuit breaker HALF_OPEN for ${provider} — testing with one request`);
        this._persist();
        return true;
      }
      return false;
    }
    // HALF_OPEN — allow one request
    return true;
  }

  /**
   * Record a successful request: fully reset the breaker to CLOSED and
   * restore the shorter first-open cooldown.
   * @param {string} provider
   */
  recordSuccess(provider) {
    const cb = this._getState(provider);
    cb.state = CIRCUIT_STATES.CLOSED;
    cb.failures = [];
    cb.openedAt = null;
    cb.openDurationMs = CIRCUIT_OPEN_DURATION_MS;
    this._persist();
  }

  /**
   * Record a failed request. A failure while HALF_OPEN immediately re-opens
   * the circuit with the longer cooldown; otherwise failures accumulate in a
   * rolling window and trip the breaker at CIRCUIT_FAILURE_THRESHOLD.
   * @param {string} provider
   */
  recordFailure(provider) {
    const cb = this._getState(provider);
    const now = Date.now();

    if (cb.state === CIRCUIT_STATES.HALF_OPEN) {
      cb.state = CIRCUIT_STATES.OPEN;
      cb.openedAt = now;
      cb.openDurationMs = CIRCUIT_REOPEN_DURATION_MS;
      logActivityLine('SYSTEM', `Circuit breaker re-OPEN for ${provider} (half-open test failed) — ${CIRCUIT_REOPEN_DURATION_MS / 60000}min cooldown`);
      this._persist();
      return;
    }

    // Prune old failures outside window
    cb.failures = cb.failures.filter(t => now - t < CIRCUIT_WINDOW_MS);
    cb.failures.push(now);

    if (cb.failures.length >= CIRCUIT_FAILURE_THRESHOLD) {
      cb.state = CIRCUIT_STATES.OPEN;
      cb.openedAt = now;
      cb.openDurationMs = CIRCUIT_OPEN_DURATION_MS;
      logActivityLine('SYSTEM', `Circuit breaker OPEN for ${provider} — ${cb.failures.length} failures in ${CIRCUIT_WINDOW_MS / 60000}min — skipping for ${CIRCUIT_OPEN_DURATION_MS / 60000}min`);
      this._persist();
    }
  }

  /**
   * Read-only snapshot for status displays. Also advances an expired OPEN
   * circuit to HALF_OPEN in memory (note: not persisted here).
   * @param {string} provider
   * @returns {{state: string, failures: number, openedAt: string|null}}
   */
  getState(provider) {
    const cb = this._getState(provider);
    // Check if open circuit has expired
    if (cb.state === CIRCUIT_STATES.OPEN && cb.openedAt) {
      if (Date.now() - cb.openedAt >= cb.openDurationMs) {
        cb.state = CIRCUIT_STATES.HALF_OPEN;
      }
    }
    return {
      state: cb.state,
      failures: cb.failures.length,
      openedAt: cb.openedAt ? new Date(cb.openedAt).toISOString() : null,
    };
  }
}

// Process-wide singleton shared by all runner call sites.
export const circuitBreaker = new CircuitBreaker();
|
|
250
|
+
|
|
251
|
+
// ─── Response Validation ──────────────────────────────────────────────────────
|
|
252
|
+
|
|
253
|
+
/**
 * Validate an AI response for completeness and quality.
 * A length gate (≥ 100 chars after trim) is applied to every stage; 'pm'/'spec'
 * and 'review' additionally require their expected sections.
 * @param {string} stage - Pipeline stage ('pm', 'spec', 'review', ...).
 * @param {string} response - Raw AI output to validate.
 * @returns {{valid: boolean, reason: string}}
 */
export function validateResponse(stage, response) {
  const tooShort = !response || response.trim().length < 100;
  if (tooShort) {
    return { valid: false, reason: 'Response too short (< 100 chars) — likely empty or error-only' };
  }

  switch (stage) {
    case 'pm':
    case 'spec': {
      const mentionsStories = /user stor(y|ies)/i.test(response);
      const mentionsAcceptance = /acceptance criteria/i.test(response);
      if (!mentionsStories && !mentionsAcceptance) {
        return { valid: false, reason: 'Missing expected PM sections (User Stories, Acceptance Criteria)' };
      }
      break;
    }
    case 'review': {
      const hasVerdict = /\b(PASS|FAIL|NEEDS.?CHANGES|APPROVED|REJECTED)\b/i.test(response);
      if (!hasVerdict) {
        return { valid: false, reason: 'Missing verdict section (PASS/FAIL/NEEDS_CHANGES)' };
      }
      break;
    }
    default:
      break;
  }

  return { valid: true, reason: 'ok' };
}
|
|
279
|
+
|
|
280
|
+
// ─── AI Usage Limit Detection ─────────────────────────────────────────────────
|
|
281
|
+
//
|
|
282
|
+
// Two distinct error classes require different responses:
|
|
283
|
+
//
|
|
284
|
+
// RATELIMIT — temporary (seconds/minutes). Fix: wait ~65 s, retry SAME model.
|
|
285
|
+
// Examples: HTTP 429, gRPC RESOURCE_EXHAUSTED, overloaded.
|
|
286
|
+
//
|
|
287
|
+
// CAPACITY — daily quota exhausted (hours). Fix: downgrade model immediately.
|
|
288
|
+
// Examples: "You have exhausted your capacity... reset after 20h53m"
|
|
289
|
+
//
|
|
290
|
+
// The system always starts with the best model in the chain and automatically
|
|
291
|
+
// falls back to the next model when capacity is exhausted.
|
|
292
|
+
|
|
293
|
+
// Patterns for TEMPORARY rate limits — wait + retry same model
|
|
294
|
+
const RATELIMIT_PATTERNS = [
  /(?<![\w:/])429(?!:\d)/, // HTTP 429 — NOT :429:18 line numbers in stack traces
  /too many requests/i,
  /rate.{0,5}limit.{0,10}exceeded/i,
  /rate_limit_error/i, // Claude API error name
  /overloaded_error/i, // Claude API error name
  /model.{0,15}overloaded/i,
];

// Patterns for CAPACITY EXHAUSTION — skip wait, downgrade model immediately
const CAPACITY_PATTERNS = [
  /no capacity available for model/i, // Gemini: "No capacity available for model X on the server"
  /MODEL_CAPACITY_EXHAUSTED/i, // Gemini gRPC reason code
  /RESOURCE_EXHAUSTED/i, // Gemini/gRPC: model capacity or quota exhausted
  /retryablequotaerror/i, // Gemini CLI error class after internal retries exhausted
  /max attempts reached/i, // Gemini CLI: gave up internally — no point retrying ourselves
  /exhausted your capacity/i,
  /quota will reset after \d+h/i, // "reset after 20h53m" — multi-hour reset
  /quota[_ ]exceeded/i,
  /quota has been exceeded/i,
  /you.{0,10}(ve|have) exceeded/i, // "You've exceeded your quota"
  /usage limit reached/i,
  /usage limit exceeded/i,
  /daily (usage |quota )?limit/i,
  /model.{0,15}capacity/i, // "model capacity exhausted"
  /you have no quota/i, // Copilot: "402 You have no quota"
  /402 .{0,20}no quota/i, // Copilot: HTTP 402 no quota
];

/**
 * Scan output line-by-line for limit errors.
 *
 * type = 'capacity'  → daily quota hit, downgrade model immediately
 * type = 'ratelimit' → temporary rate limit, wait and retry same model
 * type = null        → no limit error found
 *
 * @param {string} output - Raw CLI stdout/stderr to scan.
 * @returns {{type: 'capacity'|'ratelimit'|null, line: string}} the trimmed
 *   matching line and its classification.
 */
function detectLimit(output) {
  for (const raw of output.split('\n')) {
    // Capacity is checked first — it is the more specific / severe case.
    if (CAPACITY_PATTERNS.some(p => p.test(raw))) {
      return { type: 'capacity', line: raw.trim() };
    }
    if (RATELIMIT_PATTERNS.some(p => p.test(raw))) {
      return { type: 'ratelimit', line: raw.trim() };
    }
  }
  return { type: null, line: '' };
}
|
|
339
|
+
|
|
340
|
+
// ─── Model Availability Cache ────────────────────────────────────────────────
|
|
341
|
+
//
|
|
342
|
+
// In-memory cache of models that are known to be unavailable.
|
|
343
|
+
// Populated when a run fails with a detected limit error.
|
|
344
|
+
// Pre-flight check: isModelAvailable() is called BEFORE running any AI.
|
|
345
|
+
//
|
|
346
|
+
// TTLs:
|
|
347
|
+
// capacity → 4 hours (daily quota reset takes hours; skip completely)
|
|
348
|
+
// ratelimit → 90 seconds (short burst; by next attempt, usually clear)
|
|
349
|
+
//
|
|
350
|
+
// Persistence: bans are written to .ai-workflow/model-bans.json so a bot restart
|
|
351
|
+
// doesn't immediately retry an exhausted model (e.g. Gemini 4h quota ban).
|
|
352
|
+
//
|
|
353
|
+
// In-memory ban list populated by banModel() and consulted by isModelAvailable().
const _modelBans = new Map(); // model → { type, bannedAt, ttlMs }
const CAPACITY_BAN_TTL = 4 * 60 * 60 * 1000; // 4 hours
const RATELIMIT_BAN_TTL = 90 * 1000; // 90 seconds

// Lazy-load flag — bans are read from disk on the first call to banModel/isModelAvailable
let _bansLoaded = false;
|
|
359
|
+
|
|
360
|
+
/**
 * Absolute path of the persisted ban file (model-bans.json in the workflow
 * dir), or null when the workflow directory cannot be resolved — in which
 * case ban persistence is silently disabled.
 */
function _getBansFilePath() {
  try {
    return resolve(getWorkflowDir(), 'model-bans.json');
  } catch {
    return null;
  }
}
|
|
364
|
+
|
|
365
|
+
// Merge persisted bans from model-bans.json into _modelBans.
// Runs at most once per process (guarded by _bansLoaded). Entries whose TTL
// has already expired are skipped; a missing or corrupt file is ignored.
function _loadBansFromDisk() {
  if (_bansLoaded) return;
  _bansLoaded = true;
  const filePath = _getBansFilePath();
  if (!filePath || !existsSync(filePath)) return;
  try {
    const saved = JSON.parse(readFileSync(filePath, 'utf8'));
    const now = Date.now();
    let n = 0; // count of still-active bans restored
    for (const [model, ban] of Object.entries(saved)) {
      if (now - ban.bannedAt < ban.ttlMs) { // skip expired
        _modelBans.set(model, ban);
        n++;
      }
    }
    if (n > 0) logActivityLine('RUNNER', `Loaded ${n} persisted model ban(s) from disk`);
  } catch { /* non-fatal — corrupt file */ }
}
|
|
383
|
+
|
|
384
|
+
// Write the current ban map to disk (atomic write) so bans survive a restart.
// Best-effort: failures (and a missing workflow dir) are silently ignored.
function _persistBansToDisk() {
  const filePath = _getBansFilePath();
  if (!filePath) return;
  try {
    const plain = {};
    for (const [model, ban] of _modelBans) plain[model] = ban;
    atomicWriteSync(filePath, JSON.stringify(plain, null, 2));
  } catch { /* non-fatal */ }
}
|
|
393
|
+
|
|
394
|
+
/**
 * Mark a model as temporarily unavailable after a detected limit error.
 * Capacity bans last 4h, rate-limit bans 90s; the ban is persisted to disk
 * so it survives a bot restart.
 * @param {string} model - Model identifier (e.g. 'gemini-2.5-pro').
 * @param {'capacity'|'ratelimit'} type - Kind of limit that was hit.
 */
export function banModel(model, type) {
  _loadBansFromDisk(); // merge any persisted bans from previous session before writing
  const ttlMs = type === 'capacity' ? CAPACITY_BAN_TTL : RATELIMIT_BAN_TTL;
  _modelBans.set(model, { type, bannedAt: Date.now(), ttlMs });
  const ttlLabel = type === 'capacity' ? '4h' : '90s';
  logActivityLine('RUNNER', `Model ${model} marked unavailable (${type}) for ${ttlLabel}`);
  _persistBansToDisk(); // write to disk so the ban survives a bot restart
}
|
|
406
|
+
|
|
407
|
+
/**
 * Check if a model is currently available (not banned).
 * Expired bans are pruned as a side effect of the check.
 * @param {string} model - Model identifier to test.
 * @returns {{available: true} | {available: false, type: string, reason: string}}
 */
export function isModelAvailable(model) {
  _loadBansFromDisk(); // ensure persisted bans from previous session are respected
  const ban = _modelBans.get(model);
  if (!ban) return { available: true };

  const elapsed = Date.now() - ban.bannedAt;
  if (elapsed >= ban.ttlMs) {
    // Ban has expired — drop it and report the model as usable again.
    _modelBans.delete(model);
    return { available: true };
  }

  const secondsLeft = Math.ceil((ban.ttlMs - elapsed) / 1000);
  const label = secondsLeft >= 60 ? `${Math.ceil(secondsLeft / 60)}m` : `${secondsLeft}s`;
  const kind = ban.type === 'capacity' ? 'quota-exhausted' : 'rate-limited';
  return {
    available: false,
    type: ban.type,
    reason: `${model} is ${kind} (clears in ~${label})`,
  };
}
|
|
427
|
+
|
|
428
|
+
/**
 * Get a summary of all currently banned models (useful for /status or diagnostics).
 * Expired bans are pruned as a side effect.
 * @returns {Array<{model: string, type: string, clearsIn: string}>}
 */
export function getBannedModels() {
  // Include bans persisted by a previous session — without this, a freshly
  // restarted bot reports no bans even though banModel/isModelAvailable
  // still enforce them (they both load from disk first).
  _loadBansFromDisk();
  const result = [];
  for (const [model, ban] of _modelBans) {
    const remaining = Math.ceil((ban.ttlMs - (Date.now() - ban.bannedAt)) / 1000);
    if (remaining > 0) {
      const label = remaining >= 60 ? `${Math.ceil(remaining / 60)}m` : `${remaining}s`;
      result.push({ model, type: ban.type, clearsIn: label });
    } else {
      // Deleting during Map iteration is well-defined in JS.
      _modelBans.delete(model);
    }
  }
  return result;
}
|
|
444
|
+
|
|
445
|
+
/**
 * Error raised when AI output matched a rate-limit or capacity pattern.
 * The message is the specific line that matched (capped at 200 chars).
 */
export class AILimitError extends Error {
  /**
   * @param {string} agent - Provider label ('GEMINI' | 'CLAUDE' | 'COPILOT').
   * @param {string} rawMessage - Full CLI output that triggered the error.
   */
  constructor(agent, rawMessage) {
    // Show the line that actually matched a pattern — not just the first line.
    // Gemini CLI prefixes output with startup messages ("Loaded cached credentials.")
    // that are unrelated to the actual error.
    const { line } = detectLimit(rawMessage);
    const display = line
      || rawMessage.split('\n').map(l => l.trim()).filter(Boolean)[0]
      || rawMessage;
    super(display.slice(0, 200));
    this.name = 'AILimitError';
    this.agent = agent; // 'GEMINI' | 'CLAUDE' | 'COPILOT'
  }
}
|
|
459
|
+
|
|
460
|
+
// ─── AI Model Configuration ────────────────────────────────────────────────────
|
|
461
|
+
//
|
|
462
|
+
// ─── Model Configuration ──────────────────────────────────────────────────────
|
|
463
|
+
// Model chains are read from aicc.config.js → models section at runtime.
|
|
464
|
+
// These defaults are used as fallbacks when config is not yet loaded
|
|
465
|
+
// (e.g. during `aicc init` before a config file exists).
|
|
466
|
+
//
|
|
467
|
+
// ─── Pipeline-Driven Model Resolution ────────────────────────────────────────
|
|
468
|
+
// All model selection is driven by the `pipeline[]` array in config.
|
|
469
|
+
// Each stage has { provider, model, fallbacks }.
|
|
470
|
+
// Env var overrides: GEMINI_MODEL, CLAUDE_MODEL, COPILOT_MODEL, OLLAMA_MODEL
|
|
471
|
+
//
|
|
472
|
+
// The internal model chain for each provider is built from all pipeline entries
|
|
473
|
+
// that use that provider, so retry/fallback within a provider still works.
|
|
474
|
+
|
|
475
|
+
// Fallback model chains per provider, used only when no config is loaded
// (e.g. during `aicc init` before aicc.config.js exists). Order = preference:
// the first entry is tried first, later entries are downgrade targets.
const _DEFAULT_MODEL_CHAINS = {
  gemini: ['gemini-2.5-pro', 'gemini-2.5-flash'],
  claude: ['claude-sonnet-4-6', 'claude-haiku-4-5-20251001'],
  copilot: ['claude-sonnet-4.6', 'claude-haiku-4.5'],
};
|
|
480
|
+
|
|
481
|
+
/** Read the pipeline array from config, or null when unavailable. */
// NOTE(review): `getConfig` is not among this file's imports from
// '../config.js' (only env, getPipelineStage, getOllamaConfig are). If it is
// truly undefined here, the ReferenceError is swallowed by the catch and this
// always returns null, silently disabling pipeline-driven model selection.
// Confirm against config.js exports and import getConfig if intended.
function _pipelineConfig() {
  try { return getConfig().pipeline; } catch { return null; }
}
|
|
485
|
+
|
|
486
|
+
/** Backward compat: read old models config if pipeline not available. */
// NOTE(review): like _pipelineConfig, this relies on `getConfig`, which is not
// imported in this file — a ReferenceError would be swallowed here, making
// this return null. Verify getConfig is actually in scope.
function _modelsConfig() {
  try { return getConfig().models; } catch { return null; }
}
|
|
490
|
+
|
|
491
|
+
/**
 * Get the pipeline entry for a stage — the single source of truth.
 * @param {string} stage - Pipeline stage name.
 * @returns {{stage: string, provider: string, model: string, fallbacks: string[]}}
 */
function _getPipelineEntry(stage) {
  try {
    return getPipelineStage(stage);
  } catch {
    // Config lookup failed (e.g. during `aicc init`) — use a built-in default.
    return { stage, provider: 'copilot', model: 'claude-sonnet-4.6', fallbacks: ['gemini:gemini-2.5-pro', 'ollama'] };
  }
}
|
|
499
|
+
|
|
500
|
+
/**
 * Build model chains per provider from pipeline entries.
 * Collects all unique models used by each provider across all stages,
 * including models named inside `fallbacks` ("provider:model" strings).
 * Falls back to the legacy `models.*Chain` config, then to built-in defaults.
 *
 * NOTE(review): the pipeline branch depends on _pipelineConfig(), which calls
 * `getConfig()` — a name not imported in this file. If that import is indeed
 * missing, _pipelineConfig() always returns null and only the legacy/default
 * branch below ever runs. Confirm before relying on pipeline-driven chains.
 */
function getConfiguredChains() {
  const pipeline = _pipelineConfig();
  if (pipeline) {
    const chains = { gemini: [], claude: [], copilot: [] };
    for (const entry of pipeline) {
      if (chains[entry.provider] && !chains[entry.provider].includes(entry.model)) {
        chains[entry.provider].push(entry.model);
      }
      // Also collect models from fallback entries
      for (const fb of (entry.fallbacks || [])) {
        const [prov, mod] = fb.includes(':') ? fb.split(':', 2) : [fb, null];
        if (mod && chains[prov] && !chains[prov].includes(mod)) {
          chains[prov].push(mod);
        }
      }
    }
    // Ensure at least one model per provider
    return {
      gemini: chains.gemini.length ? chains.gemini : _DEFAULT_MODEL_CHAINS.gemini,
      claude: chains.claude.length ? chains.claude : _DEFAULT_MODEL_CHAINS.claude,
      copilot: chains.copilot.length ? chains.copilot : _DEFAULT_MODEL_CHAINS.copilot,
    };
  }
  // Backward compat: old models config
  const m = _modelsConfig();
  return {
    gemini: m?.geminiChain || _DEFAULT_MODEL_CHAINS.gemini,
    claude: m?.claudeChain || _DEFAULT_MODEL_CHAINS.claude,
    copilot: m?.copilotChain || _DEFAULT_MODEL_CHAINS.copilot,
  };
}
|
|
535
|
+
|
|
536
|
+
/** Get starting model for each provider: env var > pipeline primary > default. */
function getStartModels() {
  const chains = getConfiguredChains();
  // Ollama's model comes from its own config block, not from the chains.
  const ollamaCfg = (() => { try { return getOllamaConfig(); } catch { return { model: 'llama3.1' }; } })();
  return {
    gemini: process.env.GEMINI_MODEL || chains.gemini[0],
    claude: process.env.CLAUDE_MODEL || chains.claude[0],
    copilot: process.env.COPILOT_MODEL || chains.copilot[0],
    ollama: process.env.OLLAMA_MODEL || ollamaCfg.model || 'llama3.1',
  };
}
|
|
547
|
+
|
|
548
|
+
/** Get Ollama base URL: env var > config > default. */
function getOllamaUrl() {
  // Config read is best-effort; any failure just means "use defaults".
  let cfg;
  try {
    cfg = getOllamaConfig();
  } catch {
    cfg = {};
  }
  return process.env.OLLAMA_BASE_URL || cfg.baseUrl || 'http://localhost:11434';
}
|
|
553
|
+
|
|
554
|
+
/** Get the Copilot model for a given stage (from pipeline entry). */
function getCrossAiCopilotModel() {
  // Prefer the first copilot pipeline entry that is neither 'chat' nor 'implement'.
  const pipeline = _pipelineConfig();
  const crossAiEntry = pipeline?.find(
    (e) => e.provider === 'copilot' && e.stage !== 'chat' && e.stage !== 'implement'
  );
  if (crossAiEntry) return crossAiEntry.model;
  // Fall back to the legacy models config, then the hard-coded default.
  return _modelsConfig()?.copilotCrossAiModel || 'claude-sonnet-4.6';
}
|
|
564
|
+
// Provider routing mode, read once from the environment at module load.
// Valid values: 'hybrid' | 'cloud' | 'local'; defaults to 'hybrid' when unset.
const AI_MODE = env('AI_MODE') || 'hybrid'; // hybrid | cloud | local
|
|
565
|
+
|
|
566
|
+
// ─── OpenClaw Session Cache ───────────────────────────────────────────────────
// Reuse a single session ID per process lifetime (~3.7s warm vs ~6.5s cold).
// Also cache the isAvailable result so we don't spawn `which openclaw` per message.
let _openclawSessionId = null;
let _openclawAvailable = null; // null = unchecked, true/false = cached

/** Lazily mint one session ID per process and return the same ID thereafter. */
function getOpenclawSessionId() {
  _openclawSessionId ??= `aicc-chat-${Date.now()}`;
  return _openclawSessionId;
}
|
|
576
|
+
|
|
577
|
+
// ─── Per-provider cooldown ──────────────────────────────────────────────────
// Prevents rapid-fire calls to the same provider (e.g. review→fix→review in seconds).
// Minimum gap between consecutive calls to the same provider.
const PROVIDER_COOLDOWN_MS = 30_000; // 30 seconds between calls to same provider
const _lastCallTime = new Map(); // provider → timestamp

/**
 * Sleep until at least PROVIDER_COOLDOWN_MS has passed since the previous call
 * to `provider`, then record now as this provider's latest call time.
 */
async function enforceProviderCooldown(provider) {
  const previous = _lastCallTime.get(provider);
  if (previous) {
    const waitMs = PROVIDER_COOLDOWN_MS - (Date.now() - previous);
    if (waitMs > 0) {
      logActivityLine(provider.toUpperCase(), `Cooldown: waiting ${(waitMs / 1000).toFixed(0)}s before next call...`);
      await new Promise(r => setTimeout(r, waitMs));
    }
  }
  _lastCallTime.set(provider, Date.now());
}
|
|
595
|
+
|
|
596
|
+
/**
 * Return the model chain for `agent`, starting from the currently configured model.
 * If the configured model is not in the chain (e.g. a custom env-var model),
 * it is prepended so the chain always begins at the right point.
 *
 * Example: GEMINI_MODEL=gemini-2.5-pro → chain is ['gemini-2.5-pro', 'gemini-2.5-flash']
 */
function getModelChain(agent) {
  const configured = getStartModels()[agent];
  const chain = getConfiguredChains()[agent] || [configured];
  const start = chain.indexOf(configured);
  // Configured model found in the chain → start there; otherwise prepend it.
  return start === -1 ? [configured, ...chain] : chain.slice(start);
}
|
|
611
|
+
|
|
612
|
+
/**
 * Run a shell command streaming output to terminal.
 * Pass options.label = 'CLAUDE'|'GEMINI'|'COPILOT' to prefix each output line.
 *
 * @param {string} command - Executable or shell command line.
 * @param {string[]} [args] - Arguments (ignored by the shell when command is a full line).
 * @param {object} [options] - cwd, shell, env, label, timeout (ms), ignoreExitCode.
 * @returns {Promise<number>} Resolves with the exit code (0, or any code when
 *   ignoreExitCode is set); rejects on non-zero exit, spawn error, or timeout.
 */
export function run(command, args = [], options = {}) {
  return new Promise((resolvePromise, reject) => {
    const useLabel = !!options.label;
    const proc = spawn(command, args, {
      cwd: options.cwd || getRootDir(),
      shell: options.shell !== false,
      // With a label we must pipe stdout/stderr so lines can be prefixed;
      // otherwise the child shares our terminal directly.
      stdio: useLabel ? ['inherit', 'pipe', 'pipe'] : 'inherit',
      env: { ...process.env, ...options.env },
    });

    let settled = false;
    let timer;
    // Settle exactly once and always release the timeout timer. FIX: the timer
    // was previously never cleared, so a pending setTimeout kept the event loop
    // alive for up to options.timeout ms after the child had already exited
    // (capture() below clears its timer; run() now does the same).
    const settle = (fn, val) => {
      if (settled) return;
      settled = true;
      if (timer) clearTimeout(timer);
      fn(val);
    };

    // Timeout support — kills the process if it exceeds the limit
    if (options.timeout) {
      timer = setTimeout(() => {
        if (!settled) {
          try { proc.kill('SIGTERM'); } catch { /* */ }
          settle(reject, new Error(`Command timed out after ${options.timeout}ms`));
        }
      }, options.timeout);
    }

    if (useLabel) {
      let buffer = '';
      const flush = (chunk) => {
        buffer += chunk;
        const lines = buffer.split('\n');
        buffer = lines.pop(); // keep the trailing partial line for the next chunk
        lines.forEach(line => logActivityLine(options.label, line));
      };
      if (proc.stdout) proc.stdout.on('data', d => flush(d.toString()));
      if (proc.stderr) proc.stderr.on('data', d => flush(d.toString()));
      proc.on('close', () => { if (buffer.trim()) logActivityLine(options.label, buffer); });
    }

    proc.on('close', code => {
      if (code === 0 || options.ignoreExitCode) settle(resolvePromise, code);
      else settle(reject, new Error(`Command failed with exit code ${code}`));
    });
    proc.on('error', e => settle(reject, e));
  });
}
|
|
659
|
+
|
|
660
|
+
/**
 * Run a shell command and capture output silently.
 * Resolves (never rejects) with { stdout, stderr, code }:
 *   - normal exit → trimmed stdout/stderr and the child's exit code
 *   - timeout     → stderr 'Timed out', code -1 (child is SIGTERM'd)
 *   - spawn error → stderr is the error message, code 1
 */
export function capture(command, args = [], options = {}) {
  return new Promise((resolvePromise) => {
    const proc = spawn(command, args, {
      cwd: options.cwd || getRootDir(),
      shell: options.shell !== false,
      env: { ...process.env, ...options.env },
    });

    let stdout = '';
    let stderr = '';
    if (proc.stdout) proc.stdout.on('data', (chunk) => { stdout += chunk.toString(); });
    if (proc.stderr) proc.stderr.on('data', (chunk) => { stderr += chunk.toString(); });

    let done = false;
    let timer;
    // Resolve exactly once and release the timeout timer.
    const finish = (result) => {
      if (done) return;
      done = true;
      if (timer) clearTimeout(timer);
      resolvePromise(result);
    };

    // Timeout support — kills the process and resolves with code -1
    if (options.timeout) {
      timer = setTimeout(() => {
        if (done) return;
        try { proc.kill('SIGTERM'); } catch { /* */ }
        finish({ stdout: stdout.trim(), stderr: 'Timed out', code: -1 });
      }, options.timeout);
    }

    proc.on('close', (code) => finish({ stdout: stdout.trim(), stderr: stderr.trim(), code }));
    proc.on('error', (err) => finish({ stdout: '', stderr: err.message, code: 1 }));
  });
}
|
|
705
|
+
|
|
706
|
+
/**
 * Check if a CLI tool is installed.
 * For 'ollama', checks HTTP availability instead of CLI binary.
 *
 * @param {string} tool - Tool/binary name to probe (e.g. 'claude', 'gemini').
 * @returns {Promise<boolean>} true when the tool is callable.
 */
export async function isAvailable(tool) {
  if (tool === 'ollama') {
    const { available } = await isOllamaAvailable();
    return available;
  }
  // Pass the tool name as an argv element instead of interpolating it into a
  // shell string — avoids shell injection if `tool` ever comes from config or
  // user input, with identical results for ordinary binary names.
  const result = await capture('which', [tool], { shell: false });
  return result.code === 0;
}
|
|
718
|
+
|
|
719
|
+
/**
 * Run Claude Code in non-interactive (--print) mode.
 *
 * Auto-downgrade: on capacity exhaustion, tries the next model in MODEL_CHAINS.claude.
 * On temporary 429 rate limits, throws AILimitError immediately (Claude sessions are
 * stateful — retrying mid-session risks corrupting context).
 *
 * Session tracking modes — pick ONE:
 *   featureId       {string} Per-feature conversation via --resume <sessionId>.
 *   continueSession {bool}   Legacy: --continue (resumes whatever was last active).
 *
 * @param {string} prompt - Full task description for Claude.
 * @param {object} [options] - featureId, continueSession, stage, failFast,
 *   ignoreExitCode (legacy branch only).
 * @returns {Promise<number>} The Claude CLI exit code.
 * @throws {AILimitError} Circuit breaker open, 429 rate limit, or all models exhausted.
 */
export async function runClaude(prompt, options = {}) {
  // Circuit breaker check
  if (!circuitBreaker.canExecute('claude')) {
    logActivityLine('SYSTEM', `Circuit breaker OPEN for Claude — skipping to fallback`);
    throw new AILimitError('CLAUDE', 'Circuit breaker OPEN for Claude');
  }

  await enforceProviderCooldown('claude');
  // Rough token estimate (~4 chars/token) used only for the status bar display.
  const estimatedTokens = Math.ceil(prompt.length / 4);
  const shortTask = prompt.replace(/\n/g, ' ').trim().slice(0, 55);

  const sessionsDir = resolve(getWorkflowDir(), 'sessions');
  if (!existsSync(sessionsDir)) mkdirSync(sessionsDir, { recursive: true });

  // Compress context if stage info is available
  let processedPrompt = prompt;
  if (options.stage && options.featureId) {
    try {
      processedPrompt = compressForStage(options.stage, options.featureId, prompt);
      const originalLen = prompt.length;
      const compressedLen = processedPrompt.length;
      if (compressedLen < originalLen) {
        const reduction = Math.round((1 - compressedLen / originalLen) * 100);
        logActivityLine('SYSTEM', `Context compressed: ${originalLen} → ${compressedLen} chars (${reduction}% reduction)`);
      }
    } catch (e) {
      // Compression is best-effort — fall back to the raw prompt on any failure.
      processedPrompt = prompt;
    }
  }

  // Prompt is piped via a temp file to avoid argv length limits / quoting issues.
  // NOTE(review): tmpFile is never deleted on any path — presumably left to OS
  // tmpdir cleanup; confirm intentional.
  const tmpFile = resolve(tmpdir(), `aicc-claude-${Date.now()}.txt`);
  writeFileSync(tmpFile, processedPrompt, 'utf8');

  // failFast: try primary model once with a 60s timeout — caller falls back immediately on failure
  const fullChain = getModelChain('claude');
  const modelChain = options.failFast ? fullChain.slice(0, 1) : fullChain;
  const claudeTimeout = options.failFast ? 60000 : undefined;
  const exhaustedMsgs = [];
  const _costStart = Date.now();

  for (const model of modelChain) {
    // ── Pre-flight: skip models known to be unavailable ────────────────────────
    const avail = isModelAvailable(model);
    if (!avail.available) {
      logActivityLine('CLAUDE', `Skipping ${model} — ${avail.reason}`);
      exhaustedMsgs.push(avail.reason);
      continue;
    }

    statusBar.set('CLAUDE', shortTask, estimatedTokens);
    if (model !== modelChain[0]) {
      logActivityLine('CLAUDE', `Trying fallback model: ${model}`);
    }

    // ── Feature-ID session: --resume keeps Claude in the same conversation ────
    if (options.featureId) {
      const sessionFile = resolve(sessionsDir, `CLAUDE-${options.featureId}.json`);
      const flags = ['--model', model, '--print', '--output-format', 'json', '--dangerously-skip-permissions'];

      if (existsSync(sessionFile)) {
        try {
          const { sessionId } = JSON.parse(readFileSync(sessionFile, 'utf8'));
          if (sessionId) flags.push('--resume', sessionId);
        } catch { /* corrupt session file — start fresh */ }
      }

      try {
        const result = await capture(
          `cat "${tmpFile}" | claude ${flags.join(' ')}`,
          [], { shell: true, timeout: claudeTimeout }
        );

        // Treat timeout (code -1) as a hard failure when failFast
        if (result.code === -1 && options.failFast) {
          logActivityLine('CLAUDE', `Claude timed out (60s) — falling back to Copilot`);
          // Plain Error (not AILimitError) — propagates to the caller's fallback
          // handler; the finally below still clears the status bar.
          throw new Error('Claude timed out');
        }

        const combinedOutput = result.stdout + '\n' + result.stderr;
        const { type, line } = detectLimit(combinedOutput);

        if (type === 'capacity') {
          banModel(model, 'capacity');
          exhaustedMsgs.push(`${model}: ${line}`);
          logActivityLine('CLAUDE', `Model ${model} quota exhausted — trying next model`);
          continue; // try next model
        }
        if (type === 'ratelimit') {
          banModel(model, 'ratelimit');
          throw new AILimitError('CLAUDE', combinedOutput); // surface immediately
        }

        // Parse the JSON envelope to persist the session ID and extract the text.
        // On parse failure the raw stdout is logged instead (best-effort).
        try {
          const json = JSON.parse(result.stdout);
          if (json.session_id) {
            writeFileSync(sessionFile,
              JSON.stringify({ sessionId: json.session_id, featureId: options.featureId }, null, 2)
            );
          }
          const text = json.result || json.content || '';
          text.split('\n').forEach(l => logActivityLine('CLAUDE', l));
        } catch (e) {
          if (e instanceof AILimitError) throw e;
          result.stdout.split('\n').forEach(l => logActivityLine('CLAUDE', l));
        }

        if (model !== getStartModels().claude) {
          logActivityLine('CLAUDE', `Task completed using fallback model: ${model}`);
        }
        circuitBreaker.recordSuccess('claude');
        trackUsage({ provider: 'claude', model, stage: options.stage, featureId: options.featureId, promptText: prompt, responseText: result.stdout || '', durationMs: Date.now() - _costStart });
        return result.code;
      } finally {
        // Runs on return, continue, and throw alike — status bar never sticks.
        statusBar.clear();
      }
    }

    // ── Legacy: no featureId → stream output, optional --continue ─────────────
    const flags = ['--model', model, '--print', '--output-format', 'text', '--dangerously-skip-permissions'];
    if (options.continueSession) flags.push('--continue');

    try {
      const code = await run(
        `cat "${tmpFile}" | claude ${flags.join(' ')}`,
        [], { shell: true, label: 'CLAUDE', ignoreExitCode: options.ignoreExitCode, timeout: claudeTimeout }
      );
      if (model !== getStartModels().claude) {
        logActivityLine('CLAUDE', `Task completed using fallback model: ${model}`);
      }
      circuitBreaker.recordSuccess('claude');
      trackUsage({ provider: 'claude', model, stage: options.stage, featureId: options.featureId, promptText: prompt, durationMs: Date.now() - _costStart });
      return code;
    } catch (e) {
      // run() rejects on non-zero exit — re-throw non-limit errors
      // NOTE(review): this catch is currently a pure pass-through; presumably a
      // placeholder for limit detection on the streamed branch — confirm.
      throw e;
    } finally {
      statusBar.clear();
    }
  }

  // All models exhausted
  const detail = exhaustedMsgs.map(m => `\n ${m}`).join('');
  throw new AILimitError('CLAUDE', `All models exhausted:${detail}`);
}
|
|
874
|
+
|
|
875
|
+
/**
 * Run Gemini CLI with a prompt string. Writes the response to outputFile.
 *
 * Model auto-downgrade strategy:
 * - Capacity exhausted ("reset after 20h") → try next model in chain immediately
 * - Rate limit 429 → wait RETRY_WAIT_S seconds, retry SAME model (up to MAX_RETRIES)
 * - After MAX_RETRIES on same model with 429 → try next model in chain
 * - All models exhausted → throw AILimitError
 *
 * @param {string} prompt - Full task description (may reference file paths)
 * @param {string} outputFile - Path where Gemini's response will be written
 * @param {object} [options] - stage, featureId, failFast, cwd, modelChain (override).
 * @returns {Promise<void>} Resolves on success (response is in outputFile).
 * @throws {AILimitError} Circuit breaker open or all models exhausted.
 */
export async function runGemini(prompt, outputFile, options = {}) {
  // Circuit breaker check
  if (!circuitBreaker.canExecute('gemini')) {
    logActivityLine('SYSTEM', `Circuit breaker OPEN for Gemini — skipping to fallback`);
    throw new AILimitError('GEMINI', 'Circuit breaker OPEN for Gemini');
  }

  await enforceProviderCooldown('gemini');
  // failFast: try once with primary model only — caller handles fallback immediately
  const MAX_RETRIES = options.failFast ? 1 : 2;
  const RETRY_WAIT_S = 30;

  // Compress context if stage info is available
  let processedPrompt = prompt;
  if (options.stage && options.featureId) {
    try {
      processedPrompt = compressForStage(options.stage, options.featureId, prompt);
      const originalLen = prompt.length;
      const compressedLen = processedPrompt.length;
      if (compressedLen < originalLen) {
        const reduction = Math.round((1 - compressedLen / originalLen) * 100);
        logActivityLine('SYSTEM', `Context compressed: ${originalLen} → ${compressedLen} chars (${reduction}% reduction)`);
      }
    } catch (e) {
      // Compression is best-effort — keep the raw prompt on failure.
      processedPrompt = prompt;
    }
  }

  const promptFile = resolve(tmpdir(), `aicc-gemini-${Date.now()}.txt`);
  writeFileSync(promptFile, processedPrompt, 'utf8');

  // Rough token estimate (~4 chars/token) for the status bar only.
  const estimatedTokens = Math.ceil(processedPrompt.length / 4);
  const shortTask = prompt.split('\n').find(l => l.trim())?.trim().slice(0, 55) || 'Gemini task';
  // NOTE(review): outputFile/stderrFile are interpolated into a shell string
  // below — paths containing double quotes would break the command; presumably
  // callers pass trusted temp paths. Confirm.
  const stderrFile = `${outputFile}.stderr`;

  // failFast: only try primary model; otherwise try full chain
  // options.modelChain: stage-specific override (e.g. PM uses flash-lite, review uses pro first)
  const fullChain = options.modelChain || getModelChain('gemini');
  const modelChain = options.failFast ? fullChain.slice(0, 1) : fullChain;
  const exhaustedMsgs = [];
  const _costStart = Date.now();

  for (const model of modelChain) {
    // ── Pre-flight: skip models known to be unavailable ────────────────────────
    const avail = isModelAvailable(model);
    if (!avail.available) {
      logActivityLine('GEMINI', `Skipping ${model} — ${avail.reason}`);
      exhaustedMsgs.push(avail.reason);
      continue;
    }

    if (model !== modelChain[0]) {
      logActivityLine('GEMINI', `Trying fallback model: ${model}`);
    }

    for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
      statusBar.set('GEMINI', `[${model}] ${shortTask}`, estimatedTokens);

      try {
        // Strip ALL IDE-related env vars so Gemini CLI never tries to connect to VS Code.
        // Fixes: "[IDEClient] Directory mismatch" when running from a different project
        // than the one open in the IDE (e.g. PickleBall bot triggering review while
        // VS Code has XConnector open).
        const IDE_PREFIXES = ['GEMINI_CLI_IDE_', 'VSCODE_', 'GIT_ASKPASS', 'GIT_TERMINAL_PROMPT'];
        const IDE_EXACT = new Set(['TERM_PROGRAM', 'TERM_PROGRAM_VERSION']);
        const cleanEnv = {};
        for (const [k, v] of Object.entries(process.env)) {
          if (IDE_PREFIXES.some(p => k.startsWith(p))) continue;
          if (IDE_EXACT.has(k)) continue;
          cleanEnv[k] = v;
        }

        // --yolo: auto-approve all tool calls (file reads, git commands) so Gemini
        // doesn't block waiting for confirmations in non-interactive mode.
        // cwd: run from the git repo root so Gemini's built-in git tools see the
        // correct repository (config root is often a monorepo parent with no .git).
        const runCwd = options.cwd || getRootDir();
        // NOTE(review): maxTokens is computed but never passed to the CLI command
        // below (only reused later for trackUsage) — confirm whether the CLI call
        // should receive a token limit flag.
        const maxTokens = getTokenLimit(options.stage || 'impl');
        await run(
          `cat "${promptFile}" | gemini --yolo -m ${model} > "${outputFile}" 2> "${stderrFile}"`,
          [], { shell: true, label: 'GEMINI', ignoreExitCode: true,
            timeout: options.failFast ? 60000 : DEFAULT_TIMEOUTS.gemini, env: cleanEnv, cwd: runCwd }
        );

        // Check stderr for limit errors
        if (existsSync(stderrFile)) {
          try {
            const errText = readFileSync(stderrFile, 'utf8').trim();
            if (errText) {
              logRawBlock('GEMINI', 'Gemini CLI stderr', errText);
              const { type, line } = detectLimit(errText);

              if (type === 'capacity') {
                // Daily quota hit — ban this model, skip remaining retries, try next model
                banModel(model, 'capacity');
                exhaustedMsgs.push(`${model}: ${line}`);
                logActivityLine('GEMINI', `Model ${model} quota exhausted (may reset in hours) — trying next model`);
                try { if (existsSync(outputFile)) unlinkSync(outputFile); } catch { /* non-fatal */ }
                break; // exit attempt loop → outer model loop continues
              }

              if (type === 'ratelimit') {
                if (attempt < MAX_RETRIES) {
                  // Temporary rate limit — wait and retry same model
                  banModel(model, 'ratelimit');
                  logActivityLine('GEMINI', `Rate limit hit (attempt ${attempt}/${MAX_RETRIES}) — waiting ${RETRY_WAIT_S}s...`);
                  statusBar.set('GEMINI', `Rate limited — retry ${attempt + 1}/${MAX_RETRIES} in ${RETRY_WAIT_S}s`, 0);
                  await new Promise(r => setTimeout(r, RETRY_WAIT_S * 1000));
                  try { if (existsSync(outputFile)) unlinkSync(outputFile); } catch { /* non-fatal */ }
                  try { if (existsSync(stderrFile)) unlinkSync(stderrFile); } catch { /* non-fatal */ }
                  continue; // next attempt, same model
                }
                // MAX_RETRIES exhausted on this model — try next model
                banModel(model, 'ratelimit');
                exhaustedMsgs.push(`${model}: ${line} (after ${MAX_RETRIES} retries)`);
                logActivityLine('GEMINI', `Max retries reached for ${model} — trying next model`);
                try { if (existsSync(outputFile)) unlinkSync(outputFile); } catch { /* non-fatal */ }
                break; // exit attempt loop → outer model loop continues
              }
            }
          } catch (e) {
            if (e instanceof AILimitError) throw e;
            /* other read errors are non-fatal */
          }
        }

        // No limit error — validate and log the response
        // NOTE: Do NOT run detectLimit() on outputFile content — review text may
        // legitimately mention "rate_limit", "quota_exceeded", etc. when reviewing
        // code that handles those topics (e.g. XConn_RateLimiter).
        if (existsSync(outputFile)) {
          try {
            const out = readFileSync(outputFile, 'utf8');
            if (out.trim()) logRawBlock('GEMINI', 'Full Gemini Response', out);

            // Validate response quality
            const validation = validateResponse(options.stage, out);
            if (!validation.valid) {
              logActivityLine('GEMINI', `Response validation failed: ${validation.reason} — falling back to next model`);
              circuitBreaker.recordFailure('gemini');
              try { unlinkSync(outputFile); } catch { /* */ }
              break; // exit attempt loop, try next model
            }
          } catch { /* non-fatal */ }
        }

        if (model !== getStartModels().gemini) {
          logActivityLine('GEMINI', `Task completed using fallback model: ${model}`);
        }

        // Clean up temp files
        circuitBreaker.recordSuccess('gemini');
        try { if (existsSync(stderrFile)) unlinkSync(stderrFile); } catch { /* non-fatal */ }
        try { if (existsSync(promptFile)) unlinkSync(promptFile); } catch { /* non-fatal */ }
        try { trackUsage({ provider: 'gemini', model, stage: options.stage, featureId: options.featureId, promptText: prompt, responseText: existsSync(outputFile) ? readFileSync(outputFile, 'utf8') : '', durationMs: Date.now() - _costStart, maxTokens: getTokenLimit(options.stage || 'impl') }); } catch { /* non-fatal */ }
        // NOTE(review): the return below also triggers the finally's
        // statusBar.clear(), so this explicit clear is redundant (harmless).
        statusBar.clear();
        return; // success

      } finally {
        // Runs on return, break, continue, and throw — keeps the bar clean.
        statusBar.clear();
      }
    } // end attempt loop
  } // end model loop

  // All models in chain exhausted
  try { if (existsSync(promptFile)) unlinkSync(promptFile); } catch { /* non-fatal */ }

  const detail = exhaustedMsgs.map(m => `\n ${m}`).join('');
  const hint = `\nSet GEMINI_MODEL=gemini-2.5-flash to skip Pro models.`;
  throw new AILimitError('GEMINI', `All models exhausted:${detail}${hint}`);
}
|
|
1058
|
+
|
|
1059
|
+
/**
 * Sync ~/.copilot/config.json model to match the given model name.
 * Copilot CLI rejects non-interactive calls if --model differs from the saved config model.
 */
function syncCopilotConfigModel(model) {
  const configPath = resolve(homedir(), '.copilot', 'config.json');
  if (!existsSync(configPath)) return;
  try {
    const config = JSON.parse(readFileSync(configPath, 'utf8'));
    if (config.model === model) return; // already in sync — nothing to write
    config.model = model;
    writeFileSync(configPath, JSON.stringify(config, null, 4), 'utf8');
    logActivityLine('COPILOT', `Config model updated → ${model}`);
  } catch { /* non-fatal — Copilot will fall back to its default */ }
}
|
|
1075
|
+
|
|
1076
|
+
/**
 * Run GitHub Copilot CLI in non-interactive (-p) mode.
 *
 * Auto-downgrade: on capacity exhaustion, tries the next model in MODEL_CHAINS.copilot.
 * Session tracking: each featureId maps to a deterministic UUID via MD5 hash.
 *
 * @param {string} prompt - Full task description for Copilot
 * @param {object} options
 *   featureId {string} - Feature ID for session tracking
 *   ignoreExitCode {bool} - Don't reject on non-zero exit
 *   _forceModel {string} - Internal: bypass the model chain with one model.
 * @returns {Promise<number>} The Copilot CLI exit code.
 * @throws {AILimitError} Circuit breaker open, 429 rate limit, or all models exhausted.
 */
export async function runCopilot(prompt, options = {}) {
  // Circuit breaker check
  if (!circuitBreaker.canExecute('copilot')) {
    logActivityLine('SYSTEM', `Circuit breaker OPEN for Copilot — skipping to fallback`);
    throw new AILimitError('COPILOT', 'Circuit breaker OPEN for Copilot');
  }

  await enforceProviderCooldown('copilot');
  // Rough token estimate (~4 chars/token) used only for the status bar display.
  const estimatedTokens = Math.ceil(prompt.length / 4);
  const shortTask = prompt.replace(/\n/g, ' ').trim().slice(0, 55);

  // Compress context if stage info is available
  let processedPrompt = prompt;
  if (options.stage && options.featureId) {
    try {
      processedPrompt = compressForStage(options.stage, options.featureId, prompt);
      const originalLen = prompt.length;
      const compressedLen = processedPrompt.length;
      if (compressedLen < originalLen) {
        const reduction = Math.round((1 - compressedLen / originalLen) * 100);
        logActivityLine('SYSTEM', `Context compressed: ${originalLen} → ${compressedLen} chars (${reduction}% reduction)`);
      }
    } catch (e) {
      // Compression is best-effort — keep the raw prompt on failure.
      processedPrompt = prompt;
    }
  }

  // NOTE(review): tmpFile is written but never referenced again in this function
  // (the prompt is passed to the CLI via the -p argv below) — looks like dead
  // code left from a pipe-based invocation; confirm before removing.
  const tmpFile = resolve(tmpdir(), `aicc-copilot-${Date.now()}.txt`);
  writeFileSync(tmpFile, processedPrompt, 'utf8');

  // Build session UUID once (same for all model attempts — same conversation)
  let sessionUUID = null;
  if (options.featureId) {
    const sessionsDir = resolve(getWorkflowDir(), 'sessions');
    if (!existsSync(sessionsDir)) mkdirSync(sessionsDir, { recursive: true });

    const sessionFile = resolve(sessionsDir, `COPILOT-${options.featureId}.json`);
    if (existsSync(sessionFile)) {
      try {
        const saved = JSON.parse(readFileSync(sessionFile, 'utf8'));
        sessionUUID = saved.sessionId;
      } catch { /* corrupt file — regenerate */ }
    }
    if (!sessionUUID) {
      sessionUUID = featureToSessionUUID(options.featureId);
      writeFileSync(sessionFile, JSON.stringify({
        sessionId: sessionUUID, featureId: options.featureId, createdAt: new Date().toISOString(),
      }, null, 2));
      logActivityLine('COPILOT', `New session → sessions/COPILOT-${options.featureId}.json`);
    } else {
      logActivityLine('COPILOT', `Resuming session ${sessionUUID}`);
    }
  }

  const modelChain = options._forceModel ? [options._forceModel] : getModelChain('copilot');
  const exhaustedMsgs = [];
  const _costStart = Date.now();

  for (const model of modelChain) {
    // ── Pre-flight: skip models known to be unavailable ────────────────────────
    const avail = isModelAvailable(model);
    if (!avail.available) {
      logActivityLine('COPILOT', `Skipping ${model} — ${avail.reason}`);
      exhaustedMsgs.push(avail.reason);
      continue;
    }

    syncCopilotConfigModel(model);
    statusBar.set('COPILOT', shortTask, estimatedTokens);

    if (model !== modelChain[0]) {
      logActivityLine('COPILOT', `Trying fallback model: ${model}`);
    }

    const flags = ['--model', model, '--yolo'];
    if (sessionUUID) flags.push('--resume', sessionUUID);

    try {
      // Spawn with shell:false and the prompt as a single argv element — no
      // shell quoting issues. NOTE(review): very large prompts could exceed OS
      // argv limits; presumably prompts stay small enough — confirm.
      const result = await new Promise((resolvePromise, reject) => {
        const args = ['-p', processedPrompt, ...flags];
        const proc = spawn('copilot', args, {
          cwd: getRootDir(),
          shell: false,
          stdio: ['ignore', 'pipe', 'pipe'],
          env: { ...process.env },
        });

        let buffer = '';
        let allOutput = '';
        const flush = (chunk) => {
          buffer += chunk;
          allOutput += chunk;
          const lines = buffer.split('\n');
          buffer = lines.pop(); // keep trailing partial line for the next chunk
          lines.forEach(line => logActivityLine('COPILOT', line));
        };
        if (proc.stdout) proc.stdout.on('data', d => flush(d.toString()));
        if (proc.stderr) proc.stderr.on('data', d => flush(d.toString()));
        proc.on('close', (code) => {
          if (buffer.trim()) logActivityLine('COPILOT', buffer);
          if (allOutput.trim()) logRawBlock('COPILOT', `Full Copilot Output (exit ${code ?? '?'})`, allOutput);

          // Copilot CLI refuses some models until enabled once interactively.
          if (allOutput.includes('in interactive mode to enable this model')) {
            logActivityLine('COPILOT', `Model '${model}' is locked. Run: copilot --model ${model}`);
            logActivityLine('COPILOT', ' Then type a message and Ctrl+C to unlock it.');
          }

          resolvePromise({ code: code ?? 0, allOutput });
        });
        proc.on('error', reject);
      });

      // Check for limit errors in output
      const { type, line } = detectLimit(result.allOutput);
      if (type === 'capacity') {
        banModel(model, 'capacity');
        exhaustedMsgs.push(`${model}: ${line}`);
        logActivityLine('COPILOT', `Model ${model} quota exhausted — trying next model`);
        continue; // try next model
      }
      if (type === 'ratelimit') {
        banModel(model, 'ratelimit');
        throw new AILimitError('COPILOT', result.allOutput);
      }

      if (model !== getStartModels().copilot) {
        logActivityLine('COPILOT', `Task completed using fallback model: ${model}`);
      }
      circuitBreaker.recordSuccess('copilot');
      trackUsage({ provider: 'copilot', model, stage: options.stage, featureId: options.featureId, promptText: prompt, responseText: result.allOutput || '', durationMs: Date.now() - _costStart });
      return result.code;

    } finally {
      // Runs on return, continue, and throw alike — status bar never sticks.
      statusBar.clear();
    }
  }

  // All models exhausted
  const detail = exhaustedMsgs.map(m => `\n ${m}`).join('');
  throw new AILimitError('COPILOT', `All models exhausted:${detail}`);
}
|
|
1228
|
+
|
|
1229
|
+
/**
 * Derive a stable UUID from a featureId for Copilot session tracking.
 * Hashing is deterministic, so the same featureId always resumes the
 * same Copilot conversation.
 * Output mimics UUID v4 layout: xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx
 * (version nibble fixed to '4', variant nibble fixed to '8').
 */
function featureToSessionUUID(featureId) {
  const digest = createHash('md5').update(`aicc-copilot-${featureId}`).digest('hex');
  const groups = [
    digest.slice(0, 8),
    digest.slice(8, 12),
    `4${digest.slice(13, 16)}`,
    `8${digest.slice(17, 20)}`,
    digest.slice(20, 32),
  ];
  return groups.join('-');
}
|
|
1238
|
+
|
|
1239
|
+
/**
 * Run Copilot pinned to one SPECIFIC model (e.g. claude-opus-4.6),
 * bypassing the normal model fallback chain.
 * Only call this after the user has explicitly confirmed a premium model.
 */
export async function runCopilotWithModel(model, prompt, options = {}) {
  syncCopilotConfigModel(model);
  const forcedOptions = { ...options, _forceModel: model };
  return runCopilot(prompt, forcedOptions);
}
|
|
1247
|
+
|
|
1248
|
+
// ─── Ollama Local AI ─────────────────────────────────────────────────────────
|
|
1249
|
+
//
|
|
1250
|
+
// Ollama exposes an OpenAI-compatible API at localhost:11434/v1.
|
|
1251
|
+
// Used for lightweight tasks: summaries, explanations, quick Q&A.
|
|
1252
|
+
// No CLI tool needed — pure HTTP calls.
|
|
1253
|
+
|
|
1254
|
+
/**
 * Check whether the local Ollama daemon is running and reachable.
 *
 * Probes `GET /api/tags` with a 3-second abort timeout so a down or hung
 * daemon can never stall the caller.
 *
 * @returns {Promise<{available: boolean, models: string[]}>}
 *   `models` lists the installed model names when available.
 */
export async function isOllamaAvailable() {
  const controller = new AbortController();
  // Abort the probe after 3s — listing tags on a healthy daemon is fast.
  const timeout = setTimeout(() => controller.abort(), 3000);
  try {
    const res = await fetch(`${getOllamaUrl()}/api/tags`, { signal: controller.signal });
    if (!res.ok) return { available: false, models: [] };
    const data = await res.json();
    const models = (data.models || []).map(m => m.name);
    return { available: true, models };
  } catch {
    // Unreachable, aborted, or malformed response — all mean "not available".
    return { available: false, models: [] };
  } finally {
    // Always disarm the timer. The original cleared it only on the success
    // path, so a rejected fetch leaked a live 3s timer that kept the event
    // loop alive and later aborted an already-dead controller.
    clearTimeout(timeout);
  }
}
|
|
1272
|
+
|
|
1273
|
+
/**
 * Run a prompt through Ollama's local AI via its OpenAI-compatible
 * /v1/chat/completions endpoint.
 *
 * Lightweight — no session management, no file output, no model chains.
 * Returns the response text directly. Throws on error.
 *
 * @param {string} prompt - The question or task
 * @param {object} options
 *   model {string} - Override model (default: configured ollama model)
 *   timeout {number} - Timeout in ms (default: 60000 — first call may load the model)
 *   system {string} - Optional system prompt for context
 * @returns {Promise<string>} Assistant message text ('' when the response has no content)
 * @throws {Error} On timeout, missing model, connection refused, or non-OK API status
 */
export async function runOllama(prompt, options = {}) {
  const model = options.model || getStartModels().ollama;
  const timeout = options.timeout || 60000;
  const _costStart = Date.now(); // wall-clock start for usage tracking

  // One-line activity preview: first 60 chars with newlines flattened.
  logActivityLine('OLLAMA', `[${model}] ${prompt.slice(0, 60).replace(/\n/g, ' ')}...`);

  // OpenAI-style message array: optional system turn first, then the user turn.
  const messages = [];
  if (options.system) {
    messages.push({ role: 'system', content: options.system });
  }
  messages.push({ role: 'user', content: prompt });

  // Abort the HTTP call when the timeout elapses.
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeout);

  try {
    const res = await fetch(`${getOllamaUrl()}/v1/chat/completions`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ model, messages, stream: false }),
      signal: controller.signal,
    });
    clearTimeout(timer);

    if (!res.ok) {
      const body = await res.text();
      // Friendlier message for the common "model not pulled yet" case.
      if (body.includes('model') && body.includes('not found')) {
        throw new Error(`Model "${model}" not found. Run: ollama pull ${model}`);
      }
      throw new Error(`Ollama API error ${res.status}: ${body.slice(0, 200)}`);
    }

    const data = await res.json();
    const text = data.choices?.[0]?.message?.content || '';
    logActivityLine('OLLAMA', `Response: ${text.length} chars`);
    trackUsage({ provider: 'ollama', model, stage: options.stage, featureId: options.featureId, promptText: prompt, responseText: text, inputTokens: data.usage?.prompt_tokens, outputTokens: data.usage?.completion_tokens, durationMs: Date.now() - _costStart });
    return text;
  } catch (err) {
    clearTimeout(timer); // timer is still armed when fetch itself rejected
    // Map low-level failures to actionable messages; rethrow everything else.
    if (err.name === 'AbortError') {
      throw new Error(`Ollama timed out after ${timeout / 1000}s — model may still be loading. Try again.`);
    }
    if (err.cause?.code === 'ECONNREFUSED') {
      throw new Error('Ollama is not running. Start it with: ollama serve');
    }
    throw err;
  }
}
|
|
1334
|
+
|
|
1335
|
+
/**
 * Smart AI router — picks local (Ollama) or cloud based on task weight and config.
 *
 * Routing order:
 *   1. weight==='heavy' (and mode !== 'local') → Gemini, result read back from outputFile.
 *   2. AI_MODE==='local' → Ollama only; throws if the daemon is down.
 *   3. Otherwise → walk the pipeline 'chat' entry's provider list (primary +
 *      fallbacks) until one returns usable, non-rate-limited text.
 *   4. Last resort → Ollama if reachable (unless fallback=false), else throw.
 *
 * @param {string} prompt - The question or task
 * @param {object} options
 *   weight {'light'|'heavy'} - Task complexity (default: 'light')
 *   fallback {boolean} - Fall back to cloud if Ollama unavailable (default: true)
 *   system {string} - Optional system prompt
 *   model {string} - Override Ollama model
 *   outputFile {string} - For heavy tasks routed to Gemini
 *
 * @returns {Promise<string>} AI response text
 * @throws {Error} When no provider can produce a response
 */
export async function runAI(prompt, options = {}) {
  const { weight = 'light', fallback = true } = options;
  const mode = AI_MODE; // hybrid | cloud | local
  const _costStartAI = Date.now(); // wall-clock start for usage tracking

  // Heavy tasks always go to cloud (unless mode is 'local').
  // Gemini writes its answer to a file, so an outputFile is mandatory here.
  if (weight === 'heavy' && mode !== 'local') {
    if (options.outputFile) {
      await runGemini(prompt, options.outputFile);
      return readFileSync(options.outputFile, 'utf8');
    }
    throw new Error('Heavy tasks require an outputFile for Gemini.');
  }

  // Local mode — Ollama only, no cloud fallback at all.
  if (mode === 'local') {
    const ollamaStatus = await isOllamaAvailable();
    if (!ollamaStatus.available) {
      throw new Error('Ollama is not running and AI_MODE=local. Start Ollama or switch to hybrid mode.');
    }
    return await runOllama(prompt, { system: options.system, model: options.model });
  }

  // Cloud or hybrid mode — use pipeline 'chat' entry with fallbacks.
  // The chat pipeline entry defines: primary provider + model + fallbacks,
  // e.g. { stage: 'chat', provider: 'openclaw', model: 'auto', fallbacks: ['copilot:claude-haiku-4.5'] }.
  // Fallback strings are either 'provider:model' or a bare provider name.
  const chatEntry = _getPipelineEntry('chat');
  const chatProviders = [
    { provider: chatEntry.provider, model: chatEntry.model },
    ...(chatEntry.fallbacks || []).map(fb => {
      if (fb.includes(':')) {
        const [p, m] = fb.split(':', 2);
        return { provider: p, model: m };
      }
      return { provider: fb, model: null };
    }),
  ];

  // Try each provider in order; any thrown error just advances to the next one.
  for (const { provider: chatProvider, model: chatProviderModel } of chatProviders) {
    try {
      if (chatProvider === 'openclaw') {
        // Availability is probed once per process and cached in _openclawAvailable.
        if (_openclawAvailable === null) _openclawAvailable = await isAvailable('openclaw');
        if (!_openclawAvailable) continue;
        const fullPrompt = options.system ? `${options.system}\n\n${prompt}` : prompt;
        const sessionId = getOpenclawSessionId();
        const result = await capture(
          'openclaw', ['agent', '--session-id', sessionId, '-m', fullPrompt, '--json'],
          { timeout: 60000, shell: false }
        );
        if (result.code === 0 && result.stdout) {
          // Tolerate banner text before the JSON payload by scanning for the first '{'.
          const jsonStart = result.stdout.indexOf('{');
          if (jsonStart >= 0) {
            const parsed = JSON.parse(result.stdout.slice(jsonStart));
            const text = parsed?.result?.payloads?.[0]?.text?.trim();
            const aiModel = parsed?.result?.meta?.agentMeta?.model || 'unknown';
            if (text) {
              logActivityLine('OPENCLAW', `Response via ${aiModel}: ${text.length} chars`);
              trackUsage({ provider: 'openclaw', model: aiModel, featureId: options.featureId, promptText: prompt, responseText: text, durationMs: Date.now() - _costStartAI });
              return text;
            }
          }
        }
        logActivityLine('OPENCLAW', `Exit ${result.code} — trying next provider...`);
      } else if (chatProvider === 'copilot') {
        const copilotAvailable = await isAvailable('copilot');
        if (!copilotAvailable) continue;
        // Prompt is staged in a temp file and fed via "$(cat ...)" — presumably
        // to dodge argv length limits; TODO confirm (shell-interpolation risk
        // if the prompt file path ever contains quotes).
        const tmpOut = resolve(tmpdir(), `aicc-ai-copilot-${Date.now()}.txt`);
        const fullPrompt = options.system ? `${options.system}\n\n${prompt}` : prompt;
        writeFileSync(tmpOut, fullPrompt, 'utf8');
        const copilotModel = options.model || chatProviderModel || 'claude-haiku-4.5';
        const result = await capture(
          `copilot --model ${copilotModel} -p "$(cat '${tmpOut}')"`, [],
          { timeout: 60000 }
        );
        try { unlinkSync(tmpOut); } catch { /* */ }
        const output = (result.stdout || '').trim();
        if (output && result.code === 0) {
          // Even a 0-exit response may contain a rate-limit banner — check before accepting.
          const { type } = detectLimit(output + '\n' + (result.stderr || ''));
          if (!type) {
            logActivityLine('COPILOT', `Response: ${output.length} chars`);
            trackUsage({ provider: 'copilot', model: copilotModel, featureId: options.featureId, promptText: prompt, responseText: output, durationMs: Date.now() - _costStartAI });
            return output;
          }
          logActivityLine('COPILOT', 'Rate limited — trying next provider...');
        } else if (result.code !== 0) {
          logActivityLine('COPILOT', `Exit code ${result.code} — trying next provider...`);
        }
      } else if (chatProvider === 'gemini') {
        const geminiAvailable = await isAvailable('gemini');
        if (!geminiAvailable) continue;
        const tmpOut = resolve(tmpdir(), `aicc-ai-gemini-${Date.now()}.md`);
        await runGemini(options.system ? `${options.system}\n\n${prompt}` : prompt, tmpOut);
        const text = readFileSync(tmpOut, 'utf8');
        try { unlinkSync(tmpOut); } catch { /* */ }
        // NOTE(review): unlike the other branches, this one never calls
        // trackUsage — gemini chat responses are invisible to cost tracking.
        return text;
      } else if (chatProvider === 'claude') {
        const claudeAvailable = await isAvailable('claude');
        if (!claudeAvailable) continue;
        const claudeModel = options.model || chatProviderModel || 'claude-sonnet-4-6';

        // Build flags — use --resume for session continuity so AI remembers context.
        const flags = ['--model', claudeModel, '--print', '--output-format', 'json', '--dangerously-skip-permissions'];

        // Chat session: persist conversation across messages.
        const sessionsDir = resolve(getWorkflowDir(), 'sessions');
        if (!existsSync(sessionsDir)) mkdirSync(sessionsDir, { recursive: true });
        const chatSessionFile = resolve(sessionsDir, 'CLAUDE-chat.json');
        // NOTE(review): hasSession is written below but never read — candidate for removal.
        let hasSession = false;
        if (existsSync(chatSessionFile)) {
          try {
            const { sessionId } = JSON.parse(readFileSync(chatSessionFile, 'utf8'));
            if (sessionId) {
              flags.push('--resume', sessionId);
              hasSession = true;
            }
          } catch { /* corrupt — start fresh */ }
        }

        // Always include system prompt — resumed sessions lose the original system context,
        // causing AI to forget _action JSON instructions and respond conversationally instead.
        const fullPrompt = options.system
          ? `${options.system}\n\n${prompt}`
          : prompt;
        const tmpOut = resolve(tmpdir(), `aicc-ai-claude-${Date.now()}.txt`);
        writeFileSync(tmpOut, fullPrompt, 'utf8');

        // System prompt is always included now, so allow full timeout for all calls.
        const chatTimeout = 120000;
        const result = await capture(
          `cat "${tmpOut}" | claude ${flags.join(' ')}`,
          [], { shell: true, timeout: chatTimeout }
        );
        try { unlinkSync(tmpOut); } catch { /* */ }

        const stdout = (result.stdout || '').trim();
        if (stdout && result.code === 0) {
          // Parse JSON output to extract text and session_id.
          let output = stdout;
          try {
            const jsonStart = stdout.indexOf('{');
            if (jsonStart >= 0) {
              const parsed = JSON.parse(stdout.slice(jsonStart));
              // Save session ID for conversation continuity on the next call.
              if (parsed.session_id) {
                writeFileSync(chatSessionFile, JSON.stringify({ sessionId: parsed.session_id }));
              }
              // Extract the answer text from the JSON envelope.
              if (parsed.result) {
                output = parsed.result;
              }
            }
          } catch { /* not valid JSON — use raw stdout */ }

          const { type } = detectLimit(output + '\n' + (result.stderr || ''));
          if (!type) {
            logActivityLine('CLAUDE', `Chat response via ${claudeModel}: ${output.length} chars`);
            trackUsage({ provider: 'claude', model: claudeModel, featureId: options.featureId, promptText: prompt, responseText: output, durationMs: Date.now() - _costStartAI });
            return output;
          }
          logActivityLine('CLAUDE', 'Rate limited — trying next provider...');
        } else if (result.code !== 0) {
          // Session might be expired — clear it and let next attempt start fresh.
          try { unlinkSync(chatSessionFile); } catch { /* */ }
          logActivityLine('CLAUDE', `Exit code ${result.code} — trying next provider...`);
        }
      } else if (chatProvider === 'ollama') {
        const ollamaStatus = await isOllamaAvailable();
        if (!ollamaStatus.available) continue;
        return await runOllama(prompt, { system: options.system, model: options.model || chatProviderModel });
      }
    } catch (err) {
      // Any provider failure is non-fatal here — log and move down the list.
      logActivityLine(chatProvider.toUpperCase(), `Error: ${err.message} — trying next provider...`);
    }
  }

  // Final fallback: try Ollama if not already tried.
  if (!fallback) throw new Error('Cloud AIs unavailable and fallback=false.');
  const ollamaStatus = await isOllamaAvailable();
  if (ollamaStatus.available) {
    return await runOllama(prompt, { system: options.system, model: options.model });
  }

  throw new Error('All AI providers unavailable. Check your setup or pipeline config.');
}
|
|
1532
|
+
|
|
1533
|
+
/** Get the current AI mode setting ('hybrid' | 'cloud' | 'local'). */
export function getAIMode() {
  return AI_MODE;
}
|
|
1535
|
+
|
|
1536
|
+
/** Get the Ollama model name configured as the start model. */
export function getOllamaModel() {
  return getStartModels().ollama;
}
|
|
1538
|
+
|
|
1539
|
+
// ─── Cross-AI Fallback Pipeline ───────────────────────────────────────────────
|
|
1540
|
+
//
|
|
1541
|
+
// Provider order per stage is driven by `models.stageProviders` in config:
|
|
1542
|
+
//
|
|
1543
|
+
// Stage: pm / architect / review / deploy (Copilot's job)
|
|
1544
|
+
// Copilot (sonnet-4.6) → Gemini (3.1 Flash Lite → 2.5 Pro) → Ollama
|
|
1545
|
+
//
|
|
1546
|
+
// Stage: implement (Claude Code's job)
|
|
1547
|
+
// Claude → Copilot → Ollama
|
|
1548
|
+
//
|
|
1549
|
+
// Chat / user conversations:
|
|
1550
|
+
// Copilot (haiku-4.5 — cheap & fast) → Gemini → Ollama
|
|
1551
|
+
//
|
|
1552
|
+
// At each fallback transition, a bus event is emitted so the Telegram bot can
|
|
1553
|
+
// notify the user in real time.
|
|
1554
|
+
|
|
1555
|
+
// Copilot model for cross-AI fallback (read from config at call time)
|
|
1556
|
+
// Configure via aicc.config.js → models.copilotCrossAiModel
|
|
1557
|
+
|
|
1558
|
+
// Opus 4.6 model identifier. Used only when the user explicitly confirms
// premium usage (see runCopilotWithModel) — never picked by the fallback chain here.
export const OPUS_MODEL = 'claude-opus-4.6';
|
|
1560
|
+
|
|
1561
|
+
/**
 * Emit a rate-limit event so Telegram + web dashboard can notify the user.
 * Logs the fallback transition, then publishes an 'ai_rate_limit' bus event.
 */
function emitLimitEvent(agent, limitType, model, fallbackAgent) {
  logActivityLine('PIPELINE', `${agent} ${limitType} on ${model} → falling back to ${fallbackAgent}`);
  bus.emitEvent('ai_rate_limit', {
    agent,
    limitType,
    model,
    fallbackAgent,
    timestamp: new Date().toISOString(),
  });
}
|
|
1569
|
+
|
|
1570
|
+
/**
 * Run a pipeline stage with full cross-AI fallback.
 *
 * This is the top-level function that new-feature.js and review.js should call
 * instead of runGemini / runClaude / runCopilot directly, for stages where
 * rate limits are expected.
 *
 * Walks the stage's fallback chain (buildFallbackChain) in order. An
 * AILimitError from a runner advances to the next entry; any other error
 * propagates immediately. For 'pm'/'review' stages with an outputFile, the
 * produced document must exist and be >= 100 chars or the result is rejected
 * and the next runner is tried.
 *
 * @param {string} stage - 'architect' | 'pm' | 'review' | 'implement'
 * @param {string} prompt - Full prompt text
 * @param {object} options
 *   outputFile {string} - Required for Gemini-primary tasks (pm, review)
 *   featureId {string} - For session tracking (Claude, Copilot)
 *
 * @returns {*} Result from whichever AI succeeded
 * @throws {AILimitError|Error} When every runner is exhausted, or when the
 *   last runner fails document verification
 */
export async function runPipelineStage(stage, prompt, options = {}) {
  const fallbackChain = buildFallbackChain(stage);
  // Only document-producing stages are verified, and only when a target file was given.
  const requiresDocument = ['pm', 'review'].includes(stage) && options.outputFile;

  for (let i = 0; i < fallbackChain.length; i++) {
    const { agent, runner, label } = fallbackChain[i];
    const isLast = i === fallbackChain.length - 1;

    try {
      // Announce the handoff from the previous (exhausted) agent.
      if (i > 0) {
        const prevAgent = fallbackChain[i - 1].label;
        emitLimitEvent(prevAgent, 'exhausted', 'all models', label);
      }

      logActivityLine('PIPELINE', `Running ${stage} stage with ${label}...`);
      const result = await runner(prompt, options);

      // ── Document verification — NEVER skip document creation ──────────────
      if (requiresDocument) {
        const docExists = existsSync(options.outputFile);
        const docContent = docExists ? readFileSync(options.outputFile, 'utf8').trim() : '';
        const MIN_DOC_LENGTH = 100;

        if (!docExists || docContent.length < MIN_DOC_LENGTH) {
          logActivityLine('PIPELINE', `${label} completed but document is missing or too short (${docContent.length} chars)`);
          logActivityLine('PIPELINE', `Document verification FAILED — every stage MUST produce its document`);

          if (isLast) {
            bus.emitEvent('ai_all_exhausted', {
              stage,
              agents: fallbackChain.map(f => f.label).join(' → '),
              error: `All AIs completed but none produced a valid ${stage} document. Pipeline STOPPED — documents are required.`,
            });
            // This Error is caught by the catch below; it is not an
            // AILimitError, so the catch rethrows it to the caller.
            throw new Error(
              `Pipeline STOPPED: ${stage} stage requires a document but none was created.\n` +
              `Tried: ${fallbackChain.map(f => f.label).join(' → ')}\n` +
              `Documents are the audit trail — the pipeline cannot proceed without them.`
            );
          }
          // Not the last runner — discard this result and try the next AI.
          continue;
        }

        logActivityLine('PIPELINE', `Document verified: ${options.outputFile.split('/').pop()} (${docContent.length} chars)`);
      }

      return result;
    } catch (err) {
      // instanceof can fail across module copies — also match by name.
      if (err instanceof AILimitError || err.name === 'AILimitError') {
        logActivityLine(agent, `All models exhausted: ${err.message}`);
        if (isLast) {
          bus.emitEvent('ai_all_exhausted', {
            stage,
            agents: fallbackChain.map(f => f.label).join(' → '),
            error: err.message,
          });
          throw err;
        }
        continue;
      }
      // Non-limit errors are real failures — do not mask them with fallback.
      throw err;
    }
  }
}
|
|
1648
|
+
|
|
1649
|
+
/**
 * Build the ordered fallback chain for a pipeline stage.
 * Each entry has: { agent, runner, label }
 *
 * Driven by the `pipeline[]` array in config. Each stage entry specifies
 * the primary provider + model and an optional fallbacks array:
 *
 *   pipeline: [
 *     { stage: 'pm', provider: 'copilot', model: 'claude-sonnet-4.6', fallbacks: ['gemini:gemini-2.5-pro', 'ollama'] },
 *     ...
 *   ]
 *
 * Fallback strings are 'provider:model' or a bare provider name (bare names
 * resolve their model via getStartModels()).
 */
function buildFallbackChain(stage) {
  const entry = _getPipelineEntry(stage);
  const chain = [];

  // Helper: append a chain entry for a given provider + model.
  // Unknown providers are logged and skipped rather than failing the build.
  function addProviderEntry(provider, model) {
    switch (provider) {
      case 'copilot': {
        if (stage === 'implement') {
          // Implement stage: delegate to the full runCopilot flow (sessions, model chain).
          // NOTE(review): `model` is not forwarded here, so the configured model
          // appears only in the label — runCopilot picks its own chain. Confirm intended.
          chain.push({
            agent: 'COPILOT', label: `Copilot (${model})`,
            runner: (prompt, opts) => runCopilot(prompt, opts),
          });
        } else {
          // Non-implement stages: one-shot copilot invocation via the shell.
          chain.push({
            agent: 'COPILOT', label: `Copilot (${model})`,
            runner: async (prompt, opts) => {
              syncCopilotConfigModel(model);
              // Prompt goes through a temp file piped on stdin — presumably to
              // avoid argv length limits; TODO confirm. `model` is interpolated
              // unquoted into the shell string — safe only for config-controlled names.
              const tmpFile = resolve(tmpdir(), `aicc-fallback-${Date.now()}.txt`);
              writeFileSync(tmpFile, prompt, 'utf8');
              const result = await capture(
                `cat "${tmpFile}" | copilot -p --model ${model} --yolo`,
                [], { shell: true, timeout: DEFAULT_TIMEOUTS.copilot }
              );
              try { unlinkSync(tmpFile); } catch { /* */ }
              if (opts.outputFile && result.stdout.trim()) {
                writeFileSync(opts.outputFile, result.stdout);
              }
              // Rate-limit banners surface as AILimitError so the stage falls through.
              const { type } = detectLimit(result.stdout + '\n' + result.stderr);
              if (type) throw new AILimitError('COPILOT', result.stdout + '\n' + result.stderr);
              return result.code;
            },
          });
        }
        break;
      }
      case 'claude':
        chain.push({
          agent: 'CLAUDE', label: `Claude (${model})`,
          runner: (prompt, opts) => runClaude(prompt, opts),
        });
        break;
      case 'gemini':
        // Requested model goes first in the Gemini model chain; the remaining
        // configured models (minus the duplicate) follow as internal fallbacks.
        chain.push({
          agent: 'GEMINI', label: `Gemini (${model})`,
          runner: (prompt, opts) => runGemini(prompt, opts.outputFile, { modelChain: [model, ...getConfiguredChains().gemini.filter(m => m !== model)] }),
        });
        break;
      case 'openclaw':
        chain.push({
          agent: 'OPENCLAW', label: `OpenClaw (${model})`,
          runner: async (prompt, opts) => {
            // Availability is probed once per process and cached.
            if (_openclawAvailable === null) _openclawAvailable = await isAvailable('openclaw');
            if (!_openclawAvailable) throw new AILimitError('OPENCLAW', 'OpenClaw CLI not available');
            const sessionId = getOpenclawSessionId();
            const fullPrompt = opts.system ? `${opts.system}\n\n${prompt}` : prompt;
            const result = await capture(
              'openclaw', ['agent', '--session-id', sessionId, '-m', fullPrompt, '--json'],
              { timeout: 60000, shell: false }
            );
            if (result.code === 0 && result.stdout) {
              // Tolerate leading banner text by scanning for the first '{'.
              const jsonStart = result.stdout.indexOf('{');
              if (jsonStart >= 0) {
                const parsed = JSON.parse(result.stdout.slice(jsonStart));
                const text = parsed?.result?.payloads?.[0]?.text?.trim();
                if (text) {
                  logActivityLine('OPENCLAW', `Response: ${text.length} chars`);
                  trackUsage({ provider: 'openclaw', model: parsed?.result?.meta?.agentMeta?.model || model, featureId: opts.featureId, promptText: prompt, responseText: text, durationMs: 0 });
                  if (opts.outputFile) writeFileSync(opts.outputFile, text);
                  return text;
                }
              }
            }
            // Empty/invalid output counts as exhausted so the chain advances.
            throw new AILimitError('OPENCLAW', 'OpenClaw returned empty response');
          },
        });
        break;
      case 'ollama':
        chain.push(ollamaFallbackEntry(stage));
        break;
      default:
        logActivityLine('SYSTEM', `Unknown provider "${provider}" in pipeline — skipped`);
        break;
    }
  }

  // Primary provider + model from pipeline entry.
  addProviderEntry(entry.provider, entry.model);

  // Fallback entries.
  for (const fb of (entry.fallbacks || [])) {
    if (fb.includes(':')) {
      const [prov, mod] = fb.split(':', 2);
      addProviderEntry(prov, mod);
    } else {
      // Just provider name, no model — use defaults.
      const defaultModel = getStartModels()[fb] || fb;
      addProviderEntry(fb, defaultModel);
    }
  }

  return chain;
}
|
|
1764
|
+
|
|
1765
|
+
/**
 * Ollama last-resort fallback entry for a pipeline stage.
 *
 * Deliberately constrained: 3-second timeout — if the local model can't
 * respond in 3s, write an error document and give up. The response is only a
 * summary/"retry later" note, never a real stage result.
 *
 * @param {string} stage - Pipeline stage name, used in user-facing error text
 * @returns {{agent: string, label: string, runner: Function}} chain entry
 */
function ollamaFallbackEntry(stage) {
  return {
    agent: 'OLLAMA', label: 'Ollama (last resort)',
    runner: async (prompt, opts) => {
      const ollamaStatus = await isOllamaAvailable();
      if (!ollamaStatus.available) {
        // Nothing left to try: surface the failure in the doc, the bus, and the throw.
        const errorMsg = `All cloud AIs are rate-limited for the "${stage}" stage. ` +
          `Ollama is not running. Please wait and retry, or switch AI models.`;
        logActivityLine('OLLAMA', errorMsg);
        if (opts.outputFile) writeFileSync(opts.outputFile, `# Error\n\n${errorMsg}\n`);
        bus.emitEvent('ai_all_exhausted', { stage, error: errorMsg });
        throw new AILimitError('OLLAMA', errorMsg);
      }

      try {
        // Ask the local model for a brief summary only — prompt is truncated
        // to 2000 chars and the call is capped at 3s.
        const response = await runOllama(
          `You are a fallback AI. The primary AIs (Gemini, Claude, Copilot) are all rate-limited.\n\n` +
          `Summarize what was requested and inform the user to retry later:\n\n` +
          prompt.slice(0, 2000),
          { timeout: 3000 }
        );

        if (opts.outputFile) {
          // Clearly mark the document as a degraded local fallback, not a real result.
          writeFileSync(opts.outputFile,
            `# Fallback Response (Ollama)\n\n` +
            `> All cloud AIs were rate-limited. This is a local AI fallback summary.\n\n` +
            response
          );
        }
        return 0;
      } catch (err) {
        // Timeout or any other runOllama failure — report exhaustion everywhere.
        const errorMsg = `All AIs exhausted for "${stage}" stage. Ollama timed out (3s limit). ` +
          `Please wait for rate limits to reset and retry.`;
        logActivityLine('OLLAMA', errorMsg);
        if (opts.outputFile) writeFileSync(opts.outputFile, `# Error\n\n${errorMsg}\n`);
        bus.emitEvent('ai_all_exhausted', { stage, error: errorMsg });
        throw new AILimitError('OLLAMA', errorMsg);
      }
    },
  };
}
|