@workermill/agent 0.3.0 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/plan-validator.d.ts +3 -1
- package/dist/plan-validator.js +15 -3
- package/dist/planner.d.ts +2 -1
- package/dist/planner.js +44 -9
- package/dist/poller.js +5 -3
- package/dist/providers.d.ts +18 -0
- package/dist/providers.js +118 -0
- package/dist/spawner.d.ts +5 -0
- package/dist/spawner.js +7 -2
- package/package.json +1 -1
package/dist/plan-validator.d.ts
CHANGED
@@ -9,6 +9,7 @@
  * This ensures remote agent plans get the same quality gates as cloud plans,
  * even though the planning prompt runs locally via Claude CLI.
  */
+import { type AIProvider } from "./providers.js";
 export interface PlannedStory {
     id: string;
     title: string;
@@ -76,7 +77,8 @@ export declare function runCriticCli(claudePath: string, model: string, prompt:
 export declare function formatCriticFeedback(critic: CriticResult): string;
 /**
  * Run critic validation on a parsed plan.
+ * Routes to Claude CLI (Anthropic) or HTTP API (other providers).
  * Returns the critic result, or null if critic fails (non-blocking).
  */
-export declare function runCriticValidation(claudePath: string, model: string, prd: string, plan: ExecutionPlan, env: Record<string, string | undefined>, taskLabel: string): Promise<CriticResult | null>;
+export declare function runCriticValidation(claudePath: string, model: string, prd: string, plan: ExecutionPlan, env: Record<string, string | undefined>, taskLabel: string, provider?: AIProvider, providerApiKey?: string): Promise<CriticResult | null>;
 export { AUTO_APPROVAL_THRESHOLD };
package/dist/plan-validator.js
CHANGED
@@ -11,6 +11,7 @@
  */
 import { spawn } from "child_process";
 import chalk from "chalk";
+import { generateText } from "./providers.js";
 // ============================================================================
 // CONSTANTS
 // ============================================================================
@@ -245,13 +246,24 @@ function ts() {
 }
 /**
  * Run critic validation on a parsed plan.
+ * Routes to Claude CLI (Anthropic) or HTTP API (other providers).
  * Returns the critic result, or null if critic fails (non-blocking).
  */
-export async function runCriticValidation(claudePath, model, prd, plan, env, taskLabel) {
+export async function runCriticValidation(claudePath, model, prd, plan, env, taskLabel, provider, providerApiKey) {
     const criticPrompt = buildCriticPrompt(prd, plan);
-
+    const effectiveProvider = provider || "anthropic";
+    console.log(`${ts()} ${taskLabel} ${chalk.dim(`Running critic validation (${effectiveProvider})...`)}`);
     try {
-
+        let rawCriticOutput;
+        if (effectiveProvider === "anthropic") {
+            rawCriticOutput = await runCriticCli(claudePath, model, criticPrompt, env);
+        }
+        else {
+            if (!providerApiKey) {
+                throw new Error(`No API key for critic provider "${effectiveProvider}"`);
+            }
+            rawCriticOutput = await generateText(effectiveProvider, model, criticPrompt, providerApiKey, { maxTokens: 4096, temperature: 0.3, timeoutMs: 180_000 });
+        }
         const result = parseCriticResponse(rawCriticOutput);
         const statusIcon = result.score >= AUTO_APPROVAL_THRESHOLD
             ? chalk.green("✓")
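
For context, a minimal sketch of how the widened runCriticValidation signature can be called. The model name and key lookup below are illustrative placeholders, not values from the package:

    // Anthropic (default): routed through the Claude CLI; the new arguments may be omitted
    const cliResult = await runCriticValidation(claudePath, "sonnet", prd, plan, env, taskLabel);
    // Any other provider: routed through generateText(); an API key for that provider is required
    const apiResult = await runCriticValidation(
        claudePath,                   // still part of the signature; not used on the HTTP path per the diff above
        "gpt-4o-mini",                // illustrative model name
        prd, plan, env, taskLabel,
        "openai",
        process.env.OPENAI_API_KEY,   // illustrative key source
    );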
package/dist/planner.d.ts
CHANGED
@@ -15,6 +15,7 @@
  * sees the same planning progress as cloud mode.
  */
 import { type AgentConfig } from "./config.js";
+import type { ClaimCredentials } from "./spawner.js";
 export interface PlanningTask {
     id: string;
     summary: string;
@@ -32,4 +33,4 @@ export interface PlanningTask {
  * 6. If critic rejects: re-run planner with feedback (up to MAX_ITERATIONS)
  * 7. After MAX_ITERATIONS without approval: fail the task
  */
-export declare function planTask(task: PlanningTask, config: AgentConfig): Promise<boolean>;
+export declare function planTask(task: PlanningTask, config: AgentConfig, credentials?: ClaimCredentials): Promise<boolean>;
package/dist/planner.js
CHANGED
@@ -19,6 +19,7 @@ import { spawn } from "child_process";
 import { findClaudePath } from "./config.js";
 import { api } from "./api.js";
 import { parseExecutionPlan, applyFileCap, serializePlan, runCriticValidation, formatCriticFeedback, AUTO_APPROVAL_THRESHOLD, } from "./plan-validator.js";
+import { generateText } from "./providers.js";
 /** Max Planner-Critic iterations before giving up */
 const MAX_ITERATIONS = 3;
 /** Timestamp prefix */
@@ -228,6 +229,26 @@ function runClaudeCli(claudePath, model, prompt, env, taskId, startTime) {
         });
     });
 }
+/**
+ * Resolve the API key for a given provider from claim credentials.
+ * For Ollama, returns the base URL instead of an API key.
+ */
+function resolveProviderApiKey(provider, credentials) {
+    if (!credentials)
+        return undefined;
+    switch (provider) {
+        case "anthropic":
+            return credentials.anthropicApiKey;
+        case "openai":
+            return credentials.openaiApiKey;
+        case "google":
+            return credentials.googleApiKey;
+        case "ollama":
+            return credentials.ollamaBaseUrl || "http://localhost:11434";
+        default:
+            return undefined;
+    }
+}
 /**
  * Run planning for a task with Planner-Critic validation loop.
  *
@@ -240,7 +261,7 @@ function runClaudeCli(claudePath, model, prompt, env, taskId, startTime) {
  * 6. If critic rejects: re-run planner with feedback (up to MAX_ITERATIONS)
  * 7. After MAX_ITERATIONS without approval: fail the task
  */
-export async function planTask(task, config) {
+export async function planTask(task, config, credentials) {
     const taskLabel = chalk.cyan(task.id.slice(0, 8));
     console.log(`${ts()} ${taskLabel} Fetching planning prompt...`);
     await postLog(task.id, `${PREFIX} Fetching planning prompt from cloud API...`);
@@ -248,11 +269,15 @@ export async function planTask(task, config) {
     const promptResponse = await api.get("/api/agent/planning-prompt", {
         params: { taskId: task.id },
     });
-    const { prompt: basePrompt, model } = promptResponse.data;
+    const { prompt: basePrompt, model, provider: planningProvider } = promptResponse.data;
     const cliModel = model || "sonnet";
+    const provider = (planningProvider || "anthropic");
+    const isAnthropicPlanning = provider === "anthropic";
     const claudePath = process.env.CLAUDE_CLI_PATH || findClaudePath() || "claude";
     const cleanEnv = { ...process.env };
     delete cleanEnv.CLAUDE_CODE_OAUTH_TOKEN;
+    // Resolve provider API key for non-Anthropic planning
+    const providerApiKey = resolveProviderApiKey(provider, credentials);
     const startTime = Date.now();
     // PRD for critic validation: use task description, fall back to summary
     const prd = task.description || task.summary;
@@ -265,18 +290,28 @@ export async function planTask(task, config) {
     let totalFileCapTruncations = 0;
     for (let iteration = 1; iteration <= MAX_ITERATIONS; iteration++) {
         const iterLabel = MAX_ITERATIONS > 1 ? ` (attempt ${iteration}/${MAX_ITERATIONS})` : "";
+        const providerLabel = `${provider}/${cliModel}`;
         if (iteration > 1) {
-            console.log(`${ts()} ${taskLabel} Running
-            await postLog(task.id, `${PREFIX} Re-planning${iterLabel} using
+            console.log(`${ts()} ${taskLabel} Running planner${iterLabel} ${chalk.dim(`(${chalk.yellow(providerLabel)})`)}`);
+            await postLog(task.id, `${PREFIX} Re-planning${iterLabel} using ${providerLabel}`);
         }
         else {
-            console.log(`${ts()} ${taskLabel} Running
-            await postLog(task.id, `${PREFIX} Starting planning agent using
+            console.log(`${ts()} ${taskLabel} Running planner ${chalk.dim(`(${chalk.yellow(providerLabel)})`)}`);
+            await postLog(task.id, `${PREFIX} Starting planning agent using ${providerLabel}`);
         }
-        // 2a.
+        // 2a. Generate plan via Claude CLI (Anthropic) or HTTP API (other providers)
        let rawOutput;
        try {
-
+            if (isAnthropicPlanning) {
+                rawOutput = await runClaudeCli(claudePath, cliModel, currentPrompt, cleanEnv, task.id, startTime);
+            }
+            else {
+                if (!providerApiKey) {
+                    throw new Error(`No API key available for provider "${provider}". Configure it in Settings > Integrations.`);
+                }
+                await postProgress(task.id, "generating_plan", 0, "Generating plan via API...", 0, 0);
+                rawOutput = await generateText(provider, cliModel, currentPrompt, providerApiKey);
+            }
        }
        catch (error) {
            const elapsed = Math.round((Date.now() - startTime) / 1000);
@@ -313,7 +348,7 @@ export async function planTask(task, config) {
        console.log(`${ts()} ${taskLabel} Plan: ${chalk.bold(plan.stories.length)} stories`);
        await postLog(task.id, `${PREFIX} Plan generated: ${plan.stories.length} stories (${formatElapsed(elapsed)}). Running critic validation...`);
        // 2d. Run critic validation
-        const criticResult = await runCriticValidation(claudePath, cliModel, prd, plan, cleanEnv, taskLabel);
+        const criticResult = await runCriticValidation(claudePath, cliModel, prd, plan, cleanEnv, taskLabel, provider, providerApiKey);
        // Track best plan across iterations
        if (criticResult && criticResult.score > bestScore) {
            bestPlan = plan;
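
A sketch of what the new resolveProviderApiKey helper returns, following the switch above; the credential values are placeholders and the field names come from ClaimCredentials in spawner.d.ts:

    const credentials = {
        openaiApiKey: "sk-...",                          // placeholder
        ollamaBaseUrl: "http://ollama.internal:11434",   // placeholder
    };
    resolveProviderApiKey("openai", credentials);   // -> "sk-..."
    resolveProviderApiKey("google", credentials);   // -> undefined (no googleApiKey present)
    resolveProviderApiKey("ollama", credentials);   // -> "http://ollama.internal:11434"
    resolveProviderApiKey("ollama", undefined);     // -> undefined (no credentials at all)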
package/dist/poller.js
CHANGED
@@ -7,7 +7,7 @@
 import chalk from "chalk";
 import { api } from "./api.js";
 import { planTask } from "./planner.js";
-import { spawnWorker, getActiveCount, getActiveTaskIds, stopTask } from "./spawner.js";
+import { spawnWorker, getActiveCount, getActiveTaskIds, stopTask, } from "./spawner.js";
 import { AGENT_VERSION } from "./version.js";
 import { selfUpdate, restartAgent } from "./updater.js";
 // Track tasks currently being planned (to avoid double-dispatching)
@@ -76,7 +76,8 @@ async function pollOnce(config) {
  * Handle a task in "planning" status.
  */
 async function handlePlanningTask(task, config) {
-    // Claim the task
+    // Claim the task (also returns org credentials for provider API keys)
+    let credentials;
     try {
         const claimResponse = await api.post("/api/agent/claim", {
             taskId: task.id,
@@ -85,6 +86,7 @@ async function handlePlanningTask(task, config) {
         if (!claimResponse.data.claimed) {
             return; // Another agent or cloud orchestrator claimed it
         }
+        credentials = claimResponse.data.credentials;
     }
     catch {
         return;
@@ -94,7 +96,7 @@ async function handlePlanningTask(task, config) {
     console.log(`${ts()} ${chalk.magenta("◆ PLANNING")} ${taskLabel} ${task.summary.substring(0, 60)}`);
     planningInProgress.add(task.id);
     // Run planning asynchronously (don't block the poll loop)
-    planTask(task, config)
+    planTask(task, config, credentials)
         .then((success) => {
         if (success) {
             console.log(`${ts()} ${chalk.green("✓")} Planning complete for ${taskLabel}`);
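
Illustrative only: a simplified view of the claim response this change relies on, reduced to the two fields the poller reads; credential field names mirror ClaimCredentials in spawner.d.ts and the values are placeholders:

    // POST /api/agent/claim -> { claimed: boolean, credentials?: ClaimCredentials }
    const { claimed, credentials } = claimResponse.data;
    if (claimed) {
        // e.g. credentials?.anthropicApiKey, credentials?.openaiApiKey, credentials?.ollamaBaseUrl
        planTask(task, config, credentials);
    }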
package/dist/providers.d.ts
ADDED
@@ -0,0 +1,18 @@
+/**
+ * Lightweight HTTP wrappers for text generation across AI providers.
+ *
+ * Used for planning and critic stages when the org's provider is not Anthropic.
+ * No heavy SDK dependencies — uses the existing axios dependency for HTTP calls.
+ * Streaming is not needed (planning/critic are text-only, result matters).
+ */
+export type AIProvider = "anthropic" | "openai" | "google" | "ollama";
+export interface GenerateTextOptions {
+    maxTokens?: number;
+    temperature?: number;
+    timeoutMs?: number;
+}
+/**
+ * Generate text from any supported AI provider via direct HTTP API calls.
+ * Returns the raw text response.
+ */
+export declare function generateText(provider: AIProvider, model: string, prompt: string, apiKey: string, options?: GenerateTextOptions): Promise<string>;
package/dist/providers.js
ADDED
@@ -0,0 +1,118 @@
+/**
+ * Lightweight HTTP wrappers for text generation across AI providers.
+ *
+ * Used for planning and critic stages when the org's provider is not Anthropic.
+ * No heavy SDK dependencies — uses the existing axios dependency for HTTP calls.
+ * Streaming is not needed (planning/critic are text-only, result matters).
+ */
+import axios from "axios";
+const DEFAULT_MAX_TOKENS = 16384;
+const DEFAULT_TIMEOUT_MS = 600_000; // 10 minutes (matches Claude CLI timeout)
+/**
+ * Generate text from any supported AI provider via direct HTTP API calls.
+ * Returns the raw text response.
+ */
+export async function generateText(provider, model, prompt, apiKey, options) {
+    const maxTokens = options?.maxTokens ?? DEFAULT_MAX_TOKENS;
+    const temperature = options?.temperature ?? 0.7;
+    const timeoutMs = options?.timeoutMs ?? DEFAULT_TIMEOUT_MS;
+    switch (provider) {
+        case "anthropic":
+            return generateAnthropic(model, prompt, apiKey, maxTokens, temperature, timeoutMs);
+        case "openai":
+            return generateOpenAI(model, prompt, apiKey, maxTokens, temperature, timeoutMs);
+        case "google":
+            return generateGoogle(model, prompt, apiKey, maxTokens, temperature, timeoutMs);
+        case "ollama":
+            return generateOllama(model, prompt, apiKey, maxTokens, temperature, timeoutMs);
+        default:
+            throw new Error(`Unsupported AI provider: ${provider}`);
+    }
+}
+/**
+ * Anthropic Messages API (for orgs with API key, not CLI).
+ */
+async function generateAnthropic(model, prompt, apiKey, maxTokens, temperature, timeoutMs) {
+    const response = await axios.post("https://api.anthropic.com/v1/messages", {
+        model,
+        max_tokens: maxTokens,
+        temperature,
+        messages: [{ role: "user", content: prompt }],
+    }, {
+        headers: {
+            "x-api-key": apiKey,
+            "anthropic-version": "2023-06-01",
+            "Content-Type": "application/json",
+        },
+        timeout: timeoutMs,
+    });
+    const content = response.data?.content;
+    if (Array.isArray(content)) {
+        return content
+            .filter((block) => block.type === "text")
+            .map((block) => block.text)
+            .join("");
+    }
+    throw new Error("Unexpected Anthropic API response format");
+}
+/**
+ * OpenAI Chat Completions API.
+ */
+async function generateOpenAI(model, prompt, apiKey, maxTokens, temperature, timeoutMs) {
+    const response = await axios.post("https://api.openai.com/v1/chat/completions", {
+        model,
+        max_tokens: maxTokens,
+        temperature,
+        messages: [{ role: "user", content: prompt }],
+    }, {
+        headers: {
+            Authorization: `Bearer ${apiKey}`,
+            "Content-Type": "application/json",
+        },
+        timeout: timeoutMs,
+    });
+    const message = response.data?.choices?.[0]?.message;
+    if (message?.content)
+        return message.content;
+    throw new Error("Unexpected OpenAI API response format");
+}
+/**
+ * Google Generative AI (Gemini) API.
+ */
+async function generateGoogle(model, prompt, apiKey, maxTokens, temperature, timeoutMs) {
+    const response = await axios.post(`https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${apiKey}`, {
+        contents: [{ parts: [{ text: prompt }] }],
+        generationConfig: {
+            maxOutputTokens: maxTokens,
+            temperature,
+        },
+    }, {
+        headers: { "Content-Type": "application/json" },
+        timeout: timeoutMs,
+    });
+    const candidate = response.data?.candidates?.[0];
+    const text = candidate?.content?.parts?.[0]?.text;
+    if (text)
+        return text;
+    throw new Error("Unexpected Google AI API response format");
+}
+/**
+ * Ollama Generate API (self-hosted).
+ * `apiKey` is used as the Ollama base URL (e.g. "http://localhost:11434").
+ */
+async function generateOllama(model, prompt, ollamaHost, _maxTokens, temperature, timeoutMs) {
+    const baseUrl = ollamaHost || "http://localhost:11434";
+    const response = await axios.post(`${baseUrl}/api/generate`, {
+        model,
+        prompt,
+        stream: false,
+        options: { temperature },
+    }, {
+        headers: { "Content-Type": "application/json" },
+        timeout: timeoutMs,
+    });
+    const text = response.data?.response;
+    if (text)
+        return text;
+    throw new Error("Unexpected Ollama API response format");
+}
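
A minimal usage sketch for the new generateText helper; the model names, prompt, and key are illustrative:

    import { generateText } from "./providers.js";

    // Hosted provider: pass the org's API key (placeholder shown)
    const text = await generateText("openai", "gpt-4o-mini", "Summarize this PRD...", "sk-...", {
        maxTokens: 2048,
        temperature: 0.3,
    });

    // Ollama: the apiKey argument carries the base URL instead of a key
    const local = await generateText("ollama", "llama3", "Summarize this PRD...", "http://localhost:11434");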
package/dist/spawner.d.ts
CHANGED
@@ -14,6 +14,7 @@ export interface SpawnableTask {
     description: string | null;
     jiraIssueKey: string | null;
     workerModel: string;
+    workerProvider?: string;
     githubRepo: string;
     scmProvider: string;
     skipManagerReview?: boolean;
@@ -34,6 +35,10 @@ export interface ClaimCredentials {
     customerAwsRegion?: string;
     issueTrackerProvider?: string;
     bitbucketEmail?: string;
+    anthropicApiKey?: string;
+    openaiApiKey?: string;
+    googleApiKey?: string;
+    ollamaBaseUrl?: string;
 }
 /**
  * Spawn a Docker worker container for a task.
package/dist/spawner.js
CHANGED
@@ -193,8 +193,13 @@ export async function spawnWorker(task, config, orgConfig, credentials) {
         BITBUCKET_EMAIL: credentials?.bitbucketEmail || "",
         // Task notes from dashboard
         TASK_NOTES: task.taskNotes || "",
-        //
-        ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY || "",
+        // AI provider configuration
+        ANTHROPIC_API_KEY: credentials?.anthropicApiKey || process.env.ANTHROPIC_API_KEY || "",
+        WORKER_PROVIDER: task.workerProvider || "anthropic",
+        OPENAI_API_KEY: credentials?.openaiApiKey || "",
+        GOOGLE_API_KEY: credentials?.googleApiKey || "",
+        GOOGLE_GENERATIVE_AI_API_KEY: credentials?.googleApiKey || "",
+        OLLAMA_HOST: credentials?.ollamaBaseUrl || "",
         // Resilience settings from org config
         BLOCKER_MAX_AUTO_RETRIES: String(orgConfig.blockerMaxAutoRetries ?? 3),
         BLOCKER_AUTO_RETRY_ENABLED: orgConfig.blockerAutoRetryEnabled !== false ? "true" : "false",
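
For illustration, one way a worker process could pick up the new environment variables that spawnWorker now sets; this consumer-side snippet is an assumption about the worker image, not code from this package:

    // Hypothetical worker-side lookup of the env injected above
    const provider = process.env.WORKER_PROVIDER || "anthropic";
    const apiKey =
        provider === "openai" ? process.env.OPENAI_API_KEY :
        provider === "google" ? process.env.GOOGLE_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY :
        provider === "ollama" ? process.env.OLLAMA_HOST :   // base URL, not a key
        process.env.ANTHROPIC_API_KEY;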