@malayvuong/agent-orchestra-providers 2026.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/dist/index.d.ts +249 -0
- package/dist/index.js +695 -0
- package/package.json +42 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Nguyen Minh Vuong
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
1
|
+
import { ProviderOutput } from '@malayvuong/agent-orchestra-core';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Input to an LLM provider adapter.
|
|
5
|
+
*
|
|
6
|
+
* Spec v1.3 §23.3 — the provider adapter receives this and returns
|
|
7
|
+
* a ProviderOutput after calling the upstream API.
|
|
8
|
+
*/
|
|
9
|
+
type ProviderInput = {
|
|
10
|
+
/** System-level instructions for the model */
|
|
11
|
+
systemPrompt: string;
|
|
12
|
+
/** User-facing prompt content */
|
|
13
|
+
userPrompt: string;
|
|
14
|
+
/** Model identifier (e.g. 'gpt-4o', 'claude-sonnet-4-20250514') */
|
|
15
|
+
model: string;
|
|
16
|
+
/** Maximum tokens to generate */
|
|
17
|
+
maxTokens?: number;
|
|
18
|
+
/** Sampling temperature (0-2) */
|
|
19
|
+
temperature?: number;
|
|
20
|
+
/** Request timeout in milliseconds */
|
|
21
|
+
timeoutMs?: number;
|
|
22
|
+
/** Abort signal for cancellation support */
|
|
23
|
+
abortSignal?: AbortSignal;
|
|
24
|
+
};
|
|
25
|
+
/**
|
|
26
|
+
* Contract for LLM provider adapters.
|
|
27
|
+
*
|
|
28
|
+
* Spec v1.3 §23.3 — each provider handles its own response format
|
|
29
|
+
* internally and returns a consistent ProviderOutput.
|
|
30
|
+
*/
|
|
31
|
+
interface AgentProvider {
|
|
32
|
+
/** Execute a prompt against the provider and return normalized output */
|
|
33
|
+
run(input: ProviderInput): Promise<ProviderOutput>;
|
|
34
|
+
}
|
|
35
|
+
/**
|
|
36
|
+
* Configuration for provider error responses.
|
|
37
|
+
*/
|
|
38
|
+
type ProviderErrorCode = 'rate_limit' | 'auth_error' | 'server_error' | 'timeout' | 'network_error' | 'invalid_response';
|
|
39
|
+
/**
|
|
40
|
+
* Structured error thrown by provider adapters.
|
|
41
|
+
*/
|
|
42
|
+
declare class ProviderError extends Error {
|
|
43
|
+
readonly code: ProviderErrorCode;
|
|
44
|
+
readonly statusCode?: number | undefined;
|
|
45
|
+
readonly retryable: boolean;
|
|
46
|
+
readonly retryAfterMs?: number | undefined;
|
|
47
|
+
constructor(message: string, code: ProviderErrorCode, statusCode?: number | undefined, retryable?: boolean, retryAfterMs?: number | undefined);
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
/**
|
|
51
|
+
* Configuration options for the OpenAI provider adapter.
|
|
52
|
+
*/
|
|
53
|
+
type OpenAIProviderConfig = {
|
|
54
|
+
/** API key; defaults to process.env.OPENAI_API_KEY */
|
|
55
|
+
apiKey?: string;
|
|
56
|
+
/** Base URL for the API; defaults to 'https://api.openai.com' */
|
|
57
|
+
baseUrl?: string;
|
|
58
|
+
/** Default model to use when not specified in ProviderInput */
|
|
59
|
+
defaultModel?: string;
|
|
60
|
+
};
|
|
61
|
+
/**
|
|
62
|
+
* OpenAI-compatible provider adapter.
|
|
63
|
+
*
|
|
64
|
+
* Spec v1.3 §13.1 — covers OpenAI, Azure, and local proxies via
|
|
65
|
+
* configurable baseUrl. Uses native fetch (Node 20+), no SDK dependency.
|
|
66
|
+
*/
|
|
67
|
+
declare class OpenAIProvider implements AgentProvider {
|
|
68
|
+
private readonly apiKey;
|
|
69
|
+
private readonly baseUrl;
|
|
70
|
+
private readonly defaultModel;
|
|
71
|
+
constructor(config?: OpenAIProviderConfig);
|
|
72
|
+
/**
|
|
73
|
+
* Execute a chat completion request against the OpenAI API.
|
|
74
|
+
*
|
|
75
|
+
* @param input - The provider input containing prompts and parameters
|
|
76
|
+
* @returns ProviderOutput with rawText, usage data, and optional structured sections
|
|
77
|
+
* @throws ProviderError on rate limit (429), auth failure (401), server error (5xx), or timeout
|
|
78
|
+
*/
|
|
79
|
+
run(input: ProviderInput): Promise<ProviderOutput>;
|
|
80
|
+
/**
|
|
81
|
+
* Parse the OpenAI API response into a ProviderOutput.
|
|
82
|
+
*/
|
|
83
|
+
private parseResponse;
|
|
84
|
+
/**
|
|
85
|
+
* Handle non-OK HTTP responses from the OpenAI API.
|
|
86
|
+
*/
|
|
87
|
+
private handleErrorResponse;
|
|
88
|
+
/**
|
|
89
|
+
* Estimate cost based on model pricing and token usage.
|
|
90
|
+
*/
|
|
91
|
+
private estimateCost;
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
/**
|
|
95
|
+
* Configuration options for the Anthropic provider adapter.
|
|
96
|
+
*/
|
|
97
|
+
type AnthropicProviderConfig = {
|
|
98
|
+
/** API key; defaults to process.env.ANTHROPIC_API_KEY */
|
|
99
|
+
apiKey?: string;
|
|
100
|
+
/** Default model to use when not specified in ProviderInput */
|
|
101
|
+
defaultModel?: string;
|
|
102
|
+
};
|
|
103
|
+
/**
|
|
104
|
+
* Anthropic Messages API provider adapter.
|
|
105
|
+
*
|
|
106
|
+
* Spec v1.3 §13.1 — native Anthropic adapter for non-OpenAI-compatible
|
|
107
|
+
* features. Uses native fetch (Node 20+), no SDK dependency.
|
|
108
|
+
*/
|
|
109
|
+
declare class AnthropicProvider implements AgentProvider {
|
|
110
|
+
private readonly apiKey;
|
|
111
|
+
private readonly defaultModel;
|
|
112
|
+
constructor(config?: AnthropicProviderConfig);
|
|
113
|
+
/**
|
|
114
|
+
* Execute a message request against the Anthropic Messages API.
|
|
115
|
+
*
|
|
116
|
+
* @param input - The provider input containing prompts and parameters
|
|
117
|
+
* @returns ProviderOutput with rawText, usage data, and optional structured sections
|
|
118
|
+
* @throws ProviderError on rate limit (429), auth failure (401), server error (5xx), or timeout
|
|
119
|
+
*/
|
|
120
|
+
run(input: ProviderInput): Promise<ProviderOutput>;
|
|
121
|
+
/**
|
|
122
|
+
* Parse the Anthropic Messages API response into a ProviderOutput.
|
|
123
|
+
*/
|
|
124
|
+
private parseResponse;
|
|
125
|
+
/**
|
|
126
|
+
* Handle non-OK HTTP responses from the Anthropic API.
|
|
127
|
+
*/
|
|
128
|
+
private handleErrorResponse;
|
|
129
|
+
/**
|
|
130
|
+
* Estimate cost based on model pricing and token usage.
|
|
131
|
+
*/
|
|
132
|
+
private estimateCost;
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
/**
|
|
136
|
+
* Configuration options for the Claude CLI provider.
|
|
137
|
+
*/
|
|
138
|
+
type ClaudeCliProviderConfig = {
|
|
139
|
+
/** Path to the claude binary; defaults to 'claude' (from PATH) */
|
|
140
|
+
command?: string;
|
|
141
|
+
/** Default model; defaults to 'sonnet' */
|
|
142
|
+
defaultModel?: string;
|
|
143
|
+
};
|
|
144
|
+
/**
|
|
145
|
+
* Provider adapter that uses the Claude Code CLI (`claude`) as the LLM backend.
|
|
146
|
+
*
|
|
147
|
+
* Invokes `claude -p <prompt> --model <model> --output-format text` as a subprocess.
|
|
148
|
+
* The CLI handles its own authentication — no API key required if already logged in.
|
|
149
|
+
*
|
|
150
|
+
* Prompts are piped via stdin to avoid shell escaping issues with large content.
|
|
151
|
+
*/
|
|
152
|
+
declare class ClaudeCliProvider implements AgentProvider {
|
|
153
|
+
private readonly command;
|
|
154
|
+
private readonly defaultModel;
|
|
155
|
+
constructor(config?: ClaudeCliProviderConfig);
|
|
156
|
+
run(input: ProviderInput): Promise<ProviderOutput>;
|
|
157
|
+
}
|
|
158
|
+
|
|
159
|
+
/**
|
|
160
|
+
* Configuration options for the Codex CLI provider.
|
|
161
|
+
*/
|
|
162
|
+
type CodexCliProviderConfig = {
|
|
163
|
+
/** Path to the codex binary; defaults to 'codex' (from PATH) */
|
|
164
|
+
command?: string;
|
|
165
|
+
/** Default model; defaults to 'o4-mini' */
|
|
166
|
+
defaultModel?: string;
|
|
167
|
+
};
|
|
168
|
+
/**
|
|
169
|
+
* Provider adapter that uses the OpenAI Codex CLI (`codex`) as the LLM backend.
|
|
170
|
+
*
|
|
171
|
+
* Invokes `codex -p <prompt> --model <model>` as a subprocess.
|
|
172
|
+
* The CLI handles its own authentication — no API key required if already logged in.
|
|
173
|
+
*
|
|
174
|
+
* Prompts are passed via the -p flag with the full combined prompt.
|
|
175
|
+
*/
|
|
176
|
+
declare class CodexCliProvider implements AgentProvider {
|
|
177
|
+
private readonly command;
|
|
178
|
+
private readonly defaultModel;
|
|
179
|
+
constructor(config?: CodexCliProviderConfig);
|
|
180
|
+
run(input: ProviderInput): Promise<ProviderOutput>;
|
|
181
|
+
}
|
|
182
|
+
|
|
183
|
+
/**
|
|
184
|
+
* Check whether a CLI command is available on the system PATH.
|
|
185
|
+
*/
|
|
186
|
+
declare function isCommandAvailable(command: string): Promise<boolean>;
|
|
187
|
+
/** Result of detecting available CLI providers */
|
|
188
|
+
type DetectedProviders = {
|
|
189
|
+
claudeCli: boolean;
|
|
190
|
+
codexCli: boolean;
|
|
191
|
+
preferred: 'claude-cli' | 'codex-cli' | null;
|
|
192
|
+
};
|
|
193
|
+
/**
|
|
194
|
+
* Detect which CLI-based LLM tools are available on the system.
|
|
195
|
+
* Returns the preferred provider (claude-cli first, then codex-cli).
|
|
196
|
+
*/
|
|
197
|
+
declare function detectCliProviders(): Promise<DetectedProviders>;
|
|
198
|
+
|
|
199
|
+
/**
|
|
200
|
+
* Minimal agent shape needed for provider routing.
|
|
201
|
+
* Matches AgentAssignment from core without importing it.
|
|
202
|
+
*/
|
|
203
|
+
interface AgentRef {
|
|
204
|
+
id: string;
|
|
205
|
+
providerKey: string;
|
|
206
|
+
modelOrCommand: string;
|
|
207
|
+
}
|
|
208
|
+
/**
|
|
209
|
+
* ProviderRouter dispatches provider calls to per-agent provider instances.
|
|
210
|
+
*
|
|
211
|
+
* The protocol runner calls `router.forAgent(agent).run(input)` instead of
|
|
212
|
+
* calling a single provider directly. This enables different agents to use
|
|
213
|
+
* different LLM backends (CLI tools, API providers, etc.)
|
|
214
|
+
*
|
|
215
|
+
* Backward-compatible: when constructed with only a default provider,
|
|
216
|
+
* `forAgent()` always returns that default.
|
|
217
|
+
*/
|
|
218
|
+
declare class ProviderRouter {
|
|
219
|
+
private readonly providers;
|
|
220
|
+
private readonly defaultProvider;
|
|
221
|
+
constructor(defaultProvider: AgentProvider);
|
|
222
|
+
/**
|
|
223
|
+
* Register a provider for a specific agent ID.
|
|
224
|
+
*/
|
|
225
|
+
setProvider(agentId: string, provider: AgentProvider): void;
|
|
226
|
+
/**
|
|
227
|
+
* Register a provider for agents matching a provider key.
|
|
228
|
+
* This is a convenience for config-driven setups where the agent ID
|
|
229
|
+
* isn't known yet but the provider key is.
|
|
230
|
+
*/
|
|
231
|
+
setProviderByKey(providerKey: string, provider: AgentProvider): void;
|
|
232
|
+
/**
|
|
233
|
+
* Resolve the provider for a given agent.
|
|
234
|
+
*
|
|
235
|
+
* Lookup order:
|
|
236
|
+
* 1. Exact agent ID match
|
|
237
|
+
* 2. Provider key match (via setProviderByKey)
|
|
238
|
+
* 3. Default provider
|
|
239
|
+
*/
|
|
240
|
+
forAgent(agent: AgentRef): AgentProvider;
|
|
241
|
+
/**
|
|
242
|
+
* Also implement the AgentProvider interface directly so the router
|
|
243
|
+
* can be used as a drop-in replacement for a single provider.
|
|
244
|
+
* Uses the default provider.
|
|
245
|
+
*/
|
|
246
|
+
run(input: ProviderInput): Promise<ProviderOutput>;
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
export { type AgentProvider, AnthropicProvider, type AnthropicProviderConfig, ClaudeCliProvider, type ClaudeCliProviderConfig, CodexCliProvider, type CodexCliProviderConfig, type DetectedProviders, OpenAIProvider, type OpenAIProviderConfig, ProviderError, type ProviderErrorCode, type ProviderInput, ProviderRouter, detectCliProviders, isCommandAvailable };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,695 @@
|
|
|
1
|
+
// src/types.ts
|
|
2
|
+
var ProviderError = class extends Error {
|
|
3
|
+
constructor(message, code, statusCode, retryable = false, retryAfterMs) {
|
|
4
|
+
super(message);
|
|
5
|
+
this.code = code;
|
|
6
|
+
this.statusCode = statusCode;
|
|
7
|
+
this.retryable = retryable;
|
|
8
|
+
this.retryAfterMs = retryAfterMs;
|
|
9
|
+
this.name = "ProviderError";
|
|
10
|
+
}
|
|
11
|
+
};
|
|
12
|
+
|
|
13
|
+
// src/openai/adapter.ts
|
|
14
|
+
var MODEL_PRICING = {
|
|
15
|
+
"gpt-4o": { input: 2.5, output: 10 },
|
|
16
|
+
"gpt-4o-mini": { input: 0.15, output: 0.6 },
|
|
17
|
+
"gpt-4-turbo": { input: 10, output: 30 },
|
|
18
|
+
"gpt-4": { input: 30, output: 60 },
|
|
19
|
+
"gpt-3.5-turbo": { input: 0.5, output: 1.5 }
|
|
20
|
+
};
|
|
21
|
+
var DEFAULT_TIMEOUT_MS = 12e4;
|
|
22
|
+
var DEFAULT_MAX_TOKENS = 4096;
|
|
23
|
+
var OpenAIProvider = class {
|
|
24
|
+
apiKey;
|
|
25
|
+
baseUrl;
|
|
26
|
+
defaultModel;
|
|
27
|
+
constructor(config = {}) {
|
|
28
|
+
this.apiKey = config.apiKey ?? process.env.OPENAI_API_KEY ?? "";
|
|
29
|
+
this.baseUrl = (config.baseUrl ?? "https://api.openai.com").replace(/\/+$/, "");
|
|
30
|
+
this.defaultModel = config.defaultModel ?? "gpt-4o";
|
|
31
|
+
if (!this.apiKey) {
|
|
32
|
+
throw new ProviderError(
|
|
33
|
+
"OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass apiKey in config.",
|
|
34
|
+
"auth_error"
|
|
35
|
+
);
|
|
36
|
+
}
|
|
37
|
+
}
|
|
38
|
+
/**
|
|
39
|
+
* Execute a chat completion request against the OpenAI API.
|
|
40
|
+
*
|
|
41
|
+
* @param input - The provider input containing prompts and parameters
|
|
42
|
+
* @returns ProviderOutput with rawText, usage data, and optional structured sections
|
|
43
|
+
* @throws ProviderError on rate limit (429), auth failure (401), server error (5xx), or timeout
|
|
44
|
+
*/
|
|
45
|
+
async run(input) {
|
|
46
|
+
const model = input.model || this.defaultModel;
|
|
47
|
+
const maxTokens = input.maxTokens ?? DEFAULT_MAX_TOKENS;
|
|
48
|
+
const timeoutMs = input.timeoutMs ?? DEFAULT_TIMEOUT_MS;
|
|
49
|
+
const requestBody = {
|
|
50
|
+
model,
|
|
51
|
+
messages: [
|
|
52
|
+
{ role: "system", content: input.systemPrompt },
|
|
53
|
+
{ role: "user", content: input.userPrompt }
|
|
54
|
+
],
|
|
55
|
+
max_tokens: maxTokens,
|
|
56
|
+
...input.temperature !== void 0 && { temperature: input.temperature }
|
|
57
|
+
};
|
|
58
|
+
const controller = new AbortController();
|
|
59
|
+
const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
|
|
60
|
+
if (input.abortSignal) {
|
|
61
|
+
if (input.abortSignal.aborted) {
|
|
62
|
+
controller.abort();
|
|
63
|
+
} else {
|
|
64
|
+
input.abortSignal.addEventListener("abort", () => controller.abort(), { once: true });
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
const startTime = Date.now();
|
|
68
|
+
try {
|
|
69
|
+
const response = await fetch(`${this.baseUrl}/v1/chat/completions`, {
|
|
70
|
+
method: "POST",
|
|
71
|
+
headers: {
|
|
72
|
+
"Content-Type": "application/json",
|
|
73
|
+
Authorization: `Bearer ${this.apiKey}`
|
|
74
|
+
},
|
|
75
|
+
body: JSON.stringify(requestBody),
|
|
76
|
+
signal: controller.signal
|
|
77
|
+
});
|
|
78
|
+
const latencyMs = Date.now() - startTime;
|
|
79
|
+
if (!response.ok) {
|
|
80
|
+
await this.handleErrorResponse(response);
|
|
81
|
+
}
|
|
82
|
+
const data = await response.json();
|
|
83
|
+
return this.parseResponse(data, model, latencyMs);
|
|
84
|
+
} catch (error) {
|
|
85
|
+
if (error instanceof ProviderError) {
|
|
86
|
+
throw error;
|
|
87
|
+
}
|
|
88
|
+
if (error instanceof DOMException && error.name === "AbortError") {
|
|
89
|
+
throw new ProviderError(
|
|
90
|
+
`OpenAI request timed out after ${timeoutMs}ms`,
|
|
91
|
+
"timeout",
|
|
92
|
+
void 0,
|
|
93
|
+
true
|
|
94
|
+
);
|
|
95
|
+
}
|
|
96
|
+
throw new ProviderError(
|
|
97
|
+
`OpenAI request failed: ${error instanceof Error ? error.message : String(error)}`,
|
|
98
|
+
"network_error",
|
|
99
|
+
void 0,
|
|
100
|
+
true
|
|
101
|
+
);
|
|
102
|
+
} finally {
|
|
103
|
+
clearTimeout(timeoutId);
|
|
104
|
+
}
|
|
105
|
+
}
|
|
106
|
+
/**
|
|
107
|
+
* Parse the OpenAI API response into a ProviderOutput.
|
|
108
|
+
*/
|
|
109
|
+
parseResponse(data, model, latencyMs) {
|
|
110
|
+
const choices = data.choices;
|
|
111
|
+
if (!choices || choices.length === 0) {
|
|
112
|
+
throw new ProviderError("OpenAI response contained no choices", "invalid_response");
|
|
113
|
+
}
|
|
114
|
+
const rawText = choices[0]?.message?.content ?? "";
|
|
115
|
+
const usage = data.usage;
|
|
116
|
+
const inputTokens = usage?.prompt_tokens;
|
|
117
|
+
const outputTokens = usage?.completion_tokens;
|
|
118
|
+
const cost = this.estimateCost(model, inputTokens, outputTokens);
|
|
119
|
+
const warnings = [];
|
|
120
|
+
if (choices[0]?.finish_reason === "length") {
|
|
121
|
+
warnings.push("Response was truncated due to max_tokens limit");
|
|
122
|
+
}
|
|
123
|
+
return {
|
|
124
|
+
rawText,
|
|
125
|
+
warnings: warnings.length > 0 ? warnings : void 0,
|
|
126
|
+
usage: {
|
|
127
|
+
inputTokens,
|
|
128
|
+
outputTokens,
|
|
129
|
+
cost,
|
|
130
|
+
latencyMs
|
|
131
|
+
}
|
|
132
|
+
};
|
|
133
|
+
}
|
|
134
|
+
/**
|
|
135
|
+
* Handle non-OK HTTP responses from the OpenAI API.
|
|
136
|
+
*/
|
|
137
|
+
async handleErrorResponse(response) {
|
|
138
|
+
let errorMessage;
|
|
139
|
+
try {
|
|
140
|
+
const errorData = await response.json();
|
|
141
|
+
errorMessage = errorData.error?.message ?? response.statusText;
|
|
142
|
+
} catch {
|
|
143
|
+
errorMessage = response.statusText;
|
|
144
|
+
}
|
|
145
|
+
switch (response.status) {
|
|
146
|
+
case 401:
|
|
147
|
+
throw new ProviderError(`OpenAI authentication failed: ${errorMessage}`, "auth_error", 401);
|
|
148
|
+
case 429: {
|
|
149
|
+
const retryAfter = response.headers.get("retry-after");
|
|
150
|
+
const retryAfterMs = retryAfter ? parseInt(retryAfter, 10) * 1e3 : void 0;
|
|
151
|
+
throw new ProviderError(
|
|
152
|
+
`OpenAI rate limit exceeded: ${errorMessage}`,
|
|
153
|
+
"rate_limit",
|
|
154
|
+
429,
|
|
155
|
+
true,
|
|
156
|
+
retryAfterMs
|
|
157
|
+
);
|
|
158
|
+
}
|
|
159
|
+
default:
|
|
160
|
+
if (response.status >= 500) {
|
|
161
|
+
throw new ProviderError(
|
|
162
|
+
`OpenAI server error (${response.status}): ${errorMessage}`,
|
|
163
|
+
"server_error",
|
|
164
|
+
response.status,
|
|
165
|
+
true
|
|
166
|
+
);
|
|
167
|
+
}
|
|
168
|
+
throw new ProviderError(
|
|
169
|
+
`OpenAI request failed (${response.status}): ${errorMessage}`,
|
|
170
|
+
"server_error",
|
|
171
|
+
response.status
|
|
172
|
+
);
|
|
173
|
+
}
|
|
174
|
+
}
|
|
175
|
+
/**
|
|
176
|
+
* Estimate cost based on model pricing and token usage.
|
|
177
|
+
*/
|
|
178
|
+
estimateCost(model, inputTokens, outputTokens) {
|
|
179
|
+
const pricing = MODEL_PRICING[model];
|
|
180
|
+
if (!pricing || inputTokens === void 0 || outputTokens === void 0) {
|
|
181
|
+
return void 0;
|
|
182
|
+
}
|
|
183
|
+
return inputTokens / 1e6 * pricing.input + outputTokens / 1e6 * pricing.output;
|
|
184
|
+
}
|
|
185
|
+
};
|
|
186
|
+
|
|
187
|
+
// src/anthropic/adapter.ts
|
|
188
|
+
var ANTHROPIC_API_URL = "https://api.anthropic.com";
|
|
189
|
+
var ANTHROPIC_VERSION = "2023-06-01";
|
|
190
|
+
var MODEL_PRICING2 = {
|
|
191
|
+
"claude-sonnet-4-20250514": { input: 3, output: 15 },
|
|
192
|
+
"claude-opus-4-20250514": { input: 15, output: 75 },
|
|
193
|
+
"claude-3-5-sonnet-20241022": { input: 3, output: 15 },
|
|
194
|
+
"claude-3-5-haiku-20241022": { input: 0.8, output: 4 },
|
|
195
|
+
"claude-3-opus-20240229": { input: 15, output: 75 },
|
|
196
|
+
"claude-3-haiku-20240307": { input: 0.25, output: 1.25 }
|
|
197
|
+
};
|
|
198
|
+
var DEFAULT_TIMEOUT_MS2 = 12e4;
|
|
199
|
+
var DEFAULT_MAX_TOKENS2 = 4096;
|
|
200
|
+
var AnthropicProvider = class {
|
|
201
|
+
apiKey;
|
|
202
|
+
defaultModel;
|
|
203
|
+
constructor(config = {}) {
|
|
204
|
+
this.apiKey = config.apiKey ?? process.env.ANTHROPIC_API_KEY ?? "";
|
|
205
|
+
this.defaultModel = config.defaultModel ?? "claude-sonnet-4-20250514";
|
|
206
|
+
if (!this.apiKey) {
|
|
207
|
+
throw new ProviderError(
|
|
208
|
+
"Anthropic API key is required. Set ANTHROPIC_API_KEY environment variable or pass apiKey in config.",
|
|
209
|
+
"auth_error"
|
|
210
|
+
);
|
|
211
|
+
}
|
|
212
|
+
}
|
|
213
|
+
/**
|
|
214
|
+
* Execute a message request against the Anthropic Messages API.
|
|
215
|
+
*
|
|
216
|
+
* @param input - The provider input containing prompts and parameters
|
|
217
|
+
* @returns ProviderOutput with rawText, usage data, and optional structured sections
|
|
218
|
+
* @throws ProviderError on rate limit (429), auth failure (401), server error (5xx), or timeout
|
|
219
|
+
*/
|
|
220
|
+
async run(input) {
|
|
221
|
+
const model = input.model || this.defaultModel;
|
|
222
|
+
const maxTokens = input.maxTokens ?? DEFAULT_MAX_TOKENS2;
|
|
223
|
+
const timeoutMs = input.timeoutMs ?? DEFAULT_TIMEOUT_MS2;
|
|
224
|
+
const requestBody = {
|
|
225
|
+
model,
|
|
226
|
+
max_tokens: maxTokens,
|
|
227
|
+
system: input.systemPrompt,
|
|
228
|
+
messages: [{ role: "user", content: input.userPrompt }],
|
|
229
|
+
...input.temperature !== void 0 && { temperature: input.temperature }
|
|
230
|
+
};
|
|
231
|
+
const controller = new AbortController();
|
|
232
|
+
const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
|
|
233
|
+
if (input.abortSignal) {
|
|
234
|
+
if (input.abortSignal.aborted) {
|
|
235
|
+
controller.abort();
|
|
236
|
+
} else {
|
|
237
|
+
input.abortSignal.addEventListener("abort", () => controller.abort(), { once: true });
|
|
238
|
+
}
|
|
239
|
+
}
|
|
240
|
+
const startTime = Date.now();
|
|
241
|
+
try {
|
|
242
|
+
const response = await fetch(`${ANTHROPIC_API_URL}/v1/messages`, {
|
|
243
|
+
method: "POST",
|
|
244
|
+
headers: {
|
|
245
|
+
"Content-Type": "application/json",
|
|
246
|
+
"x-api-key": this.apiKey,
|
|
247
|
+
"anthropic-version": ANTHROPIC_VERSION
|
|
248
|
+
},
|
|
249
|
+
body: JSON.stringify(requestBody),
|
|
250
|
+
signal: controller.signal
|
|
251
|
+
});
|
|
252
|
+
const latencyMs = Date.now() - startTime;
|
|
253
|
+
if (!response.ok) {
|
|
254
|
+
await this.handleErrorResponse(response);
|
|
255
|
+
}
|
|
256
|
+
const data = await response.json();
|
|
257
|
+
return this.parseResponse(data, model, latencyMs);
|
|
258
|
+
} catch (error) {
|
|
259
|
+
if (error instanceof ProviderError) {
|
|
260
|
+
throw error;
|
|
261
|
+
}
|
|
262
|
+
if (error instanceof DOMException && error.name === "AbortError") {
|
|
263
|
+
throw new ProviderError(
|
|
264
|
+
`Anthropic request timed out after ${timeoutMs}ms`,
|
|
265
|
+
"timeout",
|
|
266
|
+
void 0,
|
|
267
|
+
true
|
|
268
|
+
);
|
|
269
|
+
}
|
|
270
|
+
throw new ProviderError(
|
|
271
|
+
`Anthropic request failed: ${error instanceof Error ? error.message : String(error)}`,
|
|
272
|
+
"network_error",
|
|
273
|
+
void 0,
|
|
274
|
+
true
|
|
275
|
+
);
|
|
276
|
+
} finally {
|
|
277
|
+
clearTimeout(timeoutId);
|
|
278
|
+
}
|
|
279
|
+
}
|
|
280
|
+
/**
|
|
281
|
+
* Parse the Anthropic Messages API response into a ProviderOutput.
|
|
282
|
+
*/
|
|
283
|
+
parseResponse(data, model, latencyMs) {
|
|
284
|
+
const content = data.content;
|
|
285
|
+
if (!content || content.length === 0) {
|
|
286
|
+
throw new ProviderError("Anthropic response contained no content blocks", "invalid_response");
|
|
287
|
+
}
|
|
288
|
+
const textBlock = content.find((block) => block.type === "text");
|
|
289
|
+
const rawText = textBlock?.text ?? "";
|
|
290
|
+
const usage = data.usage;
|
|
291
|
+
const inputTokens = usage?.input_tokens;
|
|
292
|
+
const outputTokens = usage?.output_tokens;
|
|
293
|
+
const cost = this.estimateCost(model, inputTokens, outputTokens);
|
|
294
|
+
const warnings = [];
|
|
295
|
+
const stopReason = data.stop_reason;
|
|
296
|
+
if (stopReason === "max_tokens") {
|
|
297
|
+
warnings.push("Response was truncated due to max_tokens limit");
|
|
298
|
+
}
|
|
299
|
+
return {
|
|
300
|
+
rawText,
|
|
301
|
+
warnings: warnings.length > 0 ? warnings : void 0,
|
|
302
|
+
usage: {
|
|
303
|
+
inputTokens,
|
|
304
|
+
outputTokens,
|
|
305
|
+
cost,
|
|
306
|
+
latencyMs
|
|
307
|
+
}
|
|
308
|
+
};
|
|
309
|
+
}
|
|
310
|
+
/**
|
|
311
|
+
* Handle non-OK HTTP responses from the Anthropic API.
|
|
312
|
+
*/
|
|
313
|
+
async handleErrorResponse(response) {
|
|
314
|
+
let errorMessage;
|
|
315
|
+
try {
|
|
316
|
+
const errorData = await response.json();
|
|
317
|
+
errorMessage = errorData.error?.message ?? response.statusText;
|
|
318
|
+
} catch {
|
|
319
|
+
errorMessage = response.statusText;
|
|
320
|
+
}
|
|
321
|
+
switch (response.status) {
|
|
322
|
+
case 401:
|
|
323
|
+
throw new ProviderError(
|
|
324
|
+
`Anthropic authentication failed: ${errorMessage}`,
|
|
325
|
+
"auth_error",
|
|
326
|
+
401
|
|
327
|
+
);
|
|
328
|
+
case 429: {
|
|
329
|
+
const retryAfter = response.headers.get("retry-after");
|
|
330
|
+
const retryAfterMs = retryAfter ? parseInt(retryAfter, 10) * 1e3 : void 0;
|
|
331
|
+
throw new ProviderError(
|
|
332
|
+
`Anthropic rate limit exceeded: ${errorMessage}`,
|
|
333
|
+
"rate_limit",
|
|
334
|
+
429,
|
|
335
|
+
true,
|
|
336
|
+
retryAfterMs
|
|
337
|
+
);
|
|
338
|
+
}
|
|
339
|
+
default:
|
|
340
|
+
if (response.status >= 500) {
|
|
341
|
+
throw new ProviderError(
|
|
342
|
+
`Anthropic server error (${response.status}): ${errorMessage}`,
|
|
343
|
+
"server_error",
|
|
344
|
+
response.status,
|
|
345
|
+
true
|
|
346
|
+
);
|
|
347
|
+
}
|
|
348
|
+
throw new ProviderError(
|
|
349
|
+
`Anthropic request failed (${response.status}): ${errorMessage}`,
|
|
350
|
+
"server_error",
|
|
351
|
+
response.status
|
|
352
|
+
);
|
|
353
|
+
}
|
|
354
|
+
}
|
|
355
|
+
/**
|
|
356
|
+
* Estimate cost based on model pricing and token usage.
|
|
357
|
+
*/
|
|
358
|
+
estimateCost(model, inputTokens, outputTokens) {
|
|
359
|
+
const pricing = MODEL_PRICING2[model];
|
|
360
|
+
if (!pricing || inputTokens === void 0 || outputTokens === void 0) {
|
|
361
|
+
return void 0;
|
|
362
|
+
}
|
|
363
|
+
return inputTokens / 1e6 * pricing.input + outputTokens / 1e6 * pricing.output;
|
|
364
|
+
}
|
|
365
|
+
};
|
|
366
|
+
|
|
367
|
+
// src/cli/claude-cli.ts
|
|
368
|
+
import { spawn } from "child_process";
|
|
369
|
+
var DEFAULT_TIMEOUT_MS3 = 9e5;
|
|
370
|
+
var ClaudeCliProvider = class {
|
|
371
|
+
command;
|
|
372
|
+
defaultModel;
|
|
373
|
+
constructor(config = {}) {
|
|
374
|
+
this.command = config.command ?? "claude";
|
|
375
|
+
this.defaultModel = config.defaultModel ?? "sonnet";
|
|
376
|
+
}
|
|
377
|
+
async run(input) {
|
|
378
|
+
const model = input.model || this.defaultModel;
|
|
379
|
+
const timeoutMs = input.timeoutMs ?? DEFAULT_TIMEOUT_MS3;
|
|
380
|
+
const startTime = Date.now();
|
|
381
|
+
const combinedPrompt = `${input.systemPrompt}
|
|
382
|
+
|
|
383
|
+
---
|
|
384
|
+
|
|
385
|
+
${input.userPrompt}`;
|
|
386
|
+
const args = ["-p", "--model", model, "--output-format", "text"];
|
|
387
|
+
if (input.maxTokens) {
|
|
388
|
+
args.push("--max-tokens", String(input.maxTokens));
|
|
389
|
+
}
|
|
390
|
+
return new Promise((resolve, reject) => {
|
|
391
|
+
let settled = false;
|
|
392
|
+
const resolveOnce = (value) => {
|
|
393
|
+
if (settled) return;
|
|
394
|
+
settled = true;
|
|
395
|
+
resolve(value);
|
|
396
|
+
};
|
|
397
|
+
const rejectOnce = (error) => {
|
|
398
|
+
if (settled) return;
|
|
399
|
+
settled = true;
|
|
400
|
+
reject(error);
|
|
401
|
+
};
|
|
402
|
+
const child = spawn(this.command, args, {
|
|
403
|
+
stdio: ["pipe", "pipe", "pipe"],
|
|
404
|
+
timeout: timeoutMs
|
|
405
|
+
});
|
|
406
|
+
child.stdin.on("error", (err) => {
|
|
407
|
+
const code = err.code;
|
|
408
|
+
if (code === "EPIPE" || code === "ERR_STREAM_DESTROYED") {
|
|
409
|
+
return;
|
|
410
|
+
}
|
|
411
|
+
rejectOnce(
|
|
412
|
+
new ProviderError(
|
|
413
|
+
`Claude CLI stdin error: ${err.message}`,
|
|
414
|
+
"server_error",
|
|
415
|
+
void 0,
|
|
416
|
+
true
|
|
417
|
+
)
|
|
418
|
+
);
|
|
419
|
+
});
|
|
420
|
+
child.stdin.end(combinedPrompt);
|
|
421
|
+
let stdout = "";
|
|
422
|
+
let stderr = "";
|
|
423
|
+
child.stdout.on("data", (chunk) => {
|
|
424
|
+
stdout += chunk.toString();
|
|
425
|
+
});
|
|
426
|
+
child.stderr.on("data", (chunk) => {
|
|
427
|
+
stderr += chunk.toString();
|
|
428
|
+
});
|
|
429
|
+
if (input.abortSignal) {
|
|
430
|
+
if (input.abortSignal.aborted) {
|
|
431
|
+
child.kill("SIGTERM");
|
|
432
|
+
} else {
|
|
433
|
+
input.abortSignal.addEventListener(
|
|
434
|
+
"abort",
|
|
435
|
+
() => {
|
|
436
|
+
child.kill("SIGTERM");
|
|
437
|
+
},
|
|
438
|
+
{ once: true }
|
|
439
|
+
);
|
|
440
|
+
}
|
|
441
|
+
}
|
|
442
|
+
child.on("error", (err) => {
|
|
443
|
+
if (err.code === "ENOENT") {
|
|
444
|
+
rejectOnce(
|
|
445
|
+
new ProviderError(
|
|
446
|
+
`Claude CLI not found. Install it from https://claude.ai/download or check your PATH.`,
|
|
447
|
+
"auth_error"
|
|
448
|
+
)
|
|
449
|
+
);
|
|
450
|
+
} else {
|
|
451
|
+
rejectOnce(
|
|
452
|
+
new ProviderError(`Claude CLI error: ${err.message}`, "server_error", void 0, true)
|
|
453
|
+
);
|
|
454
|
+
}
|
|
455
|
+
});
|
|
456
|
+
child.on("close", (code) => {
|
|
457
|
+
const latencyMs = Date.now() - startTime;
|
|
458
|
+
if (code !== 0) {
|
|
459
|
+
if (stderr.includes("authentication") || stderr.includes("login") || stderr.includes("not logged in")) {
|
|
460
|
+
rejectOnce(
|
|
461
|
+
new ProviderError(
|
|
462
|
+
`Claude CLI authentication failed. Run 'claude login' first. stderr: ${stderr.slice(0, 300)}`,
|
|
463
|
+
"auth_error"
|
|
464
|
+
)
|
|
465
|
+
);
|
|
466
|
+
return;
|
|
467
|
+
}
|
|
468
|
+
if (code === null) {
|
|
469
|
+
rejectOnce(
|
|
470
|
+
new ProviderError(
|
|
471
|
+
`Claude CLI timed out after ${timeoutMs}ms`,
|
|
472
|
+
"timeout",
|
|
473
|
+
void 0,
|
|
474
|
+
true
|
|
475
|
+
)
|
|
476
|
+
);
|
|
477
|
+
return;
|
|
478
|
+
}
|
|
479
|
+
rejectOnce(
|
|
480
|
+
new ProviderError(
|
|
481
|
+
`Claude CLI exited with code ${code}. stderr: ${stderr.slice(0, 500)}`,
|
|
482
|
+
"server_error",
|
|
483
|
+
code,
|
|
484
|
+
true
|
|
485
|
+
)
|
|
486
|
+
);
|
|
487
|
+
return;
|
|
488
|
+
}
|
|
489
|
+
const warnings = [];
|
|
490
|
+
if (stderr && !stderr.startsWith("[")) {
|
|
491
|
+
const filtered = stderr.trim();
|
|
492
|
+
if (filtered) warnings.push(filtered.slice(0, 300));
|
|
493
|
+
}
|
|
494
|
+
resolveOnce({
|
|
495
|
+
rawText: stdout.trim(),
|
|
496
|
+
warnings: warnings.length > 0 ? warnings : void 0,
|
|
497
|
+
usage: {
|
|
498
|
+
latencyMs
|
|
499
|
+
},
|
|
500
|
+
exitCode: code,
|
|
501
|
+
stderrText: stderr || void 0
|
|
502
|
+
});
|
|
503
|
+
});
|
|
504
|
+
});
|
|
505
|
+
}
|
|
506
|
+
};
|
|
507
|
+
|
|
508
|
+
// src/cli/codex-cli.ts
import { spawn as spawn2 } from "child_process";

// Default wall-clock limit for one CLI invocation: 300 000 ms (5 minutes).
var DEFAULT_TIMEOUT_MS4 = 3e5;

/**
 * Provider adapter that shells out to the OpenAI Codex CLI (`codex`).
 *
 * The system and user prompts are joined with a `---` separator and passed
 * via `-p`; the child's stdout becomes the raw model output. The process is
 * bounded by `input.timeoutMs` (falling back to DEFAULT_TIMEOUT_MS4) and can
 * be cancelled through `input.abortSignal`.
 */
var CodexCliProvider = class {
  command;
  defaultModel;
  constructor(config = {}) {
    this.command = config.command ?? "codex";
    this.defaultModel = config.defaultModel ?? "o4-mini";
  }
  /**
   * Run one prompt through the Codex CLI.
   *
   * Resolves with { rawText, warnings?, usage: { latencyMs }, exitCode,
   * stderrText? } on a zero exit code; rejects with a ProviderError
   * classified as "auth_error" | "timeout" | "server_error" otherwise.
   */
  async run(input) {
    const model = input.model || this.defaultModel;
    const timeoutMs = input.timeoutMs ?? DEFAULT_TIMEOUT_MS4;
    const startTime = Date.now();
    const combinedPrompt = `${input.systemPrompt}

---

${input.userPrompt}`;
    const args = ["-p", combinedPrompt, "--model", model];
    return new Promise((resolve, reject) => {
      // Fix: a spawn failure can emit both "error" and "close", which would
      // settle this promise twice. Guard with one-shot resolve/reject
      // wrappers, matching the pattern used by ClaudeCliProvider.
      let settled = false;
      const resolveOnce = (value) => {
        if (settled) return;
        settled = true;
        resolve(value);
      };
      const rejectOnce = (err) => {
        if (settled) return;
        settled = true;
        reject(err);
      };
      const child = spawn2(this.command, args, {
        stdio: ["ignore", "pipe", "pipe"],
        timeout: timeoutMs
      });
      let stdout = "";
      let stderr = "";
      child.stdout.on("data", (chunk) => {
        stdout += chunk.toString();
      });
      child.stderr.on("data", (chunk) => {
        stderr += chunk.toString();
      });
      if (input.abortSignal) {
        if (input.abortSignal.aborted) {
          child.kill("SIGTERM");
        } else {
          input.abortSignal.addEventListener(
            "abort",
            () => {
              child.kill("SIGTERM");
            },
            { once: true }
          );
        }
      }
      child.on("error", (err) => {
        if (err.code === "ENOENT") {
          // NOTE(review): ENOENT means the binary is missing, not strictly an
          // auth failure; "auth_error" is kept for parity with ClaudeCliProvider.
          rejectOnce(
            new ProviderError(
              `Codex CLI not found. Install it with 'npm install -g @openai/codex' or check your PATH.`,
              "auth_error"
            )
          );
        } else {
          rejectOnce(
            new ProviderError(`Codex CLI error: ${err.message}`, "server_error", void 0, true)
          );
        }
      });
      child.on("close", (code) => {
        const latencyMs = Date.now() - startTime;
        if (code !== 0) {
          // Heuristic auth-failure detection from stderr text.
          if (stderr.includes("authentication") || stderr.includes("API key") || stderr.includes("login")) {
            rejectOnce(
              new ProviderError(
                `Codex CLI authentication failed. Run 'codex login' or set OPENAI_API_KEY. stderr: ${stderr.slice(0, 300)}`,
                "auth_error"
              )
            );
            return;
          }
          // A null exit code means the process was killed (spawn timeout or
          // abort signal); surface it as a retryable timeout.
          if (code === null) {
            rejectOnce(
              new ProviderError(
                `Codex CLI timed out after ${timeoutMs}ms`,
                "timeout",
                void 0,
                true
              )
            );
            return;
          }
          rejectOnce(
            new ProviderError(
              `Codex CLI exited with code ${code}. stderr: ${stderr.slice(0, 500)}`,
              "server_error",
              code,
              true
            )
          );
          return;
        }
        // Success: keep any stderr chatter as a single truncated warning.
        const warnings = [];
        if (stderr) {
          const filtered = stderr.trim();
          if (filtered) warnings.push(filtered.slice(0, 300));
        }
        resolveOnce({
          rawText: stdout.trim(),
          warnings: warnings.length > 0 ? warnings : void 0,
          usage: {
            latencyMs
          },
          exitCode: code,
          stderrText: stderr || void 0
        });
      });
    });
  }
};
|
|
619
|
+
|
|
620
|
+
// src/cli/detect.ts
|
|
621
|
+
import { execFile } from "child_process";
|
|
622
|
+
/**
 * Check whether `command` resolves on the user's PATH.
 *
 * Uses `where` on Windows and `which` everywhere else; resolves true exactly
 * when that lookup exits without error. Never rejects.
 */
async function isCommandAvailable(command) {
  const locator = process.platform === "win32" ? "where" : "which";
  return new Promise((resolve) => {
    execFile(locator, [command], (error) => resolve(error == null));
  });
}
|
|
630
|
+
/**
 * Probe the PATH for the supported agent CLIs ("claude" and "codex").
 *
 * Returns availability flags for each plus a `preferred` pick:
 * "claude-cli" when available, otherwise "codex-cli", otherwise null.
 */
async function detectCliProviders() {
  const probes = [isCommandAvailable("claude"), isCommandAvailable("codex")];
  const [claudeCli, codexCli] = await Promise.all(probes);
  const preferred = claudeCli ? "claude-cli" : codexCli ? "codex-cli" : null;
  return { claudeCli, codexCli, preferred };
}
|
|
640
|
+
|
|
641
|
+
// src/router.ts
/**
 * Routes agent runs to registered providers.
 *
 * Providers may be bound to an exact agent ID (setProvider) or to a provider
 * key shared by many agents (setProviderByKey). Lookups fall back to the
 * default provider supplied at construction, and the router itself satisfies
 * the provider interface by delegating run() to that default.
 */
var ProviderRouter = class {
  providers = /* @__PURE__ */ new Map();
  defaultProvider;
  constructor(defaultProvider) {
    this.defaultProvider = defaultProvider;
  }
  /** Bind a provider to one exact agent ID. */
  setProvider(agentId, provider) {
    this.providers.set(agentId, provider);
  }
  /**
   * Bind a provider to a provider key — handy for config-driven setups where
   * the agent ID isn't known yet. Stored under a "key:" prefix so key
   * bindings can never collide with agent-ID bindings.
   */
  setProviderByKey(providerKey, provider) {
    this.providers.set(`key:${providerKey}`, provider);
  }
  /**
   * Resolve the provider for an agent.
   *
   * Lookup order: exact agent-ID binding, then the agent's provider-key
   * binding, finally the default provider.
   */
  forAgent(agent) {
    const direct = this.providers.get(agent.id);
    if (direct) {
      return direct;
    }
    const keyed = this.providers.get(`key:${agent.providerKey}`);
    return keyed ? keyed : this.defaultProvider;
  }
  /**
   * AgentProvider-compatible entry point: always uses the default provider,
   * so the router can stand in wherever a single provider is expected.
   */
  async run(input) {
    return this.defaultProvider.run(input);
  }
};
|
|
686
|
+
// Public package surface: API-backed providers (Anthropic, OpenAI),
// CLI-backed providers (Claude, Codex), CLI auto-detection helpers,
// the per-agent ProviderRouter, and the shared ProviderError type.
export {
  AnthropicProvider,
  ClaudeCliProvider,
  CodexCliProvider,
  OpenAIProvider,
  ProviderError,
  ProviderRouter,
  detectCliProviders,
  isCommandAvailable
};
|
package/package.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@malayvuong/agent-orchestra-providers",
|
|
3
|
+
"version": "2026.3.2",
|
|
4
|
+
"description": "LLM provider adapters for Agent Orchestra",
|
|
5
|
+
"license": "MIT",
|
|
6
|
+
"type": "module",
|
|
7
|
+
"main": "dist/index.js",
|
|
8
|
+
"types": "dist/index.d.ts",
|
|
9
|
+
"exports": {
|
|
10
|
+
".": {
|
|
11
|
+
"import": "./dist/index.js",
|
|
12
|
+
"types": "./dist/index.d.ts"
|
|
13
|
+
}
|
|
14
|
+
},
|
|
15
|
+
"files": [
|
|
16
|
+
"dist"
|
|
17
|
+
],
|
|
18
|
+
"sideEffects": false,
|
|
19
|
+
"engines": {
|
|
20
|
+
"node": ">=20.0.0"
|
|
21
|
+
},
|
|
22
|
+
"repository": {
|
|
23
|
+
"type": "git",
|
|
24
|
+
"url": "https://github.com/nicemvp/agent-orchestra.git",
|
|
25
|
+
"directory": "packages/providers"
|
|
26
|
+
},
|
|
27
|
+
"homepage": "https://github.com/nicemvp/agent-orchestra#readme",
|
|
28
|
+
"bugs": {
|
|
29
|
+
"url": "https://github.com/nicemvp/agent-orchestra/issues"
|
|
30
|
+
},
|
|
31
|
+
"publishConfig": {
|
|
32
|
+
"access": "public"
|
|
33
|
+
},
|
|
34
|
+
"dependencies": {
|
|
35
|
+
"@malayvuong/agent-orchestra-core": "2026.3.2",
|
|
36
|
+
"@malayvuong/agent-orchestra-shared": "2026.3.2"
|
|
37
|
+
},
|
|
38
|
+
"scripts": {
|
|
39
|
+
"build": "tsup",
|
|
40
|
+
"typecheck": "tsc --noEmit"
|
|
41
|
+
}
|
|
42
|
+
}
|