@orchagent/cli 0.3.17 → 0.3.19
- package/dist/commands/call.js +44 -4
- package/dist/commands/run.js +19 -5
- package/package.json +1 -1
package/dist/commands/call.js
CHANGED
@@ -138,6 +138,7 @@ function registerCallCommand(program) {
         .option('--input <json>', 'Alias for --data')
         .option('--key <key>', 'LLM API key (overrides env vars)')
         .option('--provider <provider>', 'LLM provider (openai, anthropic, gemini)')
+        .option('--model <model>', 'LLM model to use (overrides agent default)')
         .option('--json', 'Output raw JSON')
         .option('--output <file>', 'Save response body to a file')
         .option('--skills <skills>', 'Add skills (comma-separated)')
@@ -189,12 +190,46 @@ argument or --file option instead.
             throw new errors_1.CliError('When using --key, you must also specify --provider (openai, anthropic, or gemini)');
         }
         (0, llm_1.validateProvider)(options.provider);
+        // Warn on potential model/provider mismatch
+        if (options.model && options.provider) {
+            const modelLower = options.model.toLowerCase();
+            const providerPatterns = {
+                openai: /^(gpt-|o1-|o3-|davinci|text-)/,
+                anthropic: /^claude-/,
+                gemini: /^gemini-/,
+                ollama: /^(llama|mistral|deepseek|phi|qwen)/,
+            };
+            const expectedPattern = providerPatterns[options.provider];
+            if (expectedPattern && !expectedPattern.test(modelLower)) {
+                process.stderr.write(`Warning: Model '${options.model}' may not be a ${options.provider} model.\n\n`);
+            }
+        }
         llmKey = options.key;
         llmProvider = options.provider;
     }
     else {
         // Try to detect from environment or server
-        const detected = await (0, llm_1.detectLlmKey)(supportedProviders, resolved);
+        // If --provider specified, prioritize that provider
+        let providersToCheck = supportedProviders;
+        if (options.provider) {
+            (0, llm_1.validateProvider)(options.provider);
+            providersToCheck = [options.provider];
+            // Warn on potential model/provider mismatch
+            if (options.model) {
+                const modelLower = options.model.toLowerCase();
+                const providerPatterns = {
+                    openai: /^(gpt-|o1-|o3-|davinci|text-)/,
+                    anthropic: /^claude-/,
+                    gemini: /^gemini-/,
+                    ollama: /^(llama|mistral|deepseek|phi|qwen)/,
+                };
+                const expectedPattern = providerPatterns[options.provider];
+                if (expectedPattern && !expectedPattern.test(modelLower)) {
+                    process.stderr.write(`Warning: Model '${options.model}' may not be a ${options.provider} model.\n\n`);
+                }
+            }
+        }
+        const detected = await (0, llm_1.detectLlmKey)(providersToCheck, resolved);
         if (detected) {
             llmKey = detected.key;
             llmProvider = detected.provider;
@@ -204,12 +239,17 @@ argument or --file option instead.
     // Headers can be logged by proxies/load balancers, body is not logged by default
     let llmCredentials;
     if (llmKey && llmProvider) {
-        llmCredentials = {
+        llmCredentials = {
+            api_key: llmKey,
+            provider: llmProvider,
+            ...(options.model && { model: options.model }),
+        };
     }
     else if (agentMeta.type === 'prompt') {
         // Warn if no key found for prompt-based agent
-        const
-
+        const searchedProviders = options.provider ? [options.provider] : supportedProviders;
+        const providerList = searchedProviders.join(', ');
+        process.stderr.write(`Warning: No LLM key found for provider(s): ${providerList}\n` +
             `Set an env var (e.g., OPENAI_API_KEY), run 'orchagent keys add <provider>', use --key, or configure in web dashboard\n\n`);
     }
     // Add skill headers
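The model/provider mismatch warning introduced in this release is duplicated in both branches of call.js above and again in run.js below. For reference, a minimal standalone sketch of that check: the regex table is copied from the diff, while the function name and the example calls are illustrative and not part of the package.

// Sketch of the mismatch warning added in 0.3.19 (function name is illustrative).
function warnOnModelProviderMismatch(model, provider) {
    const providerPatterns = {
        openai: /^(gpt-|o1-|o3-|davinci|text-)/,
        anthropic: /^claude-/,
        gemini: /^gemini-/,
        ollama: /^(llama|mistral|deepseek|phi|qwen)/,
    };
    const expectedPattern = providerPatterns[provider];
    if (expectedPattern && !expectedPattern.test(model.toLowerCase())) {
        process.stderr.write(`Warning: Model '${model}' may not be a ${provider} model.\n\n`);
    }
}

// warnOnModelProviderMismatch('claude-3-haiku', 'openai') prints the warning;
// warnOnModelProviderMismatch('gpt-4o', 'openai') stays silent, as does any
// provider that has no entry in the pattern table.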
package/dist/commands/run.js
CHANGED
@@ -291,7 +291,7 @@ async function detectAllLlmKeys(supportedProviders, config) {
     }
     return providers;
 }
-async function executePromptLocally(agentData, inputData, skillPrompts = [], config, providerOverride) {
+async function executePromptLocally(agentData, inputData, skillPrompts = [], config, providerOverride, modelOverride) {
     // If provider override specified, validate and use only that provider
     if (providerOverride) {
         (0, llm_1.validateProvider)(providerOverride);
@@ -318,7 +318,7 @@ async function executePromptLocally(agentData, inputData, skillPrompts = [], con
     // Apply agent default models to each provider config
     const providersWithModels = allProviders.map((p) => ({
         ...p,
-        model: agentData.default_models?.[p.provider] || p.model,
+        model: modelOverride || agentData.default_models?.[p.provider] || p.model,
     }));
     // Show which provider is being used (primary)
     const primary = providersWithModels[0];
@@ -345,8 +345,8 @@ async function executePromptLocally(agentData, inputData, skillPrompts = [], con
             `Set an environment variable (e.g., OPENAI_API_KEY), run 'orchagent keys add <provider>', or configure in web dashboard`);
     }
     const { provider, key, model: serverModel } = detected;
-    // Priority: server config model > agent default model > hardcoded default
-    const model = serverModel || agentData.default_models?.[provider] || (0, llm_1.getDefaultModel)(provider);
+    // Priority: CLI override > server config model > agent default model > hardcoded default
+    const model = modelOverride || serverModel || agentData.default_models?.[provider] || (0, llm_1.getDefaultModel)(provider);
     // Show which provider is being used (helpful for debugging rate limits)
     process.stderr.write(`Running with ${provider} (${model})...\n`);
     // Call the LLM directly
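The two run.js edits above put the new CLI override at the front of the model fallback chain. A small illustration of the resulting precedence; the concrete values are placeholders and only the order of the || chain mirrors the code.

// Placeholder values; the || order is what the diff changes.
const modelOverride = undefined;              // from the new --model flag (highest priority)
const serverModel = undefined;                // model attached to the detected server-side key config
const agentDefaultModel = 'gpt-4o-mini';      // stand-in for agentData.default_models?.[provider]
const hardcodedDefault = 'gpt-4o';            // stand-in for (0, llm_1.getDefaultModel)(provider)

const model = modelOverride || serverModel || agentDefaultModel || hardcodedDefault;
console.log(model); // 'gpt-4o-mini' here; passing --model would take precedence over all three fallbacks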
@@ -772,6 +772,7 @@ function registerRunCommand(program) {
         .option('--here', 'Scan current directory (passes absolute path to agent)')
         .option('--path <dir>', 'Shorthand for --input \'{"path": "<dir>"}\'')
         .option('--provider <name>', 'LLM provider to use (openai, anthropic, gemini, ollama)')
+        .option('--model <model>', 'LLM model to use (overrides agent default)')
         .addHelpText('after', `
 Examples:
   orch run orchagent/leak-finder --input '{"path": "."}'
@@ -793,6 +794,19 @@ Note: Use 'run' for local execution, 'call' for server-side execution.
         else if (options.path) {
             options.input = JSON.stringify({ path: options.path });
         }
+        if (options.model && options.provider) {
+            const modelLower = options.model.toLowerCase();
+            const providerPatterns = {
+                openai: /^(gpt-|o1-|o3-|davinci|text-)/,
+                anthropic: /^claude-/,
+                gemini: /^gemini-/,
+                ollama: /^(llama|mistral|deepseek|phi|qwen)/,
+            };
+            const expectedPattern = providerPatterns[options.provider];
+            if (expectedPattern && !expectedPattern.test(modelLower)) {
+                process.stderr.write(`Warning: Model '${options.model}' may not be a ${options.provider} model.\n\n`);
+            }
+        }
         const resolved = await (0, config_1.getResolvedConfig)();
         const parsed = parseAgentRef(agentRef);
         const org = parsed.org ?? resolved.defaultOrg;
@@ -941,7 +955,7 @@ Note: Use 'run' for local execution, 'call' for server-side execution.
         }
         // Execute locally
         process.stderr.write(`Executing locally...\n\n`);
-        const result = await executePromptLocally(agentData, inputData, skillPrompts, resolved, options.provider);
+        const result = await executePromptLocally(agentData, inputData, skillPrompts, resolved, options.provider, options.model);
         if (options.json) {
            (0, output_1.printJson)(result);
         }
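Taken together, both commands now accept an explicit model, and 'run' forwards it into local execution. For example (an invocation assembled from the help text and flags shown above; the model value is only an illustration), orch run orchagent/leak-finder --input '{"path": "."}' --provider openai --model gpt-4o would use gpt-4o regardless of the agent's default, and a model name that does not match the chosen provider's naming pattern triggers the stderr warning.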