@orchagent/cli 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,252 @@
1
+ "use strict";
2
+ /**
3
+ * LLM Provider Abstraction
4
+ *
5
+ * Centralized LLM provider configuration and utilities.
6
+ * Used by run, call, and skill commands.
7
+ */
8
// --- TypeScript-emitted CommonJS interop helpers (generated by tsc; do not edit by hand) ---
// __createBinding: re-exports property `k` of module `m` onto `o` (as `k2`),
// preferring a getter so the binding stays live when the source module allows it.
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
// __setModuleDefault: attaches a CommonJS module object as the `default` export.
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
// __importStar: emulates `import * as ns from '...'` for plain CommonJS modules
// by copying every own key (except `default`) onto a namespace object.
var __importStar = (this && this.__importStar) || (function () {
    var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
Object.defineProperty(exports, "__esModule", { value: true });
// Public surface of this module; the function exports are assigned from the
// hoisted function declarations defined later in the file.
exports.DEFAULT_MODELS = exports.PROVIDER_ENV_VARS = void 0;
exports.detectLlmKeyFromEnv = detectLlmKeyFromEnv;
exports.detectLlmKey = detectLlmKey;
exports.getDefaultModel = getDefaultModel;
exports.buildPrompt = buildPrompt;
exports.callLlm = callLlm;
exports.validateProvider = validateProvider;
const errors_1 = require("./errors");
// Environment variable names for each provider
exports.PROVIDER_ENV_VARS = {
    openai: 'OPENAI_API_KEY',
    anthropic: 'ANTHROPIC_API_KEY',
    gemini: 'GEMINI_API_KEY',
};
// Default models for each provider
exports.DEFAULT_MODELS = {
    openai: 'gpt-4o',
    anthropic: 'claude-sonnet-4-20250514',
    gemini: 'gemini-1.5-pro',
};
62
/**
 * Detect an LLM API key from environment variables.
 *
 * Walks `supportedProviders` in order and returns the first provider whose
 * env var (per PROVIDER_ENV_VARS) is set. The pseudo-provider 'any' expands
 * to every known provider in declaration order. Returns null when nothing
 * matches.
 */
function detectLlmKeyFromEnv(supportedProviders) {
    for (const provider of supportedProviders) {
        // Build the (provider, envVar) pairs this entry should check:
        // 'any' expands to all known providers, a named provider maps to
        // at most one pair (none if the name is unknown).
        let candidates;
        if (provider === 'any') {
            candidates = Object.entries(exports.PROVIDER_ENV_VARS);
        }
        else {
            const envVar = exports.PROVIDER_ENV_VARS[provider];
            candidates = envVar ? [[provider, envVar]] : [];
        }
        for (const [name, envVar] of candidates) {
            const value = process.env[envVar];
            if (value) {
                return { provider: name, key: value };
            }
        }
    }
    return null;
}
89
/**
 * Detect an LLM API key, falling back to the server.
 *
 * Order of precedence: local environment variables first (cheap, no I/O),
 * then keys stored server-side when an authenticated `config` is available.
 * Server results may also carry a preferred `model`. Returns null when no
 * key can be found anywhere; server failures are swallowed deliberately so
 * a missing server never blocks local usage.
 */
async function detectLlmKey(supportedProviders, config) {
    // Fast path: a key exported in the local environment wins.
    const localKey = detectLlmKeyFromEnv(supportedProviders);
    if (localKey)
        return localKey;
    // Without an authenticated config there is no server to ask.
    if (!config?.apiKey)
        return null;
    try {
        const { fetchLlmKeys } = await Promise.resolve().then(() => __importStar(require('./api')));
        const serverKeys = await fetchLlmKeys(config);
        for (const provider of supportedProviders) {
            // 'any' accepts whatever key the server lists first; a named
            // provider requires an exact match.
            const match = provider === 'any'
                ? serverKeys[0]
                : serverKeys.find((k) => k.provider === provider);
            if (match) {
                return { provider: match.provider, key: match.api_key, model: match.model };
            }
        }
    }
    catch {
        // Server fetch failed — best-effort only, fall through to null.
    }
    return null;
}
126
/**
 * Resolve the default model for a provider.
 * Unknown providers fall back to the OpenAI default ('gpt-4o').
 */
function getDefaultModel(provider) {
    const configured = exports.DEFAULT_MODELS[provider];
    return configured || 'gpt-4o';
}
132
/**
 * Build a full prompt by injecting input data into the template.
 *
 * Each `{{key}}` placeholder present in the template is replaced by the
 * stringified value; when any input is provided at all, the full input is
 * also appended as a fenced JSON block so complex payloads survive intact.
 * Matches server behavior in gateway/src/gateway/llm.py:build_prompt
 */
function buildPrompt(template, inputData) {
    const entries = Object.entries(inputData);
    // Substitute every {{key}} placeholder that actually appears.
    let prompt = entries.reduce((acc, [key, value]) => {
        const placeholder = `{{${key}}}`;
        return acc.includes(placeholder)
            ? acc.split(placeholder).join(String(value))
            : acc;
    }, template);
    // Append the raw input as JSON whenever there is any input at all.
    if (entries.length > 0) {
        prompt += `\n\nInput:\n\`\`\`json\n${JSON.stringify(inputData, null, 2)}\n\`\`\``;
    }
    return prompt;
}
151
/**
 * Call an LLM provider directly (for local execution).
 * Dispatches to the provider-specific helper; throws CliError for any
 * provider outside the supported set.
 */
async function callLlm(provider, apiKey, model, prompt, outputSchema) {
    switch (provider) {
        case 'openai':
            return callOpenAI(apiKey, model, prompt, outputSchema);
        case 'anthropic':
            return callAnthropic(apiKey, model, prompt, outputSchema);
        case 'gemini':
            return callGemini(apiKey, model, prompt, outputSchema);
        default:
            throw new errors_1.CliError(`Unsupported provider: ${provider}`);
    }
}
166
/**
 * Call the OpenAI chat-completions API with a single user message.
 * When an output schema is requested, JSON-object response mode is enabled.
 * Returns the parsed JSON reply, or `{ result: <text> }` when the reply is
 * not valid JSON. Throws CliError on any non-2xx HTTP response.
 */
async function callOpenAI(apiKey, model, prompt, outputSchema) {
    const requestBody = {
        model,
        messages: [{ role: 'user', content: prompt }],
    };
    if (outputSchema) {
        // Nudge the model into emitting a JSON object.
        requestBody.response_format = { type: 'json_object' };
    }
    const res = await fetch('https://api.openai.com/v1/chat/completions', {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            Authorization: `Bearer ${apiKey}`,
        },
        body: JSON.stringify(requestBody),
    });
    if (!res.ok) {
        throw new errors_1.CliError(`OpenAI API error: ${await res.text()}`);
    }
    const payload = (await res.json());
    const text = payload.choices?.[0]?.message?.content || '';
    try {
        return JSON.parse(text);
    }
    catch {
        // Not JSON — wrap the raw text so callers always get an object.
        return { result: text };
    }
}
195
/**
 * Call the Anthropic messages API with a single user message.
 * The output schema is accepted for signature parity but unused — Anthropic
 * has no JSON response mode here. Returns parsed JSON, or `{ result: <text> }`
 * for non-JSON replies. Throws CliError on any non-2xx HTTP response.
 */
async function callAnthropic(apiKey, model, prompt, _outputSchema) {
    const res = await fetch('https://api.anthropic.com/v1/messages', {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            'x-api-key': apiKey,
            'anthropic-version': '2023-06-01',
        },
        body: JSON.stringify({
            model,
            max_tokens: 4096,
            messages: [{ role: 'user', content: prompt }],
        }),
    });
    if (!res.ok) {
        throw new errors_1.CliError(`Anthropic API error: ${await res.text()}`);
    }
    const payload = (await res.json());
    const text = payload.content?.[0]?.text || '';
    try {
        return JSON.parse(text);
    }
    catch {
        // Not JSON — wrap the raw text so callers always get an object.
        return { result: text };
    }
}
222
/**
 * Call the Google Gemini generateContent API with a single text part.
 * The output schema parameter is accepted for signature parity but unused.
 * Returns parsed JSON, or `{ result: <text> }` for non-JSON replies.
 * Throws CliError on any non-2xx HTTP response.
 */
async function callGemini(apiKey, model, prompt, _outputSchema) {
    // NOTE: the API key travels in the query string for this endpoint.
    const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${apiKey}`;
    const res = await fetch(endpoint, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            contents: [{ parts: [{ text: prompt }] }],
        }),
    });
    if (!res.ok) {
        throw new errors_1.CliError(`Gemini API error: ${await res.text()}`);
    }
    const payload = (await res.json());
    const text = payload.candidates?.[0]?.content?.parts?.[0]?.text || '';
    try {
        return JSON.parse(text);
    }
    catch {
        // Not JSON — wrap the raw text so callers always get an object.
        return { result: text };
    }
}
244
/**
 * Validate a provider string against known providers.
 * Returns nothing on success; throws CliError for anything unrecognized.
 */
function validateProvider(provider) {
    const validProviders = ['openai', 'anthropic', 'gemini'];
    if (validProviders.includes(provider)) {
        return;
    }
    throw new errors_1.CliError(`Invalid provider: ${provider}. Valid: ${validProviders.join(', ')}`);
}
@@ -0,0 +1,50 @@
1
"use strict";
// TypeScript-emitted helper: wraps a plain CommonJS export so it can be
// consumed via `.default`, mirroring `import x from '...'`.
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
// Public surface: JSON pretty-printer and the agents listing table.
exports.printJson = printJson;
exports.printAgentsTable = printAgentsTable;
const cli_table3_1 = __importDefault(require("cli-table3"));
const chalk_1 = __importDefault(require("chalk"));
10
/**
 * Pretty-print a value as 2-space-indented JSON followed by a newline.
 */
function printJson(value) {
    const serialized = JSON.stringify(value, null, 2);
    process.stdout.write(`${serialized}\n`);
}
13
/**
 * Render a list of agents as an ASCII table on stdout.
 * Columns: full name (org/name), type, supported providers, star count, and
 * a description truncated to 30 characters ('-' when absent).
 */
function printAgentsTable(agents) {
    const table = new cli_table3_1.default({
        head: ['Agent', 'Type', 'Providers', 'Stars', 'Description'].map((label) => chalk_1.default.bold(label)),
    });
    for (const agent of agents) {
        const fullName = `${agent.org_slug}/${agent.name}`;
        const kind = agent.type === 'prompt' ? 'prompt' : 'code';
        const providerText = formatProviders(agent.supported_providers);
        const starText = (agent.stars_count ?? 0).toString();
        // Truncate long descriptions to keep rows narrow; '-' when missing.
        let summary = '-';
        if (agent.description) {
            summary = agent.description.length > 30
                ? agent.description.slice(0, 27) + '...'
                : agent.description;
        }
        table.push([fullName, kind, providerText, starText, summary]);
    }
    process.stdout.write(`${table.toString()}\n`);
}
37
/**
 * Format a provider list for display, color-coding the known providers.
 * Missing, empty, or 'any'-containing lists collapse to a green "any";
 * unknown provider names pass through uncolored.
 */
function formatProviders(providers) {
    const isWildcard = !providers || providers.length === 0 || providers.includes('any');
    if (isWildcard) {
        return chalk_1.default.green('any');
    }
    const colored = providers.map((name) => {
        switch (name) {
            case 'openai':
                return chalk_1.default.cyan('openai');
            case 'anthropic':
                return chalk_1.default.magenta('anthropic');
            case 'gemini':
                return chalk_1.default.yellow('gemini');
            default:
                return name;
        }
    });
    return colored.join(', ');
}
package/dist/types.js ADDED
@@ -0,0 +1,2 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
package/package.json ADDED
@@ -0,0 +1,59 @@
1
+ {
2
+ "name": "@orchagent/cli",
3
+ "version": "0.1.0",
4
+ "description": "Command-line interface for the OrchAgent AI agent marketplace",
5
+ "license": "MIT",
6
+ "author": "OrchAgent <hello@orchagent.io>",
7
+ "homepage": "https://orchagent.io",
8
+ "repository": {
9
+ "type": "git",
10
+ "url": "https://github.com/orchagent/orchagent.git",
11
+ "directory": "cli"
12
+ },
13
+ "bugs": {
14
+ "url": "https://github.com/orchagent/orchagent/issues"
15
+ },
16
+ "keywords": [
17
+ "orchagent",
18
+ "ai",
19
+ "agents",
20
+ "cli",
21
+ "marketplace",
22
+ "llm"
23
+ ],
24
+ "engines": {
25
+ "node": ">=18"
26
+ },
27
+ "bin": {
28
+ "orchagent": "dist/index.js"
29
+ },
30
+ "files": [
31
+ "dist"
32
+ ],
33
+ "scripts": {
34
+ "build": "tsc -p tsconfig.json",
35
+ "dev": "tsc -p tsconfig.json --watch",
36
+ "start": "node dist/index.js",
37
+ "test": "vitest run",
38
+ "test:watch": "vitest",
39
+ "test:coverage": "vitest run --coverage",
40
+ "prepublishOnly": "npm run build"
41
+ },
42
+ "dependencies": {
43
+ "@sentry/node": "^9.3.0",
44
+ "archiver": "^7.0.0",
45
+ "chalk": "^4.1.2",
46
+ "cli-table3": "^0.6.3",
47
+ "commander": "^11.1.0",
48
+ "open": "^10.0.0",
49
+ "posthog-node": "^4.0.0",
50
+ "yaml": "^2.8.2"
51
+ },
52
+ "devDependencies": {
53
+ "@types/archiver": "^6.0.0",
54
+ "@types/node": "^20.14.2",
55
+ "@vitest/coverage-v8": "^2.1.0",
56
+ "typescript": "^5.9.3",
57
+ "vitest": "^2.1.0"
58
+ }
59
+ }