@mndrk/agx 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +83 -0
- package/index.js +228 -0
- package/package.json +27 -0
package/README.md
ADDED
@@ -0,0 +1,83 @@
# agx

Unified AI Agent Wrapper for Gemini, Claude, and Ollama.

## Installation

From the `agx` directory:
```bash
npm link
```
Now you can use `agx` globally.

## Usage

```bash
agx <provider> [options] --prompt "<prompt>"
```

### Providers

| Provider | Aliases | Backend |
|----------|---------|---------|
| `gemini` | `gem`, `g` | Google Gemini CLI |
| `claude` | `cl`, `c` | Anthropic Claude CLI |
| `ollama` | `ol`, `o` | Local Ollama via Claude interface |

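The aliases resolve to the same backends, so the short and full forms are interchangeable:

```bash
agx g --prompt "hello"    # same as: agx gemini --prompt "hello"
agx cl --prompt "hello"   # same as: agx claude --prompt "hello"
```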
### Options

| Option | Short | Description |
|--------|-------|-------------|
| `--prompt <text>` | `-p` | The prompt to send |
| `--model <name>` | `-m` | Model name to use |
| `--yolo` | `-y` | Skip permission prompts |
| `--print` | | Non-interactive mode (output and exit) |
| `--interactive` | `-i` | Force interactive mode |
| `--sandbox` | `-s` | Enable sandbox (gemini only) |
| `--debug` | `-d` | Enable debug output |
| `--mcp <config>` | | MCP config file (claude/ollama only) |

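Each flag is translated into whatever the underlying CLI expects (see `index.js`); for example, `--yolo` is passed through unchanged for gemini but becomes `--dangerously-skip-permissions` for claude and ollama:

```bash
agx gemini --yolo --prompt "run it"   # spawns: gemini --yolo "run it"
agx claude --yolo --prompt "run it"   # spawns: claude --dangerously-skip-permissions "run it"
```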
### Raw Passthrough

Use `--` to pass arguments directly to the underlying CLI:
```bash
agx claude -- --resume
```

## LLM-Predictable Command Patterns

For LLMs constructing commands, use these canonical patterns:

```bash
# Pattern: agx <provider> --prompt "<prompt>"
agx claude --prompt "explain this code"
agx gemini --prompt "summarize the file"
agx ollama --prompt "write a function"

# Pattern: agx <provider> --model <model> --prompt "<prompt>"
agx claude --model claude-sonnet-4-20250514 --prompt "fix the bug"
agx gemini --model gemini-2.0-flash --prompt "optimize this"
agx ollama --model qwen3:8b --prompt "refactor"

# Pattern: agx <provider> --yolo --prompt "<prompt>"
agx claude --yolo --prompt "run the tests"

# Pattern: agx <provider> --print --prompt "<prompt>"
agx claude --print --prompt "what is 2+2"
```

### Command Structure

```
agx <provider> [--model <name>] [--yolo] [--print] --prompt "<prompt>"
```

**Rules for LLMs:**
1. Always use the `--prompt` flag for the prompt text
2. Quote the prompt with double quotes
3. Place options before `--prompt`
4. Use full provider names (`claude`, `gemini`, `ollama`) for clarity

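A single command that follows all four rules:

```bash
agx claude --model claude-sonnet-4-20250514 --yolo --prompt "add unit tests"
```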
## Ollama Support

`agx ollama` automatically configures the environment to use a local Ollama instance as the backend for Claude Code. The default model is `glm-4.7:cloud` unless one is specified with `--model`.
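Under the hood, this amounts to pointing the Claude CLI at Ollama's local endpoint; it is roughly equivalent to the following manual invocation (values taken from `index.js`):

```bash
ANTHROPIC_AUTH_TOKEN=ollama \
ANTHROPIC_BASE_URL=http://localhost:11434 \
ANTHROPIC_API_KEY=none \
claude --model glm-4.7:cloud "<prompt>"
```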
package/index.js
ADDED
@@ -0,0 +1,228 @@
#!/usr/bin/env node

const { spawn } = require('child_process');

const args = process.argv.slice(2);
let provider = args[0];

// Normalize provider aliases
const PROVIDER_ALIASES = {
  'g': 'gemini',
  'gem': 'gemini',
  'gemini': 'gemini',
  'c': 'claude',
  'cl': 'claude',
  'claude': 'claude',
  'o': 'ollama',
  'ol': 'ollama',
  'ollama': 'ollama'
};

const VALID_PROVIDERS = ['gemini', 'claude', 'ollama'];

// Handle help before provider check
if (args.includes('--help') || args.includes('-h') || !provider) {
  console.log(`agx - Unified AI Agent CLI

SYNTAX:
  agx <provider> [options] "<prompt>"
  agx <provider> [options] --prompt "<prompt>"

PROVIDERS:
  gemini, gem, g        Google Gemini
  claude, cl, c         Anthropic Claude
  ollama, ol, o         Local Ollama (via Claude interface)

OPTIONS:
  --prompt, -p <text>   The prompt to send (recommended for clarity)
  --model, -m <name>    Model name to use
  --yolo, -y            Skip permission prompts
  --print               Non-interactive mode (output and exit)
  --interactive, -i     Force interactive mode
  --sandbox, -s         Enable sandbox (gemini only)
  --debug, -d           Enable debug output
  --mcp <config>        MCP config file (claude/ollama only)

EXAMPLES:
  agx claude --prompt "explain this code"
  agx gemini -m gemini-2.0-flash --prompt "hello"
  agx ollama --model qwen3:8b --prompt "write a poem"
  agx c --yolo -p "fix the bug"

RAW PASSTHROUGH:
  Use -- to pass arguments directly to underlying CLI:
  agx claude -- --resume

NOTE: For predictable LLM usage, always use --prompt or -p flag.`);
  process.exit(0);
}

// Resolve provider
provider = PROVIDER_ALIASES[provider.toLowerCase()];
if (!provider) {
  console.error(`Error: Unknown provider "${args[0]}"`);
  console.error(`Valid providers: ${VALID_PROVIDERS.join(', ')}`);
  process.exit(1);
}

const remainingArgs = args.slice(1);
const translatedArgs = [];
const rawArgs = [];
let env = { ...process.env };

// Split raw arguments at --
const dashIndex = remainingArgs.indexOf('--');
let processedArgs = remainingArgs;
if (dashIndex !== -1) {
  processedArgs = remainingArgs.slice(0, dashIndex);
  rawArgs.push(...remainingArgs.slice(dashIndex + 1));
}

// Parsed options (explicit structure for predictability)
const options = {
  prompt: null,
  model: null,
  yolo: false,
  print: false,
  interactive: false,
  sandbox: false,
  debug: false,
  mcp: null
};

// Collect positional args (legacy support, but --prompt is preferred)
const positionalArgs = [];

for (let i = 0; i < processedArgs.length; i++) {
  const arg = processedArgs[i];
  const nextArg = processedArgs[i + 1];

  switch (arg) {
    case '--prompt':
    case '-p':
      if (nextArg && !nextArg.startsWith('-')) {
        options.prompt = nextArg;
        i++;
      }
      break;
    case '--model':
    case '-m':
      if (nextArg && !nextArg.startsWith('-')) {
        options.model = nextArg;
        i++;
      }
      break;
    case '--yolo':
    case '-y':
      options.yolo = true;
      break;
    case '--print':
      options.print = true;
      break;
    case '--interactive':
    case '-i':
      options.interactive = true;
      break;
    case '--sandbox':
    case '-s':
      options.sandbox = true;
      break;
    case '--debug':
    case '-d':
      options.debug = true;
      break;
    case '--mcp':
      if (nextArg && !nextArg.startsWith('-')) {
        options.mcp = nextArg;
        i++;
      }
      break;
    default:
      if (arg.startsWith('-')) {
        // Unknown flag - pass through
        translatedArgs.push(arg);
      } else {
        // Positional argument (legacy prompt support)
        positionalArgs.push(arg);
      }
  }
}

// Determine final prompt: explicit --prompt takes precedence
const finalPrompt = options.prompt || positionalArgs.join(' ') || null;

// Build command based on provider
let command = '';

// Apply common options to translatedArgs
if (options.model) {
  translatedArgs.push('--model', options.model);
}
if (options.debug) {
  translatedArgs.push('--debug');
}

if (provider === 'gemini') {
  command = 'gemini';

  // Gemini-specific translations
  if (options.yolo) translatedArgs.push('--yolo');
  if (options.sandbox) translatedArgs.push('--sandbox');

  // Gemini prompt handling
  if (finalPrompt) {
    if (options.print) {
      translatedArgs.push('--prompt', finalPrompt);
    } else if (options.interactive) {
      translatedArgs.push('--prompt-interactive', finalPrompt);
    } else {
      translatedArgs.push(finalPrompt);
    }
  }
} else {
  // Claude or Ollama
  command = 'claude';

  // Claude-specific translations
  if (options.yolo) translatedArgs.push('--dangerously-skip-permissions');
  if (options.print) translatedArgs.push('--print');
  if (options.mcp) translatedArgs.push('--mcp-config', options.mcp);

  // Ollama-specific environment setup
  if (provider === 'ollama') {
    env.ANTHROPIC_AUTH_TOKEN = 'ollama';
    env.ANTHROPIC_BASE_URL = 'http://localhost:11434';
    env.ANTHROPIC_API_KEY = 'none';
    // Default model for Ollama if not specified
    if (!options.model) {
      translatedArgs.push('--model', 'glm-4.7:cloud');
    }
  }

  // Claude prompt (positional at end)
  if (finalPrompt) {
    translatedArgs.push(finalPrompt);
  }
}

// Append raw args at the end
translatedArgs.push(...rawArgs);

const child = spawn(command, translatedArgs, {
  env,
  stdio: 'inherit',
  shell: false
});

child.on('exit', (code) => {
  process.exit(code || 0);
});

child.on('error', (err) => {
  if (err.code === 'ENOENT') {
    console.error(`Error: "${command}" command not found.`);
  } else {
    console.error(`Failed to start ${command}:`, err);
  }
  process.exit(1);
});
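End to end, the script resolves the alias, translates the flags for the chosen backend, and spawns the underlying CLI with the prompt appended. Two worked examples, derived from the branches above:

```bash
agx gemini -i --prompt "hello"
# spawns: gemini --prompt-interactive "hello"

agx c --yolo -p "fix the bug"
# spawns: claude --dangerously-skip-permissions "fix the bug"
```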
package/package.json
ADDED
@@ -0,0 +1,27 @@
{
  "name": "@mndrk/agx",
  "version": "1.0.0",
  "description": "Unified AI Agent Wrapper for Gemini, Claude, and Ollama",
  "main": "index.js",
  "bin": {
    "agx": "./index.js"
  },
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "keywords": [
    "ai",
    "agent",
    "cli",
    "gemini",
    "claude",
    "ollama",
    "llm"
  ],
  "author": "mndrk",
  "license": "MIT",
  "repository": {
    "type": "git",
    "url": "https://github.com/mndrk/agx"
  }
}