cogeai 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +122 -0
- package/bin/publish.sh +27 -0
- package/bin/sync-models.js +60 -0
- package/coge.js +479 -0
- package/lib/bandit.js +198 -0
- package/lib/config.js +129 -0
- package/lib/default-config.json +422 -0
- package/lib/init-config.js +12 -0
- package/lib/model-classify.js +70 -0
- package/lib/stats.js +83 -0
- package/package.json +39 -0
- package/providers/cerebras.js +11 -0
- package/providers/cloudflare.js +41 -0
- package/providers/codestral.js +7 -0
- package/providers/cohere.js +60 -0
- package/providers/gemini.js +52 -0
- package/providers/github-models.js +29 -0
- package/providers/groq.js +11 -0
- package/providers/huggingface.js +14 -0
- package/providers/index.js +156 -0
- package/providers/mistral.js +12 -0
- package/providers/ollama.js +20 -0
- package/providers/openai-compatible.js +97 -0
- package/providers/openai.js +11 -0
- package/providers/openrouter.js +71 -0
- package/providers/vercel-ai.js +11 -0
package/README.md
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
# coge
|
|
2
|
+
|
|
3
|
+
AI-powered command generator. Describe what you want — get a shell command back.
|
|
4
|
+
|
|
5
|
+
**Free to use** with reasonable daily consumption — all supported providers offer free API tiers. Just grab the API keys and go. Provider list based on [free-llm-api-resources](https://github.com/cheahjs/free-llm-api-resources).
|
|
6
|
+
|
|
7
|
+
Supports multiple LLM providers. When multiple API keys are configured, **races them in parallel** and returns the fastest response.
|
|
8
|
+
|
|
9
|
+
## Quick Start
|
|
10
|
+
|
|
11
|
+
1. Install globally:
|
|
12
|
+
|
|
13
|
+
```bash
|
|
14
|
+
npm install -g cogeai
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
2. Set at least one provider API key:
|
|
18
|
+
|
|
19
|
+
```bash
|
|
20
|
+
export COGE_GEMINI_API_KEY="your-key"
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
3. Run:
|
|
24
|
+
|
|
25
|
+
```bash
|
|
26
|
+
coge "find all TODO comments in javascript files"
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
4. Press **Enter** to execute, **c** to copy, **Esc** to cancel.
|
|
30
|
+
|
|
31
|
+
## Usage
|
|
32
|
+
|
|
33
|
+
```
|
|
34
|
+
coge <prompt> Generate a shell command from description
|
|
35
|
+
coge --configure | -c Configure provider and model
|
|
36
|
+
coge --pull models <provider> Fetch available models for a provider
|
|
37
|
+
|
|
38
|
+
Options:
|
|
39
|
+
--non-interactive Print command and exit (for pipelines)
|
|
40
|
+
--debug Show config, provider, and timing info
|
|
41
|
+
--help, -h Show this help message
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
### Examples
|
|
45
|
+
|
|
46
|
+
```bash
|
|
47
|
+
# Generate and execute a command
|
|
48
|
+
coge "list files larger than 100MB"
|
|
49
|
+
|
|
50
|
+
# Use in a pipeline
|
|
51
|
+
coge --non-interactive "compress all pngs in current dir" | bash
|
|
52
|
+
|
|
53
|
+
# See which provider won the race
|
|
54
|
+
coge --debug "disk usage by directory"
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
## Race Mode
|
|
58
|
+
|
|
59
|
+
When multiple providers have API keys configured, coge automatically races them in parallel:
|
|
60
|
+
|
|
61
|
+
- **>3 providers** configured — races 3 random ones (always including your default)
|
|
62
|
+
- **2-3 providers** configured — races all of them
|
|
63
|
+
- **1 provider** configured — uses it directly, no race
|
|
64
|
+
|
|
65
|
+
This is the default behavior — no extra flags needed.
|
|
66
|
+
|
|
67
|
+
## Providers
|
|
68
|
+
|
|
69
|
+
| Provider | Environment Variable |
|
|
70
|
+
|---|---|
|
|
71
|
+
| gemini | `COGE_GEMINI_API_KEY` |
|
|
72
|
+
| openai | `COGE_OPENAI_API_KEY` |
|
|
73
|
+
| openrouter | `COGE_OPENROUTER_API_KEY` |
|
|
74
|
+
| groq | `COGE_GROQ_API_KEY` |
|
|
75
|
+
| cerebras | `COGE_CEREBRAS_API_KEY` |
|
|
76
|
+
| mistral | `COGE_MISTRAL_API_KEY` |
|
|
77
|
+
| codestral | `COGE_CODESTRAL_API_KEY` |
|
|
78
|
+
| cohere | `COGE_COHERE_API_KEY` |
|
|
79
|
+
| cloudflare | `COGE_CLOUDFLARE_API_KEY` |
|
|
80
|
+
| github-models | `COGE_GITHUB_MODELS_TOKEN` |
|
|
81
|
+
| huggingface | `COGE_HUGGINGFACE_API_KEY` |
|
|
82
|
+
| vercel-ai | `COGE_VERCEL_API_KEY` |
|
|
83
|
+
| ollama | _(no key needed — local)_ |
|
|
84
|
+
|
|
85
|
+
Set the env vars for any providers you want to use. The more you set, the faster the race.
|
|
86
|
+
|
|
87
|
+
## Configuration
|
|
88
|
+
|
|
89
|
+
Config file location: `~/.config/coge/config.json` (or `$XDG_CONFIG_HOME/coge/config.json`).
|
|
90
|
+
|
|
91
|
+
Created automatically on first run. Edit manually or use:
|
|
92
|
+
|
|
93
|
+
```bash
|
|
94
|
+
coge --configure
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
### Config structure
|
|
98
|
+
|
|
99
|
+
```json
|
|
100
|
+
{
|
|
101
|
+
"provider": "gemini",
|
|
102
|
+
"model": "gemini-2.5-flash",
|
|
103
|
+
"providers": {
|
|
104
|
+
"gemini": {
|
|
105
|
+
"default": "gemini-2.5-flash",
|
|
106
|
+
"available": ["gemini-2.5-flash", "gemini-2.5-pro"]
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
}
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
### Updating available models
|
|
113
|
+
|
|
114
|
+
Fetch the latest model list from a provider's API:
|
|
115
|
+
|
|
116
|
+
```bash
|
|
117
|
+
coge --pull models gemini
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
## License
|
|
121
|
+
|
|
122
|
+
ISC
|
package/bin/publish.sh
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# Fail fast: -e abort on any error, -u error on unset variables,
# -o pipefail propagate failures through pipelines.
set -euo pipefail

# Publish cogeai to npm
# Usage: ./bin/publish.sh [--dry-run]

# When --dry-run is given, the flag is forwarded verbatim to `npm publish`.
DRY_RUN=""
if [[ "${1:-}" == "--dry-run" ]]; then
DRY_RUN="--dry-run"
echo "==> Dry run mode"
fi

echo "==> Running tests..."
npm test

echo "==> Checking package contents..."
# Lists what would be packed without actually creating a tarball.
npm pack --dry-run

echo ""
# Confirm against the version actually declared in package.json.
read -rp "Publish cogeai@$(node -p "require('./package.json').version") to npm? [y/N] " confirm
if [[ "$confirm" != "y" && "$confirm" != "Y" ]]; then
echo "Aborted."
exit 0
fi

# $DRY_RUN is intentionally unquoted: when empty it must expand to no argument at all.
npm publish $DRY_RUN
echo "==> Done."
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
#!/usr/bin/env node

/**
 * Syncs model lists from user config (~/.config/coge/config.json)
 * into the shipped default-config.json.
 *
 * Only updates `available` arrays; never touches `default`, `provider`, or `model`.
 * Warns when the default model is missing from the available list.
 *
 * Usage: node bin/sync-models.js
 */

import { readFileSync, writeFileSync } from "fs";
import { getConfigPath } from "../lib/config.js";
import { classifyModel, sortByCategory } from "../lib/model-classify.js";

// Resolve the shipped default config relative to this script.
// NOTE(review): URL.pathname may mis-handle Windows drive letters — confirm if Windows support matters.
const defaultConfigPath = new URL("../lib/default-config.json", import.meta.url).pathname;

const userConfig = JSON.parse(readFileSync(getConfigPath(), "utf8"));
const defConfig = JSON.parse(readFileSync(defaultConfigPath, "utf8"));

let updated = 0;
let skipped = 0;

// Iterate the providers the shipped config knows about; user-only providers are ignored.
for (const name of Object.keys(defConfig.providers)) {
  const userProv = (userConfig.providers || {})[name];
  if (!userProv) {
    console.log(` skip ${name} (not in user config)`);
    skipped++;
    continue;
  }

  // Dedupe the user's list before comparing/sorting.
  const userIds = [...new Set(userProv.available || [])];
  const defIds = defConfig.providers[name].available;
  const defModel = defConfig.providers[name].default;

  // Warn (but do not fail) when the shipped default model vanished from the user's list.
  if (defModel && !userIds.includes(defModel)) {
    console.log(` warn ${name}: default model "${defModel}" not in available list`);
  }

  // Classify each id, then sort by category; only the ids are written back.
  const sorted = sortByCategory(userIds.map((id) => ({ id, category: classifyModel(id) }))).map((m) => m.id);

  // JSON round-trip used as a cheap deep-equality check on the two id arrays.
  if (JSON.stringify(sorted) === JSON.stringify(defIds)) {
    console.log(` ok ${name} (${defIds.length} models, unchanged)`);
    continue;
  }

  defConfig.providers[name].available = sorted;
  console.log(` sync ${name}: ${defIds.length} → ${sorted.length} models`);
  updated++;
}

// Only rewrite the file when something actually changed.
if (updated > 0) {
  writeFileSync(defaultConfigPath, JSON.stringify(defConfig, null, 2) + "\n");
  console.log(`\nWrote ${defaultConfigPath}`);
} else {
  console.log("\nAlready in sync, nothing to write.");
}

console.log(`${updated} updated, ${skipped} skipped`);
|
package/coge.js
ADDED
|
@@ -0,0 +1,479 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
import readline from "readline";
|
|
4
|
+
import { execSync } from "child_process";
|
|
5
|
+
import { loadConfig, writeDefaultConfigIfMissing, writeConfig, getConfigPath, defaultConfig } from "./lib/config.js";
|
|
6
|
+
import { getProvider, getConfiguredProviders, getDefaultModels, PROVIDER_MODELS, PROVIDER_FETCH_MODELS, PROVIDER_PAGE_URLS, getAvailableModels } from "./providers/index.js";
|
|
7
|
+
import { loadBanditState, saveBanditState, updateArm, pickProviders } from "./lib/bandit.js";
|
|
8
|
+
import { recordAction, loadStats, formatStats } from "./lib/stats.js";
|
|
9
|
+
import { normalizeAvailableEntry, sortByCategory, isBlacklisted } from "./lib/model-classify.js";
|
|
10
|
+
import clipboard from "clipboardy";
|
|
11
|
+
|
|
12
|
+
// Suppress Node deprecation warnings so CLI output stays clean.
process.noDeprecation = true;

// Ensure a config file exists before anything reads it, then load it once at startup.
writeDefaultConfigIfMissing();
const config = loadConfig();
const args = process.argv.slice(2);

// All known provider names, derived from the provider registry keys.
const PROVIDERS = Object.keys(PROVIDER_MODELS);
|
|
19
|
+
|
|
20
|
+
/**
 * Asks a single question on a readline interface and resolves with the
 * trimmed answer. When a default value is supplied it is shown in brackets
 * and used whenever the user submits an empty line.
 *
 * @param {import("readline").Interface} rl - Open readline interface.
 * @param {string} question - Prompt text (without trailing colon).
 * @param {string} [defaultValue=""] - Fallback used for an empty answer.
 * @returns {Promise<string>} The trimmed answer (or trimmed default).
 */
function ask(rl, question, defaultValue = "") {
  const hint = defaultValue ? ` [${defaultValue}]` : "";
  const prompt = question + hint + ": ";
  return new Promise((resolve) => {
    rl.question(prompt, (answer) => {
      const chosen = answer ? answer : defaultValue;
      resolve(chosen.trim());
    });
  });
}
|
|
28
|
+
|
|
29
|
+
/**
 * Interactive `--configure` flow.
 *
 * Asks for a strategy first: "auto" just persists the strategy and returns;
 * "manual" additionally asks for a provider and model, then rewrites the
 * config with the chosen provider/model and a freshly sorted model list.
 *
 * @throws {Error} When the entered provider is not in PROVIDERS.
 */
async function runConfigure() {
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
  const current = loadConfig();
  console.log("Current config:", getConfigPath());

  // Anything other than exactly "manual" (case-insensitive) falls back to "auto".
  const strategyAnswer = await ask(rl, "Strategy: auto (bandit picks fastest) / manual (you choose)", current.strategy ?? "auto");
  const strategy = strategyAnswer.toLowerCase() === "manual" ? "manual" : "auto";

  if (strategy === "auto") {
    rl.close();
    const newConfig = { ...current, strategy: "auto" };
    writeConfig(newConfig);
    console.log("Strategy set to auto (bandit will learn fastest providers).");
    console.log("Config saved to", getConfigPath());
    return;
  }

  const provider = await ask(rl, `Provider (${PROVIDERS.join(", ")})`, current.provider);
  if (!PROVIDERS.includes(provider.toLowerCase())) {
    throw new Error(`Unknown provider: ${provider}. Use: ${PROVIDERS.join(", ")}`);
  }
  // Config keys are always lower-case; user input may not be.
  const providerLower = provider.toLowerCase();
  const blacklist = current.providers?.[providerLower]?.blacklist ?? [];
  const rawAvailable = current.providers?.[providerLower]?.available ?? getAvailableModels(providerLower);
  if (rawAvailable.length > 0) {
    // Classify entries, overriding the category for blacklisted ids so they sort/render accordingly.
    const models = sortByCategory(rawAvailable.map((entry) => {
      const norm = normalizeAvailableEntry(entry);
      if (isBlacklisted(norm.id, blacklist)) norm.category = "blacklist";
      return norm;
    }));
    // ANSI coloring: strikethrough-grey = blacklisted, yellow = top, green = small, dim = irrelevant.
    const colored = models.map((m) => {
      if (m.category === "blacklist") return `\x1b[90m\x1b[9m${m.id}\x1b[0m`;
      if (m.category === "top") return `\x1b[33m${m.id}\x1b[0m`;
      if (m.category === "small") return `\x1b[32m${m.id}\x1b[0m`;
      if (m.category === "irrelevant") return `\x1b[2m${m.id}\x1b[0m`;
      return m.id;
    });
    console.log(`Available models: ${colored.join(", ")}`);
  }
  // FIX: look up by the lower-cased key (was `provider`, which misses mixed-case input).
  const defModel = current.providers?.[providerLower]?.default ?? current.model ?? defaultConfig.model;
  const model = await ask(rl, `Model for ${provider}`, defModel);
  rl.close();
  const providerKey = provider.toLowerCase();
  const chosenModel = model || defModel;
  const updatedProviders = { ...current.providers };
  if (updatedProviders[providerKey]) {
    // Re-sort the existing list; guard against a config entry with no `available` array.
    const sorted = sortByCategory((updatedProviders[providerKey].available ?? []).map(normalizeAvailableEntry)).map((m) => m.id);
    updatedProviders[providerKey] = { ...updatedProviders[providerKey], default: chosenModel, available: sorted };
  } else {
    const knownAvailable = sortByCategory(getAvailableModels(providerKey).map(normalizeAvailableEntry)).map((m) => m.id);
    updatedProviders[providerKey] = { default: chosenModel, available: knownAvailable };
  }
  const newConfig = {
    provider: providerKey,
    model: chosenModel,
    providers: updatedProviders,
    strategy: "manual",
  };
  writeConfig(newConfig);
  console.log("Config saved to", getConfigPath());
}
|
|
90
|
+
|
|
91
|
+
/**
 * Strips HTML tags, decodes common entities, and collapses whitespace to
 * extract readable text from a fetched page.
 *
 * Entity decoding is deliberately last-resort and minimal (the five common
 * named entities plus `&nbsp;`); `&amp;` is decoded LAST so that e.g.
 * `&amp;lt;` becomes the literal text `&lt;` rather than `<`.
 *
 * @param {string} html - Raw HTML source.
 * @returns {string} Plain text with runs of whitespace collapsed to one space.
 */
function htmlToText(html) {
  return html
    // Drop script/style elements entirely, including their contents.
    .replace(/<script[^>]*>[\s\S]*?<\/script>/gi, "")
    .replace(/<style[^>]*>[\s\S]*?<\/style>/gi, "")
    // Replace remaining tags with a space so adjacent words don't fuse.
    .replace(/<[^>]+>/g, " ")
    // Decode the common entities; the dump had these corrupted into no-ops.
    .replace(/&lt;/g, "<")
    .replace(/&gt;/g, ">")
    .replace(/&quot;/g, '"')
    .replace(/&#39;/g, "'")
    .replace(/&nbsp;/g, " ")
    // Decode &amp; last to avoid double-decoding nested entities.
    .replace(/&amp;/g, "&")
    .replace(/\s+/g, " ")
    .trim();
}
|
|
110
|
+
|
|
111
|
+
/**
 * Fetches model IDs by scraping a documentation page and extracting via LLM.
 *
 * @param {string} url - Documentation page listing the provider's models.
 * @param {import("./providers/index.js").Provider} llmProvider - Provider used to run the extraction prompt.
 * @returns {Promise<Array<{id: string, category: string}>>} Classified model entries.
 * @throws {Error} On a non-2xx HTTP response, unparsable LLM output, or an empty/non-array result.
 */
async function fetchModelsFromPage(url, llmProvider) {
  // Lazy import keeps startup cheap; classifyModel is only needed on this path.
  const { classifyModel } = await import("./lib/model-classify.js");
  const res = await fetch(url);
  if (!res.ok) throw new Error(`HTTP ${res.status}`);
  const html = await res.text();
  // Cap the extracted text so the prompt stays within typical context limits.
  const pageText = htmlToText(html).slice(0, 30000);

  const extractionPrompt = [
    "You are given the text content of a documentation page that lists AI models for an API provider.",
    "Extract all available model IDs (the exact strings used in API calls) from this page.",
    "Return ONLY a JSON array of model ID strings. No explanations, no markdown, no backticks.",
    'Example: ["model-a", "model-b"]',
  ].join(" ");

  const response = await llmProvider.generateContent(extractionPrompt, pageText);
  // JSON.parse throws if the model ignored the "JSON only" instruction.
  const modelIds = JSON.parse(response);
  if (!Array.isArray(modelIds) || modelIds.length === 0) {
    throw new Error("LLM returned empty or non-array result");
  }
  return modelIds.map((id) => ({ id, category: classifyModel(id) }));
}
|
|
138
|
+
|
|
139
|
+
/**
 * `--pull models <provider>` handler.
 *
 * Refreshes a provider's `available` model list in the user config, trying
 * the provider's model-list API first and falling back to scraping the
 * provider's documentation page (with LLM extraction). Prints the updated
 * list with category coloring; blacklisted models are struck through.
 *
 * @param {string|undefined} providerName - Provider to refresh (case-insensitive).
 */
async function runPullModels(providerName) {
  if (!providerName) {
    console.error("Usage: coge --pull models <provider>");
    console.error(`Available providers: ${PROVIDERS.join(", ")}`);
    process.exit(1);
  }
  const name = providerName.toLowerCase();
  if (!PROVIDERS.includes(name)) {
    console.error(`Unknown provider: ${name}. Available: ${PROVIDERS.join(", ")}`);
    process.exit(1);
  }
  const providerMeta = PROVIDER_MODELS[name];
  const fetchFn = PROVIDER_FETCH_MODELS[name];
  const modelsPageUrl = PROVIDER_PAGE_URLS[name];

  if (!fetchFn && !modelsPageUrl) {
    console.error(`Provider "${name}" does not support fetching models.`);
    process.exit(1);
  }

  let modelIds;

  // Try provider's own fetchModels first
  if (fetchFn) {
    console.log(`Fetching models for ${name}...`);
    try {
      modelIds = await fetchFn();
    } catch (err) {
      if (modelsPageUrl) {
        console.log(`API fetch failed (${err.message}), falling back to page scraping...`);
      } else {
        console.error(`Failed to fetch models: ${err.message}`);
        process.exit(1);
      }
    }
  }

  // Fall back to page scraping + LLM extraction
  if (!modelIds && modelsPageUrl) {
    console.log(`Fetching models page: ${modelsPageUrl}`);
    try {
      const llmProvider = getProvider(config);
      modelIds = await fetchModelsFromPage(modelsPageUrl, llmProvider);
    } catch (err) {
      console.error(`Failed to extract model list: ${err.message}`);
      process.exit(1);
    }
  }

  // FIX: sortByCategory returns the sorted array (its return value is used
  // everywhere else in this package); the original discarded it here,
  // leaving modelIds unsorted.
  modelIds = sortByCategory(modelIds);

  const current = loadConfig();
  const updatedProviders = { ...current.providers };
  const existing = updatedProviders[name] ?? { default: providerMeta.default, available: [] };
  // Persist only the ids; categories are recomputed on display.
  updatedProviders[name] = { ...existing, available: modelIds.map((m) => m.id) };
  writeConfig({ ...current, providers: updatedProviders });

  const blacklist = updatedProviders[name]?.blacklist ?? [];
  console.log(`Updated ${name} available models (${modelIds.length}):`);
  for (const m of modelIds) {
    if (blacklist.includes(m.id)) {
      console.log(` \x1b[9m\x1b[2m${m.id}\x1b[0m [blacklisted]`);
    } else {
      // Yellow = top, green = small, dim = irrelevant, plain otherwise.
      const color = m.category === "top" ? "\x1b[33m" : m.category === "small" ? "\x1b[32m" : m.category === "irrelevant" ? "\x1b[2m" : "";
      const reset = color ? "\x1b[0m" : "";
      console.log(` ${color}${m.id}${reset}`);
    }
  }
}
|
|
208
|
+
|
|
209
|
+
/**
 * `--ptestall` handler: sends a fixed test prompt to every configured
 * provider sequentially and reports latency plus a response preview.
 * A provider "passes" if it returns anything at all; a response not
 * starting with "ls" only earns a warning. Exits 0 only if all pass.
 */
async function runPtestAll() {
  const configured = getConfiguredProviders();
  if (configured.length === 0) {
    console.error("No providers configured. Set API key environment variables first.");
    process.exit(1);
  }

  const current = loadConfig();
  const defaults = getDefaultModels();
  const systemPrompt = buildSystemPrompt();
  // Simple prompt whose expected answer ("ls ...") is easy to sanity-check.
  const testPrompt = "list of files";

  console.log("Testing all configured providers...\n");

  const results = [];

  // Sequential on purpose: keeps output ordered and avoids hammering rate limits.
  for (const name of configured) {
    const model = current.providers?.[name]?.default ?? defaults[name] ?? "unknown";
    const label = `${name} (${model})`;
    const padded = label.padEnd(40);

    try {
      const start = Date.now();
      const provider = getProvider({ ...current, provider: name, model });
      const response = await provider.generateContent(systemPrompt, testPrompt);
      const latency = Date.now() - start;
      const preview = response.split("\n")[0].slice(0, 60);
      const startsWithLs = /^ls\b/i.test(response.trim());

      // Both branches count as a pass; the second just flags a suspicious answer.
      if (startsWithLs) {
        console.log(` \x1b[32m✓\x1b[0m ${padded} ${latency}ms "${preview}"`);
        results.push(true);
      } else {
        console.log(` \x1b[32m✓\x1b[0m ${padded} ${latency}ms "${preview}" \x1b[33m(warning: does not start with "ls")\x1b[0m`);
        results.push(true);
      }
    } catch (err) {
      // First line of the error, truncated, keeps the table readable.
      const msg = err.message?.split("\n")[0].slice(0, 80) ?? "unknown error";
      console.log(` \x1b[31m✗\x1b[0m ${padded} ${msg}`);
      results.push(false);
    }
  }

  const passed = results.filter(Boolean).length;
  console.log(`\nResults: ${passed}/${results.length} passed`);
  process.exit(passed === results.length ? 0 : 1);
}
|
|
256
|
+
|
|
257
|
+
/**
 * Builds the platform-specific system prompt sent to the LLM.
 * Windows gets a PowerShell prompt (commands chained with `;`),
 * macOS a zsh prompt, and everything else a bash prompt
 * (commands chained with `&&`).
 *
 * @returns {string} One-paragraph instruction string.
 */
function buildSystemPrompt() {
  const rules = "One line only. No explanations, no markdown, no backticks. ";
  if (process.platform === "win32") {
    return `You are a Windows command-line assistant. You receive instructions and reply with a single PowerShell command. ${rules}Chain multiple commands with ; or |. Output nothing but the command.`;
  }
  const isMac = process.platform === "darwin";
  const osName = isMac ? "macOS" : "Linux";
  const shell = isMac ? "zsh" : "bash";
  return `You are a ${osName} command-line assistant. You receive instructions and reply with a single ${shell} command. ${rules}Chain multiple commands with && or |. Output nothing but the command.`;
}
|
|
273
|
+
|
|
274
|
+
// --- CLI dispatch -----------------------------------------------------------
// Each flag handler exits the process itself; only the final `else` branch
// runs the actual prompt → command generation flow.
if (args[0] === "--help" || args[0] === "-h") {
  console.log(`coge - AI-powered command generator

Usage:
  coge <prompt>                  Generate a shell command from description
  coge --configure | -c          Configure provider and model
  coge --pull models <provider>  Fetch available models for a provider
  coge --ptestall                Test all configured providers
  coge --stats                   Show usage statistics per provider/model

Options:
  --non-interactive  Print command and exit (for pipelines)
  --debug            Show config, provider, and timing info
  --help, -h         Show this help message

Providers: ${PROVIDERS.join(", ")}`);
  process.exit(0);
} else if (args[0] === "--configure" || args[0] === "-c") {
  runConfigure()
    .then(() => process.exit(0))
    .catch((err) => {
      console.error("Error:", err);
      process.exit(1);
    });
} else if (args[0] === "--ptestall") {
  runPtestAll()
    .then(() => process.exit(0))
    .catch((err) => {
      console.error("Error:", err);
      process.exit(1);
    });
} else if (args[0] === "--stats") {
  console.log(formatStats(loadStats()));
  process.exit(0);
} else if (args[0] === "--pull" && args[1] === "models") {
  runPullModels(args[2])
    .then(() => process.exit(0))
    .catch((err) => {
      console.error("Error:", err);
      process.exit(1);
    });
} else if (args[0] === "config") {
  // Legacy subcommand: point users at the supported flag.
  console.error("Use --configure or -c to configure.");
  process.exit(0);
} else {
  // Default path: everything that is not a flag becomes the prompt text.
  const nonInteractive = args.includes("--non-interactive");
  const debug = args.includes("--debug");
  const filteredArgs = args.filter((a) => a !== "--non-interactive" && a !== "--debug");
  const promptText = filteredArgs.join(" ");
  if (!promptText) {
    console.error("No prompt provided.");
    process.exit(1);
  }

  const systemPrompt = buildSystemPrompt();
  const defaultModels = getDefaultModels();

  // Adds models that triggered model-not-found style errors to the provider's
  // blacklist in the persisted config. Re-loads the config so it mutates the
  // freshest copy on disk, not the snapshot taken at startup.
  function autoBlacklist(results, cfg) {
    const MODEL_ERRORS = /unknown_model|unavailable_model/i;
    const current = loadConfig();
    let changed = false;
    for (const { name, success, error } of results) {
      // Only failed results whose error message matches a model-level error qualify.
      if (success || !error || !MODEL_ERRORS.test(error)) continue;
      const model = cfg.providers?.[name]?.default ?? defaultModels[name];
      if (!model) continue;
      const entry = current.providers?.[name];
      if (!entry) continue;
      const bl = entry.blacklist ?? [];
      if (!bl.includes(model)) {
        bl.push(model);
        entry.blacklist = bl;
        changed = true;
        console.error(`Auto-blacklisted ${name}:${model} (${error})`);
      }
    }
    // Write at most once, and only when something was actually added.
    if (changed) writeConfig(current);
  }

  // Feeds every race result (latency + success) into the persisted bandit
  // state, keyed by "provider:model" arms.
  function updateBanditAfterRace(results, cfg) {
    const state = loadBanditState();
    for (const { name, latency, success } of results) {
      const model = cfg.providers?.[name]?.default ?? defaultModels[name] ?? "unknown";
      const armKey = `${name}:${model}`;
      updateArm(state, armKey, latency, success);
    }
    saveBanditState(state);
  }

  // Fires all selected providers in parallel and resolves with the first
  // fulfilled response (Promise.any). Each attempt records its outcome into
  // `results` so the bandit/blacklist bookkeeping can run after all settle.
  async function raceProviders(providerNames, cfg, sysPrompt, userPrompt) {
    const results = [];

    const wrappedPromises = providerNames.map(async (name) => {
      const start = Date.now();
      try {
        const provider = getProvider({ ...cfg, provider: name, model: cfg.providers?.[name]?.default });
        const result = await provider.generateContent(sysPrompt, userPrompt);
        const latency = Date.now() - start;
        results.push({ name, latency, success: true });
        return { command: result, provider: name };
      } catch (err) {
        const latency = Date.now() - start;
        results.push({ name, latency, success: false, error: err.message });
        // Re-throw so Promise.any skips this provider (and AggregateError
        // carries all failures if every provider loses).
        throw err;
      }
    });

    const winner = await Promise.any(wrappedPromises);

    // Wait for stragglers to settle, then update bandit state + auto-blacklist (fire and forget)
    // NOTE(review): if the process exits before stragglers settle, this bookkeeping is lost.
    Promise.allSettled(wrappedPromises).then(() => {
      updateBanditAfterRace(results, cfg);
      autoBlacklist(results, cfg);
    });

    return winner;
  }

  // Main flow: pick providers, generate the command (single provider or race),
  // then either print it (--non-interactive) or offer Enter/c/Esc actions.
  async function main() {
    if (debug) {
      console.log(`Config: ${getConfigPath()}`);
      console.log(`Provider: ${config.provider}`);
      console.log(`Model: ${config.model ?? "default"}`);
    }

    const configured = getConfiguredProviders();
    // Bandit picks up to 3 providers to race from the configured set.
    const { selected, banditArms } = pickProviders(config, configured, defaultModels, 3);
    if (debug && banditArms) console.log(`Bandit arms: ${banditArms.join(", ")}`);

    let command;
    let winnerArm; // "provider:model" key used for stats recording below.
    const startTime = Date.now();

    if (selected.length <= 1) {
      // 0 or 1 configured — use the user's configured provider directly
      const provider = getProvider(config);
      command = await provider.generateContent(systemPrompt, promptText);
      const model = config.providers?.[config.provider]?.default ?? defaultModels[config.provider] ?? config.model ?? "unknown";
      winnerArm = `${config.provider}:${model}`;
    } else {
      if (debug) {
        console.log(`Racing providers: ${selected.join(", ")}`);
      }
      try {
        const { command: result, provider: winner } = await raceProviders(
          selected, config, systemPrompt, promptText
        );
        command = result;
        const winnerModel = config.providers?.[winner]?.default ?? defaultModels[winner] ?? "unknown";
        winnerArm = `${winner}:${winnerModel}`;
        if (debug) console.log(`Winner: ${winner}`);
      } catch (err) {
        // Promise.any rejects with AggregateError only when EVERY provider failed.
        if (err instanceof AggregateError) {
          console.error("All providers failed:");
          for (const e of err.errors) console.error(` - ${e.message}`);
          process.exit(1);
        }
        throw err;
      }
    }

    const elapsed = Date.now() - startTime;

    if (debug) {
      console.log(`Response time: ${elapsed}ms`);
    }

    if (nonInteractive) {
      // Pipeline mode: count it as an execute, print the bare command, exit.
      recordAction(winnerArm, "execute");
      process.stdout.write(command + "\n");
      process.exit(0);
    }

    process.stdout.write(`\n${command}\n`);
    process.stdout.write(`\x1b[2m [Enter] Execute [c] Copy [Esc] Cancel\x1b[0m\n`);

    // Raw mode so single keypresses arrive without a newline.
    process.stdin.setRawMode(true);
    process.stdin.resume();
    process.stdin.setEncoding("utf8");

    process.stdin.on("data", (key) => {
      if (key === "\r") {
        // Enter: run the command in the user's shell, inheriting stdio.
        recordAction(winnerArm, "execute");
        process.stdin.setRawMode(false);
        process.stdin.pause();
        try {
          execSync(command, { stdio: "inherit", shell: true });
          process.exit(0);
        } catch (err) {
          // Propagate the command's exit status when available.
          process.exit(err.status ?? 1);
        }
      } else if (key === "c") {
        recordAction(winnerArm, "copy");
        clipboard.writeSync(command);
        process.exit(0);
      } else if (key === "\u001b") {
        // Escape: record the cancel and leave without running anything.
        recordAction(winnerArm, "cancel");
        process.exit(0);
      }
      // Any other key is ignored; the process keeps waiting for input.
    });
  }

  main().catch((err) => {
    console.error("Error:", err);
    process.exit(1);
  });
}
|