ultra-dex 1.8.0 → 2.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +165 -140
- package/assets/agents/0-orchestration/orchestrator.md +2 -2
- package/assets/docs/QUICK-REFERENCE.md +3 -3
- package/assets/docs/ROADMAP.md +5 -5
- package/assets/docs/WORKFLOW-DIAGRAMS.md +1 -1
- package/assets/templates/README.md +1 -1
- package/bin/ultra-dex.js +27 -1893
- package/lib/commands/agents.js +151 -0
- package/lib/commands/audit.js +135 -0
- package/lib/commands/banner.js +21 -0
- package/lib/commands/build.js +214 -0
- package/lib/commands/examples.js +34 -0
- package/lib/commands/fetch.js +186 -0
- package/lib/commands/generate.js +217 -0
- package/lib/commands/hooks.js +105 -0
- package/lib/commands/init.js +335 -0
- package/lib/commands/placeholders.js +11 -0
- package/lib/commands/review.js +287 -0
- package/lib/commands/serve.js +173 -0
- package/lib/commands/suggest.js +126 -0
- package/lib/commands/sync.js +35 -0
- package/lib/commands/validate.js +140 -0
- package/lib/commands/workflows.js +185 -0
- package/lib/config/paths.js +9 -0
- package/lib/config/urls.js +16 -0
- package/lib/providers/base.js +82 -0
- package/lib/providers/claude.js +177 -0
- package/lib/providers/gemini.js +170 -0
- package/lib/providers/index.js +93 -0
- package/lib/providers/openai.js +163 -0
- package/lib/templates/context.js +26 -0
- package/lib/templates/embedded.js +141 -0
- package/lib/templates/prompts/generate-plan.js +147 -0
- package/lib/templates/prompts/review-code.js +57 -0
- package/lib/templates/prompts/section-prompts.js +275 -0
- package/lib/templates/prompts/system-prompt.md +58 -0
- package/lib/templates/quick-start.js +43 -0
- package/lib/utils/build-helpers.js +257 -0
- package/lib/utils/fallback.js +38 -0
- package/lib/utils/files.js +26 -0
- package/lib/utils/network.js +18 -0
- package/lib/utils/output.js +20 -0
- package/lib/utils/parser.js +155 -0
- package/lib/utils/prompt-builder.js +93 -0
- package/lib/utils/review-helpers.js +334 -0
- package/lib/utils/sync.js +216 -0
- package/lib/utils/validation.js +34 -0
- package/package.json +17 -3
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
// Canonical GitHub location for the Ultra-Dex project.
export const GITHUB_REPO = 'Srujan0798/Ultra-Dex';
export const GITHUB_WEB_BASE = `https://github.com/${GITHUB_REPO}`;
export const GITHUB_RAW_BASE = `https://raw.githubusercontent.com/${GITHUB_REPO}/main`;

// Builds a "<kind>/main/<path>" URL on the web base; shared by the blob/tree helpers.
const mainRef = (kind, pathname) => `${GITHUB_WEB_BASE}/${kind}/main/${pathname}`;

/** URL of a single file rendered in the GitHub web UI (main branch). */
export function githubBlobUrl(pathname) {
  return mainRef('blob', pathname);
}

/** URL of a directory listing in the GitHub web UI (main branch). */
export function githubTreeUrl(pathname) {
  return mainRef('tree', pathname);
}

/** Repo-relative web URL; with no argument, the repository home page. */
export function githubWebUrl(pathname = '') {
  return pathname ? `${GITHUB_WEB_BASE}/${pathname}` : GITHUB_WEB_BASE;
}
|
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
/**
 * Base AI Provider Interface.
 *
 * Abstract contract shared by every concrete provider (Claude, OpenAI,
 * Gemini). Each stub below must be overridden by a subclass; invoking one
 * that was not overridden throws immediately so a misconfigured provider
 * fails loudly instead of silently misbehaving.
 */

// Uniform error for contract methods a subclass forgot to override.
const notImplemented = (method) => {
  throw new Error(`${method}() must be implemented by subclass`);
};

export class BaseProvider {
  /**
   * @param {string} apiKey - Credential for the provider's HTTP API.
   * @param {Object} [options]
   * @param {string} [options.model] - Model ID; falls back to the subclass's getDefaultModel().
   * @param {number} [options.maxTokens] - Output-token cap (default 8192).
   */
  constructor(apiKey, options = {}) {
    // Only subclasses are constructible; `new BaseProvider(...)` is rejected.
    if (new.target === BaseProvider) {
      throw new Error('BaseProvider is abstract and cannot be instantiated directly');
    }
    this.apiKey = apiKey;
    this.model = options.model || this.getDefaultModel();
    this.maxTokens = options.maxTokens || 8192;
  }

  /**
   * Human-readable provider name.
   * @returns {string}
   */
  getName() {
    notImplemented('getName');
  }

  /**
   * Default model identifier for this provider.
   * @returns {string}
   */
  getDefaultModel() {
    notImplemented('getDefaultModel');
  }

  /**
   * Models this provider can serve.
   * @returns {Array<{id: string, name: string, maxTokens: number}>}
   */
  getAvailableModels() {
    notImplemented('getAvailableModels');
  }

  /**
   * Estimate the USD cost for a given token count.
   * @param {number} inputTokens - Number of input tokens
   * @param {number} outputTokens - Number of output tokens
   * @returns {{input: number, output: number, total: number}} Cost in USD
   */
  estimateCost(inputTokens, outputTokens) {
    notImplemented('estimateCost');
  }

  /**
   * Produce a full completion in a single call.
   * @param {string} systemPrompt - System instructions
   * @param {string} userPrompt - User message/request
   * @param {Object} [options] - Additional options
   * @returns {Promise<{content: string, usage: {inputTokens: number, outputTokens: number}}>}
   */
  async generate(systemPrompt, userPrompt, options = {}) {
    notImplemented('generate');
  }

  /**
   * Produce a completion, invoking onChunk(text) for each streamed piece.
   * @param {string} systemPrompt - System instructions
   * @param {string} userPrompt - User message/request
   * @param {Function} onChunk - Callback for each chunk: (text: string) => void
   * @param {Object} [options] - Additional options
   * @returns {Promise<{content: string, usage: {inputTokens: number, outputTokens: number}}>}
   */
  async generateStream(systemPrompt, userPrompt, onChunk, options = {}) {
    notImplemented('generateStream');
  }

  /**
   * Verify the configured API key actually works.
   * @returns {Promise<boolean>}
   */
  async validateApiKey() {
    notImplemented('validateApiKey');
  }
}

export default BaseProvider;
|
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Claude AI Provider (Anthropic)
|
|
3
|
+
* Primary provider for Ultra-Dex generate command
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import { BaseProvider } from './base.js';
|
|
7
|
+
|
|
8
|
+
// Model pricing per 1M tokens (as of Jan 2026)
// NOTE(review): rates drift over time — verify against Anthropic's current pricing page.
const PRICING = {
  'claude-sonnet-4-20250514': { input: 3.00, output: 15.00 },
  'claude-3-5-sonnet-20241022': { input: 3.00, output: 15.00 },
  'claude-3-opus-20240229': { input: 15.00, output: 75.00 },
  'claude-3-haiku-20240307': { input: 0.25, output: 1.25 },
};

// Models exposed via getAvailableModels(); `default: true` marks the
// pre-selected entry. `maxTokens` is presumably the output-token ceiling
// shown to callers — confirm against how consumers use this field.
const MODELS = [
  { id: 'claude-sonnet-4-20250514', name: 'Claude Sonnet 4 (Latest)', maxTokens: 8192, default: true },
  { id: 'claude-3-5-sonnet-20241022', name: 'Claude 3.5 Sonnet', maxTokens: 8192 },
  { id: 'claude-3-opus-20240229', name: 'Claude 3 Opus (Premium)', maxTokens: 4096 },
  { id: 'claude-3-haiku-20240307', name: 'Claude 3 Haiku (Fast)', maxTokens: 4096 },
];
|
|
22
|
+
|
|
23
|
+
/**
 * Claude AI Provider (Anthropic) — primary provider for the generate command.
 *
 * Talks to the Anthropic Messages API (`/v1/messages`) with plain fetch (no
 * SDK dependency). Streaming parses the SSE event types documented by
 * Anthropic: `message_start` (input token count), `content_block_delta`
 * (text), and `message_delta` (output token count).
 */
export class ClaudeProvider extends BaseProvider {
  constructor(apiKey, options = {}) {
    super(apiKey, options);
    this.baseUrl = 'https://api.anthropic.com/v1';
    this.apiVersion = '2023-06-01';
  }

  getName() {
    return 'Claude (Anthropic)';
  }

  getDefaultModel() {
    return 'claude-sonnet-4-20250514';
  }

  getAvailableModels() {
    return MODELS;
  }

  /**
   * Estimate the USD cost at this model's rates in PRICING; unknown model IDs
   * fall back to Sonnet 4 pricing.
   * @param {number} inputTokens
   * @param {number} outputTokens
   * @returns {{input: number, output: number, total: number}}
   */
  estimateCost(inputTokens, outputTokens) {
    const pricing = PRICING[this.model] || PRICING['claude-sonnet-4-20250514'];
    const inputCost = (inputTokens / 1_000_000) * pricing.input;
    const outputCost = (outputTokens / 1_000_000) * pricing.output;
    return {
      input: inputCost,
      output: outputCost,
      total: inputCost + outputCost,
    };
  }

  // Headers shared by every Messages API call.
  #headers() {
    return {
      'Content-Type': 'application/json',
      'x-api-key': this.apiKey,
      'anthropic-version': this.apiVersion,
    };
  }

  // Raise a descriptive error for a non-2xx API response.
  async #throwApiError(response) {
    const error = await response.json().catch(() => ({}));
    throw new Error(`Claude API error: ${error.error?.message || response.statusText}`);
  }

  /**
   * Produce a full completion in a single call.
   * @param {string} systemPrompt - System instructions
   * @param {string} userPrompt - User message/request
   * @param {Object} [options] - { maxTokens }
   * @returns {Promise<{content: string, usage: {inputTokens: number, outputTokens: number}}>}
   * @throws {Error} on any non-2xx API response
   */
  async generate(systemPrompt, userPrompt, options = {}) {
    const response = await fetch(`${this.baseUrl}/messages`, {
      method: 'POST',
      headers: this.#headers(),
      body: JSON.stringify({
        model: this.model,
        max_tokens: options.maxTokens || this.maxTokens,
        system: systemPrompt,
        messages: [
          { role: 'user', content: userPrompt }
        ],
      }),
    });

    if (!response.ok) {
      await this.#throwApiError(response);
    }

    const data = await response.json();

    return {
      // Optional-chain the full path: a missing/empty `content` array should
      // yield '' rather than throw (previously `data.content[0]` could throw).
      content: data.content?.[0]?.text || '',
      usage: {
        inputTokens: data.usage?.input_tokens || 0,
        outputTokens: data.usage?.output_tokens || 0,
      },
    };
  }

  /**
   * Produce a completion with streaming; onChunk(text) is called per delta.
   * @param {string} systemPrompt - System instructions
   * @param {string} userPrompt - User message/request
   * @param {Function} onChunk - Callback for each chunk: (text: string) => void
   * @param {Object} [options] - { maxTokens }
   * @returns {Promise<{content: string, usage: {inputTokens: number, outputTokens: number}}>}
   * @throws {Error} on any non-2xx API response
   */
  async generateStream(systemPrompt, userPrompt, onChunk, options = {}) {
    const response = await fetch(`${this.baseUrl}/messages`, {
      method: 'POST',
      headers: this.#headers(),
      body: JSON.stringify({
        model: this.model,
        max_tokens: options.maxTokens || this.maxTokens,
        stream: true,
        system: systemPrompt,
        messages: [
          { role: 'user', content: userPrompt }
        ],
      }),
    });

    if (!response.ok) {
      await this.#throwApiError(response);
    }

    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = ''; // carries a partial SSE line between reads
    let fullContent = '';
    const usage = { inputTokens: 0, outputTokens: 0 };

    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      // { stream: true } keeps multi-byte UTF-8 sequences split across network
      // chunks intact (a plain decode() would emit replacement characters).
      buffer += decoder.decode(value, { stream: true });

      // An SSE line can also span two reads: process only complete lines and
      // retain the trailing partial line for the next iteration. The old code
      // split each chunk independently and silently lost split events.
      const lines = buffer.split('\n');
      buffer = lines.pop();

      for (const line of lines) {
        if (!line.startsWith('data: ')) continue;
        const data = line.slice(6);
        if (data === '[DONE]') continue;

        try {
          const parsed = JSON.parse(data);

          if (parsed.type === 'content_block_delta' && parsed.delta?.text) {
            fullContent += parsed.delta.text;
            onChunk(parsed.delta.text);
          }

          // message_start carries input token usage; message_delta the output count.
          if (parsed.type === 'message_start' && parsed.message?.usage) {
            usage.inputTokens = parsed.message.usage.input_tokens || 0;
          }

          if (parsed.type === 'message_delta' && parsed.usage) {
            usage.outputTokens = parsed.usage.output_tokens || 0;
          }
        } catch {
          // Skip malformed JSON
        }
      }
    }

    return { content: fullContent, usage };
  }

  /**
   * Check the API key by issuing a minimal Messages request.
   * A 400 also counts as valid: the key authenticated but the probe request
   * itself was rejected as malformed.
   * @returns {Promise<boolean>}
   */
  async validateApiKey() {
    try {
      const response = await fetch(`${this.baseUrl}/messages`, {
        method: 'POST',
        headers: this.#headers(),
        body: JSON.stringify({
          model: 'claude-3-haiku-20240307',
          max_tokens: 10,
          messages: [{ role: 'user', content: 'Hi' }],
        }),
      });

      return response.ok || response.status === 400; // 400 is OK, means key is valid but request malformed
    } catch {
      return false;
    }
  }
}

export default ClaudeProvider;
|
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Google Gemini Provider
|
|
3
|
+
* Gemini models for Ultra-Dex generate command
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import { BaseProvider } from './base.js';
|
|
7
|
+
|
|
8
|
+
// Model pricing per 1M tokens (as of Jan 2026)
// NOTE(review): rates drift over time — verify against Google's current pricing page.
const PRICING = {
  'gemini-1.5-pro': { input: 1.25, output: 5.00 },
  'gemini-1.5-flash': { input: 0.075, output: 0.30 },
  'gemini-2.0-flash-exp': { input: 0.10, output: 0.40 },
};

// Models exposed via getAvailableModels(); `default: true` marks the
// pre-selected entry. `maxTokens` is presumably the output-token ceiling
// shown to callers — confirm against how consumers use this field.
const MODELS = [
  { id: 'gemini-1.5-pro', name: 'Gemini 1.5 Pro', maxTokens: 8192, default: true },
  { id: 'gemini-1.5-flash', name: 'Gemini 1.5 Flash (Fast)', maxTokens: 8192 },
  { id: 'gemini-2.0-flash-exp', name: 'Gemini 2.0 Flash (Experimental)', maxTokens: 8192 },
];
|
|
20
|
+
|
|
21
|
+
/**
 * Google Gemini Provider — Gemini models for the generate command.
 *
 * Talks to the Generative Language REST API with plain fetch. The API key is
 * sent via the `x-goog-api-key` header rather than a `?key=` query parameter,
 * so it never appears in URLs (and therefore in proxy/server access logs).
 */
export class GeminiProvider extends BaseProvider {
  constructor(apiKey, options = {}) {
    super(apiKey, options);
    this.baseUrl = 'https://generativelanguage.googleapis.com/v1beta';
  }

  getName() {
    return 'Google Gemini';
  }

  getDefaultModel() {
    return 'gemini-1.5-pro';
  }

  getAvailableModels() {
    return MODELS;
  }

  /**
   * Estimate the USD cost at this model's rates in PRICING; unknown model IDs
   * fall back to 1.5 Pro pricing.
   * @param {number} inputTokens
   * @param {number} outputTokens
   * @returns {{input: number, output: number, total: number}}
   */
  estimateCost(inputTokens, outputTokens) {
    const pricing = PRICING[this.model] || PRICING['gemini-1.5-pro'];
    const inputCost = (inputTokens / 1_000_000) * pricing.input;
    const outputCost = (outputTokens / 1_000_000) * pricing.output;
    return {
      input: inputCost,
      output: outputCost,
      total: inputCost + outputCost,
    };
  }

  // Headers shared by every API call; the key travels in a header, not the URL.
  #headers() {
    return {
      'Content-Type': 'application/json',
      'x-goog-api-key': this.apiKey,
    };
  }

  // Request body shared by generate() and generateStream().
  #payload(systemPrompt, userPrompt, options) {
    return {
      systemInstruction: {
        parts: [{ text: systemPrompt }],
      },
      contents: [
        {
          role: 'user',
          parts: [{ text: userPrompt }],
        },
      ],
      generationConfig: {
        maxOutputTokens: options.maxTokens || this.maxTokens,
      },
    };
  }

  // Raise a descriptive error for a non-2xx API response.
  async #throwApiError(response) {
    const error = await response.json().catch(() => ({}));
    throw new Error(`Gemini API error: ${error.error?.message || response.statusText}`);
  }

  /**
   * Produce a full completion in a single call.
   * @param {string} systemPrompt - System instructions
   * @param {string} userPrompt - User message/request
   * @param {Object} [options] - { maxTokens }
   * @returns {Promise<{content: string, usage: {inputTokens: number, outputTokens: number}}>}
   * @throws {Error} on any non-2xx API response
   */
  async generate(systemPrompt, userPrompt, options = {}) {
    const url = `${this.baseUrl}/models/${this.model}:generateContent`;

    const response = await fetch(url, {
      method: 'POST',
      headers: this.#headers(),
      body: JSON.stringify(this.#payload(systemPrompt, userPrompt, options)),
    });

    if (!response.ok) {
      await this.#throwApiError(response);
    }

    const data = await response.json();
    const content = data.candidates?.[0]?.content?.parts?.[0]?.text || '';

    return {
      content,
      usage: {
        inputTokens: data.usageMetadata?.promptTokenCount || 0,
        outputTokens: data.usageMetadata?.candidatesTokenCount || 0,
      },
    };
  }

  /**
   * Produce a completion with streaming (`:streamGenerateContent` + `alt=sse`);
   * onChunk(text) is called per delta.
   * @param {string} systemPrompt - System instructions
   * @param {string} userPrompt - User message/request
   * @param {Function} onChunk - Callback for each chunk: (text: string) => void
   * @param {Object} [options] - { maxTokens }
   * @returns {Promise<{content: string, usage: {inputTokens: number, outputTokens: number}}>}
   * @throws {Error} on any non-2xx API response
   */
  async generateStream(systemPrompt, userPrompt, onChunk, options = {}) {
    const url = `${this.baseUrl}/models/${this.model}:streamGenerateContent?alt=sse`;

    const response = await fetch(url, {
      method: 'POST',
      headers: this.#headers(),
      body: JSON.stringify(this.#payload(systemPrompt, userPrompt, options)),
    });

    if (!response.ok) {
      await this.#throwApiError(response);
    }

    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = ''; // carries a partial SSE line between reads
    let fullContent = '';
    const usage = { inputTokens: 0, outputTokens: 0 };

    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      // { stream: true } keeps multi-byte UTF-8 sequences split across network
      // chunks intact (a plain decode() would emit replacement characters).
      buffer += decoder.decode(value, { stream: true });

      // An SSE line can also span two reads: process only complete lines and
      // retain the trailing partial line for the next iteration. The old code
      // split each chunk independently and silently lost split events.
      const lines = buffer.split('\n');
      buffer = lines.pop();

      for (const line of lines) {
        if (!line.startsWith('data: ')) continue;
        const data = line.slice(6);

        try {
          const parsed = JSON.parse(data);

          const text = parsed.candidates?.[0]?.content?.parts?.[0]?.text;
          if (text) {
            fullContent += text;
            onChunk(text);
          }

          // usageMetadata arrives cumulatively; the last one seen wins.
          if (parsed.usageMetadata) {
            usage.inputTokens = parsed.usageMetadata.promptTokenCount || 0;
            usage.outputTokens = parsed.usageMetadata.candidatesTokenCount || 0;
          }
        } catch {
          // Skip malformed JSON
        }
      }
    }

    return { content: fullContent, usage };
  }

  /**
   * Check the API key by listing models (cheap, no generation cost).
   * @returns {Promise<boolean>}
   */
  async validateApiKey() {
    try {
      const response = await fetch(`${this.baseUrl}/models`, {
        headers: { 'x-goog-api-key': this.apiKey },
      });
      return response.ok;
    } catch {
      return false;
    }
  }
}

export default GeminiProvider;
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* AI Provider Factory
|
|
3
|
+
* Creates and manages AI providers for the generate command
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import { ClaudeProvider } from './claude.js';
|
|
7
|
+
import { OpenAIProvider } from './openai.js';
|
|
8
|
+
import { GeminiProvider } from './gemini.js';
|
|
9
|
+
|
|
10
|
+
// Registry of supported providers. `class` is the concrete BaseProvider
// subclass, `envKey` names the environment variable holding the API key,
// `name` is the human-readable label. Insertion order (claude, openai,
// gemini) matches the preference order checked in getDefaultProvider().
const PROVIDERS = {
  claude: {
    class: ClaudeProvider,
    envKey: 'ANTHROPIC_API_KEY',
    name: 'Claude (Anthropic)',
  },
  openai: {
    class: OpenAIProvider,
    envKey: 'OPENAI_API_KEY',
    name: 'OpenAI',
  },
  gemini: {
    class: GeminiProvider,
    envKey: 'GOOGLE_AI_KEY',
    name: 'Google Gemini',
  },
};
|
|
27
|
+
|
|
28
|
+
/**
 * List every provider this build knows about.
 * @returns {Array<{id: string, name: string, envKey: string}>}
 */
export function getAvailableProviders() {
  const providers = [];
  for (const [id, { name, envKey }] of Object.entries(PROVIDERS)) {
    providers.push({ id, name, envKey });
  }
  return providers;
}
|
|
39
|
+
|
|
40
|
+
/**
 * Create an AI provider instance.
 * @param {string} providerId - Provider identifier (claude, openai, gemini)
 * @param {Object} options - Provider options
 * @param {string} options.apiKey - API key (optional, will use env var if not provided)
 * @param {string} options.model - Model to use (optional)
 * @returns {BaseProvider}
 * @throws {Error} if the provider ID is unknown or no API key can be found
 */
export function createProvider(providerId, options = {}) {
  const config = PROVIDERS[providerId];
  if (!config) {
    throw new Error(`Unknown provider: ${providerId}. Available: ${Object.keys(PROVIDERS).join(', ')}`);
  }

  // An explicitly passed key beats the environment variable.
  const apiKey = options.apiKey || process.env[config.envKey];
  if (!apiKey) {
    throw new Error(
      `API key not found for ${config.name}.\nSet the ${config.envKey} environment variable or use --key option.`
    );
  }

  return new config.class(apiKey, options);
}
|
|
67
|
+
|
|
68
|
+
/**
 * Get the default provider based on available API keys.
 *
 * Providers are checked in PROVIDERS registration order (claude, openai,
 * gemini), which encodes the preference order.
 * @returns {string|null} Provider ID or null if none available
 */
export function getDefaultProvider() {
  // Derive the env-var names from the PROVIDERS registry instead of
  // hard-coding them again here, so the two cannot drift apart.
  for (const [id, config] of Object.entries(PROVIDERS)) {
    if (process.env[config.envKey]) return id;
  }
  return null;
}
|
|
79
|
+
|
|
80
|
+
/**
 * Report, for every known provider, whether its API key env var is set.
 * @returns {Array<{id: string, name: string, envKey: string, configured: boolean}>}
 */
export function checkConfiguredProviders() {
  return Object.entries(PROVIDERS).map(([id, { name, envKey }]) => ({
    id,
    name,
    envKey,
    configured: Boolean(process.env[envKey]),
  }));
}
|
|
92
|
+
|
|
93
|
+
export { ClaudeProvider, OpenAIProvider, GeminiProvider };
|
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI Provider
|
|
3
|
+
* GPT models for Ultra-Dex generate command
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import { BaseProvider } from './base.js';
|
|
7
|
+
|
|
8
|
+
// Model pricing per 1M tokens (as of Jan 2026)
// NOTE(review): rates drift over time — verify against OpenAI's current pricing page.
const PRICING = {
  'gpt-4o': { input: 2.50, output: 10.00 },
  'gpt-4o-mini': { input: 0.15, output: 0.60 },
  'gpt-4-turbo': { input: 10.00, output: 30.00 },
  'gpt-4': { input: 30.00, output: 60.00 },
};

// Models exposed via getAvailableModels(); `default: true` marks the
// pre-selected entry. `maxTokens` is presumably the output-token ceiling
// shown to callers — confirm against how consumers use this field.
const MODELS = [
  { id: 'gpt-4o', name: 'GPT-4o (Latest)', maxTokens: 16384, default: true },
  { id: 'gpt-4o-mini', name: 'GPT-4o Mini (Fast)', maxTokens: 16384 },
  { id: 'gpt-4-turbo', name: 'GPT-4 Turbo', maxTokens: 4096 },
  { id: 'gpt-4', name: 'GPT-4', maxTokens: 8192 },
];
|
|
22
|
+
|
|
23
|
+
/**
 * OpenAI Provider — GPT models for the generate command.
 *
 * Talks to the Chat Completions API (`/v1/chat/completions`) with plain
 * fetch. Streaming requests `stream_options.include_usage` so the final SSE
 * chunk carries token counts.
 */
export class OpenAIProvider extends BaseProvider {
  constructor(apiKey, options = {}) {
    super(apiKey, options);
    this.baseUrl = 'https://api.openai.com/v1';
  }

  getName() {
    return 'OpenAI';
  }

  getDefaultModel() {
    return 'gpt-4o';
  }

  getAvailableModels() {
    return MODELS;
  }

  /**
   * Estimate the USD cost at this model's rates in PRICING; unknown model IDs
   * fall back to gpt-4o pricing.
   * @param {number} inputTokens
   * @param {number} outputTokens
   * @returns {{input: number, output: number, total: number}}
   */
  estimateCost(inputTokens, outputTokens) {
    const pricing = PRICING[this.model] || PRICING['gpt-4o'];
    const inputCost = (inputTokens / 1_000_000) * pricing.input;
    const outputCost = (outputTokens / 1_000_000) * pricing.output;
    return {
      input: inputCost,
      output: outputCost,
      total: inputCost + outputCost,
    };
  }

  // Headers shared by every API call.
  #headers() {
    return {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${this.apiKey}`,
    };
  }

  // Raise a descriptive error for a non-2xx API response.
  async #throwApiError(response) {
    const error = await response.json().catch(() => ({}));
    throw new Error(`OpenAI API error: ${error.error?.message || response.statusText}`);
  }

  /**
   * Produce a full completion in a single call.
   * @param {string} systemPrompt - System instructions
   * @param {string} userPrompt - User message/request
   * @param {Object} [options] - { maxTokens }
   * @returns {Promise<{content: string, usage: {inputTokens: number, outputTokens: number}}>}
   * @throws {Error} on any non-2xx API response
   */
  async generate(systemPrompt, userPrompt, options = {}) {
    const response = await fetch(`${this.baseUrl}/chat/completions`, {
      method: 'POST',
      headers: this.#headers(),
      body: JSON.stringify({
        model: this.model,
        max_tokens: options.maxTokens || this.maxTokens,
        messages: [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: userPrompt },
        ],
      }),
    });

    if (!response.ok) {
      await this.#throwApiError(response);
    }

    const data = await response.json();

    return {
      // Optional-chain the full path: a missing/empty `choices` array should
      // yield '' rather than throw (previously `data.choices[0]` could throw).
      content: data.choices?.[0]?.message?.content || '',
      usage: {
        inputTokens: data.usage?.prompt_tokens || 0,
        outputTokens: data.usage?.completion_tokens || 0,
      },
    };
  }

  /**
   * Produce a completion with streaming; onChunk(text) is called per delta.
   * @param {string} systemPrompt - System instructions
   * @param {string} userPrompt - User message/request
   * @param {Function} onChunk - Callback for each chunk: (text: string) => void
   * @param {Object} [options] - { maxTokens }
   * @returns {Promise<{content: string, usage: {inputTokens: number, outputTokens: number}}>}
   * @throws {Error} on any non-2xx API response
   */
  async generateStream(systemPrompt, userPrompt, onChunk, options = {}) {
    const response = await fetch(`${this.baseUrl}/chat/completions`, {
      method: 'POST',
      headers: this.#headers(),
      body: JSON.stringify({
        model: this.model,
        max_tokens: options.maxTokens || this.maxTokens,
        stream: true,
        stream_options: { include_usage: true },
        messages: [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: userPrompt },
        ],
      }),
    });

    if (!response.ok) {
      await this.#throwApiError(response);
    }

    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = ''; // carries a partial SSE line between reads
    let fullContent = '';
    const usage = { inputTokens: 0, outputTokens: 0 };

    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      // { stream: true } keeps multi-byte UTF-8 sequences split across network
      // chunks intact (a plain decode() would emit replacement characters).
      buffer += decoder.decode(value, { stream: true });

      // An SSE line can also span two reads: process only complete lines and
      // retain the trailing partial line for the next iteration. The old code
      // split each chunk independently and silently lost split events.
      const lines = buffer.split('\n');
      buffer = lines.pop();

      for (const line of lines) {
        if (!line.startsWith('data: ')) continue;
        const data = line.slice(6);
        if (data === '[DONE]') continue;

        try {
          const parsed = JSON.parse(data);

          const content = parsed.choices?.[0]?.delta?.content;
          if (content) {
            fullContent += content;
            onChunk(content);
          }

          // With include_usage, the final chunk (empty choices) carries totals.
          if (parsed.usage) {
            usage.inputTokens = parsed.usage.prompt_tokens || 0;
            usage.outputTokens = parsed.usage.completion_tokens || 0;
          }
        } catch {
          // Skip malformed JSON
        }
      }
    }

    return { content: fullContent, usage };
  }

  /**
   * Check the API key by listing models (cheap, no generation cost).
   * @returns {Promise<boolean>}
   */
  async validateApiKey() {
    try {
      const response = await fetch(`${this.baseUrl}/models`, {
        headers: {
          'Authorization': `Bearer ${this.apiKey}`,
        },
      });
      return response.ok;
    } catch {
      return false;
    }
  }
}

export default OpenAIProvider;
|