@acmecloud/core 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent/index.d.ts +52 -0
- package/dist/agent/index.js +476 -0
- package/dist/config/index.d.ts +83 -0
- package/dist/config/index.js +318 -0
- package/dist/context/index.d.ts +1 -0
- package/dist/context/index.js +30 -0
- package/dist/llm/provider.d.ts +27 -0
- package/dist/llm/provider.js +202 -0
- package/dist/llm/vision.d.ts +7 -0
- package/dist/llm/vision.js +37 -0
- package/dist/mcp/index.d.ts +10 -0
- package/dist/mcp/index.js +84 -0
- package/dist/prompt/anthropic.d.ts +1 -0
- package/dist/prompt/anthropic.js +32 -0
- package/dist/prompt/architect.d.ts +1 -0
- package/dist/prompt/architect.js +17 -0
- package/dist/prompt/autopilot.d.ts +1 -0
- package/dist/prompt/autopilot.js +18 -0
- package/dist/prompt/beast.d.ts +1 -0
- package/dist/prompt/beast.js +83 -0
- package/dist/prompt/gemini.d.ts +1 -0
- package/dist/prompt/gemini.js +45 -0
- package/dist/prompt/index.d.ts +18 -0
- package/dist/prompt/index.js +239 -0
- package/dist/prompt/zen.d.ts +1 -0
- package/dist/prompt/zen.js +13 -0
- package/dist/session/index.d.ts +18 -0
- package/dist/session/index.js +97 -0
- package/dist/skills/index.d.ts +6 -0
- package/dist/skills/index.js +72 -0
- package/dist/tools/batch.d.ts +2 -0
- package/dist/tools/batch.js +65 -0
- package/dist/tools/browser.d.ts +7 -0
- package/dist/tools/browser.js +86 -0
- package/dist/tools/edit.d.ts +11 -0
- package/dist/tools/edit.js +312 -0
- package/dist/tools/index.d.ts +13 -0
- package/dist/tools/index.js +980 -0
- package/dist/tools/lsp-client.d.ts +11 -0
- package/dist/tools/lsp-client.js +224 -0
- package/package.json +42 -0
- package/src/agent/index.ts +588 -0
- package/src/config/index.ts +383 -0
- package/src/context/index.ts +34 -0
- package/src/llm/provider.ts +237 -0
- package/src/llm/vision.ts +43 -0
- package/src/mcp/index.ts +110 -0
- package/src/prompt/anthropic.ts +32 -0
- package/src/prompt/architect.ts +17 -0
- package/src/prompt/autopilot.ts +18 -0
- package/src/prompt/beast.ts +83 -0
- package/src/prompt/gemini.ts +45 -0
- package/src/prompt/index.ts +267 -0
- package/src/prompt/zen.ts +13 -0
- package/src/session/index.ts +129 -0
- package/src/skills/index.ts +86 -0
- package/src/tools/batch.ts +73 -0
- package/src/tools/browser.ts +95 -0
- package/src/tools/edit.ts +317 -0
- package/src/tools/index.ts +1112 -0
- package/src/tools/lsp-client.ts +303 -0
- package/tsconfig.json +19 -0
|
@@ -0,0 +1,383 @@
|
|
|
1
|
+
import { config } from "dotenv";
import { existsSync, mkdirSync, readFileSync, writeFileSync } from "fs";
import { homedir } from "os";
import { dirname, join, resolve } from "path";
|
|
5
|
+
|
|
6
|
+
// Load environment variables from .env in project root if it exists.
// NOTE(review): dotenv does not override variables that are already set in
// process.env, so values loaded here win over the global file below —
// confirm this precedence is intentional.
config({ quiet: true });

// Load environment variables from ~/.acmecode/.env if it exists
config({ path: resolve(homedir(), ".acmecode", ".env"), quiet: true });
|
|
11
|
+
|
|
12
|
+
/** Model selection persisted in config.json (project-level or global). */
export interface ModelConfig {
  provider: string;
  model: string;
  // Optional separate model used for image analysis (see llm/vision).
  visionProvider?: string;
  visionModel?: string;
}

/** Per-provider connection settings stored under `providers` in the global config. */
export interface ProviderSettings {
  apiKey: string;
  baseUrl?: string;
  // Marks user-defined providers that are not in the built-in list.
  isCustom?: boolean;
  // Wire protocol to speak ("openai", "anthropic", "google", ...).
  protocol?: string;
}

export type AgentMode = "plan" | "code" | "agent" | "zen";
export type ReasoningLevel = "low" | "medium" | "high" | "max" | "xhigh";

// Global configuration lives in ~/.acmecode/config.json.
const GLOBAL_CONFIG_DIR = resolve(homedir(), ".acmecode");
const GLOBAL_CONFIG_FILE = join(GLOBAL_CONFIG_DIR, "config.json");
|
|
31
|
+
|
|
32
|
+
function getProjectConfigDir(): string {
|
|
33
|
+
return resolve(process.cwd(), ".acmecode");
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
function getProjectConfigFile(): string {
|
|
37
|
+
return join(getProjectConfigDir(), "config.json");
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
function readJsonFile(filePath: string): any {
|
|
41
|
+
try {
|
|
42
|
+
if (existsSync(filePath)) {
|
|
43
|
+
return JSON.parse(readFileSync(filePath, "utf-8"));
|
|
44
|
+
}
|
|
45
|
+
} catch {}
|
|
46
|
+
return null;
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
function writeJsonFile(filePath: string, data: any): void {
|
|
50
|
+
const dir = resolve(filePath, "..");
|
|
51
|
+
if (!existsSync(dir)) {
|
|
52
|
+
mkdirSync(dir, { recursive: true });
|
|
53
|
+
}
|
|
54
|
+
writeFileSync(filePath, JSON.stringify(data, null, 2), "utf-8");
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
/**
|
|
58
|
+
* Load model config with priority: project .acmecode/config.json > global ~/.acmecode/config.json > defaults
|
|
59
|
+
*/
|
|
60
|
+
export function loadModelConfig(): ModelConfig {
|
|
61
|
+
const projectConfig = readJsonFile(getProjectConfigFile());
|
|
62
|
+
const globalConfig = readJsonFile(GLOBAL_CONFIG_FILE);
|
|
63
|
+
|
|
64
|
+
// Resolve with priority: project > global > defaults
|
|
65
|
+
const provider =
|
|
66
|
+
projectConfig?.provider || globalConfig?.provider || "extralink";
|
|
67
|
+
const model = projectConfig?.model || globalConfig?.model || "";
|
|
68
|
+
const visionProvider =
|
|
69
|
+
projectConfig?.visionProvider || globalConfig?.visionProvider;
|
|
70
|
+
const visionModel = projectConfig?.visionModel || globalConfig?.visionModel;
|
|
71
|
+
|
|
72
|
+
return { provider, model, visionProvider, visionModel };
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
/**
|
|
76
|
+
* Load language config with priority: project .acmecode/config.json > global ~/.acmecode/config.json > default "en"
|
|
77
|
+
*/
|
|
78
|
+
export function loadLangConfig(): string {
|
|
79
|
+
const projectConfig = readJsonFile(getProjectConfigFile());
|
|
80
|
+
if (projectConfig?.lang) return projectConfig.lang;
|
|
81
|
+
|
|
82
|
+
const globalConfig = readJsonFile(GLOBAL_CONFIG_FILE);
|
|
83
|
+
if (globalConfig?.lang) return globalConfig.lang;
|
|
84
|
+
|
|
85
|
+
return "en";
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
/**
|
|
89
|
+
* Load theme config with priority: project .acmecode/config.json > global ~/.acmecode/config.json > default "dark"
|
|
90
|
+
*/
|
|
91
|
+
export function loadThemeConfig(): string {
|
|
92
|
+
const projectConfig = readJsonFile(getProjectConfigFile());
|
|
93
|
+
if (projectConfig?.theme) return projectConfig.theme;
|
|
94
|
+
|
|
95
|
+
const globalConfig = readJsonFile(GLOBAL_CONFIG_FILE);
|
|
96
|
+
if (globalConfig?.theme) return globalConfig.theme;
|
|
97
|
+
|
|
98
|
+
return "dark";
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
/**
|
|
102
|
+
* Load reasoning level config with priority: project .acmecode/config.json > global ~/.acmecode/config.json > default "medium"
|
|
103
|
+
*/
|
|
104
|
+
export function loadReasoningLevel(): ReasoningLevel {
|
|
105
|
+
const projectConfig = readJsonFile(getProjectConfigFile());
|
|
106
|
+
if (projectConfig?.reasoning)
|
|
107
|
+
return projectConfig.reasoning as ReasoningLevel;
|
|
108
|
+
|
|
109
|
+
const globalConfig = readJsonFile(GLOBAL_CONFIG_FILE);
|
|
110
|
+
if (globalConfig?.reasoning) return globalConfig.reasoning as ReasoningLevel;
|
|
111
|
+
|
|
112
|
+
return "medium";
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
/**
|
|
116
|
+
* Load agent mode config with priority: project .acmecode/config.json > default "agent"
|
|
117
|
+
*/
|
|
118
|
+
export function loadAgentModeConfig(): { mode: AgentMode; planFile?: string } {
|
|
119
|
+
const projectConfig = readJsonFile(getProjectConfigFile());
|
|
120
|
+
return {
|
|
121
|
+
mode: (projectConfig?.agentMode as AgentMode) || "agent",
|
|
122
|
+
planFile: projectConfig?.activePlanFile,
|
|
123
|
+
};
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
/**
|
|
127
|
+
* Save model config to the project's .acmecode/config.json
|
|
128
|
+
*/
|
|
129
|
+
export function saveProjectModelConfig(
|
|
130
|
+
provider: string,
|
|
131
|
+
model: string,
|
|
132
|
+
visionProvider?: string,
|
|
133
|
+
visionModel?: string,
|
|
134
|
+
): void {
|
|
135
|
+
const filePath = getProjectConfigFile();
|
|
136
|
+
const existing = readJsonFile(filePath) || {};
|
|
137
|
+
existing.provider = provider;
|
|
138
|
+
existing.model = model;
|
|
139
|
+
if (visionProvider) existing.visionProvider = visionProvider;
|
|
140
|
+
if (visionModel) existing.visionModel = visionModel;
|
|
141
|
+
writeJsonFile(filePath, existing);
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
/**
|
|
145
|
+
* Save agent mode config to the project's .acmecode/config.json
|
|
146
|
+
*/
|
|
147
|
+
export function saveAgentModeConfig(mode: AgentMode, planFile?: string): void {
|
|
148
|
+
const filePath = getProjectConfigFile();
|
|
149
|
+
const existing = readJsonFile(filePath) || {};
|
|
150
|
+
existing.agentMode = mode;
|
|
151
|
+
if (planFile !== undefined) {
|
|
152
|
+
existing.activePlanFile = planFile;
|
|
153
|
+
} else {
|
|
154
|
+
delete existing.activePlanFile;
|
|
155
|
+
}
|
|
156
|
+
writeJsonFile(filePath, existing);
|
|
157
|
+
}
|
|
158
|
+
|
|
159
|
+
/**
|
|
160
|
+
* Save model config to the global ~/.acmecode/config.json
|
|
161
|
+
*/
|
|
162
|
+
export function saveGlobalModelConfig(provider: string, model: string): void {
|
|
163
|
+
const existing = readJsonFile(GLOBAL_CONFIG_FILE) || {};
|
|
164
|
+
existing.provider = provider;
|
|
165
|
+
existing.model = model;
|
|
166
|
+
writeJsonFile(GLOBAL_CONFIG_FILE, existing);
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
/**
|
|
170
|
+
* Save language config to the global ~/.acmecode/config.json
|
|
171
|
+
*/
|
|
172
|
+
export function saveGlobalLangConfig(lang: string): void {
|
|
173
|
+
const existing = readJsonFile(GLOBAL_CONFIG_FILE) || {};
|
|
174
|
+
existing.lang = lang;
|
|
175
|
+
writeJsonFile(GLOBAL_CONFIG_FILE, existing);
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
/**
|
|
179
|
+
* Save theme config to the global ~/.acmecode/config.json
|
|
180
|
+
*/
|
|
181
|
+
export function saveGlobalThemeConfig(theme: string): void {
|
|
182
|
+
const existing = readJsonFile(GLOBAL_CONFIG_FILE) || {};
|
|
183
|
+
existing.theme = theme;
|
|
184
|
+
writeJsonFile(GLOBAL_CONFIG_FILE, existing);
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
/**
|
|
188
|
+
* Save reasoning level config to the global ~/.acmecode/config.json
|
|
189
|
+
*/
|
|
190
|
+
export function saveGlobalReasoningLevel(level: ReasoningLevel): void {
|
|
191
|
+
const existing = readJsonFile(GLOBAL_CONFIG_FILE) || {};
|
|
192
|
+
writeJsonFile(GLOBAL_CONFIG_FILE, existing);
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
/**
|
|
196
|
+
* Save provider-specific settings to ~/.acmecode/config.json
|
|
197
|
+
*/
|
|
198
|
+
export function saveProviderConfig(
|
|
199
|
+
provider: string,
|
|
200
|
+
config: ProviderSettings,
|
|
201
|
+
): void {
|
|
202
|
+
const existing = readJsonFile(GLOBAL_CONFIG_FILE) || {};
|
|
203
|
+
if (!existing.providers) existing.providers = {};
|
|
204
|
+
existing.providers[provider] = config;
|
|
205
|
+
writeJsonFile(GLOBAL_CONFIG_FILE, existing);
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
/**
|
|
209
|
+
* Load all custom providers from global config
|
|
210
|
+
*/
|
|
211
|
+
export function loadCustomProviders(): Record<string, ProviderSettings> {
|
|
212
|
+
const globalConfig = readJsonFile(GLOBAL_CONFIG_FILE);
|
|
213
|
+
const providers = globalConfig?.providers || {};
|
|
214
|
+
const customs: Record<string, ProviderSettings> = {};
|
|
215
|
+
const officialIds = [
|
|
216
|
+
"extralink",
|
|
217
|
+
"openai",
|
|
218
|
+
"anthropic",
|
|
219
|
+
"google",
|
|
220
|
+
"xai",
|
|
221
|
+
"mistral",
|
|
222
|
+
"groq",
|
|
223
|
+
"deepinfra",
|
|
224
|
+
"openrouter",
|
|
225
|
+
];
|
|
226
|
+
|
|
227
|
+
for (const [id, cfg] of Object.entries(providers)) {
|
|
228
|
+
if ((cfg as any).isCustom || !officialIds.includes(id)) {
|
|
229
|
+
customs[id] = cfg as ProviderSettings;
|
|
230
|
+
}
|
|
231
|
+
}
|
|
232
|
+
return customs;
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
export const getProviderKey = (provider: string) => {
|
|
236
|
+
// 1. Check saved config in ~/.acmecode/config.json
|
|
237
|
+
const globalConfig = readJsonFile(GLOBAL_CONFIG_FILE);
|
|
238
|
+
if (globalConfig?.providers?.[provider]?.apiKey) {
|
|
239
|
+
return globalConfig.providers[provider].apiKey;
|
|
240
|
+
}
|
|
241
|
+
|
|
242
|
+
// 2. Check environment variables
|
|
243
|
+
switch (provider) {
|
|
244
|
+
case "extralink":
|
|
245
|
+
return (
|
|
246
|
+
process.env.EXTRALINK_API_KEY ||
|
|
247
|
+
"sk-7qwFHXqvDgqTMfswYhgnMDoAWNU2zneozvFgI9DaR5Cpx49w"
|
|
248
|
+
);
|
|
249
|
+
case "openai":
|
|
250
|
+
return (
|
|
251
|
+
process.env.OPENAI_API_KEY ||
|
|
252
|
+
"sk-Plobc3VM4qzRkIUAakQjtj7hwHPedlSoU4haaPNWWNIESiya"
|
|
253
|
+
);
|
|
254
|
+
case "anthropic":
|
|
255
|
+
return process.env.ANTHROPIC_API_KEY;
|
|
256
|
+
case "google":
|
|
257
|
+
return process.env.GOOGLE_GENERATIVE_AI_API_KEY;
|
|
258
|
+
case "xai":
|
|
259
|
+
return process.env.XAI_API_KEY;
|
|
260
|
+
case "mistral":
|
|
261
|
+
return process.env.MISTRAL_API_KEY;
|
|
262
|
+
case "groq":
|
|
263
|
+
return process.env.GROQ_API_KEY;
|
|
264
|
+
case "deepinfra":
|
|
265
|
+
return process.env.DEEPINFRA_API_KEY;
|
|
266
|
+
case "openrouter":
|
|
267
|
+
return process.env.OPENROUTER_API_KEY;
|
|
268
|
+
default:
|
|
269
|
+
return undefined;
|
|
270
|
+
}
|
|
271
|
+
};
|
|
272
|
+
|
|
273
|
+
/**
|
|
274
|
+
* Normalizes a base URL by removing trailing slashes and ensuring consistency.
|
|
275
|
+
*/
|
|
276
|
+
export const normalizeBaseUrl = (
|
|
277
|
+
url: string | undefined,
|
|
278
|
+
): string | undefined => {
|
|
279
|
+
if (!url) return undefined;
|
|
280
|
+
return url.replace(/\/+$/, "");
|
|
281
|
+
};
|
|
282
|
+
|
|
283
|
+
/**
|
|
284
|
+
* Intelligently adapts a base URL for a given protocol if version segments are missing.
|
|
285
|
+
*/
|
|
286
|
+
export const getAdaptedBaseUrl = (
|
|
287
|
+
protocol: string,
|
|
288
|
+
baseUrl: string | undefined,
|
|
289
|
+
): string | undefined => {
|
|
290
|
+
if (!baseUrl) return undefined;
|
|
291
|
+
const clean = baseUrl.replace(/\/+$/, "");
|
|
292
|
+
|
|
293
|
+
// Official or well-known protocols often expect version segments
|
|
294
|
+
if (protocol === "anthropic" && !clean.includes("/v1")) {
|
|
295
|
+
return `${clean}/v1`;
|
|
296
|
+
}
|
|
297
|
+
if (protocol === "google" && !clean.includes("/v1")) {
|
|
298
|
+
// google usually expects /v1 or /v1beta for the SDK
|
|
299
|
+
return `${clean}/v1beta`;
|
|
300
|
+
}
|
|
301
|
+
if (
|
|
302
|
+
protocol === "openai" &&
|
|
303
|
+
!clean.includes("/v1") &&
|
|
304
|
+
!clean.includes("/web")
|
|
305
|
+
) {
|
|
306
|
+
// Many OpenAI relays use /v1, but some use root. We'll leave root as is
|
|
307
|
+
// but normalize to clean for now.
|
|
308
|
+
}
|
|
309
|
+
|
|
310
|
+
return clean;
|
|
311
|
+
};
|
|
312
|
+
|
|
313
|
+
export const getProviderBaseUrl = (provider: string) => {
|
|
314
|
+
// 1. Check saved config in ~/.acmecode/config.json
|
|
315
|
+
const globalConfig = readJsonFile(GLOBAL_CONFIG_FILE);
|
|
316
|
+
const saved = globalConfig?.providers?.[provider];
|
|
317
|
+
if (saved?.baseUrl) {
|
|
318
|
+
return getAdaptedBaseUrl(saved.protocol || "openai", saved.baseUrl);
|
|
319
|
+
}
|
|
320
|
+
|
|
321
|
+
// 2. Check environment variables or official defaults
|
|
322
|
+
let url: string | undefined;
|
|
323
|
+
const protocol = getProviderProtocol(provider);
|
|
324
|
+
switch (provider) {
|
|
325
|
+
case "extralink":
|
|
326
|
+
url = process.env.EXTRALINK_BASE_URL || "https://apis.extralink.net/v1";
|
|
327
|
+
break;
|
|
328
|
+
case "openai":
|
|
329
|
+
url = process.env.OPENAI_BASE_URL || "https://api.openai.com/v1";
|
|
330
|
+
break;
|
|
331
|
+
case "anthropic":
|
|
332
|
+
url = process.env.ANTHROPIC_BASE_URL || "https://api.anthropic.com/v1";
|
|
333
|
+
break;
|
|
334
|
+
case "google":
|
|
335
|
+
url =
|
|
336
|
+
process.env.GOOGLE_GENERATIVE_AI_BASE_URL ||
|
|
337
|
+
"https://generativelanguage.googleapis.com/v1beta";
|
|
338
|
+
break;
|
|
339
|
+
case "xai":
|
|
340
|
+
url = process.env.XAI_BASE_URL || "https://api.x.ai/v1";
|
|
341
|
+
break;
|
|
342
|
+
case "mistral":
|
|
343
|
+
url = process.env.MISTRAL_BASE_URL || "https://api.mistral.ai/v1";
|
|
344
|
+
break;
|
|
345
|
+
case "groq":
|
|
346
|
+
url = process.env.GROQ_BASE_URL || "https://api.groq.com/openai/v1";
|
|
347
|
+
break;
|
|
348
|
+
case "deepinfra":
|
|
349
|
+
url =
|
|
350
|
+
process.env.DEEPINFRA_BASE_URL || "https://api.deepinfra.com/v1/openai";
|
|
351
|
+
break;
|
|
352
|
+
case "openrouter":
|
|
353
|
+
url = process.env.OPENROUTER_BASE_URL || "https://openrouter.ai/api/v1";
|
|
354
|
+
break;
|
|
355
|
+
}
|
|
356
|
+
return getAdaptedBaseUrl(protocol, url);
|
|
357
|
+
};
|
|
358
|
+
|
|
359
|
+
/**
|
|
360
|
+
* Get the protocol for a provider (e.g. 'openai', 'anthropic', 'google')
|
|
361
|
+
*/
|
|
362
|
+
export const getProviderProtocol = (provider: string): string => {
|
|
363
|
+
const globalConfig = readJsonFile(GLOBAL_CONFIG_FILE);
|
|
364
|
+
if (globalConfig?.providers?.[provider]?.protocol) {
|
|
365
|
+
return globalConfig.providers[provider].protocol;
|
|
366
|
+
}
|
|
367
|
+
|
|
368
|
+
// Default to provider ID for official ones, or 'openai' as safe fallback
|
|
369
|
+
const official = [
|
|
370
|
+
"extralink",
|
|
371
|
+
"openai",
|
|
372
|
+
"anthropic",
|
|
373
|
+
"google",
|
|
374
|
+
"xai",
|
|
375
|
+
"mistral",
|
|
376
|
+
"groq",
|
|
377
|
+
"deepinfra",
|
|
378
|
+
"openrouter",
|
|
379
|
+
];
|
|
380
|
+
if (official.includes(provider)) return provider;
|
|
381
|
+
|
|
382
|
+
return "openai";
|
|
383
|
+
};
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
import * as fs from 'fs/promises';
|
|
2
|
+
import { exec } from 'child_process';
|
|
3
|
+
import { promisify } from 'util';
|
|
4
|
+
import path from 'path';
|
|
5
|
+
|
|
6
|
+
const execAsync = promisify(exec);
|
|
7
|
+
|
|
8
|
+
export async function getProjectContext(): Promise<string> {
|
|
9
|
+
let context = '';
|
|
10
|
+
|
|
11
|
+
// 1. ACMECODE.md
|
|
12
|
+
try {
|
|
13
|
+
const acmecodePath = path.join(process.cwd(), 'ACMECODE.md');
|
|
14
|
+
const content = await fs.readFile(acmecodePath, 'utf8');
|
|
15
|
+
context += `\n\n[Project Instructions (ACMECODE.md)]\n${content}`;
|
|
16
|
+
} catch (e: any) {
|
|
17
|
+
// Ignore if missing
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
// 2. Git context
|
|
21
|
+
try {
|
|
22
|
+
const { stdout: branch } = await execAsync('git rev-parse --abbrev-ref HEAD');
|
|
23
|
+
const { stdout: status } = await execAsync('git status -s');
|
|
24
|
+
context += `\n\n[Git Status (Branch: ${branch.trim()})]\n${status.trim() || 'Clean working directory'}`;
|
|
25
|
+
|
|
26
|
+
// Let's also get the last commit message for context
|
|
27
|
+
const { stdout: log } = await execAsync('git log -1 --oneline');
|
|
28
|
+
context += `\nLast Commit: ${log.trim()}`;
|
|
29
|
+
} catch (e: any) {
|
|
30
|
+
// Ignore if not a git repo
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
return context;
|
|
34
|
+
}
|
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
import { createOpenAI } from "@ai-sdk/openai";
|
|
2
|
+
import { createAnthropic } from "@ai-sdk/anthropic";
|
|
3
|
+
import { createGoogleGenerativeAI } from "@ai-sdk/google";
|
|
4
|
+
import { createXai } from "@ai-sdk/xai";
|
|
5
|
+
import { createMistral } from "@ai-sdk/mistral";
|
|
6
|
+
import { createGroq } from "@ai-sdk/groq";
|
|
7
|
+
import { createDeepInfra } from "@ai-sdk/deepinfra";
|
|
8
|
+
import { createOpenRouter } from "@openrouter/ai-sdk-provider";
|
|
9
|
+
import { LanguageModel } from "ai";
|
|
10
|
+
import {
|
|
11
|
+
getProviderKey,
|
|
12
|
+
getProviderBaseUrl,
|
|
13
|
+
loadCustomProviders,
|
|
14
|
+
getProviderProtocol,
|
|
15
|
+
} from "../config/index.js";
|
|
16
|
+
|
|
17
|
+
// ── Supported provider types ──
/** Ids of providers with a first-party adapter in PROVIDER_REGISTRY. */
export type ProviderType =
  | "extralink"
  | "openai"
  | "anthropic"
  | "google"
  | "xai"
  | "mistral"
  | "groq"
  | "deepinfra"
  | "openrouter";

// ── Provider metadata for display ──
export interface ProviderInfo {
  // Provider identifier; also used as the registry key.
  id: ProviderType;
  // Human-readable display name.
  name: string;
  // Environment variable that holds the provider's API key
  // ("CUSTOM" for user-defined providers).
  envKey: string;
}
|
|
35
|
+
|
|
36
|
+
// Built-in providers shipped with the package. `envKey` names the
// environment variable consulted for the API key when none is saved.
const OFFICIAL_PROVIDERS: ProviderInfo[] = [
  {
    id: "extralink",
    name: "Extralink",
    envKey: "EXTRALINK_API_KEY",
  },
  { id: "openai", name: "OpenAI", envKey: "OPENAI_API_KEY" },
  { id: "anthropic", name: "Anthropic", envKey: "ANTHROPIC_API_KEY" },
  { id: "google", name: "Google", envKey: "GOOGLE_GENERATIVE_AI_API_KEY" },
  { id: "xai", name: "xAI (Grok)", envKey: "XAI_API_KEY" },
  { id: "mistral", name: "Mistral", envKey: "MISTRAL_API_KEY" },
  { id: "groq", name: "Groq", envKey: "GROQ_API_KEY" },
  { id: "deepinfra", name: "DeepInfra", envKey: "DEEPINFRA_API_KEY" },
  { id: "openrouter", name: "OpenRouter", envKey: "OPENROUTER_API_KEY" },
];
|
51
|
+
|
|
52
|
+
export function getProviders(): ProviderInfo[] {
|
|
53
|
+
const customs = loadCustomProviders();
|
|
54
|
+
const customList: ProviderInfo[] = Object.keys(customs).map((id) => ({
|
|
55
|
+
id: id as ProviderType,
|
|
56
|
+
name: id,
|
|
57
|
+
envKey: "CUSTOM",
|
|
58
|
+
}));
|
|
59
|
+
|
|
60
|
+
// Filter out custom providers that duplicate official ones
|
|
61
|
+
const officialIds = new Set(OFFICIAL_PROVIDERS.map((p) => p.id));
|
|
62
|
+
const filteredCustoms = customList.filter((c) => !officialIds.has(c.id));
|
|
63
|
+
|
|
64
|
+
return [...OFFICIAL_PROVIDERS, ...filteredCustoms];
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
// ── Dynamic provider registry ──
// Each factory creates an AI SDK provider instance given an API key and optional base URL.
type ProviderFactory = (
  apiKey: string,
  baseURL?: string,
) => (modelName: string) => LanguageModel;

// NOTE: the per-provider call shape matters — OpenAI-compatible providers go
// through sdk.chat(model) (Chat Completions surface), while the native SDKs
// are invoked directly as sdk(model). Do not normalize these.
const PROVIDER_REGISTRY: Record<ProviderType, ProviderFactory> = {
  // OpenAI-compatible relay, so it reuses the OpenAI SDK's chat surface.
  extralink: (apiKey, baseURL) => {
    const sdk = createOpenAI({ apiKey, baseURL });
    return (model) => sdk.chat(model) as LanguageModel;
  },
  openai: (apiKey, baseURL) => {
    const sdk = createOpenAI({ apiKey, baseURL });
    return (model) => sdk.chat(model) as LanguageModel;
  },
  anthropic: (apiKey, baseURL) => {
    const sdk = createAnthropic({ apiKey, baseURL });
    return (model) => sdk(model) as LanguageModel;
  },
  google: (apiKey, baseURL) => {
    const sdk = createGoogleGenerativeAI({ apiKey, baseURL });
    return (model) => sdk(model) as LanguageModel;
  },
  xai: (apiKey, baseURL) => {
    const sdk = createXai({ apiKey, baseURL });
    return (model) => sdk(model) as LanguageModel;
  },
  mistral: (apiKey, baseURL) => {
    const sdk = createMistral({ apiKey, baseURL });
    return (model) => sdk(model) as LanguageModel;
  },
  groq: (apiKey, baseURL) => {
    const sdk = createGroq({ apiKey, baseURL });
    return (model) => sdk(model) as LanguageModel;
  },
  deepinfra: (apiKey, baseURL) => {
    const sdk = createDeepInfra({ apiKey, baseURL });
    return (model) => sdk(model) as LanguageModel;
  },
  // The OpenRouter SDK exposes chat models via its .chat() sub-API.
  openrouter: (apiKey, baseURL) => {
    const sdk = createOpenRouter({ apiKey, baseURL });
    return (model) => sdk.chat(model) as LanguageModel;
  },
};
|
|
112
|
+
|
|
113
|
+
/**
|
|
114
|
+
* Create a language model instance for the given provider and model name.
|
|
115
|
+
* Reads API key and optional base URL from the project/env configuration.
|
|
116
|
+
*/
|
|
117
|
+
export function getModel(
|
|
118
|
+
provider: ProviderType,
|
|
119
|
+
modelName: string,
|
|
120
|
+
): LanguageModel {
|
|
121
|
+
const protocol = getProviderProtocol(provider) as ProviderType;
|
|
122
|
+
const factory = PROVIDER_REGISTRY[protocol];
|
|
123
|
+
|
|
124
|
+
const apiKey = getProviderKey(provider);
|
|
125
|
+
if (!apiKey) {
|
|
126
|
+
const info = getProviders().find((p) => p.id === provider);
|
|
127
|
+
throw new Error(
|
|
128
|
+
`${info?.envKey || provider.toUpperCase() + "_API_KEY"} is not set.`,
|
|
129
|
+
);
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
const baseURL = getProviderBaseUrl(provider);
|
|
133
|
+
|
|
134
|
+
if (!factory) {
|
|
135
|
+
// Fallback for custom providers if protocol is unknown: assume OpenAI-compatible
|
|
136
|
+
const sdk = createOpenAI({ apiKey, baseURL });
|
|
137
|
+
return sdk.chat(modelName) as LanguageModel;
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
const createModel = factory(apiKey, baseURL);
|
|
141
|
+
return createModel(modelName);
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
/**
|
|
145
|
+
* List all providers that have API keys configured.
|
|
146
|
+
*/
|
|
147
|
+
export function getAvailableProviders(): ProviderInfo[] {
|
|
148
|
+
return getProviders().filter((p) => {
|
|
149
|
+
try {
|
|
150
|
+
return !!getProviderKey(p.id);
|
|
151
|
+
} catch {
|
|
152
|
+
return false;
|
|
153
|
+
}
|
|
154
|
+
});
|
|
155
|
+
}
|
|
156
|
+
|
|
157
|
+
// ── Dynamic Context Size Caching ──
|
|
158
|
+
let modelsDevCache: Record<string, number> | null = null;
|
|
159
|
+
|
|
160
|
+
export async function fetchModelsDevCache(): Promise<void> {
|
|
161
|
+
try {
|
|
162
|
+
const res = await fetch("https://models.dev/api.json");
|
|
163
|
+
if (!res.ok) return;
|
|
164
|
+
const data = await res.json();
|
|
165
|
+
const cache: Record<string, number> = {};
|
|
166
|
+
for (const provider of Object.values(data)) {
|
|
167
|
+
const parsedProvider = provider as any;
|
|
168
|
+
if (parsedProvider?.models) {
|
|
169
|
+
for (const [modId, modData] of Object.entries(parsedProvider.models)) {
|
|
170
|
+
if ((modData as any)?.limit?.context) {
|
|
171
|
+
cache[modId] = (modData as any).limit.context;
|
|
172
|
+
cache[modId.toLowerCase()] = (modData as any).limit.context;
|
|
173
|
+
}
|
|
174
|
+
}
|
|
175
|
+
}
|
|
176
|
+
}
|
|
177
|
+
modelsDevCache = cache;
|
|
178
|
+
} catch {
|
|
179
|
+
// silently ignore fetch errors
|
|
180
|
+
}
|
|
181
|
+
}
|
|
182
|
+
|
|
183
|
+
/**
|
|
184
|
+
* Get the known context window size for a model.
|
|
185
|
+
*/
|
|
186
|
+
export function getContextWindow(modelName: string): number {
|
|
187
|
+
if (modelsDevCache) {
|
|
188
|
+
if (modelsDevCache[modelName]) return modelsDevCache[modelName]!;
|
|
189
|
+
if (modelsDevCache[modelName.toLowerCase()])
|
|
190
|
+
return modelsDevCache[modelName.toLowerCase()]!;
|
|
191
|
+
// Fuzzy match
|
|
192
|
+
for (const [key, val] of Object.entries(modelsDevCache)) {
|
|
193
|
+
if (
|
|
194
|
+
modelName.toLowerCase().includes(key) ||
|
|
195
|
+
key.includes(modelName.toLowerCase())
|
|
196
|
+
) {
|
|
197
|
+
return val;
|
|
198
|
+
}
|
|
199
|
+
}
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
const lower = modelName.toLowerCase();
|
|
203
|
+
|
|
204
|
+
// ── User provided specifics from models.dev ──
|
|
205
|
+
if (lower.includes("kimi-k2.5")) return 262144;
|
|
206
|
+
if (lower.includes("llama-3.3")) return 131072;
|
|
207
|
+
if (lower.includes("phi-4")) return 32000;
|
|
208
|
+
if (lower.includes("gpt-oss")) return 65536;
|
|
209
|
+
if (lower.includes("magistral")) return 131072;
|
|
210
|
+
if (lower.includes("devstral")) return 32768;
|
|
211
|
+
if (lower.includes("voxtral")) return 32000;
|
|
212
|
+
if (lower.includes("whisper")) return 448;
|
|
213
|
+
if (lower.includes("e5-large")) return 512;
|
|
214
|
+
|
|
215
|
+
// ── Standard defaults ──
|
|
216
|
+
if (lower.includes("o1") || lower.includes("o3") || lower.includes("o4"))
|
|
217
|
+
return 128000;
|
|
218
|
+
if (lower.includes("claude-3-5")) return 200000;
|
|
219
|
+
if (lower.includes("claude-3")) return 200000;
|
|
220
|
+
if (lower.includes("gemini-1.5-pro")) return 2000000;
|
|
221
|
+
if (lower.includes("gemini-1.5-flash")) return 1000000;
|
|
222
|
+
if (lower.includes("gpt-4o")) return 128000;
|
|
223
|
+
if (lower.includes("gpt-4")) return 128000;
|
|
224
|
+
if (lower.includes("deepseek")) return 64000;
|
|
225
|
+
if (lower.includes("qwen")) return 32000;
|
|
226
|
+
return 32000; // conservative default
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
/**
|
|
230
|
+
* Heuristic to estimate token count from string content.
|
|
231
|
+
* Average 3.5 characters per token for mixed English/Chinese.
|
|
232
|
+
*/
|
|
233
|
+
export function estimateTokens(text: string): number {
|
|
234
|
+
if (!text) return 0;
|
|
235
|
+
// Basic heuristic: 1 token ≈ 3.0 characters (safer for mixed Code/Chinese which are token heavy)
|
|
236
|
+
return Math.ceil(text.length / 3.0);
|
|
237
|
+
}
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
import { generateText } from 'ai';
|
|
2
|
+
import { getModel } from './provider.js';
|
|
3
|
+
import { ModelConfig } from '../config/index.js';
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* Analyzes an image using a specialized vision model and returns a textual description.
|
|
7
|
+
* This allows non-vision primary models to "see" via delegation.
|
|
8
|
+
*/
|
|
9
|
+
export async function analyzeImage(
|
|
10
|
+
imageData: string, // base64
|
|
11
|
+
config: ModelConfig
|
|
12
|
+
): Promise<string> {
|
|
13
|
+
if (!config.visionProvider || !config.visionModel) {
|
|
14
|
+
return "Error: Vision model not configured. Please set visionProvider and visionModel in config.";
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
const model = getModel(config.visionProvider as any, config.visionModel);
|
|
18
|
+
|
|
19
|
+
try {
|
|
20
|
+
const { text } = await generateText({
|
|
21
|
+
model,
|
|
22
|
+
abortSignal: AbortSignal.timeout(60000), // 60 seconds timeout
|
|
23
|
+
maxRetries: 3,
|
|
24
|
+
messages: [
|
|
25
|
+
{
|
|
26
|
+
role: 'user',
|
|
27
|
+
content: [
|
|
28
|
+
{ type: 'text', text: "Please describe this screenshot in detail. Focus on the layout, visible text, interactive elements (buttons, inputs), and any apparent errors or status messages. This description will be used by another AI to understand the state of the web application." },
|
|
29
|
+
{ type: 'image', image: imageData }
|
|
30
|
+
]
|
|
31
|
+
}
|
|
32
|
+
]
|
|
33
|
+
});
|
|
34
|
+
|
|
35
|
+
return `[Vision Model Analysis (${config.visionModel})]:\n${text}`;
|
|
36
|
+
} catch (err: any) {
|
|
37
|
+
let msg = err.message;
|
|
38
|
+
if (err.name === 'TimeoutError' || err.message.includes('timeout')) {
|
|
39
|
+
msg = `Analysis timed out after 60s. The image might be too complex or the provider is slow.`;
|
|
40
|
+
}
|
|
41
|
+
return `Error during vision analysis: ${msg}`;
|
|
42
|
+
}
|
|
43
|
+
}
|