@redstone-md/mapr 0.0.1-alpha
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +45 -0
- package/README.md +109 -0
- package/bin/mapr +2 -0
- package/index.ts +247 -0
- package/lib/ai-analyzer.ts +598 -0
- package/lib/artifacts.ts +233 -0
- package/lib/cli-args.ts +152 -0
- package/lib/config.ts +385 -0
- package/lib/formatter.ts +109 -0
- package/lib/local-rag.ts +104 -0
- package/lib/progress.ts +10 -0
- package/lib/provider.ts +85 -0
- package/lib/reporter.ts +213 -0
- package/lib/scraper.ts +169 -0
- package/lib/swarm-prompts.ts +56 -0
- package/lib/wasm.ts +62 -0
- package/package.json +62 -0
package/lib/config.ts
ADDED
|
@@ -0,0 +1,385 @@
|
|
|
1
|
+
import { cancel, isCancel, password, select, text } from "@clack/prompts";
|
|
2
|
+
import { mkdir, readFile, writeFile } from "fs/promises";
|
|
3
|
+
import { homedir } from "os";
|
|
4
|
+
import { join } from "path";
|
|
5
|
+
import { z } from "zod";
|
|
6
|
+
|
|
7
|
+
import {
|
|
8
|
+
AiProviderClient,
|
|
9
|
+
DEFAULT_MODEL,
|
|
10
|
+
DEFAULT_MODEL_CONTEXT_SIZE,
|
|
11
|
+
DEFAULT_OPENAI_BASE_URL,
|
|
12
|
+
aiProviderConfigSchema,
|
|
13
|
+
type AiProviderConfig,
|
|
14
|
+
} from "./provider";
|
|
15
|
+
|
|
16
|
+
// Schema for the on-disk config file (~/.mapr/config.json). Every field is
// optional so older/partial files still parse; `.strict()` rejects unknown
// keys so typos in a hand-edited file are surfaced instead of ignored.
const persistedConfigSchema = z
  .object({
    providerType: z.enum(["openai", "openai-compatible"]).optional(),
    providerName: z.string().min(1).optional(),
    apiKey: z.string().min(1).optional(),
    // Legacy field name from earlier versions; normalizePersistedConfig
    // migrates it onto `apiKey` when `apiKey` itself is absent.
    openAiApiKey: z.string().min(1).optional(),
    baseURL: z.string().url().optional(),
    model: z.string().min(1).optional(),
    modelContextSize: z.number().int().positive().optional(),
  })
  .strict();

// A provider config with every field optional — used while the configuration
// is still being assembled from file contents, CLI overrides, and prompts.
const configDraftSchema = aiProviderConfigSchema.partial();
// The minimal subset of config needed to call the provider's /models endpoint
// (no model/context-size required yet).
const modelListingConfigSchema = z.object({
  providerType: z.enum(["openai", "openai-compatible"]).default("openai"),
  providerName: z.string().min(1).default("OpenAI"),
  apiKey: z.string().min(1),
  baseURL: z.string().trim().url().default(DEFAULT_OPENAI_BASE_URL),
});

type PersistedConfig = z.infer<typeof persistedConfigSchema>;
type ConfigDraft = z.infer<typeof configDraftSchema>;
// Structural fetch signature so tests can inject a stub instead of global fetch.
type FetchLike = (input: string | URL | Request, init?: RequestInit) => Promise<Response>;

/** Options controlling how ensureConfig resolves a complete configuration. */
export interface EnsureConfigOptions {
  // Re-run the interactive wizard even when a valid config already exists.
  forceReconfigure?: boolean;
  // Never prompt; fail instead if the merged draft is incomplete.
  headless?: boolean;
  // CLI-supplied values that take precedence over the persisted config.
  overrides?: ConfigDraft | null;
}

/** Injection points for tests: home dir, fetch, and the prompt flow. */
interface ConfigManagerOptions {
  homeDir?: string;
  fetcher?: FetchLike;
  promptConfig?: (existingConfig: ConfigDraft | null) => Promise<AiProviderConfig>;
}
|
|
51
|
+
|
|
52
|
+
function exitIfCancelled<T>(value: T): T {
|
|
53
|
+
if (isCancel(value)) {
|
|
54
|
+
cancel("Configuration cancelled.");
|
|
55
|
+
process.exit(0);
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
return value;
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
function mergeConfigDrafts(baseConfig: ConfigDraft | null, overrides: ConfigDraft | null): ConfigDraft | null {
|
|
62
|
+
if (!baseConfig && !overrides) {
|
|
63
|
+
return null;
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
const merged: ConfigDraft = {
|
|
67
|
+
...(baseConfig ?? {}),
|
|
68
|
+
...(overrides ?? {}),
|
|
69
|
+
};
|
|
70
|
+
|
|
71
|
+
return configDraftSchema.parse(merged);
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
function normalizePersistedConfig(config: PersistedConfig | null): ConfigDraft | null {
|
|
75
|
+
if (!config) {
|
|
76
|
+
return null;
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
return configDraftSchema.parse({
|
|
80
|
+
providerType: config.providerType ?? "openai",
|
|
81
|
+
providerName: config.providerName ?? "OpenAI",
|
|
82
|
+
apiKey: config.apiKey ?? config.openAiApiKey,
|
|
83
|
+
baseURL: config.baseURL ?? DEFAULT_OPENAI_BASE_URL,
|
|
84
|
+
model: config.model ?? DEFAULT_MODEL,
|
|
85
|
+
modelContextSize: config.modelContextSize ?? DEFAULT_MODEL_CONTEXT_SIZE,
|
|
86
|
+
});
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
async function promptForModel(
|
|
90
|
+
config: Omit<AiProviderConfig, "model" | "modelContextSize">,
|
|
91
|
+
fetcher: FetchLike,
|
|
92
|
+
initialModel?: string,
|
|
93
|
+
): Promise<string> {
|
|
94
|
+
const providerClient = new AiProviderClient({
|
|
95
|
+
...config,
|
|
96
|
+
model: initialModel ?? DEFAULT_MODEL,
|
|
97
|
+
modelContextSize: DEFAULT_MODEL_CONTEXT_SIZE,
|
|
98
|
+
});
|
|
99
|
+
|
|
100
|
+
try {
|
|
101
|
+
const models = await providerClient.fetchModels(fetcher);
|
|
102
|
+
if (models.length === 0) {
|
|
103
|
+
throw new Error("No models returned by the provider.");
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
let currentSearch = "";
|
|
107
|
+
|
|
108
|
+
while (true) {
|
|
109
|
+
const searchTerm = String(
|
|
110
|
+
exitIfCancelled(
|
|
111
|
+
await text({
|
|
112
|
+
message: "Search models",
|
|
113
|
+
placeholder: currentSearch || "gpt, llama, qwen, coder",
|
|
114
|
+
initialValue: currentSearch,
|
|
115
|
+
}),
|
|
116
|
+
),
|
|
117
|
+
).trim();
|
|
118
|
+
|
|
119
|
+
currentSearch = searchTerm;
|
|
120
|
+
const filteredModels = models.filter((model) =>
|
|
121
|
+
searchTerm.length === 0 ? true : model.toLowerCase().includes(searchTerm.toLowerCase()),
|
|
122
|
+
);
|
|
123
|
+
const visibleModels = filteredModels.slice(0, 15);
|
|
124
|
+
|
|
125
|
+
const selectedModel = exitIfCancelled(
|
|
126
|
+
await select({
|
|
127
|
+
message: "Select model",
|
|
128
|
+
options: [
|
|
129
|
+
...visibleModels.map((model) => ({ value: model, label: model })),
|
|
130
|
+
{ value: "__search_again__", label: "Search again" },
|
|
131
|
+
{ value: "__manual__", label: "Enter model manually" },
|
|
132
|
+
],
|
|
133
|
+
}),
|
|
134
|
+
);
|
|
135
|
+
|
|
136
|
+
if (selectedModel === "__search_again__") {
|
|
137
|
+
continue;
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
if (selectedModel === "__manual__") {
|
|
141
|
+
break;
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
return z.string().min(1).parse(selectedModel);
|
|
145
|
+
}
|
|
146
|
+
} catch {
|
|
147
|
+
return z
|
|
148
|
+
.string()
|
|
149
|
+
.trim()
|
|
150
|
+
.min(1, "Model is required.")
|
|
151
|
+
.parse(
|
|
152
|
+
exitIfCancelled(
|
|
153
|
+
await text({
|
|
154
|
+
message: "Enter model ID manually",
|
|
155
|
+
placeholder: DEFAULT_MODEL,
|
|
156
|
+
initialValue: initialModel ?? DEFAULT_MODEL,
|
|
157
|
+
validate(value) {
|
|
158
|
+
const parsed = z.string().trim().min(1).safeParse(value);
|
|
159
|
+
return parsed.success ? undefined : "Model is required.";
|
|
160
|
+
},
|
|
161
|
+
}),
|
|
162
|
+
),
|
|
163
|
+
);
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
return z
|
|
167
|
+
.string()
|
|
168
|
+
.trim()
|
|
169
|
+
.min(1, "Model is required.")
|
|
170
|
+
.parse(
|
|
171
|
+
exitIfCancelled(
|
|
172
|
+
await text({
|
|
173
|
+
message: "Enter model ID manually",
|
|
174
|
+
placeholder: DEFAULT_MODEL,
|
|
175
|
+
initialValue: initialModel ?? DEFAULT_MODEL,
|
|
176
|
+
validate(value) {
|
|
177
|
+
const parsed = z.string().trim().min(1).safeParse(value);
|
|
178
|
+
return parsed.success ? undefined : "Model is required.";
|
|
179
|
+
},
|
|
180
|
+
}),
|
|
181
|
+
),
|
|
182
|
+
);
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
async function promptForContextSize(defaultValue: number): Promise<number> {
|
|
186
|
+
const rawValue = exitIfCancelled(
|
|
187
|
+
await text({
|
|
188
|
+
message: "Model context size in tokens",
|
|
189
|
+
placeholder: String(DEFAULT_MODEL_CONTEXT_SIZE),
|
|
190
|
+
initialValue: String(defaultValue),
|
|
191
|
+
validate(value) {
|
|
192
|
+
const parsed = z.coerce.number().int().positive().safeParse(value);
|
|
193
|
+
return parsed.success ? undefined : "Context size must be a positive integer.";
|
|
194
|
+
},
|
|
195
|
+
}),
|
|
196
|
+
);
|
|
197
|
+
|
|
198
|
+
return z.coerce.number().int().positive().parse(rawValue);
|
|
199
|
+
}
|
|
200
|
+
|
|
201
|
+
export type AppConfig = AiProviderConfig;
|
|
202
|
+
export { DEFAULT_MODEL, DEFAULT_MODEL_CONTEXT_SIZE } from "./provider";
|
|
203
|
+
|
|
204
|
+
/**
 * Loads, persists, and interactively builds the provider configuration
 * stored at `~/.mapr/config.json`.
 *
 * The home directory, fetch implementation, and interactive prompt flow are
 * all injectable via ConfigManagerOptions so the class can be exercised
 * without a TTY or network.
 */
export class ConfigManager {
  private readonly homeDirectory: string;
  private readonly fetcher: FetchLike;
  // Optional replacement for the built-in interactive wizard (used by tests).
  private readonly promptConfigOverride: ((existingConfig: ConfigDraft | null) => Promise<AiProviderConfig>) | undefined;

  public constructor(options: ConfigManagerOptions = {}) {
    this.homeDirectory = options.homeDir ?? homedir();
    this.fetcher = options.fetcher ?? fetch;
    this.promptConfigOverride = options.promptConfig;
  }

  /** Directory holding mapr state: `<home>/.mapr`. */
  public getConfigDir(): string {
    return join(this.homeDirectory, ".mapr");
  }

  /** Full path of the JSON config file inside the config directory. */
  public getConfigPath(): string {
    return join(this.getConfigDir(), "config.json");
  }

  /**
   * Read and normalize the persisted config.
   *
   * @returns The normalized draft, or null when no config file exists yet.
   * @throws Error with a descriptive message for schema violations or
   *         invalid JSON; any other failure is rethrown unchanged.
   */
  public async readConfig(): Promise<ConfigDraft | null> {
    try {
      const raw = await readFile(this.getConfigPath(), "utf8");
      const parsed = persistedConfigSchema.parse(JSON.parse(raw) as unknown);
      return normalizePersistedConfig(parsed);
    } catch (error) {
      // A missing file simply means "not configured yet".
      if (error instanceof Error && "code" in error && error.code === "ENOENT") {
        return null;
      }

      if (error instanceof z.ZodError) {
        throw new Error(`Invalid config file at ${this.getConfigPath()}: ${error.issues[0]?.message ?? "schema error"}`);
      }

      if (error instanceof SyntaxError) {
        throw new Error(`Config file at ${this.getConfigPath()} is not valid JSON.`);
      }

      throw error;
    }
  }

  /**
   * Validate and write the config to disk.
   * The file is written with mode 0600 because it contains the API key.
   */
  public async saveConfig(config: AppConfig): Promise<void> {
    const validatedConfig = aiProviderConfigSchema.parse(config);
    await mkdir(this.getConfigDir(), { recursive: true });
    await writeFile(this.getConfigPath(), `${JSON.stringify(validatedConfig, null, 2)}\n`, {
      encoding: "utf8",
      mode: 0o600,
    });
  }

  /**
   * Produce a complete, persisted configuration.
   *
   * Resolution order: persisted file merged with `options.overrides`; in
   * headless mode the merged draft must already be complete (no prompting).
   * Otherwise a complete draft is accepted as-is (unless forceReconfigure),
   * and anything else falls through to the interactive wizard. The result is
   * re-saved in every successful path so overrides become persistent.
   */
  public async ensureConfig(options: EnsureConfigOptions = {}): Promise<AppConfig> {
    const mergedDraft = await this.resolveConfigDraft(options.overrides ?? null);

    if (options.headless) {
      try {
        const resolvedConfig = aiProviderConfigSchema.parse(mergedDraft);
        await this.saveConfig(resolvedConfig);
        return resolvedConfig;
      } catch (error) {
        if (error instanceof z.ZodError) {
          throw new Error(`Headless mode requires a complete provider config: ${error.issues[0]?.message ?? "missing field"}`);
        }

        throw error;
      }
    }

    // A complete existing draft short-circuits the wizard.
    if (mergedDraft && !options.forceReconfigure) {
      const parsedConfig = aiProviderConfigSchema.safeParse(mergedDraft);
      if (parsedConfig.success) {
        await this.saveConfig(parsedConfig.data);
        return parsedConfig.data;
      }
    }

    const config = this.promptConfigOverride
      ? aiProviderConfigSchema.parse(await this.promptConfigOverride(mergedDraft))
      : await this.promptForConfig(mergedDraft);

    await this.saveConfig(config);
    return config;
  }

  /**
   * List model IDs from the provider described by `config`.
   * Requires at least an API key (modelListingConfigSchema fills the rest).
   */
  public async listModels(config: ConfigDraft | null): Promise<string[]> {
    const resolvedConfig = modelListingConfigSchema.parse(config);
    // Model/context-size are placeholders; only base URL + key matter here.
    const providerClient = new AiProviderClient({
      ...resolvedConfig,
      model: DEFAULT_MODEL,
      modelContextSize: DEFAULT_MODEL_CONTEXT_SIZE,
    });
    return providerClient.fetchModels(this.fetcher);
  }

  /** Merge the persisted config (if any) with caller-supplied overrides. */
  public async resolveConfigDraft(overrides: ConfigDraft | null): Promise<ConfigDraft | null> {
    const existingConfig = await this.readConfig();
    return mergeConfigDrafts(existingConfig, overrides);
  }

  /**
   * Interactive wizard: provider type, display name (for compatible servers),
   * base URL, API key, model, and context size. Existing values seed the
   * prompts only when they belong to the same provider type the user picked.
   */
  private async promptForConfig(existingConfig: ConfigDraft | null): Promise<AppConfig> {
    const providerType = exitIfCancelled(
      await select({
        message: "Choose AI provider",
        initialValue: existingConfig?.providerType ?? "openai",
        options: [
          { value: "openai", label: "OpenAI" },
          { value: "openai-compatible", label: "OpenAI-compatible server" },
        ],
      }),
    ) as AiProviderConfig["providerType"];

    // The hosted OpenAI API needs no custom display name.
    const providerName =
      providerType === "openai"
        ? "OpenAI"
        : z.string().trim().min(1).parse(
            exitIfCancelled(
              await text({
                message: "Provider display name",
                placeholder: "Local vLLM, LM Studio, Ollama gateway",
                initialValue: existingConfig?.providerName && existingConfig.providerType === "openai-compatible"
                  ? existingConfig.providerName
                  : "",
                validate(value) {
                  const parsed = z.string().trim().min(1).safeParse(value);
                  return parsed.success ? undefined : "Provider name is required.";
                },
              }),
            ),
          );

    const baseURL = z.string().trim().url().parse(
      exitIfCancelled(
        await text({
          message: providerType === "openai" ? "OpenAI base URL" : "OpenAI-compatible base URL",
          placeholder: DEFAULT_OPENAI_BASE_URL,
          initialValue:
            existingConfig?.baseURL && existingConfig.providerType === providerType
              ? existingConfig.baseURL
              : DEFAULT_OPENAI_BASE_URL,
          validate(value) {
            const parsed = z.string().trim().url().safeParse(value);
            return parsed.success ? undefined : "Base URL must be a valid URL.";
          },
        }),
      ),
    );

    // Always re-entered (masked); never pre-filled from the existing config.
    const apiKey = z.string().trim().min(1).parse(
      exitIfCancelled(
        await password({
          message: providerType === "openai" ? "Enter your OpenAI API key" : "Enter your provider API key",
          mask: "*",
          validate(value) {
            const parsed = z.string().trim().min(1).safeParse(value);
            return parsed.success ? undefined : "API key is required.";
          },
        }),
      ),
    );

    const model = await promptForModel(
      {
        providerType,
        providerName,
        baseURL,
        apiKey,
      },
      this.fetcher,
      existingConfig?.providerType === providerType ? existingConfig.model : undefined,
    );

    const modelContextSize = await promptForContextSize(existingConfig?.modelContextSize ?? DEFAULT_MODEL_CONTEXT_SIZE);

    return aiProviderConfigSchema.parse({
      providerType,
      providerName,
      baseURL,
      apiKey,
      model,
      modelContextSize,
    });
  }
}
|
package/lib/formatter.ts
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
import * as prettier from "prettier";
|
|
2
|
+
import { z } from "zod";
|
|
3
|
+
|
|
4
|
+
import { artifactTypeSchema, type DiscoveredArtifact } from "./artifacts";
|
|
5
|
+
|
|
6
|
+
// Shape of an artifact after running through the formatter: the discovered
// fields plus the pretty-printed text and bookkeeping about whether
// formatting had to be skipped (and why).
const formattedArtifactSchema = z.object({
  url: z.string().url(),
  type: artifactTypeSchema,
  // Original (unformatted) source text.
  content: z.string(),
  // Pretty-printed text; equals `content` when formatting was not applied.
  formattedContent: z.string(),
  sizeBytes: z.number().int().nonnegative(),
  discoveredFrom: z.string().min(1),
  formattingSkipped: z.boolean(),
  // Human-readable explanation when formatting was skipped or inapplicable.
  formattingNote: z.string().optional(),
});

export type FormattedArtifact = z.infer<typeof formattedArtifactSchema>;
|
|
18
|
+
|
|
19
|
+
function resolvePrettierParser(artifactType: FormattedArtifact["type"]): "babel" | "babel-ts" | "html" | "css" | "json" | null {
|
|
20
|
+
switch (artifactType) {
|
|
21
|
+
case "html":
|
|
22
|
+
return "html";
|
|
23
|
+
case "stylesheet":
|
|
24
|
+
return "css";
|
|
25
|
+
case "manifest":
|
|
26
|
+
case "json":
|
|
27
|
+
return "json";
|
|
28
|
+
case "script":
|
|
29
|
+
case "service-worker":
|
|
30
|
+
case "worker":
|
|
31
|
+
return "babel";
|
|
32
|
+
case "wasm":
|
|
33
|
+
return null;
|
|
34
|
+
}
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
export class BundleFormatter {
|
|
38
|
+
public async formatArtifacts(artifacts: DiscoveredArtifact[]): Promise<FormattedArtifact[]> {
|
|
39
|
+
return Promise.all(artifacts.map((artifact) => this.formatArtifact(artifact)));
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
public async formatArtifact(artifact: DiscoveredArtifact): Promise<FormattedArtifact> {
|
|
43
|
+
const validatedArtifact = z
|
|
44
|
+
.object({
|
|
45
|
+
url: z.string().url(),
|
|
46
|
+
type: artifactTypeSchema,
|
|
47
|
+
content: z.string(),
|
|
48
|
+
sizeBytes: z.number().int().nonnegative(),
|
|
49
|
+
discoveredFrom: z.string().min(1),
|
|
50
|
+
})
|
|
51
|
+
.parse(artifact);
|
|
52
|
+
|
|
53
|
+
const parser = resolvePrettierParser(validatedArtifact.type);
|
|
54
|
+
if (!parser) {
|
|
55
|
+
return formattedArtifactSchema.parse({
|
|
56
|
+
...validatedArtifact,
|
|
57
|
+
formattedContent: validatedArtifact.content,
|
|
58
|
+
formattingSkipped: false,
|
|
59
|
+
formattingNote: "Binary artifact summarized without additional formatting.",
|
|
60
|
+
});
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
try {
|
|
64
|
+
const formattedContent = await prettier.format(validatedArtifact.content, {
|
|
65
|
+
parser,
|
|
66
|
+
printWidth: 100,
|
|
67
|
+
tabWidth: 2,
|
|
68
|
+
});
|
|
69
|
+
|
|
70
|
+
return formattedArtifactSchema.parse({
|
|
71
|
+
...validatedArtifact,
|
|
72
|
+
formattedContent,
|
|
73
|
+
formattingSkipped: false,
|
|
74
|
+
});
|
|
75
|
+
} catch (primaryError) {
|
|
76
|
+
if (parser === "babel") {
|
|
77
|
+
try {
|
|
78
|
+
const formattedContent = await prettier.format(validatedArtifact.content, {
|
|
79
|
+
parser: "babel-ts",
|
|
80
|
+
printWidth: 100,
|
|
81
|
+
tabWidth: 2,
|
|
82
|
+
});
|
|
83
|
+
|
|
84
|
+
return formattedArtifactSchema.parse({
|
|
85
|
+
...validatedArtifact,
|
|
86
|
+
formattedContent,
|
|
87
|
+
formattingSkipped: false,
|
|
88
|
+
});
|
|
89
|
+
} catch {
|
|
90
|
+
const message = primaryError instanceof Error ? primaryError.message : "formatter error";
|
|
91
|
+
return formattedArtifactSchema.parse({
|
|
92
|
+
...validatedArtifact,
|
|
93
|
+
formattedContent: validatedArtifact.content,
|
|
94
|
+
formattingSkipped: true,
|
|
95
|
+
formattingNote: `Formatting failed and raw content was preserved: ${message}`,
|
|
96
|
+
});
|
|
97
|
+
}
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
const message = primaryError instanceof Error ? primaryError.message : "formatter error";
|
|
101
|
+
return formattedArtifactSchema.parse({
|
|
102
|
+
...validatedArtifact,
|
|
103
|
+
formattedContent: validatedArtifact.content,
|
|
104
|
+
formattingSkipped: true,
|
|
105
|
+
formattingNote: `Formatting failed and raw content was preserved: ${message}`,
|
|
106
|
+
});
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
}
|
package/lib/local-rag.ts
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
|
|
3
|
+
import type { FormattedArtifact } from "./formatter";
|
|
4
|
+
|
|
5
|
+
// Tunables for the in-memory keyword index: chunk size and how many of the
// best-scoring chunks a query returns.
const ragOptionsSchema = z.object({
  segmentBytes: z.number().int().positive().default(16 * 1024),
  maxResults: z.number().int().positive().default(3),
});

// Identifier-like tokens: letters, digits, _ $ . -, minimum length 2.
const tokenPattern = /[a-zA-Z0-9_$.-]{2,}/g;

// One indexed chunk of an artifact's content.
interface RagSegment {
  artifactUrl: string;
  // Position of this chunk within its artifact (0-based).
  segmentIndex: number;
  content: string;
  // Bag-of-words term frequencies for scoring.
  tokenWeights: Map<string, number>;
}
|
|
18
|
+
|
|
19
|
+
function tokenize(source: string): string[] {
|
|
20
|
+
return (source.toLowerCase().match(tokenPattern) ?? []).slice(0, 4000);
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
function toTokenWeights(source: string): Map<string, number> {
|
|
24
|
+
const weights = new Map<string, number>();
|
|
25
|
+
|
|
26
|
+
for (const token of tokenize(source)) {
|
|
27
|
+
weights.set(token, (weights.get(token) ?? 0) + 1);
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
return weights;
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
function segmentContent(source: string, segmentBytes: number): string[] {
|
|
34
|
+
if (source.length === 0) {
|
|
35
|
+
return [];
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
const segments: string[] = [];
|
|
39
|
+
let cursor = 0;
|
|
40
|
+
|
|
41
|
+
while (cursor < source.length) {
|
|
42
|
+
const nextCursor = Math.min(source.length, cursor + segmentBytes);
|
|
43
|
+
segments.push(source.slice(cursor, nextCursor));
|
|
44
|
+
cursor = nextCursor;
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
return segments;
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
function scoreSegment(queryWeights: Map<string, number>, segmentWeights: Map<string, number>): number {
|
|
51
|
+
let score = 0;
|
|
52
|
+
|
|
53
|
+
for (const [token, queryWeight] of queryWeights.entries()) {
|
|
54
|
+
score += queryWeight * (segmentWeights.get(token) ?? 0);
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
return score;
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
// A lookup request against one artifact's indexed segments.
export interface LocalRagQuery {
  // URL of the artifact whose segments should be searched.
  artifactUrl: string;
  // Free-text query; tokenized the same way as the indexed content.
  query: string;
  // Optional snippet: segments containing its first 256 characters are
  // excluded (avoids returning a chunk the caller already holds).
  excludeContent?: string;
}
|
|
65
|
+
|
|
66
|
+
export class LocalArtifactRag {
|
|
67
|
+
private readonly options: z.infer<typeof ragOptionsSchema>;
|
|
68
|
+
private readonly segmentsByArtifact = new Map<string, RagSegment[]>();
|
|
69
|
+
|
|
70
|
+
public constructor(artifacts: FormattedArtifact[], options: z.input<typeof ragOptionsSchema> = {}) {
|
|
71
|
+
this.options = ragOptionsSchema.parse(options);
|
|
72
|
+
|
|
73
|
+
for (const artifact of artifacts) {
|
|
74
|
+
const segments = segmentContent(artifact.formattedContent || artifact.content, this.options.segmentBytes).map(
|
|
75
|
+
(content, segmentIndex) => ({
|
|
76
|
+
artifactUrl: artifact.url,
|
|
77
|
+
segmentIndex,
|
|
78
|
+
content,
|
|
79
|
+
tokenWeights: toTokenWeights(content),
|
|
80
|
+
}),
|
|
81
|
+
);
|
|
82
|
+
|
|
83
|
+
this.segmentsByArtifact.set(artifact.url, segments);
|
|
84
|
+
}
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
public query(input: LocalRagQuery): string[] {
|
|
88
|
+
const queryWeights = toTokenWeights(input.query);
|
|
89
|
+
const segments = this.segmentsByArtifact.get(input.artifactUrl) ?? [];
|
|
90
|
+
const excludeSnippet = input.excludeContent?.slice(0, 256);
|
|
91
|
+
|
|
92
|
+
return segments
|
|
93
|
+
.filter((segment) => !excludeSnippet || !segment.content.includes(excludeSnippet))
|
|
94
|
+
.map((segment) => ({
|
|
95
|
+
score: scoreSegment(queryWeights, segment.tokenWeights),
|
|
96
|
+
content: segment.content,
|
|
97
|
+
segmentIndex: segment.segmentIndex,
|
|
98
|
+
}))
|
|
99
|
+
.filter((entry) => entry.score > 0)
|
|
100
|
+
.sort((left, right) => right.score - left.score || left.segmentIndex - right.segmentIndex)
|
|
101
|
+
.slice(0, this.options.maxResults)
|
|
102
|
+
.map((entry) => entry.content);
|
|
103
|
+
}
|
|
104
|
+
}
|
package/lib/progress.ts
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import pc from "picocolors";
|
|
2
|
+
|
|
3
|
+
export function renderProgressBar(completed: number, total: number, width = 24): string {
|
|
4
|
+
const safeTotal = Math.max(1, total);
|
|
5
|
+
const ratio = Math.min(1, Math.max(0, completed / safeTotal));
|
|
6
|
+
const filled = Math.round(ratio * width);
|
|
7
|
+
const empty = Math.max(0, width - filled);
|
|
8
|
+
|
|
9
|
+
return `${pc.cyan("[" + "=".repeat(filled) + "-".repeat(empty) + "]")} ${Math.round(ratio * 100)}%`;
|
|
10
|
+
}
|
package/lib/provider.ts
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
import { createOpenAI } from "@ai-sdk/openai";
|
|
2
|
+
import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
|
|
3
|
+
import { z } from "zod";
|
|
4
|
+
|
|
5
|
+
// Defaults for the hosted OpenAI API; overridable for compatible servers.
export const DEFAULT_OPENAI_BASE_URL = "https://api.openai.com/v1";
export const DEFAULT_MODEL = "gpt-4.1-mini";
export const DEFAULT_MODEL_CONTEXT_SIZE = 128000;

// Supported provider families: the official OpenAI API, or any server
// speaking the OpenAI-compatible wire protocol.
export const providerTypeSchema = z.enum(["openai", "openai-compatible"]);

// Fully-resolved provider configuration. Every field has a default except
// the API key, which the user must always supply.
export const aiProviderConfigSchema = z.object({
  providerType: providerTypeSchema.default("openai"),
  providerName: z.string().min(1).default("OpenAI"),
  apiKey: z.string().min(1, "API key is required."),
  baseURL: z.string().trim().url("Base URL must be a valid URL.").default(DEFAULT_OPENAI_BASE_URL),
  model: z.string().min(1).default(DEFAULT_MODEL),
  modelContextSize: z.number().int().positive().default(DEFAULT_MODEL_CONTEXT_SIZE),
});

// Response shape of GET {baseURL}/models (OpenAI list-models format);
// only `id` is required, the other fields are informational.
const modelListResponseSchema = z.object({
  data: z.array(
    z.object({
      id: z.string().min(1),
      created: z.number().optional(),
      owned_by: z.string().optional(),
    }),
  ),
});

export type AiProviderConfig = z.infer<typeof aiProviderConfigSchema>;

// Structural fetch signature so callers/tests can inject a stub.
type FetchLike = (input: string | URL | Request, init?: RequestInit) => Promise<Response>;
|
|
33
|
+
|
|
34
|
+
function normalizeBaseUrl(baseURL: string): string {
|
|
35
|
+
return new URL(baseURL).toString().replace(/\/$/, "");
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
export class AiProviderClient {
|
|
39
|
+
private readonly config: AiProviderConfig;
|
|
40
|
+
|
|
41
|
+
public constructor(config: AiProviderConfig) {
|
|
42
|
+
this.config = aiProviderConfigSchema.parse(config);
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
public getConfig(): AiProviderConfig {
|
|
46
|
+
return this.config;
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
public getModel(modelId = this.config.model) {
|
|
50
|
+
if (this.config.providerType === "openai-compatible") {
|
|
51
|
+
const provider = createOpenAICompatible({
|
|
52
|
+
name: this.config.providerName,
|
|
53
|
+
apiKey: this.config.apiKey,
|
|
54
|
+
baseURL: this.config.baseURL,
|
|
55
|
+
});
|
|
56
|
+
|
|
57
|
+
return provider(modelId);
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
const provider = createOpenAI({
|
|
61
|
+
apiKey: this.config.apiKey,
|
|
62
|
+
baseURL: this.config.baseURL,
|
|
63
|
+
});
|
|
64
|
+
|
|
65
|
+
return provider(modelId);
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
public async fetchModels(fetcher: FetchLike = fetch): Promise<string[]> {
|
|
69
|
+
const endpoint = new URL("models", `${normalizeBaseUrl(this.config.baseURL)}/`).toString();
|
|
70
|
+
const response = await fetcher(endpoint, {
|
|
71
|
+
headers: {
|
|
72
|
+
Authorization: `Bearer ${this.config.apiKey}`,
|
|
73
|
+
},
|
|
74
|
+
});
|
|
75
|
+
|
|
76
|
+
if (!response.ok) {
|
|
77
|
+
throw new Error(`Model discovery failed: ${response.status} ${response.statusText}`);
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
const payload = modelListResponseSchema.parse((await response.json()) as unknown);
|
|
81
|
+
return [...new Set(payload.data.map((entry) => entry.id).filter(Boolean))].sort((left, right) =>
|
|
82
|
+
left.localeCompare(right),
|
|
83
|
+
);
|
|
84
|
+
}
|
|
85
|
+
}
|