ai-codegen-cli-vrk 2.0.2 → 2.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +4 -2
- package/src/aiClient.js +45 -66
- package/src/runner.js +4 -3
package/package.json
CHANGED

@@ -1,6 +1,6 @@
  1   1 |   {
  2   2 |     "name": "ai-codegen-cli-vrk",
  3     | -   "version": "2.0.2",
      3 | +   "version": "2.0.4",
  4   4 |     "description": "Minimalist Terminal-based AI code generator",
  5   5 |     "type": "module",
  6   6 |     "bin": {
@@ -11,8 +11,10 @@
 11  11 |       "src"
 12  12 |     ],
 13  13 |     "dependencies": {
 14     | -     "@google/generative-ai": "^0.21.0",
 15  14 |       "fs-extra": "^11.2.0",
 16  15 |       "readline-sync": "^1.4.10"
     16 | +   },
     17 | +   "engines": {
     18 | +     "node": ">=18.0.0"
 17  19 |     }
 18  20 |   }
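The `@google/generative-ai` SDK dependency is removed and an `engines` field pinning Node.js to >= 18 is added; Node 18 is the first release line that ships the global `fetch` API the rewritten `aiClient.js` relies on. A minimal, hypothetical startup guard (not part of the package) that enforces the same constraint at runtime:

```js
// Hypothetical runtime guard (illustrative only, not shipped in the package):
// verify the Node.js version and the global fetch API that aiClient.js now uses.
const [major] = process.versions.node.split(".").map(Number);

if (major < 18 || typeof fetch !== "function") {
  console.error(
    `ai-codegen-cli-vrk needs Node.js >= 18 with global fetch; found ${process.version}.`
  );
  process.exit(1);
}
```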
package/src/aiClient.js
CHANGED

@@ -1,77 +1,56 @@
  1     | -
  2     | -
  3     | - let genAI = null;
  4     | - let SELECTED_MODEL_NAME = null;
      1 | + let API_KEY = null;
      2 | + let SELECTED_MODEL_PATH = null;
  5   3 |
  6   4 |   export function setApiKey(apiKey) {
  7     | -
      5 | +   API_KEY = apiKey;
  8   6 |   }
  9   7 |
 10     | -
 11     | -
 12     | -
 13     | -
 14     | -
 15     | -
 16     | -
 17     | -
 18     | -
 19     | -
 20     | -
 21     | -
 22     | -
 23     | -
 24     | -
 25     | -
 26     | -     // If it's a 404, try the next model
 27     | -     if (err.message.includes("404")) continue;
 28     | -     throw err; // Stop if it's an Auth or Quota error
 29     | -   }
      8 | + async function findActiveModel() {
      9 | +   if (SELECTED_MODEL_PATH) return SELECTED_MODEL_PATH;
     10 | +   const url = `https://generativelanguage.googleapis.com/v1/models?key=${API_KEY}`;
     11 | +   try {
     12 | +     const response = await fetch(url);
     13 | +     const data = await response.json();
     14 | +     if (!response.ok) throw new Error(data.error?.message || "Invalid API Key");
     15 | +     const match = (data.models || []).find(m =>
     16 | +       m.supportedGenerationMethods.includes("generateContent") &&
     17 | +       (m.name.includes("flash") || m.name.includes("pro"))
     18 | +     );
     19 | +     SELECTED_MODEL_PATH = match ? match.name : "models/gemini-1.5-flash";
     20 | +     return SELECTED_MODEL_PATH;
     21 | +   } catch {
     22 | +     SELECTED_MODEL_PATH = "models/gemini-1.5-flash";
     23 | +     return SELECTED_MODEL_PATH;
 30  24 |     }
 31     | -   throw new Error("No compatible Gemini models found for your API key.");
 32  25 |   }
 33  26 |
 34     | - export async function generateFullProject(task, tests) {
 35     | -
 36     | -
 37     | -
 38     | -
 39     | -
 40     | -
 41     | -
 42     | -
 43     | -
 44     | -
 45     | -
 46     | -
 47     | -
 48     | -
 49     | -
 50     | -
 51     | -
 52     | -
 53     | -
 54     | -
 55     | -
 56     | -
 57     | -
 58     | -
 59     | - // ==========================================
 60     | - // FILE: path/to/filename.js
 61     | - // ==========================================
 62     | - [CODE HERE]
 63     | -
 64     | - ### TASK:
 65     | - ${task}
     27 | + export async function generateFullProject(task, tests, retryCount = 0) {
     28 | +   const modelPath = await findActiveModel();
     29 | +   const url = `https://generativelanguage.googleapis.com/v1/${modelPath}:generateContent?key=${API_KEY}`;
     30 | +
     31 | +   const prompt = `Generate the ENTIRE project in ONE response. Strictly pass tests.
     32 | + Header: // ==========================================
     33 | + Header: // FILE: path/filename.js
     34 | + TASK: ${task}
     35 | + TESTS: ${tests}`;
     36 | +
     37 | +   const response = await fetch(url, {
     38 | +     method: "POST",
     39 | +     headers: { "Content-Type": "application/json" },
     40 | +     body: JSON.stringify({ contents: [{ parts: [{ text: prompt }] }] })
     41 | +   });
     42 | +
     43 | +   const data = await response.json();
     44 | +
     45 | +   // Handle Overloaded / Service Unavailable (503) or Rate Limit (429)
     46 | +   if ((response.status === 503 || response.status === 429) && retryCount < 3) {
     47 | +     console.log(`..... (retrying due to server load)`);
     48 | +     await new Promise(r => setTimeout(r, 5000)); // Wait 5 seconds
     49 | +     return generateFullProject(task, tests, retryCount + 1);
     50 | +   }
 66  51 |
 67     | -
 68     | - ${tests}
 69     | - `;
     52 | +   if (!response.ok) throw new Error(data.error?.message || "Generation failed");
 70  53 |
 71     | -   const
 72     | -   const response = await result.response;
 73     | -   const text = response.text();
 74     | -
 75     | -   // Clean markdown backticks
     54 | +   const text = data.candidates?.[0]?.content?.parts?.[0]?.text || "";
 76  55 |     return text.replace(/```[a-z]*\n([\s\S]*?)\n```/gi, "$1").trim();
 77  56 |   }
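The rewritten client drops the SDK and calls the Generative Language REST API directly: `findActiveModel()` picks and caches a `flash` or `pro` model from the `v1/models` listing (falling back to `models/gemini-1.5-flash`), and `generateFullProject()` posts the prompt to that model's `:generateContent` endpoint, retrying up to three times on HTTP 429/503 before failing. A minimal usage sketch follows; the environment variable and the task/tests strings are placeholders, not values from the package:

```js
// Hypothetical caller for the rewritten client; the key, task, and tests are placeholders.
import { setApiKey, generateFullProject } from "./aiClient.js";

setApiKey(process.env.GEMINI_API_KEY); // stored in the module-level API_KEY

const task = "Implement add(a, b) in src/math.js";
const tests = "assert.strictEqual(add(2, 3), 5);";

// The first call resolves and caches SELECTED_MODEL_PATH, then POSTs to
// :generateContent; 429/503 responses are retried up to 3 times with a 5 s pause.
const project = await generateFullProject(task, tests);
console.log(project); // markdown code fences are already stripped
```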
package/src/runner.js
CHANGED

@@ -4,7 +4,8 @@ import { setApiKey, generateFullProject } from "./aiClient.js";
  4   4 |   import { writeSingleFile } from "./fileWriter.js";
  5   5 |
  6   6 |   async function main() {
  7     | -
      7 | +   // CHANGED: hideEchoBack is now false so you can see the key
      8 | +   const apiKey = readlineSync.question("--- ", { hideEchoBack: false });
  8   9 |     if (!apiKey || apiKey.trim().length === 0) process.exit(1);
  9  10 |     setApiKey(apiKey.trim());
 10  11 |
@@ -27,8 +28,8 @@ async function main() {
 27  28 |     console.log(".....");
 28  29 |     const projectContent = await generateFullProject(task, tests);
 29  30 |
 30     | -   if (!projectContent || projectContent.length <
 31     | -     throw new Error("AI
     31 | +   if (!projectContent || projectContent.length < 50) {
     32 | +     throw new Error("AI returned no content.");
 32  33 |     }
 33  34 |
 34  35 |     await writeSingleFile(process.cwd(), projectContent);