@riflo/ryte 1.1.2 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@riflo/ryte",
3
- "version": "1.1.2",
3
+ "version": "1.2.0",
4
4
  "description": "AI Git Workflow Assistant - Generate semantic commits and PRs",
5
5
  "main": "src/index.js",
6
6
  "bin": {
package/src/ai.js CHANGED
@@ -1,28 +1,19 @@
1
- import dotenv from "dotenv";
2
- dotenv.config();
1
+ import { getConfig } from "./config.js";
2
+ import { getProviderConfig } from "./provider.js";
3
3
 
4
- export async function generateAIResponse(messages) {
5
- const groqKey = process.env.GROQ_API_KEY;
6
- const openAiKey = process.env.OPENAI_API_KEY;
4
+ export async function generateAIResponse(messages, overrideConfig = null) {
5
+ const config = overrideConfig || getConfig();
7
6
 
8
- if (!groqKey && !openAiKey) {
9
- console.error("Error: Please set either GROQ_API_KEY (for free AI) or OPENAI_API_KEY in your environment variables.");
10
- process.exit(1);
7
+ if (!config || !config.apiKey) {
8
+ throw new Error("API Key not found. Please run 'ryte config' or follow the setup flow.");
11
9
  }
12
10
 
13
- const isGroq = !!groqKey;
14
- const apiKey = isGroq ? groqKey : openAiKey;
11
+ const providerName = config.provider || "openai";
12
+ const pConfig = getProviderConfig(providerName, config.baseUrl);
15
13
 
16
- // Groq API is 100% compatible with OpenAI's format! Just changing URL & Model.
17
- const apiUrl = isGroq
18
- ? "https://api.groq.com/openai/v1/chat/completions"
19
- : "https://api.openai.com/v1/chat/completions";
20
-
21
- // llama-3.1-8b-instant: 14,400 TPM (6x higher than llama-3.3-70b-versatile)
22
- // Still very capable for commit messages and PR summaries
23
- const model = isGroq
24
- ? "llama-3.1-8b-instant"
25
- : "gpt-4o-mini";
14
+ const apiUrl = pConfig.url;
15
+ const apiKey = config.apiKey;
16
+ const model = config.model || pConfig.model;
26
17
 
27
18
  const MAX_RETRIES = 3;
28
19
 
@@ -42,7 +33,6 @@ export async function generateAIResponse(messages) {
42
33
  });
43
34
 
44
35
  if (response.status === 429) {
45
- // Rate limited — parse retry-after header or use exponential backoff
46
36
  const retryAfter = parseInt(response.headers.get("retry-after") || "15", 10);
47
37
  const waitSeconds = retryAfter + 1;
48
38
 
@@ -50,7 +40,7 @@ export async function generateAIResponse(messages) {
50
40
  process.stdout.write(`\r\x1b[33m⚠ Rate limit hit. Waiting ${i}s before retry (${attempt}/${MAX_RETRIES})...\x1b[0m`);
51
41
  await new Promise(r => setTimeout(r, 1000));
52
42
  }
53
- process.stdout.write("\r" + " ".repeat(80) + "\r"); // Clear the line
43
+ process.stdout.write("\r" + " ".repeat(80) + "\r");
54
44
  continue;
55
45
  }
56
46
 
@@ -63,8 +53,7 @@ export async function generateAIResponse(messages) {
63
53
  return data.choices[0].message.content.trim();
64
54
  } catch (e) {
65
55
  if (attempt === MAX_RETRIES) {
66
- console.error("AI Generation failed:", e.message);
67
- process.exit(1);
56
+ throw e; // Let the caller handle the final failure
68
57
  }
69
58
  }
70
59
  }
package/src/config.js ADDED
@@ -0,0 +1,40 @@
1
+ import fs from "fs";
2
+ import os from "os";
3
+ import path from "path";
4
+
5
+ const CONFIG_DIR = path.join(os.homedir(), ".ryte");
6
+ const CONFIG_FILE = path.join(CONFIG_DIR, "config.json");
7
+
8
+ const DEFAULT_CONFIG = {
9
+ provider: "openai",
10
+ apiKey: "",
11
+ model: "gpt-4o-mini",
12
+ baseUrl: "" // Optional for local providers like OpenClaw/Ollama
13
+ };
14
+
15
+ export function getConfig() {
16
+ if (!fs.existsSync(CONFIG_FILE)) {
17
+ return null;
18
+ }
19
+ try {
20
+ const data = fs.readFileSync(CONFIG_FILE, "utf-8");
21
+ return { ...DEFAULT_CONFIG, ...JSON.parse(data) };
22
+ } catch (e) {
23
+ return null;
24
+ }
25
+ }
26
+
27
+ export function setConfig(updates) {
28
+ if (!fs.existsSync(CONFIG_DIR)) {
29
+ fs.mkdirSync(CONFIG_DIR, { recursive: true });
30
+ }
31
+ const current = getConfig() || DEFAULT_CONFIG;
32
+ const updated = { ...current, ...updates };
33
+ fs.writeFileSync(CONFIG_FILE, JSON.stringify(updated, null, 2), { mode: 0o600 });
34
+ return updated;
35
+ }
36
+
37
+ export function hasValidConfig() {
38
+ const config = getConfig();
39
+ return !!(config && config.apiKey);
40
+ }
package/src/index.js CHANGED
@@ -6,12 +6,16 @@ import path from "path";
6
6
  import { getStagedDiff, getCurrentBranch, getBranchCommits, applyCommit } from "./git.js";
7
7
  import { generateAIResponse } from "./ai.js";
8
8
  import { COMMIT_SYSTEM_PROMPT, PR_SYSTEM_PROMPT } from "./prompt.js";
9
+ import { getConfig, setConfig, hasValidConfig } from "./config.js";
10
+ import { PROVIDERS } from "./provider.js";
9
11
 
10
12
  const rl = readline.createInterface({
11
13
  input: process.stdin,
12
14
  output: process.stdout
13
15
  });
14
16
 
17
+ const VERSION = "1.2.0";
18
+
15
19
  async function question(query) {
16
20
  return new Promise(resolve => rl.question(query, resolve));
17
21
  }
@@ -40,6 +44,43 @@ function editInteractively(initialText) {
40
44
  });
41
45
  }
42
46
 
47
+ async function setupFlow() {
48
+ console.log("\n\x1b[36mWelcome to RYTE. Let's set up your Git Intelligence Layer.\x1b[0m");
49
+ console.log("------------------------------------------------------------");
50
+
51
+ console.log("\nSelect your LLM Provider:");
52
+ const providerList = Object.keys(PROVIDERS);
53
+ providerList.forEach((p, i) => console.log(`${i + 1}) ${p.charAt(0).toUpperCase() + p.slice(1)}`));
54
+
55
+ const choice = await question(`\nChoose [1-${providerList.length}]: `);
56
+ const providerKey = providerList[parseInt(choice) - 1] || "openai";
57
+
58
+ const apiKey = await question(`Paste your ${providerKey.toUpperCase()} API Key: `);
59
+ if (!apiKey) {
60
+ console.error("Error: API Key is required.");
61
+ process.exit(1);
62
+ }
63
+
64
+ setConfig({
65
+ provider: providerKey,
66
+ apiKey: apiKey,
67
+ model: PROVIDERS[providerKey].defaultModel
68
+ });
69
+
70
+ console.log("\n\x1b[32m✔ Configuration saved to ~/.ryte/config.json\x1b[0m");
71
+ }
72
+
73
+ async function handleConfig() {
74
+ const config = getConfig() || {};
75
+ console.log("\n\x1b[36mCurrent Configuration:\x1b[0m");
76
+ console.log(JSON.stringify(config, null, 2));
77
+
78
+ const choice = await question("\nWould you like to reset configuration? [y/N]: ");
79
+ if (choice.toLowerCase() === "y") {
80
+ await setupFlow();
81
+ }
82
+ }
83
+
43
84
  async function interactiveLoop(initialResult, type) {
44
85
  let currentResult = initialResult;
45
86
 
@@ -125,30 +166,36 @@ async function main() {
125
166
  const args = process.argv.slice(2);
126
167
  const cmd = args[0]?.toLowerCase();
127
168
 
169
+ if (!hasValidConfig()) {
170
+ await setupFlow();
171
+ }
172
+
128
173
  if (cmd === "c" || cmd === "commit") {
129
174
  await handleCommit();
130
175
  } else if (cmd === "pr") {
131
176
  await handlePR();
177
+ } else if (cmd === "config") {
178
+ await handleConfig();
132
179
  } else {
133
180
  console.log(`
134
- \x1b[1;38;5;39m██████╗ \x1b[1;38;5;63m██╗ ██╗\x1b[1;38;5;129m████████╗\x1b[1;38;5;161m███████╗\x1b[0m
135
- \x1b[1;38;5;39m██╔══██╗\x1b[1;38;5;63m╚██╗ ██╔╝\x1b[1;38;5;129m╚══██╔══╝\x1b[1;38;5;161m██╔════╝\x1b[0m
136
- \x1b[1;38;5;39m██████╔╝\x1b[1;38;5;63m ╚████╔╝ \x1b[1;38;5;129m ██║ \x1b[1;38;5;161m█████╗ \x1b[0m
137
- \x1b[1;38;5;39m██╔══██╗\x1b[1;38;5;63m ╚██╔╝ \x1b[1;38;5;129m ██║ \x1b[1;38;5;161m██╔══╝ \x1b[0m
138
- \x1b[1;38;5;39m██║ ██║\x1b[1;38;5;63m ██║ \x1b[1;38;5;129m ██║ \x1b[1;38;5;161m███████╗\x1b[0m
139
- \x1b[1;38;5;39m╚═╝ ╚═╝\x1b[1;38;5;63m ╚═╝ \x1b[1;38;5;129m ╚═╝ \x1b[1;38;5;161m╚══════╝\x1b[0m
140
-
141
- \x1b[90mAI-Powered Git Workflow Assistant\x1b[0m
142
- \x1b[90mv1.1.2 | by Riflo\x1b[0m
181
+ \x1b[1;38;5;39m██████╗ \x1b[1;38;5;63m██╗ ██╗\x1b[1;38;5;129m████████╗\x1b[1;38;5;161m███████╗\x1b[0m
182
+ \x1b[1;38;5;39m██╔══██╗\x1b[1;38;5;63m╚██╗ ██╔╝\x1b[1;38;5;129m╚══██╔══╝\x1b[1;38;5;161m██╔════╝\x1b[0m
183
+ \x1b[1;38;5;39m██████╔╝\x1b[1;38;5;63m ╚████╔╝ \x1b[1;38;5;129m ██║ \x1b[1;38;5;161m█████╗ \x1b[0m
184
+ \x1b[1;38;5;39m██╔══██╗\x1b[1;38;5;63m ╚██╔╝ \x1b[1;38;5;129m ██║ \x1b[1;38;5;161m██╔══╝ \x1b[0m
185
+ \x1b[1;38;5;39m██║ ██║\x1b[1;38;5;63m ██║ \x1b[1;38;5;129m ██║ \x1b[1;38;5;161m███████╗\x1b[0m
186
+ \x1b[1;38;5;39m╚═╝ ╚═╝\x1b[1;38;5;63m ╚═╝ \x1b[1;38;5;129m ╚═╝ \x1b[1;38;5;161m╚══════╝\x1b[0m
187
+
188
+ \x1b[1;38;5;46m[ THE AI-POWERED GIT INFRASTRUCTURE ]\x1b[0m
189
+ \x1b[90mv${VERSION} | by Riflo\x1b[0m
143
190
 
144
191
  \x1b[33mCOMMANDS:\x1b[0m
145
- \x1b[32mryte c\x1b[0m Generate semantic commit from diff
146
- \x1b[32mryte pr\x1b[0m Generate PR markdown from branch commits
192
+ \x1b[32mryte c\x1b[0m Generate semantic commit from diff
193
+ \x1b[32mryte pr\x1b[0m Generate PR markdown from branch commits
194
+ \x1b[32mryte config\x1b[0m Generate or edit your local configuration
147
195
 
148
- \x1b[33mGETTING STARTED:\x1b[0m
149
- Set either environment variable to unleash the AI:
150
- \x1b[36mGROQ_API_KEY\x1b[0m (Recommended / Free tier)
151
- • \x1b[36mOPENAI_API_KEY\x1b[0m (OpenAI API key)
196
+ \x1b[33mONBOARDING:\x1b[0m
197
+ No .env required. Run \x1b[32mryte config\x1b[0m or just run \x1b[32mryte c\x1b[0m to
198
+ start the interactive setup.
152
199
  `);
153
200
  }
154
201
 
package/src/prompt.js CHANGED
@@ -15,7 +15,9 @@ RULES:
15
15
  6. Scope: If the diff is localized, infer a scope (e.g., "auth", "ui", "config").
16
16
  7. Body (Optional): If the change is complex, add a brief body after 1 blank line to explain technical nuances.
17
17
  8. Context: If a branch name or ticket is provided, incorporate it into the scope or footer if applicable.
18
- 9. Anti-Plagiarism: If the diff shows documentation changes containing example commit messages, DO NOT copy those examples. Always synthesize a new message describing the actual change.
18
+ 9. Anti-Hallucination: Documentation files (like README.md) often contain example commit messages (e.g., "feat(auth): ..."). DO NOT assume these examples are the topic of the current change.
19
+ 10. Logical Validation: Your suggested <scope> must be derived from actual modified logic in the diff, not from text inside code blocks, comments, or examples within a documentation file.
20
+ 11. If only README.md or docs are changed, the type MUST be "docs" and the scope should relate to the documentation structure (e.g., "readme", "config", "intro"), NOT the example code inside it.
19
21
 
20
22
  Example:
21
23
  feat(ui): add loading state to checkout button
package/src/provider.js ADDED
@@ -0,0 +1,31 @@
1
+ export const PROVIDERS = {
2
+ openai: {
3
+ baseUrl: "https://api.openai.com/v1/chat/completions",
4
+ defaultModel: "gpt-4o-mini",
5
+ authHeader: (key) => `Bearer ${key}`
6
+ },
7
+ groq: {
8
+ baseUrl: "https://api.groq.com/openai/v1/chat/completions",
9
+ defaultModel: "llama-3.1-8b-instant",
10
+ authHeader: (key) => `Bearer ${key}`
11
+ },
12
+ openrouter: {
13
+ baseUrl: "https://openrouter.ai/api/v1/chat/completions",
14
+ defaultModel: "google/gemini-pro-1.5-exp",
15
+ authHeader: (key) => `Bearer ${key}`
16
+ },
17
+ local: {
18
+ baseUrl: "http://localhost:11434/v1/chat/completions", // Default Ollama/OpenClaw local port
19
+ defaultModel: "llama3",
20
+ authHeader: (key) => `Bearer ${key}`
21
+ }
22
+ };
23
+
24
+ export function getProviderConfig(name, customBaseUrl = "") {
25
+ const p = PROVIDERS[name] || PROVIDERS.openai;
26
+ return {
27
+ url: customBaseUrl || p.baseUrl,
28
+ model: p.defaultModel,
29
+ auth: p.authHeader
30
+ };
31
+ }