@frigopedro/committer 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,78 +1,56 @@
1
- # AI Git Committer
1
+ # AI Git Committer 🤖
2
2
 
3
- I was tired of typing out commit messages, so I wrote this tool.
4
- I know bunch of other tools exist, but I had hard times with them.
3
+ I was tired of typing out commit messages, so I wrote this tool. I know a bunch of other tools exist, but I had a hard time with them.
5
4
 
6
5
  It's a personal tool, don't expect it to be good because it's not :/
7
6
 
8
- ## Install
7
+ ## Install 📦
9
8
 
10
9
  ```bash
11
- npm install -g .
10
+ npm install -g @frigopedro/committer
12
11
  ```
13
12
 
14
- Or for local development:
13
+ Local development:
15
14
 
16
15
  ```bash
17
16
  npm link
18
17
  ```
19
18
 
20
- ## Usage
19
+ ## Quick start ⚡
21
20
 
22
21
  ```bash
23
22
  committer
24
23
  ```
25
24
 
26
- The tool reads your git diff, proposes a conventional commit message, then
27
- asks you to (y) commit, (n) abort, or (r) regenerate.
25
+ You will get a suggested commit message and can:
28
26
 
29
- Commit messages include a required body with a broader summary of the changes.
27
+ - `y` to commit
28
+ - `n` to abort
29
+ - `r` to regenerate
30
30
 
31
- On first run, committer creates a `.committer` config file in your home
32
- directory and walks the user through provider and model selection.
33
- Use `committer --init` to re-run onboarding and rewrite the config.
31
+ The commit message streams live as it is generated.
34
32
 
35
- ### Options
33
+ On first run, committer creates `~/.committer` and walks you through provider + model selection.
34
+ Re-run onboarding anytime with:
35
+
36
+ ```bash
37
+ committer --init
38
+ ```
39
+
40
+ ## Usage ✅
36
41
 
37
42
  ```bash
38
43
  committer --provider ollama --model llama3.1
39
44
  committer --provider openai --model gpt-4o-mini
40
45
  committer --staged
41
46
  committer --all
42
- committer --init
43
47
  ```
44
48
 
45
- If you run `committer` with `--provider ollama` and no model specified, the
46
- CLI will list your local Ollama models and prompt you to pick one.
47
-
48
- ### Environment variables
49
-
50
- - `AI_COMMIT_PROVIDER`: `claude`, `ollama`, or `openai`
51
- - `AI_COMMIT_MODEL`: override model name
52
- - `AI_COMMIT_MAX_DIFF_CHARS`: trim diff length
53
- - `ANTHROPIC_API_KEY` or `CLAUDE_API_KEY`: Claude API key
54
- - `OPENAI_API_KEY`: OpenAI API key
55
- - `AI_COMMIT_OLLAMA_HOST`: Ollama host (default `http://localhost:11434`)
56
-
57
- ### .committer config
58
-
59
- The `.committer` file is a JSON config stored at `~/.committer`.
60
-
61
- Example:
62
-
63
- ```json
64
- {
65
- "version": 1,
66
- "provider": "ollama",
67
- "model": "llama3.1",
68
- "diffMode": "auto",
69
- "maxDiffChars": 12000
70
- }
71
- ```
49
+ If you run with `--provider ollama` and no model, it will list local models and let you pick one.
72
50
 
73
- ### Commit format
51
+ ## Commit format ✍️
74
52
 
75
- Commit messages are generated in the format:
53
+ Commit messages are generated as:
76
54
 
77
55
  ```
78
56
  <type>[optional scope]: <description>
@@ -80,35 +58,51 @@ Commit messages are generated in the format:
80
58
  <body>
81
59
  ```
82
60
 
83
- The body is always present and provides a multi-sentence summary of most
84
- changed files.
61
+ The body is always present and provides a multi-sentence summary covering most changed files.
85
62
 
86
- ## Claude setup
63
+ ## Providers 🔌
87
64
 
88
- Set your Claude API key before running:
65
+ ### Claude
89
66
 
90
67
  ```bash
91
68
  export ANTHROPIC_API_KEY=your_key_here
92
69
  ```
93
70
 
94
- ## ChatGPT setup
95
-
96
- Set your OpenAI API key before running:
71
+ ### ChatGPT (OpenAI)
97
72
 
98
73
  ```bash
99
74
  export OPENAI_API_KEY=your_key_here
100
75
  ```
101
76
 
102
- ## Local Llama setup
103
-
104
- Install and run Ollama, then pull a model:
77
+ ### Local Llama (Ollama)
105
78
 
106
79
  ```bash
107
80
  ollama pull llama3.1
108
81
  ```
109
82
 
110
- Run the CLI with:
111
-
112
83
  ```bash
113
84
  committer --provider ollama
114
85
  ```
86
+
87
+ ## Config file 🧰
88
+
89
+ `~/.committer` is a JSON config shared across all repos.
90
+
91
+ ```json
92
+ {
93
+ "version": 1,
94
+ "provider": "ollama",
95
+ "model": "llama3.1",
96
+ "diffMode": "auto",
97
+ "maxDiffChars": 12000
98
+ }
99
+ ```
100
+
101
+ ## Environment variables 🌱
102
+
103
+ - `AI_COMMIT_PROVIDER`: `claude`, `ollama`, or `openai`
104
+ - `AI_COMMIT_MODEL`: override model name
105
+ - `AI_COMMIT_MAX_DIFF_CHARS`: trim diff length
106
+ - `ANTHROPIC_API_KEY` or `CLAUDE_API_KEY`: Claude API key
107
+ - `OPENAI_API_KEY`: OpenAI API key
108
+ - `AI_COMMIT_OLLAMA_HOST`: Ollama host (default `http://localhost:11434`)
package/index.js CHANGED
@@ -18,9 +18,8 @@ import { truncateDiff } from "./src/diff.js";
18
18
  import { generateCommitMessage } from "./src/ai.js";
19
19
  import { runOnboarding } from "./src/onboarding.js";
20
20
  import { selectOllamaModel } from "./src/ollama.js";
21
- import { writeLine } from "./src/ui.js";
21
+ import { write, writeLine } from "./src/ui.js";
22
22
  import { commitWithMessage } from "./src/commit.js";
23
- import { hasEmoji, validateCommitMessage } from "./src/validate.js";
24
23
 
25
24
  const PROVIDERS = ["claude", "ollama", "openai"];
26
25
 
@@ -169,34 +168,33 @@ async function main() {
169
168
 
170
169
  try {
171
170
  while (true) {
172
- writeLine("⏳ Loading... generating commit message.");
171
+ writeLine("⏳ Loading commit message...");
172
+ writeLine("\n✨ Suggested commit message:");
173
+ let streamed = false;
173
174
  message = await generateCommitMessage({
174
175
  provider,
175
176
  model,
176
177
  diff: trimmedDiff,
177
178
  truncated,
178
179
  host: ollamaHost,
180
+ stream: true,
181
+ onToken: (chunk) => {
182
+ streamed = true;
183
+ write(chunk);
184
+ },
179
185
  });
180
186
 
181
- if (!message) {
182
- writeLine("⚠️ AI response was invalid. Regenerating...");
183
- continue;
187
+ if (!streamed && message) {
188
+ writeLine(message);
184
189
  }
185
190
 
186
- if (hasEmoji(message)) {
187
- writeLine("⚠️ Commit message contains emoji. Regenerating...");
188
- continue;
189
- }
190
- console.log(message);
191
+ writeLine("");
191
192
 
192
- const validation = validateCommitMessage(message);
193
- if (!validation.valid) {
194
- writeLine(`⚠️ ${validation.reason} Regenerating...`);
193
+ if (!message) {
194
+ writeLine("⚠️ AI response was empty. Regenerating...");
195
195
  continue;
196
196
  }
197
197
 
198
- writeLine(`\n✨ Suggested commit message:\n${message}\n`);
199
-
200
198
  const answer = await rl.question(
201
199
  "Use (y) to commit, (n) to abort, (r) to regenerate: "
202
200
  );
package/package.json CHANGED
@@ -1,28 +1,32 @@
1
1
  {
2
- "name": "@frigopedro/committer",
3
- "version": "0.1.0",
4
- "description": "Generate conventional commit messages using AI",
5
- "type": "module",
6
- "bin": {
7
- "committer": "index.js"
8
- },
9
- "engines": {
10
- "node": ">=18"
11
- },
12
- "license": "MIT",
13
- "keywords": [
14
- "git",
15
- "commit",
16
- "conventional-commits",
17
- "ai",
18
- "cli"
19
- ],
20
- "files": [
21
- "index.js",
22
- "src",
23
- "README.md"
24
- ],
25
- "publishConfig": {
26
- "access": "public"
27
- }
2
+ "name": "@frigopedro/committer",
3
+ "version": "0.1.1",
4
+ "description": "Generate conventional commit messages using AI",
5
+ "type": "module",
6
+ "bin": {
7
+ "committer": "index.js"
8
+ },
9
+ "engines": {
10
+ "node": ">=18"
11
+ },
12
+ "license": "MIT",
13
+ "keywords": [
14
+ "git",
15
+ "commit",
16
+ "conventional-commits",
17
+ "ai",
18
+ "commit-message",
19
+ "commit-generator",
20
+ "commit-bot",
21
+ "ai-commit",
22
+ "cli"
23
+ ],
24
+ "files": [
25
+ "index.js",
26
+ "src",
27
+ "README.md"
28
+ ],
29
+ "publishConfig": {
30
+ "access": "public"
31
+ }
28
32
  }
package/src/ai.js CHANGED
@@ -1,27 +1,45 @@
1
- import { buildPrompt, parseCommitMessage } from "./prompt.js";
1
+ import { buildPrompt, buildSystemPrompt } from "./prompt.js";
2
2
  import { callClaude } from "./providers/claude.js";
3
- import { callOllama } from "./providers/ollama.js";
4
- import { callOpenAI } from "./providers/openai.js";
3
+ import { callOllama, streamOllama } from "./providers/ollama.js";
4
+ import { callOpenAI, streamOpenAI } from "./providers/openai.js";
5
5
 
6
6
  export async function generateCommitMessage({
7
- provider,
8
- model,
9
- diff,
10
- truncated,
11
- host,
7
+ provider,
8
+ model,
9
+ diff,
10
+ truncated,
11
+ host,
12
+ stream = false,
13
+ onToken,
12
14
  }) {
13
- const system =
14
- "You are a senior developer assistant that writes clear, conventional commit messages.";
15
- const user = buildPrompt(diff, { truncated });
15
+ const system = buildSystemPrompt(provider);
16
+ const user = buildPrompt(provider, diff, { truncated });
16
17
 
17
- let raw = "";
18
- if (provider === "ollama") {
19
- raw = await callOllama({ system, user, model, host });
20
- } else if (provider === "openai") {
21
- raw = await callOpenAI({ system, user, model });
22
- } else {
23
- raw = await callClaude({ system, user, model });
24
- }
18
+ let raw = "";
25
19
 
26
- return parseCommitMessage(raw);
20
+ if (stream && typeof onToken === "function") {
21
+ if (provider === "ollama") {
22
+ raw = await streamOllama({ system, user, model, host, onToken });
23
+ return raw;
24
+ }
25
+
26
+ if (provider === "openai") {
27
+ raw = await streamOpenAI({ system, user, model, onToken });
28
+ return raw;
29
+ }
30
+
31
+ if (provider === "claude") {
32
+ // Claude streaming is not enabled; fall back to non-stream.
33
+ }
34
+ }
35
+
36
+ if (provider === "ollama") {
37
+ raw = await callOllama({ system, user, model, host });
38
+ } else if (provider === "openai") {
39
+ raw = await callOpenAI({ system, user, model });
40
+ } else {
41
+ raw = await callClaude({ system, user, model });
42
+ }
43
+
44
+ return raw;
27
45
  }
package/src/claude.js ADDED
@@ -0,0 +1,39 @@
1
+ import { fetchClaudeModels } from "./providers/claude.js";
2
+ import { writeLine } from "./ui.js";
3
+ import { DEFAULT_CLAUDE_MODEL } from "./constants.js";
4
+
5
+ export async function selectClaudeModel({ rl }) {
6
+ const models = await fetchClaudeModels();
7
+ if (!models.length) {
8
+ throw new Error("No Claude models returned by API.");
9
+ }
10
+
11
+ const defaultIndex = Math.max(
12
+ 0,
13
+ models.findIndex((model) => model === DEFAULT_CLAUDE_MODEL)
14
+ );
15
+
16
+ writeLine("\n🧠 Available Claude models:");
17
+ models.forEach((model, index) => {
18
+ const marker = index === defaultIndex ? " (default)" : "";
19
+ writeLine(`${index + 1}) ${model}${marker}`);
20
+ });
21
+
22
+ const answer = await rl.question(`Select model [${defaultIndex + 1}]: `);
23
+ const trimmed = answer.trim();
24
+ if (!trimmed) {
25
+ return models[defaultIndex];
26
+ }
27
+
28
+ const asNumber = Number.parseInt(trimmed, 10);
29
+ if (!Number.isNaN(asNumber) && asNumber >= 1 && asNumber <= models.length) {
30
+ return models[asNumber - 1];
31
+ }
32
+
33
+ if (models.includes(trimmed)) {
34
+ return trimmed;
35
+ }
36
+
37
+ writeLine("Invalid selection. Using default model.");
38
+ return models[defaultIndex];
39
+ }
package/src/constants.js CHANGED
@@ -1,19 +1,19 @@
1
1
  export const COMMIT_TYPES = [
2
- "feat",
3
- "fix",
4
- "docs",
5
- "style",
6
- "refactor",
7
- "perf",
8
- "test",
9
- "build",
10
- "ci",
11
- "chore",
12
- "revert",
2
+ "feat",
3
+ "fix",
4
+ "docs",
5
+ "style",
6
+ "refactor",
7
+ "perf",
8
+ "test",
9
+ "build",
10
+ "ci",
11
+ "chore",
12
+ "revert",
13
13
  ];
14
14
 
15
- export const DEFAULT_MAX_DIFF_CHARS = 12000;
16
- export const DEFAULT_CLAUDE_MODEL = "claude-3-5-sonnet-latest";
15
+ export const DEFAULT_MAX_DIFF_CHARS = 50000;
16
+ export const DEFAULT_CLAUDE_MODEL = "claude-3-5-haiku-20241022";
17
17
  export const DEFAULT_OLLAMA_MODEL = "llama3.1";
18
18
  export const DEFAULT_OPENAI_MODEL = "gpt-4o-mini";
19
19
  export const DEFAULT_PROVIDER = "claude";
package/src/onboarding.js CHANGED
@@ -7,6 +7,7 @@ import {
7
7
  } from "./constants.js";
8
8
  import { promptNumber, promptSelect, writeLine } from "./ui.js";
9
9
  import { selectOllamaModel } from "./ollama.js";
10
+ import { selectClaudeModel } from "./claude.js";
10
11
  import { writeConfig } from "./config.js";
11
12
 
12
13
  export async function runOnboarding({ rl, configPath, ollamaHost }) {
@@ -38,6 +39,16 @@ export async function runOnboarding({ rl, configPath, ollamaHost }) {
38
39
  model = DEFAULT_OPENAI_MODEL;
39
40
  }
40
41
 
42
+ if (provider === "claude") {
43
+ try {
44
+ model = await selectClaudeModel({ rl });
45
+ } catch (error) {
46
+ writeLine(`⚠️ Claude model selection failed: ${error.message}`);
47
+ writeLine(`↩️ Falling back to ${DEFAULT_CLAUDE_MODEL}.`);
48
+ model = DEFAULT_CLAUDE_MODEL;
49
+ }
50
+ }
51
+
41
52
  const diffMode = await promptSelect({
42
53
  rl,
43
54
  question: "Which diff should committer use?",
package/src/prompt.js CHANGED
@@ -1,66 +1,29 @@
1
1
  import { COMMIT_TYPES } from "./constants.js";
2
2
 
3
- export function buildPrompt(diff, { truncated }) {
4
- const types = COMMIT_TYPES.join(", ");
3
+ function buildCommonTypes() {
4
+ return COMMIT_TYPES.join(", ");
5
+ }
5
6
 
7
+ function buildOpenAIPrompt(diff, { truncated, types }) {
6
8
  return [
7
- "You are an expert software engineer writing a professional git commit message.",
8
- "Analyze the diff and produce a precise, high-signal commit message.",
9
- "",
10
- "Return valid JSON only.",
11
- "Do not wrap the response in markdown.",
12
- "Do not include explanations, commentary, or extra keys.",
13
- 'Use exactly this schema: {"subject":"...","body":"..."}',
14
- "",
15
- "Rules for the subject:",
16
- "1. Follow Conventional Commits.",
17
- "2. Format: <type>(optional-scope)!: <description>",
18
- `3. Allowed types: ${types}.`,
19
- "4. Use ! only if the diff clearly introduces a breaking change.",
20
- "5. Use imperative mood.",
21
- "6. Use lower-case for the description.",
22
- "7. Do not end the subject with a period.",
23
- "8. Keep the subject concise, specific, and under 72 characters.",
24
- "9. Prefer describing the main user-visible or architectural change.",
25
- "10. Avoid vague subjects like 'update code', 'fix stuff', or 'changes'.",
26
- "",
27
- "Rules for the body:",
28
- "1. Body is required.",
29
- "2. Write 2 to 5 short sentences.",
30
- "3. Summarize the intent and the most important changes.",
31
- "4. Mention notable implementation details only when they help explain the change.",
32
- "5. Cover the main files or areas affected, but do not list every file mechanically.",
33
- "6. Focus on why and what changed more than line-level details.",
34
- "7. Do not repeat the subject verbatim.",
35
- "8. Do not include a footer, ticket number, or breaking-change footer.",
36
- "9. Do not invent details not supported by the diff.",
37
- "",
38
- "Decision guidance:",
39
- "- Use feat for new behavior or capability.",
40
- "- Use fix for bug fixes or correctness issues.",
41
- "- Use refactor for internal restructuring without behavior change.",
42
- "- Use perf for performance improvements.",
43
- "- Use docs for documentation-only changes.",
44
- "- Use test for test-only changes.",
45
- "- Use build for build system, dependencies, or packaging changes.",
46
- "- Use ci for CI/CD changes.",
47
- "- Use chore for maintenance tasks that do not fit other types.",
48
- "- Use style for formatting-only changes with no logic change.",
49
- "",
50
- "Scoping guidance:",
51
- "- Add a scope only when the affected area is clear and meaningful.",
52
- "- Use a short scope such as api, auth, ui, cli, db, config, or parser.",
53
- "- Omit the scope if there is no obvious single area.",
54
- "",
55
- "Diff interpretation guidance:",
56
- "- Infer the primary purpose of the change from the overall diff, not isolated lines.",
57
- "- Ignore unimportant noise such as formatting churn unless formatting is the main change.",
58
- "- If multiple changes exist, prioritize the dominant one.",
59
- truncated
60
- ? "- The diff was truncated, so base the message on the visible changes only and avoid overclaiming."
61
- : "",
62
- "",
63
- "Now analyze this diff and return only the JSON object.",
9
+ "Write a professional git commit message from this diff.",
10
+ "Return only the commit message.",
11
+ "No preface, no commentary, no markdown.",
12
+ "",
13
+ "Format:",
14
+ "<type>(optional-scope)!: <description>",
15
+ "",
16
+ "<body paragraph>",
17
+ "",
18
+ `Allowed types: ${types}.`,
19
+ "Use conventional commits.",
20
+ "Use imperative mood.",
21
+ "Use lower-case description.",
22
+ "No trailing period in the subject.",
23
+ "Subject under 72 characters.",
24
+ "Body required, 2 to 4 sentences.",
25
+ "No footer.",
26
+ truncated ? "The diff is truncated. Only describe visible changes." : "",
64
27
  "",
65
28
  "Diff:",
66
29
  diff,
@@ -69,38 +32,107 @@ export function buildPrompt(diff, { truncated }) {
69
32
  .join("\n");
70
33
  }
71
34
 
72
- export function cleanCommitMessage(message) {
73
- let cleaned = message.trim();
74
- cleaned = cleaned.replace(/^```[a-z]*\n?/i, "").replace(/```$/, "");
75
- cleaned = cleaned.replace(/^commit message:\s*/i, "");
76
- cleaned = cleaned.replace(/^"|"$/g, "");
77
- cleaned = cleaned.replace(/^'|'$/g, "");
78
- cleaned = cleaned.replace(/\r\n/g, "\n");
79
- return cleaned.trim();
35
+ function buildClaudePrompt(diff, { truncated, types }) {
36
+ return [
37
+ "You are writing a professional git commit message for a maintainer.",
38
+ "Return only the commit message.",
39
+ "",
40
+ "Format:",
41
+ "<type>(optional-scope)!: <description>",
42
+ "",
43
+ "<body paragraph>",
44
+ "",
45
+ `Allowed types: ${types}.`,
46
+ "Use conventional commits.",
47
+ "Use imperative mood.",
48
+ "Use lower-case description.",
49
+ "No trailing period in the subject.",
50
+ "Subject under 72 characters.",
51
+ "Body required, 2 to 4 sentences.",
52
+ "No footer.",
53
+ "Focus on the main intent of the change.",
54
+ truncated ? "The diff is truncated. Only describe visible changes." : "",
55
+ "",
56
+ "Diff:",
57
+ diff,
58
+ ]
59
+ .filter(Boolean)
60
+ .join("\n");
80
61
  }
81
62
 
82
- export function parseCommitMessage(raw) {
83
- const cleaned = cleanCommitMessage(raw);
84
- try {
85
- const parsed = JSON.parse(cleaned);
86
- const subject = parsed?.subject?.trim();
87
- const body = parsed?.body?.trim();
88
- if (!subject || !body) return "";
89
- return `${subject}\n\n${body}`.trim();
90
- } catch {
91
- const start = cleaned.indexOf("{");
92
- const end = cleaned.lastIndexOf("}");
93
- if (start >= 0 && end > start) {
94
- try {
95
- const parsed = JSON.parse(cleaned.slice(start, end + 1));
96
- const subject = parsed?.subject?.trim();
97
- const body = parsed?.body?.trim();
98
- if (!subject || !body) return "";
99
- return `${subject}\n\n${body}`.trim();
100
- } catch {
101
- return "";
102
- }
103
- }
104
- return "";
63
+ function buildLlamaPrompt(diff, { truncated, types }) {
64
+ return [
65
+ "Write a git commit message.",
66
+ "",
67
+ "ONLY output the commit message.",
68
+ "NO explanations.",
69
+ "NO summaries.",
70
+ "NO bullet points.",
71
+ "NO numbered lists.",
72
+ "NO markdown.",
73
+ "NO extra text.",
74
+ "",
75
+ "Format EXACTLY:",
76
+ "<type>(optional-scope)!: <description>",
77
+ "",
78
+ "<body paragraph>",
79
+ "",
80
+ `Allowed types: ${types}.`,
81
+ "Use conventional commits.",
82
+ "Subject: imperative, lower-case, no period, max 72 chars.",
83
+ "Body: 2-3 sentences.",
84
+ "No footer.",
85
+ "",
86
+ truncated ? "Diff is truncated. Do not guess missing parts." : "",
87
+ "",
88
+ "BAD OUTPUT:",
89
+ "The provided code appears to be...",
90
+ "Here are some observations:",
91
+ "1. Updated function...",
92
+ "",
93
+ "GOOD OUTPUT:",
94
+ "feat(api): add structured json response handling",
95
+ "",
96
+ "Update API providers to support structured JSON responses and improve",
97
+ "response parsing. Adjust request configuration to ensure consistent",
98
+ "output formatting across providers.",
99
+ "",
100
+ "Now output ONLY the commit message.",
101
+ "",
102
+ "Diff:",
103
+ diff,
104
+ ]
105
+ .filter(Boolean)
106
+ .join("\n");
107
+ }
108
+
109
+ export function buildPrompt(provider, diff, { truncated }) {
110
+ const types = buildCommonTypes();
111
+ const p = (provider ?? "").toLowerCase();
112
+
113
+ if (p === "claude") return buildClaudePrompt(diff, { truncated, types });
114
+ if (p === "openai") return buildOpenAIPrompt(diff, { truncated, types });
115
+ if (p === "ollama" || p.includes("llama")) {
116
+ return buildLlamaPrompt(diff, { truncated, types });
105
117
  }
118
+
119
+ return buildOpenAIPrompt(diff, { truncated, types });
120
+ }
121
+
122
+ export function buildSystemPrompt(provider) {
123
+ const base =
124
+ "You are a senior developer assistant that writes clear, conventional commit messages.";
125
+ const p = (provider ?? "").toLowerCase();
126
+
127
+ if (p === "ollama" || p.includes("llama")) {
128
+ return [
129
+ base,
130
+ "Follow instructions strictly.",
131
+ "Return only the commit message.",
132
+ "No markdown, no bullet lists, no numbering.",
133
+ "Use exactly one blank line between subject and body.",
134
+ ].join("\n");
135
+ }
136
+
137
+ return base;
106
138
  }
@@ -1,34 +1,64 @@
1
1
  export async function callClaude({ system, user, model }) {
2
- const apiKey =
3
- process.env.ANTHROPIC_API_KEY || process.env.CLAUDE_API_KEY || "";
4
- if (!apiKey) {
5
- throw new Error(
6
- "Missing ANTHROPIC_API_KEY or CLAUDE_API_KEY for Claude provider."
7
- );
8
- }
2
+ const apiKey =
3
+ process.env.ANTHROPIC_API_KEY || process.env.CLAUDE_API_KEY || "";
4
+ if (!apiKey) {
5
+ throw new Error(
6
+ "Missing ANTHROPIC_API_KEY or CLAUDE_API_KEY for Claude provider."
7
+ );
8
+ }
9
9
 
10
- const response = await fetch("https://api.anthropic.com/v1/messages", {
11
- method: "POST",
12
- headers: {
13
- "content-type": "application/json",
14
- "x-api-key": apiKey,
15
- "anthropic-version": "2023-06-01",
16
- },
17
- body: JSON.stringify({
18
- model,
19
- max_tokens: 128,
20
- temperature: 0.2,
21
- system,
22
- messages: [{ role: "user", content: user }],
23
- }),
24
- });
10
+ const response = await fetch("https://api.anthropic.com/v1/messages", {
11
+ method: "POST",
12
+ headers: {
13
+ "content-type": "application/json",
14
+ "x-api-key": apiKey,
15
+ "anthropic-version": "2023-06-01",
16
+ },
17
+ body: JSON.stringify({
18
+ model,
19
+ max_tokens: 256,
20
+ temperature: 0.2,
21
+ system,
22
+ messages: [{ role: "user", content: user }],
23
+ }),
24
+ });
25
25
 
26
- if (!response.ok) {
27
- const text = await response.text();
28
- throw new Error(`Claude API error: ${response.status} ${text}`);
29
- }
26
+ if (!response.ok) {
27
+ const text = await response.text();
28
+ throw new Error(`Claude API error: ${response.status} ${text}`);
29
+ }
30
30
 
31
- const data = await response.json();
32
- const content = data?.content?.[0]?.text ?? "";
33
- return content;
31
+ const data = await response.json();
32
+ const content = data?.content?.[0]?.text ?? "";
33
+ return content;
34
+ }
35
+
36
+ export async function fetchClaudeModels() {
37
+ const apiKey =
38
+ process.env.ANTHROPIC_API_KEY || process.env.CLAUDE_API_KEY || "";
39
+ if (!apiKey) {
40
+ throw new Error(
41
+ "Missing ANTHROPIC_API_KEY or CLAUDE_API_KEY for Claude provider."
42
+ );
43
+ }
44
+
45
+ const response = await fetch("https://api.anthropic.com/v1/models", {
46
+ method: "GET",
47
+ headers: {
48
+ "content-type": "application/json",
49
+ "x-api-key": apiKey,
50
+ "anthropic-version": "2023-06-01",
51
+ },
52
+ });
53
+
54
+ if (!response.ok) {
55
+ const text = await response.text();
56
+ throw new Error(`Claude API error: ${response.status} ${text}`);
57
+ }
58
+
59
+ const data = await response.json();
60
+ const models = (data?.data || [])
61
+ .map((model) => model?.id)
62
+ .filter(Boolean);
63
+ return Array.from(new Set(models)).sort();
34
64
  }
@@ -1,49 +1,108 @@
1
1
  export async function callOllama({ system, user, model, host }) {
2
- const response = await fetch(`${host}/api/chat`, {
3
- method: "POST",
4
- headers: {
5
- "content-type": "application/json",
6
- },
7
- body: JSON.stringify({
8
- model,
9
- format: "json",
10
- stream: false,
11
- options: {
12
- temperature: 0.2,
13
- num_predict: 128,
14
- },
15
- messages: [
16
- { role: "system", content: system },
17
- { role: "user", content: user },
18
- ],
19
- }),
20
- });
21
-
22
- if (!response.ok) {
23
- const text = await response.text();
24
- throw new Error(`Ollama API error: ${response.status} ${text}`);
25
- }
26
-
27
- const data = await response.json();
28
- return data?.message?.content ?? data?.response ?? "";
2
+ const response = await fetch(`${host}/api/chat`, {
3
+ method: "POST",
4
+ headers: {
5
+ "content-type": "application/json",
6
+ },
7
+ body: JSON.stringify({
8
+ model,
9
+ stream: false,
10
+ options: {
11
+ temperature: 0.1,
12
+ num_predict: 128,
13
+ },
14
+ messages: [
15
+ { role: "system", content: system },
16
+ { role: "user", content: user },
17
+ ],
18
+ }),
19
+ });
20
+
21
+ if (!response.ok) {
22
+ const text = await response.text();
23
+ throw new Error(`Ollama API error: ${response.status} ${text}`);
24
+ }
25
+
26
+ const data = await response.json();
27
+ return data?.message?.content ?? data?.response ?? "";
28
+ }
29
+
30
+ export async function streamOllama({ system, user, model, host, onToken }) {
31
+ const response = await fetch(`${host}/api/chat`, {
32
+ method: "POST",
33
+ headers: {
34
+ "content-type": "application/json",
35
+ },
36
+ body: JSON.stringify({
37
+ model,
38
+ stream: true,
39
+ options: {
40
+ temperature: 0.1,
41
+ num_predict: 128,
42
+ },
43
+ messages: [
44
+ { role: "system", content: system },
45
+ { role: "user", content: user },
46
+ ],
47
+ }),
48
+ });
49
+
50
+ if (!response.ok) {
51
+ const text = await response.text();
52
+ throw new Error(`Ollama API error: ${response.status} ${text}`);
53
+ }
54
+
55
+ if (!response.body) {
56
+ throw new Error("Ollama API error: missing response body.");
57
+ }
58
+
59
+ const reader = response.body.getReader();
60
+ const decoder = new TextDecoder();
61
+ let buffer = "";
62
+ let full = "";
63
+
64
+ while (true) {
65
+ const { value, done } = await reader.read();
66
+ if (done) break;
67
+ buffer += decoder.decode(value, { stream: true });
68
+ const lines = buffer.split("\n");
69
+ buffer = lines.pop() ?? "";
70
+
71
+ for (const line of lines) {
72
+ const trimmed = line.trim();
73
+ if (!trimmed) continue;
74
+ try {
75
+ const payload = JSON.parse(trimmed);
76
+ const content = payload?.message?.content ?? payload?.response ?? "";
77
+ if (content) {
78
+ full += content;
79
+ onToken(content);
80
+ }
81
+ } catch {
82
+ continue;
83
+ }
84
+ }
85
+ }
86
+
87
+ return full;
29
88
  }
30
89
 
31
90
  export async function fetchOllamaModels(host) {
32
- const response = await fetch(`${host}/api/tags`, {
33
- method: "GET",
34
- headers: {
35
- "content-type": "application/json",
36
- },
37
- });
38
-
39
- if (!response.ok) {
40
- const text = await response.text();
41
- throw new Error(`Ollama API error: ${response.status} ${text}`);
42
- }
43
-
44
- const data = await response.json();
45
- const models = (data?.models || [])
46
- .map((model) => model?.name)
47
- .filter(Boolean);
48
- return Array.from(new Set(models)).sort();
91
+ const response = await fetch(`${host}/api/tags`, {
92
+ method: "GET",
93
+ headers: {
94
+ "content-type": "application/json",
95
+ },
96
+ });
97
+
98
+ if (!response.ok) {
99
+ const text = await response.text();
100
+ throw new Error(`Ollama API error: ${response.status} ${text}`);
101
+ }
102
+
103
+ const data = await response.json();
104
+ const models = (data?.models || [])
105
+ .map((model) => model?.name)
106
+ .filter(Boolean);
107
+ return Array.from(new Set(models)).sort();
49
108
  }
@@ -1,33 +1,101 @@
1
1
  export async function callOpenAI({ system, user, model }) {
2
- const apiKey = process.env.OPENAI_API_KEY || "";
3
- if (!apiKey) {
4
- throw new Error("Missing OPENAI_API_KEY for OpenAI provider.");
5
- }
6
-
7
- const response = await fetch("https://api.openai.com/v1/chat/completions", {
8
- method: "POST",
9
- headers: {
10
- "content-type": "application/json",
11
- authorization: `Bearer ${apiKey}`,
12
- },
13
- body: JSON.stringify({
14
- model,
15
- temperature: 0.2,
16
- max_tokens: 128,
17
- response_format: { type: "json_object" },
18
- messages: [
19
- { role: "system", content: system },
20
- { role: "user", content: user },
21
- ],
22
- }),
23
- });
24
-
25
- if (!response.ok) {
26
- const text = await response.text();
27
- throw new Error(`OpenAI API error: ${response.status} ${text}`);
28
- }
29
-
30
- const data = await response.json();
31
- const content = data?.choices?.[0]?.message?.content ?? "";
32
- return content;
2
+ const apiKey = process.env.OPENAI_API_KEY || "";
3
+ if (!apiKey) {
4
+ throw new Error("Missing OPENAI_API_KEY for OpenAI provider.");
5
+ }
6
+
7
+ const response = await fetch("https://api.openai.com/v1/chat/completions", {
8
+ method: "POST",
9
+ headers: {
10
+ "content-type": "application/json",
11
+ authorization: `Bearer ${apiKey}`,
12
+ },
13
+ body: JSON.stringify({
14
+ model,
15
+ temperature: 0.2,
16
+ max_tokens: 256,
17
+ messages: [
18
+ { role: "system", content: system },
19
+ { role: "user", content: user },
20
+ ],
21
+ }),
22
+ });
23
+
24
+ if (!response.ok) {
25
+ const text = await response.text();
26
+ throw new Error(`OpenAI API error: ${response.status} ${text}`);
27
+ }
28
+
29
+ const data = await response.json();
30
+ const content = data?.choices?.[0]?.message?.content ?? "";
31
+ return content;
32
+ }
33
+
34
+ export async function streamOpenAI({ system, user, model, onToken }) {
35
+ const apiKey = process.env.OPENAI_API_KEY || "";
36
+ if (!apiKey) {
37
+ throw new Error("Missing OPENAI_API_KEY for OpenAI provider.");
38
+ }
39
+
40
+ const response = await fetch("https://api.openai.com/v1/chat/completions", {
41
+ method: "POST",
42
+ headers: {
43
+ "content-type": "application/json",
44
+ authorization: `Bearer ${apiKey}`,
45
+ },
46
+ body: JSON.stringify({
47
+ model,
48
+ temperature: 0.2,
49
+ max_tokens: 256,
50
+ stream: true,
51
+ messages: [
52
+ { role: "system", content: system },
53
+ { role: "user", content: user },
54
+ ],
55
+ }),
56
+ });
57
+
58
+ if (!response.ok) {
59
+ const text = await response.text();
60
+ throw new Error(`OpenAI API error: ${response.status} ${text}`);
61
+ }
62
+
63
+ if (!response.body) {
64
+ throw new Error("OpenAI API error: missing response body.");
65
+ }
66
+
67
+ const reader = response.body.getReader();
68
+ const decoder = new TextDecoder();
69
+ let buffer = "";
70
+ let full = "";
71
+
72
+ while (true) {
73
+ const { value, done } = await reader.read();
74
+ if (done) break;
75
+ buffer += decoder.decode(value, { stream: true });
76
+ const lines = buffer.split("\n");
77
+ buffer = lines.pop() ?? "";
78
+
79
+ for (const line of lines) {
80
+ const trimmed = line.trim();
81
+ if (!trimmed) continue;
82
+ if (trimmed === "data: [DONE]") {
83
+ break;
84
+ }
85
+ if (!trimmed.startsWith("data: ")) continue;
86
+
87
+ try {
88
+ const payload = JSON.parse(trimmed.slice(6));
89
+ const content = payload?.choices?.[0]?.delta?.content ?? "";
90
+ if (content) {
91
+ full += content;
92
+ onToken(content);
93
+ }
94
+ } catch {
95
+ continue;
96
+ }
97
+ }
98
+ }
99
+
100
+ return full;
33
101
  }