@frigopedro/committer 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,114 @@
1
+ # AI Git Committer
2
+
3
+ I was tired of typing out commit messages, so I wrote this tool.
4
+ I know a bunch of other tools exist, but I had a hard time with them.
5
+
6
+ It's a personal tool — don't expect it to be good, because it's not :/
7
+
8
+ ## Install
9
+
10
+ ```bash
11
+ npm install -g .
12
+ ```
13
+
14
+ Or for local development:
15
+
16
+ ```bash
17
+ npm link
18
+ ```
19
+
20
+ ## Usage
21
+
22
+ ```bash
23
+ committer
24
+ ```
25
+
26
+ The tool reads your git diff, proposes a conventional commit message, then
27
+ asks you to (y) commit, (n) abort, or (r) regenerate.
28
+
29
+ Commit messages include a required body with a broader summary of the changes.
30
+
31
+ On first run, committer creates a `.committer` config file in your home
32
+ directory and walks you through provider and model selection.
33
+ Use `committer --init` to re-run onboarding and rewrite the config.
34
+
35
+ ### Options
36
+
37
+ ```bash
38
+ committer --provider ollama --model llama3.1
39
+ committer --provider openai --model gpt-4o-mini
40
+ committer --staged
41
+ committer --all
42
+ committer --init
43
+ ```
44
+
45
+ If you run `committer` with `--provider ollama` and no model specified, the
46
+ CLI will list your local Ollama models and prompt you to pick one.
47
+
48
+ ### Environment variables
49
+
50
+ - `AI_COMMIT_PROVIDER`: `claude`, `ollama`, or `openai`
51
+ - `AI_COMMIT_MODEL`: override model name
52
+ - `AI_COMMIT_MAX_DIFF_CHARS`: trim diff length
53
+ - `ANTHROPIC_API_KEY` or `CLAUDE_API_KEY`: Claude API key
54
+ - `OPENAI_API_KEY`: OpenAI API key
55
+ - `AI_COMMIT_OLLAMA_HOST`: Ollama host (default `http://localhost:11434`)
56
+
57
+ ### .committer config
58
+
59
+ The `.committer` file is a JSON config stored at `~/.committer`.
60
+
61
+ Example:
62
+
63
+ ```json
64
+ {
65
+ "version": 1,
66
+ "provider": "ollama",
67
+ "model": "llama3.1",
68
+ "diffMode": "auto",
69
+ "maxDiffChars": 12000
70
+ }
71
+ ```
72
+
73
+ ### Commit format
74
+
75
+ Commit messages are generated in the format:
76
+
77
+ ```
78
+ <type>[optional scope]: <description>
79
+
80
+ <body>
81
+ ```
82
+
83
+ The body is always present and provides a multi-sentence summary of most
84
+ changed files.
85
+
86
+ ## Claude setup
87
+
88
+ Set your Claude API key before running:
89
+
90
+ ```bash
91
+ export ANTHROPIC_API_KEY=your_key_here
92
+ ```
93
+
94
+ ## ChatGPT setup
95
+
96
+ Set your OpenAI API key before running:
97
+
98
+ ```bash
99
+ export OPENAI_API_KEY=your_key_here
100
+ ```
101
+
102
+ ## Local Llama setup
103
+
104
+ Install and run Ollama, then pull a model:
105
+
106
+ ```bash
107
+ ollama pull llama3.1
108
+ ```
109
+
110
+ Run the CLI with:
111
+
112
+ ```bash
113
+ committer --provider ollama
114
+ ```
package/index.js ADDED
@@ -0,0 +1,231 @@
1
+ #!/usr/bin/env node
2
+
3
+ import { createInterface } from "node:readline/promises";
4
+ import { stdin as input, stdout as output } from "node:process";
5
+
6
+ import { parseArgs } from "./src/args.js";
7
+ import {
8
+ DEFAULT_CLAUDE_MODEL,
9
+ DEFAULT_MAX_DIFF_CHARS,
10
+ DEFAULT_OLLAMA_MODEL,
11
+ DEFAULT_OPENAI_MODEL,
12
+ DEFAULT_PROVIDER,
13
+ EXIT_USER_ABORT,
14
+ } from "./src/constants.js";
15
+ import { getConfigPath, readConfig } from "./src/config.js";
16
+ import { ensureGitRepo, getDiff } from "./src/git.js";
17
+ import { truncateDiff } from "./src/diff.js";
18
+ import { generateCommitMessage } from "./src/ai.js";
19
+ import { runOnboarding } from "./src/onboarding.js";
20
+ import { selectOllamaModel } from "./src/ollama.js";
21
+ import { writeLine } from "./src/ui.js";
22
+ import { commitWithMessage } from "./src/commit.js";
23
+ import { hasEmoji, validateCommitMessage } from "./src/validate.js";
24
+
25
// Canonical provider identifiers accepted after normalizeProvider().
const PROVIDERS = ["claude", "ollama", "openai"];
26
+
27
// Map legacy/alias provider names onto their canonical identifiers;
// anything unrecognized passes through unchanged.
function normalizeProvider(provider) {
  return provider === "chatgpt" ? "openai" : provider;
}
31
+
32
// Print CLI usage, flags, and the recognized environment variables to stdout.
function printHelp() {
  const lines = [
    "committer - generate conventional commit messages with AI",
    "",
    "Usage:",
    " committer [--provider claude|ollama|openai] [--model name] [--staged] [--all]",
    "",
    "Options:",
    " --provider AI provider (default: claude)",
    " --model Override model name",
    " --staged Use staged diff only",
    " --all Combine staged + unstaged diff",
    " --max-diff-chars Trim diff to this many chars (default: 12000)",
    " --init Run onboarding and write ~/.committer",
    " --help Show this help",
    "",
    "Environment:",
    " AI_COMMIT_PROVIDER claude | ollama | openai",
    " AI_COMMIT_MODEL model override",
    " AI_COMMIT_MAX_DIFF_CHARS trim diff length",
    " ANTHROPIC_API_KEY Claude API key",
    " CLAUDE_API_KEY Claude API key (alias)",
    " OPENAI_API_KEY OpenAI API key",
    " AI_COMMIT_OLLAMA_HOST Ollama host (default: http://localhost:11434)",
  ];

  writeLine(lines.join("\n"));
}
60
+
61
/**
 * Decide which model to use. Precedence:
 *   explicit arg/env value > stored config (when it targets the same
 *   provider) > the provider's built-in default.
 */
function resolveModel({ provider, modelFromArgs, storedConfig }) {
  if (modelFromArgs) return modelFromArgs;

  const storedMatches =
    storedConfig?.provider === provider && storedConfig?.model;
  if (storedMatches) return storedConfig.model;

  switch (provider) {
    case "ollama":
      return DEFAULT_OLLAMA_MODEL;
    case "openai":
      return DEFAULT_OPENAI_MODEL;
    default:
      return DEFAULT_CLAUDE_MODEL;
  }
}
72
+
73
/**
 * CLI entry point: resolve config/provider/model, read the git diff, then
 * loop generating commit messages until the user commits or aborts.
 *
 * Fix: removed a stray debug `console.log(message)` that printed the raw,
 * not-yet-validated message before the validation checks, duplicating the
 * "Suggested commit message" output on every iteration.
 */
async function main() {
  const args = parseArgs(process.argv.slice(2));
  if (args.has("help")) {
    printHelp();
    process.exit(0);
  }

  // `--init` may run outside a repo so onboarding can still write config.
  let hasRepo = true;
  try {
    ensureGitRepo();
  } catch (error) {
    hasRepo = false;
    if (!args.has("init")) {
      writeLine(`❌ ${error.message}`);
      process.exit(1);
    }
  }

  const configPath = getConfigPath();
  const ollamaHost = process.env.AI_COMMIT_OLLAMA_HOST || "http://localhost:11434";
  const rl = createInterface({ input, output });

  // A corrupt config is reported but treated as missing (onboarding re-runs).
  let storedConfig = null;
  try {
    storedConfig = await readConfig(configPath);
  } catch (error) {
    writeLine(`⚠️ ${error.message}`);
  }

  if (!storedConfig || args.has("init")) {
    storedConfig = await runOnboarding({ rl, configPath, ollamaHost });
    if (!hasRepo) {
      rl.close();
      process.exit(0);
    }
  }

  // Provider precedence: CLI flag > env var > stored config > default.
  const providerInput =
    args.get("provider") ||
    process.env.AI_COMMIT_PROVIDER ||
    storedConfig?.provider ||
    DEFAULT_PROVIDER;
  const provider = normalizeProvider(providerInput);

  if (!PROVIDERS.includes(provider)) {
    writeLine("❌ Provider must be claude, openai, or ollama.");
    process.exit(1);
  }

  const modelFromArgs = args.get("model") || process.env.AI_COMMIT_MODEL || null;
  let model = resolveModel({
    provider,
    modelFromArgs,
    storedConfig,
  });

  // For Ollama with no explicit model and no matching config, let the user
  // pick from the locally installed models; fall back to the default on error.
  if (provider === "ollama" && !modelFromArgs) {
    const configMatches = storedConfig?.provider === "ollama" && storedConfig?.model;
    if (!configMatches) {
      try {
        model = await selectOllamaModel({ host: ollamaHost, rl });
      } catch (error) {
        writeLine(`⚠️ Ollama model selection failed: ${error.message}`);
        writeLine(`↩️ Falling back to ${DEFAULT_OLLAMA_MODEL}.`);
        model = DEFAULT_OLLAMA_MODEL;
      }
    }
  }

  // Diff budget: flag > env > config > default; invalid values fall back.
  let maxDiffChars = Number.parseInt(
    args.get("max-diff-chars") ||
      process.env.AI_COMMIT_MAX_DIFF_CHARS ||
      storedConfig?.maxDiffChars ||
      DEFAULT_MAX_DIFF_CHARS,
    10
  );
  if (Number.isNaN(maxDiffChars) || maxDiffChars <= 0) {
    maxDiffChars = DEFAULT_MAX_DIFF_CHARS;
  }

  const diffModeFromArgs = args.get("staged")
    ? "staged"
    : args.get("all")
      ? "all"
      : null;
  const diffMode = diffModeFromArgs || storedConfig?.diffMode || "auto";

  const diff = getDiff(diffMode);
  if (!diff.trim()) {
    writeLine("🟡 No changes detected in git diff.");
    rl.close();
    process.exit(0);
  }

  const { diff: trimmedDiff, truncated } = truncateDiff(diff, maxDiffChars);
  let message = "";

  try {
    // Regenerate until a message passes validation and the user accepts it.
    while (true) {
      writeLine("⏳ Loading... generating commit message.");
      message = await generateCommitMessage({
        provider,
        model,
        diff: trimmedDiff,
        truncated,
        host: ollamaHost,
      });

      if (!message) {
        writeLine("⚠️ AI response was invalid. Regenerating...");
        continue;
      }

      if (hasEmoji(message)) {
        writeLine("⚠️ Commit message contains emoji. Regenerating...");
        continue;
      }

      const validation = validateCommitMessage(message);
      if (!validation.valid) {
        writeLine(`⚠️ ${validation.reason} Regenerating...`);
        continue;
      }

      writeLine(`\n✨ Suggested commit message:\n${message}\n`);

      const answer = await rl.question(
        "Use (y) to commit, (n) to abort, (r) to regenerate: "
      );
      const choice = answer.trim().toLowerCase();

      if (choice === "y") {
        writeLine("✅ Committing...");
        const status = await commitWithMessage(message);
        process.exit(status);
      }

      if (choice === "n") {
        writeLine("🛑 Commit aborted.");
        process.exit(EXIT_USER_ABORT);
      }

      if (choice === "r") {
        writeLine("🔁 Regenerating commit message...");
        continue;
      }

      writeLine("Please enter y, n, or r.");
    }
  } catch (error) {
    writeLine(`❌ Error: ${error.message}`);
    process.exit(1);
  } finally {
    rl.close();
  }
}
230
+
231
// Fix: `main()` was invoked as a floating promise, so errors thrown before
// its internal try/catch (e.g. from getDiff) became unhandled rejections.
// Catch them here and exit with a proper error message and status.
main().catch((error) => {
  writeLine(`❌ Error: ${error.message}`);
  process.exit(1);
});
package/package.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "name": "@frigopedro/committer",
3
+ "version": "0.1.0",
4
+ "description": "Generate conventional commit messages using AI",
5
+ "type": "module",
6
+ "bin": {
7
+ "committer": "index.js"
8
+ },
9
+ "engines": {
10
+ "node": ">=18"
11
+ },
12
+ "license": "MIT",
13
+ "keywords": [
14
+ "git",
15
+ "commit",
16
+ "conventional-commits",
17
+ "ai",
18
+ "cli"
19
+ ],
20
+ "files": [
21
+ "index.js",
22
+ "src",
23
+ "README.md"
24
+ ],
25
+ "publishConfig": {
26
+ "access": "public"
27
+ }
28
+ }
package/src/ai.js ADDED
@@ -0,0 +1,27 @@
1
+ import { buildPrompt, parseCommitMessage } from "./prompt.js";
2
+ import { callClaude } from "./providers/claude.js";
3
+ import { callOllama } from "./providers/ollama.js";
4
+ import { callOpenAI } from "./providers/openai.js";
5
+
6
/**
 * Generate a commit message for `diff` via the configured provider.
 * Dispatches to the Ollama, OpenAI, or Claude client (Claude is the
 * fallback) and returns "" when the raw response cannot be parsed into a
 * subject/body pair.
 */
export async function generateCommitMessage({
  provider,
  model,
  diff,
  truncated,
  host,
}) {
  const system =
    "You are a senior developer assistant that writes clear, conventional commit messages.";
  const user = buildPrompt(diff, { truncated });

  const invoke =
    provider === "ollama"
      ? () => callOllama({ system, user, model, host })
      : provider === "openai"
        ? () => callOpenAI({ system, user, model })
        : () => callClaude({ system, user, model });

  const raw = await invoke();
  return parseCommitMessage(raw);
}
package/src/args.js ADDED
@@ -0,0 +1,16 @@
1
/**
 * Parse `--flag` / `--flag value` style arguments into a Map.
 * A flag followed by a non-empty, non-flag token consumes it as its value;
 * otherwise the flag maps to `true`. Bare positional tokens are ignored.
 */
export function parseArgs(argv) {
  const parsed = new Map();
  let index = 0;

  while (index < argv.length) {
    const token = argv[index];
    index += 1;
    if (!token.startsWith("--")) continue;

    const name = token.slice(2);
    const value = argv[index];
    if (value && !value.startsWith("--")) {
      parsed.set(name, value);
      index += 1;
    } else {
      parsed.set(name, true);
    }
  }

  return parsed;
}
package/src/commit.js ADDED
@@ -0,0 +1,19 @@
1
+ import { spawnSync } from "node:child_process";
2
+ import { promises as fs } from "node:fs";
3
+ import { tmpdir } from "node:os";
4
+ import { join } from "node:path";
5
+ import { randomUUID } from "node:crypto";
6
+
7
/**
 * Run `git commit` with `message`, passing it via a temp file (-F) so
 * multi-line bodies survive intact. Inherits stdio so git's own output is
 * shown. Returns git's exit status (1 when no status is reported); the
 * temp file is always removed.
 */
export async function commitWithMessage(message) {
  const messageFile = join(tmpdir(), `committer-${randomUUID()}.txt`);
  await fs.writeFile(messageFile, `${message}\n`, "utf8");

  try {
    const { status } = spawnSync("git", ["commit", "-F", messageFile], {
      stdio: "inherit",
    });
    return status ?? 1;
  } finally {
    await fs.rm(messageFile, { force: true });
  }
}
package/src/config.js ADDED
@@ -0,0 +1,24 @@
1
+ import { promises as fs } from "node:fs";
2
+ import { resolve } from "node:path";
3
+ import { homedir } from "node:os";
4
+
5
// Absolute path of the user-level config file: ~/.committer
export function getConfigPath() {
  return resolve(homedir(), ".committer");
}
8
+
9
/**
 * Load and parse the JSON config at `configPath`.
 * Returns null when the file does not exist or does not hold an object;
 * throws a descriptive Error for any other read or parse failure.
 */
export async function readConfig(configPath) {
  try {
    const parsed = JSON.parse(await fs.readFile(configPath, "utf8"));
    return parsed && typeof parsed === "object" ? parsed : null;
  } catch (error) {
    // Missing file is a normal first-run condition, not an error.
    if (error.code === "ENOENT") return null;
    throw new Error(`Could not read ${configPath}: ${error.message}`);
  }
}
20
+
21
/**
 * Serialize `config` as pretty-printed (2-space) JSON with a trailing
 * newline and write it to `configPath`.
 */
export async function writeConfig(configPath, config) {
  const json = JSON.stringify(config, null, 2);
  await fs.writeFile(configPath, `${json}\n`, "utf8");
}
@@ -0,0 +1,22 @@
1
// Conventional Commit <type> values offered in the prompt and accepted by
// the validator's subject regex.
export const COMMIT_TYPES = [
  "feat",
  "fix",
  "docs",
  "style",
  "refactor",
  "perf",
  "test",
  "build",
  "ci",
  "chore",
  "revert",
];

// Character budget for the diff sent to the model (see truncateDiff).
export const DEFAULT_MAX_DIFF_CHARS = 12000;
// Default model for each provider.
export const DEFAULT_CLAUDE_MODEL = "claude-3-5-sonnet-latest";
export const DEFAULT_OLLAMA_MODEL = "llama3.1";
export const DEFAULT_OPENAI_MODEL = "gpt-4o-mini";
// Provider used when neither flags, env vars, nor stored config choose one.
export const DEFAULT_PROVIDER = "claude";

// Process exit code when the user declines the suggested message.
export const EXIT_USER_ABORT = 2;
// Schema version stamped into the ~/.committer config file.
export const CONFIG_VERSION = 1;
package/src/diff.js ADDED
@@ -0,0 +1,10 @@
1
/**
 * Cap `diff` at roughly `maxChars` characters by keeping the leading and
 * trailing halves with a truncation marker between them.
 * Returns { diff, truncated }.
 */
export function truncateDiff(diff, maxChars) {
  if (diff.length <= maxChars) {
    return { diff, truncated: false };
  }

  const half = Math.floor(maxChars / 2);
  const head = diff.slice(0, half);
  // Use an explicit start index: `slice(-0)` would return the whole string.
  const tail = diff.slice(diff.length - half);

  return {
    diff: `${head}\n\n...diff truncated...\n\n${tail}`,
    truncated: true,
  };
}
package/src/git.js ADDED
@@ -0,0 +1,33 @@
1
+ import { execSync } from "node:child_process";
2
+
3
// Run `git <args>` synchronously and return stdout with trailing
// whitespace removed. stderr is piped (captured), stdin is ignored;
// execSync throws when git exits non-zero.
export function runGit(args) {
  const stdout = execSync(`git ${args}`, {
    encoding: "utf8",
    stdio: ["ignore", "pipe", "pipe"],
  });
  return stdout.trimEnd();
}
9
+
10
/**
 * Throw a friendly error when the current directory is not inside a git
 * work tree.
 *
 * Fix: the caught git error was discarded (unused `error` binding); it is
 * now preserved as the `cause` of the thrown Error so the underlying git
 * failure remains inspectable.
 */
export function ensureGitRepo() {
  try {
    runGit("rev-parse --show-toplevel");
  } catch (error) {
    throw new Error("Not inside a git repository.", { cause: error });
  }
}
17
+
18
// Absolute path of the repository's top-level directory.
export function getRepoRoot() {
  return runGit("rev-parse --show-toplevel");
}
21
+
22
/**
 * Collect diff text for the requested mode:
 *  - "staged": staged changes only
 *  - "all":    staged plus unstaged, separated by a blank line
 *  - default ("auto"): staged when present, otherwise unstaged
 */
export function getDiff(mode) {
  const staged = runGit("diff --staged");
  const unstaged = runGit("diff");

  switch (mode) {
    case "staged":
      return staged;
    case "all":
      return [staged, unstaged].filter(Boolean).join("\n\n");
    default:
      return staged || unstaged;
  }
}
package/src/ollama.js ADDED
@@ -0,0 +1,41 @@
1
+ import { fetchOllamaModels } from "./providers/ollama.js";
2
+ import { writeLine } from "./ui.js";
3
+ import { DEFAULT_OLLAMA_MODEL } from "./constants.js";
4
+
5
/**
 * List locally installed Ollama models and let the user pick one.
 * Accepts a 1-based index or an exact model name; empty or invalid input
 * falls back to the default (llama3.1 when installed, otherwise the first
 * listed model). Throws when no models are installed.
 */
export async function selectOllamaModel({ host, rl }) {
  const models = await fetchOllamaModels(host);
  if (models.length === 0) {
    throw new Error(
      "No Ollama models found. Run 'ollama pull <model>' first."
    );
  }

  let defaultIndex = models.indexOf(DEFAULT_OLLAMA_MODEL);
  if (defaultIndex < 0) defaultIndex = 0;

  writeLine("\n📦 Available Ollama models:");
  models.forEach((name, i) => {
    writeLine(`${i + 1}) ${name}${i === defaultIndex ? " (default)" : ""}`);
  });

  const reply = (await rl.question(`Select model [${defaultIndex + 1}]: `)).trim();
  if (!reply) {
    return models[defaultIndex];
  }

  const pick = Number.parseInt(reply, 10);
  if (!Number.isNaN(pick) && pick >= 1 && pick <= models.length) {
    return models[pick - 1];
  }

  if (models.includes(reply)) {
    return reply;
  }

  writeLine("Invalid selection. Using default model.");
  return models[defaultIndex];
}
@@ -0,0 +1,70 @@
1
+ import {
2
+ CONFIG_VERSION,
3
+ DEFAULT_CLAUDE_MODEL,
4
+ DEFAULT_OLLAMA_MODEL,
5
+ DEFAULT_OPENAI_MODEL,
6
+ DEFAULT_MAX_DIFF_CHARS,
7
+ } from "./constants.js";
8
+ import { promptNumber, promptSelect, writeLine } from "./ui.js";
9
+ import { selectOllamaModel } from "./ollama.js";
10
+ import { writeConfig } from "./config.js";
11
+
12
/**
 * Interactive first-run setup: asks for provider, model, diff mode, and
 * diff size limit, writes the resulting config to `configPath`, and
 * returns it.
 */
export async function runOnboarding({ rl, configPath, ollamaHost }) {
  writeLine("\n👋 Welcome to committer! Let's create a .committer config.");

  const provider = await promptSelect({
    rl,
    question: "Choose your AI provider:",
    defaultValue: "ollama",
    options: [
      { value: "ollama", label: "Ollama (local)" },
      { value: "openai", label: "ChatGPT (OpenAI API key required)" },
      { value: "claude", label: "Claude (API key required)" },
    ],
  });

  // Pick the model: interactive selection for Ollama (with a fallback when
  // the local daemon is unavailable), provider defaults otherwise.
  let model;
  if (provider === "ollama") {
    try {
      model = await selectOllamaModel({ host: ollamaHost, rl });
    } catch (error) {
      writeLine(`⚠️ Ollama model selection failed: ${error.message}`);
      writeLine(`↩️ Falling back to ${DEFAULT_OLLAMA_MODEL}.`);
      model = DEFAULT_OLLAMA_MODEL;
    }
  } else if (provider === "openai") {
    model = DEFAULT_OPENAI_MODEL;
  } else {
    model = DEFAULT_CLAUDE_MODEL;
  }

  const diffMode = await promptSelect({
    rl,
    question: "Which diff should committer use?",
    defaultValue: "auto",
    options: [
      { value: "auto", label: "Auto (staged if any, else unstaged)" },
      { value: "staged", label: "Staged only" },
      { value: "all", label: "Staged + unstaged" },
    ],
  });

  const maxDiffChars = await promptNumber({
    rl,
    question: "Max diff characters to send",
    defaultValue: DEFAULT_MAX_DIFF_CHARS,
    min: 500,
  });

  const config = {
    version: CONFIG_VERSION,
    provider,
    model,
    diffMode,
    maxDiffChars,
  };

  await writeConfig(configPath, config);
  writeLine(`✅ Saved config to ${configPath}.`);
  return config;
}
package/src/prompt.js ADDED
@@ -0,0 +1,106 @@
1
+ import { COMMIT_TYPES } from "./constants.js";
2
+
3
/**
 * Build the user prompt sent to the model: JSON output schema, Conventional
 * Commit rules for the subject and body, interpretation guidance, and the
 * diff itself. When `truncated` is true an extra caveat line warns the model
 * that the diff is incomplete.
 *
 * Fix: the previous implementation ended with `.filter(Boolean)`, which
 * removed not only the placeholder for the optional truncation note but
 * ALSO every intentional "" section-separator line, so the prompt rendered
 * with no blank lines between sections. The optional line is now spliced in
 * conditionally and the separators are preserved.
 */
export function buildPrompt(diff, { truncated }) {
  const types = COMMIT_TYPES.join(", ");

  return [
    "You are an expert software engineer writing a professional git commit message.",
    "Analyze the diff and produce a precise, high-signal commit message.",
    "",
    "Return valid JSON only.",
    "Do not wrap the response in markdown.",
    "Do not include explanations, commentary, or extra keys.",
    'Use exactly this schema: {"subject":"...","body":"..."}',
    "",
    "Rules for the subject:",
    "1. Follow Conventional Commits.",
    "2. Format: <type>(optional-scope)!: <description>",
    `3. Allowed types: ${types}.`,
    "4. Use ! only if the diff clearly introduces a breaking change.",
    "5. Use imperative mood.",
    "6. Use lower-case for the description.",
    "7. Do not end the subject with a period.",
    "8. Keep the subject concise, specific, and under 72 characters.",
    "9. Prefer describing the main user-visible or architectural change.",
    "10. Avoid vague subjects like 'update code', 'fix stuff', or 'changes'.",
    "",
    "Rules for the body:",
    "1. Body is required.",
    "2. Write 2 to 5 short sentences.",
    "3. Summarize the intent and the most important changes.",
    "4. Mention notable implementation details only when they help explain the change.",
    "5. Cover the main files or areas affected, but do not list every file mechanically.",
    "6. Focus on why and what changed more than line-level details.",
    "7. Do not repeat the subject verbatim.",
    "8. Do not include a footer, ticket number, or breaking-change footer.",
    "9. Do not invent details not supported by the diff.",
    "",
    "Decision guidance:",
    "- Use feat for new behavior or capability.",
    "- Use fix for bug fixes or correctness issues.",
    "- Use refactor for internal restructuring without behavior change.",
    "- Use perf for performance improvements.",
    "- Use docs for documentation-only changes.",
    "- Use test for test-only changes.",
    "- Use build for build system, dependencies, or packaging changes.",
    "- Use ci for CI/CD changes.",
    "- Use chore for maintenance tasks that do not fit other types.",
    "- Use style for formatting-only changes with no logic change.",
    "",
    "Scoping guidance:",
    "- Add a scope only when the affected area is clear and meaningful.",
    "- Use a short scope such as api, auth, ui, cli, db, config, or parser.",
    "- Omit the scope if there is no obvious single area.",
    "",
    "Diff interpretation guidance:",
    "- Infer the primary purpose of the change from the overall diff, not isolated lines.",
    "- Ignore unimportant noise such as formatting churn unless formatting is the main change.",
    "- If multiple changes exist, prioritize the dominant one.",
    // Caveat only when truncateDiff trimmed the diff.
    ...(truncated
      ? [
          "- The diff was truncated, so base the message on the visible changes only and avoid overclaiming.",
        ]
      : []),
    "",
    "Now analyze this diff and return only the JSON object.",
    "",
    "Diff:",
    diff,
  ].join("\n");
}
71
+
72
/**
 * Strip common LLM wrapper noise from a raw response: surrounding markdown
 * code fences, a leading "commit message:" label, surrounding double or
 * single quotes, and CRLF line endings.
 */
export function cleanCommitMessage(message) {
  let cleaned = message.trim();
  cleaned = cleaned.replace(/^```[a-z]*\n?/i, "").replace(/```$/, "");
  cleaned = cleaned.replace(/^commit message:\s*/i, "");
  cleaned = cleaned.replace(/^"|"$/g, "");
  cleaned = cleaned.replace(/^'|'$/g, "");
  cleaned = cleaned.replace(/\r\n/g, "\n");
  return cleaned.trim();
}

// Turn a parsed JSON payload into "<subject>\n\n<body>", or "" when either
// field is missing, blank, or not a string.
function formatFromPayload(payload) {
  const subject =
    typeof payload?.subject === "string" ? payload.subject.trim() : "";
  const body = typeof payload?.body === "string" ? payload.body.trim() : "";
  if (!subject || !body) return "";
  return `${subject}\n\n${body}`;
}

/**
 * Parse a raw model response into a commit message. Tries strict JSON
 * first, then the outermost {...} span (models often wrap JSON in prose).
 * Returns "" when no valid subject/body pair is found.
 *
 * Fixes: the subject/body extraction logic was duplicated in both parse
 * branches (now shared via formatFromPayload), and a non-string subject or
 * body crashed `.trim()` into exception-driven fallback parsing instead of
 * being rejected cleanly.
 */
export function parseCommitMessage(raw) {
  const cleaned = cleanCommitMessage(raw);

  try {
    return formatFromPayload(JSON.parse(cleaned));
  } catch {
    // Not pure JSON; fall through to the embedded-JSON scan below.
  }

  const start = cleaned.indexOf("{");
  const end = cleaned.lastIndexOf("}");
  if (start >= 0 && end > start) {
    try {
      return formatFromPayload(JSON.parse(cleaned.slice(start, end + 1)));
    } catch {
      return "";
    }
  }

  return "";
}
@@ -0,0 +1,34 @@
1
/**
 * Call Anthropic's Messages API and return the first content block's text
 * ("" when absent).
 *
 * Reads the API key from ANTHROPIC_API_KEY (or the CLAUDE_API_KEY alias)
 * and throws when neither is set, or when the HTTP response is not OK
 * (status and response body are included in the error message).
 *
 * NOTE(review): max_tokens is 128, which may be tight for a JSON payload
 * holding a subject plus a 2-5 sentence body; a truncated response fails
 * JSON parsing upstream and triggers regeneration — confirm the budget.
 */
export async function callClaude({ system, user, model }) {
  const apiKey =
    process.env.ANTHROPIC_API_KEY || process.env.CLAUDE_API_KEY || "";
  if (!apiKey) {
    throw new Error(
      "Missing ANTHROPIC_API_KEY or CLAUDE_API_KEY for Claude provider."
    );
  }

  const response = await fetch("https://api.anthropic.com/v1/messages", {
    method: "POST",
    headers: {
      "content-type": "application/json",
      "x-api-key": apiKey,
      "anthropic-version": "2023-06-01",
    },
    body: JSON.stringify({
      model,
      max_tokens: 128,
      temperature: 0.2,
      system,
      messages: [{ role: "user", content: user }],
    }),
  });

  if (!response.ok) {
    const text = await response.text();
    throw new Error(`Claude API error: ${response.status} ${text}`);
  }

  const data = await response.json();
  const content = data?.content?.[0]?.text ?? "";
  return content;
}
@@ -0,0 +1,49 @@
1
/**
 * Call Ollama's /api/chat endpoint (non-streaming, JSON-format mode) and
 * return the assistant text, or "" when the response carries no content.
 * Throws with status and body text on a non-OK HTTP response.
 */
export async function callOllama({ system, user, model, host }) {
  const payload = {
    model,
    format: "json",
    stream: false,
    options: {
      temperature: 0.2,
      num_predict: 128,
    },
    messages: [
      { role: "system", content: system },
      { role: "user", content: user },
    ],
  };

  const response = await fetch(`${host}/api/chat`, {
    method: "POST",
    headers: { "content-type": "application/json" },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    const body = await response.text();
    throw new Error(`Ollama API error: ${response.status} ${body}`);
  }

  const data = await response.json();
  return data?.message?.content ?? data?.response ?? "";
}
30
+
31
/**
 * Fetch the names of locally installed models from Ollama's /api/tags
 * endpoint, deduplicated and sorted. Throws with status and body text on a
 * non-OK HTTP response.
 */
export async function fetchOllamaModels(host) {
  const response = await fetch(`${host}/api/tags`, {
    method: "GET",
    headers: { "content-type": "application/json" },
  });

  if (!response.ok) {
    const body = await response.text();
    throw new Error(`Ollama API error: ${response.status} ${body}`);
  }

  const data = await response.json();
  const names = new Set();
  for (const entry of data?.models || []) {
    if (entry?.name) {
      names.add(entry.name);
    }
  }
  return [...names].sort();
}
@@ -0,0 +1,33 @@
1
/**
 * Call the OpenAI chat-completions API (JSON-object response format) and
 * return the assistant message content ("" when absent). Throws when
 * OPENAI_API_KEY is unset or the HTTP response is not OK.
 */
export async function callOpenAI({ system, user, model }) {
  const apiKey = process.env.OPENAI_API_KEY || "";
  if (!apiKey) {
    throw new Error("Missing OPENAI_API_KEY for OpenAI provider.");
  }

  const payload = {
    model,
    temperature: 0.2,
    max_tokens: 128,
    response_format: { type: "json_object" },
    messages: [
      { role: "system", content: system },
      { role: "user", content: user },
    ],
  };

  const response = await fetch("https://api.openai.com/v1/chat/completions", {
    method: "POST",
    headers: {
      "content-type": "application/json",
      authorization: `Bearer ${apiKey}`,
    },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    const body = await response.text();
    throw new Error(`OpenAI API error: ${response.status} ${body}`);
  }

  const data = await response.json();
  return data?.choices?.[0]?.message?.content ?? "";
}
package/src/ui.js ADDED
@@ -0,0 +1,43 @@
1
+ import { stdout as output } from "node:process";
2
+
3
// Write `text` to stdout followed by a newline.
export function writeLine(text) {
  output.write(`${text}\n`);
}

// Write `text` to stdout with no trailing newline.
export function write(text) {
  output.write(text);
}

/**
 * Present a numbered list of options and return the chosen option's value.
 * Accepts a 1-based index or an exact value string; empty or unrecognized
 * input falls back to `defaultValue`.
 */
export async function promptSelect({ rl, question, options, defaultValue }) {
  writeLine(`\n${question}`);
  options.forEach((option, index) => {
    const marker = option.value === defaultValue ? " (default)" : "";
    writeLine(`${index + 1}) ${option.label}${marker}`);
  });

  const reply = (await rl.question("Select option: ")).trim();
  if (!reply) return defaultValue;

  const pick = Number.parseInt(reply, 10);
  if (!Number.isNaN(pick) && pick >= 1 && pick <= options.length) {
    return options[pick - 1].value;
  }

  for (const option of options) {
    if (option.value === reply) return option.value;
  }

  writeLine("Invalid selection. Using default.");
  return defaultValue;
}

/**
 * Ask for an integer >= `min`, re-prompting until the input is valid.
 * Empty input returns `defaultValue`.
 */
export async function promptNumber({ rl, question, defaultValue, min }) {
  for (;;) {
    const reply = (await rl.question(`${question} [${defaultValue}]: `)).trim();
    if (!reply) return defaultValue;

    const value = Number.parseInt(reply, 10);
    if (!Number.isNaN(value) && value >= min) return value;

    writeLine(`Please enter a number >= ${min}.`);
  }
}
@@ -0,0 +1,40 @@
1
+ import { COMMIT_TYPES } from "./constants.js";
2
+
3
// Matches one emoji code point: U+1F300–U+1FAFF (pictographic blocks) or
// U+2600–U+27BF (Misc Symbols / Dingbats in the BMP).
const EMOJI_REGEX = /[\u{1F300}-\u{1FAFF}\u{2600}-\u{27BF}]/u;
// Conventional Commit subject: <type>(scope)?!?: description
const SUBJECT_REGEX = new RegExp(
  `^(${COMMIT_TYPES.join("|")})(\\([^)]+\\))?(!)?:\\s.+$`
);

// True when `text` contains at least one character in the ranges above.
export function hasEmoji(text) {
  return EMOJI_REGEX.test(text);
}

/**
 * Check that `message` looks like a Conventional-Commit message with a
 * non-empty body. Returns { valid: true } or { valid: false, reason }.
 */
export function validateCommitMessage(message) {
  // Normalize line endings, then drop trailing blank lines.
  const normalized = message.trim().replace(/\r\n/g, "\n");
  const lines = normalized.split("\n");

  while (lines.length > 0 && lines[lines.length - 1].trim() === "") {
    lines.pop();
  }

  const subject = lines[0]?.trim();
  if (!subject) {
    return { valid: false, reason: "Missing subject line." };
  }

  // Matched against the lowercased subject, so "FEAT: x" also passes.
  if (!SUBJECT_REGEX.test(subject.toLowerCase())) {
    return {
      valid: false,
      reason: "Subject must follow conventional format like 'feat: add ...'.",
    };
  }

  // NOTE(review): slice(2) assumes line 1 is the blank separator. A body
  // starting directly on line 1 (no blank line) is not counted and fails
  // below as "Body is required" — confirm this strictness is intended.
  const bodyLines = lines.slice(2);
  const bodyHasContent = bodyLines.some((line) => line.trim().length > 0);
  if (!bodyHasContent) {
    return { valid: false, reason: "Body is required." };
  }

  return { valid: true };
}