git-cai-cli 0.1.1.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- git_cai_cli/__init__.py +0 -0
- git_cai_cli/cli.py +61 -0
- git_cai_cli/core/__init__.py +0 -0
- git_cai_cli/core/config.py +146 -0
- git_cai_cli/core/gitutils.py +57 -0
- git_cai_cli/core/llm.py +97 -0
- git_cai_cli-0.1.1.dev0.dist-info/METADATA +104 -0
- git_cai_cli-0.1.1.dev0.dist-info/RECORD +13 -0
- git_cai_cli-0.1.1.dev0.dist-info/WHEEL +5 -0
- git_cai_cli-0.1.1.dev0.dist-info/entry_points.txt +2 -0
- git_cai_cli-0.1.1.dev0.dist-info/licenses/LICENSE +21 -0
- git_cai_cli-0.1.1.dev0.dist-info/top_level.txt +2 -0
- tests/test_core/test_config.py +128 -0
git_cai_cli/__init__.py
ADDED
|
File without changes
|
git_cai_cli/cli.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Main function
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import subprocess
|
|
7
|
+
import sys
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
from git_cai_cli.core.config import get_default_config, load_config, load_token
|
|
11
|
+
from git_cai_cli.core.gitutils import find_git_root, git_diff_excluding
|
|
12
|
+
from git_cai_cli.core.llm import CommitMessageGenerator
|
|
13
|
+
|
|
14
|
+
# Configure logging once at import time: terse "LEVEL: message" lines on
# stderr, suitable for a git subcommand's console output.
logging.basicConfig(
    level=logging.INFO,
    format="%(levelname)s: %(message)s",
)
# Module-level logger, shared by main().
log = logging.getLogger(__name__)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def main() -> None:
    """Entry point for the ``git cai`` subcommand.

    Verifies the invocation context, loads configuration and the API token,
    collects the staged diff, asks the LLM for a commit message, and opens
    the normal ``git commit`` editor pre-filled with that message.

    Exits non-zero when not run as a git subcommand, outside a repository,
    or when the token is missing; exits 0 when nothing is staged.
    """
    # Ensure invoked as 'git cai': git runs subcommands via a 'git-<name>'
    # shim, so argv[0] must carry that prefix.
    invoked_as = Path(sys.argv[0]).name
    if not invoked_as.startswith("git-"):
        print("This command must be run as 'git cai'", file=sys.stderr)
        sys.exit(1)

    # Find the git repo root
    repo_root = find_git_root()
    if not repo_root:
        log.error("Not inside a Git repository.")
        sys.exit(1)

    # Load configuration and token
    config = load_config()
    default_model = get_default_config()
    log.debug("Default model from config: %s", default_model)
    token = load_token(default_model)
    if not token:
        log.error("Missing %s token in ~/.config/cai/tokens.yml", default_model)
        sys.exit(1)

    # Get the staged diff (minus .caiignore'd paths)
    diff = git_diff_excluding(repo_root)
    if not diff.strip():
        log.info("No changes to commit. Did you run 'git add'? Files must be staged.")
        sys.exit(0)

    # Generate commit message
    generator = CommitMessageGenerator(token, config, default_model)
    commit_message = generator.generate(diff)

    # Open the git commit editor with the generated message. Aborting the
    # editor (or saving an empty message) makes `git commit` exit non-zero;
    # propagate that exit code instead of dumping a CalledProcessError
    # traceback at the user.
    try:
        subprocess.run(["git", "commit", "--edit", "-m", commit_message], check=True)
    except subprocess.CalledProcessError as e:
        sys.exit(e.returncode)
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
# Allow executing this module directly as a script, in addition to the
# packaged 'git-cai' console entry point.
if __name__ == "__main__":
    main()
|
|
File without changes
|
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Set configuration
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import os
|
|
7
|
+
import stat
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Any, Optional
|
|
10
|
+
|
|
11
|
+
import yaml
|
|
12
|
+
from git_cai_cli.core.gitutils import find_git_root
|
|
13
|
+
|
|
14
|
+
log = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
# Per-user configuration directory and the files cai manages inside it.
CONFIG_DIR = Path.home() / ".config" / "cai"
FALLBACK_CONFIG_FILE = CONFIG_DIR / "cai_config.yml"  # used when the repo has no cai_config.yml
TOKENS_FILE = CONFIG_DIR / "tokens.yml"  # API keys; created with owner-only permissions

# Written to FALLBACK_CONFIG_FILE on first run. 'default' selects which
# backend section ("openai" / "gemini") is used.
DEFAULT_CONFIG = {
    "openai": {"model": "gpt-4.1", "temperature": 0},
    "gemini": {"model": "gemini-2.5-flash", "temperature": 0},
    "default": "openai",
}

# Placeholder contents written to TOKENS_FILE so users know what to fill in.
TOKEN_TEMPLATE = {
    "openai": "PUT-YOUR-OPENAI-TOKEN-HERE",
    "gemini": "PUT-YOUR-GEMINI-TOKEN-HERE",
}
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def load_config(
    fallback_config_file: Path = FALLBACK_CONFIG_FILE,
    default_config: Optional[dict[str, Any]] = None,
) -> dict[str, Any]:
    """Load the LLM configuration.

    Precedence:
      1. ``cai_config.yml`` in the repository root, if present and non-empty;
      2. *fallback_config_file* (created from *default_config* when missing
         or empty);
      3. *default_config* itself when the fallback file cannot be parsed.

    Args:
        fallback_config_file: User-level config path, normally
            ``~/.config/cai/cai_config.yml``.
        default_config: Configuration to fall back on; defaults to a copy
            of ``DEFAULT_CONFIG``.

    Returns:
        The parsed configuration mapping.
    """
    if default_config is None:
        # Copy one level deep: a plain .copy() would share the nested
        # per-backend dicts, so a caller mutating the returned config could
        # silently corrupt the module-level DEFAULT_CONFIG.
        default_config = {
            key: dict(value) if isinstance(value, dict) else value
            for key, value in DEFAULT_CONFIG.items()
        }
    log.debug("Loading config...")

    repo_root = find_git_root()
    repo_config_file = Path(repo_root) / "cai_config.yml" if repo_root else None

    # A non-empty repo-level config wins outright.
    if repo_config_file and repo_config_file.exists():
        try:
            with open(repo_config_file, "r", encoding="utf-8") as f:
                config = yaml.safe_load(f) or {}
            if config:
                return config
        except yaml.YAMLError as e:
            log.error("Failed to parse repo config: %s", e)

    # No usable fallback file yet: create it from the defaults.
    if not fallback_config_file.exists() or fallback_config_file.stat().st_size == 0:
        log.warning(
            "No config file provided and default config missing or empty. Creating default config in %s",
            fallback_config_file,
        )
        fallback_config_file.parent.mkdir(parents=True, exist_ok=True)
        with open(fallback_config_file, "w", encoding="utf-8") as f:
            yaml.safe_dump(default_config, f)
        return default_config

    try:
        with open(fallback_config_file, "r", encoding="utf-8") as f:
            return yaml.safe_load(f) or default_config
    except yaml.YAMLError as e:
        log.error("Failed to parse config at %s: %s", fallback_config_file, e)
        return default_config
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def load_token(
    key_name: str,
    tokens_file: Path = TOKENS_FILE,
    token_template: Optional[dict[str, Any]] = None,
) -> str | None:
    """Load the API token used to connect to the LLM backend.

    On first use the token file is created from *token_template* with
    owner-only permissions so the user knows where to put the key.

    Args:
        key_name: Backend name to look up (e.g. ``"openai"``).
        tokens_file: YAML file holding the tokens.
        token_template: Template written when *tokens_file* is missing;
            defaults to a copy of ``TOKEN_TEMPLATE``.

    Returns:
        The token string, or ``None`` when the file/key is missing, the
        YAML is unparseable, or the value is still the unedited placeholder.
    """
    if token_template is None:
        token_template = TOKEN_TEMPLATE.copy()
    log.debug("Loading token...")
    tokens_file.parent.mkdir(parents=True, exist_ok=True)

    if not tokens_file.exists():
        log.warning("%s does not exist. Creating a token template file.", tokens_file)
        with open(tokens_file, "w", encoding="utf-8") as f:
            yaml.safe_dump(token_template, f)
        # Tokens are secrets: restrict the file to user read/write (0600).
        os.chmod(tokens_file, stat.S_IRUSR | stat.S_IWUSR)
        log.info("Created token template at %s", tokens_file)  # nosemgrep
        return None

    try:
        with open(tokens_file, "r", encoding="utf-8") as f:
            tokens = yaml.safe_load(f) or {}
    except yaml.YAMLError as e:
        log.error("Error parsing %s: %s", tokens_file, e)
        return None

    if key_name not in tokens:
        log.error("Key '%s' not found in %s.", key_name, tokens_file)
        return None

    token = tokens[key_name]
    # Treat an empty value or an unedited placeholder as a missing token so
    # the caller reports a clear error instead of sending the placeholder
    # string to the API as a key.
    if not token or token == token_template.get(key_name):
        log.error("Token for '%s' in %s has not been set.", key_name, tokens_file)
        return None

    return token
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def get_default_config() -> str:
    """Return the value of the 'default' key from cai_config.yml.

    The repository root is searched first, then ``~/.config/cai/cai_config.yml``.

    Raises:
        FileNotFoundError: when neither location has a config file.
        ValueError: when the YAML cannot be parsed.
        KeyError: when the file has no 'default' key.
    """
    root = find_git_root()
    home_config = Path.home() / ".config" / "cai" / "cai_config.yml"

    config_path = None
    if root is not None:
        candidate = root / "cai_config.yml"
        if candidate.is_file():
            config_path = candidate
            log.info("Using config file from repo root: %s", config_path)

    if config_path is None:
        if not home_config.is_file():
            log.error("No cai_config.yml found in repo root or %s", home_config)
            raise FileNotFoundError(
                f"No cai_config.yml found in repo root or {home_config}"
            )
        config_path = home_config
        log.info("Using config file from user config dir: %s", config_path)

    try:
        with config_path.open("r", encoding="utf-8") as fh:
            config = yaml.safe_load(fh) or {}
        log.debug("Loaded configuration from %s: %s", config_path, config)
    except yaml.YAMLError as e:
        log.exception("Error parsing YAML file %s", config_path)
        raise ValueError(f"Error parsing YAML file {config_path}: {e}") from e

    if "default" not in config:
        log.error("'default' key not found in %s", config_path)
        raise KeyError(f"'default' key not found in {config_path}")

    default_value = config["default"]
    log.info("Default config value: %s", default_value)
    return default_value
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Check git repo and run git diff
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import subprocess
|
|
7
|
+
import sys
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Callable
|
|
10
|
+
|
|
11
|
+
log = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def find_git_root(
|
|
15
|
+
run_cmd: Callable[..., subprocess.CompletedProcess] = subprocess.run,
|
|
16
|
+
) -> Path | None:
|
|
17
|
+
"""Returns the root directory of the current Git repository, or None if not in a Git repo."""
|
|
18
|
+
try:
|
|
19
|
+
result = run_cmd(
|
|
20
|
+
["git", "rev-parse", "--show-toplevel"],
|
|
21
|
+
capture_output=True,
|
|
22
|
+
text=True,
|
|
23
|
+
check=True,
|
|
24
|
+
)
|
|
25
|
+
return Path(result.stdout.strip())
|
|
26
|
+
except subprocess.CalledProcessError:
|
|
27
|
+
return None
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def git_diff_excluding(
    repo_root: Path,
    run_cmd: Callable[..., subprocess.CompletedProcess] = subprocess.run,
    exit_func: Callable[[int], None] = sys.exit,
) -> str:
    """Run ``git diff --cached`` excluding files listed in ``.caiignore``.

    Args:
        repo_root: Repository root (where ``.caiignore`` is looked up).
        run_cmd: ``subprocess.run``-compatible callable; injectable for tests.
        exit_func: Called with 1 when git fails; defaults to ``sys.exit``.

    Returns:
        The staged diff text ("" if *exit_func* returns instead of exiting).
    """
    ignore_file = repo_root / ".caiignore"

    exclude_files: list[str] = []
    if ignore_file.exists():
        with open(ignore_file, "r", encoding="utf-8") as f:
            # Keep non-blank, non-comment lines.
            exclude_files = [
                line.strip()
                for line in f
                if line.strip() and not line.strip().startswith("#")
            ]
        if not exclude_files:
            log.info("%s is empty. No files excluded.", ignore_file)

    # ':!pattern' is git's pathspec syntax for excluding paths.
    cmd = ["git", "diff", "--cached", "--", "."]
    cmd.extend(f":!{pattern}" for pattern in exclude_files)

    # With check=True a non-zero exit raises CalledProcessError, so failure
    # must be handled via except: the previous `result.returncode != 0`
    # check was unreachable dead code and the error path never ran.
    try:
        result = run_cmd(cmd, capture_output=True, text=True, check=True)
    except subprocess.CalledProcessError as e:
        log.error("git diff failed: %s", (e.stderr or "").strip())
        exit_func(1)
        return ""  # only reached if exit_func does not actually exit

    return result.stdout
|
git_cai_cli/core/llm.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Settings and connection of LLM
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
from typing import Any, Dict, Type
|
|
7
|
+
|
|
8
|
+
from google import genai # type: ignore[reportUnknownImport]
|
|
9
|
+
from google.genai import types # type: ignore[reportUnknownImport]
|
|
10
|
+
from openai import OpenAI
|
|
11
|
+
|
|
12
|
+
log = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class CommitMessageGenerator:
    """Generates git commit messages from a git diff using LLMs.

    Dispatches to the OpenAI or Gemini backend according to *default_model*.
    """

    def __init__(self, token: str, config: Dict[str, Any], default_model: str):
        # API key for the selected backend.
        self.token = token
        # Full configuration mapping with per-backend model/temperature.
        self.config = config
        # Backend name: "openai" or "gemini".
        self.default_model = default_model

    def generate(self, git_diff: str) -> str:
        """Generate a commit message using the configured default model.

        Raises:
            ValueError: if *default_model* names an unknown backend.
        """
        model_dispatch = {
            "openai": self.generate_openai,
            "gemini": self.generate_gemini,
        }

        # Look up the handler explicitly instead of wrapping the call in
        # `except KeyError`: a KeyError raised *inside* the handler (e.g. a
        # missing "model" key in the config) must not be misreported as an
        # unknown model name.
        handler = model_dispatch.get(self.default_model)
        if handler is None:
            log.error("Unknown default model: '%s'", self.default_model)
            raise ValueError(f"Unknown default model: '{self.default_model}'")

        log.debug("Generating commit message using model '%s'", self.default_model)
        return handler(git_diff)

    def _system_prompt(self) -> str:
        """Shared system prompt for both OpenAI and Gemini."""
        return (
            "You are an expert software engineer assistant. "
            "Your task is to generate a concise, professional git commit message "
            "summarizing the provided git diff changes. "
            "Keep the message clear and focused on what was changed and why. "
            "Always include a headline, followed by a bullet-point list of changes."
        )

    def generate_openai(self, git_diff: str, openai_cls: Type[Any] = OpenAI) -> str:
        """Generate a commit message using OpenAI's API.

        Args:
            git_diff: The staged diff to summarize.
            openai_cls: Client class; injectable for tests.
        """
        client = openai_cls(api_key=self.token)
        model = self.config["openai"]["model"]
        temperature = self.config["openai"]["temperature"]

        messages = [
            {"role": "system", "content": self._system_prompt()},
            {
                "role": "user",
                "content": f"Generate a commit message for:\n\n{git_diff}",
            },
        ]

        completion = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
        )
        # message.content may be None (e.g. refusal); normalize to "" so
        # .strip() cannot raise AttributeError.
        return (completion.choices[0].message.content or "").strip()

    def generate_gemini(
        self, git_diff: str, genai_cls: Type[Any] = genai.Client
    ) -> str:
        """Generate a commit message using Gemini's API.

        Args:
            git_diff: The staged diff to summarize.
            genai_cls: Client class; injectable for tests.
        """
        client = genai_cls(api_key=self.token)
        model = self.config["gemini"]["model"]
        temperature = self.config["gemini"]["temperature"]
        response = client.models.generate_content(
            model=model,
            contents=git_diff,
            config=types.GenerateContentConfig(
                system_instruction=self._system_prompt(),
                temperature=temperature,
            ),
        )
        # response.text may be None when generation is blocked; return "".
        return response.text or ""
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: git-cai-cli
|
|
3
|
+
Version: 0.1.1.dev0
|
|
4
|
+
Summary: Use LLM to create git commits
|
|
5
|
+
Author-email: Thorsten Foltz <thorsten.foltz@live.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/thorstenfoltz/cai
|
|
8
|
+
Project-URL: Issues, https://github.com/thorstenfoltz/cai/issues
|
|
9
|
+
Keywords: Git,LLM,Commit,AI,GenAI
|
|
10
|
+
Classifier: Development Status :: 3 - Alpha
|
|
11
|
+
Classifier: Environment :: Console
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
18
|
+
Classifier: Operating System :: OS Independent
|
|
19
|
+
Classifier: Topic :: Software Development :: Version Control
|
|
20
|
+
Requires-Python: >=3.10
|
|
21
|
+
Description-Content-Type: text/markdown
|
|
22
|
+
License-File: LICENSE
|
|
23
|
+
Requires-Dist: pyyaml>=6.0
|
|
24
|
+
Requires-Dist: openai>=1.0
|
|
25
|
+
Requires-Dist: google-genai>=1.41.0
|
|
26
|
+
Dynamic: license-file
|
|
27
|
+
|
|
28
|
+
# cai
|
|
29
|
+
|
|
30
|
+
`cai` is a Git extension written in Python that automates commit message creation. It allows you to run `git cai` to automatically generate a commit message based on changes and new additions in your repository.
|
|
31
|
+
|
|
32
|
+
`cai` leverages a **large language model (LLM)** to generate meaningful and context-aware commit messages. Currently, it supports the **OpenAI** and **Gemini API** for generating commit messages.
|
|
33
|
+
|
|
34
|
+
## Table of Contents
|
|
35
|
+
|
|
36
|
+
- [About](#about-section)
|
|
37
|
+
- [Prerequisites](#prerequisites)
|
|
38
|
+
- [Features](#features-section)
|
|
39
|
+
- [Installation](#installation-section)
|
|
40
|
+
- [Usage](#usage-section)
|
|
41
|
+
- [License](#license-section)
|
|
42
|
+
|
|
43
|
+
<h2 id="about-section">About</h2>
|
|
44
|
+
|
|
45
|
+
`cai` is designed to simplify your Git workflow by automatically generating commit messages using an LLM. No more struggling to summarize changes — `git cai` does it for you.
|
|
46
|
+
|
|
47
|
+
Currently, the only supported backend are the OpenAI and Gemini API, but additional LLM integrations may be added in the future.
|
|
48
|
+
|
|
49
|
+
<h2 id="prerequisites">Prerequisites</h2>
|
|
50
|
+
|
|
51
|
+
- Python 3.10 or higher
|
|
52
|
+
- [Pipx](https://pypi.org/project/pipx/) or [Pip](https://pypi.org/project/pip/) if installed in a virtual environment
|
|
53
|
+
- API key, currently supported
|
|
54
|
+
- OpenAI
|
|
55
|
+
- Gemini
|
|
56
|
+
|
|
57
|
+
<h2 id="features-section">Features</h2>
|
|
58
|
+
|
|
59
|
+
- Automatically detects added, modified, and deleted files
|
|
60
|
+
- Generates meaningful commit messages using an LLM
|
|
61
|
+
- Seamless integration with Git as a plugin/extension
|
|
62
|
+
- Written in Python for easy customization
|
|
63
|
+
|
|
64
|
+
<h2 id="installation-section">Installation</h2>
|
|
65
|
+
|
|
66
|
+
Install by
|
|
67
|
+
|
|
68
|
+
```sh
|
|
69
|
+
pipx install git-cai-cli
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
Afterwards set cai to PATH by
|
|
73
|
+
|
|
74
|
+
```sh
|
|
75
|
+
pipx ensurepath
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
Restart your shell by executing `bash` or `zsh` or whatever else is your used shell.
|
|
79
|
+
|
|
80
|
+
<h2 id="usage-section">Usage</h2>
|
|
81
|
+
|
|
82
|
+
Once installed, you can use `cai` like a normal Git command:
|
|
83
|
+
|
|
84
|
+
```sh
|
|
85
|
+
git cai
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
`cai` automatically creates a token file at: `~/.config/cai/tokens.yml`
|
|
89
|
+
This file stores your OpenAI API key, which is used every time you run `git cai`.
|
|
90
|
+
Open `~/.config/cai/tokens.yml` and store your API token there.
|
|
91
|
+
If a `cai_config.yml` file exists in the root of your repository, `cai` will use the settings defined there. Otherwise, it falls back to default settings, which are automatically created in the same directory as `tokens.yml` if they don’t already exist.
|
|
92
|
+
Currently, the only configurable options are:
|
|
93
|
+
|
|
94
|
+
- LLM model
|
|
95
|
+
- Temperature
|
|
96
|
+
|
|
97
|
+
`cai` uses Git’s `diff` output as input for generating commit messages.
|
|
98
|
+
To exclude specific files or directories from being included in the generated commit message, create a `.caiignore` file in the root of your repository. This file works like a `.gitignore`.
|
|
99
|
+
|
|
100
|
+
- Files listed in `.gitignore` are **always excluded**.
|
|
101
|
+
- `.caiignore` is only needed for files that are tracked by Git but should **not** be included in the commit message.
|
|
102
|
+
|
|
103
|
+
<h2 id="license-section">License</h2>
|
|
104
|
+
This project is licensed under the MIT License.
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
git_cai_cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
2
|
+
git_cai_cli/cli.py,sha256=-uJibVFO4bseav5ylV7HXu17mvlr7vJX3MONQCIgDYE,1724
|
|
3
|
+
git_cai_cli/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
4
|
+
git_cai_cli/core/config.py,sha256=oPHmtnuA3L0ZMfDgYSxvnAraKTd4x8GWYBAZnsO-fV8,4892
|
|
5
|
+
git_cai_cli/core/gitutils.py,sha256=9nLxOr6Ug-xYu9PUNaZWpC7qfXYc0q4ve-l4vC4hi98,1671
|
|
6
|
+
git_cai_cli/core/llm.py,sha256=d1jrLP9ssbEkWg22YJbJUZizSmbaE5MdCSeJ2uSYDUw,3215
|
|
7
|
+
git_cai_cli-0.1.1.dev0.dist-info/licenses/LICENSE,sha256=mJXH267ubRO4gQIr_0QP42N7tqstouAmsEs5Na73z58,1071
|
|
8
|
+
tests/test_core/test_config.py,sha256=ADmOH1bau2bwovp382m7rP64t1JL9J6xNDIv-Ato-WI,3934
|
|
9
|
+
git_cai_cli-0.1.1.dev0.dist-info/METADATA,sha256=yXGChHmJMS8SAdWJhh9dCA6mCw9vt8iKm7EO_WSHZac,3917
|
|
10
|
+
git_cai_cli-0.1.1.dev0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
11
|
+
git_cai_cli-0.1.1.dev0.dist-info/entry_points.txt,sha256=lNDzmy9McXaZcF-UFAQrfMleNX4DkUiljRp3_K8GV-w,49
|
|
12
|
+
git_cai_cli-0.1.1.dev0.dist-info/top_level.txt,sha256=BGbckcfCHqxkzlMsUzkwZSTooOOJO7vqaIykiiCOR9U,18
|
|
13
|
+
git_cai_cli-0.1.1.dev0.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 Thorsten Foltz
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Unit tests for git_cai_cli.core.config module.
|
|
3
|
+
|
|
4
|
+
These tests cover the basic functionality of load_config and load_token,
|
|
5
|
+
including file creation, reading existing files, repo config precedence,
|
|
6
|
+
token retrieval, and handling missing keys.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import stat
|
|
10
|
+
from unittest.mock import patch
|
|
11
|
+
|
|
12
|
+
import yaml
|
|
13
|
+
from git_cai_cli.core.config import (
|
|
14
|
+
DEFAULT_CONFIG,
|
|
15
|
+
TOKEN_TEMPLATE,
|
|
16
|
+
load_config,
|
|
17
|
+
load_token,
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
# ------------------------------
|
|
21
|
+
# LOAD CONFIG UNIT TESTS
|
|
22
|
+
# ------------------------------
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def test_load_config_returns_default(tmp_path):
    """
    Test that load_config returns the default configuration when no fallback
    or repo-level config exists, and that it creates the fallback file.
    """
    fallback_file = tmp_path / "cai_config.yml"

    # Force "not inside a repo": without this mock, a cai_config.yml in the
    # developer's real checkout takes precedence and makes the test flaky.
    with patch("git_cai_cli.core.config.find_git_root", return_value=None):
        config = load_config(fallback_config_file=fallback_file)

    # Should return the default configuration
    assert config == DEFAULT_CONFIG

    # Should create the fallback file
    assert fallback_file.exists()
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def test_load_config_reads_existing_file(tmp_path):
    """
    Test that load_config correctly reads an existing fallback configuration file.
    """
    fallback_file = tmp_path / "cai_config.yml"
    sample_config = {"openai": {"model": "gpt-3.5", "temperature": 0.7}}

    # Write sample config to fallback file
    fallback_file.write_text(yaml.safe_dump(sample_config))

    # Force "not inside a repo" so a real repo-level cai_config.yml cannot
    # shadow the fallback file and make the test flaky.
    with patch("git_cai_cli.core.config.find_git_root", return_value=None):
        config = load_config(fallback_config_file=fallback_file)

    # load_config should return the contents of the file
    assert config == sample_config
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def test_load_config_prefers_repo_config(tmp_path):
    """
    Test that load_config prefers a repo-level config over the fallback config.
    """
    # Repo-level config that should win.
    repo_config = {"openai": {"model": "repo-model", "temperature": 1.0}}
    (tmp_path / "cai_config.yml").write_text(yaml.safe_dump(repo_config))

    # Fallback config that should be ignored.
    fallback_file = tmp_path / "fallback.yml"
    fallback_file.write_text(yaml.safe_dump(DEFAULT_CONFIG))

    # Pretend tmp_path is the repository root.
    with patch("git_cai_cli.core.config.find_git_root", return_value=tmp_path):
        assert load_config(fallback_config_file=fallback_file) == repo_config
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
# ------------------------------
|
|
75
|
+
# LOAD TOKEN UNIT TESTS
|
|
76
|
+
# ------------------------------
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def test_load_token_creates_template(tmp_path):
    """
    Test that load_token creates a template token file if none exists,
    returns None, and sets correct file permissions.
    """
    token_file = tmp_path / "tokens.yml"

    # First call: no file yet, so a template is created and None returned.
    assert load_token("openai", tokens_file=token_file) is None
    assert token_file.exists()

    # Only the owner may read/write the secrets file.
    mode = stat.S_IMODE(token_file.stat().st_mode)
    assert mode == (stat.S_IRUSR | stat.S_IWUSR)

    # The created file contains exactly the placeholder template.
    assert yaml.safe_load(token_file.read_text()) == TOKEN_TEMPLATE
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def test_load_token_reads_existing(tmp_path):
    """
    Test that load_token returns the correct token when the token file exists.
    """
    token_file = tmp_path / "tokens.yml"
    token_file.write_text(yaml.safe_dump({"openai": "abc123"}))

    assert load_token("openai", tokens_file=token_file) == "abc123"
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def test_load_token_missing_key(tmp_path, caplog):
    """
    Test that load_token returns None and logs an error when the requested
    key is not found in the token file.
    """
    token_file = tmp_path / "tokens.yml"
    token_file.write_text(yaml.safe_dump({"gemini": "xyz"}))

    outcome = load_token("openai", tokens_file=token_file)

    # Missing key -> no token, and an error mentioning the key was logged.
    assert outcome is None
    assert "Key 'openai' not found" in caplog.text
|