gac 1.4.1.tar.gz → 1.5.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {gac-1.4.1 → gac-1.5.0}/PKG-INFO +2 -2
- {gac-1.4.1 → gac-1.5.0}/README.md +1 -1
- {gac-1.4.1 → gac-1.5.0}/src/gac/__version__.py +1 -1
- {gac-1.4.1 → gac-1.5.0}/src/gac/ai.py +4 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/ai_utils.py +12 -1
- {gac-1.4.1 → gac-1.5.0}/src/gac/init_cli.py +2 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/prompt.py +1 -6
- {gac-1.4.1 → gac-1.5.0}/src/gac/providers/__init__.py +4 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/providers/anthropic.py +6 -1
- {gac-1.4.1 → gac-1.5.0}/src/gac/providers/cerebras.py +6 -1
- gac-1.5.0/src/gac/providers/gemini.py +49 -0
- gac-1.5.0/src/gac/providers/lmstudio.py +55 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/providers/ollama.py +15 -4
- {gac-1.4.1 → gac-1.5.0}/src/gac/providers/openai.py +6 -1
- {gac-1.4.1 → gac-1.5.0}/src/gac/providers/openrouter.py +6 -1
- {gac-1.4.1 → gac-1.5.0}/.gitignore +0 -0
- {gac-1.4.1 → gac-1.5.0}/LICENSE +0 -0
- {gac-1.4.1 → gac-1.5.0}/pyproject.toml +0 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/__init__.py +0 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/cli.py +0 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/config.py +0 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/config_cli.py +0 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/constants.py +0 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/diff_cli.py +0 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/errors.py +0 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/git.py +0 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/main.py +0 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/preprocess.py +0 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/providers/groq.py +0 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/providers/zai.py +0 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/security.py +0 -0
- {gac-1.4.1 → gac-1.5.0}/src/gac/utils.py +0 -0
{gac-1.4.1 → gac-1.5.0}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gac
-Version: 1.4.1
+Version: 1.5.0
 Summary: AI-powered Git commit message generator with multi-provider support
 Project-URL: Homepage, https://github.com/cellwebb/gac
 Project-URL: Documentation, https://github.com/cellwebb/gac#readme
@@ -56,7 +56,7 @@ Description-Content-Type: text/markdown
 
 - **LLM-Powered Commit Messages:** Automatically generates clear, concise, and context-aware commit messages using large language models.
 - **Deep Contextual Analysis:** Understands your code by analyzing staged changes, repository structure, and recent commit history to provide highly relevant suggestions.
-- **Multi-Provider & Model Support:** Flexibly works with
+- **Multi-Provider & Model Support:** Flexibly works with leading AI providers (Anthropic, Cerebras, Groq, Ollama, OpenAI, OpenRouter, Z.AI) and models, easily configured through an interactive setup or environment variables.
 - **Seamless Git Workflow:** Integrates smoothly into your existing Git routine as a simple drop-in replacement for `git commit`.
 - **Extensive Customization:** Tailor commit messages to your needs with a rich set of flags, including one-liners (`-o`), AI hints (`-h`), scope inference (`-s`), and specific model selection (`-m`).
 - **Streamlined Workflow Commands:** Boost your productivity with convenient options to stage all changes (`-a`), auto-confirm commits (`-y`), and push to your remote repository (`-p`) in a single step.
{gac-1.4.1 → gac-1.5.0}/README.md
RENAMED
@@ -14,7 +14,7 @@
 
 - **LLM-Powered Commit Messages:** Automatically generates clear, concise, and context-aware commit messages using large language models.
 - **Deep Contextual Analysis:** Understands your code by analyzing staged changes, repository structure, and recent commit history to provide highly relevant suggestions.
-- **Multi-Provider & Model Support:** Flexibly works with
+- **Multi-Provider & Model Support:** Flexibly works with leading AI providers (Anthropic, Cerebras, Groq, Ollama, OpenAI, OpenRouter, Z.AI) and models, easily configured through an interactive setup or environment variables.
 - **Seamless Git Workflow:** Integrates smoothly into your existing Git routine as a simple drop-in replacement for `git commit`.
 - **Extensive Customization:** Tailor commit messages to your needs with a rich set of flags, including one-liners (`-o`), AI hints (`-h`), scope inference (`-s`), and specific model selection (`-m`).
 - **Streamlined Workflow Commands:** Boost your productivity with convenient options to stage all changes (`-a`), auto-confirm commits (`-y`), and push to your remote repository (`-p`) in a single step.
{gac-1.4.1 → gac-1.5.0}/src/gac/ai.py
RENAMED
@@ -12,7 +12,9 @@ from gac.errors import AIError
 from gac.providers import (
     call_anthropic_api,
     call_cerebras_api,
+    call_gemini_api,
     call_groq_api,
+    call_lmstudio_api,
     call_ollama_api,
     call_openai_api,
     call_openrouter_api,
@@ -71,6 +73,8 @@ def generate_commit_message(
         "openrouter": call_openrouter_api,
         "zai": call_zai_api,
         "zai-coding": call_zai_coding_api,
+        "gemini": call_gemini_api,
+        "lmstudio": call_lmstudio_api,
     }
 
     # Generate the commit message using centralized retry logic
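Both new providers are wired into the dispatch table that `generate_commit_message` uses to route a request. A minimal sketch of that routing, assuming the `call_*_api` functions imported in the diff above; the `route` helper and the two-entry table are illustrative, not part of gac:

```python
from gac.providers import call_gemini_api, call_lmstudio_api

# Illustrative routing table in the style of the diff above;
# the real table in ai.py also maps the other eight providers.
provider_funcs = {
    "gemini": call_gemini_api,
    "lmstudio": call_lmstudio_api,
}

def route(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    # gac model strings look like "provider:model-name"
    provider, model_name = model.split(":", 1)
    return provider_funcs[provider](model_name, messages, temperature, max_tokens)
```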
{gac-1.4.1 → gac-1.5.0}/src/gac/ai_utils.py
RENAMED
@@ -93,7 +93,18 @@ def generate_with_retries(
     provider, model_name = model.split(":", 1)
 
     # Validate provider
-    supported_providers = [
+    supported_providers = [
+        "anthropic",
+        "cerebras",
+        "gemini",
+        "groq",
+        "lmstudio",
+        "ollama",
+        "openai",
+        "openrouter",
+        "zai",
+        "zai-coding",
+    ]
     if provider not in supported_providers:
         raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {supported_providers}")
 
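Spelling the list out one provider per line makes the addition of `gemini` and `lmstudio` diff-friendly, and an unknown prefix still fails before any network call is attempted. A self-contained sketch of that check (the `print` stands in for the `AIError.model_error` raise):

```python
supported_providers = [
    "anthropic", "cerebras", "gemini", "groq", "lmstudio",
    "ollama", "openai", "openrouter", "zai", "zai-coding",
]

model = "mistral:mistral-large"  # hypothetical, unsupported model string
provider, model_name = model.split(":", 1)
if provider not in supported_providers:
    # gac raises AIError.model_error with this exact message format
    print(f"Unsupported provider: {provider}. Supported providers: {supported_providers}")
```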
{gac-1.4.1 → gac-1.5.0}/src/gac/init_cli.py
RENAMED
@@ -22,7 +22,9 @@ def init() -> None:
     providers = [
         ("Anthropic", "claude-3-5-haiku-latest"),
         ("Cerebras", "qwen-3-coder-480b"),
+        ("Gemini", "gemini-2.5-flash"),
         ("Groq", "meta-llama/llama-4-maverick-17b-128e-instruct"),
+        ("LM Studio", "deepseek-r1-distill-qwen-7b"),
         ("Ollama", "gemma3"),
         ("OpenAI", "gpt-4.1-mini"),
         ("OpenRouter", "openrouter/auto"),
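The interactive `gac init` flow now offers Gemini and LM Studio with the default models above. Presumably the display name is normalized into the provider prefix of the final `provider:model` string; that normalization is an assumption, sketched here:

```python
# Assumed normalization: "LM Studio" -> "lmstudio", "Z.AI" -> "zai".
providers = [
    ("Gemini", "gemini-2.5-flash"),
    ("LM Studio", "deepseek-r1-distill-qwen-7b"),
]
for display_name, default_model in providers:
    provider_id = display_name.lower().replace(" ", "").replace(".", "")
    print(f"{provider_id}:{default_model}")
# gemini:gemini-2.5-flash
# lmstudio:deepseek-r1-distill-qwen-7b
```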
{gac-1.4.1 → gac-1.5.0}/src/gac/prompt.py
RENAMED
@@ -235,12 +235,7 @@ def build_prompt(
     except Exception as e:
         logger.error(f"Error processing scope parameter: {e}")
         # Fallback to no scope if there's an error
-        template = re.sub(
-            r"<conventions_scope_provided>.*?</conventions_scope_provided>\n", "", template, flags=re.DOTALL
-        )
-        template = re.sub(
-            r"<conventions_scope_inferred>.*?</conventions_scope_inferred>\n", "", template, flags=re.DOTALL
-        )
+        template = re.sub(r"<conventions_with_scope>.*?</conventions_with_scope>\n", "", template, flags=re.DOTALL)
     template = template.replace("<conventions_no_scope>", "<conventions>")
     template = template.replace("</conventions_no_scope>", "</conventions>")
 
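The two scope-variant tags (`<conventions_scope_provided>`, `<conventions_scope_inferred>`) have evidently been merged into a single `<conventions_with_scope>` block, so the error fallback needs only one substitution. A standalone demonstration of the same calls, with an illustrative template string:

```python
import re

template = (
    "<conventions_with_scope>use a scope like feat(api)</conventions_with_scope>\n"
    "<conventions_no_scope>use plain types like feat</conventions_no_scope>"
)

# Same substitutions as the fallback path above.
template = re.sub(r"<conventions_with_scope>.*?</conventions_with_scope>\n", "", template, flags=re.DOTALL)
template = template.replace("<conventions_no_scope>", "<conventions>")
template = template.replace("</conventions_no_scope>", "</conventions>")

print(template)  # <conventions>use plain types like feat</conventions>
```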
|
@@ -2,7 +2,9 @@
|
|
|
2
2
|
|
|
3
3
|
from .anthropic import call_anthropic_api
|
|
4
4
|
from .cerebras import call_cerebras_api
|
|
5
|
+
from .gemini import call_gemini_api
|
|
5
6
|
from .groq import call_groq_api
|
|
7
|
+
from .lmstudio import call_lmstudio_api
|
|
6
8
|
from .ollama import call_ollama_api
|
|
7
9
|
from .openai import call_openai_api
|
|
8
10
|
from .openrouter import call_openrouter_api
|
|
@@ -11,7 +13,9 @@ from .zai import call_zai_api, call_zai_coding_api
|
|
|
11
13
|
__all__ = [
|
|
12
14
|
"call_anthropic_api",
|
|
13
15
|
"call_cerebras_api",
|
|
16
|
+
"call_gemini_api",
|
|
14
17
|
"call_groq_api",
|
|
18
|
+
"call_lmstudio_api",
|
|
15
19
|
"call_ollama_api",
|
|
16
20
|
"call_openai_api",
|
|
17
21
|
"call_openrouter_api",
|
|
{gac-1.4.1 → gac-1.5.0}/src/gac/providers/anthropic.py
RENAMED
@@ -35,7 +35,12 @@ def call_anthropic_api(model: str, messages: list[dict], temperature: float, max
         response = httpx.post(url, headers=headers, json=data, timeout=120)
         response.raise_for_status()
         response_data = response.json()
-        return response_data["content"][0]["text"]
+        content = response_data["content"][0]["text"]
+        if content is None:
+            raise AIError.model_error("Anthropic API returned null content")
+        if content == "":
+            raise AIError.model_error("Anthropic API returned empty content")
+        return content
     except httpx.HTTPStatusError as e:
         raise AIError.model_error(f"Anthropic API error: {e.response.status_code} - {e.response.text}") from e
     except Exception as e:
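The same null/empty guard recurs in the Cerebras, OpenAI, OpenRouter, and Ollama hunks below: previously a `None` or empty string from the API could be returned as the commit message; now it raises immediately. A toy version of the pattern, with `RuntimeError` standing in for `AIError.model_error`:

```python
def extract_text(response_data: dict) -> str:
    # Mirrors the guard added above.
    content = response_data["content"][0]["text"]
    if content is None:
        raise RuntimeError("API returned null content")
    if content == "":
        raise RuntimeError("API returned empty content")
    return content

print(extract_text({"content": [{"text": "feat: add gemini provider"}]}))  # feat: add gemini provider
```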
{gac-1.4.1 → gac-1.5.0}/src/gac/providers/cerebras.py
RENAMED
@@ -22,7 +22,12 @@ def call_cerebras_api(model: str, messages: list[dict], temperature: float, max_
         response = httpx.post(url, headers=headers, json=data, timeout=120)
         response.raise_for_status()
         response_data = response.json()
-        return response_data["choices"][0]["message"]["content"]
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("Cerebras API returned null content")
+        if content == "":
+            raise AIError.model_error("Cerebras API returned empty content")
+        return content
     except httpx.HTTPStatusError as e:
         raise AIError.model_error(f"Cerebras API error: {e.response.status_code} - {e.response.text}") from e
     except Exception as e:
gac-1.5.0/src/gac/providers/gemini.py
ADDED
@@ -0,0 +1,49 @@
+"""Gemini AI provider implementation."""
+
+import os
+from typing import Any
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_gemini_api(model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int) -> str:
+    """Call Gemini API directly."""
+    api_key = os.getenv("GEMINI_API_KEY")
+    if not api_key:
+        raise AIError.model_error("GEMINI_API_KEY not found in environment variables")
+
+    url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"
+
+    system_content = ""
+    user_content = ""
+
+    for msg in messages:
+        if msg["role"] == "system":
+            system_content = msg["content"]
+        elif msg["role"] == "user":
+            user_content = msg["content"]
+
+    payload: dict[str, Any] = {
+        "contents": [{"role": "user", "parts": [{"text": user_content}]}],
+        "generationConfig": {"temperature": temperature, "maxOutputTokens": max_tokens},
+    }
+
+    if system_content:
+        payload["systemInstruction"] = {"parts": [{"text": system_content}]}
+
+    try:
+        response = httpx.post(url, params={"key": api_key}, json=payload, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        content = response_data["candidates"][0]["content"]["parts"][0]["text"]
+        if content is None or content == "":
+            raise AIError.model_error("Gemini API response missing text content")
+        return content
+    except AIError:
+        raise
+    except httpx.HTTPStatusError as e:
+        raise AIError.model_error(f"Gemini API error: {e.response.status_code} - {e.response.text}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Gemini API: {str(e)}") from e
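Note that the message loop keeps only the last system and last user message, and system content travels via `systemInstruction` rather than the `contents` array. A usage sketch, assuming a valid `GEMINI_API_KEY` is set in the environment (the diff text in the payload is illustrative):

```python
from gac.errors import AIError
from gac.providers import call_gemini_api

messages = [
    {"role": "system", "content": "You write conventional commit messages."},
    {"role": "user", "content": "Staged diff:\n+ def call_gemini_api(...): ..."},
]
try:
    message = call_gemini_api("gemini-2.5-flash", messages, temperature=0.7, max_tokens=256)
    print(message)
except AIError as exc:
    print(f"Generation failed: {exc}")
```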
gac-1.5.0/src/gac/providers/lmstudio.py
ADDED
@@ -0,0 +1,55 @@
+"""LM Studio AI provider implementation."""
+
+import os
+from typing import Any
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_lmstudio_api(model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int) -> str:
+    """Call LM Studio's OpenAI-compatible API."""
+    api_url = os.getenv("LMSTUDIO_API_URL", "http://localhost:1234")
+    api_url = api_url.rstrip("/")
+
+    url = f"{api_url}/v1/chat/completions"
+
+    headers = {"Content-Type": "application/json"}
+    api_key = os.getenv("LMSTUDIO_API_KEY")
+    if api_key:
+        headers["Authorization"] = f"Bearer {api_key}"
+
+    payload: dict[str, Any] = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+        "stream": False,
+    }
+
+    try:
+        response = httpx.post(url, headers=headers, json=payload, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        choices = response_data.get("choices") or []
+        if not choices:
+            raise AIError.model_error("LM Studio API response missing choices")
+
+        message = choices[0].get("message") or {}
+        content = message.get("content")
+        if content:
+            return content
+
+        # Some OpenAI-compatible servers return text field directly
+        content = choices[0].get("text")
+        if content:
+            return content
+
+        raise AIError.model_error("LM Studio API response missing content")
+    except httpx.ConnectError as e:
+        raise AIError.connection_error(f"LM Studio connection failed: {str(e)}") from e
+    except httpx.HTTPStatusError as e:
+        raise AIError.model_error(f"LM Studio API error: {e.response.status_code} - {e.response.text}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling LM Studio API: {str(e)}") from e
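Because LM Studio exposes an OpenAI-compatible endpoint, no API key is required by default, and the extra `choices[0]["text"]` fallback covers servers that answer in completions style rather than chat style. A local usage sketch, assuming LM Studio is serving the named model on its default port:

```python
import os

from gac.providers import call_lmstudio_api

# Optional override; this is the default the provider assumes anyway.
os.environ.setdefault("LMSTUDIO_API_URL", "http://localhost:1234")

messages = [{"role": "user", "content": "One-line commit message for: fix README typo"}]
print(call_lmstudio_api("deepseek-r1-distill-qwen-7b", messages, temperature=0.7, max_tokens=128))
```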
{gac-1.4.1 → gac-1.5.0}/src/gac/providers/ollama.py
RENAMED
@@ -10,23 +10,34 @@ from gac.errors import AIError
 def call_ollama_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
     """Call Ollama API directly."""
     api_url = os.getenv("OLLAMA_API_URL", "http://localhost:11434")
+    api_key = os.getenv("OLLAMA_API_KEY")
 
     url = f"{api_url.rstrip('/')}/api/chat"
     data = {"model": model, "messages": messages, "temperature": temperature, "stream": False}
+    headers = {"Content-Type": "application/json"}
+    if api_key:
+        headers["Authorization"] = f"Bearer {api_key}"
 
     try:
-        response = httpx.post(url, json=data, timeout=120)
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
         response.raise_for_status()
         response_data = response.json()
 
+        content = None
         # Handle different response formats from Ollama
         if "message" in response_data and "content" in response_data["message"]:
-            return response_data["message"]["content"]
+            content = response_data["message"]["content"]
         elif "response" in response_data:
-            return response_data["response"]
+            content = response_data["response"]
         else:
             # Fallback: return the full response as string
-            return str(response_data)
+            content = str(response_data)
+
+        if content is None:
+            raise AIError.model_error("Ollama API returned null content")
+        if content == "":
+            raise AIError.model_error("Ollama API returned empty content")
+        return content
     except httpx.ConnectError as e:
         raise AIError.connection_error(f"Ollama connection failed. Make sure Ollama is running: {str(e)}") from e
     except httpx.HTTPStatusError as e:
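The Ollama provider now sends `Authorization: Bearer <OLLAMA_API_KEY>` when that variable is set, which matters for remote or proxied instances; plain local installs need no key. A sketch with hypothetical endpoint and token values:

```python
import os

from gac.providers import call_ollama_api

# Hypothetical remote Ollama instance behind an authenticating proxy.
os.environ["OLLAMA_API_URL"] = "https://ollama.example.com"
os.environ["OLLAMA_API_KEY"] = "example-token"

messages = [{"role": "user", "content": "Commit message for: bump version to 1.5.0"}]
print(call_ollama_api("gemma3", messages, temperature=0.7, max_tokens=128))
```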
{gac-1.4.1 → gac-1.5.0}/src/gac/providers/openai.py
RENAMED
@@ -22,7 +22,12 @@ def call_openai_api(model: str, messages: list[dict], temperature: float, max_to
         response = httpx.post(url, headers=headers, json=data, timeout=120)
         response.raise_for_status()
         response_data = response.json()
-        return response_data["choices"][0]["message"]["content"]
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("OpenAI API returned null content")
+        if content == "":
+            raise AIError.model_error("OpenAI API returned empty content")
+        return content
     except httpx.HTTPStatusError as e:
         raise AIError.model_error(f"OpenAI API error: {e.response.status_code} - {e.response.text}") from e
     except Exception as e:
{gac-1.4.1 → gac-1.5.0}/src/gac/providers/openrouter.py
RENAMED
@@ -30,7 +30,12 @@ def call_openrouter_api(model: str, messages: list[dict], temperature: float, ma
         response = httpx.post(url, headers=headers, json=data, timeout=120)
         response.raise_for_status()
         response_data = response.json()
-        return response_data["choices"][0]["message"]["content"]
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("OpenRouter API returned null content")
+        if content == "":
+            raise AIError.model_error("OpenRouter API returned empty content")
+        return content
     except httpx.HTTPStatusError as e:
         # Handle specific HTTP status codes
         status_code = e.response.status_code
{gac-1.4.1 → gac-1.5.0}/.gitignore, LICENSE, pyproject.toml, src/gac/__init__.py, src/gac/cli.py, src/gac/config.py, src/gac/config_cli.py, src/gac/constants.py, src/gac/diff_cli.py, src/gac/errors.py, src/gac/git.py, src/gac/main.py, src/gac/preprocess.py, src/gac/providers/groq.py, src/gac/providers/zai.py, src/gac/security.py, src/gac/utils.py
RENAMED (files without changes)