gac 1.4.2.tar.gz → 1.5.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of gac might be problematic.
- {gac-1.4.2 → gac-1.5.1}/PKG-INFO +1 -1
- {gac-1.4.2 → gac-1.5.1}/src/gac/__version__.py +1 -1
- {gac-1.4.2 → gac-1.5.1}/src/gac/ai.py +4 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/ai_utils.py +12 -1
- {gac-1.4.2 → gac-1.5.1}/src/gac/init_cli.py +2 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/providers/__init__.py +4 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/providers/anthropic.py +6 -1
- {gac-1.4.2 → gac-1.5.1}/src/gac/providers/cerebras.py +6 -1
- gac-1.5.1/src/gac/providers/gemini.py +70 -0
- gac-1.5.1/src/gac/providers/lmstudio.py +55 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/providers/ollama.py +15 -4
- {gac-1.4.2 → gac-1.5.1}/src/gac/providers/openai.py +6 -1
- {gac-1.4.2 → gac-1.5.1}/src/gac/providers/openrouter.py +6 -1
- {gac-1.4.2 → gac-1.5.1}/.gitignore +0 -0
- {gac-1.4.2 → gac-1.5.1}/LICENSE +0 -0
- {gac-1.4.2 → gac-1.5.1}/README.md +0 -0
- {gac-1.4.2 → gac-1.5.1}/pyproject.toml +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/__init__.py +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/cli.py +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/config.py +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/config_cli.py +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/constants.py +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/diff_cli.py +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/errors.py +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/git.py +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/main.py +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/preprocess.py +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/prompt.py +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/providers/groq.py +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/providers/zai.py +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/security.py +0 -0
- {gac-1.4.2 → gac-1.5.1}/src/gac/utils.py +0 -0
{gac-1.4.2 → gac-1.5.1}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gac
-Version: 1.4.2
+Version: 1.5.1
 Summary: AI-powered Git commit message generator with multi-provider support
 Project-URL: Homepage, https://github.com/cellwebb/gac
 Project-URL: Documentation, https://github.com/cellwebb/gac#readme
{gac-1.4.2 → gac-1.5.1}/src/gac/ai.py
RENAMED
@@ -12,7 +12,9 @@ from gac.errors import AIError
 from gac.providers import (
     call_anthropic_api,
     call_cerebras_api,
+    call_gemini_api,
     call_groq_api,
+    call_lmstudio_api,
     call_ollama_api,
     call_openai_api,
     call_openrouter_api,
@@ -71,6 +73,8 @@ def generate_commit_message(
         "openrouter": call_openrouter_api,
         "zai": call_zai_api,
         "zai-coding": call_zai_coding_api,
+        "gemini": call_gemini_api,
+        "lmstudio": call_lmstudio_api,
     }

     # Generate the commit message using centralized retry logic
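A minimal sketch of the dispatch this mapping enables. The dict's variable name is not visible in this diff, so `provider_funcs` is an assumption; the "provider:model" split is shown in ai_utils.py below.

# Hedged sketch, not gac's actual code: assumes the mapping is keyed by the
# provider prefix of a "provider:model" string, as ai_utils.py suggests.
from gac.providers import call_gemini_api, call_lmstudio_api

provider_funcs = {  # name assumed for illustration
    "gemini": call_gemini_api,
    "lmstudio": call_lmstudio_api,
}
messages = [{"role": "user", "content": "Write a commit message for the staged diff."}]
provider, model_name = "gemini:gemini-2.5-flash".split(":", 1)
commit_message = provider_funcs[provider](model_name, messages, 0.7, 256)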
{gac-1.4.2 → gac-1.5.1}/src/gac/ai_utils.py
RENAMED
@@ -93,7 +93,18 @@ def generate_with_retries(
     provider, model_name = model.split(":", 1)

     # Validate provider
-    supported_providers = ["anthropic", "cerebras", "groq", "ollama", "openai", "openrouter", "zai", "zai-coding"]
+    supported_providers = [
+        "anthropic",
+        "cerebras",
+        "gemini",
+        "groq",
+        "lmstudio",
+        "ollama",
+        "openai",
+        "openrouter",
+        "zai",
+        "zai-coding",
+    ]
     if provider not in supported_providers:
         raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {supported_providers}")

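A self-contained reproduction of the check above; the unsupported prefix is hypothetical.

# Mirrors the validation in generate_with_retries without importing gac.
supported_providers = ["anthropic", "cerebras", "gemini", "groq", "lmstudio",
                       "ollama", "openai", "openrouter", "zai", "zai-coding"]

provider, model_name = "lmstudio:deepseek-r1-distill-qwen-7b".split(":", 1)
assert provider in supported_providers  # passes: lmstudio is now supported

provider, _ = "mistral:mistral-small".split(":", 1)  # hypothetical unsupported prefix
assert provider not in supported_providers  # gac would raise AIError.model_error here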
{gac-1.4.2 → gac-1.5.1}/src/gac/init_cli.py
RENAMED
@@ -22,7 +22,9 @@ def init() -> None:
     providers = [
         ("Anthropic", "claude-3-5-haiku-latest"),
         ("Cerebras", "qwen-3-coder-480b"),
+        ("Gemini", "gemini-2.5-flash"),
         ("Groq", "meta-llama/llama-4-maverick-17b-128e-instruct"),
+        ("LM Studio", "deepseek-r1-distill-qwen-7b"),
         ("Ollama", "gemma3"),
         ("OpenAI", "gpt-4.1-mini"),
         ("OpenRouter", "openrouter/auto"),
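How the wizard's (display name, default model) pairs relate to the provider prefixes validated in ai_utils.py. The lowercase/strip-spaces mapping is an assumption, since init_cli.py's prompt handling is not part of this diff.

# Assumed mapping from wizard display names to provider prefixes.
defaults = [("Gemini", "gemini-2.5-flash"), ("LM Studio", "deepseek-r1-distill-qwen-7b")]
for name, default_model in defaults:
    prefix = name.lower().replace(" ", "")  # "gemini", "lmstudio"
    print(f"{prefix}:{default_model}")      # e.g. "lmstudio:deepseek-r1-distill-qwen-7b"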
{gac-1.4.2 → gac-1.5.1}/src/gac/providers/__init__.py
RENAMED
@@ -2,7 +2,9 @@

 from .anthropic import call_anthropic_api
 from .cerebras import call_cerebras_api
+from .gemini import call_gemini_api
 from .groq import call_groq_api
+from .lmstudio import call_lmstudio_api
 from .ollama import call_ollama_api
 from .openai import call_openai_api
 from .openrouter import call_openrouter_api
@@ -11,7 +13,9 @@ from .zai import call_zai_api, call_zai_coding_api
 __all__ = [
     "call_anthropic_api",
     "call_cerebras_api",
+    "call_gemini_api",
     "call_groq_api",
+    "call_lmstudio_api",
     "call_ollama_api",
     "call_openai_api",
     "call_openrouter_api",
{gac-1.4.2 → gac-1.5.1}/src/gac/providers/anthropic.py
RENAMED
@@ -35,7 +35,12 @@ def call_anthropic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
         response = httpx.post(url, headers=headers, json=data, timeout=120)
         response.raise_for_status()
         response_data = response.json()
-        return response_data["content"][0]["text"]
+        content = response_data["content"][0]["text"]
+        if content is None:
+            raise AIError.model_error("Anthropic API returned null content")
+        if content == "":
+            raise AIError.model_error("Anthropic API returned empty content")
+        return content
     except httpx.HTTPStatusError as e:
         raise AIError.model_error(f"Anthropic API error: {e.response.status_code} - {e.response.text}") from e
     except Exception as e:
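The same null/empty guard is applied to Cerebras, OpenAI, and OpenRouter below, so one test sketch covers the pattern. This assumes pytest; the stub response shape and the ANTHROPIC_API_KEY variable name are assumptions covering only what the function visibly touches.

import httpx
import pytest

from gac.errors import AIError
from gac.providers.anthropic import call_anthropic_api

class DummyResponse:
    """Stub exposing only the two methods call_anthropic_api uses."""
    def raise_for_status(self) -> None:
        pass
    def json(self) -> dict:
        return {"content": [{"text": ""}]}  # empty content now raises instead of returning ""

def test_empty_content_raises(monkeypatch):
    monkeypatch.setenv("ANTHROPIC_API_KEY", "test-key")  # assumed env var name
    monkeypatch.setattr(httpx, "post", lambda *args, **kwargs: DummyResponse())
    with pytest.raises(AIError):
        call_anthropic_api("claude-3-5-haiku-latest", [{"role": "user", "content": "hi"}], 0.7, 256)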
{gac-1.4.2 → gac-1.5.1}/src/gac/providers/cerebras.py
RENAMED
@@ -22,7 +22,12 @@ def call_cerebras_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
         response = httpx.post(url, headers=headers, json=data, timeout=120)
         response.raise_for_status()
         response_data = response.json()
-        return response_data["choices"][0]["message"]["content"]
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("Cerebras API returned null content")
+        if content == "":
+            raise AIError.model_error("Cerebras API returned empty content")
+        return content
     except httpx.HTTPStatusError as e:
         raise AIError.model_error(f"Cerebras API error: {e.response.status_code} - {e.response.text}") from e
     except Exception as e:
gac-1.5.1/src/gac/providers/gemini.py
ADDED
@@ -0,0 +1,70 @@
+"""Gemini AI provider implementation."""
+
+import os
+from typing import Any
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_gemini_api(model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int) -> str:
+    """Call Gemini API directly."""
+    api_key = os.getenv("GEMINI_API_KEY")
+    if not api_key:
+        raise AIError.model_error("GEMINI_API_KEY not found in environment variables")
+
+    url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"
+
+    # Build contents array following 2025 Gemini API format
+    contents = []
+
+    # Add system instruction as first content with role "system" (2025 format)
+    for msg in messages:
+        if msg["role"] == "system":
+            contents.append({"role": "system", "parts": [{"text": msg["content"]}]})
+            break
+
+    # Add user and assistant messages
+    for msg in messages:
+        if msg["role"] == "user":
+            contents.append({"role": "user", "parts": [{"text": msg["content"]}]})
+        elif msg["role"] == "assistant":
+            contents.append(
+                {
+                    "role": "model",  # Gemini uses "model" instead of "assistant"
+                    "parts": [{"text": msg["content"]}],
+                }
+            )
+
+    payload: dict[str, Any] = {
+        "contents": contents,
+        "generationConfig": {"temperature": temperature, "maxOutputTokens": max_tokens},
+    }
+
+    headers = {"x-goog-api-key": api_key, "Content-Type": "application/json"}
+
+    try:
+        response = httpx.post(url, headers=headers, json=payload, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+
+        # Check for candidates and proper response structure
+        if not response_data.get("candidates"):
+            raise AIError.model_error("Gemini API response missing candidates")
+
+        candidate = response_data["candidates"][0]
+        if "content" not in candidate or "parts" not in candidate["content"] or not candidate["content"]["parts"]:
+            raise AIError.model_error("Gemini API response has invalid content structure")
+
+        content = candidate["content"]["parts"][0].get("text")
+        if content is None or content == "":
+            raise AIError.model_error("Gemini API response missing text content")
+
+        return content
+    except AIError:
+        raise
+    except httpx.HTTPStatusError as e:
+        raise AIError.model_error(f"Gemini API error: {e.response.status_code} - {e.response.text}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Gemini API: {str(e)}") from e
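A usage sketch for the new provider, assuming GEMINI_API_KEY is exported; the model id is the init_cli.py default.

from gac.providers.gemini import call_gemini_api

messages = [
    {"role": "system", "content": "You write conventional commit messages."},
    {"role": "user", "content": "Summarize the staged changes in one line."},
]
# The system turn becomes the first contents entry; any assistant turns would be
# sent under Gemini's "model" role, per the mapping above.
print(call_gemini_api("gemini-2.5-flash", messages, temperature=0.7, max_tokens=256))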
gac-1.5.1/src/gac/providers/lmstudio.py
ADDED
@@ -0,0 +1,55 @@
+"""LM Studio AI provider implementation."""
+
+import os
+from typing import Any
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_lmstudio_api(model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int) -> str:
+    """Call LM Studio's OpenAI-compatible API."""
+    api_url = os.getenv("LMSTUDIO_API_URL", "http://localhost:1234")
+    api_url = api_url.rstrip("/")
+
+    url = f"{api_url}/v1/chat/completions"
+
+    headers = {"Content-Type": "application/json"}
+    api_key = os.getenv("LMSTUDIO_API_KEY")
+    if api_key:
+        headers["Authorization"] = f"Bearer {api_key}"
+
+    payload: dict[str, Any] = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+        "stream": False,
+    }
+
+    try:
+        response = httpx.post(url, headers=headers, json=payload, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        choices = response_data.get("choices") or []
+        if not choices:
+            raise AIError.model_error("LM Studio API response missing choices")
+
+        message = choices[0].get("message") or {}
+        content = message.get("content")
+        if content:
+            return content
+
+        # Some OpenAI-compatible servers return text field directly
+        content = choices[0].get("text")
+        if content:
+            return content
+
+        raise AIError.model_error("LM Studio API response missing content")
+    except httpx.ConnectError as e:
+        raise AIError.connection_error(f"LM Studio connection failed: {str(e)}") from e
+    except httpx.HTTPStatusError as e:
+        raise AIError.model_error(f"LM Studio API error: {e.response.status_code} - {e.response.text}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling LM Studio API: {str(e)}") from e
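A usage sketch against a local LM Studio server; the URL default matches the code above and the model id is the init_cli.py default.

import os

from gac.providers.lmstudio import call_lmstudio_api

os.environ.setdefault("LMSTUDIO_API_URL", "http://localhost:1234")  # default shown above
messages = [{"role": "user", "content": "One-line summary: fix off-by-one in pager."}]
print(call_lmstudio_api("deepseek-r1-distill-qwen-7b", messages, temperature=0.2, max_tokens=128))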
{gac-1.4.2 → gac-1.5.1}/src/gac/providers/ollama.py
RENAMED
@@ -10,23 +10,34 @@ from gac.errors import AIError
 def call_ollama_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
     """Call Ollama API directly."""
     api_url = os.getenv("OLLAMA_API_URL", "http://localhost:11434")
+    api_key = os.getenv("OLLAMA_API_KEY")

     url = f"{api_url.rstrip('/')}/api/chat"
     data = {"model": model, "messages": messages, "temperature": temperature, "stream": False}
+    headers = {"Content-Type": "application/json"}
+    if api_key:
+        headers["Authorization"] = f"Bearer {api_key}"

     try:
-        response = httpx.post(url, json=data, timeout=120)
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
         response.raise_for_status()
         response_data = response.json()

+        content = None
         # Handle different response formats from Ollama
         if "message" in response_data and "content" in response_data["message"]:
-            return response_data["message"]["content"]
+            content = response_data["message"]["content"]
         elif "response" in response_data:
-            return response_data["response"]
+            content = response_data["response"]
         else:
             # Fallback: return the full response as string
-            return str(response_data)
+            content = str(response_data)
+
+        if content is None:
+            raise AIError.model_error("Ollama API returned null content")
+        if content == "":
+            raise AIError.model_error("Ollama API returned empty content")
+        return content
     except httpx.ConnectError as e:
         raise AIError.connection_error(f"Ollama connection failed. Make sure Ollama is running: {str(e)}") from e
     except httpx.HTTPStatusError as e:
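The new OLLAMA_API_KEY handling makes authenticated remote Ollama instances reachable; a sketch with a hypothetical host and token.

import os

from gac.providers.ollama import call_ollama_api

os.environ["OLLAMA_API_URL"] = "https://ollama.internal.example"  # hypothetical remote host
os.environ["OLLAMA_API_KEY"] = "proxy-token"  # hypothetical; sent as a Bearer header above
print(call_ollama_api("gemma3", [{"role": "user", "content": "hello"}], 0.7, 128))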
{gac-1.4.2 → gac-1.5.1}/src/gac/providers/openai.py
RENAMED
@@ -22,7 +22,12 @@ def call_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
         response = httpx.post(url, headers=headers, json=data, timeout=120)
         response.raise_for_status()
         response_data = response.json()
-        return response_data["choices"][0]["message"]["content"]
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("OpenAI API returned null content")
+        if content == "":
+            raise AIError.model_error("OpenAI API returned empty content")
+        return content
     except httpx.HTTPStatusError as e:
         raise AIError.model_error(f"OpenAI API error: {e.response.status_code} - {e.response.text}") from e
     except Exception as e:
{gac-1.4.2 → gac-1.5.1}/src/gac/providers/openrouter.py
RENAMED
@@ -30,7 +30,12 @@ def call_openrouter_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
         response = httpx.post(url, headers=headers, json=data, timeout=120)
         response.raise_for_status()
         response_data = response.json()
-        return response_data["choices"][0]["message"]["content"]
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("OpenRouter API returned null content")
+        if content == "":
+            raise AIError.model_error("OpenRouter API returned empty content")
+        return content
     except httpx.HTTPStatusError as e:
         # Handle specific HTTP status codes
         status_code = e.response.status_code
All remaining files ({gac-1.4.2 → gac-1.5.1}/.gitignore through {gac-1.4.2 → gac-1.5.1}/src/gac/utils.py, listed above with +0 -0) were renamed without content changes.