gac 3.6.0__py3-none-any.whl → 3.10.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gac/__init__.py +4 -6
- gac/__version__.py +1 -1
- gac/ai_utils.py +59 -43
- gac/auth_cli.py +181 -36
- gac/cli.py +26 -9
- gac/commit_executor.py +59 -0
- gac/config.py +81 -2
- gac/config_cli.py +19 -7
- gac/constants/__init__.py +34 -0
- gac/constants/commit.py +63 -0
- gac/constants/defaults.py +40 -0
- gac/constants/file_patterns.py +110 -0
- gac/constants/languages.py +119 -0
- gac/diff_cli.py +0 -22
- gac/errors.py +8 -2
- gac/git.py +6 -6
- gac/git_state_validator.py +193 -0
- gac/grouped_commit_workflow.py +458 -0
- gac/init_cli.py +2 -1
- gac/interactive_mode.py +179 -0
- gac/language_cli.py +0 -1
- gac/main.py +231 -926
- gac/model_cli.py +67 -11
- gac/model_identifier.py +70 -0
- gac/oauth/__init__.py +26 -0
- gac/oauth/claude_code.py +89 -22
- gac/oauth/qwen_oauth.py +327 -0
- gac/oauth/token_store.py +81 -0
- gac/oauth_retry.py +161 -0
- gac/postprocess.py +155 -0
- gac/prompt.py +21 -479
- gac/prompt_builder.py +88 -0
- gac/providers/README.md +437 -0
- gac/providers/__init__.py +70 -78
- gac/providers/anthropic.py +12 -46
- gac/providers/azure_openai.py +48 -88
- gac/providers/base.py +329 -0
- gac/providers/cerebras.py +10 -33
- gac/providers/chutes.py +16 -62
- gac/providers/claude_code.py +64 -87
- gac/providers/custom_anthropic.py +51 -81
- gac/providers/custom_openai.py +29 -83
- gac/providers/deepseek.py +10 -33
- gac/providers/error_handler.py +139 -0
- gac/providers/fireworks.py +10 -33
- gac/providers/gemini.py +66 -63
- gac/providers/groq.py +10 -58
- gac/providers/kimi_coding.py +19 -55
- gac/providers/lmstudio.py +64 -43
- gac/providers/minimax.py +10 -33
- gac/providers/mistral.py +10 -33
- gac/providers/moonshot.py +10 -33
- gac/providers/ollama.py +56 -33
- gac/providers/openai.py +30 -36
- gac/providers/openrouter.py +15 -52
- gac/providers/protocol.py +71 -0
- gac/providers/qwen.py +64 -0
- gac/providers/registry.py +58 -0
- gac/providers/replicate.py +140 -82
- gac/providers/streamlake.py +26 -46
- gac/providers/synthetic.py +35 -37
- gac/providers/together.py +10 -33
- gac/providers/zai.py +29 -57
- gac/py.typed +0 -0
- gac/security.py +1 -1
- gac/templates/__init__.py +1 -0
- gac/templates/question_generation.txt +60 -0
- gac/templates/system_prompt.txt +224 -0
- gac/templates/user_prompt.txt +28 -0
- gac/utils.py +36 -6
- gac/workflow_context.py +162 -0
- gac/workflow_utils.py +3 -8
- {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/METADATA +6 -4
- gac-3.10.10.dist-info/RECORD +79 -0
- gac/constants.py +0 -321
- gac-3.6.0.dist-info/RECORD +0 -53
- {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/WHEEL +0 -0
- {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/entry_points.txt +0 -0
- {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/licenses/LICENSE +0 -0
gac/interactive_mode.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Interactive mode handling for gac."""
|
|
3
|
+
|
|
4
|
+
import logging
|
|
5
|
+
import re
|
|
6
|
+
|
|
7
|
+
from rich.console import Console
|
|
8
|
+
|
|
9
|
+
from gac.ai import generate_commit_message
|
|
10
|
+
from gac.config import GACConfig
|
|
11
|
+
from gac.git_state_validator import GitState
|
|
12
|
+
from gac.workflow_utils import (
|
|
13
|
+
collect_interactive_answers,
|
|
14
|
+
format_answers_for_prompt,
|
|
15
|
+
handle_confirmation_loop,
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
logger = logging.getLogger(__name__)
|
|
19
|
+
console = Console()
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class InteractiveMode:
    """Handles interactive question generation and user interaction flows.

    Interactive mode is strictly additive: it asks the model for clarifying
    questions about the staged changes, collects the user's answers, and folds
    them into the user prompt. Any failure along the way degrades gracefully
    to the normal (question-free) commit flow instead of aborting the commit.
    """

    def __init__(self, config: GACConfig):
        # Configuration is stored for use by the interactive workflow.
        self.config = config

    def generate_contextual_questions(
        self,
        model: str,
        git_state: GitState,
        hint: str,
        temperature: float,
        max_tokens: int,
        max_retries: int,
        quiet: bool = False,
    ) -> list[str]:
        """Generate contextual questions about the staged changes.

        Args:
            model: Model identifier passed through to the generation backend.
            git_state: Snapshot of the repository (status, processed diff, diff stat).
            hint: User-supplied hint to steer question generation.
            temperature: Sampling temperature for the model call.
            max_tokens: Token budget for the model call.
            max_retries: Retry budget for the model call.
            quiet: Suppress console output when True.

        Returns:
            A list of question strings, or an empty list if generation fails
            for any reason (best-effort behavior).
        """
        # Imported here rather than at module level — NOTE(review): presumably
        # to avoid an import cycle with gac.prompt; confirm before hoisting.
        from gac.prompt import build_question_generation_prompt

        status = git_state.status
        diff = git_state.processed_diff
        diff_stat = git_state.diff_stat

        try:
            # Build prompts for question generation
            system_prompt, question_prompt = build_question_generation_prompt(
                status=status,
                processed_diff=diff,
                diff_stat=diff_stat,
                hint=hint,
            )

            # Generate questions using existing infrastructure
            logger.info("Generating contextual questions about staged changes...")
            questions_text = generate_commit_message(
                model=model,
                prompt=(system_prompt, question_prompt),
                temperature=temperature,
                max_tokens=max_tokens,
                max_retries=max_retries,
                quiet=quiet,
                skip_success_message=True,  # Don't show "Generated commit message" for questions
                task_description="contextual questions",
            )

            # Parse the response to extract individual questions
            questions = self._parse_questions_from_response(questions_text)

            logger.info(f"Generated {len(questions)} contextual questions")
            return questions

        except Exception as e:
            # Best-effort: never let question generation block the commit.
            logger.warning(f"Failed to generate contextual questions, proceeding without them: {e}")
            if not quiet:
                console.print("[yellow]⚠️ Could not generate contextual questions, proceeding normally[/yellow]\n")
            return []

    def _parse_questions_from_response(self, response: str) -> list[str]:
        """Parse the AI response to extract individual questions from a numbered list.

        Accepts numbered-list items ("1. Question?" or "1) Question?") and
        falls back to any standalone line ending in "?". Only strings ending
        in "?" are kept.

        Args:
            response: The raw response from the AI model

        Returns:
            A list of cleaned questions
        """
        questions: list[str] = []

        for line in response.strip().split("\n"):
            line = line.strip()
            if not line:
                continue

            # Match numbered list format in one pass: "1. Question?" or "1) Question?"
            match = re.match(r"^\d+[.)]\s+(.+)$", line)
            if match:
                question = match.group(1).strip()
                # Remove any leading symbols like •, -, *
                question = re.sub(r"^[•\-*]\s+", "", question)
                if question and question.endswith("?"):
                    questions.append(question)
            elif line.endswith("?") and len(line) > 5:  # Fallback for non-numbered questions
                questions.append(line)

        return questions

    def handle_interactive_flow(
        self,
        model: str,
        user_prompt: str,
        git_state: GitState,
        hint: str,
        conversation_messages: list[dict[str, str]],
        temperature: float,
        max_tokens: int,
        max_retries: int,
        quiet: bool = False,
    ) -> None:
        """Handle the complete interactive flow for collecting user context.

        Generates questions, collects answers, and (when answers are given)
        rewrites the final user message in ``conversation_messages`` in place
        to include the answers. Failures degrade to the normal flow.
        """
        try:
            questions = self.generate_contextual_questions(
                model=model,
                git_state=git_state,
                hint=hint,
                temperature=temperature,
                max_tokens=max_tokens,
                max_retries=max_retries,
                quiet=quiet,
            )

            if not questions:
                return

            # Collect answers interactively
            answers = collect_interactive_answers(questions)

            if answers is None:
                # User aborted interactive mode
                if not quiet:
                    console.print("[yellow]Proceeding with commit without additional context[/yellow]\n")
            elif answers:
                # User provided some answers, format them for the prompt
                answers_context = format_answers_for_prompt(answers)
                enhanced_user_prompt = user_prompt + answers_context

                # Update the conversation messages with the enhanced prompt
                if conversation_messages and conversation_messages[-1]["role"] == "user":
                    conversation_messages[-1]["content"] = enhanced_user_prompt

                logger.info(f"Collected answers for {len(answers)} questions")
            else:
                # User skipped all questions
                if not quiet:
                    console.print("[dim]No answers provided, proceeding with original context[/dim]\n")

        except Exception as e:
            # This handler also covers answer collection and prompt enhancement,
            # not just question generation (generate_contextual_questions already
            # handles its own failures), so report the failure accurately.
            logger.warning(f"Interactive mode failed, proceeding without additional context: {e}")
            if not quiet:
                console.print("[yellow]⚠️ Interactive mode failed, proceeding normally[/yellow]\n")

    def handle_single_commit_confirmation(
        self,
        model: str,
        commit_message: str,
        conversation_messages: list[dict[str, str]],
        quiet: bool = False,
    ) -> tuple[str, str]:
        """Handle confirmation loop for single commit. Returns (final_message, decision).

        Decision is one of: "yes", "no", "regenerate"
        """
        # Delegate to the shared confirmation loop; the third element of the
        # returned tuple is unused here.
        decision, final_message, _ = handle_confirmation_loop(commit_message, conversation_messages, quiet, model)

        return final_message, decision
|
gac/language_cli.py
CHANGED
|
@@ -256,7 +256,6 @@ def center_text(text: str, width: int = 80) -> str:
|
|
|
256
256
|
Returns:
|
|
257
257
|
Centered text with proper padding
|
|
258
258
|
"""
|
|
259
|
-
import unicodedata
|
|
260
259
|
|
|
261
260
|
def get_display_width(s: str) -> int:
|
|
262
261
|
"""Get the display width of a string, accounting for wide characters."""
|