gac 3.8.1__py3-none-any.whl → 3.10.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gac/__init__.py +4 -6
- gac/__version__.py +1 -1
- gac/ai_utils.py +18 -49
- gac/cli.py +14 -10
- gac/commit_executor.py +59 -0
- gac/config.py +28 -3
- gac/config_cli.py +19 -7
- gac/constants/__init__.py +34 -0
- gac/constants/commit.py +63 -0
- gac/constants/defaults.py +40 -0
- gac/constants/file_patterns.py +110 -0
- gac/constants/languages.py +119 -0
- gac/diff_cli.py +0 -22
- gac/errors.py +8 -2
- gac/git.py +6 -6
- gac/git_state_validator.py +193 -0
- gac/grouped_commit_workflow.py +458 -0
- gac/init_cli.py +2 -1
- gac/interactive_mode.py +179 -0
- gac/language_cli.py +0 -1
- gac/main.py +222 -959
- gac/model_cli.py +2 -1
- gac/model_identifier.py +70 -0
- gac/oauth/claude_code.py +2 -2
- gac/oauth/qwen_oauth.py +4 -0
- gac/oauth/token_store.py +2 -2
- gac/oauth_retry.py +161 -0
- gac/postprocess.py +155 -0
- gac/prompt.py +20 -490
- gac/prompt_builder.py +88 -0
- gac/providers/README.md +437 -0
- gac/providers/__init__.py +70 -81
- gac/providers/anthropic.py +12 -56
- gac/providers/azure_openai.py +48 -92
- gac/providers/base.py +329 -0
- gac/providers/cerebras.py +10 -43
- gac/providers/chutes.py +16 -72
- gac/providers/claude_code.py +64 -97
- gac/providers/custom_anthropic.py +51 -85
- gac/providers/custom_openai.py +29 -87
- gac/providers/deepseek.py +10 -43
- gac/providers/error_handler.py +139 -0
- gac/providers/fireworks.py +10 -43
- gac/providers/gemini.py +66 -73
- gac/providers/groq.py +10 -62
- gac/providers/kimi_coding.py +19 -59
- gac/providers/lmstudio.py +62 -52
- gac/providers/minimax.py +10 -43
- gac/providers/mistral.py +10 -43
- gac/providers/moonshot.py +10 -43
- gac/providers/ollama.py +54 -41
- gac/providers/openai.py +30 -46
- gac/providers/openrouter.py +15 -62
- gac/providers/protocol.py +71 -0
- gac/providers/qwen.py +55 -67
- gac/providers/registry.py +58 -0
- gac/providers/replicate.py +137 -91
- gac/providers/streamlake.py +26 -56
- gac/providers/synthetic.py +35 -47
- gac/providers/together.py +10 -43
- gac/providers/zai.py +21 -59
- gac/py.typed +0 -0
- gac/security.py +1 -1
- gac/templates/__init__.py +1 -0
- gac/templates/question_generation.txt +60 -0
- gac/templates/system_prompt.txt +224 -0
- gac/templates/user_prompt.txt +28 -0
- gac/utils.py +6 -5
- gac/workflow_context.py +162 -0
- {gac-3.8.1.dist-info → gac-3.10.10.dist-info}/METADATA +1 -1
- gac-3.10.10.dist-info/RECORD +79 -0
- gac/constants.py +0 -328
- gac-3.8.1.dist-info/RECORD +0 -56
- {gac-3.8.1.dist-info → gac-3.10.10.dist-info}/WHEEL +0 -0
- {gac-3.8.1.dist-info → gac-3.10.10.dist-info}/entry_points.txt +0 -0
- {gac-3.8.1.dist-info → gac-3.10.10.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,458 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Grouped commit workflow handling for gac."""
|
|
3
|
+
|
|
4
|
+
import json
|
|
5
|
+
import logging
|
|
6
|
+
import subprocess
|
|
7
|
+
from collections import Counter
|
|
8
|
+
from typing import Any, NamedTuple
|
|
9
|
+
|
|
10
|
+
import click
|
|
11
|
+
from rich.console import Console
|
|
12
|
+
from rich.panel import Panel
|
|
13
|
+
|
|
14
|
+
from gac.ai import generate_grouped_commits
|
|
15
|
+
from gac.ai_utils import count_tokens
|
|
16
|
+
from gac.config import GACConfig
|
|
17
|
+
from gac.errors import AIError, ConfigError, GitError
|
|
18
|
+
from gac.git import detect_rename_mappings, get_staged_files, run_git_command
|
|
19
|
+
from gac.git_state_validator import GitState
|
|
20
|
+
from gac.model_identifier import ModelIdentifier
|
|
21
|
+
from gac.workflow_utils import check_token_warning, execute_commit, restore_staging
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger(__name__)
|
|
24
|
+
console = Console()
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class GroupedCommitResult(NamedTuple):
    """Result of grouped commit generation."""

    # Parsed commit groups; each entry is a dict with "files" and "message" keys.
    commits: list[dict[str, Any]]
    # The unmodified model output the groups were parsed from.
    raw_response: str
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class GroupedCommitWorkflow:
|
|
35
|
+
"""Handles multi-file grouping logic and per-group AI calls."""
|
|
36
|
+
|
|
37
|
+
def __init__(self, config: GACConfig):
    """Keep a reference to the gac configuration driving this workflow."""
    self.config = config
|
|
39
|
+
|
|
40
|
+
def validate_grouped_files_or_feedback(
    self, staged: set[str], grouped_result: dict[str, Any]
) -> tuple[bool, str, str]:
    """Check that the proposed groups cover every staged file exactly once.

    Returns a ``(ok, feedback, summary)`` tuple: ``ok`` is True when the
    groups partition the staged set exactly; otherwise ``feedback`` is a
    corrective message to send back to the model and ``summary`` is a short
    problem description for logging.
    """
    commit_list = grouped_result.get("commits", []) if isinstance(grouped_result, dict) else []
    listed: list[str] = [
        str(path)
        for entry in commit_list
        for path in (entry.get("files", []) if isinstance(entry, dict) else [])
    ]

    occurrences = Counter(listed)
    covered = set(listed)

    repeated = sorted(name for name, n in occurrences.items() if n > 1)
    absent = sorted(staged - covered)
    extra = sorted(covered - staged)

    if not (repeated or absent or extra):
        return True, "", ""

    issues: list[str] = []
    if absent:
        issues.append(f"Missing: {', '.join(absent)}")
    if extra:
        issues.append(f"Not staged: {', '.join(extra)}")
    if repeated:
        issues.append(f"Duplicates: {', '.join(repeated)}")

    summary = "; ".join(issues)
    feedback = f"{summary}. Required files: {', '.join(sorted(staged))}. Respond with ONLY valid JSON."
    return False, feedback, summary
|
|
70
|
+
|
|
71
|
+
def handle_validation_retry(
    self,
    attempts: int,
    content_retry_budget: int,
    raw_response: str,
    feedback_message: str,
    error_message: str,
    conversation_messages: list[dict[str, str]],
    quiet: bool,
    retry_context: str,
) -> bool:
    """Record a failed validation round and decide whether to keep retrying.

    Always appends the model's reply and the corrective feedback to the
    conversation. Returns True when the retry budget is exhausted (caller
    should give up), False when another attempt should be made.
    """
    conversation_messages.extend(
        [
            {"role": "assistant", "content": raw_response},
            {"role": "user", "content": feedback_message},
        ]
    )

    out_of_budget = attempts >= content_retry_budget
    if out_of_budget:
        logger.error(error_message)
        console.print(f"\n[red]{error_message}[/red]")
        console.print("\n[yellow]Raw model output:[/yellow]")
        console.print(Panel(raw_response, title="Model Output", border_style="yellow"))
    elif not quiet:
        console.print(f"[yellow]Retry {attempts} of {content_retry_budget - 1}: {retry_context}[/yellow]")
    return out_of_budget
|
|
94
|
+
|
|
95
|
+
def parse_and_validate_json_response(self, raw_response: str) -> dict[str, Any] | None:
|
|
96
|
+
"""Parse and validate JSON response from AI."""
|
|
97
|
+
parsed: dict[str, Any] | None = None
|
|
98
|
+
extract = raw_response
|
|
99
|
+
first_brace = raw_response.find("{")
|
|
100
|
+
last_brace = raw_response.rfind("}")
|
|
101
|
+
if first_brace != -1 and last_brace != -1 and first_brace < last_brace:
|
|
102
|
+
extract = raw_response[first_brace : last_brace + 1]
|
|
103
|
+
|
|
104
|
+
try:
|
|
105
|
+
parsed = json.loads(extract)
|
|
106
|
+
except json.JSONDecodeError as e:
|
|
107
|
+
parsed = None
|
|
108
|
+
logger.debug(
|
|
109
|
+
f"JSON parsing failed: {e}. Extract length: {len(extract)}, Response length: {len(raw_response)}"
|
|
110
|
+
)
|
|
111
|
+
|
|
112
|
+
if parsed is None:
|
|
113
|
+
raise ValueError("Invalid JSON response")
|
|
114
|
+
|
|
115
|
+
# Validate structure
|
|
116
|
+
if "commits" not in parsed or not isinstance(parsed["commits"], list):
|
|
117
|
+
raise ValueError("Response missing 'commits' array")
|
|
118
|
+
if len(parsed["commits"]) == 0:
|
|
119
|
+
raise ValueError("No commits in response")
|
|
120
|
+
for idx, commit in enumerate(parsed["commits"]):
|
|
121
|
+
if "files" not in commit or not isinstance(commit["files"], list):
|
|
122
|
+
raise ValueError(f"Commit {idx + 1} missing 'files' array")
|
|
123
|
+
if "message" not in commit or not isinstance(commit["message"], str):
|
|
124
|
+
raise ValueError(f"Commit {idx + 1} missing 'message' string")
|
|
125
|
+
if len(commit["files"]) == 0:
|
|
126
|
+
raise ValueError(f"Commit {idx + 1} has empty files list")
|
|
127
|
+
if not commit["message"].strip():
|
|
128
|
+
raise ValueError(f"Commit {idx + 1} has empty message")
|
|
129
|
+
|
|
130
|
+
return parsed
|
|
131
|
+
|
|
132
|
+
def generate_grouped_commits_with_retry(
    self,
    model: str,
    conversation_messages: list[dict[str, str]],
    temperature: float,
    max_output_tokens: int,
    max_retries: int,
    quiet: bool,
    staged_files_set: set[str],
    require_confirmation: bool = True,
) -> GroupedCommitResult | int:
    """Ask the model for grouped commits, retrying on invalid output.

    Each failed round feeds corrective feedback back into the conversation
    before the next attempt.

    Returns:
        GroupedCommitResult on success, or int exit code on early exit/failure.
    """
    retry_budget = max(3, int(max_retries))
    failed_rounds = 0
    token_check_done = False
    warning_limit = self.config["warning_limit_tokens"]

    while True:
        prompt_tokens = count_tokens(conversation_messages, model)

        if not token_check_done:
            # Warn (and possibly abort) only once, before the first request.
            if not check_token_warning(prompt_tokens, warning_limit, require_confirmation):
                return 0  # User declined due to token warning
            token_check_done = True

        raw_response = generate_grouped_commits(
            model=model,
            prompt=conversation_messages,
            temperature=temperature,
            max_tokens=max_output_tokens,
            max_retries=max_retries,
            quiet=quiet,
            skip_success_message=True,
        )

        try:
            parsed = self.parse_and_validate_json_response(raw_response)
        except ValueError as e:
            failed_rounds += 1
            feedback = f"Invalid response structure: {e}. Please return ONLY valid JSON following the schema with a non-empty 'commits' array of objects containing 'files' and 'message'."
            error_msg = f"Invalid grouped commits structure after {failed_rounds} retries: {e}"
            if self.handle_validation_retry(
                failed_rounds,
                retry_budget,
                raw_response,
                feedback,
                error_msg,
                conversation_messages,
                quiet,
                "Structure validation failed, asking model to fix...",
            ):
                return 1  # Validation failed after retries
            continue

        # Narrow for mypy: a None parse would have raised ValueError above.
        assert parsed is not None
        ok, feedback, detail_msg = self.validate_grouped_files_or_feedback(staged_files_set, parsed)
        if not ok:
            failed_rounds += 1
            error_msg = f"Grouped commits file set mismatch after {failed_rounds} retries{': ' + detail_msg if detail_msg else ''}"
            if self.handle_validation_retry(
                failed_rounds,
                retry_budget,
                raw_response,
                feedback,
                error_msg,
                conversation_messages,
                quiet,
                "File coverage mismatch, asking model to fix...",
            ):
                return 1  # File validation failed after retries
            continue

        conversation_messages.append({"role": "assistant", "content": raw_response})
        return GroupedCommitResult(commits=parsed["commits"], raw_response=raw_response)
|
|
214
|
+
|
|
215
|
+
def display_grouped_commits(self, result: GroupedCommitResult, model: str, prompt_tokens: int, quiet: bool) -> None:
    """Print the proposed commit groups, their messages, and token usage.

    Prints nothing in quiet mode; the model identifier is still parsed
    first, so a malformed model string fails identically in both modes.
    """
    model_id = ModelIdentifier.parse(model)

    if quiet:
        return

    total = len(result.commits)
    console.print(f"[green]✔ Generated commit messages with {model_id.provider} {model_id.model_name}[/green]")
    console.print(f"[bold green]Proposed Commits ({total}):[/bold green]\n")

    for position, group in enumerate(result.commits, 1):
        console.print(f"[dim]{', '.join(group['files'])}[/dim]")
        console.print(Panel(group["message"].strip(), title=f"Commit Message {position}/{total}", border_style="cyan"))
        console.print()

    completion_tokens = count_tokens(result.raw_response, model)
    total_tokens = prompt_tokens + completion_tokens
    console.print(
        f"[dim]Token usage: {prompt_tokens} prompt + {completion_tokens} completion = {total_tokens} total[/dim]"
    )
|
|
236
|
+
|
|
237
|
+
def handle_grouped_commit_confirmation(self, result: GroupedCommitResult) -> str:
    """Prompt the user to accept, reject, or regenerate the proposed commits.

    Returns:
        "accept": User accepted commits
        "reject": User rejected commits
        "regenerate": User wants to regenerate

    NOTE(review): the prompt text advertises "<feedback>", but any other
    non-empty input simply re-prompts in this loop — confirm whether
    free-form feedback is meant to be handled by a caller.
    """
    total = len(result.commits)
    while True:
        answer = click.prompt(
            f"Proceed with {total} commits above? [y/n/r/<feedback>]",
            type=str,
            show_default=False,
        ).strip()
        lowered = answer.lower()

        if lowered in ("y", "yes"):
            return "accept"
        if lowered in ("n", "no"):
            console.print("[yellow]Commits not accepted. Exiting...[/yellow]")
            return "reject"
        if not answer:
            continue
        if lowered in ("r", "reroll"):
            console.print("[cyan]Regenerating commit groups...[/cyan]")
            return "regenerate"
|
|
264
|
+
|
|
265
|
+
def execute_grouped_commits(
    self,
    result: GroupedCommitResult,
    dry_run: bool,
    push: bool,
    no_verify: bool,
    hook_timeout: int,
) -> int:
    """Create one git commit per proposed group, optionally pushing afterwards.

    In dry-run mode only prints what would happen. On failure, interruption,
    or a failed push, the original staging area is restored.

    Returns:
        Exit code: 0 for success, non-zero for failure.
    """
    total = len(result.commits)

    restore_needed = False
    saved_files: list[str] | None = None
    saved_diff: str | None = None

    if dry_run:
        console.print(f"[yellow]Dry run: Would create {total} commits[/yellow]")
        for seq, group in enumerate(result.commits, 1):
            console.print(f"\n[cyan]Commit {seq}/{total}:[/cyan]")
            console.print(f"  Files: {', '.join(group['files'])}")
            console.print(f"  Message: {group['message'].strip()[:50]}...")
    else:
        # Snapshot the staging area so it can be restored on failure, then
        # unstage everything and re-stage file-by-file per group.
        saved_files = get_staged_files(existing_only=False)
        saved_diff = run_git_command(["diff", "--cached", "--binary"], silent=True)
        run_git_command(["reset", "HEAD"])

        try:
            # Detect file renames to handle them properly
            rename_mappings = detect_rename_mappings(saved_diff)

            for seq, group in enumerate(result.commits, 1):
                try:
                    for target in group["files"]:
                        if target in rename_mappings:
                            # Stage the old path too so the deletion half of
                            # the rename lands in the same commit.
                            run_git_command(["add", "-A", rename_mappings[target]])
                        run_git_command(["add", "-A", target])
                    execute_commit(group["message"].strip(), no_verify, hook_timeout)
                    console.print(f"[green]✓ Commit {seq}/{total} created[/green]")
                except (AIError, ConfigError, GitError, subprocess.SubprocessError, OSError) as e:
                    restore_needed = True
                    console.print(f"[red]✗ Failed at commit {seq}/{total}: {e}[/red]")
                    console.print(f"[yellow]Completed {seq - 1}/{total} commits.[/yellow]")
                    break
        except KeyboardInterrupt:
            restore_needed = True
            console.print("\n[yellow]Interrupted by user. Restoring original staging area...[/yellow]")

        if restore_needed:
            console.print("[yellow]Restoring original staging area...[/yellow]")
            restore_staging(saved_files or [], saved_diff)
            console.print("[green]Original staging area restored.[/green]")
            return 1

    if push:
        try:
            if dry_run:
                console.print("[yellow]Dry run: Would push changes[/yellow]")
                return 0
            from gac.git import push_changes

            if push_changes():
                logger.info("Changes pushed successfully")
                console.print("[green]Changes pushed successfully[/green]")
            else:
                restore_needed = True
                console.print(
                    "[red]Failed to push changes. Check your remote configuration and network connection.[/red]"
                )
        except (GitError, OSError) as e:
            restore_needed = True
            console.print(f"[red]Error pushing changes: {e}[/red]")

        if restore_needed:
            console.print("[yellow]Restoring original staging area...[/yellow]")
            if saved_files is None or saved_diff is None:
                saved_files = get_staged_files(existing_only=False)
                saved_diff = run_git_command(["diff", "--cached", "--binary"])
            restore_staging(saved_files, saved_diff)
            console.print("[green]Original staging area restored.[/green]")
            return 1

    return 0
|
|
357
|
+
|
|
358
|
+
def execute_workflow(
    self,
    system_prompt: str,
    user_prompt: str,
    model: str,
    temperature: float,
    max_output_tokens: int,
    max_retries: int,
    require_confirmation: bool,
    quiet: bool,
    no_verify: bool,
    dry_run: bool,
    push: bool,
    show_prompt: bool,
    interactive: bool,
    message_only: bool,
    git_state: GitState,
    hint: str,
    hook_timeout: int = 120,
) -> int:
    """Run the full grouped-commit flow: prompt, generate, confirm, commit.

    Returns:
        Exit code: 0 for success, non-zero for failure.
    """
    if show_prompt:
        combined = f"SYSTEM PROMPT:\n{system_prompt}\n\nUSER PROMPT:\n{user_prompt}"
        console.print(Panel(combined, title="Prompt for LLM", border_style="bright_blue"))

    conversation_messages: list[dict[str, str]] = (
        [{"role": "system", "content": system_prompt}] if system_prompt else []
    )
    conversation_messages.append({"role": "user", "content": user_prompt})

    # Snapshot staged files so the model's grouping can be validated against them.
    staged_files_set = set(get_staged_files(existing_only=False))

    # Optionally gather extra user context before generating.
    if interactive and not message_only:
        from gac.interactive_mode import InteractiveMode

        InteractiveMode(self.config).handle_interactive_flow(
            model=model,
            user_prompt=user_prompt,
            git_state=git_state,
            hint=hint,
            conversation_messages=conversation_messages,
            temperature=temperature,
            max_tokens=max_output_tokens,
            max_retries=max_retries,
            quiet=quiet,
        )

    while True:
        outcome = self.generate_grouped_commits_with_retry(
            model=model,
            conversation_messages=conversation_messages,
            temperature=temperature,
            max_output_tokens=max_output_tokens,
            max_retries=max_retries,
            quiet=quiet,
            staged_files_set=staged_files_set,
            require_confirmation=require_confirmation,
        )

        # Generation bailed out early with an exit code.
        if isinstance(outcome, int):
            return outcome

        prompt_tokens = count_tokens(conversation_messages, model)
        self.display_grouped_commits(outcome, model, prompt_tokens, quiet)

        if require_confirmation:
            decision = self.handle_grouped_commit_confirmation(outcome)
            if decision == "reject":
                return 0  # user rejected: clean exit
            if decision != "accept":
                continue  # regenerate and loop again

        return self.execute_grouped_commits(
            result=outcome,
            dry_run=dry_run,
            push=push,
            no_verify=no_verify,
            hook_timeout=hook_timeout,
        )
|
gac/init_cli.py
CHANGED
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
"""CLI for initializing gac configuration interactively."""
|
|
2
2
|
|
|
3
3
|
from pathlib import Path
|
|
4
|
+
from typing import cast
|
|
4
5
|
|
|
5
6
|
import click
|
|
6
7
|
import questionary
|
|
@@ -20,7 +21,7 @@ def _prompt_required_text(prompt: str) -> str | None:
|
|
|
20
21
|
return None
|
|
21
22
|
value = response.strip()
|
|
22
23
|
if value:
|
|
23
|
-
return value
|
|
24
|
+
return cast(str, value)
|
|
24
25
|
click.echo("A value is required. Please try again.")
|
|
25
26
|
|
|
26
27
|
|
gac/interactive_mode.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Interactive mode handling for gac."""
|
|
3
|
+
|
|
4
|
+
import logging
|
|
5
|
+
import re
|
|
6
|
+
|
|
7
|
+
from rich.console import Console
|
|
8
|
+
|
|
9
|
+
from gac.ai import generate_commit_message
|
|
10
|
+
from gac.config import GACConfig
|
|
11
|
+
from gac.git_state_validator import GitState
|
|
12
|
+
from gac.workflow_utils import (
|
|
13
|
+
collect_interactive_answers,
|
|
14
|
+
format_answers_for_prompt,
|
|
15
|
+
handle_confirmation_loop,
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
logger = logging.getLogger(__name__)
|
|
19
|
+
console = Console()
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class InteractiveMode:
|
|
23
|
+
"""Handles interactive question generation and user interaction flows."""
|
|
24
|
+
|
|
25
|
+
def __init__(self, config: GACConfig):
    """Keep a reference to the gac configuration for interactive flows."""
    self.config = config
|
|
27
|
+
|
|
28
|
+
def generate_contextual_questions(
    self,
    model: str,
    git_state: GitState,
    hint: str,
    temperature: float,
    max_tokens: int,
    max_retries: int,
    quiet: bool = False,
) -> list[str]:
    """Ask the model for clarifying questions about the staged changes.

    Best-effort: any failure is logged (and noted on the console unless
    quiet) and an empty list is returned instead of raising.
    """
    from gac.prompt import build_question_generation_prompt

    status = git_state.status
    diff = git_state.processed_diff
    diff_stat = git_state.diff_stat

    try:
        # Build the question-generation prompts from the current git state.
        system_prompt, question_prompt = build_question_generation_prompt(
            status=status,
            processed_diff=diff,
            diff_stat=diff_stat,
            hint=hint,
        )

        # Reuse the commit-message generation pipeline for questions.
        logger.info("Generating contextual questions about staged changes...")
        questions_text = generate_commit_message(
            model=model,
            prompt=(system_prompt, question_prompt),
            temperature=temperature,
            max_tokens=max_tokens,
            max_retries=max_retries,
            quiet=quiet,
            skip_success_message=True,  # Don't show "Generated commit message" for questions
            task_description="contextual questions",
        )

        parsed = self._parse_questions_from_response(questions_text)
        logger.info(f"Generated {len(parsed)} contextual questions")
        return parsed

    except Exception as e:
        logger.warning(f"Failed to generate contextual questions, proceeding without them: {e}")
        if not quiet:
            console.print("[yellow]⚠️ Could not generate contextual questions, proceeding normally[/yellow]\n")
        return []
|
|
78
|
+
|
|
79
|
+
def _parse_questions_from_response(self, response: str) -> list[str]:
    """Extract individual questions from a (possibly numbered) AI response.

    Args:
        response: The raw response from the AI model

    Returns:
        A list of cleaned questions
    """
    extracted: list[str] = []

    for raw_line in response.strip().split("\n"):
        text = raw_line.strip()
        if not text:
            continue

        # Accept "1. Question?" as well as "1) Question?" numbering.
        numbered = re.match(r"^\d+\.\s+(.+)$", text) or re.match(r"^\d+\)\s+(.+)$", text)
        if numbered:
            candidate = numbered.group(1).strip()
            # Strip a leading bullet (•, -, *) left over after the number.
            candidate = re.sub(r"^[•\-*]\s+", "", candidate)
            if candidate and candidate.endswith("?"):
                extracted.append(candidate)
        elif text.endswith("?") and len(text) > 5:
            # Fallback: keep unnumbered lines that look like questions.
            extracted.append(text)

    return extracted
|
|
111
|
+
|
|
112
|
+
def handle_interactive_flow(
    self,
    model: str,
    user_prompt: str,
    git_state: GitState,
    hint: str,
    conversation_messages: list[dict[str, str]],
    temperature: float,
    max_tokens: int,
    max_retries: int,
    quiet: bool = False,
) -> None:
    """Run the interactive Q&A round and fold answers into the user prompt.

    When the user supplies answers, the trailing user message in
    ``conversation_messages`` is rewritten in place with the extra context.
    Any failure is logged and otherwise ignored.
    """
    try:
        questions = self.generate_contextual_questions(
            model=model,
            git_state=git_state,
            hint=hint,
            temperature=temperature,
            max_tokens=max_tokens,
            max_retries=max_retries,
            quiet=quiet,
        )

        if not questions:
            return

        # Ask the user each question in turn.
        answers = collect_interactive_answers(questions)

        if answers is None:
            # User aborted the interactive round entirely.
            if not quiet:
                console.print("[yellow]Proceeding with commit without additional context[/yellow]\n")
        elif answers:
            # Fold the collected answers into the trailing user message.
            enriched = user_prompt + format_answers_for_prompt(answers)
            if conversation_messages and conversation_messages[-1]["role"] == "user":
                conversation_messages[-1]["content"] = enriched
            logger.info(f"Collected answers for {len(answers)} questions")
        else:
            # User skipped every question.
            if not quiet:
                console.print("[dim]No answers provided, proceeding with original context[/dim]\n")

    except Exception as e:
        logger.warning(f"Failed to generate contextual questions, proceeding without them: {e}")
        if not quiet:
            console.print("[yellow]⚠️ Could not generate contextual questions, proceeding normally[/yellow]\n")
|
|
165
|
+
|
|
166
|
+
def handle_single_commit_confirmation(
    self,
    model: str,
    commit_message: str,
    conversation_messages: list[dict[str, str]],
    quiet: bool = False,
) -> tuple[str, str]:
    """Run the confirmation loop for a single commit message.

    Returns:
        (final_message, decision) where decision is "yes", "no", or "regenerate".
    """
    # Delegate to the shared confirmation loop; the third element of its
    # result is unused here.
    outcome = handle_confirmation_loop(commit_message, conversation_messages, quiet, model)
    decision, final_message = outcome[0], outcome[1]
    return final_message, decision
|
gac/language_cli.py
CHANGED
|
@@ -256,7 +256,6 @@ def center_text(text: str, width: int = 80) -> str:
|
|
|
256
256
|
Returns:
|
|
257
257
|
Centered text with proper padding
|
|
258
258
|
"""
|
|
259
|
-
import unicodedata
|
|
260
259
|
|
|
261
260
|
def get_display_width(s: str) -> int:
|
|
262
261
|
"""Get the display width of a string, accounting for wide characters."""
|