gac 3.6.0__py3-none-any.whl → 3.10.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gac/__init__.py +4 -6
- gac/__version__.py +1 -1
- gac/ai_utils.py +59 -43
- gac/auth_cli.py +181 -36
- gac/cli.py +26 -9
- gac/commit_executor.py +59 -0
- gac/config.py +81 -2
- gac/config_cli.py +19 -7
- gac/constants/__init__.py +34 -0
- gac/constants/commit.py +63 -0
- gac/constants/defaults.py +40 -0
- gac/constants/file_patterns.py +110 -0
- gac/constants/languages.py +119 -0
- gac/diff_cli.py +0 -22
- gac/errors.py +8 -2
- gac/git.py +6 -6
- gac/git_state_validator.py +193 -0
- gac/grouped_commit_workflow.py +458 -0
- gac/init_cli.py +2 -1
- gac/interactive_mode.py +179 -0
- gac/language_cli.py +0 -1
- gac/main.py +231 -926
- gac/model_cli.py +67 -11
- gac/model_identifier.py +70 -0
- gac/oauth/__init__.py +26 -0
- gac/oauth/claude_code.py +89 -22
- gac/oauth/qwen_oauth.py +327 -0
- gac/oauth/token_store.py +81 -0
- gac/oauth_retry.py +161 -0
- gac/postprocess.py +155 -0
- gac/prompt.py +21 -479
- gac/prompt_builder.py +88 -0
- gac/providers/README.md +437 -0
- gac/providers/__init__.py +70 -78
- gac/providers/anthropic.py +12 -46
- gac/providers/azure_openai.py +48 -88
- gac/providers/base.py +329 -0
- gac/providers/cerebras.py +10 -33
- gac/providers/chutes.py +16 -62
- gac/providers/claude_code.py +64 -87
- gac/providers/custom_anthropic.py +51 -81
- gac/providers/custom_openai.py +29 -83
- gac/providers/deepseek.py +10 -33
- gac/providers/error_handler.py +139 -0
- gac/providers/fireworks.py +10 -33
- gac/providers/gemini.py +66 -63
- gac/providers/groq.py +10 -58
- gac/providers/kimi_coding.py +19 -55
- gac/providers/lmstudio.py +64 -43
- gac/providers/minimax.py +10 -33
- gac/providers/mistral.py +10 -33
- gac/providers/moonshot.py +10 -33
- gac/providers/ollama.py +56 -33
- gac/providers/openai.py +30 -36
- gac/providers/openrouter.py +15 -52
- gac/providers/protocol.py +71 -0
- gac/providers/qwen.py +64 -0
- gac/providers/registry.py +58 -0
- gac/providers/replicate.py +140 -82
- gac/providers/streamlake.py +26 -46
- gac/providers/synthetic.py +35 -37
- gac/providers/together.py +10 -33
- gac/providers/zai.py +29 -57
- gac/py.typed +0 -0
- gac/security.py +1 -1
- gac/templates/__init__.py +1 -0
- gac/templates/question_generation.txt +60 -0
- gac/templates/system_prompt.txt +224 -0
- gac/templates/user_prompt.txt +28 -0
- gac/utils.py +36 -6
- gac/workflow_context.py +162 -0
- gac/workflow_utils.py +3 -8
- {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/METADATA +6 -4
- gac-3.10.10.dist-info/RECORD +79 -0
- gac/constants.py +0 -321
- gac-3.6.0.dist-info/RECORD +0 -53
- {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/WHEEL +0 -0
- {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/entry_points.txt +0 -0
- {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/licenses/LICENSE +0 -0
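One structural change visible in the listing: the monolithic gac/constants.py (321 lines removed) is replaced by a gac/constants/ package. A minimal, hypothetical sketch of the usual re-export shim for such a split; the submodule names come from the listing above, but the wheel's actual __init__.py contents are not shown in this diff:

# gac/constants/__init__.py -- hypothetical re-export shim (an assumption,
# not the packaged file) so existing `from gac.constants import ...`
# call sites keep working after the split into submodules.
from gac.constants.commit import *  # noqa: F401,F403
from gac.constants.defaults import *  # noqa: F401,F403
from gac.constants.file_patterns import *  # noqa: F401,F403
from gac.constants.languages import *  # noqa: F401,F403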
gac/main.py
CHANGED
@@ -4,763 +4,142 @@ prompt building, AI generation, and commit/push operations. This module contains
 """
 
 import logging
-import sys
 
-import click
 from rich.console import Console
-from rich.panel import Panel
 
 from gac.ai import generate_commit_message
 from gac.ai_utils import count_tokens
-from gac.
-from gac.
-from gac.errors import AIError,
-from gac.git import
-
-
-
-
-
-
-
-
-from gac.preprocess import preprocess_diff
-from gac.prompt import build_prompt, clean_commit_message
-from gac.security import get_affected_files, scan_staged_diff
-from gac.workflow_utils import (
-    check_token_warning,
-    collect_interactive_answers,
-    display_commit_message,
-    execute_commit,
-    format_answers_for_prompt,
-    handle_confirmation_loop,
-    restore_staging,
-)
+from gac.commit_executor import CommitExecutor
+from gac.config import GACConfig, load_config
+from gac.errors import AIError, ConfigError, handle_error
+from gac.git import run_lefthook_hooks, run_pre_commit_hooks
+from gac.git_state_validator import GitStateValidator
+from gac.grouped_commit_workflow import GroupedCommitWorkflow
+from gac.interactive_mode import InteractiveMode
+from gac.oauth_retry import handle_oauth_retry
+from gac.postprocess import clean_commit_message
+from gac.prompt_builder import PromptBuilder
+from gac.workflow_context import CLIOptions, GenerationConfig, WorkflowContext, WorkflowFlags, WorkflowState
+from gac.workflow_utils import check_token_warning, display_commit_message
 
 logger = logging.getLogger(__name__)
 
-config = load_config()
+config: GACConfig = load_config()
 console = Console()  # Initialize console globally to prevent undefined access
 
 
-def
-
+def _execute_single_commit_workflow(ctx: WorkflowContext) -> int:
+    """Execute single commit workflow using extracted components.
 
-
-
-    for commit in commits:
-        files = commit.get("files", []) if isinstance(commit, dict) else []
-        all_files.extend([str(p) for p in files])
-
-    counts = Counter(all_files)
-    union_set = set(all_files)
-
-    duplicates = sorted([f for f, c in counts.items() if c > 1])
-    missing = sorted(staged - union_set)
-    unexpected = sorted(union_set - staged)
-
-    if not duplicates and not missing and not unexpected:
-        return True, "", ""
-
-    problems: list[str] = []
-    if missing:
-        problems.append(f"Missing: {', '.join(missing)}")
-    if unexpected:
-        problems.append(f"Not staged: {', '.join(unexpected)}")
-    if duplicates:
-        problems.append(f"Duplicates: {', '.join(duplicates)}")
-
-    feedback = f"{'; '.join(problems)}. Required files: {', '.join(sorted(staged))}. Respond with ONLY valid JSON."
-    return False, feedback, "; ".join(problems)
-
-
-def _parse_model_identifier(model: str) -> tuple[str, str]:
-    """Validate and split model identifier into provider and model name."""
-    normalized = model.strip()
-    if ":" not in normalized:
-        message = (
-            f"Invalid model format: '{model}'. Expected 'provider:model', e.g. 'openai:gpt-4o-mini'. "
-            "Use 'gac config set model <provider:model>' to update your configuration."
-        )
-        logger.error(message)
-        console.print(f"[red]{message}[/red]")
-        sys.exit(1)
-
-    provider, model_name = normalized.split(":", 1)
-    if not provider or not model_name:
-        message = (
-            f"Invalid model format: '{model}'. Both provider and model name are required "
-            "(example: 'anthropic:claude-haiku-4-5')."
-        )
-        logger.error(message)
-        console.print(f"[red]{message}[/red]")
-        sys.exit(1)
-
-    return provider, model_name
-
-
-def _handle_validation_retry(
-    attempts: int,
-    content_retry_budget: int,
-    raw_response: str,
-    feedback_message: str,
-    error_message: str,
-    conversation_messages: list[dict[str, str]],
-    quiet: bool,
-    retry_context: str,
-) -> bool:
-    """Handle validation retry logic. Returns True if should exit, False if should retry."""
-    conversation_messages.append({"role": "assistant", "content": raw_response})
-    conversation_messages.append({"role": "user", "content": feedback_message})
-    if attempts >= content_retry_budget:
-        logger.error(error_message)
-        console.print(f"\n[red]{error_message}[/red]")
-        console.print("\n[yellow]Raw model output:[/yellow]")
-        console.print(Panel(raw_response, title="Model Output", border_style="yellow"))
-        return True
-    if not quiet:
-        console.print(f"[yellow]Retry {attempts} of {content_retry_budget - 1}: {retry_context}[/yellow]")
-    return False
-
-
-def execute_grouped_commits_workflow(
-    *,
-    system_prompt: str,
-    user_prompt: str,
-    model: str,
-    temperature: float,
-    max_output_tokens: int,
-    max_retries: int,
-    require_confirmation: bool,
-    quiet: bool,
-    no_verify: bool,
-    dry_run: bool,
-    push: bool,
-    show_prompt: bool,
-    interactive: bool,
-    message_only: bool,
-    hook_timeout: int = 120,
-) -> None:
-    """Execute the grouped commits workflow."""
-    import json
-
-    from gac.ai import generate_grouped_commits
-
-    provider, model_name = _parse_model_identifier(model)
-
-    if show_prompt:
-        full_prompt = f"SYSTEM PROMPT:\n{system_prompt}\n\nUSER PROMPT:\n{user_prompt}"
-        console.print(Panel(full_prompt, title="Prompt for LLM", border_style="bright_blue"))
+    Args:
+        ctx: WorkflowContext containing all configuration, flags, and state
 
+    Returns:
+        Exit code: 0 for success, non-zero for failure/abort
+    """
     conversation_messages: list[dict[str, str]] = []
-    if system_prompt:
-        conversation_messages.append({"role": "system", "content": system_prompt})
-    conversation_messages.append({"role": "user", "content": user_prompt})
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            status_match = re.search(r"<git_status>\n(.*?)\n</git_status>", user_prompt, re.DOTALL)
-            diff_match = re.search(r"<git_diff>\n(.*?)\n</git_diff>", user_prompt, re.DOTALL)
-            diff_stat_match = re.search(r"<git_diff_stat>\n(.*?)\n</git_diff_stat>", user_prompt, re.DOTALL)
-
-            status = status_match.group(1) if status_match else ""
-            diff = diff_match.group(1) if diff_match else ""
-            diff_stat = diff_stat_match.group(1) if diff_stat_match else ""
-
-            # Extract hint text if present
-            hint_match = re.search(r"<hint_text>(.*?)</hint_text>", user_prompt, re.DOTALL)
-            hint = hint_match.group(1) if hint_match else ""
-
-            questions = generate_contextual_questions(
-                model=model,
-                status=status,
-                processed_diff=diff,
-                diff_stat=diff_stat,
-                hint=hint,
-                temperature=temperature,
-                max_tokens=max_output_tokens,
-                max_retries=max_retries,
-                quiet=quiet,
-            )
-
-            if questions:
-                # Collect answers interactively
-                answers = collect_interactive_answers(questions)
-
-                if answers is None:
-                    # User aborted interactive mode
-                    if not quiet:
-                        console.print("[yellow]Proceeding with commit without additional context[/yellow]\n")
-                elif answers:
-                    # User provided some answers, format them for the prompt
-                    answers_context = format_answers_for_prompt(answers)
-                    enhanced_user_prompt = user_prompt + answers_context
-
-                    # Update the conversation messages with the enhanced prompt
-                    if conversation_messages and conversation_messages[-1]["role"] == "user":
-                        conversation_messages[-1]["content"] = enhanced_user_prompt
-
-                    logger.info(f"Collected answers for {len(answers)} questions")
-                else:
-                    # User skipped all questions
-                    if not quiet:
-                        console.print("[dim]No answers provided, proceeding with original context[/dim]\n")
-
-        except Exception as e:
-            logger.warning(f"Failed to generate contextual questions, proceeding without them: {e}")
-            if not quiet:
-                console.print("[yellow]⚠️ Could not generate contextual questions, proceeding normally[/yellow]\n")
-
-    first_iteration = True
-    content_retry_budget = max(3, int(max_retries))
-    attempts = 0
-
-    grouped_result: dict | None = None
-    raw_response: str = ""
-
-    while True:
-        prompt_tokens = count_tokens(conversation_messages, model)
-
-        if first_iteration:
-            warning_limit_val = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
-            assert warning_limit_val is not None
-            warning_limit = int(warning_limit_val)
-            if not check_token_warning(prompt_tokens, warning_limit, require_confirmation):
-                sys.exit(0)
-            first_iteration = False
-
-        raw_response = generate_grouped_commits(
-            model=model,
-            prompt=conversation_messages,
-            temperature=temperature,
-            max_tokens=max_output_tokens,
-            max_retries=max_retries,
-            quiet=quiet,
-            skip_success_message=True,
+    if ctx.system_prompt:
+        conversation_messages.append({"role": "system", "content": ctx.system_prompt})
+    conversation_messages.append({"role": "user", "content": ctx.user_prompt})
+
+    # Handle interactive questions if enabled
+    if ctx.interactive and not ctx.message_only:
+        ctx.state.interactive_mode.handle_interactive_flow(
+            model=ctx.model,
+            user_prompt=ctx.user_prompt,
+            git_state=ctx.git_state,
+            hint=ctx.hint,
+            conversation_messages=conversation_messages,
+            temperature=ctx.temperature,
+            max_tokens=ctx.max_output_tokens,
+            max_retries=ctx.max_retries,
+            quiet=ctx.quiet,
         )
 
-
-        extract = raw_response
-        first_brace = raw_response.find("{")
-        last_brace = raw_response.rfind("}")
-        if first_brace != -1 and last_brace != -1 and first_brace < last_brace:
-            extract = raw_response[first_brace : last_brace + 1]
-
-        try:
-            parsed = json.loads(extract)
-        except json.JSONDecodeError as e:
-            parsed = None
-            logger.debug(
-                f"JSON parsing failed: {e}. Extract length: {len(extract)}, Response length: {len(raw_response)}"
-            )
-
-        if parsed is None:
-            attempts += 1
-            feedback = "Your response was not valid JSON. Respond with ONLY valid JSON matching the expected schema. Do not include any commentary or code fences."
-            error_msg = f"Failed to parse LLM response as JSON after {attempts} retries."
-            if _handle_validation_retry(
-                attempts,
-                content_retry_budget,
-                raw_response,
-                feedback,
-                error_msg,
-                conversation_messages,
-                quiet,
-                "JSON parsing failed, asking model to fix...",
-            ):
-                sys.exit(1)
-            continue
-
-        try:
-            if "commits" not in parsed or not isinstance(parsed["commits"], list):
-                raise ValueError("Response missing 'commits' array")
-            if len(parsed["commits"]) == 0:
-                raise ValueError("No commits in response")
-            for idx, commit in enumerate(parsed["commits"]):
-                if "files" not in commit or not isinstance(commit["files"], list):
-                    raise ValueError(f"Commit {idx + 1} missing 'files' array")
-                if "message" not in commit or not isinstance(commit["message"], str):
-                    raise ValueError(f"Commit {idx + 1} missing 'message' string")
-                if len(commit["files"]) == 0:
-                    raise ValueError(f"Commit {idx + 1} has empty files list")
-                if not commit["message"].strip():
-                    raise ValueError(f"Commit {idx + 1} has empty message")
-        except (ValueError, TypeError) as e:
-            attempts += 1
-            feedback = f"Invalid response structure: {e}. Please return ONLY valid JSON following the schema with a non-empty 'commits' array of objects containing 'files' and 'message'."
-            error_msg = f"Invalid grouped commits structure after {attempts} retries: {e}"
-            if _handle_validation_retry(
-                attempts,
-                content_retry_budget,
-                raw_response,
-                feedback,
-                error_msg,
-                conversation_messages,
-                quiet,
-                "Structure validation failed, asking model to fix...",
-            ):
-                sys.exit(1)
-            continue
-
-        staged_set = set(get_staged_files(existing_only=False))
-        ok, feedback, detail_msg = _validate_grouped_files_or_feedback(staged_set, parsed)
-        if not ok:
-            attempts += 1
-            error_msg = (
-                f"Grouped commits file set mismatch after {attempts} retries{': ' + detail_msg if detail_msg else ''}"
-            )
-            if _handle_validation_retry(
-                attempts,
-                content_retry_budget,
-                raw_response,
-                feedback,
-                error_msg,
-                conversation_messages,
-                quiet,
-                "File coverage mismatch, asking model to fix...",
-            ):
-                sys.exit(1)
-            continue
-
-        grouped_result = parsed
-        conversation_messages.append({"role": "assistant", "content": raw_response})
-
-        if not quiet:
-            console.print(f"[green]✔ Generated commit messages with {provider} {model_name}[/green]")
-            num_commits = len(grouped_result["commits"])
-            console.print(f"[bold green]Proposed Commits ({num_commits}):[/bold green]\n")
-            for idx, commit in enumerate(grouped_result["commits"], 1):
-                files = commit["files"]
-                files_display = ", ".join(files)
-                console.print(f"[dim]{files_display}[/dim]")
-                commit_msg = commit["message"]
-                console.print(Panel(commit_msg, title=f"Commit Message {idx}/{num_commits}", border_style="cyan"))
-                console.print()
-
-            completion_tokens = count_tokens(raw_response, model)
-            total_tokens = prompt_tokens + completion_tokens
-            console.print(
-                f"[dim]Token usage: {prompt_tokens} prompt + {completion_tokens} completion = {total_tokens} total[/dim]"
-            )
-
-        if require_confirmation:
-            accepted = False
-            num_commits = len(grouped_result["commits"]) if grouped_result else 0
-            while True:
-                response = click.prompt(
-                    f"Proceed with {num_commits} commits above? [y/n/r/<feedback>]",
-                    type=str,
-                    show_default=False,
-                ).strip()
-                response_lower = response.lower()
-
-                if response_lower in ["y", "yes"]:
-                    accepted = True
-                    break
-                if response_lower in ["n", "no"]:
-                    console.print("[yellow]Commits not accepted. Exiting...[/yellow]")
-                    sys.exit(0)
-                if response == "":
-                    continue
-                if response_lower in ["r", "reroll"]:
-                    feedback_message = "Please provide alternative commit groupings using the same repository context."
-                    console.print("[cyan]Regenerating commit groups...[/cyan]")
-                    conversation_messages.append({"role": "user", "content": feedback_message})
-                    console.print()
-                    attempts = 0
-                    break
-
-                feedback_message = f"Please revise the commit groupings based on this feedback: {response}"
-                console.print(f"[cyan]Regenerating commit groups with feedback: {response}[/cyan]")
-                conversation_messages.append({"role": "user", "content": feedback_message})
-                console.print()
-                attempts = 0
-                break
-
-            if not accepted:
-                continue
-
-        num_commits = len(grouped_result["commits"]) if grouped_result else 0
-        if dry_run:
-            console.print(f"[yellow]Dry run: Would create {num_commits} commits[/yellow]")
-            for idx, commit in enumerate(grouped_result["commits"], 1):
-                console.print(f"\n[cyan]Commit {idx}/{num_commits}:[/cyan]")
-                console.print(f"  Files: {', '.join(commit['files'])}")
-                console.print(f"  Message: {commit['message'][:50]}...")
-        else:
-            original_staged_files = get_staged_files(existing_only=False)
-            original_staged_diff = run_git_command(["diff", "--cached", "--binary"], silent=True)
-            run_git_command(["reset", "HEAD"])
-
-            try:
-                # Detect file renames to handle them properly
-                rename_mappings = detect_rename_mappings(original_staged_diff)
-
-                for idx, commit in enumerate(grouped_result["commits"], 1):
-                    try:
-                        for file_path in commit["files"]:
-                            # Check if this file is the destination of a rename
-                            if file_path in rename_mappings:
-                                old_file = rename_mappings[file_path]
-                                # For renames, stage both the old file (for deletion) and new file
-                                # This ensures the complete rename operation is preserved
-                                run_git_command(["add", "-A", old_file])
-                                run_git_command(["add", "-A", file_path])
-                            else:
-                                run_git_command(["add", "-A", file_path])
-                        execute_commit(commit["message"], no_verify, hook_timeout)
-                        console.print(f"[green]✓ Commit {idx}/{num_commits} created[/green]")
-                    except Exception as e:
-                        console.print(f"[red]✗ Failed at commit {idx}/{num_commits}: {e}[/red]")
-                        console.print(f"[yellow]Completed {idx - 1}/{num_commits} commits.[/yellow]")
-                        if idx == 1:
-                            console.print("[yellow]Restoring original staging area...[/yellow]")
-                            restore_staging(original_staged_files, original_staged_diff)
-                            console.print("[green]Original staging area restored.[/green]")
-                        sys.exit(1)
-            except KeyboardInterrupt:
-                console.print("\n[yellow]Interrupted by user. Restoring original staging area...[/yellow]")
-                restore_staging(original_staged_files, original_staged_diff)
-                console.print("[green]Original staging area restored.[/green]")
-                sys.exit(1)
-
-        if push:
-            try:
-                if dry_run:
-                    console.print("[yellow]Dry run: Would push changes[/yellow]")
-                    sys.exit(0)
-                if push_changes():
-                    logger.info("Changes pushed successfully")
-                    console.print("[green]Changes pushed successfully[/green]")
-                else:
-                    console.print(
-                        "[red]Failed to push changes. Check your remote configuration and network connection.[/red]"
-                    )
-                    sys.exit(1)
-            except Exception as e:
-                console.print(f"[red]Error pushing changes: {e}[/red]")
-                sys.exit(1)
-
-        sys.exit(0)
-
-
-def execute_single_commit_workflow(
-    *,
-    system_prompt: str,
-    user_prompt: str,
-    model: str,
-    temperature: float,
-    max_output_tokens: int,
-    max_retries: int,
-    require_confirmation: bool,
-    quiet: bool,
-    no_verify: bool,
-    dry_run: bool,
-    message_only: bool = False,
-    push: bool,
-    show_prompt: bool,
-    hook_timeout: int = 120,
-    interactive: bool = False,
-) -> None:
-    if show_prompt:
-        full_prompt = f"SYSTEM PROMPT:\n{system_prompt}\n\nUSER PROMPT:\n{user_prompt}"
-        console.print(Panel(full_prompt, title="Prompt for LLM", border_style="bright_blue"))
-
-    conversation_messages: list[dict[str, str]] = []
-    if system_prompt:
-        conversation_messages.append({"role": "system", "content": system_prompt})
-    conversation_messages.append({"role": "user", "content": user_prompt})
-
-    _parse_model_identifier(model)
-
-    # Generate interactive questions if enabled
-    if interactive and not message_only:
-        try:
-            # Extract git data from the user prompt for question generation
-            status_match = None
-            diff_match = None
-            diff_stat_match = None
-
-            import re
-
-            status_match = re.search(r"<git_status>\n(.*?)\n</git_status>", user_prompt, re.DOTALL)
-            diff_match = re.search(r"<git_diff>\n(.*?)\n</git_diff>", user_prompt, re.DOTALL)
-            diff_stat_match = re.search(r"<git_diff_stat>\n(.*?)\n</git_diff_stat>", user_prompt, re.DOTALL)
-
-            status = status_match.group(1) if status_match else ""
-            diff = diff_match.group(1) if diff_match else ""
-            diff_stat = diff_stat_match.group(1) if diff_stat_match else ""
-
-            # Extract hint text if present
-            hint_match = re.search(r"<hint_text>(.*?)</hint_text>", user_prompt, re.DOTALL)
-            hint = hint_match.group(1) if hint_match else ""
-
-            questions = generate_contextual_questions(
-                model=model,
-                status=status,
-                processed_diff=diff,
-                diff_stat=diff_stat,
-                hint=hint,
-                temperature=temperature,
-                max_tokens=max_output_tokens,
-                max_retries=max_retries,
-                quiet=quiet,
-            )
-
-            if questions:
-                # Collect answers interactively
-                answers = collect_interactive_answers(questions)
-
-                if answers is None:
-                    # User aborted interactive mode
-                    if not quiet:
-                        console.print("[yellow]Proceeding with commit without additional context[/yellow]\n")
-                elif answers:
-                    # User provided some answers, format them for the prompt
-                    answers_context = format_answers_for_prompt(answers)
-                    enhanced_user_prompt = user_prompt + answers_context
-
-                    # Update the conversation messages with the enhanced prompt
-                    if conversation_messages and conversation_messages[-1]["role"] == "user":
-                        conversation_messages[-1]["content"] = enhanced_user_prompt
-
-                    logger.info(f"Collected answers for {len(answers)} questions")
-                else:
-                    # User skipped all questions
-                    if not quiet:
-                        console.print("[dim]No answers provided, proceeding with original context[/dim]\n")
-
-        except Exception as e:
-            logger.warning(f"Failed to generate contextual questions, proceeding without them: {e}")
-            if not quiet:
-                console.print("[yellow]⚠️ Could not generate contextual questions, proceeding normally[/yellow]\n")
-
+    # Generate commit message
     first_iteration = True
     while True:
-        prompt_tokens = count_tokens(conversation_messages, model)
+        prompt_tokens = count_tokens(conversation_messages, ctx.model)
         if first_iteration:
-
-
-
-            if not check_token_warning(prompt_tokens, warning_limit, require_confirmation):
-                sys.exit(0)
+            warning_limit = config["warning_limit_tokens"]
+            if not check_token_warning(prompt_tokens, warning_limit, ctx.flags.require_confirmation):
+                return 0  # User declined due to token warning
             first_iteration = False
 
         raw_commit_message = generate_commit_message(
-            model=model,
+            model=ctx.model,
             prompt=conversation_messages,
-            temperature=temperature,
-            max_tokens=max_output_tokens,
-            max_retries=max_retries,
-            quiet=quiet or message_only,
+            temperature=ctx.temperature,
+            max_tokens=ctx.max_output_tokens,
+            max_retries=ctx.max_retries,
+            quiet=ctx.quiet or ctx.message_only,
         )
         commit_message = clean_commit_message(raw_commit_message)
        logger.info("Generated commit message:")
        logger.info(commit_message)
        conversation_messages.append({"role": "assistant", "content": commit_message})
 
-        if message_only:
-            # Output only the commit message without any formatting
+        if ctx.message_only:
            print(commit_message)
-
-
-
-
-
-
-
+            return 0
+
+        # Display commit message panel (always show, regardless of confirmation mode)
+        if not ctx.quiet:
+            display_commit_message(commit_message, prompt_tokens, ctx.model, ctx.quiet)
+
+        # Handle confirmation
+        if ctx.flags.require_confirmation:
+            final_message, decision = ctx.state.interactive_mode.handle_single_commit_confirmation(
+                model=ctx.model,
+                commit_message=commit_message,
+                conversation_messages=conversation_messages,
+                quiet=ctx.quiet,
            )
-            if decision == "
-
-                sys.exit(0)
-            elif decision == "yes":
+            if decision == "yes":
+                commit_message = final_message
                break
+            elif decision == "no":
+                console.print("[yellow]Commit aborted.[/yellow]")
+                return 0  # User aborted
+            # decision == "regenerate": continue the loop
        else:
            break
 
-
-
-
-
-
-
-
-
-    execute_commit(commit_message, no_verify, hook_timeout)
-
-    if push:
-        try:
-            if dry_run:
-                staged_files = get_staged_files(existing_only=False)
-                logger.info("Dry run: Would push changes")
-                logger.info("Would push with message:")
-                logger.info(commit_message)
-                logger.info(f"Would push {len(staged_files)} files")
-                console.print("[yellow]Dry run: Would push changes[/yellow]")
-                console.print("Would push with message:")
-                console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
-                console.print(f"Would push {len(staged_files)} files")
-                sys.exit(0)
-            if push_changes():
-                logger.info("Changes pushed successfully")
-                console.print("[green]Changes pushed successfully[/green]")
-            else:
-                console.print(
-                    "[red]Failed to push changes. Check your remote configuration and network connection.[/red]"
-                )
-                sys.exit(1)
-        except Exception as e:
-            console.print(f"[red]Error pushing changes: {e}[/red]")
-            sys.exit(1)
-
-    if not quiet:
+    # Execute the commit
+    ctx.state.commit_executor.create_commit(commit_message)
+
+    # Push if requested
+    if ctx.flags.push:
+        ctx.state.commit_executor.push_to_remote()
+
+    if not ctx.quiet:
        logger.info("Successfully committed changes with message:")
        logger.info(commit_message)
-        if push:
+        if ctx.flags.push:
            logger.info("Changes pushed to remote.")
-
-
-
-def generate_contextual_questions(
-    model: str,
-    status: str,
-    processed_diff: str,
-    diff_stat: str = "",
-    hint: str = "",
-    temperature: float = EnvDefaults.TEMPERATURE,
-    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-    max_retries: int = EnvDefaults.MAX_RETRIES,
-    quiet: bool = False,
-) -> list[str]:
-    """Generate contextual questions about staged changes when interactive mode is enabled.
+    return 0
 
-    Args:
-        model: The model to use in provider:model_name format
-        status: Git status output
-        processed_diff: Git diff output, already preprocessed
-        diff_stat: Git diff stat output showing file changes summary
-        hint: Optional hint to guide the question generation
-        temperature: Controls randomness for generation
-        max_tokens: Maximum tokens in the response
-        max_retries: Number of retry attempts if generation fails
-        quiet: If True, suppress progress indicators
 
-
-
-    Raises:
-        AIError: If question generation fails after max_retries attempts
-    """
-    from gac.prompt import build_question_generation_prompt
-
-    try:
-        # Build prompts for question generation
-        system_prompt, user_prompt = build_question_generation_prompt(
-            status=status,
-            processed_diff=processed_diff,
-            diff_stat=diff_stat,
-            hint=hint,
-        )
-
-        # Generate questions using existing infrastructure
-        logger.info("Generating contextual questions about staged changes...")
-        questions_text = generate_commit_message(
-            model=model,
-            prompt=(system_prompt, user_prompt),
-            temperature=temperature,
-            max_tokens=max_tokens,
-            max_retries=max_retries,
-            quiet=quiet,
-            skip_success_message=True,  # Don't show "Generated commit message" for questions
-            task_description="contextual questions",
-        )
-
-        # Parse the response to extract individual questions
-        questions = _parse_questions_from_response(questions_text)
-
-        logger.info(f"Generated {len(questions)} contextual questions")
-        return questions
-
-    except Exception as e:
-        logger.error(f"Failed to generate contextual questions: {e}")
-        raise AIError.model_error(f"Failed to generate contextual questions: {e}") from e
-
-
-def _parse_questions_from_response(response: str) -> list[str]:
-    """Parse the AI response to extract individual questions from a numbered list.
+def main(opts: CLIOptions) -> int:
+    """Main application logic for gac.
 
     Args:
-
+        opts: CLI options bundled in a dataclass
 
     Returns:
-
+        Exit code: 0 for success, non-zero for failure
     """
-
-
-
-
-
-
-
-
-            continue
-
-        # Match numbered list format (e.g., "1. Question text?" or "1) Question text?")
-        match = re.match(r"^\d+\.\s+(.+)$", line)
-        if not match:
-            match = re.match(r"^\d+\)\s+(.+)$", line)
-
-        if match:
-            question = match.group(1).strip()
-            # Remove any leading symbols like •, -, *
-            question = re.sub(r"^[•\-*]\s+", "", question)
-            if question and question.endswith("?"):
-                questions.append(question)
-        elif line.endswith("?") and len(line) > 5:  # Fallback for non-numbered questions
-            questions.append(line)
-
-    return questions
-
-
-def main(
-    stage_all: bool = False,
-    group: bool = False,
-    interactive: bool = False,
-    model: str | None = None,
-    hint: str = "",
-    one_liner: bool = False,
-    show_prompt: bool = False,
-    infer_scope: bool = False,
-    require_confirmation: bool = True,
-    push: bool = False,
-    quiet: bool = False,
-    dry_run: bool = False,
-    message_only: bool = False,
-    verbose: bool = False,
-    no_verify: bool = False,
-    skip_secret_scan: bool = False,
-    language: str | None = None,
-    hook_timeout: int = 120,
-) -> None:
-    """Main application logic for gac."""
-    try:
-        git_dir = run_git_command(["rev-parse", "--show-toplevel"])
-        if not git_dir:
-            raise GitError("Not in a git repository")
-    except Exception as e:
-        logger.error(f"Error checking git repository: {e}")
-        handle_error(GitError("Not in a git repository"), exit_program=True)
+    # Initialize components
+    git_validator = GitStateValidator(config)
+    prompt_builder = PromptBuilder(config)
+    commit_executor = CommitExecutor(
+        dry_run=opts.dry_run, quiet=opts.quiet, no_verify=opts.no_verify, hook_timeout=opts.hook_timeout
+    )
+    interactive_mode = InteractiveMode(config)
+    grouped_workflow = GroupedCommitWorkflow(config)
 
+    # Validate and get model configuration
+    model = opts.model
    if model is None:
        model_from_config = config["model"]
        if model_from_config is None:
@@ -773,251 +152,177 @@ def main(
        model = str(model_from_config)
 
    temperature_val = config["temperature"]
-
+    if temperature_val is None:
+        raise ConfigError("temperature configuration missing")
    temperature = float(temperature_val)
 
    max_tokens_val = config["max_output_tokens"]
-
+    if max_tokens_val is None:
+        raise ConfigError("max_output_tokens configuration missing")
    max_output_tokens = int(max_tokens_val)
 
    max_retries_val = config["max_retries"]
-
+    if max_retries_val is None:
+        raise ConfigError("max_retries configuration missing")
    max_retries = int(max_retries_val)
 
-
-
-
-
-
-
-
-
-
-
-
+    # Get git state and handle hooks
+    git_state = git_validator.get_git_state(
+        stage_all=opts.stage_all,
+        dry_run=opts.dry_run,
+        skip_secret_scan=opts.skip_secret_scan,
+        quiet=opts.quiet,
+        model=model,
+        hint=opts.hint,
+        one_liner=opts.one_liner,
+        infer_scope=opts.infer_scope,
+        verbose=opts.verbose,
+        language=opts.language,
+    )
 
-
-
-
-        )
-        sys.exit(0)
+    # No staged changes found
+    if git_state is None:
+        return 0
 
-
-
+    # Run pre-commit hooks
+    if not opts.no_verify and not opts.dry_run:
+        if not run_lefthook_hooks(opts.hook_timeout):
            console.print("[red]Lefthook hooks failed. Please fix the issues and try again.[/red]")
            console.print("[yellow]You can use --no-verify to skip pre-commit and lefthook hooks.[/yellow]")
-
+            return 1
 
-        if not run_pre_commit_hooks(hook_timeout):
+        if not run_pre_commit_hooks(opts.hook_timeout):
            console.print("[red]Pre-commit hooks failed. Please fix the issues and try again.[/red]")
            console.print("[yellow]You can use --no-verify to skip pre-commit and lefthook hooks.[/yellow]")
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        try:
-            choice = (
-                click.prompt(
-                    "\nChoose an option",
-                    type=click.Choice(["a", "c", "r"], case_sensitive=False),
-                    default="a",
-                    show_choices=True,
-                    show_default=True,
-                )
-                .strip()
-                .lower()
-            )
-        except (EOFError, KeyboardInterrupt):
-            console.print("\n[red]Aborted by user.[/red]")
-            sys.exit(0)
-
-        if choice == "a":
-            console.print("[yellow]Commit aborted.[/yellow]")
-            sys.exit(0)
-        elif choice == "c":
-            console.print("[bold yellow]⚠️ Continuing with potential secrets in commit...[/bold yellow]")
-            logger.warning("User chose to continue despite detected secrets")
-        elif choice == "r":
-            affected_files = get_affected_files(secrets)
-            for file_path in affected_files:
-                try:
-                    run_git_command(["reset", "HEAD", file_path])
-                    console.print(f"[green]Unstaged: {file_path}[/green]")
-                except GitError as e:
-                    console.print(f"[red]Failed to unstage {file_path}: {e}[/red]")
-
-            # Check if there are still staged files
-            remaining_staged = get_staged_files(existing_only=False)
-            if not remaining_staged:
-                console.print("[yellow]No files remain staged. Commit aborted.[/yellow]")
-                sys.exit(0)
-
-            console.print(f"[green]Continuing with {len(remaining_staged)} staged file(s)...[/green]")
-            status = get_staged_status()
-            diff = run_git_command(["diff", "--staged"])
-            diff_stat = " " + run_git_command(["diff", "--stat", "--cached"])
-    else:
-        logger.info("No secrets detected in staged changes")
-
-    logger.debug(f"Preprocessing diff ({len(diff)} characters)")
-    assert model is not None
-    processed_diff = preprocess_diff(diff, token_limit=Utility.DEFAULT_DIFF_TOKEN_LIMIT, model=model)
-    logger.debug(f"Processed diff ({len(processed_diff)} characters)")
+            return 1
+
+    # Handle secret detection
+    if git_state.has_secrets:
+        secret_decision = git_validator.handle_secret_detection(git_state.secrets, opts.quiet)
+        if secret_decision is None:
+            # User chose to abort
+            return 0
+        if not secret_decision:
+            # Secrets were removed, we need to refresh the git state
+            git_state = git_validator.get_git_state(
+                stage_all=False,
+                dry_run=opts.dry_run,
+                skip_secret_scan=True,  # Skip secret scan this time
+                quiet=opts.quiet,
+                model=model,
+                hint=opts.hint,
+                one_liner=opts.one_liner,
+                infer_scope=opts.infer_scope,
+                verbose=opts.verbose,
+                language=opts.language,
+            )
+            # After removing secret files, no staged changes may remain
+            if git_state is None:
+                return 0
 
-
-
-
-
+    # Adjust max_output_tokens for grouped mode
+    if opts.group:
+        num_files = len(git_state.staged_files)
+        multiplier = min(5, 2 + (num_files // 10))
+        max_output_tokens *= multiplier
+        logger.debug(f"Grouped mode: scaling max_output_tokens by {multiplier}x for {num_files} files")
 
-
-
-
-
-
-
-
-
-
-        processed_diff=processed_diff,
-        diff_stat=diff_stat,
-        one_liner=one_liner,
-        hint=hint,
-        infer_scope=infer_scope,
-        verbose=verbose,
-        system_template_path=system_template_path,
-        language=language,
-        translate_prefixes=translate_prefixes,
+    # Build prompts
+    prompts = prompt_builder.build_prompts(
+        git_state=git_state,
+        group=opts.group,
+        one_liner=opts.one_liner,
+        hint=opts.hint,
+        infer_scope=opts.infer_scope,
+        verbose=opts.verbose,
+        language=opts.language,
    )
 
-    if
-
-
-        system_prompt, user_prompt = build_group_prompt(
-            status=status,
-            processed_diff=processed_diff,
-            diff_stat=diff_stat,
-            one_liner=one_liner,
-            hint=hint,
-            infer_scope=infer_scope,
-            verbose=verbose,
-            system_template_path=system_template_path,
-            language=language,
-            translate_prefixes=translate_prefixes,
-        )
+    # Display prompts if requested
+    if opts.show_prompt:
+        prompt_builder.display_prompts(prompts.system_prompt, prompts.user_prompt)
 
-
-
-
-
+    try:
+        if opts.group:
+            # Execute grouped workflow
+            return grouped_workflow.execute_workflow(
+                system_prompt=prompts.system_prompt,
+                user_prompt=prompts.user_prompt,
                model=model,
                temperature=temperature,
                max_output_tokens=max_output_tokens,
                max_retries=max_retries,
-                require_confirmation=require_confirmation,
-                quiet=quiet,
-                no_verify=no_verify,
-                dry_run=dry_run,
-                push=push,
-                show_prompt=show_prompt,
-
-
-
+                require_confirmation=opts.require_confirmation,
+                quiet=opts.quiet,
+                no_verify=opts.no_verify,
+                dry_run=opts.dry_run,
+                push=opts.push,
+                show_prompt=opts.show_prompt,
+                interactive=opts.interactive,
+                message_only=opts.message_only,
+                hook_timeout=opts.hook_timeout,
+                git_state=git_state,
+                hint=opts.hint,
            )
-
-
-
-            sys.exit(1)
-        else:
-            try:
-                execute_single_commit_workflow(
-                    system_prompt=system_prompt,
-                    user_prompt=user_prompt,
+        else:
+            # Build workflow context
+            gen_config = GenerationConfig(
                model=model,
                temperature=temperature,
                max_output_tokens=max_output_tokens,
                max_retries=max_retries,
-                require_confirmation=require_confirmation,
-                quiet=quiet,
-                no_verify=no_verify,
-                dry_run=dry_run,
-                message_only=message_only,
-                push=push,
-                show_prompt=show_prompt,
-                hook_timeout=hook_timeout,
-                interactive=interactive,
            )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            flags = WorkflowFlags(
+                require_confirmation=opts.require_confirmation,
+                quiet=opts.quiet,
+                no_verify=opts.no_verify,
+                dry_run=opts.dry_run,
+                message_only=opts.message_only,
+                push=opts.push,
+                show_prompt=opts.show_prompt,
+                interactive=opts.interactive,
+                hook_timeout=opts.hook_timeout,
+            )
+            state = WorkflowState(
+                prompts=prompts,
+                git_state=git_state,
+                hint=opts.hint,
+                commit_executor=commit_executor,
+                interactive_mode=interactive_mode,
+            )
+            ctx = WorkflowContext(config=gen_config, flags=flags, state=state)
+
+            # Execute single commit workflow
+            return _execute_single_commit_workflow(ctx)
+    except AIError as e:
+        # Build context for retry
+        gen_config = GenerationConfig(
+            model=model,
+            temperature=temperature,
+            max_output_tokens=max_output_tokens,
+            max_retries=max_retries,
+        )
+        flags = WorkflowFlags(
+            require_confirmation=opts.require_confirmation,
+            quiet=opts.quiet,
+            no_verify=opts.no_verify,
+            dry_run=opts.dry_run,
+            message_only=opts.message_only,
+            push=opts.push,
+            show_prompt=opts.show_prompt,
+            interactive=opts.interactive,
+            hook_timeout=opts.hook_timeout,
+        )
+        state = WorkflowState(
+            prompts=prompts,
+            git_state=git_state,
+            hint=opts.hint,
+            commit_executor=commit_executor,
+            interactive_mode=interactive_mode,
+        )
+        ctx = WorkflowContext(config=gen_config, flags=flags, state=state)
+        return handle_oauth_retry(e=e, ctx=ctx)
 
 
 if __name__ == "__main__":
-    main()
+    raise SystemExit(main(CLIOptions()))