gac 1.13.0__py3-none-any.whl → 3.6.0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
gac/__version__.py CHANGED
@@ -1,3 +1,3 @@
 """Version information for gac package."""
 
-__version__ = "1.13.0"
+__version__ = "3.6.0"
gac/ai.py CHANGED
@@ -9,27 +9,7 @@ import logging
 from gac.ai_utils import generate_with_retries
 from gac.constants import EnvDefaults
 from gac.errors import AIError
-from gac.providers import (
-    call_anthropic_api,
-    call_cerebras_api,
-    call_chutes_api,
-    call_custom_anthropic_api,
-    call_custom_openai_api,
-    call_deepseek_api,
-    call_fireworks_api,
-    call_gemini_api,
-    call_groq_api,
-    call_lmstudio_api,
-    call_minimax_api,
-    call_ollama_api,
-    call_openai_api,
-    call_openrouter_api,
-    call_streamlake_api,
-    call_synthetic_api,
-    call_together_api,
-    call_zai_api,
-    call_zai_coding_api,
-)
+from gac.providers import PROVIDER_REGISTRY
 
 logger = logging.getLogger(__name__)
 
@@ -41,11 +21,14 @@ def generate_commit_message(
     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
     max_retries: int = EnvDefaults.MAX_RETRIES,
     quiet: bool = False,
+    is_group: bool = False,
+    skip_success_message: bool = False,
+    task_description: str = "commit message",
 ) -> str:
     """Generate a commit message using direct API calls to AI providers.
 
     Args:
-        model: The model to use in provider:model_name format (e.g., 'anthropic:claude-3-5-haiku-latest')
+        model: The model to use in provider:model_name format (e.g., 'anthropic:claude-haiku-4-5')
         prompt: Either a string prompt (for backward compatibility) or tuple of (system_prompt, user_prompt)
         temperature: Controls randomness (0.0-1.0), lower values are more deterministic
         max_tokens: Maximum tokens in the response
@@ -59,7 +42,7 @@ def generate_commit_message(
         AIError: If generation fails after max_retries attempts
 
     Example:
-        >>> model = "anthropic:claude-3-5-haiku-latest"
+        >>> model = "anthropic:claude-haiku-4-5"
        >>> system_prompt, user_prompt = build_prompt("On branch main", "diff --git a/README.md b/README.md")
        >>> generate_commit_message(model, (system_prompt, user_prompt))
        'docs: Update README with installation instructions'
@@ -81,39 +64,19 @@ def generate_commit_message(
         {"role": "user", "content": user_prompt},
     ]
 
-    # Provider functions mapping
-    provider_funcs = {
-        "anthropic": call_anthropic_api,
-        "cerebras": call_cerebras_api,
-        "chutes": call_chutes_api,
-        "custom-anthropic": call_custom_anthropic_api,
-        "custom-openai": call_custom_openai_api,
-        "deepseek": call_deepseek_api,
-        "fireworks": call_fireworks_api,
-        "gemini": call_gemini_api,
-        "groq": call_groq_api,
-        "lm-studio": call_lmstudio_api,
-        "minimax": call_minimax_api,
-        "ollama": call_ollama_api,
-        "openai": call_openai_api,
-        "openrouter": call_openrouter_api,
-        "streamlake": call_streamlake_api,
-        "synthetic": call_synthetic_api,
-        "together": call_together_api,
-        "zai": call_zai_api,
-        "zai-coding": call_zai_coding_api,
-    }
-
     # Generate the commit message using centralized retry logic
     try:
         return generate_with_retries(
-            provider_funcs=provider_funcs,
+            provider_funcs=PROVIDER_REGISTRY,
             model=model,
             messages=messages,
             temperature=temperature,
             max_tokens=max_tokens,
             max_retries=max_retries,
             quiet=quiet,
+            is_group=is_group,
+            skip_success_message=skip_success_message,
+            task_description=task_description,
         )
     except AIError:
         # Re-raise AIError exceptions as-is to preserve error classification
@@ -121,3 +84,26 @@ def generate_commit_message(
     except Exception as e:
         logger.error(f"Failed to generate commit message: {e}")
         raise AIError.model_error(f"Failed to generate commit message: {e}") from e
+
+
+def generate_grouped_commits(
+    model: str,
+    prompt: list[dict[str, str]],
+    temperature: float,
+    max_tokens: int,
+    max_retries: int,
+    quiet: bool = False,
+    skip_success_message: bool = False,
+) -> str:
+    """Generate grouped commits JSON response."""
+    return generate_commit_message(
+        model=model,
+        prompt=prompt,
+        temperature=temperature,
+        max_tokens=max_tokens,
+        max_retries=max_retries,
+        quiet=quiet,
+        is_group=True,
+        skip_success_message=skip_success_message,
+        task_description="commit message",
+    )
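For context, the new keyword arguments thread grouping and spinner behaviour straight through to `generate_with_retries`. A minimal usage sketch, assuming only the signature shown in this diff and a configured API key for the chosen provider; the prompt strings are illustrative, not from the package:

```python
# Hedged sketch: exercises the signature added in 3.6.0; prompt text is made up.
from gac.ai import generate_commit_message

system_prompt = "You write conventional commit messages."
user_prompt = "diff --git a/README.md b/README.md"

message = generate_commit_message(
    model="anthropic:claude-haiku-4-5",   # provider:model_name, as documented above
    prompt=(system_prompt, user_prompt),  # tuple form; a plain string is still accepted
    quiet=True,                           # suppress the spinner
    skip_success_message=True,            # new in this release: no success/failure line
    task_description="commit message",    # used to phrase spinner and log text
)
print(message)
```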
gac/ai_utils.py CHANGED
@@ -4,17 +4,28 @@ This module provides utility functions that support the AI provider implementati
 """
 
 import logging
+import os
 import time
 from functools import lru_cache
 from typing import Any
 
 import tiktoken
-from halo import Halo
+from rich.console import Console
+from rich.status import Status
 
-from gac.constants import Utility
+from gac.constants import EnvDefaults, Utility
 from gac.errors import AIError
+from gac.providers import SUPPORTED_PROVIDERS
 
 logger = logging.getLogger(__name__)
+console = Console()
+
+
+@lru_cache(maxsize=1)
+def _should_skip_tiktoken_counting() -> bool:
+    """Return True when token counting should avoid tiktoken calls entirely."""
+    value = os.getenv("GAC_NO_TIKTOKEN", str(EnvDefaults.NO_TIKTOKEN))
+    return value.lower() in ("true", "1", "yes", "on")
 
 
 def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: str) -> int:
@@ -23,11 +34,15 @@ def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: st
     if not text:
         return 0
 
+    if _should_skip_tiktoken_counting():
+        return len(text) // 4
+
     try:
         encoding = get_encoding(model)
         return len(encoding.encode(text))
     except Exception as e:
         logger.error(f"Error counting tokens: {e}")
+        # Fallback to rough estimation (4 chars per token on average)
         return len(text) // 4
 
 
@@ -45,10 +60,18 @@ def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -
 @lru_cache(maxsize=1)
 def get_encoding(model: str) -> tiktoken.Encoding:
     """Get the appropriate encoding for a given model."""
-    model_name = model.split(":")[-1] if ":" in model else model
+    provider, model_name = model.split(":", 1) if ":" in model else (None, model)
+
+    if provider != "openai":
+        return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
+
     try:
         return tiktoken.encoding_for_model(model_name)
     except KeyError:
+        # Fall back to default encoding if model not found
+        return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
+    except Exception:
+        # If there are any network/SSL issues, fall back to default encoding
         return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
 
 
@@ -83,6 +106,9 @@ def generate_with_retries(
     max_tokens: int,
     max_retries: int,
     quiet: bool = False,
+    is_group: bool = False,
+    skip_success_message: bool = False,
+    task_description: str = "commit message",
 ) -> str:
     """Generate content with retry logic using direct API calls."""
     # Parse model string to determine provider and actual model
@@ -92,38 +118,22 @@ def generate_with_retries(
     provider, model_name = model.split(":", 1)
 
     # Validate provider
-    supported_providers = [
-        "anthropic",
-        "cerebras",
-        "chutes",
-        "deepseek",
-        "fireworks",
-        "gemini",
-        "groq",
-        "lm-studio",
-        "minimax",
-        "ollama",
-        "openai",
-        "openrouter",
-        "streamlake",
-        "synthetic",
-        "together",
-        "zai",
-        "zai-coding",
-        "custom-anthropic",
-        "custom-openai",
-    ]
-    if provider not in supported_providers:
-        raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {supported_providers}")
+    if provider not in SUPPORTED_PROVIDERS:
+        raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {SUPPORTED_PROVIDERS}")
 
     if not messages:
         raise AIError.model_error("No messages provided for AI generation")
 
     # Set up spinner
+    if is_group:
+        message_type = f"grouped {task_description}s"
+    else:
+        message_type = task_description
+
     if quiet:
         spinner = None
     else:
-        spinner = Halo(text=f"Generating commit message with {provider} {model_name}...", spinner="dots")
+        spinner = Status(f"Generating {message_type} with {provider} {model_name}...")
         spinner.start()
 
     last_exception = None
@@ -131,9 +141,9 @@ def generate_with_retries(
 
     for attempt in range(max_retries):
         try:
-            if not quiet and attempt > 0:
+            if not quiet and not skip_success_message and attempt > 0:
                 if spinner:
-                    spinner.text = f"Retry {attempt + 1}/{max_retries} with {provider} {model_name}..."
+                    spinner.update(f"Retry {attempt + 1}/{max_retries} with {provider} {model_name}...")
                 logger.info(f"Retry attempt {attempt + 1}/{max_retries}")
 
             # Call the appropriate provider function
@@ -144,7 +154,11 @@ def generate_with_retries(
             content = provider_func(model=model_name, messages=messages, temperature=temperature, max_tokens=max_tokens)
 
             if spinner:
-                spinner.succeed(f"Generated commit message with {provider} {model_name}")
+                if skip_success_message:
+                    spinner.stop()  # Stop spinner without showing success/failure
+                else:
+                    spinner.stop()
+                    console.print(f"✓ Generated {message_type} with {provider} {model_name}")
 
             if content is not None and content.strip():
                 return content.strip()  # type: ignore[no-any-return]
@@ -159,8 +173,9 @@ def generate_with_retries(
 
             # For authentication and model errors, don't retry
             if error_type in ["authentication", "model"]:
-                if spinner:
-                    spinner.fail(f"Failed to generate commit message with {provider} {model_name}")
+                if spinner and not skip_success_message:
+                    spinner.stop()
+                    console.print(f"✗ Failed to generate {message_type} with {provider} {model_name}")
 
                 # Create the appropriate error type based on classification
                 if error_type == "authentication":
@@ -171,23 +186,33 @@ def generate_with_retries(
             if attempt < max_retries - 1:
                 # Exponential backoff
                 wait_time = 2**attempt
-                if not quiet:
-                    logger.warning(f"AI generation failed (attempt {attempt + 1}), retrying in {wait_time}s: {str(e)}")
-
-                if spinner:
+                if not quiet and not skip_success_message:
+                    if attempt == 0:
+                        logger.warning(f"AI generation failed, retrying in {wait_time}s: {str(e)}")
+                    else:
+                        logger.warning(
+                            f"AI generation failed (attempt {attempt + 1}), retrying in {wait_time}s: {str(e)}"
+                        )
+
+                if spinner and not skip_success_message:
                     for i in range(wait_time, 0, -1):
-                        spinner.text = f"Retry {attempt + 1}/{max_retries} in {i}s..."
+                        spinner.update(f"Retry {attempt + 1}/{max_retries} in {i}s...")
                         time.sleep(1)
                 else:
                     time.sleep(wait_time)
             else:
-                logger.error(f"AI generation failed after {max_retries} attempts: {str(e)}")
+                num_retries = max_retries
+                retry_word = "retry" if num_retries == 1 else "retries"
+                logger.error(f"AI generation failed after {num_retries} {retry_word}: {str(e)}")
 
-                if spinner:
-                    spinner.fail(f"Failed to generate commit message with {provider} {model_name}")
+                if spinner and not skip_success_message:
+                    spinner.stop()
+                    console.print(f"✗ Failed to generate {message_type} with {provider} {model_name}")
 
     # If we get here, all retries failed - use the last classified error type
-    error_message = f"Failed to generate commit message after {max_retries} attempts"
+    num_retries = max_retries
+    retry_word = "retry" if num_retries == 1 else "retries"
+    error_message = f"Failed to generate {message_type} after {num_retries} {retry_word}"
     if last_error_type == "authentication":
         raise AIError.authentication_error(error_message) from last_exception
     elif last_error_type == "rate_limit":
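The new `GAC_NO_TIKTOKEN` toggle short-circuits `count_tokens` to the rough four-characters-per-token estimate. A small sketch of the observable behaviour, assuming only the code above; note that `_should_skip_tiktoken_counting` is cached, so the variable must be set before the first call:

```python
import os

# Hedged sketch based on the diff above; the model string is only illustrative.
os.environ["GAC_NO_TIKTOKEN"] = "1"  # must be set before the first (lru_cached) check

from gac.ai_utils import count_tokens

text = "feat: add provider registry"
print(count_tokens(text, "openai:gpt-4o"))  # == len(text) // 4, no tiktoken call is made
```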
gac/auth_cli.py ADDED
@@ -0,0 +1,69 @@
+"""CLI for authenticating Claude Code OAuth tokens.
+
+Provides a command to authenticate and re-authenticate Claude Code subscriptions.
+"""
+
+import logging
+
+import click
+
+from gac.oauth.claude_code import authenticate_and_save, load_stored_token
+from gac.utils import setup_logging
+
+logger = logging.getLogger(__name__)
+
+
+@click.command()
+@click.option(
+    "--quiet",
+    "-q",
+    is_flag=True,
+    help="Suppress non-error output",
+)
+@click.option(
+    "--log-level",
+    default="INFO",
+    type=click.Choice(["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"], case_sensitive=False),
+    help="Set log level (default: INFO)",
+)
+def auth(quiet: bool = False, log_level: str = "INFO") -> None:
+    """Authenticate Claude Code OAuth token.
+
+    This command allows you to authenticate or re-authenticate your
+    Claude Code OAuth token when it expires or you want to refresh it.
+    It opens a browser window for the OAuth flow and saves the token
+    to ~/.gac.env.
+
+    The token is used by the Claude Code provider to access your
+    Claude Code subscription instead of requiring an Anthropic API key.
+    """
+    # Setup logging
+    if quiet:
+        effective_log_level = "ERROR"
+    else:
+        effective_log_level = log_level
+    setup_logging(effective_log_level)
+
+    # Check if there's an existing token
+    existing_token = load_stored_token()
+    if existing_token and not quiet:
+        click.echo("✓ Found existing Claude Code access token.")
+        click.echo()
+
+    if not quiet:
+        click.echo("🔐 Starting Claude Code OAuth authentication...")
+        click.echo(" Your browser will open automatically")
+        click.echo(" (Waiting up to 3 minutes for callback)")
+        click.echo()
+
+    # Perform OAuth authentication
+    success = authenticate_and_save(quiet=quiet)
+
+    if success:
+        if not quiet:
+            click.echo("✅ Claude Code authentication completed successfully!")
+            click.echo(" Your new token has been saved and is ready to use.")
+    else:
+        click.echo("❌ Claude Code authentication failed.")
+        click.echo(" Please try again or check your network connection.")
+        raise click.ClickException("Claude Code authentication failed")
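Since auth_cli.py is new in this release, a quick way to exercise the command in isolation is click's test runner. A sketch under the assumption that a browser-based OAuth flow is available; in a headless environment `authenticate_and_save` is expected to fail and the command exits non-zero:

```python
# Hedged sketch: drives the new `auth` command through click's testing API.
from click.testing import CliRunner

from gac.auth_cli import auth

runner = CliRunner()
result = runner.invoke(auth, ["--log-level", "DEBUG"])  # add "--quiet" to silence the banner
print(result.exit_code)  # non-zero when authenticate_and_save() reports failure
print(result.output)
```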
gac/cli.py CHANGED
@@ -9,26 +9,36 @@ import logging
 import sys
 
 import click
+from rich.console import Console
 
 from gac import __version__
+from gac.auth_cli import auth as auth_cli
 from gac.config import load_config
 from gac.config_cli import config as config_cli
-from gac.constants import Logging
+from gac.constants import Languages, Logging
 from gac.diff_cli import diff as diff_cli
 from gac.errors import handle_error
 from gac.init_cli import init as init_cli
+from gac.language_cli import language as language_cli
 from gac.main import main
+from gac.model_cli import model as model_cli
 from gac.utils import setup_logging
 
 config = load_config()
 logger = logging.getLogger(__name__)
+console = Console()
 
 
 @click.group(invoke_without_command=True, context_settings={"ignore_unknown_options": True})
 # Git workflow options
 @click.option("--add-all", "-a", is_flag=True, help="Stage all changes before committing")
+@click.option("--group", "-g", is_flag=True, help="Group changes into multiple logical commits")
+@click.option(
+    "--interactive", "-i", is_flag=True, help="Ask interactive questions to gather more context for the commit message"
+)
 @click.option("--push", "-p", is_flag=True, help="Push changes to remote after committing")
 @click.option("--dry-run", is_flag=True, help="Dry run the commit workflow")
+@click.option("--message-only", is_flag=True, help="Output only the generated commit message without committing")
 @click.option("--yes", "-y", is_flag=True, help="Skip confirmation prompt")
 # Commit message options
 @click.option("--one-liner", "-o", is_flag=True, help="Generate a single-line commit message")
@@ -43,6 +53,9 @@ logger = logging.getLogger(__name__)
 @click.option("--hint", "-h", default="", help="Additional context to include in the prompt")
 # Model options
 @click.option("--model", "-m", help="Override the default model (format: 'provider:model_name')")
+@click.option(
+    "--language", "-l", help="Override the language for commit messages (e.g., 'Spanish', 'es', 'zh-CN', 'ja')"
+)
 # Output options
 @click.option("--quiet", "-q", is_flag=True, help="Suppress non-error output")
 @click.option(
@@ -60,12 +73,20 @@ logger = logging.getLogger(__name__)
 # Advanced options
 @click.option("--no-verify", is_flag=True, help="Skip pre-commit and lefthook hooks when committing")
 @click.option("--skip-secret-scan", is_flag=True, help="Skip security scan for secrets in staged changes")
+@click.option(
+    "--hook-timeout",
+    type=int,
+    default=0,
+    help="Timeout for pre-commit and lefthook hooks in seconds (0 to use configuration)",
+)
 # Other options
 @click.option("--version", is_flag=True, help="Show the version of the Git Auto Commit (gac) tool")
 @click.pass_context
 def cli(
     ctx: click.Context,
     add_all: bool = False,
+    group: bool = False,
+    interactive: bool = False,
     log_level: str = str(config["log_level"]),
     one_liner: bool = False,
     push: bool = False,
@@ -75,11 +96,14 @@ def cli(
     yes: bool = False,
     hint: str = "",
     model: str | None = None,
+    language: str | None = None,
     version: bool = False,
     dry_run: bool = False,
+    message_only: bool = False,
     verbose: bool = False,
     no_verify: bool = False,
     skip_secret_scan: bool = False,
+    hook_timeout: int = 0,
 ) -> None:
     """Git Auto Commit - Generate commit messages with AI."""
     if ctx.invoked_subcommand is None:
@@ -92,15 +116,27 @@ def cli(
         setup_logging(effective_log_level)
         logger.info("Starting gac")
 
+        # Validate incompatible flag combinations
+        if message_only and group:
+            console.print("[red]Error: --message-only and --group options are mutually exclusive[/red]")
+            console.print("[yellow]--message-only is for generating a single commit message for external use[/yellow]")
+            console.print("[yellow]--group is for organizing multiple commits within the current workflow[/yellow]")
+            sys.exit(1)
+
         # Determine if we should infer scope based on -s flag or always_include_scope setting
         infer_scope = bool(scope or config.get("always_include_scope", False))
 
         # Determine if verbose mode should be enabled based on -v flag or verbose config setting
         use_verbose = bool(verbose or config.get("verbose", False))
 
+        # Resolve language code to full name if provided
+        resolved_language = Languages.resolve_code(language) if language else None
+
         try:
             main(
                 stage_all=add_all,
+                group=group,
+                interactive=interactive,
                 model=model,
                 hint=hint,
                 one_liner=one_liner,
@@ -110,9 +146,12 @@ def cli(
                 push=push,
                 quiet=quiet,
                 dry_run=dry_run,
+                message_only=message_only,
                 verbose=use_verbose,
                 no_verify=no_verify,
                 skip_secret_scan=skip_secret_scan or bool(config.get("skip_secret_scan", False)),
+                language=resolved_language,
+                hook_timeout=hook_timeout if hook_timeout > 0 else int(config.get("hook_timeout", 120) or 120),
             )
         except Exception as e:
             handle_error(e, exit_program=True)
@@ -122,6 +161,8 @@ def cli(
 
     ctx.obj = {
         "add_all": add_all,
+        "group": group,
+        "interactive": interactive,
         "log_level": log_level,
         "one_liner": one_liner,
         "push": push,
@@ -131,17 +172,33 @@ def cli(
         "yes": yes,
         "hint": hint,
         "model": model,
+        "language": language,
         "version": version,
         "dry_run": dry_run,
+        "message_only": message_only,
         "verbose": verbose,
         "no_verify": no_verify,
         "skip_secret_scan": skip_secret_scan,
+        "hook_timeout": hook_timeout,
     }
 
 
+cli.add_command(auth_cli)
 cli.add_command(config_cli)
-cli.add_command(init_cli)
 cli.add_command(diff_cli)
+cli.add_command(init_cli)
+cli.add_command(language_cli)
+cli.add_command(model_cli)
+
+
+@click.command(context_settings=language_cli.context_settings)
+@click.pass_context
+def lang(ctx):
+    """Set the language for commit messages interactively. (Alias for 'language')"""
+    ctx.forward(language_cli)
+
+
+cli.add_command(lang)  # Add the lang alias
 
 if __name__ == "__main__":
     cli()
gac/config.py CHANGED
@@ -1,6 +1,6 @@
 """Configuration loading for gac.
 
-Handles environment variable and .env file precedence for application settings.
+Handles environment variable and .gac.env file precedence for application settings.
 
 """
 import os
@@ -12,19 +12,16 @@ from gac.constants import EnvDefaults, Logging
 
 
 def load_config() -> dict[str, str | int | float | bool | None]:
-    """Load configuration from $HOME/.gac.env, then ./.gac.env or ./.env, then environment variables."""
+    """Load configuration from $HOME/.gac.env, then ./.gac.env, then environment variables."""
     user_config = Path.home() / ".gac.env"
     if user_config.exists():
         load_dotenv(user_config)
 
-    # Check for both .gac.env and .env in project directory
+    # Check for .gac.env in project directory
     project_gac_env = Path(".gac.env")
-    project_env = Path(".env")
 
     if project_gac_env.exists():
         load_dotenv(project_gac_env, override=True)
-    elif project_env.exists():
-        load_dotenv(project_env, override=True)
 
     config = {
         "model": os.getenv("GAC_MODEL"),
@@ -37,7 +34,13 @@ def load_config() -> dict[str, str | int | float | bool | None]:
         in ("true", "1", "yes", "on"),
         "skip_secret_scan": os.getenv("GAC_SKIP_SECRET_SCAN", str(EnvDefaults.SKIP_SECRET_SCAN)).lower()
         in ("true", "1", "yes", "on"),
+        "no_tiktoken": os.getenv("GAC_NO_TIKTOKEN", str(EnvDefaults.NO_TIKTOKEN)).lower() in ("true", "1", "yes", "on"),
         "verbose": os.getenv("GAC_VERBOSE", str(EnvDefaults.VERBOSE)).lower() in ("true", "1", "yes", "on"),
+        "system_prompt_path": os.getenv("GAC_SYSTEM_PROMPT_PATH"),
+        "language": os.getenv("GAC_LANGUAGE"),
+        "translate_prefixes": os.getenv("GAC_TRANSLATE_PREFIXES", "false").lower() in ("true", "1", "yes", "on"),
+        "rtl_confirmed": os.getenv("GAC_RTL_CONFIRMED", "false").lower() in ("true", "1", "yes", "on"),
+        "hook_timeout": int(os.getenv("GAC_HOOK_TIMEOUT", EnvDefaults.HOOK_TIMEOUT)),
     }
 
     return config
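With the ./.env fallback removed, configuration now comes only from $HOME/.gac.env, ./.gac.env, and the process environment. A hedged sketch of reading the new keys through `load_config`, assuming no .gac.env file overrides the values set below (the project-level file is loaded with override=True, so it would win):

```python
# Hedged sketch: reads the merged configuration the same way the gac CLI does.
import os

from gac.config import load_config

os.environ["GAC_LANGUAGE"] = "Spanish"   # any GAC_* variable shown above can be set this way
os.environ["GAC_HOOK_TIMEOUT"] = "60"    # parsed with int() inside load_config

config = load_config()
print(config["language"], config["hook_timeout"])  # -> Spanish 60 (absent overriding .gac.env files)
```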
gac/config_cli.py CHANGED
@@ -18,13 +18,34 @@ def config():
 @config.command()
 def show() -> None:
     """Show all current config values."""
-    if not GAC_ENV_PATH.exists():
+    project_env_path = Path(".gac.env")
+    user_exists = GAC_ENV_PATH.exists()
+    project_exists = project_env_path.exists()
+
+    if not user_exists and not project_exists:
         click.echo("No $HOME/.gac.env found.")
+        click.echo("No project-level .gac.env found.")
         return
-    load_dotenv(GAC_ENV_PATH, override=True)
-    with open(GAC_ENV_PATH) as f:
-        for line in f:
-            click.echo(line.rstrip())
+
+    if user_exists:
+        click.echo(f"User config ({GAC_ENV_PATH}):")
+        with open(GAC_ENV_PATH, encoding="utf-8") as f:
+            for line in f:
+                click.echo(line.rstrip())
+    else:
+        click.echo("No $HOME/.gac.env found.")
+
+    if project_exists:
+        if user_exists:
+            click.echo("")
+        click.echo("Project config (./.gac.env):")
+        with open(project_env_path, encoding="utf-8") as f:
+            for line in f:
+                click.echo(line.rstrip())
+        click.echo("")
+        click.echo("Note: Project-level .gac.env overrides $HOME/.gac.env values for any duplicated variables.")
+    else:
+        click.echo("No project-level .gac.env found.")
 
 
 @config.command()