gac 1.13.0__py3-none-any.whl → 3.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gac/providers/__init__.py CHANGED
@@ -1,42 +1,88 @@
"""AI provider implementations for commit message generation."""

from .anthropic import call_anthropic_api
from .azure_openai import call_azure_openai_api
from .cerebras import call_cerebras_api
from .chutes import call_chutes_api
from .claude_code import call_claude_code_api
from .custom_anthropic import call_custom_anthropic_api
from .custom_openai import call_custom_openai_api
from .deepseek import call_deepseek_api
from .fireworks import call_fireworks_api
from .gemini import call_gemini_api
from .groq import call_groq_api
from .kimi_coding import call_kimi_coding_api
from .lmstudio import call_lmstudio_api
from .minimax import call_minimax_api
from .mistral import call_mistral_api
from .moonshot import call_moonshot_api
from .ollama import call_ollama_api
from .openai import call_openai_api
from .openrouter import call_openrouter_api
from .replicate import call_replicate_api
from .streamlake import call_streamlake_api
from .synthetic import call_synthetic_api
from .together import call_together_api
from .zai import call_zai_api, call_zai_coding_api

# Single source of truth: maps each provider name to its API callable.
PROVIDER_REGISTRY = {
    "anthropic": call_anthropic_api,
    "azure-openai": call_azure_openai_api,
    "cerebras": call_cerebras_api,
    "claude-code": call_claude_code_api,
    "chutes": call_chutes_api,
    "custom-anthropic": call_custom_anthropic_api,
    "custom-openai": call_custom_openai_api,
    "deepseek": call_deepseek_api,
    "fireworks": call_fireworks_api,
    "gemini": call_gemini_api,
    "groq": call_groq_api,
    "kimi-coding": call_kimi_coding_api,
    "lm-studio": call_lmstudio_api,
    "minimax": call_minimax_api,
    "mistral": call_mistral_api,
    "moonshot": call_moonshot_api,
    "ollama": call_ollama_api,
    "openai": call_openai_api,
    "openrouter": call_openrouter_api,
    "replicate": call_replicate_api,
    "streamlake": call_streamlake_api,
    "synthetic": call_synthetic_api,
    "together": call_together_api,
    "zai": call_zai_api,
    "zai-coding": call_zai_coding_api,
}

# Alphabetically-sorted provider names, derived from the registry keys.
SUPPORTED_PROVIDERS = sorted(PROVIDER_REGISTRY)

__all__ = [
    "call_anthropic_api",
    "call_azure_openai_api",
    "call_cerebras_api",
    "call_chutes_api",
    "call_claude_code_api",
    "call_custom_anthropic_api",
    "call_custom_openai_api",
    "call_deepseek_api",
    "call_fireworks_api",
    "call_gemini_api",
    "call_groq_api",
    "call_kimi_coding_api",
    "call_lmstudio_api",
    "call_minimax_api",
    "call_mistral_api",
    "call_moonshot_api",
    "call_ollama_api",
    "call_openai_api",
    "call_openrouter_api",
    "call_replicate_api",
    "call_streamlake_api",
    "call_synthetic_api",
    "call_together_api",
    "call_zai_api",
    "call_zai_coding_api",
    "PROVIDER_REGISTRY",
    "SUPPORTED_PROVIDERS",
]
@@ -0,0 +1,97 @@
"""Azure OpenAI provider for gac.

This provider provides native support for Azure OpenAI Service with proper
endpoint construction and API version handling.
"""

import json
import logging
import os

import httpx

from gac.errors import AIError

logger = logging.getLogger(__name__)


def call_azure_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    """Call Azure OpenAI Service API.

    Environment variables:
        AZURE_OPENAI_API_KEY: Azure OpenAI API key (required)
        AZURE_OPENAI_ENDPOINT: Azure OpenAI endpoint URL (required)
            Example: https://your-resource.openai.azure.com
        AZURE_OPENAI_API_VERSION: Azure OpenAI API version (required)
            Example: 2025-01-01-preview
            Example: 2024-02-15-preview

    Args:
        model: The deployment name in Azure OpenAI (e.g., 'gpt-4o', 'gpt-35-turbo')
        messages: List of message dictionaries with 'role' and 'content' keys
        temperature: Controls randomness (0.0-1.0)
        max_tokens: Maximum tokens in the response

    Returns:
        The generated commit message

    Raises:
        AIError: If authentication fails, API errors occur, or response is invalid
    """
    # Validate required configuration up front with guard clauses.
    key = os.getenv("AZURE_OPENAI_API_KEY")
    if not key:
        raise AIError.authentication_error("AZURE_OPENAI_API_KEY environment variable not set")

    base = os.getenv("AZURE_OPENAI_ENDPOINT")
    if not base:
        raise AIError.model_error("AZURE_OPENAI_ENDPOINT environment variable not set")

    version = os.getenv("AZURE_OPENAI_API_VERSION")
    if not version:
        raise AIError.model_error("AZURE_OPENAI_API_VERSION environment variable not set")

    # Azure routes by deployment name, with the API version as a query param.
    base = base.rstrip("/")
    request_url = f"{base}/openai/deployments/{model}/chat/completions?api-version={version}"
    request_headers = {"api-key": key, "Content-Type": "application/json"}
    payload = {"messages": messages, "temperature": temperature, "max_tokens": max_tokens}

    try:
        resp = httpx.post(request_url, headers=request_headers, json=payload, timeout=120)
        resp.raise_for_status()
        body = resp.json()

        try:
            text = body["choices"][0]["message"]["content"]
        except (KeyError, IndexError, TypeError) as e:
            # Log the full payload so the unexpected shape can be diagnosed later.
            logger.error(f"Unexpected response format from Azure OpenAI API. Response: {json.dumps(body)}")
            raise AIError.model_error(
                f"Azure OpenAI API returned unexpected format. Expected response with "
                f"'choices[0].message.content', but got: {type(e).__name__}. Check logs for full response structure."
            ) from e

        if text is None:
            raise AIError.model_error("Azure OpenAI API returned null content")
        if text == "":
            raise AIError.model_error("Azure OpenAI API returned empty content")
        return text
    except httpx.ConnectError as e:
        raise AIError.connection_error(f"Azure OpenAI API connection failed: {str(e)}") from e
    except httpx.HTTPStatusError as e:
        status_code = e.response.status_code
        error_text = e.response.text
        # Map the common HTTP failures onto the gac error taxonomy.
        if status_code == 401:
            raise AIError.authentication_error(f"Azure OpenAI API authentication failed: {error_text}") from e
        elif status_code == 429:
            raise AIError.rate_limit_error(f"Azure OpenAI API rate limit exceeded: {error_text}") from e
        else:
            raise AIError.model_error(f"Azure OpenAI API error: {status_code} - {error_text}") from e
    except httpx.TimeoutException as e:
        raise AIError.timeout_error(f"Azure OpenAI API request timed out: {str(e)}") from e
    except AIError:
        # Already classified above; let it propagate untouched.
        raise
    except Exception as e:
        raise AIError.model_error(f"Error calling Azure OpenAI API: {str(e)}") from e
@@ -0,0 +1,102 @@
"""Claude Code provider implementation.

This provider allows users with Claude Code subscriptions to use their OAuth tokens
instead of paying for the expensive Anthropic API.
"""

import os

import httpx

from gac.errors import AIError


def call_claude_code_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    """Call Claude Code API using OAuth token.

    This provider uses the Claude Code subscription OAuth token instead of the Anthropic API key.
    It authenticates using Bearer token authentication with the special anthropic-beta header.

    Environment variables:
        CLAUDE_CODE_ACCESS_TOKEN: OAuth access token from Claude Code authentication

    Args:
        model: Model name (e.g., 'claude-sonnet-4-5')
        messages: List of message dictionaries with 'role' and 'content' keys
        temperature: Sampling temperature (0.0-1.0)
        max_tokens: Maximum tokens in response

    Returns:
        Generated text response

    Raises:
        AIError: If authentication fails or API call fails
    """
    access_token = os.getenv("CLAUDE_CODE_ACCESS_TOKEN")
    if not access_token:
        raise AIError.authentication_error(
            "CLAUDE_CODE_ACCESS_TOKEN not found in environment variables. "
            "Please authenticate with Claude Code and set this token."
        )

    url = "https://api.anthropic.com/v1/messages"
    headers = {
        "Authorization": f"Bearer {access_token}",
        "anthropic-version": "2023-06-01",
        "anthropic-beta": "oauth-2025-04-20",
        "content-type": "application/json",
    }

    # Convert messages to Anthropic format.
    # IMPORTANT: Claude Code OAuth tokens require the system message to be EXACTLY
    # "You are Claude Code, Anthropic's official CLI for Claude." with NO additional content.
    # Any other instructions must be moved to the user message.
    anthropic_messages = []
    system_instructions = ""

    for msg in messages:
        if msg["role"] == "system":
            system_instructions = msg["content"]
        else:
            anthropic_messages.append({"role": msg["role"], "content": msg["content"]})

    # Claude Code requires this exact system message, nothing more
    system_message = "You are Claude Code, Anthropic's official CLI for Claude."

    # Move any system instructions into the first message. Previously, instructions
    # were silently dropped when there were no non-system messages; now they become
    # a user message so they always reach the model.
    if system_instructions:
        if anthropic_messages:
            first_msg = anthropic_messages[0]
            first_msg["content"] = f"{system_instructions}\n\n{first_msg['content']}"
        else:
            anthropic_messages.append({"role": "user", "content": system_instructions})

    data = {
        "model": model,
        "messages": anthropic_messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "system": system_message,
    }

    try:
        response = httpx.post(url, headers=headers, json=data, timeout=120)
        response.raise_for_status()
        response_data = response.json()
        content = response_data["content"][0]["text"]
        if content is None:
            raise AIError.model_error("Claude Code API returned null content")
        if content == "":
            raise AIError.model_error("Claude Code API returned empty content")
        return content
    except httpx.ConnectError as e:
        # Consistent with the other providers in this package.
        raise AIError.connection_error(f"Claude Code API connection failed: {str(e)}") from e
    except httpx.HTTPStatusError as e:
        if e.response.status_code == 401:
            raise AIError.authentication_error(
                f"Claude Code authentication failed: {e.response.text}. "
                "Your token may have expired. Please re-authenticate."
            ) from e
        if e.response.status_code == 429:
            raise AIError.rate_limit_error(f"Claude Code API rate limit exceeded: {e.response.text}") from e
        raise AIError.model_error(f"Claude Code API error: {e.response.status_code} - {e.response.text}") from e
    except httpx.TimeoutException as e:
        raise AIError.timeout_error(f"Claude Code API request timed out: {str(e)}") from e
    except AIError:
        # Bug fix: without this, AIErrors raised above (e.g. null/empty content)
        # were re-wrapped by the generic handler below, obscuring the real cause.
        raise
    except Exception as e:
        raise AIError.model_error(f"Error calling Claude Code API: {str(e)}") from e
@@ -30,7 +30,7 @@ def call_custom_anthropic_api(model: str, messages: list[dict], temperature: flo
30
30
  CUSTOM_ANTHROPIC_VERSION: API version header (optional, defaults to '2023-06-01')
31
31
 
32
32
  Args:
33
- model: The model to use (e.g., 'claude-3-5-sonnet-20241022', 'claude-3-5-haiku-latest')
33
+ model: The model to use (e.g., 'claude-sonnet-4-5', 'claude-haiku-4-5')
34
34
  messages: List of message dictionaries with 'role' and 'content' keys
35
35
  temperature: Controls randomness (0.0-1.0)
36
36
  max_tokens: Maximum tokens in the response
@@ -19,7 +19,6 @@ def call_custom_openai_api(model: str, messages: list[dict], temperature: float,
19
19
  """Call a custom OpenAI-compatible API endpoint.
20
20
 
21
21
  This provider is useful for:
22
- - Azure OpenAI Service
23
22
  - OpenAI-compatible proxies or gateways
24
23
  - Self-hosted OpenAI-compatible services
25
24
  - Other services implementing the OpenAI Chat Completions API
@@ -27,8 +26,8 @@ def call_custom_openai_api(model: str, messages: list[dict], temperature: float,
27
26
  Environment variables:
28
27
  CUSTOM_OPENAI_API_KEY: API key for authentication (required)
29
28
  CUSTOM_OPENAI_BASE_URL: Base URL for the API endpoint (required)
30
- Example: https://your-endpoint.openai.azure.com
31
29
  Example: https://your-proxy.example.com/v1
30
+ Example: https://your-custom-endpoint.com
32
31
 
33
32
  Args:
34
33
  model: The model to use (e.g., 'gpt-4', 'gpt-3.5-turbo')
@@ -0,0 +1,63 @@
"""Kimi Coding AI provider implementation."""

import json
import logging
import os

import httpx

from gac.errors import AIError

logger = logging.getLogger(__name__)


def call_kimi_coding_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    """Call Kimi Coding API using OpenAI-compatible endpoint."""
    token = os.getenv("KIMI_CODING_API_KEY")
    if not token:
        raise AIError.authentication_error("KIMI_CODING_API_KEY not found in environment variables")

    endpoint = "https://api.kimi.com/coding/v1/chat/completions"
    request_headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}

    # Standard OpenAI chat format; note this endpoint takes max_completion_tokens.
    payload = {"model": model, "messages": messages, "temperature": temperature, "max_completion_tokens": max_tokens}

    try:
        resp = httpx.post(endpoint, headers=request_headers, json=payload, timeout=120)
        resp.raise_for_status()
        body = resp.json()

        try:
            text = body["choices"][0]["message"]["content"]
        except (KeyError, IndexError, TypeError) as e:
            # Preserve the raw payload in the logs for debugging shape mismatches.
            logger.error(f"Unexpected response format from Kimi Coding API. Response: {json.dumps(body)}")
            raise AIError.model_error(
                f"Kimi Coding API returned unexpected format. Expected OpenAI-compatible response with "
                f"'choices[0].message.content', but got: {type(e).__name__}. Check logs for full response structure."
            ) from e

        if text is None:
            raise AIError.model_error("Kimi Coding API returned null content")
        if text == "":
            raise AIError.model_error("Kimi Coding API returned empty content")
        return text
    except httpx.ConnectError as e:
        raise AIError.connection_error(f"Kimi Coding API connection failed: {str(e)}") from e
    except httpx.HTTPStatusError as e:
        status_code = e.response.status_code
        error_text = e.response.text
        # Translate the well-known HTTP statuses into specific AIError kinds.
        if status_code == 401:
            raise AIError.authentication_error(f"Kimi Coding API authentication failed: {error_text}") from e
        elif status_code == 429:
            raise AIError.rate_limit_error(f"Kimi Coding API rate limit exceeded: {error_text}") from e
        else:
            raise AIError.model_error(f"Kimi Coding API error: {status_code} - {error_text}") from e
    except httpx.TimeoutException as e:
        raise AIError.timeout_error(f"Kimi Coding API request timed out: {str(e)}") from e
    except AIError:
        # Already classified; do not re-wrap.
        raise
    except Exception as e:
        raise AIError.model_error(f"Error calling Kimi Coding API: {str(e)}") from e
@@ -0,0 +1,38 @@
"""Mistral API provider for gac."""

import os

import httpx

from gac.errors import AIError


def call_mistral_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    """Call Mistral API directly.

    Environment variables:
        MISTRAL_API_KEY: Mistral API key (required)

    Args:
        model: The model to use (e.g., 'mistral-large-latest')
        messages: List of message dictionaries with 'role' and 'content' keys
        temperature: Controls randomness (0.0-1.0)
        max_tokens: Maximum tokens in the response

    Returns:
        The generated commit message

    Raises:
        AIError: If authentication fails, API errors occur, or response is invalid
    """
    api_key = os.getenv("MISTRAL_API_KEY")
    if not api_key:
        raise AIError.authentication_error("MISTRAL_API_KEY not found in environment variables")

    url = "https://api.mistral.ai/v1/chat/completions"
    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}

    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}

    try:
        response = httpx.post(url, headers=headers, json=data, timeout=120)
        response.raise_for_status()
        response_data = response.json()
        content = response_data["choices"][0]["message"]["content"]
        if content is None:
            raise AIError.model_error("Mistral API returned null content")
        if content == "":
            raise AIError.model_error("Mistral API returned empty content")
        return content
    except httpx.ConnectError as e:
        # Consistent with the other providers in this package.
        raise AIError.connection_error(f"Mistral API connection failed: {str(e)}") from e
    except httpx.HTTPStatusError as e:
        # 401 now maps to an authentication error (previously a generic model error).
        if e.response.status_code == 401:
            raise AIError.authentication_error(f"Mistral API authentication failed: {e.response.text}") from e
        if e.response.status_code == 429:
            raise AIError.rate_limit_error(f"Mistral API rate limit exceeded: {e.response.text}") from e
        raise AIError.model_error(f"Mistral API error: {e.response.status_code} - {e.response.text}") from e
    except httpx.TimeoutException as e:
        raise AIError.timeout_error(f"Mistral API request timed out: {str(e)}") from e
    except AIError:
        # Bug fix: without this, the null/empty-content AIErrors raised above were
        # re-wrapped by the generic handler below, obscuring the real cause.
        raise
    except Exception as e:
        raise AIError.model_error(f"Error calling Mistral API: {str(e)}") from e
@@ -0,0 +1,38 @@
"""Moonshot AI provider for gac."""

import os

import httpx

from gac.errors import AIError


def call_moonshot_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    """Call Moonshot AI API directly.

    Environment variables:
        MOONSHOT_API_KEY: Moonshot AI API key (required)

    Args:
        model: The model to use (e.g., 'moonshot-v1-8k')
        messages: List of message dictionaries with 'role' and 'content' keys
        temperature: Controls randomness (0.0-1.0)
        max_tokens: Maximum tokens in the response

    Returns:
        The generated commit message

    Raises:
        AIError: If authentication fails, API errors occur, or response is invalid
    """
    api_key = os.getenv("MOONSHOT_API_KEY")
    if not api_key:
        raise AIError.authentication_error("MOONSHOT_API_KEY not found in environment variables")

    url = "https://api.moonshot.ai/v1/chat/completions"
    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}

    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}

    try:
        response = httpx.post(url, headers=headers, json=data, timeout=120)
        response.raise_for_status()
        response_data = response.json()
        content = response_data["choices"][0]["message"]["content"]
        if content is None:
            raise AIError.model_error("Moonshot AI API returned null content")
        if content == "":
            raise AIError.model_error("Moonshot AI API returned empty content")
        return content
    except httpx.ConnectError as e:
        # Consistent with the other providers in this package.
        raise AIError.connection_error(f"Moonshot AI API connection failed: {str(e)}") from e
    except httpx.HTTPStatusError as e:
        # 401 now maps to an authentication error (previously a generic model error).
        if e.response.status_code == 401:
            raise AIError.authentication_error(f"Moonshot AI API authentication failed: {e.response.text}") from e
        if e.response.status_code == 429:
            raise AIError.rate_limit_error(f"Moonshot AI API rate limit exceeded: {e.response.text}") from e
        raise AIError.model_error(f"Moonshot AI API error: {e.response.status_code} - {e.response.text}") from e
    except httpx.TimeoutException as e:
        raise AIError.timeout_error(f"Moonshot AI API request timed out: {str(e)}") from e
    except AIError:
        # Bug fix: without this, the null/empty-content AIErrors raised above were
        # re-wrapped by the generic handler below, obscuring the real cause.
        raise
    except Exception as e:
        raise AIError.model_error(f"Error calling Moonshot AI API: {str(e)}") from e
@@ -0,0 +1,98 @@
"""Replicate API provider for gac."""

import os
import time

import httpx

from gac.errors import AIError


def call_replicate_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    """Call Replicate API directly.

    Replicate predictions are asynchronous: this function creates a prediction,
    then polls its status URL until it succeeds, fails, or the wait budget runs out.

    Environment variables:
        REPLICATE_API_TOKEN: Replicate API token (required)

    Args:
        model: Replicate version identifier used as the model selector
        messages: List of message dictionaries with 'role' and 'content' keys
        temperature: Controls randomness (0.0-1.0)
        max_tokens: Maximum tokens in the response

    Returns:
        The generated commit message

    Raises:
        AIError: If authentication fails, the prediction fails or times out,
            or the response is invalid
    """
    api_key = os.getenv("REPLICATE_API_TOKEN")
    if not api_key:
        raise AIError.authentication_error("REPLICATE_API_TOKEN not found in environment variables")

    # Replicate uses a different endpoint for language models
    url = "https://api.replicate.com/v1/predictions"
    headers = {"Authorization": f"Token {api_key}", "Content-Type": "application/json"}

    # Convert the chat messages into a single text prompt, since the Replicate
    # predictions endpoint takes a flat prompt rather than a message list.
    prompt_parts = []
    system_message = None

    for message in messages:
        role = message.get("role")
        content = message.get("content", "")

        if role == "system":
            system_message = content
        elif role == "user":
            prompt_parts.append(f"Human: {content}")
        elif role == "assistant":
            prompt_parts.append(f"Assistant: {content}")

    # Add system message at the beginning if present
    if system_message:
        prompt_parts.insert(0, f"System: {system_message}")

    # Add final assistant prompt
    prompt_parts.append("Assistant:")
    full_prompt = "\n\n".join(prompt_parts)

    # Replicate prediction payload
    data = {
        "version": model,  # Replicate uses version string as model identifier
        "input": {
            "prompt": full_prompt,
            "temperature": temperature,
            "max_tokens": max_tokens,
        },
    }

    try:
        # Create prediction
        response = httpx.post(url, headers=headers, json=data, timeout=120)
        response.raise_for_status()
        prediction_data = response.json()

        # Get the prediction URL to check status
        get_url = f"https://api.replicate.com/v1/predictions/{prediction_data['id']}"

        # Poll for completion (Replicate predictions are async)
        max_wait_time = 120
        wait_interval = 2
        elapsed_time = 0

        while elapsed_time < max_wait_time:
            get_response = httpx.get(get_url, headers=headers, timeout=120)
            get_response.raise_for_status()
            status_data = get_response.json()

            if status_data["status"] == "succeeded":
                content = status_data["output"]
                # Replicate language models stream output as a list of string
                # chunks; join them so callers always receive a single string.
                if isinstance(content, list):
                    content = "".join(content)
                if not content:
                    raise AIError.model_error("Replicate API returned empty content")
                return content
            elif status_data["status"] == "failed":
                raise AIError.model_error(f"Replicate prediction failed: {status_data.get('error', 'Unknown error')}")
            elif status_data["status"] in ["starting", "processing"]:
                time.sleep(wait_interval)
                elapsed_time += wait_interval
            else:
                raise AIError.model_error(f"Replicate API returned unknown status: {status_data['status']}")

        raise AIError.timeout_error("Replicate API prediction timed out")

    except httpx.HTTPStatusError as e:
        if e.response.status_code == 429:
            raise AIError.rate_limit_error(f"Replicate API rate limit exceeded: {e.response.text}") from e
        elif e.response.status_code == 401:
            raise AIError.authentication_error(f"Replicate API authentication failed: {e.response.text}") from e
        raise AIError.model_error(f"Replicate API error: {e.response.status_code} - {e.response.text}") from e
    except httpx.TimeoutException as e:
        raise AIError.timeout_error(f"Replicate API request timed out: {str(e)}") from e
    except AIError:
        # Bug fix: without this, AIErrors raised inside the try block (including
        # the poll-timeout above) were re-wrapped by the generic handler below,
        # turning timeouts into misleading model errors.
        raise
    except Exception as e:
        raise AIError.model_error(f"Error calling Replicate API: {str(e)}") from e
gac/security.py CHANGED
@@ -54,7 +54,7 @@ class SecretPatterns:
54
54
  PRIVATE_KEY = re.compile(r"-----BEGIN (?:RSA |EC |OPENSSH )?PRIVATE KEY-----")
55
55
 
56
56
  # Bearer Tokens (require actual token with specific characteristics)
57
- BEARER_TOKEN = re.compile(r"Bearer\s+[A-Za-z0-9]{20,}[/=]*\s", re.IGNORECASE)
57
+ BEARER_TOKEN = re.compile(r"Bearer\s+[A-Za-z0-9]{20,}[/=]*(?:\s|$)", re.IGNORECASE)
58
58
 
59
59
  # JWT Tokens
60
60
  JWT_TOKEN = re.compile(r"eyJ[A-Za-z0-9_-]+\.eyJ[A-Za-z0-9_-]+\.[A-Za-z0-9_-]+")