gac 3.8.1__py3-none-any.whl → 3.10.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. gac/__init__.py +4 -6
  2. gac/__version__.py +1 -1
  3. gac/ai_utils.py +18 -49
  4. gac/cli.py +14 -10
  5. gac/commit_executor.py +59 -0
  6. gac/config.py +28 -3
  7. gac/config_cli.py +19 -7
  8. gac/constants/__init__.py +34 -0
  9. gac/constants/commit.py +63 -0
  10. gac/constants/defaults.py +40 -0
  11. gac/constants/file_patterns.py +110 -0
  12. gac/constants/languages.py +119 -0
  13. gac/diff_cli.py +0 -22
  14. gac/errors.py +8 -2
  15. gac/git.py +6 -6
  16. gac/git_state_validator.py +193 -0
  17. gac/grouped_commit_workflow.py +458 -0
  18. gac/init_cli.py +2 -1
  19. gac/interactive_mode.py +179 -0
  20. gac/language_cli.py +0 -1
  21. gac/main.py +222 -959
  22. gac/model_cli.py +2 -1
  23. gac/model_identifier.py +70 -0
  24. gac/oauth/claude_code.py +2 -2
  25. gac/oauth/qwen_oauth.py +4 -0
  26. gac/oauth/token_store.py +2 -2
  27. gac/oauth_retry.py +161 -0
  28. gac/postprocess.py +155 -0
  29. gac/prompt.py +20 -490
  30. gac/prompt_builder.py +88 -0
  31. gac/providers/README.md +437 -0
  32. gac/providers/__init__.py +70 -81
  33. gac/providers/anthropic.py +12 -56
  34. gac/providers/azure_openai.py +48 -92
  35. gac/providers/base.py +329 -0
  36. gac/providers/cerebras.py +10 -43
  37. gac/providers/chutes.py +16 -72
  38. gac/providers/claude_code.py +64 -97
  39. gac/providers/custom_anthropic.py +51 -85
  40. gac/providers/custom_openai.py +29 -87
  41. gac/providers/deepseek.py +10 -43
  42. gac/providers/error_handler.py +139 -0
  43. gac/providers/fireworks.py +10 -43
  44. gac/providers/gemini.py +66 -73
  45. gac/providers/groq.py +10 -62
  46. gac/providers/kimi_coding.py +19 -59
  47. gac/providers/lmstudio.py +62 -52
  48. gac/providers/minimax.py +10 -43
  49. gac/providers/mistral.py +10 -43
  50. gac/providers/moonshot.py +10 -43
  51. gac/providers/ollama.py +54 -41
  52. gac/providers/openai.py +30 -46
  53. gac/providers/openrouter.py +15 -62
  54. gac/providers/protocol.py +71 -0
  55. gac/providers/qwen.py +55 -67
  56. gac/providers/registry.py +58 -0
  57. gac/providers/replicate.py +137 -91
  58. gac/providers/streamlake.py +26 -56
  59. gac/providers/synthetic.py +35 -47
  60. gac/providers/together.py +10 -43
  61. gac/providers/zai.py +21 -59
  62. gac/py.typed +0 -0
  63. gac/security.py +1 -1
  64. gac/templates/__init__.py +1 -0
  65. gac/templates/question_generation.txt +60 -0
  66. gac/templates/system_prompt.txt +224 -0
  67. gac/templates/user_prompt.txt +28 -0
  68. gac/utils.py +6 -5
  69. gac/workflow_context.py +162 -0
  70. {gac-3.8.1.dist-info → gac-3.10.10.dist-info}/METADATA +1 -1
  71. gac-3.10.10.dist-info/RECORD +79 -0
  72. gac/constants.py +0 -328
  73. gac-3.8.1.dist-info/RECORD +0 -56
  74. {gac-3.8.1.dist-info → gac-3.10.10.dist-info}/WHEEL +0 -0
  75. {gac-3.8.1.dist-info → gac-3.10.10.dist-info}/entry_points.txt +0 -0
  76. {gac-3.8.1.dist-info → gac-3.10.10.dist-info}/licenses/LICENSE +0 -0
gac/providers/minimax.py CHANGED
@@ -1,48 +1,15 @@
1
1
  """MiniMax API provider for gac."""
2
2
 
3
- import logging
4
- import os
3
+ from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
5
4
 
6
- import httpx
7
5
 
8
- from gac.constants import ProviderDefaults
9
- from gac.errors import AIError
10
- from gac.utils import get_ssl_verify
6
+ class MinimaxProvider(OpenAICompatibleProvider):
7
+ config = ProviderConfig(
8
+ name="MiniMax",
9
+ api_key_env="MINIMAX_API_KEY",
10
+ base_url="https://api.minimaxi.com/v1",
11
+ )
11
12
 
12
- logger = logging.getLogger(__name__)
13
-
14
-
15
- def call_minimax_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
16
- """Call MiniMax API directly."""
17
- api_key = os.getenv("MINIMAX_API_KEY")
18
- if not api_key:
19
- raise AIError.authentication_error("MINIMAX_API_KEY not found in environment variables")
20
-
21
- url = "https://api.minimax.io/v1/chat/completions"
22
- headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
23
-
24
- data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
25
-
26
- logger.debug(f"Calling MiniMax API with model={model}")
27
-
28
- try:
29
- response = httpx.post(
30
- url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
31
- )
32
- response.raise_for_status()
33
- response_data = response.json()
34
- content = response_data["choices"][0]["message"]["content"]
35
- if content is None:
36
- raise AIError.model_error("MiniMax API returned null content")
37
- if content == "":
38
- raise AIError.model_error("MiniMax API returned empty content")
39
- logger.debug("MiniMax API response received successfully")
40
- return content
41
- except httpx.HTTPStatusError as e:
42
- if e.response.status_code == 429:
43
- raise AIError.rate_limit_error(f"MiniMax API rate limit exceeded: {e.response.text}") from e
44
- raise AIError.model_error(f"MiniMax API error: {e.response.status_code} - {e.response.text}") from e
45
- except httpx.TimeoutException as e:
46
- raise AIError.timeout_error(f"MiniMax API request timed out: {str(e)}") from e
47
- except Exception as e:
48
- raise AIError.model_error(f"Error calling MiniMax API: {str(e)}") from e
13
+ def _get_api_url(self, model: str | None = None) -> str:
14
+ """Get MiniMax API URL with /chat/completions endpoint."""
15
+ return f"{self.config.base_url}/chat/completions"
gac/providers/mistral.py CHANGED
@@ -1,48 +1,15 @@
1
1
  """Mistral API provider for gac."""
2
2
 
3
- import logging
4
- import os
3
+ from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
5
4
 
6
- import httpx
7
5
 
8
- from gac.constants import ProviderDefaults
9
- from gac.errors import AIError
10
- from gac.utils import get_ssl_verify
6
+ class MistralProvider(OpenAICompatibleProvider):
7
+ config = ProviderConfig(
8
+ name="Mistral",
9
+ api_key_env="MISTRAL_API_KEY",
10
+ base_url="https://api.mistral.ai/v1",
11
+ )
11
12
 
12
- logger = logging.getLogger(__name__)
13
-
14
-
15
- def call_mistral_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
16
- """Call Mistral API directly."""
17
- api_key = os.getenv("MISTRAL_API_KEY")
18
- if not api_key:
19
- raise AIError.authentication_error("MISTRAL_API_KEY not found in environment variables")
20
-
21
- url = "https://api.mistral.ai/v1/chat/completions"
22
- headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
23
-
24
- data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
25
-
26
- logger.debug(f"Calling Mistral API with model={model}")
27
-
28
- try:
29
- response = httpx.post(
30
- url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
31
- )
32
- response.raise_for_status()
33
- response_data = response.json()
34
- content = response_data["choices"][0]["message"]["content"]
35
- if content is None:
36
- raise AIError.model_error("Mistral API returned null content")
37
- if content == "":
38
- raise AIError.model_error("Mistral API returned empty content")
39
- logger.debug("Mistral API response received successfully")
40
- return content
41
- except httpx.HTTPStatusError as e:
42
- if e.response.status_code == 429:
43
- raise AIError.rate_limit_error(f"Mistral API rate limit exceeded: {e.response.text}") from e
44
- raise AIError.model_error(f"Mistral API error: {e.response.status_code} - {e.response.text}") from e
45
- except httpx.TimeoutException as e:
46
- raise AIError.timeout_error(f"Mistral API request timed out: {str(e)}") from e
47
- except Exception as e:
48
- raise AIError.model_error(f"Error calling Mistral API: {str(e)}") from e
13
+ def _get_api_url(self, model: str | None = None) -> str:
14
+ """Get Mistral API URL with /chat/completions endpoint."""
15
+ return f"{self.config.base_url}/chat/completions"
gac/providers/moonshot.py CHANGED
@@ -1,48 +1,15 @@
1
1
  """Moonshot AI provider for gac."""
2
2
 
3
- import logging
4
- import os
3
+ from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
5
4
 
6
- import httpx
7
5
 
8
- from gac.constants import ProviderDefaults
9
- from gac.errors import AIError
10
- from gac.utils import get_ssl_verify
6
+ class MoonshotProvider(OpenAICompatibleProvider):
7
+ config = ProviderConfig(
8
+ name="Moonshot",
9
+ api_key_env="MOONSHOT_API_KEY",
10
+ base_url="https://api.moonshot.cn/v1",
11
+ )
11
12
 
12
- logger = logging.getLogger(__name__)
13
-
14
-
15
- def call_moonshot_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
16
- """Call Moonshot AI API directly."""
17
- api_key = os.getenv("MOONSHOT_API_KEY")
18
- if not api_key:
19
- raise AIError.authentication_error("MOONSHOT_API_KEY not found in environment variables")
20
-
21
- url = "https://api.moonshot.ai/v1/chat/completions"
22
- headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
23
-
24
- data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
25
-
26
- logger.debug(f"Calling Moonshot AI API with model={model}")
27
-
28
- try:
29
- response = httpx.post(
30
- url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
31
- )
32
- response.raise_for_status()
33
- response_data = response.json()
34
- content = response_data["choices"][0]["message"]["content"]
35
- if content is None:
36
- raise AIError.model_error("Moonshot AI API returned null content")
37
- if content == "":
38
- raise AIError.model_error("Moonshot AI API returned empty content")
39
- logger.debug("Moonshot AI API response received successfully")
40
- return content
41
- except httpx.HTTPStatusError as e:
42
- if e.response.status_code == 429:
43
- raise AIError.rate_limit_error(f"Moonshot AI API rate limit exceeded: {e.response.text}") from e
44
- raise AIError.model_error(f"Moonshot AI API error: {e.response.status_code} - {e.response.text}") from e
45
- except httpx.TimeoutException as e:
46
- raise AIError.timeout_error(f"Moonshot AI API request timed out: {str(e)}") from e
47
- except Exception as e:
48
- raise AIError.model_error(f"Error calling Moonshot AI API: {str(e)}") from e
13
+ def _get_api_url(self, model: str | None = None) -> str:
14
+ """Get Moonshot API URL with /chat/completions endpoint."""
15
+ return f"{self.config.base_url}/chat/completions"
gac/providers/ollama.py CHANGED
@@ -1,60 +1,73 @@
1
- """Ollama AI provider implementation."""
1
+ """Ollama API provider for gac."""
2
2
 
3
- import logging
4
3
  import os
4
+ from typing import Any
5
5
 
6
- import httpx
6
+ from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
7
7
 
8
- from gac.constants import ProviderDefaults
9
- from gac.errors import AIError
10
- from gac.utils import get_ssl_verify
11
8
 
12
- logger = logging.getLogger(__name__)
9
+ class OllamaProvider(OpenAICompatibleProvider):
10
+ """Ollama provider for local LLM models with optional authentication."""
13
11
 
12
+ config = ProviderConfig(
13
+ name="Ollama",
14
+ api_key_env="OLLAMA_API_KEY",
15
+ base_url="http://localhost:11434",
16
+ )
14
17
 
15
- def call_ollama_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
16
- """Call Ollama API directly."""
17
- api_url = os.getenv("OLLAMA_API_URL", "http://localhost:11434")
18
- api_key = os.getenv("OLLAMA_API_KEY")
18
+ def __init__(self, config: ProviderConfig):
19
+ """Initialize with configurable URL from environment."""
20
+ super().__init__(config)
21
+ # Allow URL override via environment variable
22
+ api_url = os.getenv("OLLAMA_API_URL", "http://localhost:11434")
23
+ self.config.base_url = api_url.rstrip("/")
19
24
 
20
- url = f"{api_url.rstrip('/')}/api/chat"
21
- data = {"model": model, "messages": messages, "temperature": temperature, "stream": False}
22
- headers = {"Content-Type": "application/json"}
23
- if api_key:
24
- headers["Authorization"] = f"Bearer {api_key}"
25
+ def _build_headers(self) -> dict[str, str]:
26
+ """Build headers with optional API key."""
27
+ headers = super()._build_headers()
28
+ api_key = os.getenv("OLLAMA_API_KEY")
29
+ if api_key:
30
+ headers["Authorization"] = f"Bearer {api_key}"
31
+ return headers
25
32
 
26
- logger.debug(f"Calling Ollama API with model={model}")
33
+ def _build_request_body(
34
+ self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
35
+ ) -> dict[str, Any]:
36
+ """Build Ollama request body with stream disabled."""
37
+ return {
38
+ "messages": messages,
39
+ "temperature": temperature,
40
+ "stream": False,
41
+ **kwargs,
42
+ }
27
43
 
28
- try:
29
- response = httpx.post(
30
- url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
31
- )
32
- response.raise_for_status()
33
- response_data = response.json()
44
+ def _get_api_url(self, model: str | None = None) -> str:
45
+ """Get API URL with /api/chat endpoint."""
46
+ return f"{self.config.base_url}/api/chat"
47
+
48
+ def _get_api_key(self) -> str:
49
+ """Get optional API key for Ollama."""
50
+ api_key = os.getenv(self.config.api_key_env)
51
+ if not api_key:
52
+ return "" # Optional API key
53
+ return api_key
54
+
55
+ def _parse_response(self, response: dict[str, Any]) -> str:
56
+ """Parse Ollama response with flexible format support."""
57
+ from gac.errors import AIError
34
58
 
35
- content = None
36
59
  # Handle different response formats from Ollama
37
- if "message" in response_data and "content" in response_data["message"]:
38
- content = response_data["message"]["content"]
39
- elif "response" in response_data:
40
- content = response_data["response"]
60
+ if "message" in response and "content" in response["message"]:
61
+ content = response["message"]["content"]
62
+ elif "response" in response:
63
+ content = response["response"]
41
64
  else:
42
- # Fallback: return the full response as string
43
- content = str(response_data)
65
+ # Fallback: try to serialize response
66
+ content = str(response) if response else ""
44
67
 
45
68
  if content is None:
46
69
  raise AIError.model_error("Ollama API returned null content")
47
70
  if content == "":
48
71
  raise AIError.model_error("Ollama API returned empty content")
49
- logger.debug("Ollama API response received successfully")
72
+
50
73
  return content
51
- except httpx.ConnectError as e:
52
- raise AIError.connection_error(f"Ollama connection failed. Make sure Ollama is running: {str(e)}") from e
53
- except httpx.HTTPStatusError as e:
54
- if e.response.status_code == 429:
55
- raise AIError.rate_limit_error(f"Ollama API rate limit exceeded: {e.response.text}") from e
56
- raise AIError.model_error(f"Ollama API error: {e.response.status_code} - {e.response.text}") from e
57
- except httpx.TimeoutException as e:
58
- raise AIError.timeout_error(f"Ollama API request timed out: {str(e)}") from e
59
- except Exception as e:
60
- raise AIError.model_error(f"Error calling Ollama API: {str(e)}") from e
gac/providers/openai.py CHANGED
@@ -1,48 +1,32 @@
1
1
  """OpenAI API provider for gac."""
2
2
 
3
- import logging
4
- import os
5
-
6
- import httpx
7
-
8
- from gac.constants import ProviderDefaults
9
- from gac.errors import AIError
10
- from gac.utils import get_ssl_verify
11
-
12
- logger = logging.getLogger(__name__)
13
-
14
-
15
- def call_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
16
- """Call OpenAI API directly."""
17
- api_key = os.getenv("OPENAI_API_KEY")
18
- if not api_key:
19
- raise AIError.authentication_error("OPENAI_API_KEY not found in environment variables")
20
-
21
- url = "https://api.openai.com/v1/chat/completions"
22
- headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
23
-
24
- data = {"model": model, "messages": messages, "temperature": temperature, "max_completion_tokens": max_tokens}
25
-
26
- logger.debug(f"Calling OpenAI API with model={model}")
27
-
28
- try:
29
- response = httpx.post(
30
- url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
31
- )
32
- response.raise_for_status()
33
- response_data = response.json()
34
- content = response_data["choices"][0]["message"]["content"]
35
- if content is None:
36
- raise AIError.model_error("OpenAI API returned null content")
37
- if content == "":
38
- raise AIError.model_error("OpenAI API returned empty content")
39
- logger.debug("OpenAI API response received successfully")
40
- return content
41
- except httpx.HTTPStatusError as e:
42
- if e.response.status_code == 429:
43
- raise AIError.rate_limit_error(f"OpenAI API rate limit exceeded: {e.response.text}") from e
44
- raise AIError.model_error(f"OpenAI API error: {e.response.status_code} - {e.response.text}") from e
45
- except httpx.TimeoutException as e:
46
- raise AIError.timeout_error(f"OpenAI API request timed out: {str(e)}") from e
47
- except Exception as e:
48
- raise AIError.model_error(f"Error calling OpenAI API: {str(e)}") from e
3
+ from typing import Any
4
+
5
+ from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
6
+
7
+
8
+ class OpenAIProvider(OpenAICompatibleProvider):
9
+ """OpenAI API provider with model-specific adjustments."""
10
+
11
+ config = ProviderConfig(name="OpenAI", api_key_env="OPENAI_API_KEY", base_url="https://api.openai.com/v1")
12
+
13
+ def _get_api_url(self, model: str | None = None) -> str:
14
+ """Get OpenAI API URL with /chat/completions endpoint."""
15
+ return f"{self.config.base_url}/chat/completions"
16
+
17
+ def _build_request_body(
18
+ self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
19
+ ) -> dict[str, Any]:
20
+ """Build OpenAI-specific request body."""
21
+ data = super()._build_request_body(messages, temperature, max_tokens, model, **kwargs)
22
+
23
+ # OpenAI uses max_completion_tokens instead of max_tokens
24
+ data["max_completion_tokens"] = data.pop("max_tokens")
25
+
26
+ # Handle optional parameters
27
+ if "response_format" in kwargs:
28
+ data["response_format"] = kwargs["response_format"]
29
+ if "stop" in kwargs:
30
+ data["stop"] = kwargs["stop"]
31
+
32
+ return data
@@ -1,68 +1,21 @@
1
1
  """OpenRouter API provider for gac."""
2
2
 
3
- import logging
4
- import os
3
+ from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
5
4
 
6
- import httpx
7
5
 
8
- from gac.constants import ProviderDefaults
9
- from gac.errors import AIError
10
- from gac.utils import get_ssl_verify
6
+ class OpenRouterProvider(OpenAICompatibleProvider):
7
+ config = ProviderConfig(
8
+ name="OpenRouter",
9
+ api_key_env="OPENROUTER_API_KEY",
10
+ base_url="https://openrouter.ai/api/v1",
11
+ )
11
12
 
12
- logger = logging.getLogger(__name__)
13
+ def _get_api_url(self, model: str | None = None) -> str:
14
+ """Get OpenRouter API URL with /chat/completions endpoint."""
15
+ return f"{self.config.base_url}/chat/completions"
13
16
 
14
-
15
- def call_openrouter_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
16
- """Call OpenRouter API directly."""
17
- api_key = os.getenv("OPENROUTER_API_KEY")
18
- if not api_key:
19
- raise AIError.authentication_error("OPENROUTER_API_KEY environment variable not set")
20
-
21
- url = "https://openrouter.ai/api/v1/chat/completions"
22
- headers = {
23
- "Content-Type": "application/json",
24
- "Authorization": f"Bearer {api_key}",
25
- }
26
-
27
- data = {
28
- "model": model,
29
- "messages": messages,
30
- "temperature": temperature,
31
- "max_tokens": max_tokens,
32
- }
33
-
34
- logger.debug(f"Calling OpenRouter API with model={model}")
35
-
36
- try:
37
- response = httpx.post(
38
- url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
39
- )
40
- response.raise_for_status()
41
- response_data = response.json()
42
- content = response_data["choices"][0]["message"]["content"]
43
- if content is None:
44
- raise AIError.model_error("OpenRouter API returned null content")
45
- if content == "":
46
- raise AIError.model_error("OpenRouter API returned empty content")
47
- logger.debug("OpenRouter API response received successfully")
48
- return content
49
- except httpx.HTTPStatusError as e:
50
- # Handle specific HTTP status codes
51
- status_code = e.response.status_code
52
- error_text = e.response.text
53
-
54
- # Rate limiting
55
- if status_code == 429:
56
- raise AIError.rate_limit_error(f"OpenRouter API rate limit exceeded: {error_text}") from e
57
- # Service unavailable
58
- elif status_code in (502, 503):
59
- raise AIError.connection_error(f"OpenRouter API service unavailable: {status_code} - {error_text}") from e
60
- # Other HTTP errors
61
- else:
62
- raise AIError.model_error(f"OpenRouter API error: {status_code} - {error_text}") from e
63
- except httpx.ConnectError as e:
64
- raise AIError.connection_error(f"OpenRouter API connection error: {str(e)}") from e
65
- except httpx.TimeoutException as e:
66
- raise AIError.timeout_error(f"OpenRouter API request timed out: {str(e)}") from e
67
- except Exception as e:
68
- raise AIError.model_error(f"Error calling OpenRouter API: {str(e)}") from e
17
+ def _build_headers(self) -> dict[str, str]:
18
+ """Build headers with OpenRouter-style authorization and HTTP-Referer."""
19
+ headers = super()._build_headers()
20
+ headers["HTTP-Referer"] = "https://github.com/codeindolence/gac"
21
+ return headers
@@ -0,0 +1,71 @@
1
+ """Provider protocol for type-safe AI provider implementations."""
2
+
3
+ from typing import Any, Protocol, runtime_checkable
4
+
5
+
6
+ @runtime_checkable
7
+ class ProviderProtocol(Protocol):
8
+ """Protocol defining the contract for AI providers.
9
+
10
+ All providers must implement this protocol to ensure consistent
11
+ interface and type safety across the codebase.
12
+
13
+ This protocol supports both class-based providers (with methods)
14
+ and function-based providers (used in the registry).
15
+ """
16
+
17
+ def generate(
18
+ self, model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int, **kwargs: Any
19
+ ) -> str:
20
+ """Generate text using the AI model.
21
+
22
+ Args:
23
+ model: The model name to use
24
+ messages: List of message dictionaries in chat format
25
+ temperature: Temperature parameter (0.0-2.0)
26
+ max_tokens: Maximum tokens in response
27
+ **kwargs: Additional provider-specific parameters
28
+
29
+ Returns:
30
+ Generated text content
31
+
32
+ Raises:
33
+ AIError: For any generation-related errors
34
+ """
35
+ ...
36
+
37
+ @property
38
+ def name(self) -> str:
39
+ """Get the provider name.
40
+
41
+ Returns:
42
+ Provider name identifier
43
+ """
44
+ ...
45
+
46
+ @property
47
+ def api_key_env(self) -> str:
48
+ """Get the environment variable name for the API key.
49
+
50
+ Returns:
51
+ Environment variable name
52
+ """
53
+ ...
54
+
55
+ @property
56
+ def base_url(self) -> str:
57
+ """Get the base URL for the API.
58
+
59
+ Returns:
60
+ Base API URL
61
+ """
62
+ ...
63
+
64
+ @property
65
+ def timeout(self) -> int:
66
+ """Get the timeout in seconds.
67
+
68
+ Returns:
69
+ Timeout in seconds
70
+ """
71
+ ...