xai-review 0.23.0__py3-none-any.whl → 0.25.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of xai-review might be problematic.

Files changed (56)
  1. ai_review/clients/claude/client.py +2 -1
  2. ai_review/clients/claude/types.py +8 -0
  3. ai_review/clients/gemini/client.py +2 -1
  4. ai_review/clients/gemini/types.py +8 -0
  5. ai_review/clients/ollama/__init__.py +0 -0
  6. ai_review/clients/ollama/client.py +41 -0
  7. ai_review/clients/ollama/schema.py +47 -0
  8. ai_review/clients/ollama/types.py +8 -0
  9. ai_review/clients/openai/client.py +2 -1
  10. ai_review/clients/openai/types.py +8 -0
  11. ai_review/config.py +2 -2
  12. ai_review/libs/config/http.py +4 -1
  13. ai_review/libs/config/llm/__init__.py +0 -0
  14. ai_review/libs/config/{llm.py → llm/base.py} +11 -4
  15. ai_review/libs/config/llm/claude.py +10 -0
  16. ai_review/libs/config/llm/gemini.py +10 -0
  17. ai_review/libs/config/llm/meta.py +7 -0
  18. ai_review/libs/config/llm/ollama.py +14 -0
  19. ai_review/libs/config/llm/openai.py +10 -0
  20. ai_review/libs/config/vcs/__init__.py +0 -0
  21. ai_review/libs/config/{vcs.py → vcs/base.py} +2 -2
  22. ai_review/libs/config/{github.py → vcs/github.py} +2 -2
  23. ai_review/libs/config/{gitlab.py → vcs/gitlab.py} +2 -2
  24. ai_review/libs/constants/llm_provider.py +1 -0
  25. ai_review/libs/http/transports/retry.py +1 -1
  26. ai_review/services/llm/factory.py +3 -0
  27. ai_review/services/llm/ollama/__init__.py +0 -0
  28. ai_review/services/llm/ollama/client.py +34 -0
  29. ai_review/tests/fixtures/clients/claude.py +47 -2
  30. ai_review/tests/fixtures/clients/gemini.py +54 -2
  31. ai_review/tests/fixtures/clients/github.py +2 -2
  32. ai_review/tests/fixtures/clients/gitlab.py +2 -2
  33. ai_review/tests/fixtures/clients/ollama.py +65 -0
  34. ai_review/tests/fixtures/clients/openai.py +50 -2
  35. ai_review/tests/suites/clients/ollama/__init__.py +0 -0
  36. ai_review/tests/suites/clients/ollama/test_client.py +12 -0
  37. ai_review/tests/suites/clients/ollama/test_schema.py +65 -0
  38. ai_review/tests/suites/services/cost/test_service.py +1 -1
  39. ai_review/tests/suites/services/llm/claude/__init__.py +0 -0
  40. ai_review/tests/suites/services/llm/claude/test_client.py +22 -0
  41. ai_review/tests/suites/services/llm/gemini/__init__.py +0 -0
  42. ai_review/tests/suites/services/llm/gemini/test_client.py +22 -0
  43. ai_review/tests/suites/services/llm/ollama/__init__.py +0 -0
  44. ai_review/tests/suites/services/llm/ollama/test_client.py +22 -0
  45. ai_review/tests/suites/services/llm/openai/__init__.py +0 -0
  46. ai_review/tests/suites/services/llm/openai/test_client.py +22 -0
  47. ai_review/tests/suites/services/llm/test_factory.py +8 -1
  48. {xai_review-0.23.0.dist-info → xai_review-0.25.0.dist-info}/METADATA +8 -4
  49. {xai_review-0.23.0.dist-info → xai_review-0.25.0.dist-info}/RECORD +53 -28
  50. ai_review/libs/config/claude.py +0 -13
  51. ai_review/libs/config/gemini.py +0 -13
  52. ai_review/libs/config/openai.py +0 -13
  53. {xai_review-0.23.0.dist-info → xai_review-0.25.0.dist-info}/WHEEL +0 -0
  54. {xai_review-0.23.0.dist-info → xai_review-0.25.0.dist-info}/entry_points.txt +0 -0
  55. {xai_review-0.23.0.dist-info → xai_review-0.25.0.dist-info}/licenses/LICENSE +0 -0
  56. {xai_review-0.23.0.dist-info → xai_review-0.25.0.dist-info}/top_level.txt +0 -0

ai_review/clients/claude/client.py CHANGED
@@ -1,6 +1,7 @@
  from httpx import AsyncClient, Response, AsyncHTTPTransport
 
  from ai_review.clients.claude.schema import ClaudeChatRequestSchema, ClaudeChatResponseSchema
+ from ai_review.clients.claude.types import ClaudeHTTPClientProtocol
  from ai_review.config import settings
  from ai_review.libs.http.client import HTTPClient
  from ai_review.libs.http.event_hooks.logger import LoggerEventHook
@@ -13,7 +14,7 @@ class ClaudeHTTPClientError(HTTPClientError):
      pass
 
 
- class ClaudeHTTPClient(HTTPClient):
+ class ClaudeHTTPClient(HTTPClient, ClaudeHTTPClientProtocol):
      @handle_http_error(client="ClaudeHTTPClient", exception=ClaudeHTTPClientError)
      async def chat_api(self, request: ClaudeChatRequestSchema) -> Response:
          return await self.post("/v1/messages", json=request.model_dump())

ai_review/clients/claude/types.py ADDED
@@ -0,0 +1,8 @@
+ from typing import Protocol
+
+ from ai_review.clients.claude.schema import ClaudeChatRequestSchema, ClaudeChatResponseSchema
+
+
+ class ClaudeHTTPClientProtocol(Protocol):
+     async def chat(self, request: ClaudeChatRequestSchema) -> ClaudeChatResponseSchema:
+         ...
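
The new `types.py` modules define `typing.Protocol` interfaces for the HTTP clients, so the service layer and tests can depend on the `chat` signature rather than on a concrete client class. A minimal sketch of the structural typing, using a hypothetical `FakeClient` (the protocol and schemas are from this diff; the fake is illustrative):

from ai_review.clients.claude.schema import ClaudeChatRequestSchema, ClaudeChatResponseSchema
from ai_review.clients.claude.types import ClaudeHTTPClientProtocol


class FakeClient:
    # No inheritance needed: any object with a matching async `chat` satisfies the protocol.
    async def chat(self, request: ClaudeChatRequestSchema) -> ClaudeChatResponseSchema:
        raise NotImplementedError


client: ClaudeHTTPClientProtocol = FakeClient()  # accepted by a structural type checker

This is the same pattern the reworked test fixtures later in this diff rely on.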

ai_review/clients/gemini/client.py CHANGED
@@ -1,6 +1,7 @@
  from httpx import Response, AsyncHTTPTransport, AsyncClient
 
  from ai_review.clients.gemini.schema import GeminiChatRequestSchema, GeminiChatResponseSchema
+ from ai_review.clients.gemini.types import GeminiHTTPClientProtocol
  from ai_review.config import settings
  from ai_review.libs.http.client import HTTPClient
  from ai_review.libs.http.event_hooks.logger import LoggerEventHook
@@ -13,7 +14,7 @@ class GeminiHTTPClientError(HTTPClientError):
      pass
 
 
- class GeminiHTTPClient(HTTPClient):
+ class GeminiHTTPClient(HTTPClient, GeminiHTTPClientProtocol):
      @handle_http_error(client="GeminiHTTPClient", exception=GeminiHTTPClientError)
      async def chat_api(self, request: GeminiChatRequestSchema) -> Response:
          meta = settings.llm.meta

ai_review/clients/gemini/types.py ADDED
@@ -0,0 +1,8 @@
+ from typing import Protocol
+
+ from ai_review.clients.gemini.schema import GeminiChatRequestSchema, GeminiChatResponseSchema
+
+
+ class GeminiHTTPClientProtocol(Protocol):
+     async def chat(self, request: GeminiChatRequestSchema) -> GeminiChatResponseSchema:
+         ...

ai_review/clients/ollama/__init__.py ADDED
File without changes

ai_review/clients/ollama/client.py ADDED
@@ -0,0 +1,41 @@
+ from httpx import AsyncClient, Response, AsyncHTTPTransport
+
+ from ai_review.clients.ollama.schema import OllamaChatRequestSchema, OllamaChatResponseSchema
+ from ai_review.clients.ollama.types import OllamaHTTPClientProtocol
+ from ai_review.config import settings
+ from ai_review.libs.http.client import HTTPClient
+ from ai_review.libs.http.event_hooks.logger import LoggerEventHook
+ from ai_review.libs.http.handlers import HTTPClientError, handle_http_error
+ from ai_review.libs.http.transports.retry import RetryTransport
+ from ai_review.libs.logger import get_logger
+
+
+ class OllamaHTTPClientError(HTTPClientError):
+     pass
+
+
+ class OllamaHTTPClient(HTTPClient, OllamaHTTPClientProtocol):
+     @handle_http_error(client="OllamaHTTPClient", exception=OllamaHTTPClientError)
+     async def chat_api(self, request: OllamaChatRequestSchema) -> Response:
+         return await self.post("/api/chat", json=request.model_dump())
+
+     async def chat(self, request: OllamaChatRequestSchema) -> OllamaChatResponseSchema:
+         response = await self.chat_api(request)
+         return OllamaChatResponseSchema.model_validate_json(response.text)
+
+
+ def get_ollama_http_client() -> OllamaHTTPClient:
+     logger = get_logger("OLLAMA_HTTP_CLIENT")
+     logger_event_hook = LoggerEventHook(logger=logger)
+     retry_transport = RetryTransport(logger=logger, transport=AsyncHTTPTransport())
+
+     client = AsyncClient(
+         timeout=settings.llm.http_client.timeout,
+         base_url=settings.llm.http_client.api_url_value,
+         transport=retry_transport,
+         event_hooks={
+             "request": [logger_event_hook.request],
+             "response": [logger_event_hook.response],
+         },
+     )
+     return OllamaHTTPClient(client=client)
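
`get_ollama_http_client` reuses the shared `HTTPClient` plumbing (logging event hooks, retry transport) and takes its base URL and timeout from `settings.llm.http_client`. A minimal usage sketch, assuming the OLLAMA provider is configured and a runtime is reachable at the configured `api_url` (typically `http://localhost:11434`):

import asyncio

from ai_review.clients.ollama.client import get_ollama_http_client
from ai_review.clients.ollama.schema import OllamaChatRequestSchema, OllamaMessageSchema


async def main() -> None:
    client = get_ollama_http_client()
    request = OllamaChatRequestSchema(
        model="llama2",
        messages=[OllamaMessageSchema(role="user", content="Say hello")],
    )
    # POSTs to /api/chat and parses the body into OllamaChatResponseSchema
    response = await client.chat(request)
    print(response.first_text)


asyncio.run(main())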

ai_review/clients/ollama/schema.py ADDED
@@ -0,0 +1,47 @@
+ from typing import Literal
+
+ from pydantic import BaseModel, Field
+
+
+ class OllamaMessageSchema(BaseModel):
+     role: Literal["system", "user", "assistant"]
+     content: str
+
+
+ class OllamaOptionsSchema(BaseModel):
+     stop: list[str] | None = None
+     seed: int | None = None
+     top_p: float | None = Field(default=None, ge=0.0, le=1.0)
+     temperature: float | None = Field(default=None, ge=0.0, le=2.0)
+     num_predict: int | None = Field(default=None, ge=1)
+     repeat_penalty: float | None = Field(default=None, ge=0.0)
+
+
+ class OllamaChatRequestSchema(BaseModel):
+     model: str
+     stream: bool = False
+     options: OllamaOptionsSchema | None = None
+     messages: list[OllamaMessageSchema]
+
+
+ class OllamaUsageSchema(BaseModel):
+     prompt_tokens: int | None = None
+     completion_tokens: int | None = None
+
+     @property
+     def total_tokens(self) -> int | None:
+         if (self.prompt_tokens is not None) and (self.completion_tokens is not None):
+             return self.prompt_tokens + self.completion_tokens
+
+         return None
+
+
+ class OllamaChatResponseSchema(BaseModel):
+     done: bool = Field(default=True)
+     usage: OllamaUsageSchema | None = None
+     model: str
+     message: OllamaMessageSchema
+
+     @property
+     def first_text(self) -> str:
+         return (self.message.content or "").strip()
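
Because `chat_api` posts `request.model_dump()`, these models define the exact JSON payload sent to `/api/chat`. A quick sketch of the serialized shape (values are illustrative):

from ai_review.clients.ollama.schema import (
    OllamaChatRequestSchema,
    OllamaMessageSchema,
    OllamaOptionsSchema,
)

request = OllamaChatRequestSchema(
    model="llama2",
    options=OllamaOptionsSchema(temperature=0.3, num_predict=5000),
    messages=[OllamaMessageSchema(role="user", content="hi")],
)
print(request.model_dump())
# {'model': 'llama2', 'stream': False,
#  'options': {'stop': None, 'seed': None, 'top_p': None, 'temperature': 0.3,
#              'num_predict': 5000, 'repeat_penalty': None},
#  'messages': [{'role': 'user', 'content': 'hi'}]}

Note that `total_tokens` only reports a sum when both counters are present, so a response without usage data degrades to `None` rather than a misleading count.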

ai_review/clients/ollama/types.py ADDED
@@ -0,0 +1,8 @@
+ from typing import Protocol
+
+ from ai_review.clients.ollama.schema import OllamaChatRequestSchema, OllamaChatResponseSchema
+
+
+ class OllamaHTTPClientProtocol(Protocol):
+     async def chat(self, request: OllamaChatRequestSchema) -> OllamaChatResponseSchema:
+         ...

ai_review/clients/openai/client.py CHANGED
@@ -1,6 +1,7 @@
  from httpx import Response, AsyncHTTPTransport, AsyncClient
 
  from ai_review.clients.openai.schema import OpenAIChatRequestSchema, OpenAIChatResponseSchema
+ from ai_review.clients.openai.types import OpenAIHTTPClientProtocol
  from ai_review.config import settings
  from ai_review.libs.http.client import HTTPClient
  from ai_review.libs.http.event_hooks.logger import LoggerEventHook
@@ -13,7 +14,7 @@ class OpenAIHTTPClientError(HTTPClientError):
      pass
 
 
- class OpenAIHTTPClient(HTTPClient):
+ class OpenAIHTTPClient(HTTPClient, OpenAIHTTPClientProtocol):
      @handle_http_error(client='OpenAIHTTPClient', exception=OpenAIHTTPClientError)
      async def chat_api(self, request: OpenAIChatRequestSchema) -> Response:
          return await self.post("/chat/completions", json=request.model_dump())

ai_review/clients/openai/types.py ADDED
@@ -0,0 +1,8 @@
+ from typing import Protocol
+
+ from ai_review.clients.openai.schema import OpenAIChatRequestSchema, OpenAIChatResponseSchema
+
+
+ class OpenAIHTTPClientProtocol(Protocol):
+     async def chat(self, request: OpenAIChatRequestSchema) -> OpenAIChatResponseSchema:
+         ...
ai_review/config.py CHANGED
@@ -13,11 +13,11 @@ from ai_review.libs.config.base import (
      get_json_config_file_or_default
  )
  from ai_review.libs.config.core import CoreConfig
- from ai_review.libs.config.llm import LLMConfig
+ from ai_review.libs.config.llm.base import LLMConfig
  from ai_review.libs.config.logger import LoggerConfig
  from ai_review.libs.config.prompt import PromptConfig
  from ai_review.libs.config.review import ReviewConfig
- from ai_review.libs.config.vcs import VCSConfig
+ from ai_review.libs.config.vcs.base import VCSConfig
 
 
  class Settings(BaseSettings):

ai_review/libs/config/http.py CHANGED
@@ -4,12 +4,15 @@ from pydantic import BaseModel, HttpUrl, SecretStr
  class HTTPClientConfig(BaseModel):
      timeout: float = 120
      api_url: HttpUrl
-     api_token: SecretStr
 
      @property
      def api_url_value(self) -> str:
          return str(self.api_url)
 
+
+ class HTTPClientWithTokenConfig(HTTPClientConfig):
+     api_token: SecretStr
+
      @property
      def api_token_value(self) -> str:
          return self.api_token.get_secret_value()
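
The split means token-less backends such as Ollama can use `HTTPClientConfig` directly, while remote APIs keep the mandatory `api_token` via `HTTPClientWithTokenConfig`. A small sketch of the distinction (values are illustrative):

from pydantic import HttpUrl, SecretStr

from ai_review.libs.config.http import HTTPClientConfig, HTTPClientWithTokenConfig

# A local runtime needs no credential:
local = HTTPClientConfig(api_url=HttpUrl("http://localhost:11434"))

# Remote providers still require a token:
remote = HTTPClientWithTokenConfig(
    api_url=HttpUrl("https://api.openai.com/v1"),
    api_token=SecretStr("sk-example"),
)
print(remote.api_token_value)  # unwraps the SecretStr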

ai_review/libs/config/llm/__init__.py ADDED
File without changes

ai_review/libs/config/{llm.py → llm/base.py} RENAMED
@@ -4,9 +4,10 @@ from typing import Annotated, Literal
  import yaml
  from pydantic import BaseModel, Field, FilePath
 
- from ai_review.libs.config.claude import ClaudeHTTPClientConfig, ClaudeMetaConfig
- from ai_review.libs.config.gemini import GeminiHTTPClientConfig, GeminiMetaConfig
- from ai_review.libs.config.openai import OpenAIHTTPClientConfig, OpenAIMetaConfig
+ from ai_review.libs.config.llm.claude import ClaudeHTTPClientConfig, ClaudeMetaConfig
+ from ai_review.libs.config.llm.gemini import GeminiHTTPClientConfig, GeminiMetaConfig
+ from ai_review.libs.config.llm.ollama import OllamaHTTPClientConfig, OllamaMetaConfig
+ from ai_review.libs.config.llm.openai import OpenAIHTTPClientConfig, OpenAIMetaConfig
  from ai_review.libs.constants.llm_provider import LLMProvider
  from ai_review.libs.resources import load_resource
 
@@ -55,7 +56,13 @@ class ClaudeLLMConfig(LLMConfigBase):
      http_client: ClaudeHTTPClientConfig
 
 
+ class OllamaLLMConfig(LLMConfigBase):
+     meta: OllamaMetaConfig
+     provider: Literal[LLMProvider.OLLAMA]
+     http_client: OllamaHTTPClientConfig
+
+
  LLMConfig = Annotated[
-     OpenAILLMConfig | GeminiLLMConfig | ClaudeLLMConfig,
+     OpenAILLMConfig | GeminiLLMConfig | ClaudeLLMConfig | OllamaLLMConfig,
      Field(discriminator="provider")
  ]
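
`LLMConfig` stays a pydantic discriminated union keyed on `provider`, so parsed settings resolve to the matching config class automatically. A minimal validation sketch, assuming pydantic v2's `TypeAdapter` (values are illustrative; real values come from YAML, JSON, or ENV):

from pydantic import TypeAdapter

from ai_review.libs.config.llm.base import LLMConfig, OllamaLLMConfig

config = TypeAdapter(LLMConfig).validate_python({
    "provider": "OLLAMA",
    "meta": {"model": "llama2"},
    "http_client": {"api_url": "http://localhost:11434"},
})
assert isinstance(config, OllamaLLMConfig)  # selected via the "provider" discriminator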

ai_review/libs/config/llm/claude.py ADDED
@@ -0,0 +1,10 @@
+ from ai_review.libs.config.http import HTTPClientWithTokenConfig
+ from ai_review.libs.config.llm.meta import LLMMetaConfig
+
+
+ class ClaudeMetaConfig(LLMMetaConfig):
+     model: str = "claude-3-sonnet"
+
+
+ class ClaudeHTTPClientConfig(HTTPClientWithTokenConfig):
+     api_version: str = "2023-06-01"

ai_review/libs/config/llm/gemini.py ADDED
@@ -0,0 +1,10 @@
+ from ai_review.libs.config.http import HTTPClientWithTokenConfig
+ from ai_review.libs.config.llm.meta import LLMMetaConfig
+
+
+ class GeminiMetaConfig(LLMMetaConfig):
+     model: str = "gemini-2.0-pro"
+
+
+ class GeminiHTTPClientConfig(HTTPClientWithTokenConfig):
+     pass

ai_review/libs/config/llm/meta.py ADDED
@@ -0,0 +1,7 @@
+ from pydantic import BaseModel, Field
+
+
+ class LLMMetaConfig(BaseModel):
+     model: str
+     max_tokens: int = Field(default=5000, ge=1)
+     temperature: float = Field(default=0.3, ge=0.0, le=2.0)

ai_review/libs/config/llm/ollama.py ADDED
@@ -0,0 +1,14 @@
+ from ai_review.libs.config.http import HTTPClientConfig
+ from ai_review.libs.config.llm.meta import LLMMetaConfig
+
+
+ class OllamaMetaConfig(LLMMetaConfig):
+     stop: list[str] | None = None
+     seed: int | None = None
+     model: str = "llama2"
+     top_p: float | None = None
+     repeat_penalty: float | None = None
+
+
+ class OllamaHTTPClientConfig(HTTPClientConfig):
+     pass

ai_review/libs/config/llm/openai.py ADDED
@@ -0,0 +1,10 @@
+ from ai_review.libs.config.http import HTTPClientWithTokenConfig
+ from ai_review.libs.config.llm.meta import LLMMetaConfig
+
+
+ class OpenAIMetaConfig(LLMMetaConfig):
+     model: str = "gpt-4o-mini"
+
+
+ class OpenAIHTTPClientConfig(HTTPClientWithTokenConfig):
+     pass

ai_review/libs/config/vcs/__init__.py ADDED
File without changes

ai_review/libs/config/{vcs.py → vcs/base.py} RENAMED
@@ -2,8 +2,8 @@ from typing import Annotated, Literal
 
  from pydantic import BaseModel, Field
 
- from ai_review.libs.config.github import GitHubPipelineConfig, GitHubHTTPClientConfig
- from ai_review.libs.config.gitlab import GitLabPipelineConfig, GitLabHTTPClientConfig
+ from ai_review.libs.config.vcs.github import GitHubPipelineConfig, GitHubHTTPClientConfig
+ from ai_review.libs.config.vcs.gitlab import GitLabPipelineConfig, GitLabHTTPClientConfig
  from ai_review.libs.constants.vcs_provider import VCSProvider
 
 

ai_review/libs/config/{github.py → vcs/github.py} RENAMED
@@ -1,6 +1,6 @@
  from pydantic import BaseModel
 
- from ai_review.libs.config.http import HTTPClientConfig
+ from ai_review.libs.config.http import HTTPClientWithTokenConfig
 
 
  class GitHubPipelineConfig(BaseModel):
@@ -9,5 +9,5 @@ class GitHubPipelineConfig(BaseModel):
      pull_number: str
 
 
- class GitHubHTTPClientConfig(HTTPClientConfig):
+ class GitHubHTTPClientConfig(HTTPClientWithTokenConfig):
      pass

ai_review/libs/config/{gitlab.py → vcs/gitlab.py} RENAMED
@@ -1,6 +1,6 @@
  from pydantic import BaseModel
 
- from ai_review.libs.config.http import HTTPClientConfig
+ from ai_review.libs.config.http import HTTPClientWithTokenConfig
 
 
  class GitLabPipelineConfig(BaseModel):
@@ -8,5 +8,5 @@ class GitLabPipelineConfig(BaseModel):
      merge_request_id: str
 
 
- class GitLabHTTPClientConfig(HTTPClientConfig):
+ class GitLabHTTPClientConfig(HTTPClientWithTokenConfig):
      pass

ai_review/libs/constants/llm_provider.py CHANGED
@@ -5,3 +5,4 @@ class LLMProvider(StrEnum):
      OPENAI = "OPENAI"
      GEMINI = "GEMINI"
      CLAUDE = "CLAUDE"
+     OLLAMA = "OLLAMA"

ai_review/libs/http/transports/retry.py CHANGED
@@ -36,7 +36,7 @@ class RetryTransport(AsyncBaseTransport):
              return last_response
 
          self.logger.warning(
-             f"Attempt {attempt}/{self.max_retries} failed "
+             f"Attempt {attempt + 1}/{self.max_retries} failed "
              f"with status={last_response.status_code} for {request.method} {request.url}. "
              f"Retrying in {self.retry_delay:.1f}s..."
          )
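
The one-character fix corrects an off-by-one in the log message: with a zero-based loop counter, the first failed attempt was previously reported as "Attempt 0/...". A sketch of the assumed pattern (the loop variable and retry count are illustrative):

max_retries = 3
for attempt in range(max_retries):
    # old: f"Attempt {attempt}/{max_retries}"      -> "Attempt 0/3" on the first failure
    # new: f"Attempt {attempt + 1}/{max_retries}"  -> "Attempt 1/3"
    pass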

ai_review/services/llm/factory.py CHANGED
@@ -2,6 +2,7 @@ from ai_review.config import settings
  from ai_review.libs.constants.llm_provider import LLMProvider
  from ai_review.services.llm.claude.client import ClaudeLLMClient
  from ai_review.services.llm.gemini.client import GeminiLLMClient
+ from ai_review.services.llm.ollama.client import OllamaLLMClient
  from ai_review.services.llm.openai.client import OpenAILLMClient
  from ai_review.services.llm.types import LLMClientProtocol
 
@@ -14,5 +15,7 @@ def get_llm_client() -> LLMClientProtocol:
          return GeminiLLMClient()
      case LLMProvider.CLAUDE:
          return ClaudeLLMClient()
+     case LLMProvider.OLLAMA:
+         return OllamaLLMClient()
      case _:
          raise ValueError(f"Unsupported LLM provider: {settings.llm.provider}")

ai_review/services/llm/ollama/__init__.py ADDED
File without changes

ai_review/services/llm/ollama/client.py ADDED
@@ -0,0 +1,34 @@
+ from ai_review.clients.ollama.client import get_ollama_http_client
+ from ai_review.clients.ollama.schema import OllamaChatRequestSchema, OllamaMessageSchema, OllamaOptionsSchema
+ from ai_review.config import settings
+ from ai_review.services.llm.types import LLMClientProtocol, ChatResultSchema
+
+
+ class OllamaLLMClient(LLMClientProtocol):
+     def __init__(self):
+         self.http_client = get_ollama_http_client()
+
+     async def chat(self, prompt: str, prompt_system: str) -> ChatResultSchema:
+         meta = settings.llm.meta
+         request = OllamaChatRequestSchema(
+             model=meta.model,
+             options=OllamaOptionsSchema(
+                 stop=meta.stop,
+                 seed=meta.seed,
+                 top_p=meta.top_p,
+                 temperature=meta.temperature,
+                 num_predict=meta.max_tokens,
+                 repeat_penalty=meta.repeat_penalty,
+             ),
+             messages=[
+                 OllamaMessageSchema(role="system", content=prompt_system),
+                 OllamaMessageSchema(role="user", content=prompt),
+             ],
+         )
+         response = await self.http_client.chat(request)
+         return ChatResultSchema(
+             text=response.first_text,
+             total_tokens=response.usage.total_tokens if response.usage else None,
+             prompt_tokens=response.usage.prompt_tokens if response.usage else None,
+             completion_tokens=response.usage.completion_tokens if response.usage else None,
+         )
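
`OllamaLLMClient` maps the generic prompt pair onto Ollama's chat schema and normalizes the reply into the provider-agnostic `ChatResultSchema`, including the optional token counts. A usage sketch, assuming `settings.llm` is configured for the OLLAMA provider and the runtime is reachable:

import asyncio

from ai_review.services.llm.ollama.client import OllamaLLMClient


async def main() -> None:
    client = OllamaLLMClient()
    result = await client.chat(prompt="Review this diff", prompt_system="You are a code reviewer.")
    print(result.text, result.total_tokens)


asyncio.run(main())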

ai_review/tests/fixtures/clients/claude.py CHANGED
@@ -1,10 +1,55 @@
+ from typing import Any
+
  import pytest
  from pydantic import HttpUrl, SecretStr
 
+ from ai_review.clients.claude.schema import (
+     ClaudeUsageSchema,
+     ClaudeContentSchema,
+     ClaudeChatRequestSchema,
+     ClaudeChatResponseSchema,
+ )
+ from ai_review.clients.claude.types import ClaudeHTTPClientProtocol
  from ai_review.config import settings
- from ai_review.libs.config.claude import ClaudeMetaConfig, ClaudeHTTPClientConfig
- from ai_review.libs.config.llm import ClaudeLLMConfig
+ from ai_review.libs.config.llm.base import ClaudeLLMConfig
+ from ai_review.libs.config.llm.claude import ClaudeMetaConfig, ClaudeHTTPClientConfig
  from ai_review.libs.constants.llm_provider import LLMProvider
+ from ai_review.services.llm.claude.client import ClaudeLLMClient
+
+
+ class FakeClaudeHTTPClient(ClaudeHTTPClientProtocol):
+     def __init__(self, responses: dict[str, Any] | None = None) -> None:
+         self.calls: list[tuple[str, dict]] = []
+         self.responses = responses or {}
+
+     async def chat(self, request: ClaudeChatRequestSchema) -> ClaudeChatResponseSchema:
+         self.calls.append(("chat", {"request": request}))
+         return self.responses.get(
+             "chat",
+             ClaudeChatResponseSchema(
+                 id="fake-id",
+                 role="assistant",
+                 usage=ClaudeUsageSchema(input_tokens=5, output_tokens=7),
+                 content=[ClaudeContentSchema(type="text", text="FAKE_CLAUDE_RESPONSE")],
+             ),
+         )
+
+
+ @pytest.fixture
+ def fake_claude_http_client():
+     return FakeClaudeHTTPClient()
+
+
+ @pytest.fixture
+ def claude_llm_client(
+     monkeypatch: pytest.MonkeyPatch,
+     fake_claude_http_client: FakeClaudeHTTPClient
+ ) -> ClaudeLLMClient:
+     monkeypatch.setattr(
+         "ai_review.services.llm.claude.client.get_claude_http_client",
+         lambda: fake_claude_http_client,
+     )
+     return ClaudeLLMClient()
 
 
  @pytest.fixture

ai_review/tests/fixtures/clients/gemini.py CHANGED
@@ -1,10 +1,62 @@
+ from typing import Any
+
  import pytest
  from pydantic import HttpUrl, SecretStr
 
+ from ai_review.clients.gemini.schema import (
+     GeminiPartSchema,
+     GeminiUsageSchema,
+     GeminiContentSchema,
+     GeminiCandidateSchema,
+     GeminiChatRequestSchema,
+     GeminiChatResponseSchema,
+ )
+ from ai_review.clients.gemini.types import GeminiHTTPClientProtocol
  from ai_review.config import settings
- from ai_review.libs.config.gemini import GeminiMetaConfig, GeminiHTTPClientConfig
- from ai_review.libs.config.llm import GeminiLLMConfig
+ from ai_review.libs.config.llm.base import GeminiLLMConfig
+ from ai_review.libs.config.llm.gemini import GeminiMetaConfig, GeminiHTTPClientConfig
  from ai_review.libs.constants.llm_provider import LLMProvider
+ from ai_review.services.llm.gemini.client import GeminiLLMClient
+
+
+ class FakeGeminiHTTPClient(GeminiHTTPClientProtocol):
+     def __init__(self, responses: dict[str, Any] | None = None) -> None:
+         self.calls: list[tuple[str, dict]] = []
+         self.responses = responses or {}
+
+     async def chat(self, request: GeminiChatRequestSchema) -> GeminiChatResponseSchema:
+         self.calls.append(("chat", {"request": request}))
+         return self.responses.get(
+             "chat",
+             GeminiChatResponseSchema(
+                 usage=GeminiUsageSchema(prompt_token_count=2, total_tokens_count=10),
+                 candidates=[
+                     GeminiCandidateSchema(
+                         content=GeminiContentSchema(
+                             role="model",
+                             parts=[GeminiPartSchema(text="FAKE_GEMINI_RESPONSE")]
+                         )
+                     )
+                 ],
+             ),
+         )
+
+
+ @pytest.fixture
+ def fake_gemini_http_client() -> FakeGeminiHTTPClient:
+     return FakeGeminiHTTPClient()
+
+
+ @pytest.fixture
+ def gemini_llm_client(
+     monkeypatch: pytest.MonkeyPatch,
+     fake_gemini_http_client: FakeGeminiHTTPClient
+ ) -> GeminiLLMClient:
+     monkeypatch.setattr(
+         "ai_review.services.llm.gemini.client.get_gemini_http_client",
+         lambda: fake_gemini_http_client,
+     )
+     return GeminiLLMClient()
 
 
  @pytest.fixture

ai_review/tests/fixtures/clients/github.py CHANGED
@@ -18,8 +18,8 @@ from ai_review.clients.github.pr.schema.pull_request import (
  from ai_review.clients.github.pr.schema.reviews import GitHubGetPRReviewsResponseSchema, GitHubPRReviewSchema
  from ai_review.clients.github.pr.types import GitHubPullRequestsHTTPClientProtocol
  from ai_review.config import settings
- from ai_review.libs.config.github import GitHubPipelineConfig, GitHubHTTPClientConfig
- from ai_review.libs.config.vcs import GitHubVCSConfig
+ from ai_review.libs.config.vcs.base import GitHubVCSConfig
+ from ai_review.libs.config.vcs.github import GitHubPipelineConfig, GitHubHTTPClientConfig
  from ai_review.libs.constants.vcs_provider import VCSProvider
  from ai_review.services.vcs.github.client import GitHubVCSClient
 

ai_review/tests/fixtures/clients/gitlab.py CHANGED
@@ -20,8 +20,8 @@ from ai_review.clients.gitlab.mr.schema.notes import (
  )
  from ai_review.clients.gitlab.mr.types import GitLabMergeRequestsHTTPClientProtocol
  from ai_review.config import settings
- from ai_review.libs.config.gitlab import GitLabPipelineConfig, GitLabHTTPClientConfig
- from ai_review.libs.config.vcs import GitLabVCSConfig
+ from ai_review.libs.config.vcs.base import GitLabVCSConfig
+ from ai_review.libs.config.vcs.gitlab import GitLabPipelineConfig, GitLabHTTPClientConfig
  from ai_review.libs.constants.vcs_provider import VCSProvider
  from ai_review.services.vcs.gitlab.client import GitLabVCSClient
 

ai_review/tests/fixtures/clients/ollama.py ADDED
@@ -0,0 +1,65 @@
+ from typing import Any
+
+ import pytest
+ from pydantic import HttpUrl
+
+ from ai_review.clients.ollama.schema import (
+     OllamaUsageSchema,
+     OllamaMessageSchema,
+     OllamaChatRequestSchema,
+     OllamaChatResponseSchema,
+ )
+ from ai_review.clients.ollama.types import OllamaHTTPClientProtocol
+ from ai_review.config import settings
+ from ai_review.libs.config.llm.base import OllamaLLMConfig
+ from ai_review.libs.config.llm.ollama import OllamaMetaConfig, OllamaHTTPClientConfig
+ from ai_review.libs.constants.llm_provider import LLMProvider
+ from ai_review.services.llm.ollama.client import OllamaLLMClient
+
+
+ class FakeOllamaHTTPClient(OllamaHTTPClientProtocol):
+     def __init__(self, responses: dict[str, Any] | None = None) -> None:
+         self.calls: list[tuple[str, dict]] = []
+         self.responses = responses or {}
+
+     async def chat(self, request: OllamaChatRequestSchema) -> OllamaChatResponseSchema:
+         self.calls.append(("chat", {"request": request}))
+         return self.responses.get(
+             "chat",
+             OllamaChatResponseSchema(
+                 done=True,
+                 model="llama2",
+                 usage=OllamaUsageSchema(prompt_tokens=3, completion_tokens=5),
+                 message=OllamaMessageSchema(role="assistant", content="FAKE_OLLAMA_RESPONSE"),
+             ),
+         )
+
+
+ @pytest.fixture
+ def fake_ollama_http_client():
+     return FakeOllamaHTTPClient()
+
+
+ @pytest.fixture
+ def ollama_llm_client(
+     monkeypatch: pytest.MonkeyPatch,
+     fake_ollama_http_client: FakeOllamaHTTPClient
+ ) -> OllamaLLMClient:
+     monkeypatch.setattr(
+         "ai_review.services.llm.ollama.client.get_ollama_http_client",
+         lambda: fake_ollama_http_client,
+     )
+     return OllamaLLMClient()
+
+
+ @pytest.fixture
+ def ollama_http_client_config(monkeypatch: pytest.MonkeyPatch):
+     fake_config = OllamaLLMConfig(
+         meta=OllamaMetaConfig(),
+         provider=LLMProvider.OLLAMA,
+         http_client=OllamaHTTPClientConfig(
+             timeout=10,
+             api_url=HttpUrl("http://localhost:11434")
+         )
+     )
+     monkeypatch.setattr(settings, "llm", fake_config)

ai_review/tests/fixtures/clients/openai.py CHANGED
@@ -1,10 +1,58 @@
+ from typing import Any
+
  import pytest
  from pydantic import HttpUrl, SecretStr
 
+ from ai_review.clients.openai.schema import (
+     OpenAIUsageSchema,
+     OpenAIChoiceSchema,
+     OpenAIMessageSchema,
+     OpenAIChatRequestSchema,
+     OpenAIChatResponseSchema,
+ )
+ from ai_review.clients.openai.types import OpenAIHTTPClientProtocol
  from ai_review.config import settings
- from ai_review.libs.config.llm import OpenAILLMConfig
- from ai_review.libs.config.openai import OpenAIMetaConfig, OpenAIHTTPClientConfig
+ from ai_review.libs.config.llm.base import OpenAILLMConfig
+ from ai_review.libs.config.llm.openai import OpenAIMetaConfig, OpenAIHTTPClientConfig
  from ai_review.libs.constants.llm_provider import LLMProvider
+ from ai_review.services.llm.openai.client import OpenAILLMClient
+
+
+ class FakeOpenAIHTTPClient(OpenAIHTTPClientProtocol):
+     def __init__(self, responses: dict[str, Any] | None = None) -> None:
+         self.calls: list[tuple[str, dict]] = []
+         self.responses = responses or {}
+
+     async def chat(self, request: OpenAIChatRequestSchema) -> OpenAIChatResponseSchema:
+         self.calls.append(("chat", {"request": request}))
+         return self.responses.get(
+             "chat",
+             OpenAIChatResponseSchema(
+                 usage=OpenAIUsageSchema(total_tokens=12, prompt_tokens=5, completion_tokens=7),
+                 choices=[
+                     OpenAIChoiceSchema(
+                         message=OpenAIMessageSchema(role="assistant", content="FAKE_OPENAI_RESPONSE")
+                     )
+                 ],
+             ),
+         )
+
+
+ @pytest.fixture
+ def fake_openai_http_client():
+     return FakeOpenAIHTTPClient()
+
+
+ @pytest.fixture
+ def openai_llm_client(
+     monkeypatch: pytest.MonkeyPatch,
+     fake_openai_http_client: FakeOpenAIHTTPClient
+ ) -> OpenAILLMClient:
+     monkeypatch.setattr(
+         "ai_review.services.llm.openai.client.get_openai_http_client",
+         lambda: fake_openai_http_client,
+     )
+     return OpenAILLMClient()
 
 
  @pytest.fixture

ai_review/tests/suites/clients/ollama/__init__.py ADDED
File without changes

ai_review/tests/suites/clients/ollama/test_client.py ADDED
@@ -0,0 +1,12 @@
+ import pytest
+ from httpx import AsyncClient
+
+ from ai_review.clients.ollama.client import get_ollama_http_client, OllamaHTTPClient
+
+
+ @pytest.mark.usefixtures('ollama_http_client_config')
+ def test_get_ollama_http_client_builds_ok():
+     ollama_http_client = get_ollama_http_client()
+
+     assert isinstance(ollama_http_client, OllamaHTTPClient)
+     assert isinstance(ollama_http_client.client, AsyncClient)

ai_review/tests/suites/clients/ollama/test_schema.py ADDED
@@ -0,0 +1,65 @@
+ from ai_review.clients.ollama.schema import (
+     OllamaMessageSchema,
+     OllamaOptionsSchema,
+     OllamaChatRequestSchema,
+     OllamaUsageSchema,
+     OllamaChatResponseSchema,
+ )
+
+
+ # ---------- OllamaUsageSchema ----------
+
+ def test_usage_total_tokens_sum_ok():
+     usage = OllamaUsageSchema(prompt_tokens=5, completion_tokens=7)
+     assert usage.total_tokens == 12
+
+
+ def test_usage_total_tokens_none_if_missing():
+     usage = OllamaUsageSchema(prompt_tokens=3)
+     assert usage.total_tokens is None
+
+
+ # ---------- OllamaChatResponseSchema ----------
+
+ def test_first_text_returns_text():
+     resp = OllamaChatResponseSchema(
+         model="llama2",
+         message=OllamaMessageSchema(role="assistant", content=" hello ollama "),
+         usage=OllamaUsageSchema(prompt_tokens=2, completion_tokens=3),
+     )
+     assert resp.first_text == "hello ollama"
+
+
+ def test_first_text_empty_if_content_empty():
+     resp = OllamaChatResponseSchema(
+         model="llama2",
+         message=OllamaMessageSchema(role="assistant", content=" "),
+         usage=OllamaUsageSchema(prompt_tokens=1, completion_tokens=1),
+     )
+     assert resp.first_text == ""
+
+
+ # ---------- OllamaChatRequestSchema ----------
+
+ def test_chat_request_schema_builds_ok():
+     msg = OllamaMessageSchema(role="user", content="hi ollama")
+     opts = OllamaOptionsSchema(
+         stop=["stop1", "stop2"],
+         seed=123,
+         top_p=0.9,
+         temperature=0.7,
+         num_predict=256,
+         repeat_penalty=1.1,
+     )
+     req = OllamaChatRequestSchema(
+         model="llama2",
+         stream=False,
+         options=opts,
+         messages=[msg],
+     )
+
+     assert req.model == "llama2"
+     assert req.options.temperature == 0.7
+     assert req.options.num_predict == 256
+     assert req.options.stop == ["stop1", "stop2"]
+     assert req.messages[0].content == "hi ollama"

ai_review/tests/suites/services/cost/test_service.py CHANGED
@@ -1,6 +1,6 @@
  import pytest
 
- from ai_review.libs.config.llm import LLMPricingConfig, LLMConfigBase
+ from ai_review.libs.config.llm.base import LLMPricingConfig, LLMConfigBase
  from ai_review.services.cost.schema import CostReportSchema
  from ai_review.services.cost.service import CostService
  from ai_review.services.llm.types import ChatResultSchema

ai_review/tests/suites/services/llm/claude/__init__.py ADDED
File without changes

ai_review/tests/suites/services/llm/claude/test_client.py ADDED
@@ -0,0 +1,22 @@
+ import pytest
+
+ from ai_review.services.llm.claude.client import ClaudeLLMClient
+ from ai_review.services.llm.types import ChatResultSchema
+ from ai_review.tests.fixtures.clients.claude import FakeClaudeHTTPClient
+
+
+ @pytest.mark.asyncio
+ @pytest.mark.usefixtures("claude_http_client_config")
+ async def test_claude_llm_chat(
+     claude_llm_client: ClaudeLLMClient,
+     fake_claude_http_client: FakeClaudeHTTPClient
+ ):
+     result = await claude_llm_client.chat("prompt", "prompt_system")
+
+     assert isinstance(result, ChatResultSchema)
+     assert result.text == "FAKE_CLAUDE_RESPONSE"
+     assert result.total_tokens == 12
+     assert result.prompt_tokens == 5
+     assert result.completion_tokens == 7
+
+     assert fake_claude_http_client.calls[0][0] == "chat"

ai_review/tests/suites/services/llm/gemini/__init__.py ADDED
File without changes

ai_review/tests/suites/services/llm/gemini/test_client.py ADDED
@@ -0,0 +1,22 @@
+ import pytest
+
+ from ai_review.services.llm.gemini.client import GeminiLLMClient
+ from ai_review.services.llm.types import ChatResultSchema
+ from ai_review.tests.fixtures.clients.gemini import FakeGeminiHTTPClient
+
+
+ @pytest.mark.asyncio
+ @pytest.mark.usefixtures("gemini_http_client_config")
+ async def test_gemini_llm_chat(
+     gemini_llm_client: GeminiLLMClient,
+     fake_gemini_http_client: FakeGeminiHTTPClient
+ ):
+     result = await gemini_llm_client.chat("prompt", "prompt_system")
+
+     assert isinstance(result, ChatResultSchema)
+     assert result.text == "FAKE_GEMINI_RESPONSE"
+     assert result.total_tokens == 10
+     assert result.prompt_tokens == 2
+     assert result.completion_tokens is None
+
+     assert fake_gemini_http_client.calls[0][0] == "chat"

ai_review/tests/suites/services/llm/ollama/__init__.py ADDED
File without changes

ai_review/tests/suites/services/llm/ollama/test_client.py ADDED
@@ -0,0 +1,22 @@
+ import pytest
+
+ from ai_review.services.llm.ollama.client import OllamaLLMClient
+ from ai_review.services.llm.types import ChatResultSchema
+ from ai_review.tests.fixtures.clients.ollama import FakeOllamaHTTPClient
+
+
+ @pytest.mark.asyncio
+ @pytest.mark.usefixtures("ollama_http_client_config")
+ async def test_ollama_llm_chat(
+     ollama_llm_client: OllamaLLMClient,
+     fake_ollama_http_client: FakeOllamaHTTPClient
+ ):
+     result = await ollama_llm_client.chat("prompt", "prompt_system")
+
+     assert isinstance(result, ChatResultSchema)
+     assert result.text == "FAKE_OLLAMA_RESPONSE"
+     assert result.total_tokens == 8
+     assert result.prompt_tokens == 3
+     assert result.completion_tokens == 5
+
+     assert fake_ollama_http_client.calls[0][0] == "chat"

ai_review/tests/suites/services/llm/openai/__init__.py ADDED
File without changes

ai_review/tests/suites/services/llm/openai/test_client.py ADDED
@@ -0,0 +1,22 @@
+ import pytest
+
+ from ai_review.services.llm.openai.client import OpenAILLMClient
+ from ai_review.services.llm.types import ChatResultSchema
+ from ai_review.tests.fixtures.clients.openai import FakeOpenAIHTTPClient
+
+
+ @pytest.mark.asyncio
+ @pytest.mark.usefixtures("openai_http_client_config")
+ async def test_openai_llm_chat(
+     openai_llm_client: OpenAILLMClient,
+     fake_openai_http_client: FakeOpenAIHTTPClient
+ ):
+     result = await openai_llm_client.chat("prompt", "prompt_system")
+
+     assert isinstance(result, ChatResultSchema)
+     assert result.text == "FAKE_OPENAI_RESPONSE"
+     assert result.total_tokens == 12
+     assert result.prompt_tokens == 5
+     assert result.completion_tokens == 7
+
+     assert fake_openai_http_client.calls[0][0] == "chat"

ai_review/tests/suites/services/llm/test_factory.py CHANGED
@@ -3,6 +3,7 @@ import pytest
  from ai_review.services.llm.claude.client import ClaudeLLMClient
  from ai_review.services.llm.factory import get_llm_client
  from ai_review.services.llm.gemini.client import GeminiLLMClient
+ from ai_review.services.llm.ollama.client import OllamaLLMClient
  from ai_review.services.llm.openai.client import OpenAILLMClient
 
 
@@ -24,7 +25,13 @@ def test_get_llm_client_returns_claude(monkeypatch: pytest.MonkeyPatch):
      assert isinstance(client, ClaudeLLMClient)
 
 
+ @pytest.mark.usefixtures("ollama_http_client_config")
+ def test_get_llm_client_returns_ollama(monkeypatch: pytest.MonkeyPatch):
+     client = get_llm_client()
+     assert isinstance(client, OllamaLLMClient)
+
+
  def test_get_llm_client_unsupported_provider(monkeypatch: pytest.MonkeyPatch):
-     monkeypatch.setattr("ai_review.services.llm.factory.settings.llm.provider", "DEEPSEEK")
+     monkeypatch.setattr("ai_review.services.llm.factory.settings.llm.provider", "UNSUPPORTED")
      with pytest.raises(ValueError):
          get_llm_client()

{xai_review-0.23.0.dist-info → xai_review-0.25.0.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: xai-review
- Version: 0.23.0
+ Version: 0.25.0
  Summary: AI-powered code review tool
  Author-email: Nikita Filonov <nikita.filonov@example.com>
  Maintainer-email: Nikita Filonov <nikita.filonov@example.com>
@@ -66,7 +66,7 @@ improve code quality, enforce consistency, and speed up the review process.
 
  ✨ Key features:
 
- - **Multiple LLM providers** — choose between **OpenAI**, **Claude**, and **Gemini**, or switch anytime.
+ - **Multiple LLM providers** — choose between **OpenAI**, **Claude**, **Gemini**, or **Ollama**, and switch anytime.
  - **VCS integration** — works out of the box with GitLab, GitHub (more providers coming).
  - **Customizable prompts** — adapt inline, context, and summary reviews to match your team’s coding guidelines.
  - **Flexible configuration** — supports `YAML`, `JSON`, and `ENV`, with seamless overrides in CI/CD pipelines.
@@ -168,7 +168,7 @@ for complete, ready-to-use examples.
 
  Key things you can customize:
 
- - **LLM provider** — OpenAI, Gemini, or Claude
+ - **LLM provider** — OpenAI, Gemini, Claude, or Ollama
  - **Model settings** — model name, temperature, max tokens
  - **VCS integration** — works out of the box with **GitLab** and **GitHub**.
  - **Review policy** — which files to include/exclude, review modes
@@ -209,7 +209,7 @@ jobs:
      runs-on: ubuntu-latest
      steps:
        - uses: actions/checkout@v4
-       - uses: Nikita-Filonov/ai-review@v0.23.0
+       - uses: Nikita-Filonov/ai-review@v0.25.0
          with:
            review-command: ${{ inputs.review-command }}
          env:
@@ -288,6 +288,10 @@ provider** explicitly configured in your `.ai-review.yaml`.
  All data is sent **directly** from your CI/CD environment to the selected LLM API endpoint (e.g. OpenAI, Gemini,
  Claude). No intermediary servers or storage layers are involved.
 
+ If you use **Ollama**, requests are sent to your **local or self-hosted Ollama runtime**
+ (by default `http://localhost:11434`). This allows you to run reviews completely **offline**, keeping all data strictly
+ inside your infrastructure.
+
  > ⚠️ Please ensure you use proper API tokens and avoid exposing corporate or personal secrets.
  > If you accidentally leak private code or credentials due to incorrect configuration (e.g., using a personal key
  > instead of an enterprise one), it is **your responsibility** — the tool does not retain or share any data by itself.
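
For the offline setup described above, the new config classes make the wiring explicit. A minimal sketch in Python of an Ollama configuration (illustrative values; in practice these come from `.ai-review.yaml`, `JSON`, or `ENV`), mirroring the `ollama_http_client_config` test fixture earlier in this diff:

from pydantic import HttpUrl

from ai_review.libs.config.llm.base import OllamaLLMConfig
from ai_review.libs.config.llm.ollama import OllamaHTTPClientConfig, OllamaMetaConfig
from ai_review.libs.constants.llm_provider import LLMProvider

config = OllamaLLMConfig(
    provider=LLMProvider.OLLAMA,
    meta=OllamaMetaConfig(model="llama2", temperature=0.3),
    http_client=OllamaHTTPClientConfig(api_url=HttpUrl("http://localhost:11434")),  # no api_token needed
)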

{xai_review-0.23.0.dist-info → xai_review-0.25.0.dist-info}/RECORD RENAMED
@@ -1,5 +1,5 @@
  ai_review/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ai_review/config.py,sha256=tPda0l_EX6MWHtKXE6HLFIkzYqLxA2yogSQDMeE5MAM,1954
+ ai_review/config.py,sha256=LmHYNW44hi9qUhGxUjwoZBNrfu49-TcZD7WyGpfbpPI,1964
  ai_review/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/cli/main.py,sha256=rZ0LYSAt3AFT-wOHdop8lq2GOVAK48kJYp7nf801Mjs,1854
  ai_review/cli/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -9,11 +9,13 @@ ai_review/cli/commands/run_review.py,sha256=i39IYNDE_lAiQQnKLmxG71Ao8WAIOSn82L9E
  ai_review/cli/commands/run_summary_review.py,sha256=NqjepGH5cbqczPzcuMEAxO4dI58FEUZl0b6uRVQ9SiA,224
  ai_review/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/clients/claude/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ai_review/clients/claude/client.py,sha256=MKN7W5RRvV_zRWZLCLx75ATj-gtkef-YE7-Yd7C3ke8,1796
+ ai_review/clients/claude/client.py,sha256=uEadbBNBJnzjHDczbxXiiw1V1H1PdUWKu-Gn-eIDEmw,1890
  ai_review/clients/claude/schema.py,sha256=LE6KCjJKDXqBGU2Cno5XL5R8vUfScgskE9MqvE0Pt2A,887
+ ai_review/clients/claude/types.py,sha256=y_-yF7zQrTvyiowS2b9xjIlAzkF8i6OfOjqo9eB8Xo4,267
  ai_review/clients/gemini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ai_review/clients/gemini/client.py,sha256=a-QY_4lDgZgUcEZpaeNjrqthWhXQ7iqdE_8pQROxqR0,1795
+ ai_review/clients/gemini/client.py,sha256=4G1LBcpiFcrITOysQbMwhY1db4hHcSGgyI-0XazZMV0,1889
  ai_review/clients/gemini/schema.py,sha256=5oVvbI-h_sw8bFreS4JUmMj-aXa_frvxK3H8sg4iJIA,2264
+ ai_review/clients/gemini/types.py,sha256=D-P0THorrQ8yq5P-NKAC65zzhEYRa9HkiXTORG9QoIk,267
  ai_review/clients/github/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/clients/github/client.py,sha256=pprQcCYdrhRYtuqRsTFiCbj54Qb1Ll6_jmlm7AJg8pk,1149
  ai_review/clients/github/pr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -33,9 +35,14 @@ ai_review/clients/gitlab/mr/schema/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQe
  ai_review/clients/gitlab/mr/schema/changes.py,sha256=ZqSPb8zO0z_V8cEjxoTqnwbjRLxo6OTV4LeQEAg91cU,835
  ai_review/clients/gitlab/mr/schema/discussions.py,sha256=JgvxKfHoYxmp86aP4MpIczK-arU0hc-BZLASWDWBIRs,790
  ai_review/clients/gitlab/mr/schema/notes.py,sha256=yfnnRt69fALKfapzZpVtvCvNwPkq5jBFI7fbPMq1w1c,424
+ ai_review/clients/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ ai_review/clients/ollama/client.py,sha256=KoJ9J5_Vfpv5XNJREshE_gA46uo9J0Z3qVC7wJPEcX8,1720
+ ai_review/clients/ollama/schema.py,sha256=A6oKwkkEVrduyzMR_lhLnaLyvKXqlfsXjkMIF2eXaYw,1310
+ ai_review/clients/ollama/types.py,sha256=9ES8K-EClKYU7UsaMKgXvZ3sUOF9o6reEvfL6wFOJ4M,267
  ai_review/clients/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ai_review/clients/openai/client.py,sha256=IWIl5um4chLiz45Hw1hd_KAz7jkXpJp0l2qf0ztMUV0,1722
+ ai_review/clients/openai/client.py,sha256=jY1XG_5GtNboNjkXu3UtuXFx5V9rD6UskK7VT0lOzP8,1816
  ai_review/clients/openai/schema.py,sha256=glxwMtBrDA6W0BQgH-ruKe0bKH3Ps1P-Y1-2jGdqaUM,764
+ ai_review/clients/openai/types.py,sha256=4VRY45ihKjii8w0d5XLnUGnHuBSh9wRsOP6lmkseC0Q,267
  ai_review/libs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/libs/json.py,sha256=g-P5_pNUomQ-bGHCXASvPKj9Og0s9MaLFVEAkzqGp1A,350
  ai_review/libs/logger.py,sha256=LbXR2Zk1btJ-83I-vHee7cUETgT1mHToSsqEI_8uM0U,370
@@ -45,20 +52,24 @@ ai_review/libs/asynchronous/gather.py,sha256=wH65sqBfrnwA1A9Juc5MSyLCJrcxzRqk2m0
  ai_review/libs/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/libs/config/artifacts.py,sha256=8BzbQu5GxwV6i6qzrUKM1De1Ogb00Ph5WTqwZ3fVpGg,483
  ai_review/libs/config/base.py,sha256=sPf3OKeF1ID0ouOwiVaUtvpWuZXJXQvIw5kbnPUyN9o,686
- ai_review/libs/config/claude.py,sha256=E9AJmszfY4TH8PkJjnDDDJYNAU9bLGsUThM3kriVA58,302
  ai_review/libs/config/core.py,sha256=ZQ2QtYr7vAF0tXbVLvVwk9QFE5h6JjAKAUQWcb9gHws,87
- ai_review/libs/config/gemini.py,sha256=sXHud43LWb4xTvhdkGQeHSLC7qvWl5LfU41fgcIVE5E,274
- ai_review/libs/config/github.py,sha256=1yFfvkTOt5ernIrxjqmiUKDpbEEHpa6lTpDiFQ5gVn4,238
- ai_review/libs/config/gitlab.py,sha256=VFvoVtni86tWky6Y34XCYdNrBuAtbgFFYGK3idPSOS4,234
- ai_review/libs/config/http.py,sha256=QsIj0yH1IYELOFBQ5AoqPZT0kGIIrQ19cxk1ozPRhLE,345
- ai_review/libs/config/llm.py,sha256=cK-e4NCQxnnixLATCsO8-r5k3zUWz1N0BdPCoqerORM,1824
+ ai_review/libs/config/http.py,sha256=dx5PwgnGbPocUwf9QRhFmXmjfFDoeerOM04yB3B6S8w,398
  ai_review/libs/config/logger.py,sha256=oPmjpjf6EZwW7CgOjT8mOQdGnT98CLwXepiGB_ajZvU,384
- ai_review/libs/config/openai.py,sha256=vOYqhUq0ceEuNdQrQaHq44lVS5M648mB61Zc4YlfJVw,271
  ai_review/libs/config/prompt.py,sha256=8aO5WNnhVhQcpWzWxqzb9lq6PzormaJazVwPHuf_ia8,4469
  ai_review/libs/config/review.py,sha256=LEZni68iH_0m4URPfN0d3F6yrrK7KSn-BwXf-7w2al8,1058
- ai_review/libs/config/vcs.py,sha256=ULuLicuulFgG-_sTuDWsldyVWIT2SkiS8brPUU1svgk,778
+ ai_review/libs/config/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ ai_review/libs/config/llm/base.py,sha256=ovvULFhfwH66_705D1O87ZGMeaQOZO7ZQhRUzzfzguU,2089
+ ai_review/libs/config/llm/claude.py,sha256=MoalXkBA6pEp01znS8ohTRopfea9RUcqhZX5lOIuek8,293
+ ai_review/libs/config/llm/gemini.py,sha256=SKtlzsRuNWOlM9m3SFvcqOIjnml8lpPidp7FiGmIEz4,265
+ ai_review/libs/config/llm/meta.py,sha256=cEcAHOwy-mQBKo9_KJrQe0I7qppq6h99lSmoWX4ElJI,195
+ ai_review/libs/config/llm/ollama.py,sha256=M6aiPb5GvYvkiGcgHTsh9bOw5JsBLqmfSKoIbHCejrU,372
+ ai_review/libs/config/llm/openai.py,sha256=jGVL4gJ2wIacoKeK9Zc9LCgY95TxdeYOThdglVPErFU,262
+ ai_review/libs/config/vcs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ ai_review/libs/config/vcs/base.py,sha256=zkfqBnI9kF_wbU9Nan19CKciKwOpWvdcZEt57jHujbE,786
+ ai_review/libs/config/vcs/github.py,sha256=hk-kuDLd8wecqtEb8PSqF7Yy_pkihplJhi6nB6FZID4,256
+ ai_review/libs/config/vcs/gitlab.py,sha256=ecYfU158VgVlM6P5mgZn8FOqk3Xt60xx7gUqT5e22a4,252
  ai_review/libs/constants/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ai_review/libs/constants/llm_provider.py,sha256=sKnDLylCIIosYjq0-0r91LMiYJ4DlHVH2jeRDv_DlsQ,121
+ ai_review/libs/constants/llm_provider.py,sha256=k7GzctIZ-TDsRlhTPbpGYgym_CO2YKVFp_oXG9dTBW0,143
  ai_review/libs/constants/vcs_provider.py,sha256=mZMC8DWIDWQ1YeUZh1a1jduX5enOAe1rWeza0RBmpTY,99
  ai_review/libs/diff/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/libs/diff/models.py,sha256=RT4YJboOPA-AjNJGRj_HIZaJLEmROOhOgMh1wIGpIwY,2344
@@ -71,7 +82,7 @@ ai_review/libs/http/event_hooks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm
  ai_review/libs/http/event_hooks/base.py,sha256=cnSOOButTJYKeyb_OnGms1vXRfwfExP81L3ZfYWLufk,279
  ai_review/libs/http/event_hooks/logger.py,sha256=8_omfl6q3JijaBBIgzvzb4SayjNEDW-oxyck_Ky8wnI,603
  ai_review/libs/http/transports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ai_review/libs/http/transports/retry.py,sha256=OIhTplK22RigjF5A8M9FdMm01BLyWpvQnuK2__NC678,1768
+ ai_review/libs/http/transports/retry.py,sha256=S66-SKvJvIgqBknDgEnqciTB0g2-ZQ7JubQtkzv8P44,1772
  ai_review/libs/template/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/libs/template/render.py,sha256=PwLG46fXg8P3gZvmJB93P51G2IBdsEK2I8oDlLGmA-4,414
  ai_review/prompts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -107,12 +118,14 @@ ai_review/services/hook/constants.py,sha256=uQJld5tJVUFk506h5RswTqLy-sIYxQfuQcUw
  ai_review/services/hook/service.py,sha256=InPoWBas6SPoy0KUyKJFg5xVk90jBlWdWtUTaX71G88,6364
  ai_review/services/hook/types.py,sha256=zwOmnZVGlg53vUoC2rHNhpEiNsTpf0Tnb-s3SRPKFys,1405
  ai_review/services/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ai_review/services/llm/factory.py,sha256=EHCRA5asqyE86Utn-EBYe_HbWRzck0S0UJG1gm5f2uo,741
+ ai_review/services/llm/factory.py,sha256=AszDqufYPaZdVVR99UZBEFvnGdOildBFQ9uVOiBI1Tc,876
  ai_review/services/llm/types.py,sha256=OvbJWYRDThBgLhn9TWU0mliuanOW01CS3e8ermtuS-s,353
  ai_review/services/llm/claude/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/services/llm/claude/client.py,sha256=JJD0FWiXjCCpO7NW3vVoBMXhTQ9VBA4Q93QqkeQqON0,1082
  ai_review/services/llm/gemini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/services/llm/gemini/client.py,sha256=TR4HshVxtDV8_luQKCM3aFNH9tjAjpzNeFBg-oxdsfA,1282
+ ai_review/services/llm/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ ai_review/services/llm/ollama/client.py,sha256=817nOQRsnaVqoY6LdO95l5JkRHkGvvS8TX7hezT2gqk,1479
  ai_review/services/llm/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/services/llm/openai/client.py,sha256=c3DWwLnwTheERdSGnMiQIbg5SaICouUAGClcQZSh1fE,1159
  ai_review/services/prompt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -146,11 +159,12 @@ ai_review/services/vcs/gitlab/client.py,sha256=LK95m-uFSxhDEVU-cBGct61NTKjul-ieL
  ai_review/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/tests/fixtures/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/tests/fixtures/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ai_review/tests/fixtures/clients/claude.py,sha256=CpB65Twi2Q4kIOclflnyT--wGNrLPCEEgt9P_qyhu10,759
- ai_review/tests/fixtures/clients/gemini.py,sha256=UXlLEqyXlei3ojrVCYRX0ic3Lq7iBNrG6BFZ8Ec_K7c,737
- ai_review/tests/fixtures/clients/github.py,sha256=vZu_Tf8RlvYKDyMUnlP9Uc8fL95HRs1VTv7zWxIH_eQ,6856
- ai_review/tests/fixtures/clients/gitlab.py,sha256=qyzp6lYZ9EQz-ECujFa5WK_xSbE-hmy35cfnW11mS6g,5531
- ai_review/tests/fixtures/clients/openai.py,sha256=O30n-tCfpeCxeUQgrQPK5ll__buhbeShobK7LASFMls,721
+ ai_review/tests/fixtures/clients/claude.py,sha256=6ldJlSSea0zsZV0hRDMi9mqWm0hWT3mp_ROwG_sVU1c,2203
+ ai_review/tests/fixtures/clients/gemini.py,sha256=zhLJhm49keKEBCPOf_pLu8_zCatsKKAWM4-gXOhaXeM,2429
+ ai_review/tests/fixtures/clients/github.py,sha256=Mzr8LcvVlYLhimzDMG4tEOQwj_6E6kTvYvSrq04R3YI,6865
+ ai_review/tests/fixtures/clients/gitlab.py,sha256=_0JSN-ixA7nDOwY18BlL_L9fh_qmT1_6sxGx_CIRhmM,5540
+ ai_review/tests/fixtures/clients/ollama.py,sha256=UUHDDPUraQAG8gBC-0UvftaK0BDYir5cJDlRKJymSQg,2109
+ ai_review/tests/fixtures/clients/openai.py,sha256=UgfuRZWzl3X7ZVHMLKP4mZxNXVpcccitkc9tuUyffXE,2267
  ai_review/tests/fixtures/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/tests/fixtures/services/artifacts.py,sha256=V5FvUnC9OAo0n-paxxJP5OxAgLz1Zz3OZ8zZvqu_01w,1462
  ai_review/tests/fixtures/services/cost.py,sha256=A6Ja0CtQ-k6pR2-B5LRE8EzkqPL34xHGXYtaILjhYvw,1612
@@ -174,6 +188,9 @@ ai_review/tests/suites/clients/github/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCe
  ai_review/tests/suites/clients/github/test_client.py,sha256=BiuLKCHIk83U1szYEZkB-n3vvyPgj6tAI5EqxKiT-CY,558
  ai_review/tests/suites/clients/gitlab/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/tests/suites/clients/gitlab/test_client.py,sha256=5QOkNvgm0XRKHh79FNIY9CTonAqYPXqCCxcxeiAHYCA,560
+ ai_review/tests/suites/clients/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ ai_review/tests/suites/clients/ollama/test_client.py,sha256=XZ8NAd1bS_ltTuYZPgqlutPRA6kbvH3_3SKTCbNBTgA,404
+ ai_review/tests/suites/clients/ollama/test_schema.py,sha256=A93wCmxwGdvudfbA97VCPYP3gT6u6EYMetAg5fgURRA,1836
  ai_review/tests/suites/clients/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/tests/suites/clients/openai/test_client.py,sha256=6Wsxw6-6Uk0uPYFkzpWSwsxfCYUZhT3UYznayo-xlPI,404
  ai_review/tests/suites/clients/openai/test_schema.py,sha256=x1tamS4GC9pOTpjieKDbK2D73CVV4BkATppytwMevLo,1599
@@ -192,7 +209,7 @@ ai_review/tests/suites/libs/template/test_render.py,sha256=n-ss5bd_hwc-RzYmqWmFM
  ai_review/tests/suites/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/tests/suites/services/cost/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/tests/suites/services/cost/test_schema.py,sha256=AI3Wg1sR6nzLpkEqJGDu6nDYwiwzbbghsxhRNwRsUFA,3044
- ai_review/tests/suites/services/cost/test_service.py,sha256=fMW4Tg6BRMXKcqOO7MmSqJc1mpuguvFSl0GjS93m7u8,3253
+ ai_review/tests/suites/services/cost/test_service.py,sha256=9_Mi5hu2cq3w2tIEPfhrn9x8SblCT5m1W-QUOc9BZds,3258
  ai_review/tests/suites/services/diff/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/tests/suites/services/diff/test_renderers.py,sha256=IKOpsGedONNW8ZfYTAk0Vq0hfFi7L6TpWs8vVVQroj0,6273
  ai_review/tests/suites/services/diff/test_service.py,sha256=evFe1I-ulOYdkhQS9NC8Bol4jXuhEnb2iArTAzckWrA,3195
@@ -200,7 +217,15 @@ ai_review/tests/suites/services/diff/test_tools.py,sha256=vsOSSIDZKkuD8dMCoBBEBt
  ai_review/tests/suites/services/hook/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/tests/suites/services/hook/test_service.py,sha256=GM_AiNVGP2Pgp-3BwGOAIfA8lLXl6ah28ey77KZz_C4,2750
  ai_review/tests/suites/services/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ai_review/tests/suites/services/llm/test_factory.py,sha256=_i_UFtG_WGT3jpBDm20Hb0rFTFrfPuiFJhhSrlvUlVQ,1120
+ ai_review/tests/suites/services/llm/test_factory.py,sha256=Lp37aXM08fHaLzgwRBw5xZEDZkPDNhJ4qwjtEwGENv8,1394
+ ai_review/tests/suites/services/llm/claude/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ ai_review/tests/suites/services/llm/claude/test_client.py,sha256=ymIeuIax0Bp_CuXBSApK1RDl1JmbGc97uzXZToQOZO8,761
+ ai_review/tests/suites/services/llm/gemini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ ai_review/tests/suites/services/llm/gemini/test_client.py,sha256=RjYViMZTgTdbzmDpOvwjuwYVkQV3IyNRhxZ8Y_cfJiQ,764
+ ai_review/tests/suites/services/llm/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ ai_review/tests/suites/services/llm/ollama/test_client.py,sha256=Eu4OERB00SJwCKznyOCyqSFTDBp9J2Lw-BcW7sPJQM4,760
+ ai_review/tests/suites/services/llm/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ ai_review/tests/suites/services/llm/openai/test_client.py,sha256=yzIL8GYHyX9iLKIlaF__87aue9w0cr66feoMaCv5gms,761
  ai_review/tests/suites/services/prompt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/tests/suites/services/prompt/test_adapter.py,sha256=9KZOFQmZUs3l_cW7Q5LIMPs4i4J-gOCQ6VrlDPR0ImU,2156
  ai_review/tests/suites/services/prompt/test_schema.py,sha256=rm2__LA2_4qQwSmNAZ_Wnpy11T3yYRkYUkRUrqxUQKE,5421
@@ -222,9 +247,9 @@ ai_review/tests/suites/services/vcs/github/__init__.py,sha256=47DEQpj8HBSa-_TImW
  ai_review/tests/suites/services/vcs/github/test_service.py,sha256=c2sjecm4qzqYXuO9j6j35NQyJzqDpnXIJImRTcpkyHo,4378
  ai_review/tests/suites/services/vcs/gitlab/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ai_review/tests/suites/services/vcs/gitlab/test_service.py,sha256=0dqgL5whzjcP-AQ4adP_12QfkYm_ZtdtMotmYm8Se7Y,4449
- xai_review-0.23.0.dist-info/licenses/LICENSE,sha256=p-v8m7Kmz4KKc7PcvsGiGEmCw9AiSXY4_ylOPy_u--Y,11343
- xai_review-0.23.0.dist-info/METADATA,sha256=6GjvVteV4O_MPSSgTB4dNhlbS7iNKlGO5Uei0Y6JAuQ,10872
- xai_review-0.23.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- xai_review-0.23.0.dist-info/entry_points.txt,sha256=JyC5URanMi5io5P_PXQf7H_I1OGIpk5cZQhaPQ0g4Zs,53
- xai_review-0.23.0.dist-info/top_level.txt,sha256=sTsZbfzLoqvRZKdKa-BcxWvjlHdrpbeJ6DrGY0EuR0E,10
- xai_review-0.23.0.dist-info/RECORD,,
+ xai_review-0.25.0.dist-info/licenses/LICENSE,sha256=p-v8m7Kmz4KKc7PcvsGiGEmCw9AiSXY4_ylOPy_u--Y,11343
+ xai_review-0.25.0.dist-info/METADATA,sha256=-Ogp0iZijYiluzi9D_yO21Ur9N9Z4qJclmSeHrddU3A,11132
+ xai_review-0.25.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ xai_review-0.25.0.dist-info/entry_points.txt,sha256=JyC5URanMi5io5P_PXQf7H_I1OGIpk5cZQhaPQ0g4Zs,53
+ xai_review-0.25.0.dist-info/top_level.txt,sha256=sTsZbfzLoqvRZKdKa-BcxWvjlHdrpbeJ6DrGY0EuR0E,10
+ xai_review-0.25.0.dist-info/RECORD,,

ai_review/libs/config/claude.py REMOVED
@@ -1,13 +0,0 @@
- from pydantic import BaseModel
-
- from ai_review.libs.config.http import HTTPClientConfig
-
-
- class ClaudeMetaConfig(BaseModel):
-     model: str = "claude-3-sonnet"
-     max_tokens: int = 1200
-     temperature: float = 0.3
-
-
- class ClaudeHTTPClientConfig(HTTPClientConfig):
-     api_version: str = "2023-06-01"

ai_review/libs/config/gemini.py REMOVED
@@ -1,13 +0,0 @@
- from pydantic import BaseModel
-
- from ai_review.libs.config.http import HTTPClientConfig
-
-
- class GeminiMetaConfig(BaseModel):
-     model: str = "gemini-2.0-pro"
-     max_tokens: int = 1200
-     temperature: float = 0.3
-
-
- class GeminiHTTPClientConfig(HTTPClientConfig):
-     pass

ai_review/libs/config/openai.py REMOVED
@@ -1,13 +0,0 @@
- from pydantic import BaseModel
-
- from ai_review.libs.config.http import HTTPClientConfig
-
-
- class OpenAIMetaConfig(BaseModel):
-     model: str = "gpt-4o-mini"
-     max_tokens: int = 1200
-     temperature: float = 0.3
-
-
- class OpenAIHTTPClientConfig(HTTPClientConfig):
-     pass