xai-review 0.24.0__py3-none-any.whl → 0.26.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xai-review might be problematic. Click here for more details.

Files changed (64) hide show
  1. ai_review/clients/bitbucket/__init__.py +0 -0
  2. ai_review/clients/bitbucket/client.py +31 -0
  3. ai_review/clients/bitbucket/pr/__init__.py +0 -0
  4. ai_review/clients/bitbucket/pr/client.py +104 -0
  5. ai_review/clients/bitbucket/pr/schema/__init__.py +0 -0
  6. ai_review/clients/bitbucket/pr/schema/comments.py +44 -0
  7. ai_review/clients/bitbucket/pr/schema/files.py +25 -0
  8. ai_review/clients/bitbucket/pr/schema/pull_request.py +38 -0
  9. ai_review/clients/bitbucket/pr/types.py +44 -0
  10. ai_review/clients/claude/client.py +2 -1
  11. ai_review/clients/claude/types.py +8 -0
  12. ai_review/clients/gemini/client.py +2 -1
  13. ai_review/clients/gemini/types.py +8 -0
  14. ai_review/clients/ollama/__init__.py +0 -0
  15. ai_review/clients/ollama/client.py +41 -0
  16. ai_review/clients/ollama/schema.py +47 -0
  17. ai_review/clients/ollama/types.py +8 -0
  18. ai_review/clients/openai/client.py +2 -1
  19. ai_review/clients/openai/types.py +8 -0
  20. ai_review/libs/config/http.py +4 -1
  21. ai_review/libs/config/llm/base.py +8 -1
  22. ai_review/libs/config/llm/claude.py +4 -7
  23. ai_review/libs/config/llm/gemini.py +4 -7
  24. ai_review/libs/config/llm/meta.py +7 -0
  25. ai_review/libs/config/llm/ollama.py +14 -0
  26. ai_review/libs/config/llm/openai.py +4 -7
  27. ai_review/libs/config/vcs/base.py +11 -1
  28. ai_review/libs/config/vcs/bitbucket.py +13 -0
  29. ai_review/libs/config/vcs/github.py +2 -2
  30. ai_review/libs/config/vcs/gitlab.py +2 -2
  31. ai_review/libs/constants/llm_provider.py +1 -0
  32. ai_review/libs/constants/vcs_provider.py +1 -0
  33. ai_review/services/llm/factory.py +3 -0
  34. ai_review/services/llm/ollama/__init__.py +0 -0
  35. ai_review/services/llm/ollama/client.py +34 -0
  36. ai_review/services/vcs/bitbucket/__init__.py +0 -0
  37. ai_review/services/vcs/bitbucket/client.py +185 -0
  38. ai_review/services/vcs/factory.py +3 -0
  39. ai_review/tests/fixtures/clients/bitbucket.py +204 -0
  40. ai_review/tests/fixtures/clients/claude.py +45 -0
  41. ai_review/tests/fixtures/clients/gemini.py +52 -0
  42. ai_review/tests/fixtures/clients/ollama.py +65 -0
  43. ai_review/tests/fixtures/clients/openai.py +48 -0
  44. ai_review/tests/suites/clients/ollama/__init__.py +0 -0
  45. ai_review/tests/suites/clients/ollama/test_client.py +12 -0
  46. ai_review/tests/suites/clients/ollama/test_schema.py +65 -0
  47. ai_review/tests/suites/services/llm/claude/__init__.py +0 -0
  48. ai_review/tests/suites/services/llm/claude/test_client.py +22 -0
  49. ai_review/tests/suites/services/llm/gemini/__init__.py +0 -0
  50. ai_review/tests/suites/services/llm/gemini/test_client.py +22 -0
  51. ai_review/tests/suites/services/llm/ollama/__init__.py +0 -0
  52. ai_review/tests/suites/services/llm/ollama/test_client.py +22 -0
  53. ai_review/tests/suites/services/llm/openai/__init__.py +0 -0
  54. ai_review/tests/suites/services/llm/openai/test_client.py +22 -0
  55. ai_review/tests/suites/services/llm/test_factory.py +8 -1
  56. ai_review/tests/suites/services/vcs/bitbucket/__init__.py +0 -0
  57. ai_review/tests/suites/services/vcs/bitbucket/test_service.py +117 -0
  58. ai_review/tests/suites/services/vcs/test_factory.py +8 -1
  59. {xai_review-0.24.0.dist-info → xai_review-0.26.0.dist-info}/METADATA +10 -6
  60. {xai_review-0.24.0.dist-info → xai_review-0.26.0.dist-info}/RECORD +64 -26
  61. {xai_review-0.24.0.dist-info → xai_review-0.26.0.dist-info}/WHEEL +0 -0
  62. {xai_review-0.24.0.dist-info → xai_review-0.26.0.dist-info}/entry_points.txt +0 -0
  63. {xai_review-0.24.0.dist-info → xai_review-0.26.0.dist-info}/licenses/LICENSE +0 -0
  64. {xai_review-0.24.0.dist-info → xai_review-0.26.0.dist-info}/top_level.txt +0 -0
File without changes
@@ -0,0 +1,31 @@
1
+ from ai_review.clients.bitbucket.pr.client import BitbucketPullRequestsHTTPClient
2
+ from httpx import AsyncClient, AsyncHTTPTransport
3
+
4
+ from ai_review.config import settings
5
+ from ai_review.libs.http.event_hooks.logger import LoggerEventHook
6
+ from ai_review.libs.http.transports.retry import RetryTransport
7
+ from ai_review.libs.logger import get_logger
8
+
9
+
10
class BitbucketHTTPClient:
    """Facade aggregating the Bitbucket API sub-clients (currently only PRs)."""

    def __init__(self, client: AsyncClient):
        # All sub-clients share one AsyncClient so they reuse the same
        # base URL, auth header, transport and logging event hooks.
        self.pr = BitbucketPullRequestsHTTPClient(client)
13
+
14
+
15
def get_bitbucket_http_client() -> BitbucketHTTPClient:
    """Build a Bitbucket HTTP client from the VCS settings.

    Returns:
        BitbucketHTTPClient wired with a retrying transport,
        request/response logging hooks and bearer-token auth.
    """
    logger = get_logger("BITBUCKET_HTTP_CLIENT")
    logger_event_hook = LoggerEventHook(logger=logger)
    retry_transport = RetryTransport(logger=logger, transport=AsyncHTTPTransport())

    client = AsyncClient(
        # BUG FIX: this is a VCS client — the timeout must come from the VCS
        # HTTP settings, not the LLM ones (copy/paste from the LLM factories).
        timeout=settings.vcs.http_client.timeout,
        headers={"Authorization": f"Bearer {settings.vcs.http_client.api_token_value}"},
        base_url=settings.vcs.http_client.api_url_value,
        transport=retry_transport,
        event_hooks={
            "request": [logger_event_hook.request],
            "response": [logger_event_hook.response],
        }
    )

    return BitbucketHTTPClient(client=client)
File without changes
@@ -0,0 +1,104 @@
1
+ from httpx import Response, QueryParams
2
+
3
+ from ai_review.clients.bitbucket.pr.schema.comments import (
4
+ BitbucketGetPRCommentsQuerySchema,
5
+ BitbucketGetPRCommentsResponseSchema,
6
+ BitbucketCreatePRCommentRequestSchema,
7
+ BitbucketCreatePRCommentResponseSchema,
8
+ )
9
+ from ai_review.clients.bitbucket.pr.schema.files import (
10
+ BitbucketGetPRFilesQuerySchema,
11
+ BitbucketGetPRFilesResponseSchema,
12
+ )
13
+ from ai_review.clients.bitbucket.pr.schema.pull_request import BitbucketGetPRResponseSchema
14
+ from ai_review.clients.bitbucket.pr.types import BitbucketPullRequestsHTTPClientProtocol
15
+ from ai_review.libs.http.client import HTTPClient
16
+ from ai_review.libs.http.handlers import handle_http_error, HTTPClientError
17
+
18
+
19
class BitbucketPullRequestsHTTPClientError(HTTPClientError):
    """Raised when a Bitbucket pull-request API call fails."""
    pass
21
+
22
+
23
class BitbucketPullRequestsHTTPClient(HTTPClient, BitbucketPullRequestsHTTPClientProtocol):
    """Client for the Bitbucket Cloud pull-requests REST API.

    The ``*_api`` methods perform the raw HTTP calls and return httpx
    Responses (failures are normalized to BitbucketPullRequestsHTTPClientError
    by ``handle_http_error``); the plain methods wrap them and parse the
    payload into pydantic response schemas.
    """

    @handle_http_error(client="BitbucketPullRequestsHTTPClient", exception=BitbucketPullRequestsHTTPClientError)
    async def get_pull_request_api(self, workspace: str, repo_slug: str, pull_request_id: str) -> Response:
        # GET the pull-request resource itself.
        return await self.get(f"/repositories/{workspace}/{repo_slug}/pullrequests/{pull_request_id}")

    @handle_http_error(client="BitbucketPullRequestsHTTPClient", exception=BitbucketPullRequestsHTTPClientError)
    async def get_diffstat_api(
        self,
        workspace: str,
        repo_slug: str,
        pull_request_id: str,
        query: BitbucketGetPRFilesQuerySchema,
    ) -> Response:
        # GET the per-file change summary (diffstat) for the PR.
        return await self.get(
            f"/repositories/{workspace}/{repo_slug}/pullrequests/{pull_request_id}/diffstat",
            query=QueryParams(**query.model_dump()),
        )

    @handle_http_error(client="BitbucketPullRequestsHTTPClient", exception=BitbucketPullRequestsHTTPClientError)
    async def get_comments_api(
        self,
        workspace: str,
        repo_slug: str,
        pull_request_id: str,
        query: BitbucketGetPRCommentsQuerySchema,
    ) -> Response:
        # GET the PR's comments (inline and general).
        return await self.get(
            f"/repositories/{workspace}/{repo_slug}/pullrequests/{pull_request_id}/comments",
            query=QueryParams(**query.model_dump()),
        )

    @handle_http_error(client="BitbucketPullRequestsHTTPClient", exception=BitbucketPullRequestsHTTPClientError)
    async def create_comment_api(
        self,
        workspace: str,
        repo_slug: str,
        pull_request_id: str,
        request: BitbucketCreatePRCommentRequestSchema,
    ) -> Response:
        # by_alias=True so the aliased "to"/"from" inline keys are emitted
        # in Bitbucket's wire format rather than the Python field names.
        return await self.post(
            f"/repositories/{workspace}/{repo_slug}/pullrequests/{pull_request_id}/comments",
            json=request.model_dump(by_alias=True),
        )

    async def get_pull_request(
        self,
        workspace: str,
        repo_slug: str,
        pull_request_id: str
    ) -> BitbucketGetPRResponseSchema:
        """Fetch a pull request and parse it into the schema."""
        resp = await self.get_pull_request_api(workspace, repo_slug, pull_request_id)
        return BitbucketGetPRResponseSchema.model_validate_json(resp.text)

    async def get_files(
        self,
        workspace: str,
        repo_slug: str,
        pull_request_id: str
    ) -> BitbucketGetPRFilesResponseSchema:
        """Fetch the PR diffstat (changed files).

        NOTE(review): only the first page (pagelen=100) is fetched and the
        response's ``next`` link is not followed — confirm PRs never exceed
        100 changed files, or add pagination.
        """
        query = BitbucketGetPRFilesQuerySchema(pagelen=100)
        resp = await self.get_diffstat_api(workspace, repo_slug, pull_request_id, query)
        return BitbucketGetPRFilesResponseSchema.model_validate_json(resp.text)

    async def get_comments(
        self,
        workspace: str,
        repo_slug: str,
        pull_request_id: str
    ) -> BitbucketGetPRCommentsResponseSchema:
        """Fetch the PR comments.

        NOTE(review): same single-page limitation as ``get_files`` — the
        ``next`` link is not followed.
        """
        query = BitbucketGetPRCommentsQuerySchema(pagelen=100)
        response = await self.get_comments_api(workspace, repo_slug, pull_request_id, query)
        return BitbucketGetPRCommentsResponseSchema.model_validate_json(response.text)

    async def create_comment(
        self,
        workspace: str,
        repo_slug: str,
        pull_request_id: str,
        request: BitbucketCreatePRCommentRequestSchema
    ) -> BitbucketCreatePRCommentResponseSchema:
        """Create a comment on the PR and return the created resource."""
        response = await self.create_comment_api(workspace, repo_slug, pull_request_id, request)
        return BitbucketCreatePRCommentResponseSchema.model_validate_json(response.text)
File without changes
@@ -0,0 +1,44 @@
1
+ from pydantic import BaseModel, Field, ConfigDict
2
+
3
+
4
class BitbucketCommentContentSchema(BaseModel):
    """Body of a Bitbucket comment; ``raw`` carries the source text."""
    raw: str
    html: str | None = None
    markup: str | None = None


class BitbucketCommentInlineSchema(BaseModel):
    """File/line anchor for an inline comment.

    Bitbucket's wire format uses ``to``/``from``; ``from`` is a Python
    keyword, so both fields are exposed under aliased names.
    """
    model_config = ConfigDict(populate_by_name=True)

    path: str
    to_line: int | None = Field(alias="to", default=None)
    from_line: int | None = Field(alias="from", default=None)


class BitbucketPRCommentSchema(BaseModel):
    """A single pull-request comment; ``inline`` may be absent."""
    id: int
    inline: BitbucketCommentInlineSchema | None = None
    content: BitbucketCommentContentSchema


class BitbucketGetPRCommentsQuerySchema(BaseModel):
    """Query parameters for listing comments (page size only)."""
    pagelen: int = 100


class BitbucketGetPRCommentsResponseSchema(BaseModel):
    """One page of comments; ``next`` is the URL of the following page, if any."""
    size: int
    page: int | None = None
    next: str | None = None
    values: list[BitbucketPRCommentSchema]
    pagelen: int


class BitbucketCreatePRCommentRequestSchema(BaseModel):
    """Payload for creating a comment; ``inline`` may be omitted."""
    inline: BitbucketCommentInlineSchema | None = None
    content: BitbucketCommentContentSchema


class BitbucketCreatePRCommentResponseSchema(BaseModel):
    """Comment resource as returned by Bitbucket after creation."""
    id: int
    inline: BitbucketCommentInlineSchema | None = None
    content: BitbucketCommentContentSchema
@@ -0,0 +1,25 @@
1
+ from pydantic import BaseModel
2
+
3
+
4
class BitbucketPRFilePathSchema(BaseModel):
    """Wrapper around a repository-relative file path."""
    path: str


class BitbucketPRFileSchema(BaseModel):
    """One diffstat entry: old/new paths plus change counters.

    ``new`` or ``old`` may be absent (e.g. for purely added or purely
    removed files).
    """
    new: BitbucketPRFilePathSchema | None = None
    old: BitbucketPRFilePathSchema | None = None
    status: str
    lines_added: int
    lines_removed: int


class BitbucketGetPRFilesQuerySchema(BaseModel):
    """Query parameters for the diffstat endpoint (page size only)."""
    pagelen: int = 100


class BitbucketGetPRFilesResponseSchema(BaseModel):
    """One page of diffstat entries; ``next`` is the next-page URL, if any."""
    size: int
    page: int | None = None
    next: str | None = None
    values: list[BitbucketPRFileSchema]
    pagelen: int
@@ -0,0 +1,38 @@
1
+ from pydantic import BaseModel, Field
2
+
3
+
4
class BitbucketUserSchema(BaseModel):
    """Bitbucket account identity as it appears on PR payloads."""
    uuid: str
    nickname: str
    display_name: str


class BitbucketBranchSchema(BaseModel):
    """Branch reference (name only)."""
    name: str


class BitbucketCommitSchema(BaseModel):
    """Commit reference (hash only)."""
    hash: str


class BitbucketRepositorySchema(BaseModel):
    """Repository reference (uuid and workspace/slug full name)."""
    uuid: str
    full_name: str


class BitbucketPRLocationSchema(BaseModel):
    """One end of a PR: branch + commit + repository."""
    branch: BitbucketBranchSchema
    commit: BitbucketCommitSchema
    repository: BitbucketRepositorySchema


class BitbucketGetPRResponseSchema(BaseModel):
    """Pull-request resource returned by the Bitbucket API."""
    id: int
    title: str
    description: str | None = None
    state: str
    author: BitbucketUserSchema
    # source/destination are the PR's from/to locations.
    source: BitbucketPRLocationSchema
    destination: BitbucketPRLocationSchema
    # default_factory so each instance gets its own list.
    reviewers: list[BitbucketUserSchema] = Field(default_factory=list)
    participants: list[BitbucketUserSchema] = Field(default_factory=list)
@@ -0,0 +1,44 @@
1
+ from typing import Protocol
2
+
3
+ from ai_review.clients.bitbucket.pr.schema.comments import (
4
+ BitbucketGetPRCommentsResponseSchema,
5
+ BitbucketCreatePRCommentRequestSchema,
6
+ BitbucketCreatePRCommentResponseSchema,
7
+ )
8
+ from ai_review.clients.bitbucket.pr.schema.files import BitbucketGetPRFilesResponseSchema
9
+ from ai_review.clients.bitbucket.pr.schema.pull_request import BitbucketGetPRResponseSchema
10
+
11
+
12
class BitbucketPullRequestsHTTPClientProtocol(Protocol):
    """Structural interface of the Bitbucket pull-requests client.

    Lets consumers depend on the duck type instead of the concrete
    HTTP client implementation.
    """

    async def get_pull_request(
        self,
        workspace: str,
        repo_slug: str,
        pull_request_id: str
    ) -> BitbucketGetPRResponseSchema:
        ...

    async def get_files(
        self,
        workspace: str,
        repo_slug: str,
        pull_request_id: str
    ) -> BitbucketGetPRFilesResponseSchema:
        ...

    async def get_comments(
        self,
        workspace: str,
        repo_slug: str,
        pull_request_id: str
    ) -> BitbucketGetPRCommentsResponseSchema:
        ...

    async def create_comment(
        self,
        workspace: str,
        repo_slug: str,
        pull_request_id: str,
        request: BitbucketCreatePRCommentRequestSchema,
    ) -> BitbucketCreatePRCommentResponseSchema:
        ...
@@ -1,6 +1,7 @@
1
1
  from httpx import AsyncClient, Response, AsyncHTTPTransport
2
2
 
3
3
  from ai_review.clients.claude.schema import ClaudeChatRequestSchema, ClaudeChatResponseSchema
4
+ from ai_review.clients.claude.types import ClaudeHTTPClientProtocol
4
5
  from ai_review.config import settings
5
6
  from ai_review.libs.http.client import HTTPClient
6
7
  from ai_review.libs.http.event_hooks.logger import LoggerEventHook
@@ -13,7 +14,7 @@ class ClaudeHTTPClientError(HTTPClientError):
13
14
  pass
14
15
 
15
16
 
16
- class ClaudeHTTPClient(HTTPClient):
17
+ class ClaudeHTTPClient(HTTPClient, ClaudeHTTPClientProtocol):
17
18
  @handle_http_error(client="ClaudeHTTPClient", exception=ClaudeHTTPClientError)
18
19
  async def chat_api(self, request: ClaudeChatRequestSchema) -> Response:
19
20
  return await self.post("/v1/messages", json=request.model_dump())
@@ -0,0 +1,8 @@
1
+ from typing import Protocol
2
+
3
+ from ai_review.clients.claude.schema import ClaudeChatRequestSchema, ClaudeChatResponseSchema
4
+
5
+
6
class ClaudeHTTPClientProtocol(Protocol):
    """Structural interface of the Claude chat client."""

    async def chat(self, request: ClaudeChatRequestSchema) -> ClaudeChatResponseSchema:
        ...
@@ -1,6 +1,7 @@
1
1
  from httpx import Response, AsyncHTTPTransport, AsyncClient
2
2
 
3
3
  from ai_review.clients.gemini.schema import GeminiChatRequestSchema, GeminiChatResponseSchema
4
+ from ai_review.clients.gemini.types import GeminiHTTPClientProtocol
4
5
  from ai_review.config import settings
5
6
  from ai_review.libs.http.client import HTTPClient
6
7
  from ai_review.libs.http.event_hooks.logger import LoggerEventHook
@@ -13,7 +14,7 @@ class GeminiHTTPClientError(HTTPClientError):
13
14
  pass
14
15
 
15
16
 
16
- class GeminiHTTPClient(HTTPClient):
17
+ class GeminiHTTPClient(HTTPClient, GeminiHTTPClientProtocol):
17
18
  @handle_http_error(client="GeminiHTTPClient", exception=GeminiHTTPClientError)
18
19
  async def chat_api(self, request: GeminiChatRequestSchema) -> Response:
19
20
  meta = settings.llm.meta
@@ -0,0 +1,8 @@
1
+ from typing import Protocol
2
+
3
+ from ai_review.clients.gemini.schema import GeminiChatRequestSchema, GeminiChatResponseSchema
4
+
5
+
6
class GeminiHTTPClientProtocol(Protocol):
    """Structural interface of the Gemini chat client."""

    async def chat(self, request: GeminiChatRequestSchema) -> GeminiChatResponseSchema:
        ...
File without changes
@@ -0,0 +1,41 @@
1
+ from httpx import AsyncClient, Response, AsyncHTTPTransport
2
+
3
+ from ai_review.clients.ollama.schema import OllamaChatRequestSchema, OllamaChatResponseSchema
4
+ from ai_review.clients.ollama.types import OllamaHTTPClientProtocol
5
+ from ai_review.config import settings
6
+ from ai_review.libs.http.client import HTTPClient
7
+ from ai_review.libs.http.event_hooks.logger import LoggerEventHook
8
+ from ai_review.libs.http.handlers import HTTPClientError, handle_http_error
9
+ from ai_review.libs.http.transports.retry import RetryTransport
10
+ from ai_review.libs.logger import get_logger
11
+
12
+
13
class OllamaHTTPClientError(HTTPClientError):
    """Raised when an Ollama API call fails."""
    pass
15
+
16
+
17
class OllamaHTTPClient(HTTPClient, OllamaHTTPClientProtocol):
    """HTTP client for the Ollama ``/api/chat`` endpoint."""

    @handle_http_error(client="OllamaHTTPClient", exception=OllamaHTTPClientError)
    async def chat_api(self, request: OllamaChatRequestSchema) -> Response:
        # Raw POST; failures are normalized to OllamaHTTPClientError.
        return await self.post("/api/chat", json=request.model_dump())

    async def chat(self, request: OllamaChatRequestSchema) -> OllamaChatResponseSchema:
        """Send a chat request and parse the response into the schema."""
        response = await self.chat_api(request)
        return OllamaChatResponseSchema.model_validate_json(response.text)
25
+
26
+
27
def get_ollama_http_client() -> OllamaHTTPClient:
    """Construct an OllamaHTTPClient from the LLM HTTP settings.

    Wires a retrying transport and request/response logging hooks into a
    shared httpx AsyncClient.
    """
    log = get_logger("OLLAMA_HTTP_CLIENT")
    hooks = LoggerEventHook(logger=log)
    http_config = settings.llm.http_client

    async_client = AsyncClient(
        timeout=http_config.timeout,
        base_url=http_config.api_url_value,
        transport=RetryTransport(logger=log, transport=AsyncHTTPTransport()),
        event_hooks={
            "request": [hooks.request],
            "response": [hooks.response],
        },
    )
    return OllamaHTTPClient(client=async_client)
@@ -0,0 +1,47 @@
1
+ from typing import Literal
2
+
3
+ from pydantic import BaseModel, Field
4
+
5
+
6
class OllamaMessageSchema(BaseModel):
    """One chat turn in Ollama's message format."""
    role: Literal["system", "user", "assistant"]
    content: str


class OllamaOptionsSchema(BaseModel):
    """Generation options forwarded to Ollama; all optional."""
    stop: list[str] | None = None
    seed: int | None = None
    top_p: float | None = Field(default=None, ge=0.0, le=1.0)
    temperature: float | None = Field(default=None, ge=0.0, le=2.0)
    num_predict: int | None = Field(default=None, ge=1)
    repeat_penalty: float | None = Field(default=None, ge=0.0)


class OllamaChatRequestSchema(BaseModel):
    """Request body for ``/api/chat``; streaming is disabled by default."""
    model: str
    stream: bool = False
    options: OllamaOptionsSchema | None = None
    messages: list[OllamaMessageSchema]


class OllamaUsageSchema(BaseModel):
    """Token usage counters; either counter may be missing."""
    prompt_tokens: int | None = None
    completion_tokens: int | None = None

    @property
    def total_tokens(self) -> int | None:
        """Sum of both counters, or None if either is unknown."""
        if (self.prompt_tokens is not None) and (self.completion_tokens is not None):
            return self.prompt_tokens + self.completion_tokens

        return None


class OllamaChatResponseSchema(BaseModel):
    """Response body of ``/api/chat`` (non-streaming)."""
    done: bool = Field(default=True)
    usage: OllamaUsageSchema | None = None
    model: str
    message: OllamaMessageSchema

    @property
    def first_text(self) -> str:
        """Assistant message content, stripped; empty string if absent."""
        return (self.message.content or "").strip()
@@ -0,0 +1,8 @@
1
+ from typing import Protocol
2
+
3
+ from ai_review.clients.ollama.schema import OllamaChatRequestSchema, OllamaChatResponseSchema
4
+
5
+
6
class OllamaHTTPClientProtocol(Protocol):
    """Structural interface of the Ollama chat client."""

    async def chat(self, request: OllamaChatRequestSchema) -> OllamaChatResponseSchema:
        ...
@@ -1,6 +1,7 @@
1
1
  from httpx import Response, AsyncHTTPTransport, AsyncClient
2
2
 
3
3
  from ai_review.clients.openai.schema import OpenAIChatRequestSchema, OpenAIChatResponseSchema
4
+ from ai_review.clients.openai.types import OpenAIHTTPClientProtocol
4
5
  from ai_review.config import settings
5
6
  from ai_review.libs.http.client import HTTPClient
6
7
  from ai_review.libs.http.event_hooks.logger import LoggerEventHook
@@ -13,7 +14,7 @@ class OpenAIHTTPClientError(HTTPClientError):
13
14
  pass
14
15
 
15
16
 
16
- class OpenAIHTTPClient(HTTPClient):
17
+ class OpenAIHTTPClient(HTTPClient, OpenAIHTTPClientProtocol):
17
18
  @handle_http_error(client='OpenAIHTTPClient', exception=OpenAIHTTPClientError)
18
19
  async def chat_api(self, request: OpenAIChatRequestSchema) -> Response:
19
20
  return await self.post("/chat/completions", json=request.model_dump())
@@ -0,0 +1,8 @@
1
+ from typing import Protocol
2
+
3
+ from ai_review.clients.openai.schema import OpenAIChatRequestSchema, OpenAIChatResponseSchema
4
+
5
+
6
class OpenAIHTTPClientProtocol(Protocol):
    """Structural interface of the OpenAI chat client."""

    async def chat(self, request: OpenAIChatRequestSchema) -> OpenAIChatResponseSchema:
        ...
@@ -4,12 +4,15 @@ from pydantic import BaseModel, HttpUrl, SecretStr
4
4
  class HTTPClientConfig(BaseModel):
5
5
  timeout: float = 120
6
6
  api_url: HttpUrl
7
- api_token: SecretStr
8
7
 
9
8
  @property
10
9
  def api_url_value(self) -> str:
11
10
  return str(self.api_url)
12
11
 
12
+
13
+ class HTTPClientWithTokenConfig(HTTPClientConfig):
14
+ api_token: SecretStr
15
+
13
16
  @property
14
17
  def api_token_value(self) -> str:
15
18
  return self.api_token.get_secret_value()
@@ -6,6 +6,7 @@ from pydantic import BaseModel, Field, FilePath
6
6
 
7
7
  from ai_review.libs.config.llm.claude import ClaudeHTTPClientConfig, ClaudeMetaConfig
8
8
  from ai_review.libs.config.llm.gemini import GeminiHTTPClientConfig, GeminiMetaConfig
9
+ from ai_review.libs.config.llm.ollama import OllamaHTTPClientConfig, OllamaMetaConfig
9
10
  from ai_review.libs.config.llm.openai import OpenAIHTTPClientConfig, OpenAIMetaConfig
10
11
  from ai_review.libs.constants.llm_provider import LLMProvider
11
12
  from ai_review.libs.resources import load_resource
@@ -55,7 +56,13 @@ class ClaudeLLMConfig(LLMConfigBase):
55
56
  http_client: ClaudeHTTPClientConfig
56
57
 
57
58
 
59
+ class OllamaLLMConfig(LLMConfigBase):
60
+ meta: OllamaMetaConfig
61
+ provider: Literal[LLMProvider.OLLAMA]
62
+ http_client: OllamaHTTPClientConfig
63
+
64
+
58
65
  LLMConfig = Annotated[
59
- OpenAILLMConfig | GeminiLLMConfig | ClaudeLLMConfig,
66
+ OpenAILLMConfig | GeminiLLMConfig | ClaudeLLMConfig | OllamaLLMConfig,
60
67
  Field(discriminator="provider")
61
68
  ]
@@ -1,13 +1,10 @@
1
- from pydantic import BaseModel
1
+ from ai_review.libs.config.http import HTTPClientWithTokenConfig
2
+ from ai_review.libs.config.llm.meta import LLMMetaConfig
2
3
 
3
- from ai_review.libs.config.http import HTTPClientConfig
4
4
 
5
-
6
- class ClaudeMetaConfig(BaseModel):
5
+ class ClaudeMetaConfig(LLMMetaConfig):
7
6
  model: str = "claude-3-sonnet"
8
- max_tokens: int = 1200
9
- temperature: float = 0.3
10
7
 
11
8
 
12
- class ClaudeHTTPClientConfig(HTTPClientConfig):
9
+ class ClaudeHTTPClientConfig(HTTPClientWithTokenConfig):
13
10
  api_version: str = "2023-06-01"
@@ -1,13 +1,10 @@
1
- from pydantic import BaseModel
1
+ from ai_review.libs.config.http import HTTPClientWithTokenConfig
2
+ from ai_review.libs.config.llm.meta import LLMMetaConfig
2
3
 
3
- from ai_review.libs.config.http import HTTPClientConfig
4
4
 
5
-
6
- class GeminiMetaConfig(BaseModel):
5
+ class GeminiMetaConfig(LLMMetaConfig):
7
6
  model: str = "gemini-2.0-pro"
8
- max_tokens: int = 1200
9
- temperature: float = 0.3
10
7
 
11
8
 
12
- class GeminiHTTPClientConfig(HTTPClientConfig):
9
+ class GeminiHTTPClientConfig(HTTPClientWithTokenConfig):
13
10
  pass
@@ -0,0 +1,7 @@
1
+ from pydantic import BaseModel, Field
2
+
3
+
4
class LLMMetaConfig(BaseModel):
    """Common model/generation settings shared by all LLM providers."""
    model: str
    # Constrained so invalid values fail at config-load time.
    max_tokens: int = Field(default=5000, ge=1)
    temperature: float = Field(default=0.3, ge=0.0, le=2.0)
@@ -0,0 +1,14 @@
1
from pydantic import Field

from ai_review.libs.config.http import HTTPClientConfig
from ai_review.libs.config.llm.meta import LLMMetaConfig


class OllamaMetaConfig(LLMMetaConfig):
    """Ollama model/generation settings (extends the shared LLM meta config)."""
    stop: list[str] | None = None
    seed: int | None = None
    model: str = "llama2"
    # CONSISTENCY: constrained to the same ranges OllamaOptionsSchema
    # enforces, so invalid values fail at config load rather than when
    # the chat request is built.
    top_p: float | None = Field(default=None, ge=0.0, le=1.0)
    repeat_penalty: float | None = Field(default=None, ge=0.0)


class OllamaHTTPClientConfig(HTTPClientConfig):
    """HTTP settings without an API token — no auth token is configured for Ollama."""
    pass
@@ -1,13 +1,10 @@
1
- from pydantic import BaseModel
1
+ from ai_review.libs.config.http import HTTPClientWithTokenConfig
2
+ from ai_review.libs.config.llm.meta import LLMMetaConfig
2
3
 
3
- from ai_review.libs.config.http import HTTPClientConfig
4
4
 
5
-
6
- class OpenAIMetaConfig(BaseModel):
5
+ class OpenAIMetaConfig(LLMMetaConfig):
7
6
  model: str = "gpt-4o-mini"
8
- max_tokens: int = 1200
9
- temperature: float = 0.3
10
7
 
11
8
 
12
- class OpenAIHTTPClientConfig(HTTPClientConfig):
9
+ class OpenAIHTTPClientConfig(HTTPClientWithTokenConfig):
13
10
  pass
@@ -2,6 +2,7 @@ from typing import Annotated, Literal
2
2
 
3
3
  from pydantic import BaseModel, Field
4
4
 
5
+ from ai_review.libs.config.vcs.bitbucket import BitbucketPipelineConfig, BitbucketHTTPClientConfig
5
6
  from ai_review.libs.config.vcs.github import GitHubPipelineConfig, GitHubHTTPClientConfig
6
7
  from ai_review.libs.config.vcs.gitlab import GitLabPipelineConfig, GitLabHTTPClientConfig
7
8
  from ai_review.libs.constants.vcs_provider import VCSProvider
@@ -23,4 +24,13 @@ class GitHubVCSConfig(VCSConfigBase):
23
24
  http_client: GitHubHTTPClientConfig
24
25
 
25
26
 
26
- VCSConfig = Annotated[GitLabVCSConfig | GitHubVCSConfig, Field(discriminator="provider")]
27
+ class BitbucketVCSConfig(VCSConfigBase):
28
+ provider: Literal[VCSProvider.BITBUCKET]
29
+ pipeline: BitbucketPipelineConfig
30
+ http_client: BitbucketHTTPClientConfig
31
+
32
+
33
+ VCSConfig = Annotated[
34
+ GitLabVCSConfig | GitHubVCSConfig | BitbucketVCSConfig,
35
+ Field(discriminator="provider")
36
+ ]
@@ -0,0 +1,13 @@
1
+ from pydantic import BaseModel
2
+
3
+ from ai_review.libs.config.http import HTTPClientWithTokenConfig
4
+
5
+
6
class BitbucketPipelineConfig(BaseModel):
    """Identifies the PR under review: workspace, repo slug and PR id."""
    workspace: str
    repo_slug: str
    pull_request_id: str


class BitbucketHTTPClientConfig(HTTPClientWithTokenConfig):
    """HTTP settings for Bitbucket (token-authenticated)."""
    pass
@@ -1,6 +1,6 @@
1
1
  from pydantic import BaseModel
2
2
 
3
- from ai_review.libs.config.http import HTTPClientConfig
3
+ from ai_review.libs.config.http import HTTPClientWithTokenConfig
4
4
 
5
5
 
6
6
  class GitHubPipelineConfig(BaseModel):
@@ -9,5 +9,5 @@ class GitHubPipelineConfig(BaseModel):
9
9
  pull_number: str
10
10
 
11
11
 
12
- class GitHubHTTPClientConfig(HTTPClientConfig):
12
+ class GitHubHTTPClientConfig(HTTPClientWithTokenConfig):
13
13
  pass
@@ -1,6 +1,6 @@
1
1
  from pydantic import BaseModel
2
2
 
3
- from ai_review.libs.config.http import HTTPClientConfig
3
+ from ai_review.libs.config.http import HTTPClientWithTokenConfig
4
4
 
5
5
 
6
6
  class GitLabPipelineConfig(BaseModel):
@@ -8,5 +8,5 @@ class GitLabPipelineConfig(BaseModel):
8
8
  merge_request_id: str
9
9
 
10
10
 
11
- class GitLabHTTPClientConfig(HTTPClientConfig):
11
+ class GitLabHTTPClientConfig(HTTPClientWithTokenConfig):
12
12
  pass