xai-review 0.24.0__py3-none-any.whl → 0.26.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of xai-review might be problematic.

Files changed (64)
  1. ai_review/clients/bitbucket/__init__.py +0 -0
  2. ai_review/clients/bitbucket/client.py +31 -0
  3. ai_review/clients/bitbucket/pr/__init__.py +0 -0
  4. ai_review/clients/bitbucket/pr/client.py +104 -0
  5. ai_review/clients/bitbucket/pr/schema/__init__.py +0 -0
  6. ai_review/clients/bitbucket/pr/schema/comments.py +44 -0
  7. ai_review/clients/bitbucket/pr/schema/files.py +25 -0
  8. ai_review/clients/bitbucket/pr/schema/pull_request.py +38 -0
  9. ai_review/clients/bitbucket/pr/types.py +44 -0
  10. ai_review/clients/claude/client.py +2 -1
  11. ai_review/clients/claude/types.py +8 -0
  12. ai_review/clients/gemini/client.py +2 -1
  13. ai_review/clients/gemini/types.py +8 -0
  14. ai_review/clients/ollama/__init__.py +0 -0
  15. ai_review/clients/ollama/client.py +41 -0
  16. ai_review/clients/ollama/schema.py +47 -0
  17. ai_review/clients/ollama/types.py +8 -0
  18. ai_review/clients/openai/client.py +2 -1
  19. ai_review/clients/openai/types.py +8 -0
  20. ai_review/libs/config/http.py +4 -1
  21. ai_review/libs/config/llm/base.py +8 -1
  22. ai_review/libs/config/llm/claude.py +4 -7
  23. ai_review/libs/config/llm/gemini.py +4 -7
  24. ai_review/libs/config/llm/meta.py +7 -0
  25. ai_review/libs/config/llm/ollama.py +14 -0
  26. ai_review/libs/config/llm/openai.py +4 -7
  27. ai_review/libs/config/vcs/base.py +11 -1
  28. ai_review/libs/config/vcs/bitbucket.py +13 -0
  29. ai_review/libs/config/vcs/github.py +2 -2
  30. ai_review/libs/config/vcs/gitlab.py +2 -2
  31. ai_review/libs/constants/llm_provider.py +1 -0
  32. ai_review/libs/constants/vcs_provider.py +1 -0
  33. ai_review/services/llm/factory.py +3 -0
  34. ai_review/services/llm/ollama/__init__.py +0 -0
  35. ai_review/services/llm/ollama/client.py +34 -0
  36. ai_review/services/vcs/bitbucket/__init__.py +0 -0
  37. ai_review/services/vcs/bitbucket/client.py +185 -0
  38. ai_review/services/vcs/factory.py +3 -0
  39. ai_review/tests/fixtures/clients/bitbucket.py +204 -0
  40. ai_review/tests/fixtures/clients/claude.py +45 -0
  41. ai_review/tests/fixtures/clients/gemini.py +52 -0
  42. ai_review/tests/fixtures/clients/ollama.py +65 -0
  43. ai_review/tests/fixtures/clients/openai.py +48 -0
  44. ai_review/tests/suites/clients/ollama/__init__.py +0 -0
  45. ai_review/tests/suites/clients/ollama/test_client.py +12 -0
  46. ai_review/tests/suites/clients/ollama/test_schema.py +65 -0
  47. ai_review/tests/suites/services/llm/claude/__init__.py +0 -0
  48. ai_review/tests/suites/services/llm/claude/test_client.py +22 -0
  49. ai_review/tests/suites/services/llm/gemini/__init__.py +0 -0
  50. ai_review/tests/suites/services/llm/gemini/test_client.py +22 -0
  51. ai_review/tests/suites/services/llm/ollama/__init__.py +0 -0
  52. ai_review/tests/suites/services/llm/ollama/test_client.py +22 -0
  53. ai_review/tests/suites/services/llm/openai/__init__.py +0 -0
  54. ai_review/tests/suites/services/llm/openai/test_client.py +22 -0
  55. ai_review/tests/suites/services/llm/test_factory.py +8 -1
  56. ai_review/tests/suites/services/vcs/bitbucket/__init__.py +0 -0
  57. ai_review/tests/suites/services/vcs/bitbucket/test_service.py +117 -0
  58. ai_review/tests/suites/services/vcs/test_factory.py +8 -1
  59. {xai_review-0.24.0.dist-info → xai_review-0.26.0.dist-info}/METADATA +10 -6
  60. {xai_review-0.24.0.dist-info → xai_review-0.26.0.dist-info}/RECORD +64 -26
  61. {xai_review-0.24.0.dist-info → xai_review-0.26.0.dist-info}/WHEEL +0 -0
  62. {xai_review-0.24.0.dist-info → xai_review-0.26.0.dist-info}/entry_points.txt +0 -0
  63. {xai_review-0.24.0.dist-info → xai_review-0.26.0.dist-info}/licenses/LICENSE +0 -0
  64. {xai_review-0.24.0.dist-info → xai_review-0.26.0.dist-info}/top_level.txt +0 -0
--- a/ai_review/libs/constants/llm_provider.py
+++ b/ai_review/libs/constants/llm_provider.py
@@ -5,3 +5,4 @@ class LLMProvider(StrEnum):
     OPENAI = "OPENAI"
     GEMINI = "GEMINI"
     CLAUDE = "CLAUDE"
+    OLLAMA = "OLLAMA"
--- a/ai_review/libs/constants/vcs_provider.py
+++ b/ai_review/libs/constants/vcs_provider.py
@@ -4,3 +4,4 @@ from enum import StrEnum
 class VCSProvider(StrEnum):
     GITHUB = "GITHUB"
     GITLAB = "GITLAB"
+    BITBUCKET = "BITBUCKET"
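
Note: both constants are StrEnum subclasses (Python 3.11+), so the new members compare equal to their plain string values and can be looked up by value; this is standard-library behaviour, sketched below rather than copied from the package.

    from enum import StrEnum

    class VCSProvider(StrEnum):
        GITHUB = "GITHUB"
        GITLAB = "GITLAB"
        BITBUCKET = "BITBUCKET"  # added in 0.26.0

    # String config values map directly onto enum members.
    assert VCSProvider("BITBUCKET") is VCSProvider.BITBUCKET
    assert VCSProvider.BITBUCKET == "BITBUCKET"
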
--- a/ai_review/services/llm/factory.py
+++ b/ai_review/services/llm/factory.py
@@ -2,6 +2,7 @@ from ai_review.config import settings
 from ai_review.libs.constants.llm_provider import LLMProvider
 from ai_review.services.llm.claude.client import ClaudeLLMClient
 from ai_review.services.llm.gemini.client import GeminiLLMClient
+from ai_review.services.llm.ollama.client import OllamaLLMClient
 from ai_review.services.llm.openai.client import OpenAILLMClient
 from ai_review.services.llm.types import LLMClientProtocol

@@ -14,5 +15,7 @@ def get_llm_client() -> LLMClientProtocol:
             return GeminiLLMClient()
         case LLMProvider.CLAUDE:
             return ClaudeLLMClient()
+        case LLMProvider.OLLAMA:
+            return OllamaLLMClient()
         case _:
             raise ValueError(f"Unsupported LLM provider: {settings.llm.provider}")
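
Note: provider dispatch stays a single match statement, so Ollama support is one import plus one case. A minimal sketch of the resulting behaviour, assuming settings.llm.provider has been configured as OLLAMA (only the names visible in these hunks are real):

    from ai_review.services.llm.factory import get_llm_client
    from ai_review.services.llm.ollama.client import OllamaLLMClient

    # With settings.llm.provider == LLMProvider.OLLAMA the factory returns
    # the new client; any provider without a case still raises ValueError.
    client = get_llm_client()
    assert isinstance(client, OllamaLLMClient)
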
File without changes: ai_review/services/llm/ollama/__init__.py (new, empty)
--- /dev/null
+++ b/ai_review/services/llm/ollama/client.py
@@ -0,0 +1,34 @@
+from ai_review.clients.ollama.client import get_ollama_http_client
+from ai_review.clients.ollama.schema import OllamaChatRequestSchema, OllamaMessageSchema, OllamaOptionsSchema
+from ai_review.config import settings
+from ai_review.services.llm.types import LLMClientProtocol, ChatResultSchema
+
+
+class OllamaLLMClient(LLMClientProtocol):
+    def __init__(self):
+        self.http_client = get_ollama_http_client()
+
+    async def chat(self, prompt: str, prompt_system: str) -> ChatResultSchema:
+        meta = settings.llm.meta
+        request = OllamaChatRequestSchema(
+            model=meta.model,
+            options=OllamaOptionsSchema(
+                stop=meta.stop,
+                seed=meta.seed,
+                top_p=meta.top_p,
+                temperature=meta.temperature,
+                num_predict=meta.max_tokens,
+                repeat_penalty=meta.repeat_penalty,
+            ),
+            messages=[
+                OllamaMessageSchema(role="system", content=prompt_system),
+                OllamaMessageSchema(role="user", content=prompt),
+            ],
+        )
+        response = await self.http_client.chat(request)
+        return ChatResultSchema(
+            text=response.first_text,
+            total_tokens=response.usage.total_tokens if response.usage else None,
+            prompt_tokens=response.usage.prompt_tokens if response.usage else None,
+            completion_tokens=response.usage.completion_tokens if response.usage else None,
+        )
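
Note: a hedged usage sketch for the new client. The prompts are invented, and it assumes settings.llm already points at an Ollama configuration (the ollama_http_client_config fixture at the end of this diff builds one) and a reachable Ollama server:

    import asyncio

    from ai_review.services.llm.ollama.client import OllamaLLMClient

    async def main() -> None:
        client = OllamaLLMClient()
        # chat() maps the meta config onto Ollama options and normalizes
        # the response into ChatResultSchema (text plus token counts).
        result = await client.chat(
            prompt="Summarize the changes in this pull request.",
            prompt_system="You are a strict code reviewer.",
        )
        print(result.text, result.total_tokens)

    asyncio.run(main())
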
File without changes: ai_review/services/vcs/bitbucket/__init__.py (new, empty)
--- /dev/null
+++ b/ai_review/services/vcs/bitbucket/client.py
@@ -0,0 +1,185 @@
+from ai_review.clients.bitbucket.client import get_bitbucket_http_client
+from ai_review.clients.bitbucket.pr.schema.comments import (
+    BitbucketCommentInlineSchema,
+    BitbucketCommentContentSchema,
+    BitbucketCreatePRCommentRequestSchema,
+)
+from ai_review.config import settings
+from ai_review.libs.logger import get_logger
+from ai_review.services.vcs.types import (
+    VCSClientProtocol,
+    UserSchema,
+    BranchRefSchema,
+    ReviewInfoSchema,
+    ReviewCommentSchema,
+)
+
+logger = get_logger("BITBUCKET_VCS_CLIENT")
+
+
+class BitbucketVCSClient(VCSClientProtocol):
+    def __init__(self):
+        self.http_client = get_bitbucket_http_client()
+        self.workspace = settings.vcs.pipeline.workspace
+        self.repo_slug = settings.vcs.pipeline.repo_slug
+        self.pull_request_id = settings.vcs.pipeline.pull_request_id
+
+    async def get_review_info(self) -> ReviewInfoSchema:
+        try:
+            pr = await self.http_client.pr.get_pull_request(
+                workspace=self.workspace,
+                repo_slug=self.repo_slug,
+                pull_request_id=self.pull_request_id,
+            )
+            files = await self.http_client.pr.get_files(
+                workspace=self.workspace,
+                repo_slug=self.repo_slug,
+                pull_request_id=self.pull_request_id,
+            )
+
+            logger.info(f"Fetched PR info for {self.workspace}/{self.repo_slug}#{self.pull_request_id}")
+
+            return ReviewInfoSchema(
+                id=pr.id,
+                title=pr.title,
+                description=pr.description or "",
+                author=UserSchema(
+                    id=pr.author.uuid,
+                    name=pr.author.display_name,
+                    username=pr.author.nickname,
+                ),
+                labels=[],
+                base_sha=pr.destination.commit.hash,
+                head_sha=pr.source.commit.hash,
+                assignees=[
+                    UserSchema(
+                        id=user.uuid,
+                        name=user.display_name,
+                        username=user.nickname,
+                    )
+                    for user in pr.participants
+                ],
+                reviewers=[
+                    UserSchema(
+                        id=user.uuid,
+                        name=user.display_name,
+                        username=user.nickname,
+                    )
+                    for user in pr.reviewers
+                ],
+                source_branch=BranchRefSchema(
+                    ref=pr.source.branch.name,
+                    sha=pr.source.commit.hash,
+                ),
+                target_branch=BranchRefSchema(
+                    ref=pr.destination.branch.name,
+                    sha=pr.destination.commit.hash,
+                ),
+                changed_files=[
+                    file.new.path if file.new else file.old.path
+                    for file in files.values
+                ],
+            )
+        except Exception as error:
+            logger.exception(
+                f"Failed to fetch PR info {self.workspace}/{self.repo_slug}#{self.pull_request_id}: {error}"
+            )
+            return ReviewInfoSchema()
+
+    async def get_general_comments(self) -> list[ReviewCommentSchema]:
+        try:
+            response = await self.http_client.pr.get_comments(
+                workspace=self.workspace,
+                repo_slug=self.repo_slug,
+                pull_request_id=self.pull_request_id,
+            )
+            logger.info(f"Fetched general comments for {self.workspace}/{self.repo_slug}#{self.pull_request_id}")
+
+            return [
+                ReviewCommentSchema(id=comment.id, body=comment.content.raw)
+                for comment in response.values
+                if comment.inline is None
+            ]
+        except Exception as error:
+            logger.exception(
+                f"Failed to fetch general comments for "
+                f"{self.workspace}/{self.repo_slug}#{self.pull_request_id}: {error}"
+            )
+            return []
+
+    async def get_inline_comments(self) -> list[ReviewCommentSchema]:
+        try:
+            response = await self.http_client.pr.get_comments(
+                workspace=self.workspace,
+                repo_slug=self.repo_slug,
+                pull_request_id=self.pull_request_id,
+            )
+            logger.info(f"Fetched inline comments for {self.workspace}/{self.repo_slug}#{self.pull_request_id}")
+
+            return [
+                ReviewCommentSchema(
+                    id=comment.id,
+                    body=comment.content.raw,
+                    file=comment.inline.path,
+                    line=comment.inline.to_line,
+                )
+                for comment in response.values
+                if comment.inline is not None
+            ]
+        except Exception as error:
+            logger.exception(
+                f"Failed to fetch inline comments for "
+                f"{self.workspace}/{self.repo_slug}#{self.pull_request_id}: {error}"
+            )
+            return []
+
+    async def create_general_comment(self, message: str) -> None:
+        try:
+            logger.info(
+                f"Posting general comment to PR {self.workspace}/{self.repo_slug}#{self.pull_request_id}: {message}"
+            )
+            request = BitbucketCreatePRCommentRequestSchema(
+                content=BitbucketCommentContentSchema(raw=message)
+            )
+            await self.http_client.pr.create_comment(
+                workspace=self.workspace,
+                repo_slug=self.repo_slug,
+                pull_request_id=self.pull_request_id,
+                request=request,
+            )
+            logger.info(
+                f"Created general comment in PR {self.workspace}/{self.repo_slug}#{self.pull_request_id}"
+            )
+        except Exception as error:
+            logger.exception(
+                f"Failed to create general comment in PR "
+                f"{self.workspace}/{self.repo_slug}#{self.pull_request_id}: {error}"
+            )
+            raise
+
+    async def create_inline_comment(self, file: str, line: int, message: str) -> None:
+        try:
+            logger.info(
+                f"Posting inline comment in {self.workspace}/{self.repo_slug}#{self.pull_request_id} "
+                f"at {file}:{line}: {message}"
+            )
+            request = BitbucketCreatePRCommentRequestSchema(
+                content=BitbucketCommentContentSchema(raw=message),
+                inline=BitbucketCommentInlineSchema(path=file, to_line=line),
+            )
+            await self.http_client.pr.create_comment(
+                workspace=self.workspace,
+                repo_slug=self.repo_slug,
+                pull_request_id=self.pull_request_id,
+                request=request,
+            )
+            logger.info(
+                f"Created inline comment in {self.workspace}/{self.repo_slug}#{self.pull_request_id} "
+                f"at {file}:{line}"
+            )
+        except Exception as error:
+            logger.exception(
+                f"Failed to create inline comment in {self.workspace}/{self.repo_slug}#{self.pull_request_id} "
+                f"at {file}:{line}: {error}"
+            )
+            raise
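
Note: an end-to-end sketch, not package code. It assumes settings.vcs is populated with Bitbucket pipeline coordinates and credentials (as in the bitbucket_http_client_config fixture further down):

    import asyncio

    from ai_review.services.vcs.bitbucket.client import BitbucketVCSClient

    async def main() -> None:
        client = BitbucketVCSClient()
        info = await client.get_review_info()
        # Illustrative only: one general summary, then one inline note.
        await client.create_general_comment(f"Reviewed: {info.title}")
        if info.changed_files:
            await client.create_inline_comment(
                file=info.changed_files[0], line=1, message="Looks good.",
            )

    asyncio.run(main())
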
--- a/ai_review/services/vcs/factory.py
+++ b/ai_review/services/vcs/factory.py
@@ -1,5 +1,6 @@
 from ai_review.config import settings
 from ai_review.libs.constants.vcs_provider import VCSProvider
+from ai_review.services.vcs.bitbucket.client import BitbucketVCSClient
 from ai_review.services.vcs.github.client import GitHubVCSClient
 from ai_review.services.vcs.gitlab.client import GitLabVCSClient
 from ai_review.services.vcs.types import VCSClientProtocol
@@ -11,5 +12,7 @@ def get_vcs_client() -> VCSClientProtocol:
             return GitLabVCSClient()
         case VCSProvider.GITHUB:
             return GitHubVCSClient()
+        case VCSProvider.BITBUCKET:
+            return BitbucketVCSClient()
         case _:
             raise ValueError(f"Unsupported VCS provider: {settings.vcs.provider}")
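
Note: the VCS factory mirrors the LLM one, and the +8 -1 change to tests/suites/services/vcs/test_factory.py plausibly asserts the new branch along these lines (hypothetical test body; only the fixture name comes from this diff):

    from ai_review.services.vcs.bitbucket.client import BitbucketVCSClient
    from ai_review.services.vcs.factory import get_vcs_client

    def test_get_vcs_client_returns_bitbucket(bitbucket_http_client_config):
        # The fixture patches settings.vcs with provider=BITBUCKET, so the
        # match statement selects the new case.
        assert isinstance(get_vcs_client(), BitbucketVCSClient)
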
--- /dev/null
+++ b/ai_review/tests/fixtures/clients/bitbucket.py
@@ -0,0 +1,204 @@
+import pytest
+from pydantic import HttpUrl, SecretStr
+
+from ai_review.clients.bitbucket.pr.schema.comments import (
+    BitbucketPRCommentSchema,
+    BitbucketCommentContentSchema,
+    BitbucketCommentInlineSchema,
+    BitbucketGetPRCommentsResponseSchema,
+    BitbucketCreatePRCommentRequestSchema,
+    BitbucketCreatePRCommentResponseSchema,
+)
+from ai_review.clients.bitbucket.pr.schema.files import (
+    BitbucketGetPRFilesResponseSchema,
+    BitbucketPRFileSchema,
+    BitbucketPRFilePathSchema,
+)
+from ai_review.clients.bitbucket.pr.schema.pull_request import (
+    BitbucketUserSchema,
+    BitbucketBranchSchema,
+    BitbucketCommitSchema,
+    BitbucketRepositorySchema,
+    BitbucketPRLocationSchema,
+    BitbucketGetPRResponseSchema,
+)
+from ai_review.clients.bitbucket.pr.types import BitbucketPullRequestsHTTPClientProtocol
+from ai_review.config import settings
+from ai_review.libs.config.vcs.base import BitbucketVCSConfig
+from ai_review.libs.config.vcs.bitbucket import BitbucketPipelineConfig, BitbucketHTTPClientConfig
+from ai_review.libs.constants.vcs_provider import VCSProvider
+from ai_review.services.vcs.bitbucket.client import BitbucketVCSClient
+
+
+class FakeBitbucketPullRequestsHTTPClient(BitbucketPullRequestsHTTPClientProtocol):
+    def __init__(self):
+        self.calls: list[tuple[str, dict]] = []
+
+    async def get_pull_request(
+        self,
+        workspace: str,
+        repo_slug: str,
+        pull_request_id: str
+    ) -> BitbucketGetPRResponseSchema:
+        self.calls.append(
+            (
+                "get_pull_request",
+                {"workspace": workspace, "repo_slug": repo_slug, "pull_request_id": pull_request_id}
+            )
+        )
+        return BitbucketGetPRResponseSchema(
+            id=1,
+            title="Fake Bitbucket PR",
+            description="This is a fake PR for testing",
+            state="OPEN",
+            author=BitbucketUserSchema(uuid="u1", display_name="Tester", nickname="tester"),
+            source=BitbucketPRLocationSchema(
+                commit=BitbucketCommitSchema(hash="def456"),
+                branch=BitbucketBranchSchema(name="feature/test"),
+                repository=BitbucketRepositorySchema(uuid="r1", full_name="workspace/repo"),
+            ),
+            destination=BitbucketPRLocationSchema(
+                commit=BitbucketCommitSchema(hash="abc123"),
+                branch=BitbucketBranchSchema(name="main"),
+                repository=BitbucketRepositorySchema(uuid="r1", full_name="workspace/repo"),
+            ),
+            reviewers=[BitbucketUserSchema(uuid="u2", display_name="Reviewer", nickname="reviewer")],
+            participants=[BitbucketUserSchema(uuid="u3", display_name="Participant", nickname="participant")],
+        )
+
+    async def get_files(
+        self,
+        workspace: str,
+        repo_slug: str,
+        pull_request_id: str
+    ) -> BitbucketGetPRFilesResponseSchema:
+        self.calls.append(
+            (
+                "get_files",
+                {"workspace": workspace, "repo_slug": repo_slug, "pull_request_id": pull_request_id}
+            )
+        )
+        return BitbucketGetPRFilesResponseSchema(
+            size=2,
+            page=1,
+            pagelen=100,
+            next=None,
+            values=[
+                BitbucketPRFileSchema(
+                    new=BitbucketPRFilePathSchema(path="app/main.py"),
+                    old=None,
+                    status="modified",
+                    lines_added=10,
+                    lines_removed=2,
+                ),
+                BitbucketPRFileSchema(
+                    new=BitbucketPRFilePathSchema(path="utils/helper.py"),
+                    old=None,
+                    status="added",
+                    lines_added=5,
+                    lines_removed=0,
+                ),
+            ],
+        )
+
+    async def get_comments(
+        self,
+        workspace: str,
+        repo_slug: str,
+        pull_request_id: str
+    ) -> BitbucketGetPRCommentsResponseSchema:
+        self.calls.append(
+            (
+                "get_comments",
+                {"workspace": workspace, "repo_slug": repo_slug, "pull_request_id": pull_request_id}
+            )
+        )
+        return BitbucketGetPRCommentsResponseSchema(
+            size=2,
+            page=1,
+            next=None,
+            values=[
+                BitbucketPRCommentSchema(
+                    id=1,
+                    inline=None,
+                    content=BitbucketCommentContentSchema(raw="General comment"),
+                ),
+                BitbucketPRCommentSchema(
+                    id=2,
+                    inline=BitbucketCommentInlineSchema(path="file.py", to_line=5),
+                    content=BitbucketCommentContentSchema(raw="Inline comment"),
+                ),
+            ],
+            pagelen=100,
+        )
+
+    async def create_comment(
+        self,
+        workspace: str,
+        repo_slug: str,
+        pull_request_id: str,
+        request: BitbucketCreatePRCommentRequestSchema
+    ) -> BitbucketCreatePRCommentResponseSchema:
+        self.calls.append(
+            (
+                "create_comment",
+                {
+                    "workspace": workspace,
+                    "repo_slug": repo_slug,
+                    "pull_request_id": pull_request_id,
+                    **request.model_dump(by_alias=True)
+                }
+            )
+        )
+        return BitbucketCreatePRCommentResponseSchema(
+            id=10,
+            content=request.content,
+            inline=request.inline,
+        )
+
+
+class FakeBitbucketHTTPClient:
+    def __init__(self, pull_requests_client: BitbucketPullRequestsHTTPClientProtocol):
+        self.pr = pull_requests_client
+
+
+@pytest.fixture
+def fake_bitbucket_pull_requests_http_client() -> FakeBitbucketPullRequestsHTTPClient:
+    return FakeBitbucketPullRequestsHTTPClient()
+
+
+@pytest.fixture
+def fake_bitbucket_http_client(
+    fake_bitbucket_pull_requests_http_client: FakeBitbucketPullRequestsHTTPClient
+) -> FakeBitbucketHTTPClient:
+    return FakeBitbucketHTTPClient(pull_requests_client=fake_bitbucket_pull_requests_http_client)
+
+
+@pytest.fixture
+def bitbucket_vcs_client(
+    monkeypatch: pytest.MonkeyPatch,
+    fake_bitbucket_http_client: FakeBitbucketHTTPClient
+) -> BitbucketVCSClient:
+    monkeypatch.setattr(
+        "ai_review.services.vcs.bitbucket.client.get_bitbucket_http_client",
+        lambda: fake_bitbucket_http_client,
+    )
+    return BitbucketVCSClient()
+
+
+@pytest.fixture
+def bitbucket_http_client_config(monkeypatch: pytest.MonkeyPatch):
+    fake_config = BitbucketVCSConfig(
+        provider=VCSProvider.BITBUCKET,
+        pipeline=BitbucketPipelineConfig(
+            workspace="workspace",
+            repo_slug="repo",
+            pull_request_id="123",
+        ),
+        http_client=BitbucketHTTPClientConfig(
+            timeout=10,
+            api_url=HttpUrl("https://api.bitbucket.org/2.0"),
+            api_token=SecretStr("fake-token"),
+        )
+    )
+    monkeypatch.setattr(settings, "vcs", fake_config)
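
Note: a hypothetical test built on these fixtures (assumes pytest-asyncio for the async test function; the shipped test_service.py assertions are not part of this diff):

    import pytest

    @pytest.mark.asyncio
    async def test_get_inline_comments(bitbucket_vcs_client):
        comments = await bitbucket_vcs_client.get_inline_comments()
        # The fake returns one general and one inline comment; only the
        # inline one survives the filter.
        assert len(comments) == 1
        assert comments[0].file == "file.py"
        assert comments[0].line == 5
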
--- a/ai_review/tests/fixtures/clients/claude.py
+++ b/ai_review/tests/fixtures/clients/claude.py
@@ -1,10 +1,55 @@
+from typing import Any
+
 import pytest
 from pydantic import HttpUrl, SecretStr

+from ai_review.clients.claude.schema import (
+    ClaudeUsageSchema,
+    ClaudeContentSchema,
+    ClaudeChatRequestSchema,
+    ClaudeChatResponseSchema,
+)
+from ai_review.clients.claude.types import ClaudeHTTPClientProtocol
 from ai_review.config import settings
 from ai_review.libs.config.llm.base import ClaudeLLMConfig
 from ai_review.libs.config.llm.claude import ClaudeMetaConfig, ClaudeHTTPClientConfig
 from ai_review.libs.constants.llm_provider import LLMProvider
+from ai_review.services.llm.claude.client import ClaudeLLMClient
+
+
+class FakeClaudeHTTPClient(ClaudeHTTPClientProtocol):
+    def __init__(self, responses: dict[str, Any] | None = None) -> None:
+        self.calls: list[tuple[str, dict]] = []
+        self.responses = responses or {}
+
+    async def chat(self, request: ClaudeChatRequestSchema) -> ClaudeChatResponseSchema:
+        self.calls.append(("chat", {"request": request}))
+        return self.responses.get(
+            "chat",
+            ClaudeChatResponseSchema(
+                id="fake-id",
+                role="assistant",
+                usage=ClaudeUsageSchema(input_tokens=5, output_tokens=7),
+                content=[ClaudeContentSchema(type="text", text="FAKE_CLAUDE_RESPONSE")],
+            ),
+        )
+
+
+@pytest.fixture
+def fake_claude_http_client():
+    return FakeClaudeHTTPClient()
+
+
+@pytest.fixture
+def claude_llm_client(
+    monkeypatch: pytest.MonkeyPatch,
+    fake_claude_http_client: FakeClaudeHTTPClient
+) -> ClaudeLLMClient:
+    monkeypatch.setattr(
+        "ai_review.services.llm.claude.client.get_claude_http_client",
+        lambda: fake_claude_http_client,
+    )
+    return ClaudeLLMClient()


 @pytest.fixture
--- a/ai_review/tests/fixtures/clients/gemini.py
+++ b/ai_review/tests/fixtures/clients/gemini.py
@@ -1,10 +1,62 @@
+from typing import Any
+
 import pytest
 from pydantic import HttpUrl, SecretStr

+from ai_review.clients.gemini.schema import (
+    GeminiPartSchema,
+    GeminiUsageSchema,
+    GeminiContentSchema,
+    GeminiCandidateSchema,
+    GeminiChatRequestSchema,
+    GeminiChatResponseSchema,
+)
+from ai_review.clients.gemini.types import GeminiHTTPClientProtocol
 from ai_review.config import settings
 from ai_review.libs.config.llm.base import GeminiLLMConfig
 from ai_review.libs.config.llm.gemini import GeminiMetaConfig, GeminiHTTPClientConfig
 from ai_review.libs.constants.llm_provider import LLMProvider
+from ai_review.services.llm.gemini.client import GeminiLLMClient
+
+
+class FakeGeminiHTTPClient(GeminiHTTPClientProtocol):
+    def __init__(self, responses: dict[str, Any] | None = None) -> None:
+        self.calls: list[tuple[str, dict]] = []
+        self.responses = responses or {}
+
+    async def chat(self, request: GeminiChatRequestSchema) -> GeminiChatResponseSchema:
+        self.calls.append(("chat", {"request": request}))
+        return self.responses.get(
+            "chat",
+            GeminiChatResponseSchema(
+                usage=GeminiUsageSchema(prompt_token_count=2, total_tokens_count=10),
+                candidates=[
+                    GeminiCandidateSchema(
+                        content=GeminiContentSchema(
+                            role="model",
+                            parts=[GeminiPartSchema(text="FAKE_GEMINI_RESPONSE")]
+                        )
+                    )
+                ],
+            ),
+        )
+
+
+@pytest.fixture
+def fake_gemini_http_client() -> FakeGeminiHTTPClient:
+    return FakeGeminiHTTPClient()
+
+
+@pytest.fixture
+def gemini_llm_client(
+    monkeypatch: pytest.MonkeyPatch,
+    fake_gemini_http_client: FakeGeminiHTTPClient
+) -> GeminiLLMClient:
+    monkeypatch.setattr(
+        "ai_review.services.llm.gemini.client.get_gemini_http_client",
+        lambda: fake_gemini_http_client,
+    )
+    return GeminiLLMClient()


 @pytest.fixture
--- /dev/null
+++ b/ai_review/tests/fixtures/clients/ollama.py
@@ -0,0 +1,65 @@
+from typing import Any
+
+import pytest
+from pydantic import HttpUrl
+
+from ai_review.clients.ollama.schema import (
+    OllamaUsageSchema,
+    OllamaMessageSchema,
+    OllamaChatRequestSchema,
+    OllamaChatResponseSchema,
+)
+from ai_review.clients.ollama.types import OllamaHTTPClientProtocol
+from ai_review.config import settings
+from ai_review.libs.config.llm.base import OllamaLLMConfig
+from ai_review.libs.config.llm.ollama import OllamaMetaConfig, OllamaHTTPClientConfig
+from ai_review.libs.constants.llm_provider import LLMProvider
+from ai_review.services.llm.ollama.client import OllamaLLMClient
+
+
+class FakeOllamaHTTPClient(OllamaHTTPClientProtocol):
+    def __init__(self, responses: dict[str, Any] | None = None) -> None:
+        self.calls: list[tuple[str, dict]] = []
+        self.responses = responses or {}
+
+    async def chat(self, request: OllamaChatRequestSchema) -> OllamaChatResponseSchema:
+        self.calls.append(("chat", {"request": request}))
+        return self.responses.get(
+            "chat",
+            OllamaChatResponseSchema(
+                done=True,
+                model="llama2",
+                usage=OllamaUsageSchema(prompt_tokens=3, completion_tokens=5),
+                message=OllamaMessageSchema(role="assistant", content="FAKE_OLLAMA_RESPONSE"),
+            ),
+        )
+
+
+@pytest.fixture
+def fake_ollama_http_client():
+    return FakeOllamaHTTPClient()
+
+
+@pytest.fixture
+def ollama_llm_client(
+    monkeypatch: pytest.MonkeyPatch,
+    fake_ollama_http_client: FakeOllamaHTTPClient
+) -> OllamaLLMClient:
+    monkeypatch.setattr(
+        "ai_review.services.llm.ollama.client.get_ollama_http_client",
+        lambda: fake_ollama_http_client,
+    )
+    return OllamaLLMClient()
+
+
+@pytest.fixture
+def ollama_http_client_config(monkeypatch: pytest.MonkeyPatch):
+    fake_config = OllamaLLMConfig(
+        meta=OllamaMetaConfig(),
+        provider=LLMProvider.OLLAMA,
+        http_client=OllamaHTTPClientConfig(
+            timeout=10,
+            api_url=HttpUrl("http://localhost:11434")
+        )
+    )
+    monkeypatch.setattr(settings, "llm", fake_config)
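
Note: a matching hypothetical test for the Ollama fixtures (same pytest-asyncio assumption; result.text relies on first_text resolving to the single fake message):

    import pytest

    @pytest.mark.asyncio
    async def test_ollama_chat(
        ollama_http_client_config, ollama_llm_client, fake_ollama_http_client
    ):
        result = await ollama_llm_client.chat(prompt="hi", prompt_system="sys")
        assert result.text == "FAKE_OLLAMA_RESPONSE"
        # The fake records every call for later inspection.
        assert fake_ollama_http_client.calls[0][0] == "chat"
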