xai-review 0.26.0__py3-none-any.whl → 0.28.0__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of xai-review might be problematic.
- ai_review/cli/commands/run_inline_reply_review.py +7 -0
- ai_review/cli/commands/run_summary_reply_review.py +7 -0
- ai_review/cli/main.py +17 -0
- ai_review/clients/bitbucket/pr/client.py +45 -8
- ai_review/clients/bitbucket/pr/schema/comments.py +21 -2
- ai_review/clients/bitbucket/pr/schema/files.py +8 -3
- ai_review/clients/bitbucket/pr/schema/pull_request.py +1 -5
- ai_review/clients/bitbucket/pr/schema/user.py +7 -0
- ai_review/clients/bitbucket/tools.py +6 -0
- ai_review/clients/github/pr/client.py +98 -13
- ai_review/clients/github/pr/schema/comments.py +23 -1
- ai_review/clients/github/pr/schema/files.py +2 -1
- ai_review/clients/github/pr/schema/pull_request.py +1 -4
- ai_review/clients/github/pr/schema/reviews.py +2 -1
- ai_review/clients/github/pr/schema/user.py +6 -0
- ai_review/clients/github/pr/types.py +11 -1
- ai_review/clients/github/tools.py +6 -0
- ai_review/clients/gitlab/mr/client.py +67 -7
- ai_review/clients/gitlab/mr/schema/changes.py +1 -5
- ai_review/clients/gitlab/mr/schema/discussions.py +19 -8
- ai_review/clients/gitlab/mr/schema/notes.py +5 -1
- ai_review/clients/gitlab/mr/schema/user.py +7 -0
- ai_review/clients/gitlab/mr/types.py +16 -7
- ai_review/clients/gitlab/tools.py +5 -0
- ai_review/libs/config/prompt.py +96 -64
- ai_review/libs/config/review.py +2 -0
- ai_review/libs/config/vcs/base.py +2 -0
- ai_review/libs/config/vcs/pagination.py +6 -0
- ai_review/libs/http/paginate.py +43 -0
- ai_review/libs/llm/output_json_parser.py +60 -0
- ai_review/prompts/default_inline_reply.md +10 -0
- ai_review/prompts/default_summary_reply.md +14 -0
- ai_review/prompts/default_system_inline_reply.md +31 -0
- ai_review/prompts/default_system_summary_reply.md +13 -0
- ai_review/services/artifacts/schema.py +2 -2
- ai_review/services/hook/constants.py +14 -0
- ai_review/services/hook/service.py +95 -4
- ai_review/services/hook/types.py +18 -2
- ai_review/services/prompt/adapter.py +1 -1
- ai_review/services/prompt/service.py +49 -3
- ai_review/services/prompt/tools.py +21 -0
- ai_review/services/prompt/types.py +23 -0
- ai_review/services/review/gateway/comment.py +45 -6
- ai_review/services/review/gateway/llm.py +2 -1
- ai_review/services/review/gateway/types.py +50 -0
- ai_review/services/review/internal/inline/service.py +40 -0
- ai_review/services/review/internal/inline/types.py +8 -0
- ai_review/services/review/internal/inline_reply/schema.py +23 -0
- ai_review/services/review/internal/inline_reply/service.py +20 -0
- ai_review/services/review/internal/inline_reply/types.py +8 -0
- ai_review/services/review/{policy → internal/policy}/service.py +2 -1
- ai_review/services/review/internal/policy/types.py +15 -0
- ai_review/services/review/{summary → internal/summary}/service.py +2 -2
- ai_review/services/review/{summary → internal/summary}/types.py +1 -1
- ai_review/services/review/internal/summary_reply/__init__.py +0 -0
- ai_review/services/review/internal/summary_reply/schema.py +8 -0
- ai_review/services/review/internal/summary_reply/service.py +15 -0
- ai_review/services/review/internal/summary_reply/types.py +8 -0
- ai_review/services/review/runner/__init__.py +0 -0
- ai_review/services/review/runner/context.py +72 -0
- ai_review/services/review/runner/inline.py +80 -0
- ai_review/services/review/runner/inline_reply.py +80 -0
- ai_review/services/review/runner/summary.py +71 -0
- ai_review/services/review/runner/summary_reply.py +79 -0
- ai_review/services/review/runner/types.py +6 -0
- ai_review/services/review/service.py +78 -110
- ai_review/services/vcs/bitbucket/adapter.py +24 -0
- ai_review/services/vcs/bitbucket/client.py +107 -42
- ai_review/services/vcs/github/adapter.py +35 -0
- ai_review/services/vcs/github/client.py +105 -44
- ai_review/services/vcs/gitlab/adapter.py +26 -0
- ai_review/services/vcs/gitlab/client.py +91 -38
- ai_review/services/vcs/types.py +34 -0
- ai_review/tests/fixtures/clients/bitbucket.py +2 -2
- ai_review/tests/fixtures/clients/github.py +35 -6
- ai_review/tests/fixtures/clients/gitlab.py +42 -3
- ai_review/tests/fixtures/libs/__init__.py +0 -0
- ai_review/tests/fixtures/libs/llm/__init__.py +0 -0
- ai_review/tests/fixtures/libs/llm/output_json_parser.py +13 -0
- ai_review/tests/fixtures/services/hook.py +8 -0
- ai_review/tests/fixtures/services/llm.py +8 -5
- ai_review/tests/fixtures/services/prompt.py +70 -0
- ai_review/tests/fixtures/services/review/base.py +41 -0
- ai_review/tests/fixtures/services/review/gateway/__init__.py +0 -0
- ai_review/tests/fixtures/services/review/gateway/comment.py +98 -0
- ai_review/tests/fixtures/services/review/gateway/llm.py +17 -0
- ai_review/tests/fixtures/services/review/internal/__init__.py +0 -0
- ai_review/tests/fixtures/services/review/{inline.py → internal/inline.py} +8 -6
- ai_review/tests/fixtures/services/review/internal/inline_reply.py +25 -0
- ai_review/tests/fixtures/services/review/internal/policy.py +28 -0
- ai_review/tests/fixtures/services/review/internal/summary.py +21 -0
- ai_review/tests/fixtures/services/review/internal/summary_reply.py +19 -0
- ai_review/tests/fixtures/services/review/runner/__init__.py +0 -0
- ai_review/tests/fixtures/services/review/runner/context.py +50 -0
- ai_review/tests/fixtures/services/review/runner/inline.py +50 -0
- ai_review/tests/fixtures/services/review/runner/inline_reply.py +50 -0
- ai_review/tests/fixtures/services/review/runner/summary.py +50 -0
- ai_review/tests/fixtures/services/review/runner/summary_reply.py +50 -0
- ai_review/tests/fixtures/services/vcs.py +23 -0
- ai_review/tests/suites/cli/__init__.py +0 -0
- ai_review/tests/suites/cli/test_main.py +54 -0
- ai_review/tests/suites/clients/bitbucket/__init__.py +0 -0
- ai_review/tests/suites/clients/bitbucket/test_client.py +14 -0
- ai_review/tests/suites/clients/bitbucket/test_tools.py +31 -0
- ai_review/tests/suites/clients/github/test_tools.py +31 -0
- ai_review/tests/suites/clients/gitlab/test_tools.py +26 -0
- ai_review/tests/suites/libs/config/test_prompt.py +108 -28
- ai_review/tests/suites/libs/http/__init__.py +0 -0
- ai_review/tests/suites/libs/http/test_paginate.py +95 -0
- ai_review/tests/suites/libs/llm/__init__.py +0 -0
- ai_review/tests/suites/libs/llm/test_output_json_parser.py +155 -0
- ai_review/tests/suites/services/hook/test_service.py +88 -4
- ai_review/tests/suites/services/prompt/test_adapter.py +3 -3
- ai_review/tests/suites/services/prompt/test_service.py +102 -58
- ai_review/tests/suites/services/prompt/test_tools.py +86 -1
- ai_review/tests/suites/services/review/gateway/__init__.py +0 -0
- ai_review/tests/suites/services/review/gateway/test_comment.py +253 -0
- ai_review/tests/suites/services/review/gateway/test_llm.py +82 -0
- ai_review/tests/suites/services/review/internal/__init__.py +0 -0
- ai_review/tests/suites/services/review/internal/inline/__init__.py +0 -0
- ai_review/tests/suites/services/review/{inline → internal/inline}/test_schema.py +1 -1
- ai_review/tests/suites/services/review/internal/inline/test_service.py +81 -0
- ai_review/tests/suites/services/review/internal/inline_reply/__init__.py +0 -0
- ai_review/tests/suites/services/review/internal/inline_reply/test_schema.py +57 -0
- ai_review/tests/suites/services/review/internal/inline_reply/test_service.py +72 -0
- ai_review/tests/suites/services/review/internal/policy/__init__.py +0 -0
- ai_review/tests/suites/services/review/{policy → internal/policy}/test_service.py +1 -1
- ai_review/tests/suites/services/review/internal/summary/__init__.py +0 -0
- ai_review/tests/suites/services/review/{summary → internal/summary}/test_schema.py +1 -1
- ai_review/tests/suites/services/review/{summary → internal/summary}/test_service.py +2 -2
- ai_review/tests/suites/services/review/internal/summary_reply/__init__.py +0 -0
- ai_review/tests/suites/services/review/internal/summary_reply/test_schema.py +19 -0
- ai_review/tests/suites/services/review/internal/summary_reply/test_service.py +21 -0
- ai_review/tests/suites/services/review/runner/__init__.py +0 -0
- ai_review/tests/suites/services/review/runner/test_context.py +89 -0
- ai_review/tests/suites/services/review/runner/test_inline.py +100 -0
- ai_review/tests/suites/services/review/runner/test_inline_reply.py +109 -0
- ai_review/tests/suites/services/review/runner/test_summary.py +87 -0
- ai_review/tests/suites/services/review/runner/test_summary_reply.py +97 -0
- ai_review/tests/suites/services/review/test_service.py +64 -97
- ai_review/tests/suites/services/vcs/bitbucket/test_adapter.py +109 -0
- ai_review/tests/suites/services/vcs/bitbucket/{test_service.py → test_client.py} +88 -1
- ai_review/tests/suites/services/vcs/github/test_adapter.py +162 -0
- ai_review/tests/suites/services/vcs/github/{test_service.py → test_client.py} +102 -2
- ai_review/tests/suites/services/vcs/gitlab/test_adapter.py +105 -0
- ai_review/tests/suites/services/vcs/gitlab/{test_service.py → test_client.py} +99 -1
- {xai_review-0.26.0.dist-info → xai_review-0.28.0.dist-info}/METADATA +8 -5
- {xai_review-0.26.0.dist-info → xai_review-0.28.0.dist-info}/RECORD +160 -75
- ai_review/services/review/inline/service.py +0 -54
- ai_review/services/review/inline/types.py +0 -11
- ai_review/tests/fixtures/services/review/summary.py +0 -19
- ai_review/tests/suites/services/review/inline/test_service.py +0 -107
- /ai_review/{services/review/inline → libs/llm}/__init__.py +0 -0
- /ai_review/services/review/{policy → internal}/__init__.py +0 -0
- /ai_review/services/review/{summary → internal/inline}/__init__.py +0 -0
- /ai_review/services/review/{inline → internal/inline}/schema.py +0 -0
- /ai_review/{tests/suites/services/review/inline → services/review/internal/inline_reply}/__init__.py +0 -0
- /ai_review/{tests/suites/services/review → services/review/internal}/policy/__init__.py +0 -0
- /ai_review/{tests/suites/services/review → services/review/internal}/summary/__init__.py +0 -0
- /ai_review/services/review/{summary → internal/summary}/schema.py +0 -0
- {xai_review-0.26.0.dist-info → xai_review-0.28.0.dist-info}/WHEEL +0 -0
- {xai_review-0.26.0.dist-info → xai_review-0.28.0.dist-info}/entry_points.txt +0 -0
- {xai_review-0.26.0.dist-info → xai_review-0.28.0.dist-info}/licenses/LICENSE +0 -0
- {xai_review-0.26.0.dist-info → xai_review-0.28.0.dist-info}/top_level.txt +0 -0
ai_review/tests/fixtures/services/review/runner/summary.py (new file)
@@ -0,0 +1,50 @@
+import pytest
+
+from ai_review.services.cost.types import CostServiceProtocol
+from ai_review.services.diff.types import DiffServiceProtocol
+from ai_review.services.git.types import GitServiceProtocol
+from ai_review.services.prompt.types import PromptServiceProtocol
+from ai_review.services.review.gateway.types import ReviewCommentGatewayProtocol, ReviewLLMGatewayProtocol
+from ai_review.services.review.internal.policy.types import ReviewPolicyServiceProtocol
+from ai_review.services.review.internal.summary.types import SummaryCommentServiceProtocol
+from ai_review.services.review.runner.summary import SummaryReviewRunner
+from ai_review.services.review.runner.types import ReviewRunnerProtocol
+from ai_review.services.vcs.types import VCSClientProtocol
+
+
+class FakeSummaryReviewRunner(ReviewRunnerProtocol):
+    def __init__(self):
+        self.calls = []
+
+    async def run(self) -> None:
+        self.calls.append(("run", {}))
+
+
+@pytest.fixture
+def fake_summary_review_runner() -> FakeSummaryReviewRunner:
+    return FakeSummaryReviewRunner()
+
+
+@pytest.fixture
+def summary_review_runner(
+    fake_vcs_client: VCSClientProtocol,
+    fake_git_service: GitServiceProtocol,
+    fake_diff_service: DiffServiceProtocol,
+    fake_cost_service: CostServiceProtocol,
+    fake_prompt_service: PromptServiceProtocol,
+    fake_review_llm_gateway: ReviewLLMGatewayProtocol,
+    fake_review_policy_service: ReviewPolicyServiceProtocol,
+    fake_review_comment_gateway: ReviewCommentGatewayProtocol,
+    fake_summary_comment_service: SummaryCommentServiceProtocol,
+) -> SummaryReviewRunner:
+    return SummaryReviewRunner(
+        vcs=fake_vcs_client,
+        git=fake_git_service,
+        diff=fake_diff_service,
+        cost=fake_cost_service,
+        prompt=fake_prompt_service,
+        review_policy=fake_review_policy_service,
+        summary_comment=fake_summary_comment_service,
+        review_llm_gateway=fake_review_llm_gateway,
+        review_comment_gateway=fake_review_comment_gateway,
+    )
ai_review/tests/fixtures/services/review/runner/summary_reply.py (new file)
@@ -0,0 +1,50 @@
+import pytest
+
+from ai_review.services.cost.types import CostServiceProtocol
+from ai_review.services.diff.types import DiffServiceProtocol
+from ai_review.services.git.types import GitServiceProtocol
+from ai_review.services.prompt.types import PromptServiceProtocol
+from ai_review.services.review.gateway.types import ReviewLLMGatewayProtocol, ReviewCommentGatewayProtocol
+from ai_review.services.review.internal.policy.types import ReviewPolicyServiceProtocol
+from ai_review.services.review.internal.summary_reply.types import SummaryCommentReplyServiceProtocol
+from ai_review.services.review.runner.summary_reply import SummaryReplyReviewRunner
+from ai_review.services.review.runner.types import ReviewRunnerProtocol
+from ai_review.services.vcs.types import VCSClientProtocol
+
+
+class FakeSummaryReplyReviewRunner(ReviewRunnerProtocol):
+    def __init__(self):
+        self.calls = []
+
+    async def run(self) -> None:
+        self.calls.append(("run", {}))
+
+
+@pytest.fixture
+def fake_summary_reply_review_runner() -> FakeSummaryReplyReviewRunner:
+    return FakeSummaryReplyReviewRunner()
+
+
+@pytest.fixture
+def summary_reply_review_runner(
+    fake_vcs_client: VCSClientProtocol,
+    fake_git_service: GitServiceProtocol,
+    fake_diff_service: DiffServiceProtocol,
+    fake_cost_service: CostServiceProtocol,
+    fake_prompt_service: PromptServiceProtocol,
+    fake_review_llm_gateway: ReviewLLMGatewayProtocol,
+    fake_review_policy_service: ReviewPolicyServiceProtocol,
+    fake_review_comment_gateway: ReviewCommentGatewayProtocol,
+    fake_summary_comment_reply_service: SummaryCommentReplyServiceProtocol,
+) -> SummaryReplyReviewRunner:
+    return SummaryReplyReviewRunner(
+        vcs=fake_vcs_client,
+        git=fake_git_service,
+        diff=fake_diff_service,
+        cost=fake_cost_service,
+        prompt=fake_prompt_service,
+        review_policy=fake_review_policy_service,
+        review_llm_gateway=fake_review_llm_gateway,
+        summary_comment_reply=fake_summary_comment_reply_service,
+        review_comment_gateway=fake_review_comment_gateway,
+    )
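Both runner fixtures above only need a single run() entry point. Judging from these fakes, the new ReviewRunnerProtocol in ai_review/services/review/runner/types.py (+6 lines) is presumably little more than the following sketch (an inference from the fixtures, not the published source):

    from typing import Protocol


    class ReviewRunnerProtocol(Protocol):
        """Common entry point implemented by the inline/summary/reply review runners."""

        async def run(self) -> None: ...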
ai_review/tests/fixtures/services/vcs.py
@@ -5,6 +5,7 @@ import pytest
 from ai_review.services.vcs.types import (
     VCSClientProtocol,
     ReviewInfoSchema,
+    ReviewThreadSchema,
     ReviewCommentSchema,
 )
 
@@ -43,6 +44,28 @@ class FakeVCSClient(VCSClientProtocol):
 
         return self.responses.get("create_inline_comment_result", None)
 
+    async def create_inline_reply(self, thread_id: int | str, message: str) -> None:
+        self.calls.append(("create_inline_reply", (thread_id, message), {}))
+        if error := self.responses.get("create_inline_reply_error"):
+            raise error
+
+        return self.responses.get("create_inline_reply_result", None)
+
+    async def create_summary_reply(self, thread_id: int | str, message: str) -> None:
+        self.calls.append(("create_summary_reply", (thread_id, message), {}))
+        if error := self.responses.get("create_summary_reply_error"):
+            raise error
+
+        return self.responses.get("create_summary_reply_result", None)
+
+    async def get_inline_threads(self) -> list[ReviewThreadSchema]:
+        self.calls.append(("get_inline_threads", (), {}))
+        return self.responses.get("get_inline_threads", [])
+
+    async def get_general_threads(self) -> list[ReviewThreadSchema]:
+        self.calls.append(("get_general_threads", (), {}))
+        return self.responses.get("get_general_threads", [])
+
 
 @pytest.fixture
 def fake_vcs_client() -> FakeVCSClient:
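The FakeVCSClient changes mirror four new methods on the VCS abstraction. Below is a rough sketch of what the corresponding additions to VCSClientProtocol in ai_review/services/vcs/types.py might look like, inferred from this fixture; ReviewThreadSchema's fields are a guess, since only its name appears in the diff:

    from typing import Protocol

    from pydantic import BaseModel


    class ReviewThreadSchema(BaseModel):
        # Hypothetical fields for illustration only; the real schema lives in vcs/types.py (+34 lines).
        id: int | str
        comments: list[str] = []


    class VCSClientProtocol(Protocol):
        async def create_inline_reply(self, thread_id: int | str, message: str) -> None: ...

        async def create_summary_reply(self, thread_id: int | str, message: str) -> None: ...

        async def get_inline_threads(self) -> list[ReviewThreadSchema]: ...

        async def get_general_threads(self) -> list[ReviewThreadSchema]: ...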
ai_review/tests/suites/cli/test_main.py (new file)
@@ -0,0 +1,54 @@
+import pytest
+from typer.testing import CliRunner
+
+from ai_review.cli.main import app
+from ai_review.services.review.service import ReviewService
+
+runner = CliRunner()
+
+
+@pytest.fixture(autouse=True)
+def dummy_review_service(monkeypatch: pytest.MonkeyPatch, review_service: ReviewService):
+    monkeypatch.setattr("ai_review.cli.commands.run_review.ReviewService", lambda: review_service)
+    monkeypatch.setattr("ai_review.cli.commands.run_inline_review.ReviewService", lambda: review_service)
+    monkeypatch.setattr("ai_review.cli.commands.run_context_review.ReviewService", lambda: review_service)
+    monkeypatch.setattr("ai_review.cli.commands.run_summary_review.ReviewService", lambda: review_service)
+    monkeypatch.setattr("ai_review.cli.commands.run_inline_reply_review.ReviewService", lambda: review_service)
+    monkeypatch.setattr("ai_review.cli.commands.run_summary_reply_review.ReviewService", lambda: review_service)
+
+
+@pytest.mark.parametrize(
+    "args, expected_output",
+    [
+        (["run"], "Starting full AI review..."),
+        (["run-inline"], "Starting inline AI review..."),
+        (["run-context"], "Starting context AI review..."),
+        (["run-summary"], "Starting summary AI review..."),
+        (["run-inline-reply"], "Starting inline reply AI review..."),
+        (["run-summary-reply"], "Starting summary reply AI review..."),
+    ],
+)
+def test_cli_commands_invoke_review_service_successfully(args: list[str], expected_output: str):
+    """
+    Ensure CLI commands correctly call the ReviewService with fake dependencies.
+    """
+    result = runner.invoke(app, args)
+
+    assert result.exit_code == 0
+    assert expected_output in result.output
+    assert "AI review completed successfully!" in result.output
+
+
+def test_show_config_outputs_json(monkeypatch: pytest.MonkeyPatch):
+    """
+    Validate that the 'show-config' command prints settings as JSON.
+    """
+    monkeypatch.setattr(
+        "ai_review.cli.main.settings.model_dump_json",
+        lambda **_: '{"debug": true}'
+    )
+
+    result = runner.invoke(app, ["show-config"])
+    assert result.exit_code == 0
+    assert "Loaded AI Review configuration" in result.output
+    assert '{"debug": true}' in result.output
ai_review/tests/suites/clients/bitbucket/test_client.py (new file)
@@ -0,0 +1,14 @@
+import pytest
+from httpx import AsyncClient
+
+from ai_review.clients.bitbucket.client import get_bitbucket_http_client, BitbucketHTTPClient
+from ai_review.clients.bitbucket.pr.client import BitbucketPullRequestsHTTPClient
+
+
+@pytest.mark.usefixtures("bitbucket_http_client_config")
+def test_get_bitbucket_http_client_builds_ok():
+    bitbucket_http_client = get_bitbucket_http_client()
+
+    assert isinstance(bitbucket_http_client, BitbucketHTTPClient)
+    assert isinstance(bitbucket_http_client.pr, BitbucketPullRequestsHTTPClient)
+    assert isinstance(bitbucket_http_client.pr.client, AsyncClient)
ai_review/tests/suites/clients/bitbucket/test_tools.py (new file)
@@ -0,0 +1,31 @@
+from httpx import Response, Request
+
+from ai_review.clients.bitbucket.tools import bitbucket_has_next_page
+
+
+def make_response(data: dict) -> Response:
+    return Response(
+        json=data,
+        request=Request("GET", "http://bitbucket.test"),
+        status_code=200,
+    )
+
+
+def test_bitbucket_has_next_page_true():
+    resp = make_response({"next": "https://api.bitbucket.org/2.0/repositories/test/repo?page=2"})
+    assert bitbucket_has_next_page(resp) is True
+
+
+def test_bitbucket_has_next_page_false_none():
+    resp = make_response({"next": None})
+    assert bitbucket_has_next_page(resp) is False
+
+
+def test_bitbucket_has_next_page_false_missing():
+    resp = make_response({})
+    assert bitbucket_has_next_page(resp) is False
+
+
+def test_bitbucket_has_next_page_false_empty_string():
+    resp = make_response({"next": ""})
+    assert bitbucket_has_next_page(resp) is False
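These tests pin down the pagination predicate added in ai_review/clients/bitbucket/tools.py (+6 lines). A minimal implementation that would satisfy them (a sketch, not necessarily the shipped code):

    from httpx import Response


    def bitbucket_has_next_page(response: Response) -> bool:
        # Bitbucket's paginated payloads carry a "next" URL; None, "" or a missing key means the last page.
        return bool(response.json().get("next"))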
ai_review/tests/suites/clients/github/test_tools.py (new file)
@@ -0,0 +1,31 @@
+from httpx import Response, Request
+
+from ai_review.clients.github.tools import github_has_next_page
+
+
+def make_response(headers: dict) -> Response:
+    return Response(
+        request=Request("GET", "http://test"),
+        headers=headers,
+        status_code=200,
+    )
+
+
+def test_github_has_next_page_true():
+    response = make_response({
+        "Link": '<https://api.github.com/resource?page=2>; rel="next", '
+                '<https://api.github.com/resource?page=5>; rel="last"'
+    })
+    assert github_has_next_page(response) is True
+
+
+def test_github_has_next_page_false_no_next():
+    response = make_response({
+        "Link": '<https://api.github.com/resource?page=5>; rel="last"'
+    })
+    assert github_has_next_page(response) is False
+
+
+def test_github_has_next_page_false_no_header():
+    resp = make_response({})
+    assert github_has_next_page(resp) is False
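Likewise for GitHub, where pagination is signalled through the Link response header. A sketch consistent with these tests:

    from httpx import Response


    def github_has_next_page(response: Response) -> bool:
        # GitHub advertises further pages via a rel="next" entry in the Link header.
        return 'rel="next"' in response.headers.get("Link", "")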
ai_review/tests/suites/clients/gitlab/test_tools.py (new file)
@@ -0,0 +1,26 @@
+from httpx import Response, Request
+
+from ai_review.clients.gitlab.tools import gitlab_has_next_page
+
+
+def make_response(headers: dict) -> Response:
+    return Response(
+        request=Request("GET", "http://gitlab.test"),
+        headers=headers,
+        status_code=200,
+    )
+
+
+def test_gitlab_has_next_page_true():
+    resp = make_response({"X-Next-Page": "2"})
+    assert gitlab_has_next_page(resp) is True
+
+
+def test_gitlab_has_next_page_false_empty():
+    resp = make_response({"X-Next-Page": ""})
+    assert gitlab_has_next_page(resp) is False
+
+
+def test_gitlab_has_next_page_false_missing():
+    resp = make_response({})
+    assert gitlab_has_next_page(resp) is False
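And for GitLab, which reports the next page number in the X-Next-Page header. A sketch consistent with these tests:

    from httpx import Response


    def gitlab_has_next_page(response: Response) -> bool:
        # An empty or missing X-Next-Page header means there are no more pages.
        return bool(response.headers.get("X-Next-Page"))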
ai_review/tests/suites/libs/config/test_prompt.py
@@ -2,56 +2,136 @@ from pathlib import Path
 
 import pytest
 
-from ai_review.libs.config.prompt import PromptConfig
+from ai_review.libs.config.prompt import PromptConfig, resolve_prompt_files, resolve_system_prompt_files
 
 
-
-    dummy_file = tmp_path / "dummy.md"
-    dummy_file.write_text("DUMMY")
-    monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
-
-    config = PromptConfig()
-    result = config.inline_prompt_files_or_default
+# ---------- resolve_prompt_files ----------
 
+def test_resolve_prompt_files_returns_given_list(tmp_path: Path):
+    dummy_file = tmp_path / "file.md"
+    result = resolve_prompt_files([dummy_file], "default_inline.md")
     assert result == [dummy_file]
-    assert config.load_inline() == ["DUMMY"]
 
 
-def
-    dummy_file = tmp_path / "
-    dummy_file.write_text("
+def test_resolve_prompt_files_loads_default_when_none(tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
+    dummy_file = tmp_path / "inline_default.md"
+    dummy_file.write_text("INLINE_DEFAULT")
     monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
 
-
-    result
+    result = resolve_prompt_files(None, "default_inline.md")
+    assert result == [dummy_file]
 
+
+# ---------- resolve_system_prompt_files ----------
+
+def test_resolve_system_prompt_files_none_returns_global(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
+    dummy_file = tmp_path / "sys.md"
+    dummy_file.write_text("SYS")
+    monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
+
+    result = resolve_system_prompt_files(None, include=True, default_file="default_system_inline.md")
     assert result == [dummy_file]
-    assert config.load_system_inline() == ["GLOBAL"]
 
 
-def
+def test_resolve_system_prompt_files_include_true(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
     global_file = tmp_path / "global.md"
     global_file.write_text("GLOBAL")
     custom_file = tmp_path / "custom.md"
-    custom_file.write_text("CUSTOM")
     monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: global_file)
 
-
-    result
-
-    assert global_file in result and custom_file in result
-    assert config.load_system_inline() == ["GLOBAL", "CUSTOM"]
+    result = resolve_system_prompt_files([custom_file], include=True, default_file="default_system_inline.md")
+    assert result == [global_file, custom_file]
 
 
-def
+def test_resolve_system_prompt_files_include_false(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
     global_file = tmp_path / "global.md"
     global_file.write_text("GLOBAL")
     custom_file = tmp_path / "custom.md"
-    custom_file.write_text("CUSTOM")
     monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: global_file)
 
-
-    result = config.system_inline_prompt_files_or_default
-
+    result = resolve_system_prompt_files([custom_file], include=False, default_file="default_system_inline.md")
     assert result == [custom_file]
-
+
+
+# ---------- Prompts ---------
+
+def test_load_context_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
+    dummy_file = tmp_path / "context.md"
+    dummy_file.write_text("CTX")
+    monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
+
+    config = PromptConfig()
+    assert config.context_prompt_files_or_default == [dummy_file]
+    assert config.load_context() == ["CTX"]
+
+
+def test_load_summary_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
+    dummy_file = tmp_path / "summary.md"
+    dummy_file.write_text("SUM")
+    monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
+
+    config = PromptConfig()
+    assert config.summary_prompt_files_or_default == [dummy_file]
+    assert config.load_summary() == ["SUM"]
+
+
+def test_load_inline_reply_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
+    dummy_file = tmp_path / "inline_reply.md"
+    dummy_file.write_text("INL_R")
+    monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
+
+    config = PromptConfig()
+    assert config.inline_reply_prompt_files_or_default == [dummy_file]
+    assert config.load_inline_reply() == ["INL_R"]
+
+
+def test_load_summary_reply_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
+    dummy_file = tmp_path / "summary_reply.md"
+    dummy_file.write_text("SUM_R")
+    monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
+
+    config = PromptConfig()
+    assert config.summary_reply_prompt_files_or_default == [dummy_file]
+    assert config.load_summary_reply() == ["SUM_R"]
+
+
+# ---------- System Prompts ----------
+
+def test_load_system_context_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
+    dummy_file = tmp_path / "sys_context.md"
+    dummy_file.write_text("SYS_CTX")
+    monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
+
+    config = PromptConfig()
+    assert config.system_context_prompt_files_or_default == [dummy_file]
+    assert config.load_system_context() == ["SYS_CTX"]
+
+
+def test_load_system_summary_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
+    dummy_file = tmp_path / "sys_summary.md"
+    dummy_file.write_text("SYS_SUM")
+    monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
+
+    config = PromptConfig()
+    assert config.system_summary_prompt_files_or_default == [dummy_file]
+    assert config.load_system_summary() == ["SYS_SUM"]
+
+
+def test_load_system_inline_reply_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
+    dummy_file = tmp_path / "sys_inline_reply.md"
+    dummy_file.write_text("SYS_IR")
+    monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
+
+    config = PromptConfig()
+    assert config.system_inline_reply_prompt_files_or_default == [dummy_file]
+    assert config.load_system_inline_reply() == ["SYS_IR"]
+
+
+def test_load_system_summary_reply_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
+    dummy_file = tmp_path / "sys_summary_reply.md"
+    dummy_file.write_text("SYS_SR")
+    monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
+
+    config = PromptConfig()
+    assert config.system_summary_reply_prompt_files_or_default == [dummy_file]
+    assert config.load_system_summary_reply() == ["SYS_SR"]
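The reworked tests above exercise two new module-level helpers in ai_review/libs/config/prompt.py. From the assertions alone they presumably behave roughly like the sketch below; the keyword used when calling load_resource is a guess, since the tests stub it with lambda **_:

    from pathlib import Path

    from ai_review.libs.config.prompt import load_resource


    def resolve_prompt_files(files: list[Path] | None, default_file: str) -> list[Path]:
        # Explicitly configured prompt files win; otherwise fall back to the packaged default.
        if files:
            return list(files)
        return [load_resource(filename=default_file)]


    def resolve_system_prompt_files(files: list[Path] | None, include: bool, default_file: str) -> list[Path]:
        # The packaged "global" system prompt is used when nothing is configured and is
        # prepended to custom files when include=True; include=False keeps only the custom files.
        global_file = load_resource(filename=default_file)
        if files is None:
            return [global_file]
        return [global_file, *files] if include else list(files)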
ai_review/tests/suites/libs/http/test_paginate.py (new file)
@@ -0,0 +1,95 @@
+import pytest
+from httpx import Response, Request
+from pydantic import BaseModel
+
+from ai_review.libs.http.paginate import paginate
+
+
+class DummySchema(BaseModel):
+    value: int
+
+
+def make_response(data: dict) -> Response:
+    return Response(
+        json=data,
+        request=Request("GET", "http://test"),
+        status_code=200,
+    )
+
+
+@pytest.mark.asyncio
+async def test_single_page():
+    async def fetch_page(_: int) -> Response:
+        return make_response({"items": [1, 2, 3]})
+
+    def extract_items(response: Response) -> list[DummySchema]:
+        return [DummySchema(value=value) for value in response.json()["items"]]
+
+    def has_next_page(_: Response) -> bool:
+        return False
+
+    items = await paginate(fetch_page, extract_items, has_next_page)
+    assert len(items) == 3
+    assert [item.value for item in items] == [1, 2, 3]
+
+
+@pytest.mark.asyncio
+async def test_multiple_pages():
+    async def fetch_page(page: int) -> Response:
+        return make_response({"items": [page]})
+
+    def extract_items(response: Response):
+        return [DummySchema(value=value) for value in response.json()["items"]]
+
+    def has_next_page(response: Response) -> bool:
+        return response.json()["items"][0] < 3
+
+    items = await paginate(fetch_page, extract_items, has_next_page)
+    assert [item.value for item in items] == [1, 2, 3]
+
+
+@pytest.mark.asyncio
+async def test_extract_items_error():
+    async def fetch_page(_: int) -> Response:
+        return make_response({"items": [1]})
+
+    def extract_items(_: Response):
+        raise ValueError("bad json")
+
+    def has_next_page(_: Response) -> bool:
+        return False
+
+    with pytest.raises(RuntimeError) as exc:
+        await paginate(fetch_page, extract_items, has_next_page)
+    assert "Failed to extract items" in str(exc.value)
+
+
+@pytest.mark.asyncio
+async def test_max_pages_exceeded():
+    async def fetch_page(page: int) -> Response:
+        return make_response({"items": [page]})
+
+    def extract_items(response: Response):
+        return [DummySchema(value=value) for value in response.json()["items"]]
+
+    def has_next_page(_: Response) -> bool:
+        return True
+
+    with pytest.raises(RuntimeError) as exc:
+        await paginate(fetch_page, extract_items, has_next_page, max_pages=2)
+    assert "Pagination exceeded" in str(exc.value)
+
+
+@pytest.mark.asyncio
+async def test_empty_items():
+    async def fetch_page(_: int) -> Response:
+        return make_response({"items": []})
+
+    def extract_items(_: Response):
+        return []
+
+    def has_next_page(_: Response) -> bool:
+        return False
+
+    result = await paginate(fetch_page, extract_items, has_next_page)
+    assert result == []
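These cases characterize the new generic helper in ai_review/libs/http/paginate.py (+43 lines). A self-contained sketch that matches the behaviour exercised above; the default max_pages value and exact error messages are assumptions:

    from typing import Awaitable, Callable, TypeVar

    from httpx import Response

    T = TypeVar("T")


    async def paginate(
        fetch_page: Callable[[int], Awaitable[Response]],
        extract_items: Callable[[Response], list[T]],
        has_next_page: Callable[[Response], bool],
        max_pages: int = 100,
    ) -> list[T]:
        # Walk pages starting from 1 until the provider reports no next page.
        items: list[T] = []
        page = 1
        while True:
            if page > max_pages:
                raise RuntimeError(f"Pagination exceeded the maximum of {max_pages} pages")
            response = await fetch_page(page)
            try:
                items.extend(extract_items(response))
            except Exception as error:
                raise RuntimeError(f"Failed to extract items from page {page}") from error
            if not has_next_page(response):
                return items
            page += 1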