xai-review 0.27.0__py3-none-any.whl → 0.28.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of xai-review has been flagged as potentially problematic.

Files changed (147)
  1. ai_review/cli/commands/run_inline_reply_review.py +7 -0
  2. ai_review/cli/commands/run_summary_reply_review.py +7 -0
  3. ai_review/cli/main.py +17 -0
  4. ai_review/clients/bitbucket/pr/schema/comments.py +14 -0
  5. ai_review/clients/bitbucket/pr/schema/pull_request.py +1 -5
  6. ai_review/clients/bitbucket/pr/schema/user.py +7 -0
  7. ai_review/clients/github/pr/client.py +35 -4
  8. ai_review/clients/github/pr/schema/comments.py +21 -0
  9. ai_review/clients/github/pr/schema/pull_request.py +1 -4
  10. ai_review/clients/github/pr/schema/user.py +6 -0
  11. ai_review/clients/github/pr/types.py +11 -1
  12. ai_review/clients/gitlab/mr/client.py +32 -1
  13. ai_review/clients/gitlab/mr/schema/changes.py +1 -5
  14. ai_review/clients/gitlab/mr/schema/discussions.py +17 -7
  15. ai_review/clients/gitlab/mr/schema/notes.py +3 -0
  16. ai_review/clients/gitlab/mr/schema/user.py +7 -0
  17. ai_review/clients/gitlab/mr/types.py +16 -7
  18. ai_review/libs/config/prompt.py +96 -64
  19. ai_review/libs/config/review.py +2 -0
  20. ai_review/libs/llm/output_json_parser.py +60 -0
  21. ai_review/prompts/default_inline_reply.md +10 -0
  22. ai_review/prompts/default_summary_reply.md +14 -0
  23. ai_review/prompts/default_system_inline_reply.md +31 -0
  24. ai_review/prompts/default_system_summary_reply.md +13 -0
  25. ai_review/services/artifacts/schema.py +2 -2
  26. ai_review/services/hook/constants.py +14 -0
  27. ai_review/services/hook/service.py +95 -4
  28. ai_review/services/hook/types.py +18 -2
  29. ai_review/services/prompt/adapter.py +1 -1
  30. ai_review/services/prompt/service.py +49 -3
  31. ai_review/services/prompt/tools.py +21 -0
  32. ai_review/services/prompt/types.py +23 -0
  33. ai_review/services/review/gateway/comment.py +45 -6
  34. ai_review/services/review/gateway/llm.py +2 -1
  35. ai_review/services/review/gateway/types.py +50 -0
  36. ai_review/services/review/internal/inline/service.py +40 -0
  37. ai_review/services/review/internal/inline/types.py +8 -0
  38. ai_review/services/review/internal/inline_reply/schema.py +23 -0
  39. ai_review/services/review/internal/inline_reply/service.py +20 -0
  40. ai_review/services/review/internal/inline_reply/types.py +8 -0
  41. ai_review/services/review/{policy → internal/policy}/service.py +2 -1
  42. ai_review/services/review/internal/policy/types.py +15 -0
  43. ai_review/services/review/{summary → internal/summary}/service.py +2 -2
  44. ai_review/services/review/{summary → internal/summary}/types.py +1 -1
  45. ai_review/services/review/internal/summary_reply/__init__.py +0 -0
  46. ai_review/services/review/internal/summary_reply/schema.py +8 -0
  47. ai_review/services/review/internal/summary_reply/service.py +15 -0
  48. ai_review/services/review/internal/summary_reply/types.py +8 -0
  49. ai_review/services/review/runner/__init__.py +0 -0
  50. ai_review/services/review/runner/context.py +72 -0
  51. ai_review/services/review/runner/inline.py +80 -0
  52. ai_review/services/review/runner/inline_reply.py +80 -0
  53. ai_review/services/review/runner/summary.py +71 -0
  54. ai_review/services/review/runner/summary_reply.py +79 -0
  55. ai_review/services/review/runner/types.py +6 -0
  56. ai_review/services/review/service.py +78 -110
  57. ai_review/services/vcs/bitbucket/adapter.py +24 -0
  58. ai_review/services/vcs/bitbucket/client.py +107 -42
  59. ai_review/services/vcs/github/adapter.py +35 -0
  60. ai_review/services/vcs/github/client.py +105 -44
  61. ai_review/services/vcs/gitlab/adapter.py +26 -0
  62. ai_review/services/vcs/gitlab/client.py +91 -38
  63. ai_review/services/vcs/types.py +34 -0
  64. ai_review/tests/fixtures/clients/bitbucket.py +2 -2
  65. ai_review/tests/fixtures/clients/github.py +35 -6
  66. ai_review/tests/fixtures/clients/gitlab.py +42 -3
  67. ai_review/tests/fixtures/libs/__init__.py +0 -0
  68. ai_review/tests/fixtures/libs/llm/__init__.py +0 -0
  69. ai_review/tests/fixtures/libs/llm/output_json_parser.py +13 -0
  70. ai_review/tests/fixtures/services/hook.py +8 -0
  71. ai_review/tests/fixtures/services/llm.py +8 -5
  72. ai_review/tests/fixtures/services/prompt.py +70 -0
  73. ai_review/tests/fixtures/services/review/base.py +41 -0
  74. ai_review/tests/fixtures/services/review/gateway/__init__.py +0 -0
  75. ai_review/tests/fixtures/services/review/gateway/comment.py +98 -0
  76. ai_review/tests/fixtures/services/review/gateway/llm.py +17 -0
  77. ai_review/tests/fixtures/services/review/internal/__init__.py +0 -0
  78. ai_review/tests/fixtures/services/review/{inline.py → internal/inline.py} +8 -6
  79. ai_review/tests/fixtures/services/review/internal/inline_reply.py +25 -0
  80. ai_review/tests/fixtures/services/review/internal/policy.py +28 -0
  81. ai_review/tests/fixtures/services/review/internal/summary.py +21 -0
  82. ai_review/tests/fixtures/services/review/internal/summary_reply.py +19 -0
  83. ai_review/tests/fixtures/services/review/runner/__init__.py +0 -0
  84. ai_review/tests/fixtures/services/review/runner/context.py +50 -0
  85. ai_review/tests/fixtures/services/review/runner/inline.py +50 -0
  86. ai_review/tests/fixtures/services/review/runner/inline_reply.py +50 -0
  87. ai_review/tests/fixtures/services/review/runner/summary.py +50 -0
  88. ai_review/tests/fixtures/services/review/runner/summary_reply.py +50 -0
  89. ai_review/tests/fixtures/services/vcs.py +23 -0
  90. ai_review/tests/suites/cli/__init__.py +0 -0
  91. ai_review/tests/suites/cli/test_main.py +54 -0
  92. ai_review/tests/suites/libs/config/test_prompt.py +108 -28
  93. ai_review/tests/suites/libs/llm/__init__.py +0 -0
  94. ai_review/tests/suites/libs/llm/test_output_json_parser.py +155 -0
  95. ai_review/tests/suites/services/hook/test_service.py +88 -4
  96. ai_review/tests/suites/services/prompt/test_adapter.py +3 -3
  97. ai_review/tests/suites/services/prompt/test_service.py +102 -58
  98. ai_review/tests/suites/services/prompt/test_tools.py +86 -1
  99. ai_review/tests/suites/services/review/gateway/__init__.py +0 -0
  100. ai_review/tests/suites/services/review/gateway/test_comment.py +253 -0
  101. ai_review/tests/suites/services/review/gateway/test_llm.py +82 -0
  102. ai_review/tests/suites/services/review/internal/__init__.py +0 -0
  103. ai_review/tests/suites/services/review/internal/inline/__init__.py +0 -0
  104. ai_review/tests/suites/services/review/{inline → internal/inline}/test_schema.py +1 -1
  105. ai_review/tests/suites/services/review/internal/inline/test_service.py +81 -0
  106. ai_review/tests/suites/services/review/internal/inline_reply/__init__.py +0 -0
  107. ai_review/tests/suites/services/review/internal/inline_reply/test_schema.py +57 -0
  108. ai_review/tests/suites/services/review/internal/inline_reply/test_service.py +72 -0
  109. ai_review/tests/suites/services/review/internal/policy/__init__.py +0 -0
  110. ai_review/tests/suites/services/review/{policy → internal/policy}/test_service.py +1 -1
  111. ai_review/tests/suites/services/review/internal/summary/__init__.py +0 -0
  112. ai_review/tests/suites/services/review/{summary → internal/summary}/test_schema.py +1 -1
  113. ai_review/tests/suites/services/review/{summary → internal/summary}/test_service.py +2 -2
  114. ai_review/tests/suites/services/review/internal/summary_reply/__init__.py +0 -0
  115. ai_review/tests/suites/services/review/internal/summary_reply/test_schema.py +19 -0
  116. ai_review/tests/suites/services/review/internal/summary_reply/test_service.py +21 -0
  117. ai_review/tests/suites/services/review/runner/__init__.py +0 -0
  118. ai_review/tests/suites/services/review/runner/test_context.py +89 -0
  119. ai_review/tests/suites/services/review/runner/test_inline.py +100 -0
  120. ai_review/tests/suites/services/review/runner/test_inline_reply.py +109 -0
  121. ai_review/tests/suites/services/review/runner/test_summary.py +87 -0
  122. ai_review/tests/suites/services/review/runner/test_summary_reply.py +97 -0
  123. ai_review/tests/suites/services/review/test_service.py +64 -97
  124. ai_review/tests/suites/services/vcs/bitbucket/test_adapter.py +109 -0
  125. ai_review/tests/suites/services/vcs/bitbucket/{test_service.py → test_client.py} +88 -1
  126. ai_review/tests/suites/services/vcs/github/test_adapter.py +162 -0
  127. ai_review/tests/suites/services/vcs/github/{test_service.py → test_client.py} +102 -2
  128. ai_review/tests/suites/services/vcs/gitlab/test_adapter.py +105 -0
  129. ai_review/tests/suites/services/vcs/gitlab/{test_service.py → test_client.py} +99 -1
  130. {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/METADATA +8 -5
  131. {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/RECORD +143 -70
  132. ai_review/services/review/inline/service.py +0 -54
  133. ai_review/services/review/inline/types.py +0 -11
  134. ai_review/tests/fixtures/services/review/summary.py +0 -19
  135. ai_review/tests/suites/services/review/inline/test_service.py +0 -107
  136. /ai_review/{services/review/inline → libs/llm}/__init__.py +0 -0
  137. /ai_review/services/review/{policy → internal}/__init__.py +0 -0
  138. /ai_review/services/review/{summary → internal/inline}/__init__.py +0 -0
  139. /ai_review/services/review/{inline → internal/inline}/schema.py +0 -0
  140. /ai_review/{tests/suites/services/review/inline → services/review/internal/inline_reply}/__init__.py +0 -0
  141. /ai_review/{tests/suites/services/review → services/review/internal}/policy/__init__.py +0 -0
  142. /ai_review/{tests/suites/services/review → services/review/internal}/summary/__init__.py +0 -0
  143. /ai_review/services/review/{summary → internal/summary}/schema.py +0 -0
  144. {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/WHEEL +0 -0
  145. {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/entry_points.txt +0 -0
  146. {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/licenses/LICENSE +0 -0
  147. {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/top_level.txt +0 -0
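
Several of the hunks below construct the new thread and comment types from ai_review/services/vcs/types.py (added in this release, +34 lines, definitions not shown in this excerpt). Purely from how the tests use them, a minimal sketch of the shapes involved might look as follows; the field names, optionality, and enum values are inferred from usage and should be treated as assumptions, not the real definitions:

# Sketch only: shapes inferred from test usage in this diff, not the actual
# ai_review/services/vcs/types.py definitions (the package likely uses its own
# schema base classes rather than plain dataclasses).
from dataclasses import dataclass, field
from enum import Enum


class ThreadKind(str, Enum):
    INLINE = "inline"      # string values are an assumption
    SUMMARY = "summary"


@dataclass
class UserSchema:
    name: str | None = None        # UserSchema(name="Alice") appears in the tests
    username: str | None = None    # UserSchema(username="bob") appears in the tests


@dataclass
class ReviewCommentSchema:
    id: int | str                  # the tests pass both int and str ids
    body: str
    author: UserSchema | None = None


@dataclass
class ReviewThreadSchema:
    id: str
    kind: ThreadKind
    comments: list[ReviewCommentSchema] = field(default_factory=list)
    file: str | None = None        # only set for inline threads in the tests
    line: int | None = None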

ai_review/tests/suites/services/prompt/test_service.py
@@ -5,41 +5,13 @@ from ai_review.libs.config.prompt import PromptConfig
 from ai_review.services.diff.schema import DiffFileSchema
 from ai_review.services.prompt.schema import PromptContextSchema
 from ai_review.services.prompt.service import PromptService
+from ai_review.services.vcs.types import ReviewThreadSchema, ThreadKind, ReviewCommentSchema
 
 
-@pytest.fixture(autouse=True)
-def patch_prompts(monkeypatch: pytest.MonkeyPatch) -> None:
-    """Patch methods of settings.prompt to return dummy values."""
-    monkeypatch.setattr(PromptConfig, "load_inline", lambda self: ["GLOBAL_INLINE", "INLINE_PROMPT"])
-    monkeypatch.setattr(PromptConfig, "load_context", lambda self: ["GLOBAL_CONTEXT", "CONTEXT_PROMPT"])
-    monkeypatch.setattr(PromptConfig, "load_summary", lambda self: ["GLOBAL_SUMMARY", "SUMMARY_PROMPT"])
-    monkeypatch.setattr(PromptConfig, "load_system_inline", lambda self: ["SYS_INLINE_A", "SYS_INLINE_B"])
-    monkeypatch.setattr(PromptConfig, "load_system_context", lambda self: ["SYS_CONTEXT_A", "SYS_CONTEXT_B"])
-    monkeypatch.setattr(PromptConfig, "load_system_summary", lambda self: ["SYS_SUMMARY_A", "SYS_SUMMARY_B"])
-
-
-@pytest.fixture
-def dummy_context() -> PromptContextSchema:
-    """Builds a context object that reflects the new unified review schema."""
-    return PromptContextSchema(
-        review_title="Fix login bug",
-        review_description="Some description",
-        review_author_name="Nikita",
-        review_author_username="nikita.filonov",
-        review_reviewers=["Alice", "Bob"],
-        review_reviewers_usernames=["alice", "bob"],
-        review_assignees=["Charlie"],
-        review_assignees_usernames=["charlie"],
-        source_branch="feature/login-fix",
-        target_branch="main",
-        labels=["bug", "critical"],
-        changed_files=["foo.py", "bar.py"],
-    )
-
-
-def test_build_inline_request_includes_prompts_and_diff(dummy_context: PromptContextSchema) -> None:
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_inline_request_includes_prompts_and_diff(fake_prompt_context: PromptContextSchema) -> None:
     diff = DiffFileSchema(file="foo.py", diff="+ added line\n- removed line")
-    result = PromptService.build_inline_request(diff, dummy_context)
+    result = PromptService.build_inline_request(diff, fake_prompt_context)
 
     assert "GLOBAL_INLINE" in result
     assert "INLINE_PROMPT" in result
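
The rewritten tests above consume a shared fake_prompt_context fixture (from the new ai_review/tests/fixtures/services/prompt.py, which is not included in this excerpt) in place of the removed local dummy_context. Since later assertions still expect values such as "Fix login bug", the fixture plausibly recreates the same object; a sketch under that assumption:

# Hypothetical reconstruction of the shared fixture; the real one lives in
# ai_review/tests/fixtures/services/prompt.py and is not part of this diff excerpt.
import pytest

from ai_review.services.prompt.schema import PromptContextSchema


@pytest.fixture
def fake_prompt_context() -> PromptContextSchema:
    return PromptContextSchema(
        review_title="Fix login bug",
        review_description="Some description",
        review_author_name="Nikita",
        review_author_username="nikita.filonov",
        review_reviewers=["Alice", "Bob"],
        review_reviewers_usernames=["alice", "bob"],
        review_assignees=["Charlie"],
        review_assignees_usernames=["charlie"],
        source_branch="feature/login-fix",
        target_branch="main",
        labels=["bug", "critical"],
        changed_files=["foo.py", "bar.py"],
    )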
@@ -48,12 +20,13 @@ def test_build_inline_request_includes_prompts_and_diff(dummy_context: PromptCon
     assert "- removed line" in result
 
 
-def test_build_summary_request_includes_prompts_and_diffs(dummy_context: PromptContextSchema) -> None:
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_summary_request_includes_prompts_and_diffs(fake_prompt_context: PromptContextSchema) -> None:
     diffs = [
         DiffFileSchema(file="a.py", diff="+ foo"),
         DiffFileSchema(file="b.py", diff="- bar"),
     ]
-    result = PromptService.build_summary_request(diffs, dummy_context)
+    result = PromptService.build_summary_request(diffs, fake_prompt_context)
 
     assert "GLOBAL_SUMMARY" in result
     assert "SUMMARY_PROMPT" in result
@@ -63,9 +36,10 @@ def test_build_summary_request_includes_prompts_and_diffs(dummy_context: PromptC
     assert "- bar" in result
 
 
-def test_build_summary_request_empty_list(dummy_context: PromptContextSchema) -> None:
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_summary_request_empty_list(fake_prompt_context: PromptContextSchema) -> None:
     """Empty diffs list should still produce valid prompt with no diff content."""
-    result = PromptService.build_summary_request([], dummy_context)
+    result = PromptService.build_summary_request([], fake_prompt_context)
 
     assert "GLOBAL_SUMMARY" in result
     assert "SUMMARY_PROMPT" in result
@@ -73,12 +47,13 @@ def test_build_summary_request_empty_list(dummy_context: PromptContextSchema) ->
     assert result.strip().endswith("## Changes")
 
 
-def test_build_context_request_includes_prompts_and_diffs(dummy_context: PromptContextSchema) -> None:
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_context_request_includes_prompts_and_diffs(fake_prompt_context: PromptContextSchema) -> None:
     diffs = [
         DiffFileSchema(file="a.py", diff="+ foo"),
         DiffFileSchema(file="b.py", diff="- bar"),
     ]
-    result = PromptService.build_context_request(diffs, dummy_context)
+    result = PromptService.build_context_request(diffs, fake_prompt_context)
 
     assert "GLOBAL_CONTEXT" in result
     assert "CONTEXT_PROMPT" in result
@@ -88,84 +63,153 @@ def test_build_context_request_includes_prompts_and_diffs(dummy_context: PromptC
     assert "- bar" in result
 
 
-def test_build_system_inline_request_returns_joined_prompts(dummy_context: PromptContextSchema) -> None:
-    result = PromptService.build_system_inline_request(dummy_context)
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_system_inline_request_returns_joined_prompts(fake_prompt_context: PromptContextSchema) -> None:
+    result = PromptService.build_system_inline_request(fake_prompt_context)
     assert result == "SYS_INLINE_A\n\nSYS_INLINE_B".replace("SYS_INLINE_A", "SYS_INLINE_A")
 
 
-def test_build_system_context_request_returns_joined_prompts(dummy_context: PromptContextSchema) -> None:
-    result = PromptService.build_system_context_request(dummy_context)
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_system_context_request_returns_joined_prompts(fake_prompt_context: PromptContextSchema) -> None:
+    result = PromptService.build_system_context_request(fake_prompt_context)
     assert result == "SYS_CONTEXT_A\n\nSYS_CONTEXT_B"
 
 
-def test_build_system_summary_request_returns_joined_prompts(dummy_context: PromptContextSchema) -> None:
-    result = PromptService.build_system_summary_request(dummy_context)
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_system_summary_request_returns_joined_prompts(fake_prompt_context: PromptContextSchema) -> None:
+    result = PromptService.build_system_summary_request(fake_prompt_context)
     assert result == "SYS_SUMMARY_A\n\nSYS_SUMMARY_B"
 
 
+@pytest.mark.usefixtures("fake_prompts")
 def test_build_system_inline_request_empty(
     monkeypatch: pytest.MonkeyPatch,
-    dummy_context: PromptContextSchema
+    fake_prompt_context: PromptContextSchema
 ) -> None:
     monkeypatch.setattr(PromptConfig, "load_system_inline", lambda self: [])
-    result = PromptService.build_system_inline_request(dummy_context)
+    result = PromptService.build_system_inline_request(fake_prompt_context)
     assert result == ""
 
 
+@pytest.mark.usefixtures("fake_prompts")
 def test_build_system_context_request_empty(
     monkeypatch: pytest.MonkeyPatch,
-    dummy_context: PromptContextSchema
+    fake_prompt_context: PromptContextSchema
 ) -> None:
     monkeypatch.setattr(PromptConfig, "load_system_context", lambda self: [])
-    result = PromptService.build_system_context_request(dummy_context)
+    result = PromptService.build_system_context_request(fake_prompt_context)
     assert result == ""
 
 
+@pytest.mark.usefixtures("fake_prompts")
 def test_build_system_summary_request_empty(
     monkeypatch: pytest.MonkeyPatch,
-    dummy_context: PromptContextSchema
+    fake_prompt_context: PromptContextSchema
 ) -> None:
     monkeypatch.setattr(PromptConfig, "load_system_summary", lambda self: [])
-    result = PromptService.build_system_summary_request(dummy_context)
+    result = PromptService.build_system_summary_request(fake_prompt_context)
     assert result == ""
 
 
-def test_diff_placeholders_are_not_replaced(dummy_context: PromptContextSchema) -> None:
+@pytest.mark.usefixtures("fake_prompts")
+def test_diff_placeholders_are_not_replaced(fake_prompt_context: PromptContextSchema) -> None:
     diffs = [DiffFileSchema(file="x.py", diff='print("<<review_title>>")')]
-    result = PromptService.build_summary_request(diffs, dummy_context)
+    result = PromptService.build_summary_request(diffs, fake_prompt_context)
 
     assert "<<review_title>>" in result
     assert "Fix login bug" not in result
 
 
-def test_prepare_prompt_basic_substitution(dummy_context: PromptContextSchema) -> None:
+@pytest.mark.usefixtures("fake_prompts")
+def test_prepare_prompt_basic_substitution(fake_prompt_context: PromptContextSchema) -> None:
     prompts = ["Hello", "MR title: <<review_title>>"]
-    result = PromptService.prepare_prompt(prompts, dummy_context)
+    result = PromptService.prepare_prompt(prompts, fake_prompt_context)
 
     assert "Hello" in result
     assert "MR title: Fix login bug" in result
 
 
+@pytest.mark.usefixtures("fake_prompts")
 def test_prepare_prompt_applies_normalization(
     monkeypatch: pytest.MonkeyPatch,
-    dummy_context: PromptContextSchema
+    fake_prompt_context: PromptContextSchema
 ) -> None:
     monkeypatch.setattr(settings.prompt, "normalize_prompts", True)
     prompts = ["Line with space ", "", "", "Next line"]
-    result = PromptService.prepare_prompt(prompts, dummy_context)
+    result = PromptService.prepare_prompt(prompts, fake_prompt_context)
 
     assert "Line with space" in result
     assert "Next line" in result
     assert "\n\n\n" not in result
 
 
+@pytest.mark.usefixtures("fake_prompts")
 def test_prepare_prompt_skips_normalization(
     monkeypatch: pytest.MonkeyPatch,
-    dummy_context: PromptContextSchema
+    fake_prompt_context: PromptContextSchema
 ) -> None:
     monkeypatch.setattr(settings.prompt, "normalize_prompts", False)
     prompts = ["Line with space ", "", "", "Next line"]
-    result = PromptService.prepare_prompt(prompts, dummy_context)
+    result = PromptService.prepare_prompt(prompts, fake_prompt_context)
 
     assert "Line with space " in result
     assert "\n\n\n" in result
+
+
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_inline_reply_request_includes_conversation_and_diff(fake_prompt_context: PromptContextSchema) -> None:
+    diff = DiffFileSchema(file="foo.py", diff="+ added\n- removed")
+    thread = ReviewThreadSchema(
+        id="t1",
+        kind=ThreadKind.INLINE,
+        file="foo.py",
+        line=10,
+        comments=[
+            ReviewCommentSchema(id=1, body="Initial comment"),
+            ReviewCommentSchema(id=2, body="Follow-up"),
+        ],
+    )
+
+    result = PromptService.build_inline_reply_request(diff, thread, fake_prompt_context)
+
+    assert "INLINE_REPLY_A" in result
+    assert "INLINE_REPLY_B" in result
+    assert "## Conversation" in result
+    assert "Initial comment" in result
+    assert "Follow-up" in result
+    assert "## Diff" in result
+    assert "# File: foo.py" in result
+    assert "+ added" in result
+
+
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_summary_reply_request_includes_conversation_and_changes(
+    fake_prompt_context: PromptContextSchema
+) -> None:
+    diffs = [DiffFileSchema(file="a.py", diff="+ foo")]
+    thread = ReviewThreadSchema(
+        id="t2",
+        kind=ThreadKind.SUMMARY,
+        comments=[ReviewCommentSchema(id=1, body="Overall feedback")],
+    )
+
+    result = PromptService.build_summary_reply_request(diffs, thread, fake_prompt_context)
+
+    assert "SUMMARY_REPLY_A" in result
+    assert "SUMMARY_REPLY_B" in result
+    assert "## Conversation" in result
+    assert "Overall feedback" in result
+    assert "## Changes" in result
+    assert "+ foo" in result
+
+
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_system_inline_reply_request_returns_joined_prompts(fake_prompt_context: PromptContextSchema) -> None:
+    result = PromptService.build_system_inline_reply_request(fake_prompt_context)
+    assert result == "SYS_INLINE_REPLY_A\n\nSYS_INLINE_REPLY_B"
+
+
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_system_summary_reply_request_returns_joined_prompts(fake_prompt_context: PromptContextSchema) -> None:
+    result = PromptService.build_system_summary_reply_request(fake_prompt_context)
+    assert result == "SYS_SUMMARY_REPLY_A\n\nSYS_SUMMARY_REPLY_B"
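
The fake_prompts fixture used throughout these tests replaces the removed patch_prompts autouse fixture and, given the new INLINE_REPLY_*/SUMMARY_REPLY_* and SYS_*_REPLY_* assertions above, presumably also patches loaders for the new reply prompts. Below is a sketch that assumes PromptConfig gained load_inline_reply, load_summary_reply, load_system_inline_reply, and load_system_summary_reply; those method names follow the existing load_* pattern but are not confirmed by this diff:

# Sketch of the shared fixture from ai_review/tests/fixtures/services/prompt.py;
# the reply loader method names are assumptions based on the existing naming convention.
import pytest

from ai_review.libs.config.prompt import PromptConfig


@pytest.fixture
def fake_prompts(monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setattr(PromptConfig, "load_inline", lambda self: ["GLOBAL_INLINE", "INLINE_PROMPT"])
    monkeypatch.setattr(PromptConfig, "load_context", lambda self: ["GLOBAL_CONTEXT", "CONTEXT_PROMPT"])
    monkeypatch.setattr(PromptConfig, "load_summary", lambda self: ["GLOBAL_SUMMARY", "SUMMARY_PROMPT"])
    monkeypatch.setattr(PromptConfig, "load_inline_reply", lambda self: ["INLINE_REPLY_A", "INLINE_REPLY_B"])
    monkeypatch.setattr(PromptConfig, "load_summary_reply", lambda self: ["SUMMARY_REPLY_A", "SUMMARY_REPLY_B"])
    monkeypatch.setattr(PromptConfig, "load_system_inline", lambda self: ["SYS_INLINE_A", "SYS_INLINE_B"])
    monkeypatch.setattr(PromptConfig, "load_system_context", lambda self: ["SYS_CONTEXT_A", "SYS_CONTEXT_B"])
    monkeypatch.setattr(PromptConfig, "load_system_summary", lambda self: ["SYS_SUMMARY_A", "SYS_SUMMARY_B"])
    monkeypatch.setattr(PromptConfig, "load_system_inline_reply", lambda self: ["SYS_INLINE_REPLY_A", "SYS_INLINE_REPLY_B"])
    monkeypatch.setattr(PromptConfig, "load_system_summary_reply", lambda self: ["SYS_SUMMARY_REPLY_A", "SYS_SUMMARY_REPLY_B"])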

ai_review/tests/suites/services/prompt/test_tools.py
@@ -1,7 +1,10 @@
 from ai_review.services.diff.schema import DiffFileSchema
-from ai_review.services.prompt.tools import format_file, normalize_prompt
+from ai_review.services.prompt.tools import format_file, normalize_prompt, format_files, format_thread
+from ai_review.services.vcs.types import ReviewThreadSchema, ReviewCommentSchema, UserSchema, ThreadKind
 
 
+# ---------- format_file ----------
+
 def test_format_file_basic():
     diff = DiffFileSchema(file="main.py", diff="+ print('hello')")
     result = format_file(diff)
@@ -36,6 +39,88 @@ def test_format_file_filename_with_path():
     assert result.endswith("+ class User:\n")
 
 
+def test_format_file_handles_whitespace_filename():
+    diff = DiffFileSchema(file=" spaced.py ", diff="+ print('x')")
+    result = format_file(diff)
+    assert "# File: spaced.py " in result
+
+
+# ---------- format_files ----------
+
+def test_format_files_combines_multiple_diffs():
+    diffs = [
+        DiffFileSchema(file="a.py", diff="+ foo"),
+        DiffFileSchema(file="b.py", diff="- bar"),
+    ]
+    result = format_files(diffs)
+
+    assert "# File: a.py" in result
+    assert "# File: b.py" in result
+    assert "+ foo" in result
+    assert "- bar" in result
+    assert "\n\n# File: b.py" in result
+
+
+def test_format_files_empty_list():
+    result = format_files([])
+    assert result == ""
+
+
+# ---------- format_thread ----------
+
+def test_format_thread_with_multiple_comments():
+    thread = ReviewThreadSchema(
+        id="t1",
+        kind=ThreadKind.INLINE,
+        comments=[
+            ReviewCommentSchema(
+                id=1, body="Looks good", author=UserSchema(name="Alice")
+            ),
+            ReviewCommentSchema(
+                id=2, body="Maybe refactor?", author=UserSchema(username="bob")
+            ),
+        ],
+    )
+    result = format_thread(thread)
+
+    assert "- Alice: Looks good" in result
+    assert "- bob: Maybe refactor?" in result
+    assert "\n\n- bob" in result
+
+
+def test_format_thread_ignores_empty_bodies():
+    thread = ReviewThreadSchema(
+        id="t2",
+        kind=ThreadKind.SUMMARY,
+        comments=[
+            ReviewCommentSchema(id=1, body="", author=UserSchema(name="Alice")),
+            ReviewCommentSchema(id=2, body="", author=UserSchema(username="bob")),
+        ],
+    )
+    result = format_thread(thread)
+    assert result == "No comments in thread." or result == ""
+
+
+def test_format_thread_handles_empty_comments_list():
+    thread = ReviewThreadSchema(id="t3", kind=ThreadKind.SUMMARY, comments=[])
+    result = format_thread(thread)
+    assert result == "No comments in thread."
+
+
+def test_format_thread_fallback_to_user_when_no_name_or_username():
+    thread = ReviewThreadSchema(
+        id="t4",
+        kind=ThreadKind.INLINE,
+        comments=[
+            ReviewCommentSchema(id=1, body="Anon feedback", author=UserSchema())
+        ],
+    )
+    result = format_thread(thread)
+    assert "- User: Anon feedback" in result
+
+
+# ---------- normalize_prompt ----------
+
 def test_trailing_spaces_are_removed():
     text = "hello \nworld\t\t"
     result = normalize_prompt(text)
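
The implementations of format_files and format_thread live in ai_review/services/prompt/tools.py, which this excerpt does not show. From the expectations above (blank-line separation between file blocks, "- <author>: <body>" lines, a "No comments in thread." fallback, and "User" when the author has neither name nor username), a plausible sketch is the following; treat it as an inference from the tests, not the shipped code:

# Sketch consistent with the assertions in the tests above; the real helpers in
# ai_review/services/prompt/tools.py may differ in details.
from ai_review.services.diff.schema import DiffFileSchema
from ai_review.services.prompt.tools import format_file  # reuses the existing per-file formatter
from ai_review.services.vcs.types import ReviewThreadSchema


def format_files(diffs: list[DiffFileSchema]) -> str:
    # Join each "# File: ..." block, separating file sections with a blank line.
    return "\n\n".join(format_file(diff) for diff in diffs)


def format_thread(thread: ReviewThreadSchema) -> str:
    lines: list[str] = []
    for comment in thread.comments:
        if not comment.body:
            continue  # empty bodies are ignored, per test_format_thread_ignores_empty_bodies
        author = comment.author
        name = (author.name or author.username) if author else None
        lines.append(f"- {name or 'User'}: {comment.body}")
    return "\n\n".join(lines) if lines else "No comments in thread."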

ai_review/tests/suites/services/review/gateway/test_comment.py
@@ -0,0 +1,253 @@
+import pytest
+
+from ai_review.config import settings
+from ai_review.services.review.gateway.comment import ReviewCommentGateway
+from ai_review.services.review.internal.inline.schema import InlineCommentSchema, InlineCommentListSchema
+from ai_review.services.review.internal.inline_reply.schema import InlineCommentReplySchema
+from ai_review.services.review.internal.summary.schema import SummaryCommentSchema
+from ai_review.services.review.internal.summary_reply.schema import SummaryCommentReplySchema
+from ai_review.services.vcs.types import ReviewThreadSchema, ReviewCommentSchema, ThreadKind
+from ai_review.tests.fixtures.services.vcs import FakeVCSClient
+
+
+# === INLINE THREADS ===
+
+@pytest.mark.asyncio
+async def test_get_inline_threads_filters_by_tag(
+    fake_vcs_client: FakeVCSClient,
+    review_comment_gateway: ReviewCommentGateway,
+):
+    """Should return only threads containing AI inline tags."""
+    threads = [
+        ReviewThreadSchema(
+            id="1",
+            kind=ThreadKind.INLINE,
+            file="a.py",
+            comments=[ReviewCommentSchema(id="1", body=f"Hello {settings.review.inline_reply_tag}")]
+        ),
+        ReviewThreadSchema(
+            id="2",
+            kind=ThreadKind.INLINE,
+            file="b.py",
+            comments=[ReviewCommentSchema(id="2", body="No AI tag here")]
+        ),
+    ]
+    fake_vcs_client.responses["get_inline_threads"] = threads
+
+    result = await review_comment_gateway.get_inline_threads()
+
+    assert len(result) == 1
+    assert result[0].id == "1"
+    assert any(call[0] == "get_inline_threads" for call in fake_vcs_client.calls)
+
+
+@pytest.mark.asyncio
+async def test_get_summary_threads_filters_by_tag(
+    fake_vcs_client: FakeVCSClient,
+    review_comment_gateway: ReviewCommentGateway,
+):
+    """Should return only threads containing AI summary tags."""
+    threads = [
+        ReviewThreadSchema(
+            id="10",
+            kind=ThreadKind.SUMMARY,
+            comments=[ReviewCommentSchema(id="1", body=f"AI {settings.review.summary_reply_tag}")]
+        ),
+        ReviewThreadSchema(
+            id="11",
+            kind=ThreadKind.SUMMARY,
+            comments=[ReviewCommentSchema(id="2", body="No tags here")]
+        ),
+    ]
+    fake_vcs_client.responses["get_general_threads"] = threads
+
+    result = await review_comment_gateway.get_summary_threads()
+
+    assert len(result) == 1
+    assert result[0].id == "10"
+    assert any(call[0] == "get_general_threads" for call in fake_vcs_client.calls)
+
+
+# === EXISTING COMMENTS ===
+
+@pytest.mark.asyncio
+async def test_has_existing_inline_comments_true(
+    capsys,
+    fake_vcs_client: FakeVCSClient,
+    review_comment_gateway: ReviewCommentGateway,
+):
+    """Should detect existing inline comments and log skip message."""
+    fake_vcs_client.responses["get_inline_comments"] = [
+        ReviewCommentSchema(id="1", body=f"{settings.review.inline_tag} existing comment")
+    ]
+
+    result = await review_comment_gateway.has_existing_inline_comments()
+    output = capsys.readouterr().out
+
+    assert result is True
+    assert "AI inline comments already exist" in output
+
+
+@pytest.mark.asyncio
+async def test_has_existing_summary_comments_false(
+    fake_vcs_client: FakeVCSClient,
+    review_comment_gateway: ReviewCommentGateway,
+):
+    """Should return False when no summary AI comments exist."""
+    fake_vcs_client.responses["get_general_comments"] = [
+        ReviewCommentSchema(id="1", body="Regular comment")
+    ]
+    result = await review_comment_gateway.has_existing_summary_comments()
+    assert result is False
+
+
+# === INLINE REPLY ===
+
+@pytest.mark.asyncio
+async def test_process_inline_reply_happy_path(
+    fake_vcs_client: FakeVCSClient,
+    review_comment_gateway: ReviewCommentGateway,
+):
+    """Should create inline reply and emit hook events."""
+    reply = InlineCommentReplySchema(message="AI reply text")
+
+    await review_comment_gateway.process_inline_reply("t1", reply)
+
+    assert any(call[0] == "create_inline_reply" for call in fake_vcs_client.calls)
+
+
+@pytest.mark.asyncio
+async def test_process_inline_reply_error(
+    capsys,
+    fake_vcs_client: FakeVCSClient,
+    review_comment_gateway: ReviewCommentGateway,
+):
+    """Should log and emit error if VCS fails to create reply."""
+
+    async def failing_create_inline_reply(thread_id: str, body: str):
+        raise RuntimeError("API error")
+
+    fake_vcs_client.create_inline_reply = failing_create_inline_reply
+
+    reply = InlineCommentReplySchema(message="AI reply text")
+    await review_comment_gateway.process_inline_reply("t1", reply)
+    output = capsys.readouterr().out
+
+    assert "Failed to create inline reply" in output
+
+
+# === SUMMARY REPLY ===
+
+@pytest.mark.asyncio
+async def test_process_summary_reply_success(
+    fake_vcs_client: FakeVCSClient,
+    review_comment_gateway: ReviewCommentGateway,
+):
+    """Should create summary reply comment."""
+    reply = SummaryCommentReplySchema(text="AI summary reply")
+    await review_comment_gateway.process_summary_reply("t42", reply)
+    assert any(call[0] == "create_summary_reply" for call in fake_vcs_client.calls)
+
+
+@pytest.mark.asyncio
+async def test_process_summary_reply_error(
+    capsys,
+    fake_vcs_client: FakeVCSClient,
+    review_comment_gateway: ReviewCommentGateway,
+):
+    """Should log and emit error on exception in summary reply."""
+
+    async def failing_create_summary_reply(thread_id: str, body: str):
+        raise RuntimeError("Network fail")
+
+    fake_vcs_client.create_summary_reply = failing_create_summary_reply
+
+    reply = SummaryCommentReplySchema(text="AI summary reply")
+    await review_comment_gateway.process_summary_reply("t42", reply)
+    output = capsys.readouterr().out
+
+    assert "Failed to create summary reply" in output
+
+
+# === INLINE COMMENT ===
+
+@pytest.mark.asyncio
+async def test_process_inline_comment_happy_path(
+    fake_vcs_client: FakeVCSClient,
+    review_comment_gateway: ReviewCommentGateway,
+):
+    """Should create inline comment via VCS."""
+    comment = InlineCommentSchema(file="f.py", line=1, message="AI inline comment")
+    await review_comment_gateway.process_inline_comment(comment)
+    assert any(call[0] == "create_inline_comment" for call in fake_vcs_client.calls)
+
+
+@pytest.mark.asyncio
+async def test_process_inline_comment_error_fallback(
+    capsys,
+    fake_vcs_client: FakeVCSClient,
+    review_comment_gateway: ReviewCommentGateway,
+):
+    """Should fall back to summary comment when inline comment fails."""
+
+    async def failing_create_inline_comment(file: str, line: int, message: str):
+        raise RuntimeError("Failed to post inline")
+
+    fake_vcs_client.create_inline_comment = failing_create_inline_comment
+
+    comment = InlineCommentSchema(file="x.py", line=5, message="AI inline")
+    await review_comment_gateway.process_inline_comment(comment)
+    output = capsys.readouterr().out
+
+    assert "Falling back to general comment" in output
+    assert any(call[0] == "create_general_comment" for call in fake_vcs_client.calls)
+
+
+# === SUMMARY COMMENT ===
+
+@pytest.mark.asyncio
+async def test_process_summary_comment_happy_path(
+    fake_vcs_client: FakeVCSClient,
+    review_comment_gateway: ReviewCommentGateway,
+):
+    """Should create general summary comment successfully."""
+    comment = SummaryCommentSchema(text="AI summary")
+    await review_comment_gateway.process_summary_comment(comment)
+    assert any(call[0] == "create_general_comment" for call in fake_vcs_client.calls)
+
+
+@pytest.mark.asyncio
+async def test_process_summary_comment_error(
+    capsys,
+    fake_vcs_client: FakeVCSClient,
+    review_comment_gateway: ReviewCommentGateway,
+):
+    """Should log error if summary comment creation fails."""
+
+    async def failing_create_general_comment(body: str):
+        raise RuntimeError("Backend down")
+
+    fake_vcs_client.create_general_comment = failing_create_general_comment
+
+    comment = SummaryCommentSchema(text="Broken")
+    await review_comment_gateway.process_summary_comment(comment)
+    output = capsys.readouterr().out
+
+    assert "Failed to process summary comment" in output
+
+
+@pytest.mark.asyncio
+async def test_process_inline_comments_calls_each(
+    fake_vcs_client: FakeVCSClient,
+    review_comment_gateway: ReviewCommentGateway,
+):
+    """Should process all inline comments concurrently."""
+    comments = InlineCommentListSchema(root=[
+        InlineCommentSchema(file="a.py", line=1, message="c1"),
+        InlineCommentSchema(file="b.py", line=2, message="c2"),
+    ])
+
+    await review_comment_gateway.process_inline_comments(comments)
+
+    created = [call for call in fake_vcs_client.calls if call[0] == "create_inline_comment"]
+    assert len(created) == 2
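
These gateway tests lean on a FakeVCSClient test double from ai_review/tests/fixtures/services/vcs.py (also new in this release, not shown here) together with fake_vcs_client and review_comment_gateway fixtures. From the way the tests drive it, the double appears to record invocations in a calls list, serve canned values from a responses dict keyed by method name, and expose async methods matching the VCS client protocol. A rough sketch under those assumptions (method signatures mirror the replacement coroutines the tests monkeypatch; defaults and wiring are guesses):

# Hypothetical shape of the test double; not the shipped fixture.
from ai_review.services.vcs.types import ReviewCommentSchema, ReviewThreadSchema


class FakeVCSClient:
    def __init__(self) -> None:
        self.calls: list[tuple] = []        # (method_name, *args) tuples inspected by the tests
        self.responses: dict[str, list] = {}  # canned return values keyed by method name

    async def get_inline_threads(self) -> list[ReviewThreadSchema]:
        self.calls.append(("get_inline_threads",))
        return self.responses.get("get_inline_threads", [])

    async def get_general_threads(self) -> list[ReviewThreadSchema]:
        self.calls.append(("get_general_threads",))
        return self.responses.get("get_general_threads", [])

    async def get_inline_comments(self) -> list[ReviewCommentSchema]:
        self.calls.append(("get_inline_comments",))
        return self.responses.get("get_inline_comments", [])

    async def get_general_comments(self) -> list[ReviewCommentSchema]:
        self.calls.append(("get_general_comments",))
        return self.responses.get("get_general_comments", [])

    async def create_inline_comment(self, file: str, line: int, message: str) -> None:
        self.calls.append(("create_inline_comment", file, line, message))

    async def create_inline_reply(self, thread_id: str, body: str) -> None:
        self.calls.append(("create_inline_reply", thread_id, body))

    async def create_summary_reply(self, thread_id: str, body: str) -> None:
        self.calls.append(("create_summary_reply", thread_id, body))

    async def create_general_comment(self, body: str) -> None:
        self.calls.append(("create_general_comment", body))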