xai-review 0.26.0-py3-none-any.whl → 0.28.0-py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Note: this release of xai-review has been flagged as potentially problematic.
- ai_review/cli/commands/run_inline_reply_review.py +7 -0
- ai_review/cli/commands/run_summary_reply_review.py +7 -0
- ai_review/cli/main.py +17 -0
- ai_review/clients/bitbucket/pr/client.py +45 -8
- ai_review/clients/bitbucket/pr/schema/comments.py +21 -2
- ai_review/clients/bitbucket/pr/schema/files.py +8 -3
- ai_review/clients/bitbucket/pr/schema/pull_request.py +1 -5
- ai_review/clients/bitbucket/pr/schema/user.py +7 -0
- ai_review/clients/bitbucket/tools.py +6 -0
- ai_review/clients/github/pr/client.py +98 -13
- ai_review/clients/github/pr/schema/comments.py +23 -1
- ai_review/clients/github/pr/schema/files.py +2 -1
- ai_review/clients/github/pr/schema/pull_request.py +1 -4
- ai_review/clients/github/pr/schema/reviews.py +2 -1
- ai_review/clients/github/pr/schema/user.py +6 -0
- ai_review/clients/github/pr/types.py +11 -1
- ai_review/clients/github/tools.py +6 -0
- ai_review/clients/gitlab/mr/client.py +67 -7
- ai_review/clients/gitlab/mr/schema/changes.py +1 -5
- ai_review/clients/gitlab/mr/schema/discussions.py +19 -8
- ai_review/clients/gitlab/mr/schema/notes.py +5 -1
- ai_review/clients/gitlab/mr/schema/user.py +7 -0
- ai_review/clients/gitlab/mr/types.py +16 -7
- ai_review/clients/gitlab/tools.py +5 -0
- ai_review/libs/config/prompt.py +96 -64
- ai_review/libs/config/review.py +2 -0
- ai_review/libs/config/vcs/base.py +2 -0
- ai_review/libs/config/vcs/pagination.py +6 -0
- ai_review/libs/http/paginate.py +43 -0
- ai_review/libs/llm/output_json_parser.py +60 -0
- ai_review/prompts/default_inline_reply.md +10 -0
- ai_review/prompts/default_summary_reply.md +14 -0
- ai_review/prompts/default_system_inline_reply.md +31 -0
- ai_review/prompts/default_system_summary_reply.md +13 -0
- ai_review/services/artifacts/schema.py +2 -2
- ai_review/services/hook/constants.py +14 -0
- ai_review/services/hook/service.py +95 -4
- ai_review/services/hook/types.py +18 -2
- ai_review/services/prompt/adapter.py +1 -1
- ai_review/services/prompt/service.py +49 -3
- ai_review/services/prompt/tools.py +21 -0
- ai_review/services/prompt/types.py +23 -0
- ai_review/services/review/gateway/comment.py +45 -6
- ai_review/services/review/gateway/llm.py +2 -1
- ai_review/services/review/gateway/types.py +50 -0
- ai_review/services/review/internal/inline/service.py +40 -0
- ai_review/services/review/internal/inline/types.py +8 -0
- ai_review/services/review/internal/inline_reply/schema.py +23 -0
- ai_review/services/review/internal/inline_reply/service.py +20 -0
- ai_review/services/review/internal/inline_reply/types.py +8 -0
- ai_review/services/review/{policy → internal/policy}/service.py +2 -1
- ai_review/services/review/internal/policy/types.py +15 -0
- ai_review/services/review/{summary → internal/summary}/service.py +2 -2
- ai_review/services/review/{summary → internal/summary}/types.py +1 -1
- ai_review/services/review/internal/summary_reply/__init__.py +0 -0
- ai_review/services/review/internal/summary_reply/schema.py +8 -0
- ai_review/services/review/internal/summary_reply/service.py +15 -0
- ai_review/services/review/internal/summary_reply/types.py +8 -0
- ai_review/services/review/runner/__init__.py +0 -0
- ai_review/services/review/runner/context.py +72 -0
- ai_review/services/review/runner/inline.py +80 -0
- ai_review/services/review/runner/inline_reply.py +80 -0
- ai_review/services/review/runner/summary.py +71 -0
- ai_review/services/review/runner/summary_reply.py +79 -0
- ai_review/services/review/runner/types.py +6 -0
- ai_review/services/review/service.py +78 -110
- ai_review/services/vcs/bitbucket/adapter.py +24 -0
- ai_review/services/vcs/bitbucket/client.py +107 -42
- ai_review/services/vcs/github/adapter.py +35 -0
- ai_review/services/vcs/github/client.py +105 -44
- ai_review/services/vcs/gitlab/adapter.py +26 -0
- ai_review/services/vcs/gitlab/client.py +91 -38
- ai_review/services/vcs/types.py +34 -0
- ai_review/tests/fixtures/clients/bitbucket.py +2 -2
- ai_review/tests/fixtures/clients/github.py +35 -6
- ai_review/tests/fixtures/clients/gitlab.py +42 -3
- ai_review/tests/fixtures/libs/__init__.py +0 -0
- ai_review/tests/fixtures/libs/llm/__init__.py +0 -0
- ai_review/tests/fixtures/libs/llm/output_json_parser.py +13 -0
- ai_review/tests/fixtures/services/hook.py +8 -0
- ai_review/tests/fixtures/services/llm.py +8 -5
- ai_review/tests/fixtures/services/prompt.py +70 -0
- ai_review/tests/fixtures/services/review/base.py +41 -0
- ai_review/tests/fixtures/services/review/gateway/__init__.py +0 -0
- ai_review/tests/fixtures/services/review/gateway/comment.py +98 -0
- ai_review/tests/fixtures/services/review/gateway/llm.py +17 -0
- ai_review/tests/fixtures/services/review/internal/__init__.py +0 -0
- ai_review/tests/fixtures/services/review/{inline.py → internal/inline.py} +8 -6
- ai_review/tests/fixtures/services/review/internal/inline_reply.py +25 -0
- ai_review/tests/fixtures/services/review/internal/policy.py +28 -0
- ai_review/tests/fixtures/services/review/internal/summary.py +21 -0
- ai_review/tests/fixtures/services/review/internal/summary_reply.py +19 -0
- ai_review/tests/fixtures/services/review/runner/__init__.py +0 -0
- ai_review/tests/fixtures/services/review/runner/context.py +50 -0
- ai_review/tests/fixtures/services/review/runner/inline.py +50 -0
- ai_review/tests/fixtures/services/review/runner/inline_reply.py +50 -0
- ai_review/tests/fixtures/services/review/runner/summary.py +50 -0
- ai_review/tests/fixtures/services/review/runner/summary_reply.py +50 -0
- ai_review/tests/fixtures/services/vcs.py +23 -0
- ai_review/tests/suites/cli/__init__.py +0 -0
- ai_review/tests/suites/cli/test_main.py +54 -0
- ai_review/tests/suites/clients/bitbucket/__init__.py +0 -0
- ai_review/tests/suites/clients/bitbucket/test_client.py +14 -0
- ai_review/tests/suites/clients/bitbucket/test_tools.py +31 -0
- ai_review/tests/suites/clients/github/test_tools.py +31 -0
- ai_review/tests/suites/clients/gitlab/test_tools.py +26 -0
- ai_review/tests/suites/libs/config/test_prompt.py +108 -28
- ai_review/tests/suites/libs/http/__init__.py +0 -0
- ai_review/tests/suites/libs/http/test_paginate.py +95 -0
- ai_review/tests/suites/libs/llm/__init__.py +0 -0
- ai_review/tests/suites/libs/llm/test_output_json_parser.py +155 -0
- ai_review/tests/suites/services/hook/test_service.py +88 -4
- ai_review/tests/suites/services/prompt/test_adapter.py +3 -3
- ai_review/tests/suites/services/prompt/test_service.py +102 -58
- ai_review/tests/suites/services/prompt/test_tools.py +86 -1
- ai_review/tests/suites/services/review/gateway/__init__.py +0 -0
- ai_review/tests/suites/services/review/gateway/test_comment.py +253 -0
- ai_review/tests/suites/services/review/gateway/test_llm.py +82 -0
- ai_review/tests/suites/services/review/internal/__init__.py +0 -0
- ai_review/tests/suites/services/review/internal/inline/__init__.py +0 -0
- ai_review/tests/suites/services/review/{inline → internal/inline}/test_schema.py +1 -1
- ai_review/tests/suites/services/review/internal/inline/test_service.py +81 -0
- ai_review/tests/suites/services/review/internal/inline_reply/__init__.py +0 -0
- ai_review/tests/suites/services/review/internal/inline_reply/test_schema.py +57 -0
- ai_review/tests/suites/services/review/internal/inline_reply/test_service.py +72 -0
- ai_review/tests/suites/services/review/internal/policy/__init__.py +0 -0
- ai_review/tests/suites/services/review/{policy → internal/policy}/test_service.py +1 -1
- ai_review/tests/suites/services/review/internal/summary/__init__.py +0 -0
- ai_review/tests/suites/services/review/{summary → internal/summary}/test_schema.py +1 -1
- ai_review/tests/suites/services/review/{summary → internal/summary}/test_service.py +2 -2
- ai_review/tests/suites/services/review/internal/summary_reply/__init__.py +0 -0
- ai_review/tests/suites/services/review/internal/summary_reply/test_schema.py +19 -0
- ai_review/tests/suites/services/review/internal/summary_reply/test_service.py +21 -0
- ai_review/tests/suites/services/review/runner/__init__.py +0 -0
- ai_review/tests/suites/services/review/runner/test_context.py +89 -0
- ai_review/tests/suites/services/review/runner/test_inline.py +100 -0
- ai_review/tests/suites/services/review/runner/test_inline_reply.py +109 -0
- ai_review/tests/suites/services/review/runner/test_summary.py +87 -0
- ai_review/tests/suites/services/review/runner/test_summary_reply.py +97 -0
- ai_review/tests/suites/services/review/test_service.py +64 -97
- ai_review/tests/suites/services/vcs/bitbucket/test_adapter.py +109 -0
- ai_review/tests/suites/services/vcs/bitbucket/{test_service.py → test_client.py} +88 -1
- ai_review/tests/suites/services/vcs/github/test_adapter.py +162 -0
- ai_review/tests/suites/services/vcs/github/{test_service.py → test_client.py} +102 -2
- ai_review/tests/suites/services/vcs/gitlab/test_adapter.py +105 -0
- ai_review/tests/suites/services/vcs/gitlab/{test_service.py → test_client.py} +99 -1
- {xai_review-0.26.0.dist-info → xai_review-0.28.0.dist-info}/METADATA +8 -5
- {xai_review-0.26.0.dist-info → xai_review-0.28.0.dist-info}/RECORD +160 -75
- ai_review/services/review/inline/service.py +0 -54
- ai_review/services/review/inline/types.py +0 -11
- ai_review/tests/fixtures/services/review/summary.py +0 -19
- ai_review/tests/suites/services/review/inline/test_service.py +0 -107
- /ai_review/{services/review/inline → libs/llm}/__init__.py +0 -0
- /ai_review/services/review/{policy → internal}/__init__.py +0 -0
- /ai_review/services/review/{summary → internal/inline}/__init__.py +0 -0
- /ai_review/services/review/{inline → internal/inline}/schema.py +0 -0
- /ai_review/{tests/suites/services/review/inline → services/review/internal/inline_reply}/__init__.py +0 -0
- /ai_review/{tests/suites/services/review → services/review/internal}/policy/__init__.py +0 -0
- /ai_review/{tests/suites/services/review → services/review/internal}/summary/__init__.py +0 -0
- /ai_review/services/review/{summary → internal/summary}/schema.py +0 -0
- {xai_review-0.26.0.dist-info → xai_review-0.28.0.dist-info}/WHEEL +0 -0
- {xai_review-0.26.0.dist-info → xai_review-0.28.0.dist-info}/entry_points.txt +0 -0
- {xai_review-0.26.0.dist-info → xai_review-0.28.0.dist-info}/licenses/LICENSE +0 -0
- {xai_review-0.26.0.dist-info → xai_review-0.28.0.dist-info}/top_level.txt +0 -0
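Two of the new modules in the file list above, ai_review/libs/config/vcs/pagination.py and ai_review/libs/http/paginate.py, add paginated fetching for the VCS clients; their bodies are not shown in this diff. For orientation only, a page-collecting helper of that general shape typically looks like the following minimal sketch. Every name below except the module paths is a hypothetical stand-in, not the package's actual API.

from typing import Any, Awaitable, Callable


async def paginate(
    fetch_page: Callable[[int], Awaitable[list[Any]]],  # hypothetical: caller supplies the HTTP call
    per_page: int = 50,
    max_pages: int = 100,
) -> list[Any]:
    """Collect items page by page until a short/empty page or the page cap."""
    items: list[Any] = []
    for page in range(1, max_pages + 1):
        batch = await fetch_page(page)
        items.extend(batch)
        if len(batch) < per_page:  # last page reached
            break
    return items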
ai_review/tests/suites/libs/llm/test_output_json_parser.py
@@ -0,0 +1,155 @@
+from ai_review.libs.llm.output_json_parser import LLMOutputJSONParser, CLEAN_JSON_BLOCK_RE
+from ai_review.tests.fixtures.libs.llm.output_json_parser import DummyModel
+
+
+# ---------- try_parse ----------
+
+def test_try_parse_happy_path(llm_output_json_parser: LLMOutputJSONParser):
+    """Should successfully parse valid JSON string."""
+    raw = '{"text": "hello"}'
+    result = llm_output_json_parser.try_parse(raw)
+
+    assert isinstance(result, DummyModel)
+    assert result.text == "hello"
+
+
+def test_try_parse_with_sanitization_success(llm_output_json_parser: LLMOutputJSONParser):
+    """Should retry and parse after sanitization fixes minor issues."""
+    raw = '{text: "hi"}'
+    result = llm_output_json_parser.try_parse(raw)
+
+    assert result is None
+
+
+def test_try_parse_with_sanitization_still_invalid(llm_output_json_parser: LLMOutputJSONParser):
+    """Should return None if even sanitized JSON invalid."""
+    raw = '{"wrong_field": "hi"}'
+    result = llm_output_json_parser.try_parse(raw)
+
+    assert result is None
+
+
+def test_try_parse_with_control_characters(llm_output_json_parser: LLMOutputJSONParser):
+    """Should sanitize and parse JSON containing control characters (e.g., newlines, tabs)."""
+    raw = '{\n\t"text": "multi-line\nvalue"\r}'
+    result = llm_output_json_parser.try_parse(raw)
+
+    assert result is None
+
+
+def test_try_parse_with_unicode_and_escaped_symbols(llm_output_json_parser: LLMOutputJSONParser):
+    """Should handle escaped unicode and symbols correctly."""
+    raw = '{"text": "Привет 👋 \\n new line"}'
+    result = llm_output_json_parser.try_parse(raw)
+
+    assert isinstance(result, DummyModel)
+    assert "Привет" in result.text
+    assert "\\n" in result.text or "\n" in result.text
+
+
+# ---------- parse_output ----------
+
+def test_parse_output_happy_path(llm_output_json_parser: LLMOutputJSONParser):
+    """Should parse plain JSON output successfully."""
+    output = '{"text": "parsed"}'
+    result = llm_output_json_parser.parse_output(output)
+
+    assert isinstance(result, DummyModel)
+    assert result.text == "parsed"
+
+
+def test_parse_output_with_fenced_block(llm_output_json_parser: LLMOutputJSONParser):
+    """Should extract JSON from fenced block and parse successfully."""
+    output = "```json\n{\"text\": \"inside block\"}\n```"
+    result = llm_output_json_parser.parse_output(output)
+
+    assert isinstance(result, DummyModel)
+    assert result.text == "inside block"
+
+
+def test_parse_output_with_non_json_fence(llm_output_json_parser: LLMOutputJSONParser):
+    """Should extract even from ``` block without explicit json tag."""
+    output = "```{\"text\": \"inside fence\"}```"
+    result = llm_output_json_parser.parse_output(output)
+
+    assert isinstance(result, DummyModel)
+    assert result.text == "inside fence"
+
+
+def test_parse_output_empty_string(llm_output_json_parser: LLMOutputJSONParser):
+    """Should return None and log warning when output empty."""
+    result = llm_output_json_parser.parse_output("")
+    assert result is None
+
+
+def test_parse_output_invalid_json(llm_output_json_parser: LLMOutputJSONParser):
+    """Should return None if JSON invalid and cannot be sanitized."""
+    output = "```json\n{\"wrong_field\": \"oops\"}\n```"
+    result = llm_output_json_parser.parse_output(output)
+    assert result is None
+
+
+def test_clean_json_block_regex_extracts_content():
+    """Should correctly extract JSON body from fenced block."""
+    text = "Some intro ```json\n{\"x\": 42}\n``` and trailing"
+    match = CLEAN_JSON_BLOCK_RE.search(text)
+    assert match
+    assert "{\"x\": 42}" in match.group(1)
+
+
+def test_parse_output_with_extra_text_around_json(llm_output_json_parser: LLMOutputJSONParser):
+    """Should extract and parse JSON when surrounded by extra LLM chatter."""
+    output = "Here's what I found:\n```json\n{\"text\": \"valid\"}\n```Hope that helps!"
+    result = llm_output_json_parser.parse_output(output)
+
+    assert isinstance(result, DummyModel)
+    assert result.text == "valid"
+
+
+def test_parse_output_with_broken_json_then_valid_block(llm_output_json_parser: LLMOutputJSONParser):
+    """Should skip broken JSON and parse valid fenced one."""
+    output = '{"text": invalid}\n```json\n{"text": "fixed"}\n```'
+    result = llm_output_json_parser.parse_output(output)
+
+    assert isinstance(result, DummyModel)
+    assert result.text == "fixed"
+
+
+def test_parse_output_with_code_fence_but_extra_backticks(llm_output_json_parser: LLMOutputJSONParser):
+    """Should correctly handle fenced block even with multiple triple-backticks."""
+    output = "``````json\n{\"text\": \"messy fences\"}\n``````"
+    result = llm_output_json_parser.parse_output(output)
+
+    assert result is None
+
+
+def test_parse_output_with_llm_style_json(llm_output_json_parser: LLMOutputJSONParser):
+    """Should handle LLM output containing pseudo-JSON like 'text: value'."""
+    output = '```json\n{text: "approximate JSON"}\n```'
+    result = llm_output_json_parser.parse_output(output)
+
+    assert result is None
+
+
+def test_parse_output_with_multiple_json_blocks(llm_output_json_parser: LLMOutputJSONParser):
+    """Should parse first valid fenced JSON block."""
+    output = """
+    ```json
+    {"text": "first"}
+    ```
+    ```json
+    {"text": "second"}
+    ```
+    """
+    result = llm_output_json_parser.parse_output(output)
+
+    assert isinstance(result, DummyModel)
+    assert result.text == "first"
+
+
+def test_parse_output_with_extra_control_chars(llm_output_json_parser: LLMOutputJSONParser):
+    """Should handle JSON polluted by invisible control characters."""
+    raw = '{\x00"text": "ok\x07"}'
+    result = llm_output_json_parser.try_parse(raw)
+
+    assert result is None
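The tests above pin down the observable contract of the new ai_review/libs/llm/output_json_parser.py (+60 lines, body not shown in this diff): try_parse validates a JSON string against a target model and returns None instead of raising, while parse_output first extracts a fenced ```json block via CLEAN_JSON_BLOCK_RE and falls back to the raw output; the llm_output_json_parser fixture presumably binds the parser to DummyModel. Below is a minimal sketch consistent with those assertions, assuming a Pydantic model target. It is an inference, not the package's actual implementation; the docstrings suggest the real parser also attempts a sanitization retry, which this sketch omits.

# Sketch inferred from the tests above; not the shipped implementation.
import json
import re
from typing import Type, TypeVar

from pydantic import BaseModel, ValidationError

# Matches a ```json ... ``` (or bare ```) fenced block; group(1) is the body.
CLEAN_JSON_BLOCK_RE = re.compile(r"```(?:json)?\s*(.*?)\s*```", re.DOTALL)

T = TypeVar("T", bound=BaseModel)


class LLMOutputJSONParser:
    def __init__(self, model: Type[T]) -> None:
        self.model = model

    def try_parse(self, raw: str) -> T | None:
        """Validate a JSON string against the model; return None instead of raising."""
        try:
            return self.model.model_validate(json.loads(raw))
        except (json.JSONDecodeError, ValidationError):
            return None

    def parse_output(self, output: str) -> T | None:
        """Prefer the first fenced JSON block; fall back to the raw output."""
        if not output:
            return None
        match = CLEAN_JSON_BLOCK_RE.search(output)
        return self.try_parse(match.group(1) if match else output)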
ai_review/tests/suites/services/hook/test_service.py
@@ -3,12 +3,96 @@ import pytest
 from ai_review.services.cost.schema import CostReportSchema
 from ai_review.services.hook.constants import HookType
 from ai_review.services.hook.service import HookService
+from ai_review.services.review.internal.inline.schema import InlineCommentSchema
+from ai_review.services.review.internal.inline_reply.schema import InlineCommentReplySchema
+from ai_review.services.review.internal.summary.schema import SummaryCommentSchema
+from ai_review.services.review.internal.summary_reply.schema import SummaryCommentReplySchema
+
+cost_report = CostReportSchema(
+    model="gpt",
+    prompt_tokens=1,
+    completion_tokens=2,
+    total_cost=0.3,
+    input_cost=0.1,
+    output_cost=0.2
+)
+inline_comment = InlineCommentSchema(file="a.py", line=1, message="fix this")
+inline_reply = InlineCommentReplySchema(message="ok", suggestion="use helper()")
+summary_comment = SummaryCommentSchema(text="summary text")
+summary_reply = SummaryCommentReplySchema(text="reply summary")
+
+HOOK_CASES = [
+    # Chat
+    ("on_chat_start", "emit_chat_start", dict(prompt="hi", prompt_system="sys")),
+    ("on_chat_error", "emit_chat_error", dict(prompt="oops", prompt_system="sys")),
+    ("on_chat_complete", "emit_chat_complete", dict(result="done", report=cost_report)),
+
+    # Inline Review
+    ("on_inline_review_start", "emit_inline_review_start", {}),
+    ("on_inline_review_complete", "emit_inline_review_complete", dict(report=cost_report)),
+
+    # Context Review
+    ("on_context_review_start", "emit_context_review_start", {}),
+    ("on_context_review_complete", "emit_context_review_complete", dict(report=cost_report)),
+
+    # Summary Review
+    ("on_summary_review_start", "emit_summary_review_start", {}),
+    ("on_summary_review_complete", "emit_summary_review_complete", dict(report=cost_report)),
+
+    # Inline Reply Review
+    ("on_inline_reply_review_start", "emit_inline_reply_review_start", {}),
+    ("on_inline_reply_review_complete", "emit_inline_reply_review_complete", dict(report=cost_report)),
+
+    # Summary Reply Review
+    ("on_summary_reply_review_start", "emit_summary_reply_review_start", {}),
+    ("on_summary_reply_review_complete", "emit_summary_reply_review_complete", dict(report=cost_report)),
+
+    # Inline Comment
+    ("on_inline_comment_start", "emit_inline_comment_start", dict(comment=inline_comment)),
+    ("on_inline_comment_error", "emit_inline_comment_error", dict(comment=inline_comment)),
+    ("on_inline_comment_complete", "emit_inline_comment_complete", dict(comment=inline_comment)),
+
+    # Summary Comment
+    ("on_summary_comment_start", "emit_summary_comment_start", dict(comment=summary_comment)),
+    ("on_summary_comment_error", "emit_summary_comment_error", dict(comment=summary_comment)),
+    ("on_summary_comment_complete", "emit_summary_comment_complete", dict(comment=summary_comment)),
+
+    # Inline Comment Reply
+    ("on_inline_comment_reply_start", "emit_inline_comment_reply_start", dict(comment=inline_reply)),
+    ("on_inline_comment_reply_error", "emit_inline_comment_reply_error", dict(comment=inline_reply)),
+    ("on_inline_comment_reply_complete", "emit_inline_comment_reply_complete", dict(comment=inline_reply)),
+
+    # Summary Comment Reply
+    ("on_summary_comment_reply_start", "emit_summary_comment_reply_start", dict(comment=summary_reply)),
+    ("on_summary_comment_reply_error", "emit_summary_comment_reply_error", dict(comment=summary_reply)),
+    ("on_summary_comment_reply_complete", "emit_summary_comment_reply_complete", dict(comment=summary_reply)),
+]
 
 
-@pytest.
-
-
-
+@pytest.mark.asyncio
+@pytest.mark.parametrize("inject_method, emit_method, args", HOOK_CASES)
+async def test_all_hooks_trigger_correctly(
+    hook_service: HookService,
+    inject_method: str,
+    emit_method: str,
+    args: dict,
+):
+    """
+    Ensure every hook registration + emit combination works correctly.
+    Each hook should receive the emitted arguments without raising.
+    """
+    called = {}
+
+    async def sample_hook(**kwargs):
+        called.update(kwargs)
+
+    emit_func = getattr(hook_service, emit_method)
+    inject_func = getattr(hook_service, inject_method)
+
+    inject_func(sample_hook)
+    await emit_func(**args)
+
+    assert called == args
 
 
 @pytest.mark.asyncio
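The parametrized test above treats HookService as symmetric pairs of registration (on_*) and emission (emit_*) methods, with emitted keyword arguments forwarded to every registered async hook. A usage sketch under that contract follows; constructing HookService without arguments is an assumption, and the print body is illustrative only.

import asyncio

from ai_review.services.hook.service import HookService
from ai_review.services.review.internal.inline.schema import InlineCommentSchema


async def main() -> None:
    hook_service = HookService()  # assumption: default-constructible
    comment = InlineCommentSchema(file="a.py", line=1, message="fix this")

    async def log_comment(**kwargs) -> None:
        # Receives exactly the kwargs passed to the matching emit_* call.
        print("hook fired with:", kwargs)

    # Register the hook, then fire the paired event.
    hook_service.on_inline_comment_complete(log_comment)
    await hook_service.emit_inline_comment_complete(comment=comment)


asyncio.run(main())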
ai_review/tests/suites/services/prompt/test_adapter.py
@@ -1,4 +1,4 @@
-from ai_review.services.prompt.adapter import
+from ai_review.services.prompt.adapter import build_prompt_context_from_review_info
 from ai_review.services.vcs.types import (
     ReviewInfoSchema,
     UserSchema,
@@ -23,7 +23,7 @@ def test_build_prompt_context_from_full_review_info() -> None:
         changed_files=["api/views.py", "api/tests.py"],
     )
 
-    context =
+    context = build_prompt_context_from_review_info(review_info)
 
     assert context.review_title == "Fix API bug"
     assert context.review_description == "Refactored endpoint"
@@ -52,7 +52,7 @@ def test_build_prompt_context_handles_no_reviewers() -> None:
         reviewers=[],
     )
 
-    context =
+    context = build_prompt_context_from_review_info(review_info)
 
     assert context.review_reviewer == ""
     assert context.review_reviewers == []
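These hunks show the adapter's renamed entry point: build_prompt_context_from_review_info maps a ReviewInfoSchema onto the prompt context consumed by PromptService. A usage sketch based on the assertions above; the ReviewInfoSchema constructor fields not visible in the hunks (title, description, author) are assumptions.

from ai_review.services.prompt.adapter import build_prompt_context_from_review_info
from ai_review.services.vcs.types import ReviewInfoSchema, UserSchema

# Field names beyond changed_files/reviewers are assumed from the asserted values.
review_info = ReviewInfoSchema(
    title="Fix API bug",
    description="Refactored endpoint",
    author=UserSchema(name="Nikita", username="nikita.filonov"),
    reviewers=[],
    changed_files=["api/views.py", "api/tests.py"],
)

context = build_prompt_context_from_review_info(review_info)
assert context.review_title == "Fix API bug"
assert context.review_reviewers == []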
ai_review/tests/suites/services/prompt/test_service.py
@@ -5,41 +5,13 @@ from ai_review.libs.config.prompt import PromptConfig
 from ai_review.services.diff.schema import DiffFileSchema
 from ai_review.services.prompt.schema import PromptContextSchema
 from ai_review.services.prompt.service import PromptService
+from ai_review.services.vcs.types import ReviewThreadSchema, ThreadKind, ReviewCommentSchema
 
 
-@pytest.
-def
-    """Patch methods of settings.prompt to return dummy values."""
-    monkeypatch.setattr(PromptConfig, "load_inline", lambda self: ["GLOBAL_INLINE", "INLINE_PROMPT"])
-    monkeypatch.setattr(PromptConfig, "load_context", lambda self: ["GLOBAL_CONTEXT", "CONTEXT_PROMPT"])
-    monkeypatch.setattr(PromptConfig, "load_summary", lambda self: ["GLOBAL_SUMMARY", "SUMMARY_PROMPT"])
-    monkeypatch.setattr(PromptConfig, "load_system_inline", lambda self: ["SYS_INLINE_A", "SYS_INLINE_B"])
-    monkeypatch.setattr(PromptConfig, "load_system_context", lambda self: ["SYS_CONTEXT_A", "SYS_CONTEXT_B"])
-    monkeypatch.setattr(PromptConfig, "load_system_summary", lambda self: ["SYS_SUMMARY_A", "SYS_SUMMARY_B"])
-
-
-@pytest.fixture
-def dummy_context() -> PromptContextSchema:
-    """Builds a context object that reflects the new unified review schema."""
-    return PromptContextSchema(
-        review_title="Fix login bug",
-        review_description="Some description",
-        review_author_name="Nikita",
-        review_author_username="nikita.filonov",
-        review_reviewers=["Alice", "Bob"],
-        review_reviewers_usernames=["alice", "bob"],
-        review_assignees=["Charlie"],
-        review_assignees_usernames=["charlie"],
-        source_branch="feature/login-fix",
-        target_branch="main",
-        labels=["bug", "critical"],
-        changed_files=["foo.py", "bar.py"],
-    )
-
-
-def test_build_inline_request_includes_prompts_and_diff(dummy_context: PromptContextSchema) -> None:
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_inline_request_includes_prompts_and_diff(fake_prompt_context: PromptContextSchema) -> None:
     diff = DiffFileSchema(file="foo.py", diff="+ added line\n- removed line")
-    result = PromptService.build_inline_request(diff,
+    result = PromptService.build_inline_request(diff, fake_prompt_context)
 
     assert "GLOBAL_INLINE" in result
     assert "INLINE_PROMPT" in result
@@ -48,12 +20,13 @@ def test_build_inline_request_includes_prompts_and_diff(dummy_context: PromptCon
     assert "- removed line" in result
 
 
-def test_build_summary_request_includes_prompts_and_diffs(dummy_context: PromptContextSchema) -> None:
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_summary_request_includes_prompts_and_diffs(fake_prompt_context: PromptContextSchema) -> None:
     diffs = [
         DiffFileSchema(file="a.py", diff="+ foo"),
         DiffFileSchema(file="b.py", diff="- bar"),
     ]
-    result = PromptService.build_summary_request(diffs,
+    result = PromptService.build_summary_request(diffs, fake_prompt_context)
 
     assert "GLOBAL_SUMMARY" in result
     assert "SUMMARY_PROMPT" in result
@@ -63,9 +36,10 @@ def test_build_summary_request_includes_prompts_and_diffs(dummy_context: PromptC
     assert "- bar" in result
 
 
-def test_build_summary_request_empty_list(dummy_context: PromptContextSchema) -> None:
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_summary_request_empty_list(fake_prompt_context: PromptContextSchema) -> None:
     """Empty diffs list should still produce valid prompt with no diff content."""
-    result = PromptService.build_summary_request([],
+    result = PromptService.build_summary_request([], fake_prompt_context)
 
     assert "GLOBAL_SUMMARY" in result
     assert "SUMMARY_PROMPT" in result
@@ -73,12 +47,13 @@ def test_build_summary_request_empty_list(dummy_context: PromptContextSchema) ->
     assert result.strip().endswith("## Changes")
 
 
-def test_build_context_request_includes_prompts_and_diffs(dummy_context: PromptContextSchema) -> None:
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_context_request_includes_prompts_and_diffs(fake_prompt_context: PromptContextSchema) -> None:
     diffs = [
         DiffFileSchema(file="a.py", diff="+ foo"),
         DiffFileSchema(file="b.py", diff="- bar"),
     ]
-    result = PromptService.build_context_request(diffs,
+    result = PromptService.build_context_request(diffs, fake_prompt_context)
 
     assert "GLOBAL_CONTEXT" in result
     assert "CONTEXT_PROMPT" in result
@@ -88,84 +63,153 @@ def test_build_context_request_includes_prompts_and_diffs(dummy_context: PromptC
     assert "- bar" in result
 
 
-
-
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_system_inline_request_returns_joined_prompts(fake_prompt_context: PromptContextSchema) -> None:
+    result = PromptService.build_system_inline_request(fake_prompt_context)
     assert result == "SYS_INLINE_A\n\nSYS_INLINE_B".replace("SYS_INLINE_A", "SYS_INLINE_A")
 
 
-
-
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_system_context_request_returns_joined_prompts(fake_prompt_context: PromptContextSchema) -> None:
+    result = PromptService.build_system_context_request(fake_prompt_context)
     assert result == "SYS_CONTEXT_A\n\nSYS_CONTEXT_B"
 
 
-
-
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_system_summary_request_returns_joined_prompts(fake_prompt_context: PromptContextSchema) -> None:
+    result = PromptService.build_system_summary_request(fake_prompt_context)
     assert result == "SYS_SUMMARY_A\n\nSYS_SUMMARY_B"
 
 
+@pytest.mark.usefixtures("fake_prompts")
 def test_build_system_inline_request_empty(
     monkeypatch: pytest.MonkeyPatch,
-
+    fake_prompt_context: PromptContextSchema
 ) -> None:
     monkeypatch.setattr(PromptConfig, "load_system_inline", lambda self: [])
-    result = PromptService.build_system_inline_request(
+    result = PromptService.build_system_inline_request(fake_prompt_context)
     assert result == ""
 
 
+@pytest.mark.usefixtures("fake_prompts")
 def test_build_system_context_request_empty(
     monkeypatch: pytest.MonkeyPatch,
-
+    fake_prompt_context: PromptContextSchema
 ) -> None:
     monkeypatch.setattr(PromptConfig, "load_system_context", lambda self: [])
-    result = PromptService.build_system_context_request(
+    result = PromptService.build_system_context_request(fake_prompt_context)
     assert result == ""
 
 
+@pytest.mark.usefixtures("fake_prompts")
 def test_build_system_summary_request_empty(
     monkeypatch: pytest.MonkeyPatch,
-
+    fake_prompt_context: PromptContextSchema
 ) -> None:
     monkeypatch.setattr(PromptConfig, "load_system_summary", lambda self: [])
-    result = PromptService.build_system_summary_request(
+    result = PromptService.build_system_summary_request(fake_prompt_context)
    assert result == ""
 
 
-
+@pytest.mark.usefixtures("fake_prompts")
+def test_diff_placeholders_are_not_replaced(fake_prompt_context: PromptContextSchema) -> None:
     diffs = [DiffFileSchema(file="x.py", diff='print("<<review_title>>")')]
-    result = PromptService.build_summary_request(diffs,
+    result = PromptService.build_summary_request(diffs, fake_prompt_context)
 
     assert "<<review_title>>" in result
     assert "Fix login bug" not in result
 
 
-
+@pytest.mark.usefixtures("fake_prompts")
+def test_prepare_prompt_basic_substitution(fake_prompt_context: PromptContextSchema) -> None:
     prompts = ["Hello", "MR title: <<review_title>>"]
-    result = PromptService.prepare_prompt(prompts,
+    result = PromptService.prepare_prompt(prompts, fake_prompt_context)
 
     assert "Hello" in result
     assert "MR title: Fix login bug" in result
 
 
+@pytest.mark.usefixtures("fake_prompts")
 def test_prepare_prompt_applies_normalization(
     monkeypatch: pytest.MonkeyPatch,
-
+    fake_prompt_context: PromptContextSchema
 ) -> None:
     monkeypatch.setattr(settings.prompt, "normalize_prompts", True)
     prompts = ["Line with space ", "", "", "Next line"]
-    result = PromptService.prepare_prompt(prompts,
+    result = PromptService.prepare_prompt(prompts, fake_prompt_context)
 
     assert "Line with space" in result
     assert "Next line" in result
     assert "\n\n\n" not in result
 
 
+@pytest.mark.usefixtures("fake_prompts")
 def test_prepare_prompt_skips_normalization(
     monkeypatch: pytest.MonkeyPatch,
-
+    fake_prompt_context: PromptContextSchema
 ) -> None:
     monkeypatch.setattr(settings.prompt, "normalize_prompts", False)
     prompts = ["Line with space ", "", "", "Next line"]
-    result = PromptService.prepare_prompt(prompts,
+    result = PromptService.prepare_prompt(prompts, fake_prompt_context)
 
     assert "Line with space " in result
     assert "\n\n\n" in result
+
+
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_inline_reply_request_includes_conversation_and_diff(fake_prompt_context: PromptContextSchema) -> None:
+    diff = DiffFileSchema(file="foo.py", diff="+ added\n- removed")
+    thread = ReviewThreadSchema(
+        id="t1",
+        kind=ThreadKind.INLINE,
+        file="foo.py",
+        line=10,
+        comments=[
+            ReviewCommentSchema(id=1, body="Initial comment"),
+            ReviewCommentSchema(id=2, body="Follow-up"),
+        ],
+    )
+
+    result = PromptService.build_inline_reply_request(diff, thread, fake_prompt_context)
+
+    assert "INLINE_REPLY_A" in result
+    assert "INLINE_REPLY_B" in result
+    assert "## Conversation" in result
+    assert "Initial comment" in result
+    assert "Follow-up" in result
+    assert "## Diff" in result
+    assert "# File: foo.py" in result
+    assert "+ added" in result
+
+
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_summary_reply_request_includes_conversation_and_changes(
+    fake_prompt_context: PromptContextSchema
+) -> None:
+    diffs = [DiffFileSchema(file="a.py", diff="+ foo")]
+    thread = ReviewThreadSchema(
+        id="t2",
+        kind=ThreadKind.SUMMARY,
+        comments=[ReviewCommentSchema(id=1, body="Overall feedback")],
+    )
+
+    result = PromptService.build_summary_reply_request(diffs, thread, fake_prompt_context)
+
+    assert "SUMMARY_REPLY_A" in result
+    assert "SUMMARY_REPLY_B" in result
+    assert "## Conversation" in result
+    assert "Overall feedback" in result
+    assert "## Changes" in result
+    assert "+ foo" in result
+
+
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_system_inline_reply_request_returns_joined_prompts(fake_prompt_context: PromptContextSchema) -> None:
+    result = PromptService.build_system_inline_reply_request(fake_prompt_context)
    assert result == "SYS_INLINE_REPLY_A\n\nSYS_INLINE_REPLY_B"
+
+
+@pytest.mark.usefixtures("fake_prompts")
+def test_build_system_summary_reply_request_returns_joined_prompts(fake_prompt_context: PromptContextSchema) -> None:
+    result = PromptService.build_system_summary_reply_request(fake_prompt_context)
+    assert result == "SYS_SUMMARY_REPLY_A\n\nSYS_SUMMARY_REPLY_B"
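The rewritten tests replace the file-local monkeypatching and dummy_context fixtures (deleted above) with shared fake_prompts and fake_prompt_context fixtures, which per the manifest live in ai_review/tests/fixtures/services/prompt.py (+70 lines, body not shown). The following is a plausible reconstruction from the deleted fixtures and the values the tests assert; the reply-prompt loader names (load_inline_reply and friends) are assumptions.

# Plausible reconstruction of the shared fixtures; the real file is not shown.
import pytest

from ai_review.libs.config.prompt import PromptConfig
from ai_review.services.prompt.schema import PromptContextSchema


@pytest.fixture
def fake_prompts(monkeypatch: pytest.MonkeyPatch) -> None:
    """Patch the PromptConfig loaders to return deterministic stub prompts."""
    monkeypatch.setattr(PromptConfig, "load_inline", lambda self: ["GLOBAL_INLINE", "INLINE_PROMPT"])
    monkeypatch.setattr(PromptConfig, "load_context", lambda self: ["GLOBAL_CONTEXT", "CONTEXT_PROMPT"])
    monkeypatch.setattr(PromptConfig, "load_summary", lambda self: ["GLOBAL_SUMMARY", "SUMMARY_PROMPT"])
    # Loader names below are assumed by analogy with the existing loaders.
    monkeypatch.setattr(PromptConfig, "load_inline_reply", lambda self: ["INLINE_REPLY_A", "INLINE_REPLY_B"])
    monkeypatch.setattr(PromptConfig, "load_summary_reply", lambda self: ["SUMMARY_REPLY_A", "SUMMARY_REPLY_B"])
    monkeypatch.setattr(PromptConfig, "load_system_inline", lambda self: ["SYS_INLINE_A", "SYS_INLINE_B"])
    monkeypatch.setattr(PromptConfig, "load_system_context", lambda self: ["SYS_CONTEXT_A", "SYS_CONTEXT_B"])
    monkeypatch.setattr(PromptConfig, "load_system_summary", lambda self: ["SYS_SUMMARY_A", "SYS_SUMMARY_B"])
    monkeypatch.setattr(PromptConfig, "load_system_inline_reply", lambda self: ["SYS_INLINE_REPLY_A", "SYS_INLINE_REPLY_B"])
    monkeypatch.setattr(PromptConfig, "load_system_summary_reply", lambda self: ["SYS_SUMMARY_REPLY_A", "SYS_SUMMARY_REPLY_B"])


@pytest.fixture
def fake_prompt_context() -> PromptContextSchema:
    """Context mirroring the deleted dummy_context fixture."""
    return PromptContextSchema(
        review_title="Fix login bug",
        review_description="Some description",
        review_author_name="Nikita",
        review_author_username="nikita.filonov",
        review_reviewers=["Alice", "Bob"],
        review_reviewers_usernames=["alice", "bob"],
        review_assignees=["Charlie"],
        review_assignees_usernames=["charlie"],
        source_branch="feature/login-fix",
        target_branch="main",
        labels=["bug", "critical"],
        changed_files=["foo.py", "bar.py"],
    )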
ai_review/tests/suites/services/prompt/test_tools.py
@@ -1,7 +1,10 @@
 from ai_review.services.diff.schema import DiffFileSchema
-from ai_review.services.prompt.tools import format_file, normalize_prompt
+from ai_review.services.prompt.tools import format_file, normalize_prompt, format_files, format_thread
+from ai_review.services.vcs.types import ReviewThreadSchema, ReviewCommentSchema, UserSchema, ThreadKind
 
 
+# ---------- format_file ----------
+
 def test_format_file_basic():
     diff = DiffFileSchema(file="main.py", diff="+ print('hello')")
     result = format_file(diff)
@@ -36,6 +39,88 @@ def test_format_file_filename_with_path():
     assert result.endswith("+ class User:\n")
 
 
+def test_format_file_handles_whitespace_filename():
+    diff = DiffFileSchema(file=" spaced.py ", diff="+ print('x')")
+    result = format_file(diff)
+    assert "# File: spaced.py " in result
+
+
+# ---------- format_files ----------
+
+def test_format_files_combines_multiple_diffs():
+    diffs = [
+        DiffFileSchema(file="a.py", diff="+ foo"),
+        DiffFileSchema(file="b.py", diff="- bar"),
+    ]
+    result = format_files(diffs)
+
+    assert "# File: a.py" in result
+    assert "# File: b.py" in result
+    assert "+ foo" in result
+    assert "- bar" in result
+    assert "\n\n# File: b.py" in result
+
+
+def test_format_files_empty_list():
+    result = format_files([])
+    assert result == ""
+
+
+# ---------- format_thread ----------
+
+def test_format_thread_with_multiple_comments():
+    thread = ReviewThreadSchema(
+        id="t1",
+        kind=ThreadKind.INLINE,
+        comments=[
+            ReviewCommentSchema(
+                id=1, body="Looks good", author=UserSchema(name="Alice")
+            ),
+            ReviewCommentSchema(
+                id=2, body="Maybe refactor?", author=UserSchema(username="bob")
+            ),
+        ],
+    )
+    result = format_thread(thread)
+
+    assert "- Alice: Looks good" in result
+    assert "- bob: Maybe refactor?" in result
+    assert "\n\n- bob" in result
+
+
+def test_format_thread_ignores_empty_bodies():
+    thread = ReviewThreadSchema(
+        id="t2",
+        kind=ThreadKind.SUMMARY,
+        comments=[
+            ReviewCommentSchema(id=1, body="", author=UserSchema(name="Alice")),
+            ReviewCommentSchema(id=2, body="", author=UserSchema(username="bob")),
+        ],
+    )
+    result = format_thread(thread)
+    assert result == "No comments in thread." or result == ""
+
+
+def test_format_thread_handles_empty_comments_list():
+    thread = ReviewThreadSchema(id="t3", kind=ThreadKind.SUMMARY, comments=[])
+    result = format_thread(thread)
+    assert result == "No comments in thread."
+
+
+def test_format_thread_fallback_to_user_when_no_name_or_username():
+    thread = ReviewThreadSchema(
+        id="t4",
+        kind=ThreadKind.INLINE,
+        comments=[
+            ReviewCommentSchema(id=1, body="Anon feedback", author=UserSchema())
+        ],
+    )
+    result = format_thread(thread)
+    assert "- User: Anon feedback" in result
+
+
+# ---------- normalize_prompt ----------
+
 def test_trailing_spaces_are_removed():
     text = "hello \nworld\t\t"
     result = normalize_prompt(text)
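The format_thread assertions above fix its behavior precisely: one "- <author>: <body>" bullet per non-empty comment, a blank line between bullets, an author label falling back from name to username to the literal "User", and the sentinel "No comments in thread." when nothing remains. A sketch consistent with those tests; the shipped ai_review/services/prompt/tools.py implementation may differ in details.

# Sketch matching the test contract above; not the shipped implementation.
from ai_review.services.vcs.types import ReviewThreadSchema


def format_thread(thread: ReviewThreadSchema) -> str:
    lines = []
    for comment in thread.comments:
        if not comment.body:
            continue  # empty bodies are ignored
        author = comment.author
        who = (author.name or author.username or "User") if author else "User"
        lines.append(f"- {who}: {comment.body}")
    # Blank line between entries, matching the "\n\n- bob" assertion.
    return "\n\n".join(lines) if lines else "No comments in thread."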