xai-review 0.27.0__py3-none-any.whl → 0.28.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of xai-review might be problematic. Click here for more details.
- ai_review/cli/commands/run_inline_reply_review.py +7 -0
- ai_review/cli/commands/run_summary_reply_review.py +7 -0
- ai_review/cli/main.py +17 -0
- ai_review/clients/bitbucket/pr/schema/comments.py +14 -0
- ai_review/clients/bitbucket/pr/schema/pull_request.py +1 -5
- ai_review/clients/bitbucket/pr/schema/user.py +7 -0
- ai_review/clients/github/pr/client.py +35 -4
- ai_review/clients/github/pr/schema/comments.py +21 -0
- ai_review/clients/github/pr/schema/pull_request.py +1 -4
- ai_review/clients/github/pr/schema/user.py +6 -0
- ai_review/clients/github/pr/types.py +11 -1
- ai_review/clients/gitlab/mr/client.py +32 -1
- ai_review/clients/gitlab/mr/schema/changes.py +1 -5
- ai_review/clients/gitlab/mr/schema/discussions.py +17 -7
- ai_review/clients/gitlab/mr/schema/notes.py +3 -0
- ai_review/clients/gitlab/mr/schema/user.py +7 -0
- ai_review/clients/gitlab/mr/types.py +16 -7
- ai_review/libs/config/prompt.py +96 -64
- ai_review/libs/config/review.py +2 -0
- ai_review/libs/llm/output_json_parser.py +60 -0
- ai_review/prompts/default_inline_reply.md +10 -0
- ai_review/prompts/default_summary_reply.md +14 -0
- ai_review/prompts/default_system_inline_reply.md +31 -0
- ai_review/prompts/default_system_summary_reply.md +13 -0
- ai_review/services/artifacts/schema.py +2 -2
- ai_review/services/hook/constants.py +14 -0
- ai_review/services/hook/service.py +95 -4
- ai_review/services/hook/types.py +18 -2
- ai_review/services/prompt/adapter.py +1 -1
- ai_review/services/prompt/service.py +49 -3
- ai_review/services/prompt/tools.py +21 -0
- ai_review/services/prompt/types.py +23 -0
- ai_review/services/review/gateway/comment.py +45 -6
- ai_review/services/review/gateway/llm.py +2 -1
- ai_review/services/review/gateway/types.py +50 -0
- ai_review/services/review/internal/inline/service.py +40 -0
- ai_review/services/review/internal/inline/types.py +8 -0
- ai_review/services/review/internal/inline_reply/schema.py +23 -0
- ai_review/services/review/internal/inline_reply/service.py +20 -0
- ai_review/services/review/internal/inline_reply/types.py +8 -0
- ai_review/services/review/{policy → internal/policy}/service.py +2 -1
- ai_review/services/review/internal/policy/types.py +15 -0
- ai_review/services/review/{summary → internal/summary}/service.py +2 -2
- ai_review/services/review/{summary → internal/summary}/types.py +1 -1
- ai_review/services/review/internal/summary_reply/__init__.py +0 -0
- ai_review/services/review/internal/summary_reply/schema.py +8 -0
- ai_review/services/review/internal/summary_reply/service.py +15 -0
- ai_review/services/review/internal/summary_reply/types.py +8 -0
- ai_review/services/review/runner/__init__.py +0 -0
- ai_review/services/review/runner/context.py +72 -0
- ai_review/services/review/runner/inline.py +80 -0
- ai_review/services/review/runner/inline_reply.py +80 -0
- ai_review/services/review/runner/summary.py +71 -0
- ai_review/services/review/runner/summary_reply.py +79 -0
- ai_review/services/review/runner/types.py +6 -0
- ai_review/services/review/service.py +78 -110
- ai_review/services/vcs/bitbucket/adapter.py +24 -0
- ai_review/services/vcs/bitbucket/client.py +107 -42
- ai_review/services/vcs/github/adapter.py +35 -0
- ai_review/services/vcs/github/client.py +105 -44
- ai_review/services/vcs/gitlab/adapter.py +26 -0
- ai_review/services/vcs/gitlab/client.py +91 -38
- ai_review/services/vcs/types.py +34 -0
- ai_review/tests/fixtures/clients/bitbucket.py +2 -2
- ai_review/tests/fixtures/clients/github.py +35 -6
- ai_review/tests/fixtures/clients/gitlab.py +42 -3
- ai_review/tests/fixtures/libs/__init__.py +0 -0
- ai_review/tests/fixtures/libs/llm/__init__.py +0 -0
- ai_review/tests/fixtures/libs/llm/output_json_parser.py +13 -0
- ai_review/tests/fixtures/services/hook.py +8 -0
- ai_review/tests/fixtures/services/llm.py +8 -5
- ai_review/tests/fixtures/services/prompt.py +70 -0
- ai_review/tests/fixtures/services/review/base.py +41 -0
- ai_review/tests/fixtures/services/review/gateway/__init__.py +0 -0
- ai_review/tests/fixtures/services/review/gateway/comment.py +98 -0
- ai_review/tests/fixtures/services/review/gateway/llm.py +17 -0
- ai_review/tests/fixtures/services/review/internal/__init__.py +0 -0
- ai_review/tests/fixtures/services/review/{inline.py → internal/inline.py} +8 -6
- ai_review/tests/fixtures/services/review/internal/inline_reply.py +25 -0
- ai_review/tests/fixtures/services/review/internal/policy.py +28 -0
- ai_review/tests/fixtures/services/review/internal/summary.py +21 -0
- ai_review/tests/fixtures/services/review/internal/summary_reply.py +19 -0
- ai_review/tests/fixtures/services/review/runner/__init__.py +0 -0
- ai_review/tests/fixtures/services/review/runner/context.py +50 -0
- ai_review/tests/fixtures/services/review/runner/inline.py +50 -0
- ai_review/tests/fixtures/services/review/runner/inline_reply.py +50 -0
- ai_review/tests/fixtures/services/review/runner/summary.py +50 -0
- ai_review/tests/fixtures/services/review/runner/summary_reply.py +50 -0
- ai_review/tests/fixtures/services/vcs.py +23 -0
- ai_review/tests/suites/cli/__init__.py +0 -0
- ai_review/tests/suites/cli/test_main.py +54 -0
- ai_review/tests/suites/libs/config/test_prompt.py +108 -28
- ai_review/tests/suites/libs/llm/__init__.py +0 -0
- ai_review/tests/suites/libs/llm/test_output_json_parser.py +155 -0
- ai_review/tests/suites/services/hook/test_service.py +88 -4
- ai_review/tests/suites/services/prompt/test_adapter.py +3 -3
- ai_review/tests/suites/services/prompt/test_service.py +102 -58
- ai_review/tests/suites/services/prompt/test_tools.py +86 -1
- ai_review/tests/suites/services/review/gateway/__init__.py +0 -0
- ai_review/tests/suites/services/review/gateway/test_comment.py +253 -0
- ai_review/tests/suites/services/review/gateway/test_llm.py +82 -0
- ai_review/tests/suites/services/review/internal/__init__.py +0 -0
- ai_review/tests/suites/services/review/internal/inline/__init__.py +0 -0
- ai_review/tests/suites/services/review/{inline → internal/inline}/test_schema.py +1 -1
- ai_review/tests/suites/services/review/internal/inline/test_service.py +81 -0
- ai_review/tests/suites/services/review/internal/inline_reply/__init__.py +0 -0
- ai_review/tests/suites/services/review/internal/inline_reply/test_schema.py +57 -0
- ai_review/tests/suites/services/review/internal/inline_reply/test_service.py +72 -0
- ai_review/tests/suites/services/review/internal/policy/__init__.py +0 -0
- ai_review/tests/suites/services/review/{policy → internal/policy}/test_service.py +1 -1
- ai_review/tests/suites/services/review/internal/summary/__init__.py +0 -0
- ai_review/tests/suites/services/review/{summary → internal/summary}/test_schema.py +1 -1
- ai_review/tests/suites/services/review/{summary → internal/summary}/test_service.py +2 -2
- ai_review/tests/suites/services/review/internal/summary_reply/__init__.py +0 -0
- ai_review/tests/suites/services/review/internal/summary_reply/test_schema.py +19 -0
- ai_review/tests/suites/services/review/internal/summary_reply/test_service.py +21 -0
- ai_review/tests/suites/services/review/runner/__init__.py +0 -0
- ai_review/tests/suites/services/review/runner/test_context.py +89 -0
- ai_review/tests/suites/services/review/runner/test_inline.py +100 -0
- ai_review/tests/suites/services/review/runner/test_inline_reply.py +109 -0
- ai_review/tests/suites/services/review/runner/test_summary.py +87 -0
- ai_review/tests/suites/services/review/runner/test_summary_reply.py +97 -0
- ai_review/tests/suites/services/review/test_service.py +64 -97
- ai_review/tests/suites/services/vcs/bitbucket/test_adapter.py +109 -0
- ai_review/tests/suites/services/vcs/bitbucket/{test_service.py → test_client.py} +88 -1
- ai_review/tests/suites/services/vcs/github/test_adapter.py +162 -0
- ai_review/tests/suites/services/vcs/github/{test_service.py → test_client.py} +102 -2
- ai_review/tests/suites/services/vcs/gitlab/test_adapter.py +105 -0
- ai_review/tests/suites/services/vcs/gitlab/{test_service.py → test_client.py} +99 -1
- {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/METADATA +8 -5
- {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/RECORD +143 -70
- ai_review/services/review/inline/service.py +0 -54
- ai_review/services/review/inline/types.py +0 -11
- ai_review/tests/fixtures/services/review/summary.py +0 -19
- ai_review/tests/suites/services/review/inline/test_service.py +0 -107
- /ai_review/{services/review/inline → libs/llm}/__init__.py +0 -0
- /ai_review/services/review/{policy → internal}/__init__.py +0 -0
- /ai_review/services/review/{summary → internal/inline}/__init__.py +0 -0
- /ai_review/services/review/{inline → internal/inline}/schema.py +0 -0
- /ai_review/{tests/suites/services/review/inline → services/review/internal/inline_reply}/__init__.py +0 -0
- /ai_review/{tests/suites/services/review → services/review/internal}/policy/__init__.py +0 -0
- /ai_review/{tests/suites/services/review → services/review/internal}/summary/__init__.py +0 -0
- /ai_review/services/review/{summary → internal/summary}/schema.py +0 -0
- {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/WHEEL +0 -0
- {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/entry_points.txt +0 -0
- {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/licenses/LICENSE +0 -0
- {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/top_level.txt +0 -0
ai_review/libs/config/prompt.py
CHANGED
|
@@ -6,104 +6,123 @@ from pydantic import BaseModel, FilePath, Field
|
|
|
6
6
|
from ai_review.libs.resources import load_resource
|
|
7
7
|
|
|
8
8
|
|
|
9
|
+
def resolve_prompt_files(files: list[FilePath] | None, default_file: str) -> list[Path]:
|
|
10
|
+
return files or [
|
|
11
|
+
load_resource(
|
|
12
|
+
package="ai_review.prompts",
|
|
13
|
+
filename=default_file,
|
|
14
|
+
fallback=f"ai_review/prompts/{default_file}"
|
|
15
|
+
)
|
|
16
|
+
]
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def resolve_system_prompt_files(files: list[FilePath] | None, include: bool, default_file: str) -> list[Path]:
|
|
20
|
+
global_files = [
|
|
21
|
+
load_resource(
|
|
22
|
+
package="ai_review.prompts",
|
|
23
|
+
filename=default_file,
|
|
24
|
+
fallback=f"ai_review/prompts/{default_file}"
|
|
25
|
+
)
|
|
26
|
+
]
|
|
27
|
+
|
|
28
|
+
if files is None:
|
|
29
|
+
return global_files
|
|
30
|
+
|
|
31
|
+
if include:
|
|
32
|
+
return global_files + files
|
|
33
|
+
|
|
34
|
+
return files
|
|
35
|
+
|
|
36
|
+
|
|
9
37
|
class PromptConfig(BaseModel):
|
|
10
38
|
context: dict[str, str] = Field(default_factory=dict)
|
|
11
39
|
normalize_prompts: bool = True
|
|
12
40
|
context_placeholder: str = "<<{value}>>"
|
|
41
|
+
|
|
42
|
+
# --- Prompts ---
|
|
13
43
|
inline_prompt_files: list[FilePath] | None = None
|
|
14
44
|
context_prompt_files: list[FilePath] | None = None
|
|
15
45
|
summary_prompt_files: list[FilePath] | None = None
|
|
46
|
+
inline_reply_prompt_files: list[FilePath] | None = None
|
|
47
|
+
summary_reply_prompt_files: list[FilePath] | None = None
|
|
48
|
+
|
|
49
|
+
# --- System Prompts ---
|
|
16
50
|
system_inline_prompt_files: list[FilePath] | None = None
|
|
17
51
|
system_context_prompt_files: list[FilePath] | None = None
|
|
18
52
|
system_summary_prompt_files: list[FilePath] | None = None
|
|
53
|
+
system_inline_reply_prompt_files: list[FilePath] | None = None
|
|
54
|
+
system_summary_reply_prompt_files: list[FilePath] | None = None
|
|
55
|
+
|
|
56
|
+
# --- Include System Prompts ---
|
|
19
57
|
include_inline_system_prompts: bool = True
|
|
20
58
|
include_context_system_prompts: bool = True
|
|
21
59
|
include_summary_system_prompts: bool = True
|
|
60
|
+
include_inline_reply_system_prompts: bool = True
|
|
61
|
+
include_summary_reply_system_prompts: bool = True
|
|
22
62
|
|
|
63
|
+
# --- Prompts ---
|
|
23
64
|
@cached_property
|
|
24
65
|
def inline_prompt_files_or_default(self) -> list[Path]:
|
|
25
|
-
return self.inline_prompt_files
|
|
26
|
-
load_resource(
|
|
27
|
-
package="ai_review.prompts",
|
|
28
|
-
filename="default_inline.md",
|
|
29
|
-
fallback="ai_review/prompts/default_inline.md"
|
|
30
|
-
)
|
|
31
|
-
]
|
|
66
|
+
return resolve_prompt_files(self.inline_prompt_files, "default_inline.md")
|
|
32
67
|
|
|
33
68
|
@cached_property
|
|
34
69
|
def context_prompt_files_or_default(self) -> list[Path]:
|
|
35
|
-
return self.context_prompt_files
|
|
36
|
-
load_resource(
|
|
37
|
-
package="ai_review.prompts",
|
|
38
|
-
filename="default_context.md",
|
|
39
|
-
fallback="ai_review/prompts/default_context.md"
|
|
40
|
-
)
|
|
41
|
-
]
|
|
70
|
+
return resolve_prompt_files(self.context_prompt_files, "default_context.md")
|
|
42
71
|
|
|
43
72
|
@cached_property
|
|
44
73
|
def summary_prompt_files_or_default(self) -> list[Path]:
|
|
45
|
-
return self.summary_prompt_files
|
|
46
|
-
load_resource(
|
|
47
|
-
package="ai_review.prompts",
|
|
48
|
-
filename="default_summary.md",
|
|
49
|
-
fallback="ai_review/prompts/default_summary.md"
|
|
50
|
-
)
|
|
51
|
-
]
|
|
74
|
+
return resolve_prompt_files(self.summary_prompt_files, "default_summary.md")
|
|
52
75
|
|
|
53
76
|
@cached_property
|
|
54
|
-
def
|
|
55
|
-
|
|
56
|
-
load_resource(
|
|
57
|
-
package="ai_review.prompts",
|
|
58
|
-
filename="default_system_inline.md",
|
|
59
|
-
fallback="ai_review/prompts/default_system_inline.md"
|
|
60
|
-
)
|
|
61
|
-
]
|
|
62
|
-
|
|
63
|
-
if self.system_inline_prompt_files is None:
|
|
64
|
-
return global_files
|
|
77
|
+
def inline_reply_prompt_files_or_default(self) -> list[Path]:
|
|
78
|
+
return resolve_prompt_files(self.inline_reply_prompt_files, "default_inline_reply.md")
|
|
65
79
|
|
|
66
|
-
|
|
67
|
-
|
|
80
|
+
@cached_property
|
|
81
|
+
def summary_reply_prompt_files_or_default(self) -> list[Path]:
|
|
82
|
+
return resolve_prompt_files(self.summary_reply_prompt_files, "default_summary_reply.md")
|
|
68
83
|
|
|
69
|
-
|
|
84
|
+
# --- System Prompts ---
|
|
85
|
+
@cached_property
|
|
86
|
+
def system_inline_prompt_files_or_default(self) -> list[Path]:
|
|
87
|
+
return resolve_system_prompt_files(
|
|
88
|
+
files=self.system_inline_prompt_files,
|
|
89
|
+
include=self.include_inline_system_prompts,
|
|
90
|
+
default_file="default_system_inline.md"
|
|
91
|
+
)
|
|
70
92
|
|
|
71
93
|
@cached_property
|
|
72
94
|
def system_context_prompt_files_or_default(self) -> list[Path]:
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
)
|
|
79
|
-
]
|
|
80
|
-
|
|
81
|
-
if self.system_context_prompt_files is None:
|
|
82
|
-
return global_files
|
|
83
|
-
|
|
84
|
-
if self.include_context_system_prompts:
|
|
85
|
-
return global_files + self.system_context_prompt_files
|
|
86
|
-
|
|
87
|
-
return self.system_context_prompt_files
|
|
95
|
+
return resolve_system_prompt_files(
|
|
96
|
+
files=self.system_context_prompt_files,
|
|
97
|
+
include=self.include_context_system_prompts,
|
|
98
|
+
default_file="default_system_context.md"
|
|
99
|
+
)
|
|
88
100
|
|
|
89
101
|
@cached_property
|
|
90
102
|
def system_summary_prompt_files_or_default(self) -> list[Path]:
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
)
|
|
97
|
-
]
|
|
98
|
-
|
|
99
|
-
if self.system_summary_prompt_files is None:
|
|
100
|
-
return global_files
|
|
101
|
-
|
|
102
|
-
if self.include_summary_system_prompts:
|
|
103
|
-
return global_files + self.system_summary_prompt_files
|
|
103
|
+
return resolve_system_prompt_files(
|
|
104
|
+
files=self.system_summary_prompt_files,
|
|
105
|
+
include=self.include_summary_system_prompts,
|
|
106
|
+
default_file="default_system_summary.md"
|
|
107
|
+
)
|
|
104
108
|
|
|
105
|
-
|
|
109
|
+
@cached_property
|
|
110
|
+
def system_inline_reply_prompt_files_or_default(self) -> list[Path]:
|
|
111
|
+
return resolve_system_prompt_files(
|
|
112
|
+
files=self.system_inline_reply_prompt_files,
|
|
113
|
+
include=self.include_inline_reply_system_prompts,
|
|
114
|
+
default_file="default_system_inline_reply.md"
|
|
115
|
+
)
|
|
106
116
|
|
|
117
|
+
@cached_property
|
|
118
|
+
def system_summary_reply_prompt_files_or_default(self) -> list[Path]:
|
|
119
|
+
return resolve_system_prompt_files(
|
|
120
|
+
files=self.system_summary_reply_prompt_files,
|
|
121
|
+
include=self.include_summary_reply_system_prompts,
|
|
122
|
+
default_file="default_system_summary_reply.md"
|
|
123
|
+
)
|
|
124
|
+
|
|
125
|
+
# --- Load Prompts ---
|
|
107
126
|
def load_inline(self) -> list[str]:
|
|
108
127
|
return [file.read_text(encoding="utf-8") for file in self.inline_prompt_files_or_default]
|
|
109
128
|
|
|
@@ -113,6 +132,13 @@ class PromptConfig(BaseModel):
|
|
|
113
132
|
def load_summary(self) -> list[str]:
|
|
114
133
|
return [file.read_text(encoding="utf-8") for file in self.summary_prompt_files_or_default]
|
|
115
134
|
|
|
135
|
+
def load_inline_reply(self) -> list[str]:
|
|
136
|
+
return [file.read_text(encoding="utf-8") for file in self.inline_reply_prompt_files_or_default]
|
|
137
|
+
|
|
138
|
+
def load_summary_reply(self) -> list[str]:
|
|
139
|
+
return [file.read_text(encoding="utf-8") for file in self.summary_reply_prompt_files_or_default]
|
|
140
|
+
|
|
141
|
+
# --- Load System Prompts ---
|
|
116
142
|
def load_system_inline(self) -> list[str]:
|
|
117
143
|
return [file.read_text(encoding="utf-8") for file in self.system_inline_prompt_files_or_default]
|
|
118
144
|
|
|
@@ -121,3 +147,9 @@ class PromptConfig(BaseModel):
|
|
|
121
147
|
|
|
122
148
|
def load_system_summary(self) -> list[str]:
|
|
123
149
|
return [file.read_text(encoding="utf-8") for file in self.system_summary_prompt_files_or_default]
|
|
150
|
+
|
|
151
|
+
def load_system_inline_reply(self) -> list[str]:
|
|
152
|
+
return [file.read_text(encoding="utf-8") for file in self.system_inline_reply_prompt_files_or_default]
|
|
153
|
+
|
|
154
|
+
def load_system_summary_reply(self) -> list[str]:
|
|
155
|
+
return [file.read_text(encoding="utf-8") for file in self.system_summary_reply_prompt_files_or_default]
|
ai_review/libs/config/review.py
CHANGED
|
@@ -20,7 +20,9 @@ class ReviewMode(StrEnum):
|
|
|
20
20
|
class ReviewConfig(BaseModel):
|
|
21
21
|
mode: ReviewMode = ReviewMode.FULL_FILE_DIFF
|
|
22
22
|
inline_tag: str = Field(default="#ai-review-inline")
|
|
23
|
+
inline_reply_tag: str = Field(default="#ai-review-inline-reply")
|
|
23
24
|
summary_tag: str = Field(default="#ai-review-summary")
|
|
25
|
+
summary_reply_tag: str = Field(default="#ai-review-summary-reply")
|
|
24
26
|
context_lines: int = Field(default=10, ge=0)
|
|
25
27
|
allow_changes: list[str] = Field(default_factory=list)
|
|
26
28
|
ignore_changes: list[str] = Field(default_factory=list)
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
import re
|
|
2
|
+
from typing import TypeVar, Generic, Type
|
|
3
|
+
|
|
4
|
+
from pydantic import BaseModel, ValidationError
|
|
5
|
+
|
|
6
|
+
from ai_review.libs.json import sanitize_json_string
|
|
7
|
+
from ai_review.libs.logger import get_logger
|
|
8
|
+
|
|
9
|
+
logger = get_logger("LLM_JSON_PARSER")
|
|
10
|
+
|
|
11
|
+
T = TypeVar("T", bound=BaseModel)
|
|
12
|
+
|
|
13
|
+
CLEAN_JSON_BLOCK_RE = re.compile(r"```(?:json)?(.*?)```", re.DOTALL | re.IGNORECASE)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class LLMOutputJSONParser(Generic[T]):
|
|
17
|
+
"""Reusable JSON parser for LLM responses."""
|
|
18
|
+
|
|
19
|
+
def __init__(self, model: Type[T]):
|
|
20
|
+
self.model = model
|
|
21
|
+
self.model_name = self.model.__name__
|
|
22
|
+
|
|
23
|
+
def try_parse(self, raw: str) -> T | None:
|
|
24
|
+
logger.debug(f"[{self.model_name}] Attempting JSON parse (len={len(raw)})")
|
|
25
|
+
|
|
26
|
+
try:
|
|
27
|
+
return self.model.model_validate_json(raw)
|
|
28
|
+
except ValidationError as error:
|
|
29
|
+
logger.warning(f"[{self.model_name}] Raw JSON parse failed: {error}")
|
|
30
|
+
cleaned = sanitize_json_string(raw)
|
|
31
|
+
|
|
32
|
+
if cleaned != raw:
|
|
33
|
+
logger.debug(f"[{self.model_name}] Sanitized JSON differs, retrying parse...")
|
|
34
|
+
try:
|
|
35
|
+
return self.model.model_validate_json(cleaned)
|
|
36
|
+
except ValidationError as error:
|
|
37
|
+
logger.warning(f"[{self.model_name}] Sanitized JSON still invalid: {error}")
|
|
38
|
+
return None
|
|
39
|
+
else:
|
|
40
|
+
logger.debug(f"[{self.model_name}] Sanitized JSON identical — skipping retry")
|
|
41
|
+
return None
|
|
42
|
+
|
|
43
|
+
def parse_output(self, output: str) -> T | None:
|
|
44
|
+
output = (output or "").strip()
|
|
45
|
+
if not output:
|
|
46
|
+
logger.warning(f"[{self.model_name}] Empty LLM output")
|
|
47
|
+
return None
|
|
48
|
+
|
|
49
|
+
logger.debug(f"[{self.model_name}] Parsing output (len={len(output)})")
|
|
50
|
+
|
|
51
|
+
if match := CLEAN_JSON_BLOCK_RE.search(output):
|
|
52
|
+
logger.debug(f"[{self.model_name}] Found fenced JSON block, extracting...")
|
|
53
|
+
output = match.group(1).strip()
|
|
54
|
+
|
|
55
|
+
if parsed := self.try_parse(output):
|
|
56
|
+
logger.info(f"[{self.model_name}] Successfully parsed")
|
|
57
|
+
return parsed
|
|
58
|
+
|
|
59
|
+
logger.error(f"[{self.model_name}] No valid JSON found in output")
|
|
60
|
+
return None
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
You are an AI assistant replying to a specific inline code review comment.
|
|
2
|
+
|
|
3
|
+
Use the conversation (`## Conversation`) and code diff (`## Diff`) to continue the discussion constructively.
|
|
4
|
+
|
|
5
|
+
Guidelines:
|
|
6
|
+
|
|
7
|
+
- Focus only on the latest comment and relevant code context.
|
|
8
|
+
- Keep your tone concise, professional, and technical (1–2 sentences).
|
|
9
|
+
- If a code change is needed, include it in "suggestion" — provide only the replacement code.
|
|
10
|
+
- If no further action or clarification is required, output exactly: No reply.
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
You are an AI assistant participating in a summary code review discussion.
|
|
2
|
+
|
|
3
|
+
Use the previous conversation (`## Conversation`) and code changes (`## Changes`) to continue the discussion
|
|
4
|
+
constructively.
|
|
5
|
+
|
|
6
|
+
Guidelines:
|
|
7
|
+
|
|
8
|
+
- Act as a **technical reviewer**, not the code author.
|
|
9
|
+
- Keep your tone concise, professional, and focused (1–3 sentences).
|
|
10
|
+
- Address the latest user comment directly, providing clarification, reasoning, or an actionable suggestion.
|
|
11
|
+
- If the comment contains a request or implies an action (e.g. adding tests, refactoring, or improving validation),
|
|
12
|
+
provide a clear recommendation or short illustrative code snippet.
|
|
13
|
+
- Avoid greetings, acknowledgements, or repeating earlier feedback.
|
|
14
|
+
- If no reply is needed, write exactly: `No reply`.
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
You are an AI assistant participating in a code review discussion.
|
|
2
|
+
|
|
3
|
+
Return ONLY a valid JSON object representing a single inline reply to the current comment thread.
|
|
4
|
+
|
|
5
|
+
Format:
|
|
6
|
+
|
|
7
|
+
```json
|
|
8
|
+
{
|
|
9
|
+
"message": "<short reply message to the comment thread>",
|
|
10
|
+
"suggestion": "<replacement code block without markdown, or null if not applicable>"
|
|
11
|
+
}
|
|
12
|
+
```
|
|
13
|
+
|
|
14
|
+
Guidelines:
|
|
15
|
+
|
|
16
|
+
- Output must be exactly one JSON object, not an array or text block.
|
|
17
|
+
- "message" — required, non-empty, short (1–2 sentences), professional, and focused on the specific comment.
|
|
18
|
+
- "suggestion" — optional:
|
|
19
|
+
- If suggesting a fix or refactor, provide only the replacement code (no markdown, no explanations).
|
|
20
|
+
- Maintain indentation and style consistent with the surrounding diff.
|
|
21
|
+
- If no code change is appropriate, use null.
|
|
22
|
+
- Do not quote previous comments or restate context.
|
|
23
|
+
- Never include any extra text outside the JSON object.
|
|
24
|
+
- If no meaningful reply is needed, return:
|
|
25
|
+
|
|
26
|
+
```json
|
|
27
|
+
{
|
|
28
|
+
"message": "No reply.",
|
|
29
|
+
"suggestion": null
|
|
30
|
+
}
|
|
31
|
+
```
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
You are an AI assistant participating in a code review discussion.
|
|
2
|
+
|
|
3
|
+
Your role:
|
|
4
|
+
|
|
5
|
+
- Act as a **technical reviewer**, not the code author.
|
|
6
|
+
- Focus on clarity, correctness, and completeness of the code or proposal.
|
|
7
|
+
- Keep your tone concise, professional, and factual (1–3 sentences).
|
|
8
|
+
- Reply only to the latest message in the discussion thread.
|
|
9
|
+
- When the user requests or implies an action (e.g. adding tests, refactoring, improving performance), provide a
|
|
10
|
+
**specific, actionable suggestion** or short code snippet that addresses it.
|
|
11
|
+
- Avoid greetings, acknowledgements, or filler phrases.
|
|
12
|
+
- Do not summarize past discussion history.
|
|
13
|
+
- If no reply is needed, output exactly: `No reply`.
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
from datetime import datetime
|
|
1
|
+
from datetime import datetime, timezone
|
|
2
2
|
|
|
3
3
|
from pydantic import BaseModel, Field
|
|
4
4
|
|
|
@@ -7,5 +7,5 @@ class LLMArtifactSchema(BaseModel):
|
|
|
7
7
|
id: str
|
|
8
8
|
prompt: str
|
|
9
9
|
response: str | None = None
|
|
10
|
-
timestamp: str = Field(default_factory=datetime.
|
|
10
|
+
timestamp: str = Field(default_factory=lambda: datetime.now(timezone.utc).isoformat())
|
|
11
11
|
prompt_system: str
|
|
@@ -15,6 +15,12 @@ class HookType(StrEnum):
|
|
|
15
15
|
ON_SUMMARY_REVIEW_START = "ON_SUMMARY_REVIEW_START"
|
|
16
16
|
ON_SUMMARY_REVIEW_COMPLETE = "ON_SUMMARY_REVIEW_COMPLETE"
|
|
17
17
|
|
|
18
|
+
ON_INLINE_REPLY_REVIEW_START = "ON_INLINE_REPLY_REVIEW_START"
|
|
19
|
+
ON_INLINE_REPLY_REVIEW_COMPLETE = "ON_INLINE_REPLY_REVIEW_COMPLETE"
|
|
20
|
+
|
|
21
|
+
ON_SUMMARY_REPLY_REVIEW_START = "ON_SUMMARY_REPLY_REVIEW_START"
|
|
22
|
+
ON_SUMMARY_REPLY_REVIEW_COMPLETE = "ON_SUMMARY_REPLY_REVIEW_COMPLETE"
|
|
23
|
+
|
|
18
24
|
ON_INLINE_COMMENT_START = "ON_INLINE_COMMENT_START"
|
|
19
25
|
ON_INLINE_COMMENT_ERROR = "ON_INLINE_COMMENT_ERROR"
|
|
20
26
|
ON_INLINE_COMMENT_COMPLETE = "ON_INLINE_COMMENT_COMPLETE"
|
|
@@ -22,3 +28,11 @@ class HookType(StrEnum):
|
|
|
22
28
|
ON_SUMMARY_COMMENT_START = "ON_SUMMARY_COMMENT_START"
|
|
23
29
|
ON_SUMMARY_COMMENT_ERROR = "ON_SUMMARY_COMMENT_ERROR"
|
|
24
30
|
ON_SUMMARY_COMMENT_COMPLETE = "ON_SUMMARY_COMMENT_COMPLETE"
|
|
31
|
+
|
|
32
|
+
ON_INLINE_COMMENT_REPLY_START = "ON_INLINE_COMMENT_REPLY_START"
|
|
33
|
+
ON_INLINE_COMMENT_REPLY_ERROR = "ON_INLINE_COMMENT_REPLY_ERROR"
|
|
34
|
+
ON_INLINE_COMMENT_REPLY_COMPLETE = "ON_INLINE_COMMENT_REPLY_COMPLETE"
|
|
35
|
+
|
|
36
|
+
ON_SUMMARY_COMMENT_REPLY_START = "ON_SUMMARY_COMMENT_REPLY_START"
|
|
37
|
+
ON_SUMMARY_COMMENT_REPLY_ERROR = "ON_SUMMARY_COMMENT_REPLY_ERROR"
|
|
38
|
+
ON_SUMMARY_COMMENT_REPLY_COMPLETE = "ON_SUMMARY_COMMENT_REPLY_COMPLETE"
|
|
@@ -19,16 +19,33 @@ from ai_review.services.hook.types import (
|
|
|
19
19
|
# --- Summary Review ---
|
|
20
20
|
SummaryReviewStartHookFunc,
|
|
21
21
|
SummaryReviewCompleteHookFunc,
|
|
22
|
+
# --- Inline Reply Review ---
|
|
23
|
+
InlineReplyReviewStartHookFunc,
|
|
24
|
+
InlineReplyReviewCompleteHookFunc,
|
|
25
|
+
# --- Summary Reply Review ---
|
|
26
|
+
SummaryReplyReviewStartHookFunc,
|
|
27
|
+
SummaryReplyReviewCompleteHookFunc,
|
|
22
28
|
# --- Inline Comment ---
|
|
23
29
|
InlineCommentStartHookFunc,
|
|
24
30
|
InlineCommentErrorHookFunc,
|
|
25
31
|
InlineCommentCompleteHookFunc,
|
|
26
32
|
# --- Summary Comment ---
|
|
27
33
|
SummaryCommentStartHookFunc,
|
|
28
|
-
|
|
34
|
+
SummaryCommentErrorHookFunc,
|
|
35
|
+
SummaryCommentCompleteHookFunc,
|
|
36
|
+
# --- Inline Reply Comment ---
|
|
37
|
+
InlineCommentReplyStartHookFunc,
|
|
38
|
+
InlineCommentReplyErrorHookFunc,
|
|
39
|
+
InlineCommentReplyCompleteHookFunc,
|
|
40
|
+
# --- Summary Reply Comment ---
|
|
41
|
+
SummaryCommentReplyStartHookFunc,
|
|
42
|
+
SummaryCommentReplyErrorHookFunc,
|
|
43
|
+
SummaryCommentReplyCompleteHookFunc
|
|
29
44
|
)
|
|
30
|
-
from ai_review.services.review.inline.schema import InlineCommentSchema
|
|
31
|
-
from ai_review.services.review.
|
|
45
|
+
from ai_review.services.review.internal.inline.schema import InlineCommentSchema
|
|
46
|
+
from ai_review.services.review.internal.inline_reply.schema import InlineCommentReplySchema
|
|
47
|
+
from ai_review.services.review.internal.summary.schema import SummaryCommentSchema
|
|
48
|
+
from ai_review.services.review.internal.summary_reply.schema import SummaryCommentReplySchema
|
|
32
49
|
|
|
33
50
|
logger = get_logger("HOOK_SERVICE")
|
|
34
51
|
|
|
@@ -117,6 +134,36 @@ class HookService:
|
|
|
117
134
|
async def emit_summary_review_complete(self, report: CostReportSchema | None):
|
|
118
135
|
await self.emit(HookType.ON_SUMMARY_REVIEW_COMPLETE, report=report)
|
|
119
136
|
|
|
137
|
+
# --- Inline Reply Review ---
|
|
138
|
+
def on_inline_reply_review_start(self, func: InlineReplyReviewStartHookFunc):
|
|
139
|
+
self.inject_hook(HookType.ON_INLINE_REPLY_REVIEW_START, func)
|
|
140
|
+
return func
|
|
141
|
+
|
|
142
|
+
def on_inline_reply_review_complete(self, func: InlineReplyReviewCompleteHookFunc):
|
|
143
|
+
self.inject_hook(HookType.ON_INLINE_REPLY_REVIEW_COMPLETE, func)
|
|
144
|
+
return func
|
|
145
|
+
|
|
146
|
+
async def emit_inline_reply_review_start(self):
|
|
147
|
+
await self.emit(HookType.ON_INLINE_REPLY_REVIEW_START)
|
|
148
|
+
|
|
149
|
+
async def emit_inline_reply_review_complete(self, report: CostReportSchema | None):
|
|
150
|
+
await self.emit(HookType.ON_INLINE_REPLY_REVIEW_COMPLETE, report=report)
|
|
151
|
+
|
|
152
|
+
# --- Summary Reply Review ---
|
|
153
|
+
def on_summary_reply_review_start(self, func: SummaryReplyReviewStartHookFunc):
|
|
154
|
+
self.inject_hook(HookType.ON_SUMMARY_REPLY_REVIEW_START, func)
|
|
155
|
+
return func
|
|
156
|
+
|
|
157
|
+
def on_summary_reply_review_complete(self, func: SummaryReplyReviewCompleteHookFunc):
|
|
158
|
+
self.inject_hook(HookType.ON_SUMMARY_REPLY_REVIEW_COMPLETE, func)
|
|
159
|
+
return func
|
|
160
|
+
|
|
161
|
+
async def emit_summary_reply_review_start(self):
|
|
162
|
+
await self.emit(HookType.ON_SUMMARY_REPLY_REVIEW_START)
|
|
163
|
+
|
|
164
|
+
async def emit_summary_reply_review_complete(self, report: CostReportSchema | None):
|
|
165
|
+
await self.emit(HookType.ON_SUMMARY_REPLY_REVIEW_COMPLETE, report=report)
|
|
166
|
+
|
|
120
167
|
# --- Inline Comment ---
|
|
121
168
|
def on_inline_comment_start(self, func: InlineCommentStartHookFunc):
|
|
122
169
|
self.inject_hook(HookType.ON_INLINE_COMMENT_START, func)
|
|
@@ -144,7 +191,7 @@ class HookService:
|
|
|
144
191
|
self.inject_hook(HookType.ON_SUMMARY_COMMENT_START, func)
|
|
145
192
|
return func
|
|
146
193
|
|
|
147
|
-
def on_summary_comment_error(self, func:
|
|
194
|
+
def on_summary_comment_error(self, func: SummaryCommentErrorHookFunc):
|
|
148
195
|
self.inject_hook(HookType.ON_SUMMARY_COMMENT_ERROR, func)
|
|
149
196
|
return func
|
|
150
197
|
|
|
@@ -160,3 +207,47 @@ class HookService:
|
|
|
160
207
|
|
|
161
208
|
async def emit_summary_comment_complete(self, comment: SummaryCommentSchema):
|
|
162
209
|
await self.emit(HookType.ON_SUMMARY_COMMENT_COMPLETE, comment=comment)
|
|
210
|
+
|
|
211
|
+
# --- Inline Reply Comment ---
|
|
212
|
+
def on_inline_comment_reply_start(self, func: InlineCommentReplyStartHookFunc):
|
|
213
|
+
self.inject_hook(HookType.ON_INLINE_COMMENT_REPLY_START, func)
|
|
214
|
+
return func
|
|
215
|
+
|
|
216
|
+
def on_inline_comment_reply_error(self, func: InlineCommentReplyErrorHookFunc):
|
|
217
|
+
self.inject_hook(HookType.ON_INLINE_COMMENT_REPLY_ERROR, func)
|
|
218
|
+
return func
|
|
219
|
+
|
|
220
|
+
def on_inline_comment_reply_complete(self, func: InlineCommentReplyCompleteHookFunc):
|
|
221
|
+
self.inject_hook(HookType.ON_INLINE_COMMENT_REPLY_COMPLETE, func)
|
|
222
|
+
return func
|
|
223
|
+
|
|
224
|
+
async def emit_inline_comment_reply_start(self, comment: InlineCommentReplySchema):
|
|
225
|
+
await self.emit(HookType.ON_INLINE_COMMENT_REPLY_START, comment=comment)
|
|
226
|
+
|
|
227
|
+
async def emit_inline_comment_reply_error(self, comment: InlineCommentReplySchema):
|
|
228
|
+
await self.emit(HookType.ON_INLINE_COMMENT_REPLY_ERROR, comment=comment)
|
|
229
|
+
|
|
230
|
+
async def emit_inline_comment_reply_complete(self, comment: InlineCommentReplySchema):
|
|
231
|
+
await self.emit(HookType.ON_INLINE_COMMENT_REPLY_COMPLETE, comment=comment)
|
|
232
|
+
|
|
233
|
+
# --- Summary Reply Comment ---
|
|
234
|
+
def on_summary_comment_reply_start(self, func: SummaryCommentReplyStartHookFunc):
|
|
235
|
+
self.inject_hook(HookType.ON_SUMMARY_COMMENT_REPLY_START, func)
|
|
236
|
+
return func
|
|
237
|
+
|
|
238
|
+
def on_summary_comment_reply_error(self, func: SummaryCommentReplyErrorHookFunc):
|
|
239
|
+
self.inject_hook(HookType.ON_SUMMARY_COMMENT_REPLY_ERROR, func)
|
|
240
|
+
return func
|
|
241
|
+
|
|
242
|
+
def on_summary_comment_reply_complete(self, func: SummaryCommentReplyCompleteHookFunc):
|
|
243
|
+
self.inject_hook(HookType.ON_SUMMARY_COMMENT_REPLY_COMPLETE, func)
|
|
244
|
+
return func
|
|
245
|
+
|
|
246
|
+
async def emit_summary_comment_reply_start(self, comment: SummaryCommentReplySchema):
|
|
247
|
+
await self.emit(HookType.ON_SUMMARY_COMMENT_REPLY_START, comment=comment)
|
|
248
|
+
|
|
249
|
+
async def emit_summary_comment_reply_error(self, comment: SummaryCommentReplySchema):
|
|
250
|
+
await self.emit(HookType.ON_SUMMARY_COMMENT_REPLY_ERROR, comment=comment)
|
|
251
|
+
|
|
252
|
+
async def emit_summary_comment_reply_complete(self, comment: SummaryCommentReplySchema):
|
|
253
|
+
await self.emit(HookType.ON_SUMMARY_COMMENT_REPLY_COMPLETE, comment=comment)
|
ai_review/services/hook/types.py
CHANGED
|
@@ -1,8 +1,10 @@
|
|
|
1
1
|
from typing import Callable, Awaitable
|
|
2
2
|
|
|
3
3
|
from ai_review.services.cost.schema import CostReportSchema
|
|
4
|
-
from ai_review.services.review.inline.schema import InlineCommentSchema
|
|
5
|
-
from ai_review.services.review.
|
|
4
|
+
from ai_review.services.review.internal.inline.schema import InlineCommentSchema
|
|
5
|
+
from ai_review.services.review.internal.inline_reply.schema import InlineCommentReplySchema
|
|
6
|
+
from ai_review.services.review.internal.summary.schema import SummaryCommentSchema
|
|
7
|
+
from ai_review.services.review.internal.summary_reply.schema import SummaryCommentReplySchema
|
|
6
8
|
|
|
7
9
|
HookFunc = Callable[..., Awaitable[None]]
|
|
8
10
|
|
|
@@ -19,6 +21,12 @@ ContextReviewCompleteHookFunc = Callable[[CostReportSchema | None], Awaitable[No
|
|
|
19
21
|
SummaryReviewStartHookFunc = Callable[..., Awaitable[None]]
|
|
20
22
|
SummaryReviewCompleteHookFunc = Callable[[CostReportSchema | None], Awaitable[None]]
|
|
21
23
|
|
|
24
|
+
InlineReplyReviewStartHookFunc = Callable[..., Awaitable[None]]
|
|
25
|
+
InlineReplyReviewCompleteHookFunc = Callable[[CostReportSchema | None], Awaitable[None]]
|
|
26
|
+
|
|
27
|
+
SummaryReplyReviewStartHookFunc = Callable[..., Awaitable[None]]
|
|
28
|
+
SummaryReplyReviewCompleteHookFunc = Callable[[CostReportSchema | None], Awaitable[None]]
|
|
29
|
+
|
|
22
30
|
InlineCommentStartHookFunc = Callable[[InlineCommentSchema], Awaitable[None]]
|
|
23
31
|
InlineCommentErrorHookFunc = Callable[[InlineCommentSchema], Awaitable[None]]
|
|
24
32
|
InlineCommentCompleteHookFunc = Callable[[InlineCommentSchema], Awaitable[None]]
|
|
@@ -26,3 +34,11 @@ InlineCommentCompleteHookFunc = Callable[[InlineCommentSchema], Awaitable[None]]
|
|
|
26
34
|
SummaryCommentStartHookFunc = Callable[[SummaryCommentSchema], Awaitable[None]]
|
|
27
35
|
SummaryCommentErrorHookFunc = Callable[[SummaryCommentSchema], Awaitable[None]]
|
|
28
36
|
SummaryCommentCompleteHookFunc = Callable[[SummaryCommentSchema], Awaitable[None]]
|
|
37
|
+
|
|
38
|
+
InlineCommentReplyStartHookFunc = Callable[[InlineCommentReplySchema], Awaitable[None]]
|
|
39
|
+
InlineCommentReplyErrorHookFunc = Callable[[InlineCommentReplySchema], Awaitable[None]]
|
|
40
|
+
InlineCommentReplyCompleteHookFunc = Callable[[InlineCommentReplySchema], Awaitable[None]]
|
|
41
|
+
|
|
42
|
+
SummaryCommentReplyStartHookFunc = Callable[[SummaryCommentReplySchema], Awaitable[None]]
|
|
43
|
+
SummaryCommentReplyErrorHookFunc = Callable[[SummaryCommentReplySchema], Awaitable[None]]
|
|
44
|
+
SummaryCommentReplyCompleteHookFunc = Callable[[SummaryCommentReplySchema], Awaitable[None]]
|
|
@@ -2,7 +2,7 @@ from ai_review.services.prompt.schema import PromptContextSchema
|
|
|
2
2
|
from ai_review.services.vcs.types import ReviewInfoSchema
|
|
3
3
|
|
|
4
4
|
|
|
5
|
-
def
|
|
5
|
+
def build_prompt_context_from_review_info(review: ReviewInfoSchema) -> PromptContextSchema:
|
|
6
6
|
return PromptContextSchema(
|
|
7
7
|
review_title=review.title,
|
|
8
8
|
review_description=review.description,
|
|
@@ -1,8 +1,9 @@
|
|
|
1
1
|
from ai_review.config import settings
|
|
2
2
|
from ai_review.services.diff.schema import DiffFileSchema
|
|
3
3
|
from ai_review.services.prompt.schema import PromptContextSchema
|
|
4
|
-
from ai_review.services.prompt.tools import normalize_prompt, format_file
|
|
4
|
+
from ai_review.services.prompt.tools import normalize_prompt, format_file, format_thread, format_files
|
|
5
5
|
from ai_review.services.prompt.types import PromptServiceProtocol
|
|
6
|
+
from ai_review.services.vcs.types import ReviewThreadSchema
|
|
6
7
|
|
|
7
8
|
|
|
8
9
|
class PromptService(PromptServiceProtocol):
|
|
@@ -28,7 +29,7 @@ class PromptService(PromptServiceProtocol):
|
|
|
28
29
|
@classmethod
|
|
29
30
|
def build_summary_request(cls, diffs: list[DiffFileSchema], context: PromptContextSchema) -> str:
|
|
30
31
|
prompt = cls.prepare_prompt(settings.prompt.load_summary(), context)
|
|
31
|
-
changes =
|
|
32
|
+
changes = format_files(diffs)
|
|
32
33
|
return (
|
|
33
34
|
f"{prompt}\n\n"
|
|
34
35
|
f"## Changes\n\n"
|
|
@@ -38,13 +39,50 @@ class PromptService(PromptServiceProtocol):
|
|
|
38
39
|
@classmethod
|
|
39
40
|
def build_context_request(cls, diffs: list[DiffFileSchema], context: PromptContextSchema) -> str:
|
|
40
41
|
prompt = cls.prepare_prompt(settings.prompt.load_context(), context)
|
|
41
|
-
changes =
|
|
42
|
+
changes = format_files(diffs)
|
|
42
43
|
return (
|
|
43
44
|
f"{prompt}\n\n"
|
|
44
45
|
f"## Diff\n\n"
|
|
45
46
|
f"{changes}\n"
|
|
46
47
|
)
|
|
47
48
|
|
|
49
|
+
@classmethod
|
|
50
|
+
def build_inline_reply_request(
|
|
51
|
+
cls,
|
|
52
|
+
diff: DiffFileSchema,
|
|
53
|
+
thread: ReviewThreadSchema,
|
|
54
|
+
context: PromptContextSchema
|
|
55
|
+
) -> str:
|
|
56
|
+
prompt = cls.prepare_prompt(settings.prompt.load_inline_reply(), context)
|
|
57
|
+
conversation = format_thread(thread)
|
|
58
|
+
|
|
59
|
+
return (
|
|
60
|
+
f"{prompt}\n\n"
|
|
61
|
+
f"## Conversation\n\n"
|
|
62
|
+
f"{conversation}\n\n"
|
|
63
|
+
f"## Diff\n\n"
|
|
64
|
+
f"{format_file(diff)}"
|
|
65
|
+
)
|
|
66
|
+
|
|
67
|
+
@classmethod
|
|
68
|
+
def build_summary_reply_request(
|
|
69
|
+
cls,
|
|
70
|
+
diffs: list[DiffFileSchema],
|
|
71
|
+
thread: ReviewThreadSchema,
|
|
72
|
+
context: PromptContextSchema
|
|
73
|
+
) -> str:
|
|
74
|
+
prompt = cls.prepare_prompt(settings.prompt.load_summary_reply(), context)
|
|
75
|
+
changes = format_files(diffs)
|
|
76
|
+
conversation = format_thread(thread)
|
|
77
|
+
|
|
78
|
+
return (
|
|
79
|
+
f"{prompt}\n\n"
|
|
80
|
+
f"## Conversation\n\n"
|
|
81
|
+
f"{conversation}\n\n"
|
|
82
|
+
f"## Changes\n\n"
|
|
83
|
+
f"{changes}"
|
|
84
|
+
)
|
|
85
|
+
|
|
48
86
|
@classmethod
|
|
49
87
|
def build_system_inline_request(cls, context: PromptContextSchema) -> str:
|
|
50
88
|
return cls.prepare_prompt(settings.prompt.load_system_inline(), context)
|
|
@@ -56,3 +94,11 @@ class PromptService(PromptServiceProtocol):
|
|
|
56
94
|
@classmethod
|
|
57
95
|
def build_system_summary_request(cls, context: PromptContextSchema) -> str:
|
|
58
96
|
return cls.prepare_prompt(settings.prompt.load_system_summary(), context)
|
|
97
|
+
|
|
98
|
+
@classmethod
|
|
99
|
+
def build_system_inline_reply_request(cls, context: PromptContextSchema) -> str:
|
|
100
|
+
return cls.prepare_prompt(settings.prompt.load_system_inline_reply(), context)
|
|
101
|
+
|
|
102
|
+
@classmethod
|
|
103
|
+
def build_system_summary_reply_request(cls, context: PromptContextSchema) -> str:
|
|
104
|
+
return cls.prepare_prompt(settings.prompt.load_system_summary_reply(), context)
|