xai-review 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xai-review might be problematic.

Files changed (159)
  1. xai_review-0.3.0/PKG-INFO +11 -0
  2. xai_review-0.3.0/README.md +260 -0
  3. xai_review-0.3.0/ai_review/__init__.py +0 -0
  4. xai_review-0.3.0/ai_review/cli/__init__.py +0 -0
  5. xai_review-0.3.0/ai_review/cli/commands/__init__.py +0 -0
  6. xai_review-0.3.0/ai_review/cli/commands/run_context_review.py +7 -0
  7. xai_review-0.3.0/ai_review/cli/commands/run_inline_review.py +7 -0
  8. xai_review-0.3.0/ai_review/cli/commands/run_review.py +8 -0
  9. xai_review-0.3.0/ai_review/cli/commands/run_summary_review.py +7 -0
  10. xai_review-0.3.0/ai_review/cli/main.py +54 -0
  11. xai_review-0.3.0/ai_review/clients/__init__.py +0 -0
  12. xai_review-0.3.0/ai_review/clients/claude/__init__.py +0 -0
  13. xai_review-0.3.0/ai_review/clients/claude/client.py +44 -0
  14. xai_review-0.3.0/ai_review/clients/claude/schema.py +44 -0
  15. xai_review-0.3.0/ai_review/clients/gemini/__init__.py +0 -0
  16. xai_review-0.3.0/ai_review/clients/gemini/client.py +45 -0
  17. xai_review-0.3.0/ai_review/clients/gemini/schema.py +78 -0
  18. xai_review-0.3.0/ai_review/clients/gitlab/__init__.py +0 -0
  19. xai_review-0.3.0/ai_review/clients/gitlab/client.py +31 -0
  20. xai_review-0.3.0/ai_review/clients/gitlab/mr/__init__.py +0 -0
  21. xai_review-0.3.0/ai_review/clients/gitlab/mr/client.py +101 -0
  22. xai_review-0.3.0/ai_review/clients/gitlab/mr/schema/__init__.py +0 -0
  23. xai_review-0.3.0/ai_review/clients/gitlab/mr/schema/changes.py +35 -0
  24. xai_review-0.3.0/ai_review/clients/gitlab/mr/schema/comments.py +19 -0
  25. xai_review-0.3.0/ai_review/clients/gitlab/mr/schema/discussions.py +34 -0
  26. xai_review-0.3.0/ai_review/clients/openai/__init__.py +0 -0
  27. xai_review-0.3.0/ai_review/clients/openai/client.py +42 -0
  28. xai_review-0.3.0/ai_review/clients/openai/schema.py +37 -0
  29. xai_review-0.3.0/ai_review/config.py +62 -0
  30. xai_review-0.3.0/ai_review/libs/__init__.py +0 -0
  31. xai_review-0.3.0/ai_review/libs/asynchronous/__init__.py +0 -0
  32. xai_review-0.3.0/ai_review/libs/asynchronous/gather.py +14 -0
  33. xai_review-0.3.0/ai_review/libs/config/__init__.py +0 -0
  34. xai_review-0.3.0/ai_review/libs/config/artifacts.py +12 -0
  35. xai_review-0.3.0/ai_review/libs/config/base.py +24 -0
  36. xai_review-0.3.0/ai_review/libs/config/claude.py +13 -0
  37. xai_review-0.3.0/ai_review/libs/config/gemini.py +13 -0
  38. xai_review-0.3.0/ai_review/libs/config/gitlab.py +12 -0
  39. xai_review-0.3.0/ai_review/libs/config/http.py +19 -0
  40. xai_review-0.3.0/ai_review/libs/config/llm.py +61 -0
  41. xai_review-0.3.0/ai_review/libs/config/logger.py +17 -0
  42. xai_review-0.3.0/ai_review/libs/config/openai.py +13 -0
  43. xai_review-0.3.0/ai_review/libs/config/prompt.py +121 -0
  44. xai_review-0.3.0/ai_review/libs/config/review.py +30 -0
  45. xai_review-0.3.0/ai_review/libs/config/vcs.py +19 -0
  46. xai_review-0.3.0/ai_review/libs/constants/__init__.py +0 -0
  47. xai_review-0.3.0/ai_review/libs/constants/llm_provider.py +7 -0
  48. xai_review-0.3.0/ai_review/libs/constants/vcs_provider.py +6 -0
  49. xai_review-0.3.0/ai_review/libs/diff/__init__.py +0 -0
  50. xai_review-0.3.0/ai_review/libs/diff/models.py +100 -0
  51. xai_review-0.3.0/ai_review/libs/diff/parser.py +111 -0
  52. xai_review-0.3.0/ai_review/libs/diff/tools.py +24 -0
  53. xai_review-0.3.0/ai_review/libs/http/__init__.py +0 -0
  54. xai_review-0.3.0/ai_review/libs/http/client.py +14 -0
  55. xai_review-0.3.0/ai_review/libs/http/event_hooks/__init__.py +0 -0
  56. xai_review-0.3.0/ai_review/libs/http/event_hooks/base.py +13 -0
  57. xai_review-0.3.0/ai_review/libs/http/event_hooks/logger.py +17 -0
  58. xai_review-0.3.0/ai_review/libs/http/handlers.py +34 -0
  59. xai_review-0.3.0/ai_review/libs/http/transports/__init__.py +0 -0
  60. xai_review-0.3.0/ai_review/libs/http/transports/retry.py +34 -0
  61. xai_review-0.3.0/ai_review/libs/logger.py +19 -0
  62. xai_review-0.3.0/ai_review/libs/resources.py +24 -0
  63. xai_review-0.3.0/ai_review/prompts/__init__.py +0 -0
  64. xai_review-0.3.0/ai_review/prompts/default_context.md +14 -0
  65. xai_review-0.3.0/ai_review/prompts/default_inline.md +8 -0
  66. xai_review-0.3.0/ai_review/prompts/default_summary.md +3 -0
  67. xai_review-0.3.0/ai_review/prompts/default_system_context.md +27 -0
  68. xai_review-0.3.0/ai_review/prompts/default_system_inline.md +25 -0
  69. xai_review-0.3.0/ai_review/prompts/default_system_summary.md +7 -0
  70. xai_review-0.3.0/ai_review/resources/__init__.py +0 -0
  71. xai_review-0.3.0/ai_review/resources/pricing.yaml +55 -0
  72. xai_review-0.3.0/ai_review/services/__init__.py +0 -0
  73. xai_review-0.3.0/ai_review/services/artifacts/__init__.py +0 -0
  74. xai_review-0.3.0/ai_review/services/artifacts/schema.py +11 -0
  75. xai_review-0.3.0/ai_review/services/artifacts/service.py +47 -0
  76. xai_review-0.3.0/ai_review/services/artifacts/tools.py +8 -0
  77. xai_review-0.3.0/ai_review/services/cost/__init__.py +0 -0
  78. xai_review-0.3.0/ai_review/services/cost/schema.py +44 -0
  79. xai_review-0.3.0/ai_review/services/cost/service.py +58 -0
  80. xai_review-0.3.0/ai_review/services/diff/__init__.py +0 -0
  81. xai_review-0.3.0/ai_review/services/diff/renderers.py +149 -0
  82. xai_review-0.3.0/ai_review/services/diff/schema.py +6 -0
  83. xai_review-0.3.0/ai_review/services/diff/service.py +96 -0
  84. xai_review-0.3.0/ai_review/services/diff/tools.py +59 -0
  85. xai_review-0.3.0/ai_review/services/git/__init__.py +0 -0
  86. xai_review-0.3.0/ai_review/services/git/service.py +35 -0
  87. xai_review-0.3.0/ai_review/services/git/types.py +11 -0
  88. xai_review-0.3.0/ai_review/services/llm/__init__.py +0 -0
  89. xai_review-0.3.0/ai_review/services/llm/claude/__init__.py +0 -0
  90. xai_review-0.3.0/ai_review/services/llm/claude/client.py +26 -0
  91. xai_review-0.3.0/ai_review/services/llm/factory.py +18 -0
  92. xai_review-0.3.0/ai_review/services/llm/gemini/__init__.py +0 -0
  93. xai_review-0.3.0/ai_review/services/llm/gemini/client.py +31 -0
  94. xai_review-0.3.0/ai_review/services/llm/openai/__init__.py +0 -0
  95. xai_review-0.3.0/ai_review/services/llm/openai/client.py +28 -0
  96. xai_review-0.3.0/ai_review/services/llm/types.py +15 -0
  97. xai_review-0.3.0/ai_review/services/prompt/__init__.py +0 -0
  98. xai_review-0.3.0/ai_review/services/prompt/adapter.py +25 -0
  99. xai_review-0.3.0/ai_review/services/prompt/schema.py +71 -0
  100. xai_review-0.3.0/ai_review/services/prompt/service.py +56 -0
  101. xai_review-0.3.0/ai_review/services/review/__init__.py +0 -0
  102. xai_review-0.3.0/ai_review/services/review/inline/__init__.py +0 -0
  103. xai_review-0.3.0/ai_review/services/review/inline/schema.py +53 -0
  104. xai_review-0.3.0/ai_review/services/review/inline/service.py +38 -0
  105. xai_review-0.3.0/ai_review/services/review/policy/__init__.py +0 -0
  106. xai_review-0.3.0/ai_review/services/review/policy/service.py +60 -0
  107. xai_review-0.3.0/ai_review/services/review/service.py +207 -0
  108. xai_review-0.3.0/ai_review/services/review/summary/__init__.py +0 -0
  109. xai_review-0.3.0/ai_review/services/review/summary/schema.py +15 -0
  110. xai_review-0.3.0/ai_review/services/review/summary/service.py +14 -0
  111. xai_review-0.3.0/ai_review/services/vcs/__init__.py +0 -0
  112. xai_review-0.3.0/ai_review/services/vcs/factory.py +12 -0
  113. xai_review-0.3.0/ai_review/services/vcs/gitlab/__init__.py +0 -0
  114. xai_review-0.3.0/ai_review/services/vcs/gitlab/client.py +152 -0
  115. xai_review-0.3.0/ai_review/services/vcs/types.py +55 -0
  116. xai_review-0.3.0/ai_review/tests/__init__.py +0 -0
  117. xai_review-0.3.0/ai_review/tests/fixtures/__init__.py +0 -0
  118. xai_review-0.3.0/ai_review/tests/fixtures/git.py +31 -0
  119. xai_review-0.3.0/ai_review/tests/suites/__init__.py +0 -0
  120. xai_review-0.3.0/ai_review/tests/suites/clients/__init__.py +0 -0
  121. xai_review-0.3.0/ai_review/tests/suites/clients/claude/__init__.py +0 -0
  122. xai_review-0.3.0/ai_review/tests/suites/clients/claude/test_client.py +31 -0
  123. xai_review-0.3.0/ai_review/tests/suites/clients/claude/test_schema.py +59 -0
  124. xai_review-0.3.0/ai_review/tests/suites/clients/gemini/__init__.py +0 -0
  125. xai_review-0.3.0/ai_review/tests/suites/clients/gemini/test_client.py +30 -0
  126. xai_review-0.3.0/ai_review/tests/suites/clients/gemini/test_schema.py +105 -0
  127. xai_review-0.3.0/ai_review/tests/suites/clients/openai/__init__.py +0 -0
  128. xai_review-0.3.0/ai_review/tests/suites/clients/openai/test_client.py +30 -0
  129. xai_review-0.3.0/ai_review/tests/suites/clients/openai/test_schema.py +53 -0
  130. xai_review-0.3.0/ai_review/tests/suites/libs/__init__.py +0 -0
  131. xai_review-0.3.0/ai_review/tests/suites/libs/diff/__init__.py +0 -0
  132. xai_review-0.3.0/ai_review/tests/suites/libs/diff/test_models.py +105 -0
  133. xai_review-0.3.0/ai_review/tests/suites/libs/diff/test_parser.py +115 -0
  134. xai_review-0.3.0/ai_review/tests/suites/libs/diff/test_tools.py +62 -0
  135. xai_review-0.3.0/ai_review/tests/suites/services/__init__.py +0 -0
  136. xai_review-0.3.0/ai_review/tests/suites/services/diff/__init__.py +0 -0
  137. xai_review-0.3.0/ai_review/tests/suites/services/diff/test_renderers.py +168 -0
  138. xai_review-0.3.0/ai_review/tests/suites/services/diff/test_service.py +84 -0
  139. xai_review-0.3.0/ai_review/tests/suites/services/diff/test_tools.py +108 -0
  140. xai_review-0.3.0/ai_review/tests/suites/services/prompt/__init__.py +0 -0
  141. xai_review-0.3.0/ai_review/tests/suites/services/prompt/test_schema.py +38 -0
  142. xai_review-0.3.0/ai_review/tests/suites/services/prompt/test_service.py +128 -0
  143. xai_review-0.3.0/ai_review/tests/suites/services/review/__init__.py +0 -0
  144. xai_review-0.3.0/ai_review/tests/suites/services/review/inline/__init__.py +0 -0
  145. xai_review-0.3.0/ai_review/tests/suites/services/review/inline/test_schema.py +65 -0
  146. xai_review-0.3.0/ai_review/tests/suites/services/review/inline/test_service.py +49 -0
  147. xai_review-0.3.0/ai_review/tests/suites/services/review/policy/__init__.py +0 -0
  148. xai_review-0.3.0/ai_review/tests/suites/services/review/policy/test_service.py +95 -0
  149. xai_review-0.3.0/ai_review/tests/suites/services/review/summary/__init__.py +0 -0
  150. xai_review-0.3.0/ai_review/tests/suites/services/review/summary/test_schema.py +22 -0
  151. xai_review-0.3.0/ai_review/tests/suites/services/review/summary/test_service.py +16 -0
  152. xai_review-0.3.0/pyproject.toml +31 -0
  153. xai_review-0.3.0/setup.cfg +4 -0
  154. xai_review-0.3.0/xai_review.egg-info/PKG-INFO +11 -0
  155. xai_review-0.3.0/xai_review.egg-info/SOURCES.txt +157 -0
  156. xai_review-0.3.0/xai_review.egg-info/dependency_links.txt +1 -0
  157. xai_review-0.3.0/xai_review.egg-info/entry_points.txt +2 -0
  158. xai_review-0.3.0/xai_review.egg-info/requires.txt +8 -0
  159. xai_review-0.3.0/xai_review.egg-info/top_level.txt +1 -0
@@ -0,0 +1,11 @@
+ Metadata-Version: 2.4
+ Name: xai-review
+ Version: 0.3.0
+ Requires-Dist: typer
+ Requires-Dist: httpx
+ Requires-Dist: pyyaml
+ Requires-Dist: pytest
+ Requires-Dist: loguru
+ Requires-Dist: aiofiles
+ Requires-Dist: pydantic
+ Requires-Dist: pydantic-settings
@@ -0,0 +1,260 @@
+ # AI Review
+
+ AI-powered code review tool.
+
+ _Made with ❤️ by [@NikitaFilonov](https://t.me/sound_right)_
+
+ ---
+
+ ## 📑 Table of Contents
+
+ - 🚀 [Quick Start](#-quick-start)
+ - ⚙️ [Configuration](#-configuration)
+ - 🛠 [Advanced usage](#-advanced-usage)
+ - 📂 [Examples](#-examples)
+
+ ---
+
+ ## 🚀 Quick Start
+
+ Install via **pip**:
+
+ ```bash
+ pip install ai-review
+ ```
+
+ Or run directly via Docker:
+
+ ```bash
+ docker run --rm -v $(pwd):/app nikitafilonov/ai-review:latest run-summary
+ ```
+
+ 👉 Before running, create a basic configuration file [.ai-review.yaml](./docs/configs/.ai-review.yaml) in the root of
+ your project:
+
+ ```yaml
+ llm:
+   provider: OPENAI
+
+   meta:
+     model: gpt-4o-mini
+     max_tokens: 1200
+     temperature: 0.3
+
+   http_client:
+     timeout: 120
+     api_url: https://api.openai.com/v1
+     api_token: ${OPENAI_API_KEY}
+
+ vcs:
+   provider: GITLAB
+
+   pipeline:
+     project_id: 1
+     merge_request_id: 100
+
+   http_client:
+     timeout: 120
+     api_url: https://gitlab.com
+     api_token: ${GITLAB_API_TOKEN}
+ ```
+
+ 👉 This will:
+
+ - Run AI Review against your codebase.
+ - Generate inline and/or summary comments (depending on the selected mode).
+ - Use your chosen LLM provider (OpenAI's gpt-4o-mini in this example).
+
+ > **Note:** Running `ai-review run` executes the full review (inline + summary).
+ > To run only one mode, use the dedicated subcommands:
+ > - `ai-review run-inline`
+ > - `ai-review run-summary`
+ > - `ai-review run-context`
+
+ ---
+
+ AI Review can be configured via `.ai-review.yaml`, `.ai-review.json`, or `.env`. See [./docs/configs](./docs/configs)
+ for complete, ready-to-use examples.
+
+ Key things you can customize:
+
+ - **LLM provider** — OpenAI, Gemini, or Claude
+ - **Model settings** — model name, temperature, max tokens
+ - **VCS integration** — GitLab (currently supported) with project/MR context
+ - **Review policy** — which files to include/exclude, review modes
+ - **Prompts** — inline/context/summary prompt templates
+
+ 👉 Minimal configuration is enough to get started. Use the full reference configs if you want fine-grained
+ control (timeouts, artifacts, logging, etc.).
+
+ ---
+
+ ## 🛠 Advanced usage
+
+ Below is the **full configuration reference** with all available options. Most projects only need a
+ minimal `.ai-review.yaml`, but you can use these templates as a starting point for advanced setups:
+
+ - [docs/configs/.ai-review.yaml](./docs/configs/.ai-review.yaml) — YAML configuration (with comments)
+ - [docs/configs/.ai-review.json](./docs/configs/.ai-review.json) — JSON configuration
+ - [docs/configs/.env.example](./docs/configs/.env.example) — environment variables example
+
+ 👉 The YAML file includes comments for every option, making it the best place to explore the complete set of settings.
+
+ Below is an **example GitLab CI job** showing how to run AI Review configured via environment variables:
+
+ ```yaml
+ ai-review:
+   tags: [ build ]
+   when: manual
+   stage: checks
+   image: nikitafilonov/ai-review:latest
+   rules:
+     - if: '$CI_MERGE_REQUEST_IID'
+   script:
+     - ai-review run
+   variables:
+     # ===============================
+     # LLM provider & model
+     # ===============================
+     # Which LLM to use.
+     # Options: OPENAI | GEMINI | CLAUDE
+     LLM__PROVIDER: "OPENAI"
+
+     # --- Model metadata ---
+     # For OpenAI: gpt-4o, gpt-4o-mini, gpt-3.5-turbo
+     # For Gemini: gemini-2.0-pro, gemini-2.0-flash
+     # For Claude: claude-3-opus, claude-3-sonnet, claude-3-haiku
+     LLM__META__MODEL: "gpt-4o-mini"
+
+     # Max tokens for completion.
+     LLM__META__MAX_TOKENS: "1200"
+
+     # Creativity of responses (0 = deterministic, >0 = more creative).
+     LLM__META__TEMPERATURE: "0.3"
+
+     # --- HTTP client configuration ---
+     # API endpoint + token (must be set as CI/CD variables).
+     LLM__HTTP_CLIENT__API_URL: "https://api.openai.com/v1"
+     LLM__HTTP_CLIENT__API_TOKEN: "$OPENAI_API_KEY"
+
+     # Example for Gemini:
+     # LLM__HTTP_CLIENT__API_URL: "https://generativelanguage.googleapis.com"
+     # LLM__HTTP_CLIENT__API_TOKEN: "$GEMINI_API_KEY"
+
+     # Example for Claude:
+     # LLM__HTTP_CLIENT__API_URL: "https://api.anthropic.com"
+     # LLM__HTTP_CLIENT__API_TOKEN: "$CLAUDE_API_KEY"
+     # LLM__HTTP_CLIENT__API_VERSION: "2023-06-01"
+
+     # ===============================
+     # VCS (GitLab integration)
+     # ===============================
+     VCS__PROVIDER: "GITLAB"
+
+     # Context of the current pipeline (auto-populated by GitLab).
+     VCS__PIPELINE__PROJECT_ID: "$CI_PROJECT_ID"
+     VCS__PIPELINE__MERGE_REQUEST_ID: "$CI_MERGE_REQUEST_IID"
+
+     # GitLab API access.
+     VCS__HTTP_CLIENT__API_URL: "$CI_SERVER_URL"
+     VCS__HTTP_CLIENT__API_TOKEN: "$CI_JOB_TOKEN"
+
+     # ===============================
+     # Prompts (optional overrides)
+     # ===============================
+     # Inline prompts (joined in order, local review instructions).
+     # PROMPT__INLINE_PROMPT_FILES__0: "./prompts/inline.md"
+
+     # Inline system prompts (format/contract rules).
+     # PROMPT__SYSTEM_INLINE_PROMPT_FILES__0: "./prompts/system_inline.md"
+     # PROMPT__INCLUDE_INLINE_SYSTEM_PROMPTS: "true"
+
+     # Context prompts (joined in order, broader analysis instructions).
+     # PROMPT__CONTEXT_PROMPT_FILES__0: "./prompts/context.md"
+
+     # Context system prompts (format/contract rules).
+     # PROMPT__SYSTEM_CONTEXT_PROMPT_FILES__0: "./prompts/system_context.md"
+     # PROMPT__INCLUDE_CONTEXT_SYSTEM_PROMPTS: "true"
+
+     # Summary prompts (joined in order, local review instructions).
+     # PROMPT__SUMMARY_PROMPT_FILES__0: "./prompts/summary.md"
+
+     # Summary system prompts (format/contract rules).
+     # PROMPT__SYSTEM_SUMMARY_PROMPT_FILES__0: "./prompts/system_summary.md"
+     # PROMPT__INCLUDE_SUMMARY_SYSTEM_PROMPTS: "true"
+
+     # ===============================
+     # Custom context variables
+     # ===============================
+     # You can inject custom variables into prompts via PROMPT__CONTEXT__*.
+     # These will be available as placeholders {var} in all templates.
+     #
+     # Example usage in prompt templates:
+     #   Project: {company_name}
+     #   Env: {environment}
+     #   Pipeline: {ci_pipeline_url}
+     #
+     # Values override built-in variables if names collide.
+     # To avoid clashes, prefer namespaced keys (ci_pipeline_url, org_notify_handle, env_name).
+     #
+     # PROMPT__CONTEXT__ENVIRONMENT: "staging"
+     # PROMPT__CONTEXT__COMPANY_NAME: "ACME Corp"
+     # PROMPT__CONTEXT__CI_PIPELINE_URL: "https://gitlab.com/pipelines/123"
+
+     # ===============================
+     # Review options
+     # ===============================
+     # Available modes:
+     #   FULL_FILE_DIFF
+     #   FULL_FILE_CURRENT
+     #   FULL_FILE_PREVIOUS
+     #   ONLY_ADDED
+     #   ONLY_REMOVED
+     #   ADDED_AND_REMOVED
+     #   ONLY_ADDED_WITH_CONTEXT
+     #   ONLY_REMOVED_WITH_CONTEXT
+     #   ADDED_AND_REMOVED_WITH_CONTEXT
+     REVIEW__MODE: "FULL_FILE_DIFF"
+
+     # Tags used to mark AI-generated comments in the MR.
+     REVIEW__INLINE_TAG: "#ai-review-inline"
+     REVIEW__SUMMARY_TAG: "#ai-review-summary"
+
+     # Context lines (only for *_WITH_CONTEXT modes).
+     REVIEW__CONTEXT_LINES: "10"
+
+     # Markers for changes in output.
+     REVIEW__REVIEW_ADDED_MARKER: " # added"
+     REVIEW__REVIEW_REMOVED_MARKER: " # removed"
+
+     # Optional filters:
+     # REVIEW__ALLOW_CHANGES: "src/*,lib/*"
+     # REVIEW__IGNORE_CHANGES: "docs/*,README.md"
+
+     # Optional limits for the number of AI comments:
+     # REVIEW__MAX_INLINE_COMMENTS: "20"   # Max inline comments per file (default: unlimited)
+     # REVIEW__MAX_CONTEXT_COMMENTS: "50"  # Max context comments per MR (default: unlimited)
+
+     # ===============================
+     # Logger (optional)
+     # ===============================
+     LOGGER__LEVEL: "INFO"
+     LOGGER__FORMAT: "{time:YYYY-MM-DD HH:mm:ss} | {level} | {extra[logger_name]} | {message}"
+
+     # ===============================
+     # Artifacts (optional)
+     # ===============================
+     ARTIFACTS__LLM_DIR: "./artifacts/llm"
+     ARTIFACTS__LLM_ENABLED: "false"
+
+   allow_failure: true
+
+ ```
+
+ ---
+
+ ## 📂 Examples
+
+ - [./docs/ci/gitlab.yaml](./docs/ci/gitlab.yaml) — ready-to-use CI snippet
+ - [./docs/configs](./docs/configs) — sample `.yaml`, `.json`, `.env` configs
+ - [./docs/prompts](./docs/prompts) — prompt templates for Python/Go (light & strict modes)
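
The double-underscore variable names used throughout the README and CI example above (for example `LLM__META__MODEL` or `VCS__HTTP_CLIENT__API_TOKEN`) follow the nested-delimiter convention of pydantic-settings, which this package depends on. The actual configuration models live in `ai_review/config.py` and `ai_review/libs/config/` (not shown in this section), so the class and field names below are assumptions; the sketch only illustrates how a nested environment variable maps onto nested settings.

```python
from pydantic import BaseModel
from pydantic_settings import BaseSettings, SettingsConfigDict


class MetaConfig(BaseModel):
    # Hypothetical fields mirroring the README keys; the real models live in ai_review/libs/config.
    model: str = "gpt-4o-mini"
    max_tokens: int = 1200
    temperature: float = 0.3


class LLMConfig(BaseModel):
    provider: str = "OPENAI"
    meta: MetaConfig = MetaConfig()


class Settings(BaseSettings):
    # "__" splits env var names into nested fields, e.g. LLM__META__MODEL -> settings.llm.meta.model
    model_config = SettingsConfigDict(env_nested_delimiter="__")

    llm: LLMConfig = LLMConfig()


# With LLM__META__MODEL=gpt-4o exported in the environment:
#   Settings().llm.meta.model == "gpt-4o"
```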
File without changes
File without changes
File without changes
@@ -0,0 +1,7 @@
+ from ai_review.services.review.service import ReviewService
+
+
+ async def run_context_review_command():
+     review_service = ReviewService()
+     await review_service.run_context_review()
+     review_service.report_total_cost()
@@ -0,0 +1,7 @@
+ from ai_review.services.review.service import ReviewService
+
+
+ async def run_inline_review_command():
+     review_service = ReviewService()
+     await review_service.run_inline_review()
+     review_service.report_total_cost()
@@ -0,0 +1,8 @@
+ from ai_review.services.review.service import ReviewService
+
+
+ async def run_review_command():
+     review_service = ReviewService()
+     await review_service.run_inline_review()
+     await review_service.run_summary_review()
+     review_service.report_total_cost()
@@ -0,0 +1,7 @@
+ from ai_review.services.review.service import ReviewService
+
+
+ async def run_summary_review_command():
+     review_service = ReviewService()
+     await review_service.run_summary_review()
+     review_service.report_total_cost()
@@ -0,0 +1,54 @@
+ import asyncio
+
+ import typer
+
+ from ai_review.cli.commands.run_context_review import run_context_review_command
+ from ai_review.cli.commands.run_inline_review import run_inline_review_command
+ from ai_review.cli.commands.run_review import run_review_command
+ from ai_review.cli.commands.run_summary_review import run_summary_review_command
+ from ai_review.config import settings
+
+ app = typer.Typer(help="AI Review CLI")
+
+
+ @app.command("run")
+ def run():
+     """Run the full AI review pipeline"""
+     typer.secho("Starting full AI review...", fg=typer.colors.CYAN, bold=True)
+     asyncio.run(run_review_command())
+     typer.secho("AI review completed successfully!", fg=typer.colors.GREEN, bold=True)
+
+
+ @app.command("run-inline")
+ def run_inline():
+     """Run only the inline review"""
+     typer.secho("Starting inline AI review...", fg=typer.colors.CYAN)
+     asyncio.run(run_inline_review_command())
+     typer.secho("AI review completed successfully!", fg=typer.colors.GREEN, bold=True)
+
+
+ @app.command("run-context")
+ def run_context():
+     """Run only the context review"""
+     typer.secho("Starting context AI review...", fg=typer.colors.CYAN)
+     asyncio.run(run_context_review_command())
+     typer.secho("AI review completed successfully!", fg=typer.colors.GREEN, bold=True)
+
+
+ @app.command("run-summary")
+ def run_summary():
+     """Run only the summary review"""
+     typer.secho("Starting summary AI review...", fg=typer.colors.CYAN)
+     asyncio.run(run_summary_review_command())
+     typer.secho("AI review completed successfully!", fg=typer.colors.GREEN, bold=True)
+
+
+ @app.command("show-config")
+ def show_config():
+     """Show the current resolved configuration"""
+     typer.secho("Loaded AI Review configuration:", fg=typer.colors.CYAN, bold=True)
+     typer.echo(settings.model_dump_json(indent=2, exclude_none=True))
+
+
+ if __name__ == "__main__":
+     app()
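
For a quick smoke test, the Typer app above can be invoked in-process with Typer's test runner. A minimal sketch, assuming the package is installed and configuration resolves from `.ai-review.yaml` or environment variables:

```python
from typer.testing import CliRunner

from ai_review.cli.main import app

runner = CliRunner()

# Dump the resolved configuration without calling any external API.
result = runner.invoke(app, ["show-config"])
print(result.exit_code)
print(result.output)

# The review commands ("run", "run-inline", "run-summary", "run-context")
# additionally need valid LLM and VCS credentials in the settings.
```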
File without changes
File without changes
@@ -0,0 +1,44 @@
+ from httpx import AsyncClient, Response, AsyncHTTPTransport
+
+ from ai_review.clients.claude.schema import ClaudeChatRequestSchema, ClaudeChatResponseSchema
+ from ai_review.config import settings
+ from ai_review.libs.http.client import HTTPClient
+ from ai_review.libs.http.event_hooks.logger import LoggerEventHook
+ from ai_review.libs.http.handlers import HTTPClientError, handle_http_error
+ from ai_review.libs.http.transports.retry import RetryTransport
+ from ai_review.libs.logger import get_logger
+
+
+ class ClaudeHTTPClientError(HTTPClientError):
+     pass
+
+
+ class ClaudeHTTPClient(HTTPClient):
+     @handle_http_error(client="ClaudeHTTPClient", exception=ClaudeHTTPClientError)
+     async def chat_api(self, request: ClaudeChatRequestSchema) -> Response:
+         return await self.post("/v1/messages", json=request.model_dump())
+
+     async def chat(self, request: ClaudeChatRequestSchema) -> ClaudeChatResponseSchema:
+         response = await self.chat_api(request)
+         return ClaudeChatResponseSchema.model_validate_json(response.text)
+
+
+ def get_claude_http_client() -> ClaudeHTTPClient:
+     logger = get_logger("CLAUDE_HTTP_CLIENT")
+     logger_event_hook = LoggerEventHook(logger=logger)
+     retry_transport = RetryTransport(transport=AsyncHTTPTransport())
+
+     client = AsyncClient(
+         timeout=settings.llm.http_client.timeout,
+         headers={
+             "x-api-key": settings.llm.http_client.api_key,
+             "anthropic-version": settings.llm.http_client.api_version,
+         },
+         base_url=settings.llm.http_client.base_url,
+         transport=retry_transport,
+         event_hooks={
+             "request": [logger_event_hook.request],
+             "response": [logger_event_hook.response],
+         },
+     )
+     return ClaudeHTTPClient(client=client)
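
A minimal usage sketch for the client above, assuming the `LLM__HTTP_CLIENT__*` settings point at the Anthropic API; the model name and prompt text are illustrative only:

```python
import asyncio

from ai_review.clients.claude.client import get_claude_http_client
from ai_review.clients.claude.schema import ClaudeChatRequestSchema, ClaudeMessageSchema


async def main() -> None:
    client = get_claude_http_client()
    request = ClaudeChatRequestSchema(
        model="claude-3-haiku",  # illustrative model name
        system="You are a strict code reviewer.",
        messages=[ClaudeMessageSchema(role="user", content="Review this diff: ...")],
        max_tokens=512,
        temperature=0.3,
    )
    response = await client.chat(request)  # POST /v1/messages
    print(response.first_text)
    print(response.usage.total_tokens)


asyncio.run(main())
```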
@@ -0,0 +1,44 @@
+ from typing import Literal
+
+ from pydantic import BaseModel
+
+
+ class ClaudeMessageSchema(BaseModel):
+     role: Literal["user", "assistant", "system"]
+     content: str
+
+
+ class ClaudeChatRequestSchema(BaseModel):
+     model: str
+     system: str | None = None
+     messages: list[ClaudeMessageSchema]
+     max_tokens: int
+     temperature: float
+
+
+ class ClaudeContentSchema(BaseModel):
+     type: Literal["text"]
+     text: str
+
+
+ class ClaudeUsageSchema(BaseModel):
+     input_tokens: int
+     output_tokens: int
+
+     @property
+     def total_tokens(self) -> int:
+         return self.input_tokens + self.output_tokens
+
+
+ class ClaudeChatResponseSchema(BaseModel):
+     id: str
+     role: str
+     usage: ClaudeUsageSchema
+     content: list[ClaudeContentSchema]
+
+     @property
+     def first_text(self) -> str:
+         if not self.content:
+             return ""
+
+         return self.content[0].text.strip()
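
The response schema can be exercised on its own; the payload below is a hypothetical example shaped like a `/v1/messages` response, not a real API capture:

```python
from ai_review.clients.claude.schema import ClaudeChatResponseSchema

payload = {
    "id": "msg_123",
    "role": "assistant",
    "usage": {"input_tokens": 250, "output_tokens": 80},
    "content": [{"type": "text", "text": "  Consider extracting this block into a helper.  "}],
}

response = ClaudeChatResponseSchema.model_validate(payload)
print(response.first_text)           # "Consider extracting this block into a helper." (whitespace stripped)
print(response.usage.total_tokens)   # 330
```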
File without changes
@@ -0,0 +1,45 @@
+ from httpx import Response, AsyncHTTPTransport, AsyncClient
+
+ from ai_review.clients.gemini.schema import GeminiChatRequestSchema, GeminiChatResponseSchema
+ from ai_review.config import settings
+ from ai_review.libs.http.client import HTTPClient
+ from ai_review.libs.http.event_hooks.logger import LoggerEventHook
+ from ai_review.libs.http.handlers import HTTPClientError, handle_http_error
+ from ai_review.libs.http.transports.retry import RetryTransport
+ from ai_review.libs.logger import get_logger
+
+
+ class GeminiHTTPClientError(HTTPClientError):
+     pass
+
+
+ class GeminiHTTPClient(HTTPClient):
+     @handle_http_error(client="GeminiHTTPClient", exception=GeminiHTTPClientError)
+     async def chat_api(self, request: GeminiChatRequestSchema) -> Response:
+         meta = settings.llm.meta
+         return await self.post(
+             f"/v1beta/models/{meta.model}:generateContent", json=request.model_dump()
+         )
+
+     async def chat(self, request: GeminiChatRequestSchema) -> GeminiChatResponseSchema:
+         response = await self.chat_api(request)
+         return GeminiChatResponseSchema.model_validate_json(response.text)
+
+
+ def get_gemini_http_client() -> GeminiHTTPClient:
+     logger = get_logger("GEMINI_HTTP_CLIENT")
+     logger_event_hook = LoggerEventHook(logger=logger)
+     retry_transport = RetryTransport(transport=AsyncHTTPTransport())
+
+     client = AsyncClient(
+         timeout=settings.llm.http_client.timeout,
+         headers={"x-goog-api-key": settings.llm.http_client.api_key},
+         base_url=settings.llm.http_client.base_url,
+         transport=retry_transport,
+         event_hooks={
+             "request": [logger_event_hook.request],
+             "response": [logger_event_hook.response],
+         },
+     )
+
+     return GeminiHTTPClient(client=client)
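
The Gemini counterpart works the same way; a sketch under the assumption that the `LLM__*` settings point at the Gemini API (model name and prompt text are illustrative):

```python
import asyncio

from ai_review.clients.gemini.client import get_gemini_http_client
from ai_review.clients.gemini.schema import (
    GeminiChatRequestSchema,
    GeminiContentSchema,
    GeminiGenerationConfigSchema,
    GeminiPartSchema,
)


async def main() -> None:
    client = get_gemini_http_client()
    request = GeminiChatRequestSchema(
        contents=[GeminiContentSchema(role="user", parts=[GeminiPartSchema(text="Review this diff: ...")])],
        generation_config=GeminiGenerationConfigSchema(temperature=0.3, max_output_tokens=1200),
    )
    response = await client.chat(request)  # POST /v1beta/models/{model}:generateContent
    print(response.first_text)


asyncio.run(main())
```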
@@ -0,0 +1,78 @@
+ from pydantic import BaseModel, Field, ConfigDict
+
+
+ class GeminiPartSchema(BaseModel):
+     text: str
+
+
+ class GeminiUsageSchema(BaseModel):
+     model_config = ConfigDict(populate_by_name=True)
+
+     prompt_token_count: int = Field(alias="promptTokenCount")
+     total_tokens_count: int | None = Field(default=None, alias="totalTokenCount")
+     candidates_token_count: int | None = Field(default=None, alias="candidatesTokenCount")
+     output_thoughts_token_count: int | None = Field(default=None, alias="outputThoughtsTokenCount")
+
+     @property
+     def total_tokens(self) -> int:
+         if self.total_tokens_count is not None:
+             return self.total_tokens_count
+
+         return (
+             (self.prompt_token_count or 0)
+             + (self.candidates_token_count or 0)
+             + (self.output_thoughts_token_count or 0)
+         )
+
+     @property
+     def prompt_tokens(self) -> int:
+         return self.prompt_token_count
+
+     @property
+     def completion_tokens(self) -> int | None:
+         return self.candidates_token_count or self.output_thoughts_token_count
+
+
+ class GeminiContentSchema(BaseModel):
+     role: str = "user"
+     parts: list[GeminiPartSchema] | None = None
+
+
+ class GeminiCandidateSchema(BaseModel):
+     content: GeminiContentSchema
+
+
+ class GeminiGenerationConfigSchema(BaseModel):
+     model_config = ConfigDict(populate_by_name=True)
+
+     temperature: float
+     max_output_tokens: int = Field(alias="maxOutputTokens")
+
+
+ class GeminiChatRequestSchema(BaseModel):
+     model_config = ConfigDict(populate_by_name=True)
+
+     contents: list[GeminiContentSchema]
+     generation_config: GeminiGenerationConfigSchema | None = Field(
+         alias="generationConfig",
+         default=None
+     )
+     system_instruction: GeminiContentSchema | None = Field(
+         alias="systemInstruction",
+         default=None
+     )
+
+
+ class GeminiChatResponseSchema(BaseModel):
+     model_config = ConfigDict(populate_by_name=True)
+
+     usage: GeminiUsageSchema = Field(alias="usageMetadata")
+     candidates: list[GeminiCandidateSchema]
+
+     @property
+     def first_text(self) -> str:
+         if not self.candidates:
+             return ""
+
+         parts = self.candidates[0].content.parts or []
+         return (parts[0].text if parts else "").strip()
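
As with the Claude schema, the camelCase aliases can be checked directly; the payload below is a hypothetical `generateContent`-style response, not a real API capture:

```python
from ai_review.clients.gemini.schema import GeminiChatResponseSchema

payload = {
    "usageMetadata": {"promptTokenCount": 100, "candidatesTokenCount": 40},
    "candidates": [
        {"content": {"role": "model", "parts": [{"text": "Looks good overall."}]}},
    ],
}

response = GeminiChatResponseSchema.model_validate(payload)
print(response.first_text)            # "Looks good overall."
print(response.usage.total_tokens)    # 140 (falls back to prompt + candidates when totalTokenCount is absent)
```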
File without changes
@@ -0,0 +1,31 @@
+ from httpx import AsyncClient, AsyncHTTPTransport
+
+ from ai_review.clients.gitlab.mr.client import GitLabMergeRequestsHTTPClient
+ from ai_review.config import settings
+ from ai_review.libs.http.event_hooks.logger import LoggerEventHook
+ from ai_review.libs.http.transports.retry import RetryTransport
+ from ai_review.libs.logger import get_logger
+
+
+ class GitLabHTTPClient:
+     def __init__(self, client: AsyncClient):
+         self.mr = GitLabMergeRequestsHTTPClient(client)
+
+
+ def get_gitlab_http_client() -> GitLabHTTPClient:
+     logger = get_logger("GITLAB_MERGE_REQUESTS_HTTP_CLIENT")
+     logger_event_hook = LoggerEventHook(logger=logger)
+     retry_transport = RetryTransport(transport=AsyncHTTPTransport())
+
+     client = AsyncClient(
+         timeout=settings.vcs.http_client.timeout,
+         headers={"Authorization": f"Bearer {settings.vcs.http_client.bearer_token}"},
+         base_url=settings.vcs.http_client.base_url,
+         transport=retry_transport,
+         event_hooks={
+             'request': [logger_event_hook.request],
+             'response': [logger_event_hook.response]
+         }
+     )
+
+     return GitLabHTTPClient(client=client)
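
Finally, a short sketch of how the GitLab facade above is obtained; the merge-request operations themselves live in `ai_review/clients/gitlab/mr/client.py`, which is not part of this excerpt:

```python
from ai_review.clients.gitlab.client import get_gitlab_http_client

# Assumes VCS__HTTP_CLIENT__API_URL and VCS__HTTP_CLIENT__API_TOKEN are configured.
gitlab = get_gitlab_http_client()

# All merge-request calls go through the nested sub-client.
mr_client = gitlab.mr  # GitLabMergeRequestsHTTPClient
```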