xai-review 0.34.0__py3-none-any.whl → 0.36.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of xai-review might be problematic.
Files changed (38)
  1. ai_review/clients/claude/client.py +1 -1
  2. ai_review/clients/claude/schema.py +2 -2
  3. ai_review/clients/gemini/client.py +2 -1
  4. ai_review/clients/gemini/schema.py +2 -2
  5. ai_review/clients/ollama/client.py +1 -1
  6. ai_review/clients/openai/v1/__init__.py +0 -0
  7. ai_review/clients/openai/{client.py → v1/client.py} +9 -9
  8. ai_review/clients/openai/{schema.py → v1/schema.py} +2 -2
  9. ai_review/clients/openai/v1/types.py +8 -0
  10. ai_review/clients/openai/v2/__init__.py +0 -0
  11. ai_review/clients/openai/v2/client.py +46 -0
  12. ai_review/clients/openai/v2/schema.py +47 -0
  13. ai_review/clients/openai/v2/types.py +11 -0
  14. ai_review/clients/openrouter/client.py +1 -1
  15. ai_review/clients/openrouter/schema.py +2 -2
  16. ai_review/libs/config/llm/meta.py +2 -2
  17. ai_review/libs/config/llm/openai.py +4 -0
  18. ai_review/resources/pricing.yaml +39 -1
  19. ai_review/services/llm/openai/client.py +37 -9
  20. ai_review/tests/fixtures/clients/openai.py +84 -12
  21. ai_review/tests/suites/clients/openai/v1/__init__.py +0 -0
  22. ai_review/tests/suites/clients/openai/v1/test_client.py +12 -0
  23. ai_review/tests/suites/clients/openai/{test_schema.py → v1/test_schema.py} +1 -1
  24. ai_review/tests/suites/clients/openai/v2/__init__.py +0 -0
  25. ai_review/tests/suites/clients/openai/v2/test_client.py +12 -0
  26. ai_review/tests/suites/clients/openai/v2/test_schema.py +80 -0
  27. ai_review/tests/suites/libs/config/llm/__init__.py +0 -0
  28. ai_review/tests/suites/libs/config/llm/test_openai.py +28 -0
  29. ai_review/tests/suites/services/llm/openai/test_client.py +23 -6
  30. ai_review/tests/suites/services/llm/test_factory.py +1 -1
  31. {xai_review-0.34.0.dist-info → xai_review-0.36.0.dist-info}/METADATA +2 -2
  32. {xai_review-0.34.0.dist-info → xai_review-0.36.0.dist-info}/RECORD +36 -25
  33. ai_review/clients/openai/types.py +0 -8
  34. ai_review/tests/suites/clients/openai/test_client.py +0 -12
  35. {xai_review-0.34.0.dist-info → xai_review-0.36.0.dist-info}/WHEEL +0 -0
  36. {xai_review-0.34.0.dist-info → xai_review-0.36.0.dist-info}/entry_points.txt +0 -0
  37. {xai_review-0.34.0.dist-info → xai_review-0.36.0.dist-info}/licenses/LICENSE +0 -0
  38. {xai_review-0.34.0.dist-info → xai_review-0.36.0.dist-info}/top_level.txt +0 -0

ai_review/clients/claude/client.py
@@ -17,7 +17,7 @@ class ClaudeHTTPClientError(HTTPClientError):
 class ClaudeHTTPClient(HTTPClient, ClaudeHTTPClientProtocol):
     @handle_http_error(client="ClaudeHTTPClient", exception=ClaudeHTTPClientError)
     async def chat_api(self, request: ClaudeChatRequestSchema) -> Response:
-        return await self.post("/v1/messages", json=request.model_dump())
+        return await self.post("/v1/messages", json=request.model_dump(exclude_none=True))

     async def chat(self, request: ClaudeChatRequestSchema) -> ClaudeChatResponseSchema:
         response = await self.chat_api(request)
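
Note on the change above (and the identical changes in the other clients below): model_dump(exclude_none=True) omits optional fields that were left unset, instead of serializing them as explicit nulls. A minimal standalone sketch of the Pydantic v2 behavior this relies on (the ChatRequest model here is hypothetical, not code from the package):

    from pydantic import BaseModel

    class ChatRequest(BaseModel):
        model: str
        max_tokens: int | None = None
        temperature: float | None = None

    request = ChatRequest(model="claude-3-haiku")
    print(request.model_dump())
    # {'model': 'claude-3-haiku', 'max_tokens': None, 'temperature': None}
    print(request.model_dump(exclude_none=True))
    # {'model': 'claude-3-haiku'}

Without exclude_none=True, the now-optional max_tokens and temperature fields would reach provider APIs as JSON null values, which many endpoints reject.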

ai_review/clients/claude/schema.py
@@ -12,8 +12,8 @@ class ClaudeChatRequestSchema(BaseModel):
     model: str
     system: str | None = None
     messages: list[ClaudeMessageSchema]
-    max_tokens: int
-    temperature: float
+    max_tokens: int | None = None
+    temperature: float | None = None


 class ClaudeContentSchema(BaseModel):

ai_review/clients/gemini/client.py
@@ -19,7 +19,8 @@ class GeminiHTTPClient(HTTPClient, GeminiHTTPClientProtocol):
     async def chat_api(self, request: GeminiChatRequestSchema) -> Response:
         meta = settings.llm.meta
         return await self.post(
-            f"/v1beta/models/{meta.model}:generateContent", json=request.model_dump()
+            f"/v1beta/models/{meta.model}:generateContent",
+            json=request.model_dump(exclude_none=True)
         )

     async def chat(self, request: GeminiChatRequestSchema) -> GeminiChatResponseSchema:

ai_review/clients/gemini/schema.py
@@ -45,8 +45,8 @@ class GeminiCandidateSchema(BaseModel):
 class GeminiGenerationConfigSchema(BaseModel):
     model_config = ConfigDict(populate_by_name=True)

-    temperature: float
-    max_output_tokens: int = Field(alias="maxOutputTokens")
+    temperature: float | None = None
+    max_output_tokens: int | None = Field(alias="maxOutputTokens", default=None)


 class GeminiChatRequestSchema(BaseModel):

ai_review/clients/ollama/client.py
@@ -17,7 +17,7 @@ class OllamaHTTPClientError(HTTPClientError):
 class OllamaHTTPClient(HTTPClient, OllamaHTTPClientProtocol):
     @handle_http_error(client="OllamaHTTPClient", exception=OllamaHTTPClientError)
     async def chat_api(self, request: OllamaChatRequestSchema) -> Response:
-        return await self.post("/api/chat", json=request.model_dump())
+        return await self.post("/api/chat", json=request.model_dump(exclude_none=True))

     async def chat(self, request: OllamaChatRequestSchema) -> OllamaChatResponseSchema:
         response = await self.chat_api(request)

ai_review/clients/openai/v1/__init__.py (new empty file, no content changes)

ai_review/clients/openai/{client.py → v1/client.py}
@@ -1,7 +1,7 @@
 from httpx import Response, AsyncHTTPTransport, AsyncClient

-from ai_review.clients.openai.schema import OpenAIChatRequestSchema, OpenAIChatResponseSchema
-from ai_review.clients.openai.types import OpenAIHTTPClientProtocol
+from ai_review.clients.openai.v1.schema import OpenAIChatRequestSchema, OpenAIChatResponseSchema
+from ai_review.clients.openai.v1.types import OpenAIV1HTTPClientProtocol
 from ai_review.config import settings
 from ai_review.libs.http.client import HTTPClient
 from ai_review.libs.http.event_hooks.logger import LoggerEventHook
@@ -10,22 +10,22 @@ from ai_review.libs.http.transports.retry import RetryTransport
 from ai_review.libs.logger import get_logger


-class OpenAIHTTPClientError(HTTPClientError):
+class OpenAIV1HTTPClientError(HTTPClientError):
     pass


-class OpenAIHTTPClient(HTTPClient, OpenAIHTTPClientProtocol):
-    @handle_http_error(client='OpenAIHTTPClient', exception=OpenAIHTTPClientError)
+class OpenAIV1HTTPClient(HTTPClient, OpenAIV1HTTPClientProtocol):
+    @handle_http_error(client='OpenAIV1HTTPClient', exception=OpenAIV1HTTPClientError)
     async def chat_api(self, request: OpenAIChatRequestSchema) -> Response:
-        return await self.post("/chat/completions", json=request.model_dump())
+        return await self.post("/chat/completions", json=request.model_dump(exclude_none=True))

     async def chat(self, request: OpenAIChatRequestSchema) -> OpenAIChatResponseSchema:
         response = await self.chat_api(request)
         return OpenAIChatResponseSchema.model_validate_json(response.text)


-def get_openai_http_client() -> OpenAIHTTPClient:
-    logger = get_logger("OPENAI_HTTP_CLIENT")
+def get_openai_v1_http_client() -> OpenAIV1HTTPClient:
+    logger = get_logger("OPENAI_V1_HTTP_CLIENT")
     logger_event_hook = LoggerEventHook(logger=logger)
     retry_transport = RetryTransport(logger=logger, transport=AsyncHTTPTransport())

@@ -40,4 +40,4 @@ def get_openai_http_client() -> OpenAIHTTPClient:
         }
     )

-    return OpenAIHTTPClient(client=client)
+    return OpenAIV1HTTPClient(client=client)

ai_review/clients/openai/{schema.py → v1/schema.py}
@@ -21,8 +21,8 @@ class OpenAIChoiceSchema(BaseModel):
 class OpenAIChatRequestSchema(BaseModel):
     model: str
     messages: list[OpenAIMessageSchema]
-    max_tokens: int
-    temperature: float
+    max_tokens: int | None = None
+    temperature: float | None = None


 class OpenAIChatResponseSchema(BaseModel):

ai_review/clients/openai/v1/types.py
@@ -0,0 +1,8 @@
+from typing import Protocol
+
+from ai_review.clients.openai.v1.schema import OpenAIChatRequestSchema, OpenAIChatResponseSchema
+
+
+class OpenAIV1HTTPClientProtocol(Protocol):
+    async def chat(self, request: OpenAIChatRequestSchema) -> OpenAIChatResponseSchema:
+        ...
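
Because OpenAIV1HTTPClientProtocol subclasses typing.Protocol, conformance is structural: any object with a matching async chat method satisfies it, with no inheritance from the real client required. That is what lets the test fixtures later in this diff substitute fakes. A small illustrative sketch (the names here are hypothetical):

    from typing import Protocol

    class ChatClientProtocol(Protocol):
        async def chat(self, request: str) -> str:
            ...

    class FakeChatClient:  # no base class needed
        async def chat(self, request: str) -> str:
            return "stubbed response"

    client: ChatClientProtocol = FakeChatClient()  # accepted structurally by type checkers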

ai_review/clients/openai/v2/__init__.py (new empty file, no content changes)

ai_review/clients/openai/v2/client.py
@@ -0,0 +1,46 @@
+from httpx import Response, AsyncClient, AsyncHTTPTransport
+
+from ai_review.clients.openai.v2.schema import (
+    OpenAIResponsesRequestSchema,
+    OpenAIResponsesResponseSchema
+)
+from ai_review.clients.openai.v2.types import OpenAIV2HTTPClientProtocol
+from ai_review.config import settings
+from ai_review.libs.http.client import HTTPClient
+from ai_review.libs.http.event_hooks.logger import LoggerEventHook
+from ai_review.libs.http.handlers import HTTPClientError, handle_http_error
+from ai_review.libs.http.transports.retry import RetryTransport
+from ai_review.libs.logger import get_logger
+
+
+class OpenAIV2HTTPClientError(HTTPClientError):
+    pass
+
+
+class OpenAIV2HTTPClient(HTTPClient, OpenAIV2HTTPClientProtocol):
+    @handle_http_error(client='OpenAIV2HTTPClient', exception=OpenAIV2HTTPClientError)
+    async def chat_api(self, request: OpenAIResponsesRequestSchema) -> Response:
+        return await self.post("/responses", json=request.model_dump(exclude_none=True))
+
+    async def chat(self, request: OpenAIResponsesRequestSchema) -> OpenAIResponsesResponseSchema:
+        response = await self.chat_api(request)
+        return OpenAIResponsesResponseSchema.model_validate_json(response.text)
+
+
+def get_openai_v2_http_client() -> OpenAIV2HTTPClient:
+    logger = get_logger("OPENAI_V2_HTTP_CLIENT")
+    logger_event_hook = LoggerEventHook(logger=logger)
+    retry_transport = RetryTransport(logger=logger, transport=AsyncHTTPTransport())
+
+    client = AsyncClient(
+        timeout=settings.llm.http_client.timeout,
+        headers={"Authorization": f"Bearer {settings.llm.http_client.api_token_value}"},
+        base_url=settings.llm.http_client.api_url_value,
+        transport=retry_transport,
+        event_hooks={
+            'request': [logger_event_hook.request],
+            'response': [logger_event_hook.response]
+        }
+    )
+
+    return OpenAIV2HTTPClient(client=client)
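
A hedged usage sketch of the new v2 client, assuming settings.llm.http_client points at the OpenAI API (the prompt text is illustrative):

    import asyncio

    from ai_review.clients.openai.v2.client import get_openai_v2_http_client
    from ai_review.clients.openai.v2.schema import (
        OpenAIInputMessageSchema,
        OpenAIResponsesRequestSchema,
    )

    async def main() -> None:
        client = get_openai_v2_http_client()
        request = OpenAIResponsesRequestSchema(
            model="gpt-5",
            input=[OpenAIInputMessageSchema(role="user", content="Review this diff")],
        )
        # Unset optional fields (temperature, instructions, max_output_tokens)
        # are dropped from the POST body by model_dump(exclude_none=True).
        response = await client.chat(request)
        print(response.first_text)

    asyncio.run(main())

Note the endpoint difference: the v1 client posts to /chat/completions, while this client posts to /responses, OpenAI's newer Responses API shape.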

ai_review/clients/openai/v2/schema.py
@@ -0,0 +1,47 @@
+from pydantic import BaseModel
+
+
+class OpenAIResponseUsageSchema(BaseModel):
+    total_tokens: int
+    input_tokens: int
+    output_tokens: int
+
+
+class OpenAIInputMessageSchema(BaseModel):
+    role: str
+    content: str
+
+
+class OpenAIResponseContentSchema(BaseModel):
+    type: str
+    text: str | None = None
+
+
+class OpenAIResponseOutputSchema(BaseModel):
+    type: str
+    role: str | None = None
+    content: list[OpenAIResponseContentSchema] | None = None
+
+
+class OpenAIResponsesRequestSchema(BaseModel):
+    model: str
+    input: list[OpenAIInputMessageSchema]
+    temperature: float | None = None
+    instructions: str | None = None
+    max_output_tokens: int | None = None
+
+
+class OpenAIResponsesResponseSchema(BaseModel):
+    usage: OpenAIResponseUsageSchema
+    output: list[OpenAIResponseOutputSchema]
+
+    @property
+    def first_text(self) -> str:
+        results: list[str] = []
+        for block in self.output:
+            if block.type == "message" and block.content:
+                for content in block.content:
+                    if content.type == "output_text" and content.text:
+                        results.append(content.text)
+
+        return "".join(results).strip()
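
Despite its name, first_text concatenates every output_text block across all message outputs rather than returning only the first one. Derived directly from the schema above (token counts are arbitrary):

    resp = OpenAIResponsesResponseSchema(
        usage=OpenAIResponseUsageSchema(total_tokens=3, input_tokens=1, output_tokens=2),
        output=[
            OpenAIResponseOutputSchema(type="reasoning"),  # skipped: not a message block
            OpenAIResponseOutputSchema(
                type="message",
                role="assistant",
                content=[
                    OpenAIResponseContentSchema(type="output_text", text="Hello, "),
                    OpenAIResponseContentSchema(type="output_text", text="world"),
                ],
            ),
        ],
    )
    assert resp.first_text == "Hello, world"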

ai_review/clients/openai/v2/types.py
@@ -0,0 +1,11 @@
+from typing import Protocol
+
+from ai_review.clients.openai.v2.schema import (
+    OpenAIResponsesRequestSchema,
+    OpenAIResponsesResponseSchema
+)
+
+
+class OpenAIV2HTTPClientProtocol(Protocol):
+    async def chat(self, request: OpenAIResponsesRequestSchema) -> OpenAIResponsesResponseSchema:
+        ...

ai_review/clients/openrouter/client.py
@@ -17,7 +17,7 @@ class OpenRouterHTTPClientError(HTTPClientError):
 class OpenRouterHTTPClient(HTTPClient, OpenRouterHTTPClientProtocol):
     @handle_http_error(client="OpenRouterHTTPClient", exception=OpenRouterHTTPClientError)
     async def chat_api(self, request: OpenRouterChatRequestSchema) -> Response:
-        return await self.post("/chat/completions", json=request.model_dump())
+        return await self.post("/chat/completions", json=request.model_dump(exclude_none=True))

     async def chat(self, request: OpenRouterChatRequestSchema) -> OpenRouterChatResponseSchema:
         response = await self.chat_api(request)

ai_review/clients/openrouter/schema.py
@@ -21,8 +21,8 @@ class OpenRouterChoiceSchema(BaseModel):
 class OpenRouterChatRequestSchema(BaseModel):
     model: str
     messages: list[OpenRouterMessageSchema]
-    max_tokens: int
-    temperature: float
+    max_tokens: int | None = None
+    temperature: float | None = None


 class OpenRouterChatResponseSchema(BaseModel):

ai_review/libs/config/llm/meta.py
@@ -3,5 +3,5 @@ from pydantic import BaseModel, Field

 class LLMMetaConfig(BaseModel):
     model: str
-    max_tokens: int = Field(default=5000, ge=1)
-    temperature: float = Field(default=0.3, ge=0.0, le=2.0)
+    max_tokens: int | None = Field(default=None, ge=1)
+    temperature: float | None = Field(default=None, ge=0.0, le=2.0)
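
Making both fields Optional with default=None turns None into "not configured": unset values are excluded from request payloads (see the exclude_none changes above) and the provider's own defaults apply, while the ge/le constraints are still enforced whenever a value is given. A standalone sketch mirroring LLMMetaConfig (the MetaConfig name is hypothetical):

    from pydantic import BaseModel, Field, ValidationError

    class MetaConfig(BaseModel):
        max_tokens: int | None = Field(default=None, ge=1)

    MetaConfig()                 # ok: None bypasses the constraint
    MetaConfig(max_tokens=100)   # ok: 100 >= 1
    try:
        MetaConfig(max_tokens=0)
    except ValidationError:
        pass                     # ge=1 still rejects non-None values below 1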

ai_review/libs/config/llm/openai.py
@@ -5,6 +5,10 @@ from ai_review.libs.config.llm.meta import LLMMetaConfig
 class OpenAIMetaConfig(LLMMetaConfig):
     model: str = "gpt-4o-mini"

+    @property
+    def is_v2_model(self) -> bool:
+        return any(self.model.startswith(model) for model in ("gpt-5", "gpt-4.1"))
+

 class OpenAIHTTPClientConfig(HTTPClientWithTokenConfig):
     pass
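
is_v2_model is a plain prefix check, so it also matches derived model names, as the new test suite later in this diff confirms:

    meta = OpenAIMetaConfig(model="gpt-4.1-mini")
    assert meta.is_v2_model           # "gpt-4.1" prefix matches

    meta = OpenAIMetaConfig(model="gpt-4o-mini")
    assert not meta.is_v2_model       # "gpt-4o" is not a v2 prefix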

ai_review/resources/pricing.yaml
@@ -1,3 +1,14 @@
+# ===============================
+# 📊 Pricing per 1 token (USD)
+# ===============================
+# NOTE:
+# - Prices are per 1 token and can change over time.
+# - For the latest OpenAI and Anthropic pricing, see:
+#   - https://openai.com/api/pricing
+#   - https://www.anthropic.com/pricing
+# - For OpenRouter models, prices vary by provider: https://openrouter.ai
+
+# --- OpenAI ---
 gpt-4o-mini:
   input: 0.15e-6
   output: 0.60e-6
@@ -14,17 +25,22 @@ gpt-4.1:
   input: 5.00e-6
   output: 15.00e-6

+gpt-5:
+  input: 10.00e-6
+  output: 30.00e-6
+
 gpt-3.5-turbo:
   input: 0.50e-6
   output: 1.50e-6

+# --- Google Gemini ---
 gemini-2.5-flash-lite:
   input: 0.10e-6
   output: 0.40e-6

 gemini-2.0-flash-lite:
   input: 0.019e-6
-  output: 0.1e-6
+  output: 0.10e-6
@@ -38,6 +54,7 @@ gemini-2.5-pro-long-context:
   input: 2.50e-6
   output: 15.00e-6

+# --- Anthropic Claude ---
 claude-3.5-sonnet:
   input: 3.00e-6
   output: 15.00e-6
@@ -53,3 +70,24 @@ claude-3-sonnet:
 claude-3-haiku:
   input: 0.25e-6
   output: 1.25e-6
+
+# --- OpenRouter ---
+o3-mini:
+  input: 1.10e-6
+  output: 4.40e-6
+
+openai/chatgpt-4o-latest:
+  input: 5.00e-6
+  output: 15.00e-6
+
+mistralai/mixtral-8x7b:
+  input: 0.24e-6
+  output: 0.48e-6
+
+meta-llama/llama-3-8b-instruct:
+  input: 0.20e-6
+  output: 0.40e-6
+
+meta-llama/llama-3-70b-instruct:
+  input: 0.80e-6
+  output: 1.60e-6
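
Because the rates are stored per single token, the cost of a call is just the usage counts times the matching rates. An illustrative consumption sketch using PyYAML (this is not the package's own cost-calculation code, which is not part of this diff):

    import yaml  # PyYAML

    with open("ai_review/resources/pricing.yaml") as f:
        pricing = yaml.safe_load(f)

    rate = pricing["gpt-5"]
    cost = 1_000 * rate["input"] + 500 * rate["output"]
    print(f"${cost:.4f}")  # 1000 * 10.00e-6 + 500 * 30.00e-6 = $0.0250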

ai_review/services/llm/openai/client.py
@@ -1,28 +1,56 @@
-from ai_review.clients.openai.client import get_openai_http_client
-from ai_review.clients.openai.schema import OpenAIChatRequestSchema, OpenAIMessageSchema
+from ai_review.clients.openai.v1.client import get_openai_v1_http_client
+from ai_review.clients.openai.v1.schema import OpenAIChatRequestSchema, OpenAIMessageSchema
+from ai_review.clients.openai.v2.client import get_openai_v2_http_client
+from ai_review.clients.openai.v2.schema import OpenAIInputMessageSchema, OpenAIResponsesRequestSchema
 from ai_review.config import settings
 from ai_review.services.llm.types import LLMClientProtocol, ChatResultSchema


 class OpenAILLMClient(LLMClientProtocol):
     def __init__(self):
-        self.http_client = get_openai_http_client()
+        self.meta = settings.llm.meta

-    async def chat(self, prompt: str, prompt_system: str) -> ChatResultSchema:
-        meta = settings.llm.meta
+        self.http_client_v1 = get_openai_v1_http_client()
+        self.http_client_v2 = get_openai_v2_http_client()
+
+    async def chat_v1(self, prompt: str, prompt_system: str) -> ChatResultSchema:
         request = OpenAIChatRequestSchema(
-            model=meta.model,
+            model=self.meta.model,
             messages=[
                 OpenAIMessageSchema(role="system", content=prompt_system),
                 OpenAIMessageSchema(role="user", content=prompt),
             ],
-            max_tokens=meta.max_tokens,
-            temperature=meta.temperature,
+            max_tokens=self.meta.max_tokens,
+            temperature=self.meta.temperature,
         )
-        response = await self.http_client.chat(request)
+        response = await self.http_client_v1.chat(request)
         return ChatResultSchema(
             text=response.first_text,
             total_tokens=response.usage.total_tokens,
             prompt_tokens=response.usage.prompt_tokens,
             completion_tokens=response.usage.completion_tokens,
         )
+
+    async def chat_v2(self, prompt: str, prompt_system: str) -> ChatResultSchema:
+        request = OpenAIResponsesRequestSchema(
+            model=self.meta.model,
+            input=[
+                OpenAIInputMessageSchema(role="system", content=prompt_system),
+                OpenAIInputMessageSchema(role="user", content=prompt),
+            ],
+            temperature=self.meta.temperature,
+            max_output_tokens=self.meta.max_tokens,
+        )
+        response = await self.http_client_v2.chat(request)
+        return ChatResultSchema(
+            text=response.first_text,
+            total_tokens=response.usage.total_tokens,
+            prompt_tokens=response.usage.input_tokens,
+            completion_tokens=response.usage.output_tokens,
+        )
+
+    async def chat(self, prompt: str, prompt_system: str) -> ChatResultSchema:
+        if self.meta.is_v2_model:
+            return await self.chat_v2(prompt, prompt_system)
+
+        return await self.chat_v1(prompt, prompt_system)
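
The split keeps the public surface unchanged: callers still invoke chat(prompt, prompt_system), and the configured model decides the backend. A hedged usage sketch (assumes settings.llm is configured for the OpenAI provider with a valid token):

    import asyncio

    from ai_review.services.llm.openai.client import OpenAILLMClient

    async def main() -> None:
        client = OpenAILLMClient()
        # With meta.model = "gpt-5" this routes through chat_v2 (/responses);
        # with meta.model = "gpt-4o-mini" it routes through chat_v1 (/chat/completions).
        result = await client.chat("Review this diff", "You are a code reviewer.")
        print(result.text, result.total_tokens)

    asyncio.run(main())

One consequence of the unified ChatResultSchema: chat_v2 maps input_tokens/output_tokens onto the v1-style prompt_tokens/completion_tokens fields, so downstream cost and reporting code is unaffected.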

ai_review/tests/fixtures/clients/openai.py
@@ -3,14 +3,22 @@ from typing import Any
 import pytest
 from pydantic import HttpUrl, SecretStr

-from ai_review.clients.openai.schema import (
+from ai_review.clients.openai.v1.schema import (
     OpenAIUsageSchema,
     OpenAIChoiceSchema,
     OpenAIMessageSchema,
     OpenAIChatRequestSchema,
     OpenAIChatResponseSchema,
 )
-from ai_review.clients.openai.types import OpenAIHTTPClientProtocol
+from ai_review.clients.openai.v1.types import OpenAIV1HTTPClientProtocol
+from ai_review.clients.openai.v2.schema import (
+    OpenAIResponsesRequestSchema,
+    OpenAIResponsesResponseSchema,
+    OpenAIResponseUsageSchema,
+    OpenAIResponseOutputSchema,
+    OpenAIResponseContentSchema,
+)
+from ai_review.clients.openai.v2.types import OpenAIV2HTTPClientProtocol
 from ai_review.config import settings
 from ai_review.libs.config.llm.base import OpenAILLMConfig
 from ai_review.libs.config.llm.openai import OpenAIMetaConfig, OpenAIHTTPClientConfig
@@ -18,7 +26,7 @@ from ai_review.libs.constants.llm_provider import LLMProvider
 from ai_review.services.llm.openai.client import OpenAILLMClient


-class FakeOpenAIHTTPClient(OpenAIHTTPClientProtocol):
+class FakeOpenAIV1HTTPClient(OpenAIV1HTTPClientProtocol):
     def __init__(self, responses: dict[str, Any] | None = None) -> None:
         self.calls: list[tuple[str, dict]] = []
         self.responses = responses or {}
@@ -31,7 +39,39 @@ class FakeOpenAIHTTPClient(OpenAIHTTPClientProtocol):
                 usage=OpenAIUsageSchema(total_tokens=12, prompt_tokens=5, completion_tokens=7),
                 choices=[
                     OpenAIChoiceSchema(
-                        message=OpenAIMessageSchema(role="assistant", content="FAKE_OPENAI_RESPONSE")
+                        message=OpenAIMessageSchema(
+                            role="assistant",
+                            content="FAKE_OPENAI_V1_RESPONSE"
+                        )
+                    )
+                ],
+            ),
+        )
+
+
+class FakeOpenAIV2HTTPClient(OpenAIV2HTTPClientProtocol):
+    def __init__(self, responses: dict[str, Any] | None = None) -> None:
+        self.calls: list[tuple[str, dict]] = []
+        self.responses = responses or {}
+
+    async def chat(self, request: OpenAIResponsesRequestSchema) -> OpenAIResponsesResponseSchema:
+        self.calls.append(("chat", {"request": request}))
+        return self.responses.get(
+            "chat",
+            OpenAIResponsesResponseSchema(
+                usage=OpenAIResponseUsageSchema(
+                    total_tokens=20, input_tokens=10, output_tokens=10
+                ),
+                output=[
+                    OpenAIResponseOutputSchema(
+                        type="message",
+                        role="assistant",
+                        content=[
+                            OpenAIResponseContentSchema(
+                                type="output_text",
+                                text="FAKE_OPENAI_V2_RESPONSE"
+                            )
+                        ],
                     )
                 ],
             ),
@@ -39,31 +79,63 @@ class FakeOpenAIHTTPClient(OpenAIHTTPClientProtocol):


 @pytest.fixture
-def fake_openai_http_client() -> FakeOpenAIHTTPClient:
-    return FakeOpenAIHTTPClient()
+def fake_openai_v1_http_client() -> FakeOpenAIV1HTTPClient:
+    return FakeOpenAIV1HTTPClient()
+
+
+@pytest.fixture
+def fake_openai_v2_http_client() -> FakeOpenAIV2HTTPClient:
+    return FakeOpenAIV2HTTPClient()


 @pytest.fixture
 def openai_llm_client(
     monkeypatch: pytest.MonkeyPatch,
-    fake_openai_http_client: FakeOpenAIHTTPClient
+    fake_openai_v1_http_client: FakeOpenAIV1HTTPClient,
+    fake_openai_v2_http_client: FakeOpenAIV2HTTPClient,
 ) -> OpenAILLMClient:
     monkeypatch.setattr(
-        "ai_review.services.llm.openai.client.get_openai_http_client",
-        lambda: fake_openai_http_client,
+        "ai_review.services.llm.openai.client.get_openai_v1_http_client",
+        lambda: fake_openai_v1_http_client,
+    )
+    monkeypatch.setattr(
+        "ai_review.services.llm.openai.client.get_openai_v2_http_client",
+        lambda: fake_openai_v2_http_client,
     )
     return OpenAILLMClient()


 @pytest.fixture
-def openai_http_client_config(monkeypatch: pytest.MonkeyPatch):
+def openai_v1_http_client_config(monkeypatch: pytest.MonkeyPatch):
     fake_config = OpenAILLMConfig(
-        meta=OpenAIMetaConfig(),
+        meta=OpenAIMetaConfig(
+            model="gpt-4o-mini",
+            max_tokens=1200,
+            temperature=0.3
+        ),
         provider=LLMProvider.OPENAI,
         http_client=OpenAIHTTPClientConfig(
             timeout=10,
             api_url=HttpUrl("https://api.openai.com/v1"),
             api_token=SecretStr("fake-token"),
-        )
+        ),
+    )
+    monkeypatch.setattr(settings, "llm", fake_config)
+
+
+@pytest.fixture
+def openai_v2_http_client_config(monkeypatch: pytest.MonkeyPatch):
+    fake_config = OpenAILLMConfig(
+        meta=OpenAIMetaConfig(
+            model="gpt-5",
+            max_tokens=2000,
+            temperature=0.2
+        ),
+        provider=LLMProvider.OPENAI,
+        http_client=OpenAIHTTPClientConfig(
+            timeout=10,
+            api_url=HttpUrl("https://api.openai.com/v1"),
+            api_token=SecretStr("fake-token"),
+        ),
     )
     monkeypatch.setattr(settings, "llm", fake_config)

ai_review/tests/suites/clients/openai/v1/__init__.py (new empty file, no content changes)

ai_review/tests/suites/clients/openai/v1/test_client.py
@@ -0,0 +1,12 @@
+import pytest
+from httpx import AsyncClient
+
+from ai_review.clients.openai.v1.client import get_openai_v1_http_client, OpenAIV1HTTPClient
+
+
+@pytest.mark.usefixtures('openai_v1_http_client_config')
+def test_get_openai_v1_http_client_builds_ok():
+    openai_http_client = get_openai_v1_http_client()
+
+    assert isinstance(openai_http_client, OpenAIV1HTTPClient)
+    assert isinstance(openai_http_client.client, AsyncClient)

ai_review/tests/suites/clients/openai/{test_schema.py → v1/test_schema.py}
@@ -1,4 +1,4 @@
-from ai_review.clients.openai.schema import (
+from ai_review.clients.openai.v1.schema import (
     OpenAIUsageSchema,
     OpenAIMessageSchema,
     OpenAIChoiceSchema,

ai_review/tests/suites/clients/openai/v2/__init__.py (new empty file, no content changes)

ai_review/tests/suites/clients/openai/v2/test_client.py
@@ -0,0 +1,12 @@
+import pytest
+from httpx import AsyncClient
+
+from ai_review.clients.openai.v2.client import get_openai_v2_http_client, OpenAIV2HTTPClient
+
+
+@pytest.mark.usefixtures('openai_v2_http_client_config')
+def test_get_openai_v2_http_client_builds_ok():
+    openai_http_client = get_openai_v2_http_client()
+
+    assert isinstance(openai_http_client, OpenAIV2HTTPClient)
+    assert isinstance(openai_http_client.client, AsyncClient)

ai_review/tests/suites/clients/openai/v2/test_schema.py
@@ -0,0 +1,80 @@
+from ai_review.clients.openai.v2.schema import (
+    OpenAIResponseUsageSchema,
+    OpenAIInputMessageSchema,
+    OpenAIResponseContentSchema,
+    OpenAIResponseOutputSchema,
+    OpenAIResponsesRequestSchema,
+    OpenAIResponsesResponseSchema,
+)
+
+
+# ---------- OpenAIResponsesResponseSchema ----------
+
+def test_first_text_returns_combined_text():
+    resp = OpenAIResponsesResponseSchema(
+        usage=OpenAIResponseUsageSchema(total_tokens=42, input_tokens=21, output_tokens=21),
+        output=[
+            OpenAIResponseOutputSchema(
+                type="message",
+                role="assistant",
+                content=[
+                    OpenAIResponseContentSchema(type="output_text", text="Hello"),
+                    OpenAIResponseContentSchema(type="output_text", text=" World"),
+                ],
+            )
+        ],
+    )
+
+    assert resp.first_text == "Hello World"
+
+
+def test_first_text_empty_if_no_output():
+    resp = OpenAIResponsesResponseSchema(
+        usage=OpenAIResponseUsageSchema(total_tokens=0, input_tokens=0, output_tokens=0),
+        output=[],
+    )
+    assert resp.first_text == ""
+
+
+def test_first_text_ignores_non_message_blocks():
+    resp = OpenAIResponsesResponseSchema(
+        usage=OpenAIResponseUsageSchema(total_tokens=5, input_tokens=2, output_tokens=3),
+        output=[
+            OpenAIResponseOutputSchema(
+                type="reasoning",  # ignored by first_text
+                role=None,
+                content=None,
+            )
+        ],
+    )
+    assert resp.first_text == ""
+
+
+# ---------- OpenAIResponsesRequestSchema ----------
+
+def test_responses_request_schema_builds_ok():
+    msg = OpenAIInputMessageSchema(role="user", content="hello")
+    req = OpenAIResponsesRequestSchema(
+        model="gpt-5",
+        input=[msg],
+        temperature=0.2,
+        max_output_tokens=512,
+        instructions="You are a helpful assistant.",
+    )
+
+    assert req.model == "gpt-5"
+    assert req.input[0].role == "user"
+    assert req.input[0].content == "hello"
+    assert req.temperature == 0.2
+    assert req.max_output_tokens == 512
+    assert req.instructions == "You are a helpful assistant."
+
+
+def test_responses_request_schema_allows_none_tokens():
+    req = OpenAIResponsesRequestSchema(
+        model="gpt-5",
+        input=[OpenAIInputMessageSchema(role="user", content="test")],
+    )
+
+    dumped = req.model_dump(exclude_none=True)
+    assert "max_output_tokens" not in dumped

ai_review/tests/suites/libs/config/llm/__init__.py (new empty file, no content changes)

ai_review/tests/suites/libs/config/llm/test_openai.py
@@ -0,0 +1,28 @@
+import pytest
+
+from ai_review.libs.config.llm.openai import OpenAIMetaConfig
+
+
+@pytest.mark.parametrize(
+    "model, expected",
+    [
+        ("gpt-5", True),
+        ("gpt-5-preview", True),
+        ("gpt-4.1", True),
+        ("gpt-4.1-mini", True),
+        ("gpt-4o", False),
+        ("gpt-4o-mini", False),
+        ("gpt-3.5-turbo", False),
+        ("text-davinci-003", False),
+    ],
+)
+def test_is_v2_model_detection(model: str, expected: bool):
+    meta = OpenAIMetaConfig(model=model)
+    assert meta.is_v2_model is expected, f"Model {model} expected {expected} but got {meta.is_v2_model}"
+
+
+def test_is_v2_model_default_false():
+    meta = OpenAIMetaConfig()
+    assert meta.model == "gpt-4o-mini"
+    assert meta.is_v2_model is False
+    assert meta.max_tokens is None

ai_review/tests/suites/services/llm/openai/test_client.py
@@ -2,21 +2,38 @@ import pytest

 from ai_review.services.llm.openai.client import OpenAILLMClient
 from ai_review.services.llm.types import ChatResultSchema
-from ai_review.tests.fixtures.clients.openai import FakeOpenAIHTTPClient
+from ai_review.tests.fixtures.clients.openai import FakeOpenAIV1HTTPClient, FakeOpenAIV2HTTPClient


 @pytest.mark.asyncio
-@pytest.mark.usefixtures("openai_http_client_config")
-async def test_openai_llm_chat(
+@pytest.mark.usefixtures("openai_v1_http_client_config")
+async def test_openai_llm_chat_v1(
     openai_llm_client: OpenAILLMClient,
-    fake_openai_http_client: FakeOpenAIHTTPClient
+    fake_openai_v1_http_client: FakeOpenAIV1HTTPClient
 ):
     result = await openai_llm_client.chat("prompt", "prompt_system")

     assert isinstance(result, ChatResultSchema)
-    assert result.text == "FAKE_OPENAI_RESPONSE"
+    assert result.text == "FAKE_OPENAI_V1_RESPONSE"
     assert result.total_tokens == 12
     assert result.prompt_tokens == 5
     assert result.completion_tokens == 7

-    assert fake_openai_http_client.calls[0][0] == "chat"
+    assert fake_openai_v1_http_client.calls[0][0] == "chat"
+
+
+@pytest.mark.asyncio
+@pytest.mark.usefixtures("openai_v2_http_client_config")
+async def test_openai_llm_chat_v2(
+    openai_llm_client: OpenAILLMClient,
+    fake_openai_v2_http_client: FakeOpenAIV2HTTPClient
+):
+    result = await openai_llm_client.chat("prompt", "prompt_system")
+
+    assert isinstance(result, ChatResultSchema)
+    assert result.text == "FAKE_OPENAI_V2_RESPONSE"
+    assert result.total_tokens == 20
+    assert result.prompt_tokens == 10
+    assert result.completion_tokens == 10
+
+    assert fake_openai_v2_http_client.calls[0][0] == "chat"

ai_review/tests/suites/services/llm/test_factory.py
@@ -8,7 +8,7 @@ from ai_review.services.llm.openai.client import OpenAILLMClient
 from ai_review.services.llm.openrouter.client import OpenRouterLLMClient


-@pytest.mark.usefixtures("openai_http_client_config")
+@pytest.mark.usefixtures("openai_v1_http_client_config")
 def test_get_llm_client_returns_openai(monkeypatch: pytest.MonkeyPatch):
     client = get_llm_client()
     assert isinstance(client, OpenAILLMClient)

{xai_review-0.34.0.dist-info → xai_review-0.36.0.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: xai-review
-Version: 0.34.0
+Version: 0.36.0
 Summary: AI-powered code review tool for GitHub, GitLab, Bitbucket and Gitea — built with LLMs like OpenAI, Claude, Gemini, Ollama, and OpenRouter
 Author-email: Nikita Filonov <nikita.filonov@example.com>
 Maintainer-email: Nikita Filonov <nikita.filonov@example.com>
@@ -221,7 +221,7 @@ jobs:
         with:
           fetch-depth: 0

-      - uses: Nikita-Filonov/ai-review@v0.34.0
+      - uses: Nikita-Filonov/ai-review@v0.36.0
         with:
           review-command: ${{ inputs.review-command }}
         env:

{xai_review-0.34.0.dist-info → xai_review-0.36.0.dist-info}/RECORD
@@ -22,12 +22,12 @@ ai_review/clients/bitbucket/pr/schema/files.py,sha256=qBDfIv370TDMKM8yoLlm1c-BTH
 ai_review/clients/bitbucket/pr/schema/pull_request.py,sha256=Eq8huJmZ9smy2JlsPuMF-Vv8EmHYR3rFQ_5EFVc2QRc,845
 ai_review/clients/bitbucket/pr/schema/user.py,sha256=-TsvtifDNZSK82fqNXv6N3li2hnui1_9g2ugSBx4Ibs,125
 ai_review/clients/claude/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ai_review/clients/claude/client.py,sha256=uEadbBNBJnzjHDczbxXiiw1V1H1PdUWKu-Gn-eIDEmw,1890
-ai_review/clients/claude/schema.py,sha256=LE6KCjJKDXqBGU2Cno5XL5R8vUfScgskE9MqvE0Pt2A,887
+ai_review/clients/claude/client.py,sha256=I1UVlUHmLygFYw3pJIP39zYenULmEx_h65JH3XW7jG4,1907
+ai_review/clients/claude/schema.py,sha256=FJy5Wh5A3IJWPNn4K6KIFn4OTC713pxO9KCqdfUKwNA,915
 ai_review/clients/claude/types.py,sha256=y_-yF7zQrTvyiowS2b9xjIlAzkF8i6OfOjqo9eB8Xo4,267
 ai_review/clients/gemini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ai_review/clients/gemini/client.py,sha256=4G1LBcpiFcrITOysQbMwhY1db4hHcSGgyI-0XazZMV0,1889
-ai_review/clients/gemini/schema.py,sha256=5oVvbI-h_sw8bFreS4JUmMj-aXa_frvxK3H8sg4iJIA,2264
+ai_review/clients/gemini/client.py,sha256=rZsvGQG62Wu2LpBlAFr8w6_S_0oHH1fVZ8lK41995bk,1918
+ai_review/clients/gemini/schema.py,sha256=mBESW6CupZaRRSzFv5K0WTEPfzk6brAFpZG_EiloKRI,2299
 ai_review/clients/gemini/types.py,sha256=D-P0THorrQ8yq5P-NKAC65zzhEYRa9HkiXTORG9QoIk,267
 ai_review/clients/gitea/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/clients/gitea/client.py,sha256=cMxp1Ic44JFvSkuqLdWkAhsgMC6CwM3qmf-1CptqnV8,1152
@@ -65,16 +65,21 @@ ai_review/clients/gitlab/mr/schema/notes.py,sha256=9wmwULegmTO6ETSjYlMC6Fc_DIeT_
 ai_review/clients/gitlab/mr/schema/position.py,sha256=oYml4x6rlrqGahEEbSB1c1ko70geL_0_otbwP0JqV6k,371
 ai_review/clients/gitlab/mr/schema/user.py,sha256=RxgCM8oryPBNPDaFxcVqe11MogchMGaO1gALkZiscrU,112
 ai_review/clients/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ai_review/clients/ollama/client.py,sha256=KoJ9J5_Vfpv5XNJREshE_gA46uo9J0Z3qVC7wJPEcX8,1720
+ai_review/clients/ollama/client.py,sha256=lo_yOmB6nSdiFdIeSG1t1-AVHUZZEzKMpD6y3w2UJco,1737
 ai_review/clients/ollama/schema.py,sha256=A6oKwkkEVrduyzMR_lhLnaLyvKXqlfsXjkMIF2eXaYw,1310
 ai_review/clients/ollama/types.py,sha256=9ES8K-EClKYU7UsaMKgXvZ3sUOF9o6reEvfL6wFOJ4M,267
 ai_review/clients/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ai_review/clients/openai/client.py,sha256=jY1XG_5GtNboNjkXu3UtuXFx5V9rD6UskK7VT0lOzP8,1816
-ai_review/clients/openai/schema.py,sha256=glxwMtBrDA6W0BQgH-ruKe0bKH3Ps1P-Y1-2jGdqaUM,764
-ai_review/clients/openai/types.py,sha256=4VRY45ihKjii8w0d5XLnUGnHuBSh9wRsOP6lmkseC0Q,267
+ai_review/clients/openai/v1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ai_review/clients/openai/v1/client.py,sha256=zThyVUFWOF2wXi47PPrejK9ApXSc7I-2yaBHlDdiMaM,1861
+ai_review/clients/openai/v1/schema.py,sha256=WnYzQP4e5rHfX40kVHCekxgXXufX7pz6FBw4qiSKUKA,792
+ai_review/clients/openai/v1/types.py,sha256=giLb2LKyoHNgIq4wO__asPl4dUi1PEMiOAQIbsPpuNk,272
+ai_review/clients/openai/v2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ai_review/clients/openai/v2/client.py,sha256=MN61bCVkNPuOxaYZEh5duaZgpUmqnnSL2ZVA_9HWTHs,1896
+ai_review/clients/openai/v2/schema.py,sha256=AU9ewqgLSozSgAgVh15ql18VMMchQeTO7MW9gUT9jd0,1195
+ai_review/clients/openai/v2/types.py,sha256=BE4iFUSW1OHsah3YxTwMt1bWhZCZMssBEkTlLcR0POs,304
 ai_review/clients/openrouter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ai_review/clients/openrouter/client.py,sha256=HfcfOuUs08w7Na344i197I_WrNNjyB1xSBzirxkPGO0,2088
-ai_review/clients/openrouter/schema.py,sha256=U4c1wNhhAVLQ85J69IjR5g5tLly8KfrwGP7T5tsJPwI,799
+ai_review/clients/openrouter/client.py,sha256=llfdisXWXyA7XufqeToGaTKnGHkNdV0y84TrPsaa41A,2105
+ai_review/clients/openrouter/schema.py,sha256=hTPLIMIUHIKD4wDJsVPidn0LTQz58SsI1fId6JVU8KQ,827
 ai_review/clients/openrouter/types.py,sha256=9CFUy52GnfjjLRufz7SwY_fnzhQnn8czLl-XLWBSKGc,303
 ai_review/libs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/libs/json.py,sha256=g-P5_pNUomQ-bGHCXASvPKj9Og0s9MaLFVEAkzqGp1A,350
@@ -94,9 +99,9 @@ ai_review/libs/config/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 ai_review/libs/config/llm/base.py,sha256=yhRxizyRwwYH1swKeVzZaGAVm3I3nNME_y-h0iW5WTw,2374
 ai_review/libs/config/llm/claude.py,sha256=MoalXkBA6pEp01znS8ohTRopfea9RUcqhZX5lOIuek8,293
 ai_review/libs/config/llm/gemini.py,sha256=SKtlzsRuNWOlM9m3SFvcqOIjnml8lpPidp7FiGmIEz4,265
-ai_review/libs/config/llm/meta.py,sha256=cEcAHOwy-mQBKo9_KJrQe0I7qppq6h99lSmoWX4ElJI,195
+ai_review/libs/config/llm/meta.py,sha256=MtWboDCqoj_jZrrGoClVJNcHyskgyqJLcPMOztfxkiM,210
 ai_review/libs/config/llm/ollama.py,sha256=M6aiPb5GvYvkiGcgHTsh9bOw5JsBLqmfSKoIbHCejrU,372
-ai_review/libs/config/llm/openai.py,sha256=jGVL4gJ2wIacoKeK9Zc9LCgY95TxdeYOThdglVPErFU,262
+ai_review/libs/config/llm/openai.py,sha256=g2EF0h2JGD5iTOtNHg-G9bdqgVtqGjg4KLf-p9V6flY,395
 ai_review/libs/config/llm/openrouter.py,sha256=6G5fApCOv0fKRHCUpsuiPOcEdyUpDe5qiUUbHjA6TbE,337
 ai_review/libs/config/vcs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/libs/config/vcs/base.py,sha256=RJZhKysD-d8oYZQ2v1H74jyqdqtOCc8zZ0n9S4ovfHk,1471
@@ -137,7 +142,7 @@ ai_review/prompts/default_system_inline_reply.md,sha256=OhWQ5PZ-QFFgVVfmrsXEmvNA
 ai_review/prompts/default_system_summary.md,sha256=unEJ09G925TKqvjkTFKgl3g2AXT9GICe8kxTO50QhRg,224
 ai_review/prompts/default_system_summary_reply.md,sha256=qGc_qQtVUpmj_gOw-6dhL2gz6saSA__9UOcS0jAjBD0,687
 ai_review/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ai_review/resources/pricing.yaml,sha256=jZHCGF78GTlZsXC_IGZT8JutKqpUyKikYXwtxIFEAaE,746
+ai_review/resources/pricing.yaml,sha256=wxwF9sH6uUkQyaDok-cBcbXReGxg7gdsXgC2pRrQWn0,1553
 ai_review/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/services/artifacts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/services/artifacts/schema.py,sha256=o4dqG5LFCdAQY3wjRF5rImANe-X20g4zX_bCIKiHLSk,291
@@ -171,7 +176,7 @@ ai_review/services/llm/gemini/client.py,sha256=TR4HshVxtDV8_luQKCM3aFNH9tjAjpzNe
 ai_review/services/llm/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/services/llm/ollama/client.py,sha256=817nOQRsnaVqoY6LdO95l5JkRHkGvvS8TX7hezT2gqk,1479
 ai_review/services/llm/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ai_review/services/llm/openai/client.py,sha256=c3DWwLnwTheERdSGnMiQIbg5SaICouUAGClcQZSh1fE,1159
+ai_review/services/llm/openai/client.py,sha256=edOBi6pVY_jquUj8BCRj-Gn_q-JxcK_ohBmElGClXlw,2437
 ai_review/services/llm/openrouter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/services/llm/openrouter/client.py,sha256=wrUEETrer3XmNRtu3YzLIRNa_3ODTzsR_LU9kDkl-7I,1212
 ai_review/services/prompt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -239,7 +244,7 @@ ai_review/tests/fixtures/clients/gitea.py,sha256=WQLbOyFTqqtVQGHuLFgk9qANYS03eeC
 ai_review/tests/fixtures/clients/github.py,sha256=kC1L-nWZMn9O_uRfuT_B8R4sn8FRvISlBJMkRKaioS0,7814
 ai_review/tests/fixtures/clients/gitlab.py,sha256=AD6NJOJSw76hjAEiWewQ6Vu5g-cfQn0GTtdchuDBH9o,8042
 ai_review/tests/fixtures/clients/ollama.py,sha256=UUHDDPUraQAG8gBC-0UvftaK0BDYir5cJDlRKJymSQg,2109
-ai_review/tests/fixtures/clients/openai.py,sha256=PfnaYdrKwGiuAx8fnSJBZamAZEYJR1y8I4oHBj2SmU4,2291
+ai_review/tests/fixtures/clients/openai.py,sha256=sgRt7DxKqbPUv0GPFE94XUDtFwa5tIgi1DQYWPqPQHA,4749
 ai_review/tests/fixtures/clients/openrouter.py,sha256=TWCojwXP0y0_dlzFMzJra4uXSQ3Dv5wZQnm_Hbvxodg,2532
 ai_review/tests/fixtures/libs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/tests/fixtures/libs/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -297,8 +302,12 @@ ai_review/tests/suites/clients/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCe
 ai_review/tests/suites/clients/ollama/test_client.py,sha256=XZ8NAd1bS_ltTuYZPgqlutPRA6kbvH3_3SKTCbNBTgA,404
 ai_review/tests/suites/clients/ollama/test_schema.py,sha256=A93wCmxwGdvudfbA97VCPYP3gT6u6EYMetAg5fgURRA,1836
 ai_review/tests/suites/clients/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ai_review/tests/suites/clients/openai/test_client.py,sha256=6Wsxw6-6Uk0uPYFkzpWSwsxfCYUZhT3UYznayo-xlPI,404
-ai_review/tests/suites/clients/openai/test_schema.py,sha256=x1tamS4GC9pOTpjieKDbK2D73CVV4BkATppytwMevLo,1599
+ai_review/tests/suites/clients/openai/v1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ai_review/tests/suites/clients/openai/v1/test_client.py,sha256=raN0AvAQW46FEDOY8TfTmI3vzD4F2G7mrFdeMF2YSxQ,423
+ai_review/tests/suites/clients/openai/v1/test_schema.py,sha256=vLoo4nzcmVKA1hwuuZI354yYRV2N8ujSoF23iNkkipQ,1602
+ai_review/tests/suites/clients/openai/v2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ai_review/tests/suites/clients/openai/v2/test_client.py,sha256=pe72sdUCt916BLZ6vC6xsLPzZqEI-uO-3KvCJqgGf2M,423
+ai_review/tests/suites/clients/openai/v2/test_schema.py,sha256=yzwLILjI4j_MKfNlFeQ1QWISCsQTNNI87WvdWbrRlO4,2461
 ai_review/tests/suites/clients/openrouter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/tests/suites/clients/openrouter/test_client.py,sha256=WNH0p1Bl5J6zLKB1gSQ9smQMRTOo5-U-A60iJ0n4_DI,444
 ai_review/tests/suites/clients/openrouter/test_schema.py,sha256=9wt8-lR1u2KGvd6Iget_Yy-r33BllYLA-3AKe-S2E-c,1731
@@ -308,6 +317,8 @@ ai_review/tests/suites/libs/asynchronous/__init__.py,sha256=47DEQpj8HBSa-_TImW-5
 ai_review/tests/suites/libs/asynchronous/test_gather.py,sha256=Uzo0ctGFlllJU6TqA6hVWu_mcycJBAX5am8_bDO2eXk,1282
 ai_review/tests/suites/libs/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/tests/suites/libs/config/test_prompt.py,sha256=H3QC6LvYAwebnLed-NeT86TY9kPaMPy7Eppk5Bfi2SM,5601
+ai_review/tests/suites/libs/config/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ai_review/tests/suites/libs/config/llm/test_openai.py,sha256=H9sPccXuayoYrLVHxWc2WMD2rK2zlGP1xlAaa3G03Gs,777
 ai_review/tests/suites/libs/diff/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/tests/suites/libs/diff/test_models.py,sha256=RBFQ97LWhU8TlupxXkJ97ryAvJrSuOHLtT9biUBUMXg,3321
 ai_review/tests/suites/libs/diff/test_parser.py,sha256=rvWEVGIdaLBlDAnSevjRY7I1Zikj12d5GOgMk9QyHQQ,3013
@@ -332,7 +343,7 @@ ai_review/tests/suites/services/diff/test_tools.py,sha256=vsOSSIDZKkuD8dMCoBBEBt
 ai_review/tests/suites/services/hook/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/tests/suites/services/hook/test_service.py,sha256=TjNU2xiOQfUZZa8M3L2eHbtTwxse_B7QNn2h4118z1U,6637
 ai_review/tests/suites/services/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ai_review/tests/suites/services/llm/test_factory.py,sha256=e3pND1Rq4lIXpm_R8J1mRiEc7moEqJ9cGU3rK5tk4Bo,1685
+ai_review/tests/suites/services/llm/test_factory.py,sha256=HEE65FBfIrSQbwpIYRq3SyVv1NljELPTGUHpKCahiQ0,1688
 ai_review/tests/suites/services/llm/claude/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/tests/suites/services/llm/claude/test_client.py,sha256=ymIeuIax0Bp_CuXBSApK1RDl1JmbGc97uzXZToQOZO8,761
 ai_review/tests/suites/services/llm/gemini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -340,7 +351,7 @@ ai_review/tests/suites/services/llm/gemini/test_client.py,sha256=RjYViMZTgTdbzmD
 ai_review/tests/suites/services/llm/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/tests/suites/services/llm/ollama/test_client.py,sha256=Eu4OERB00SJwCKznyOCyqSFTDBp9J2Lw-BcW7sPJQM4,760
 ai_review/tests/suites/services/llm/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ai_review/tests/suites/services/llm/openai/test_client.py,sha256=yzIL8GYHyX9iLKIlaF__87aue9w0cr66feoMaCv5gms,761
+ai_review/tests/suites/services/llm/openai/test_client.py,sha256=bF0MFaTq_45ob4f8eZXF3Fn0aUMy848dBaQXiAiqIPg,1373
 ai_review/tests/suites/services/llm/openrouter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/tests/suites/services/llm/openrouter/test_client.py,sha256=9YRwhkgeRDdRi_EMFh_T0u4wgEFj2AMgAiusrYWzeEc,813
 ai_review/tests/suites/services/prompt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -389,9 +400,9 @@ ai_review/tests/suites/services/vcs/github/test_client.py,sha256=mNt1bA6aVU3REsJ
 ai_review/tests/suites/services/vcs/gitlab/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ai_review/tests/suites/services/vcs/gitlab/test_adapter.py,sha256=BYBP2g1AKF_jCSJYJj16pW7M_6PprwD9reYEpdw3StU,4340
 ai_review/tests/suites/services/vcs/gitlab/test_client.py,sha256=dnI-YxYADmVF2GS9rp6-JPkcqsn4sN8Fjbe4MkeYMaE,8476
-xai_review-0.34.0.dist-info/licenses/LICENSE,sha256=p-v8m7Kmz4KKc7PcvsGiGEmCw9AiSXY4_ylOPy_u--Y,11343
-xai_review-0.34.0.dist-info/METADATA,sha256=Zpuk0sHxSQKsA84Xdv4Ba1V6Seghvxd_XCh48f5BKDw,12911
-xai_review-0.34.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-xai_review-0.34.0.dist-info/entry_points.txt,sha256=JyC5URanMi5io5P_PXQf7H_I1OGIpk5cZQhaPQ0g4Zs,53
-xai_review-0.34.0.dist-info/top_level.txt,sha256=sTsZbfzLoqvRZKdKa-BcxWvjlHdrpbeJ6DrGY0EuR0E,10
-xai_review-0.34.0.dist-info/RECORD,,
+xai_review-0.36.0.dist-info/licenses/LICENSE,sha256=p-v8m7Kmz4KKc7PcvsGiGEmCw9AiSXY4_ylOPy_u--Y,11343
+xai_review-0.36.0.dist-info/METADATA,sha256=kLVjP_H0HJzK7_3tWvhrBbnuYaKPssSx2hzldptp0HI,12911
+xai_review-0.36.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+xai_review-0.36.0.dist-info/entry_points.txt,sha256=JyC5URanMi5io5P_PXQf7H_I1OGIpk5cZQhaPQ0g4Zs,53
+xai_review-0.36.0.dist-info/top_level.txt,sha256=sTsZbfzLoqvRZKdKa-BcxWvjlHdrpbeJ6DrGY0EuR0E,10
+xai_review-0.36.0.dist-info/RECORD,,

ai_review/clients/openai/types.py (deleted)
@@ -1,8 +0,0 @@
-from typing import Protocol
-
-from ai_review.clients.openai.schema import OpenAIChatRequestSchema, OpenAIChatResponseSchema
-
-
-class OpenAIHTTPClientProtocol(Protocol):
-    async def chat(self, request: OpenAIChatRequestSchema) -> OpenAIChatResponseSchema:
-        ...

ai_review/tests/suites/clients/openai/test_client.py (deleted)
@@ -1,12 +0,0 @@
-import pytest
-from httpx import AsyncClient
-
-from ai_review.clients.openai.client import get_openai_http_client, OpenAIHTTPClient
-
-
-@pytest.mark.usefixtures('openai_http_client_config')
-def test_get_openai_http_client_builds_ok():
-    openai_http_client = get_openai_http_client()
-
-    assert isinstance(openai_http_client, OpenAIHTTPClient)
-    assert isinstance(openai_http_client.client, AsyncClient)