xai-review 0.33.0__py3-none-any.whl → 0.35.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of xai-review might be problematic.
- ai_review/clients/claude/client.py +1 -1
- ai_review/clients/claude/schema.py +2 -2
- ai_review/clients/gemini/client.py +2 -1
- ai_review/clients/gemini/schema.py +2 -2
- ai_review/clients/ollama/client.py +1 -1
- ai_review/clients/openai/v1/__init__.py +0 -0
- ai_review/clients/openai/{client.py → v1/client.py} +9 -9
- ai_review/clients/openai/{schema.py → v1/schema.py} +1 -1
- ai_review/clients/openai/v1/types.py +8 -0
- ai_review/clients/openai/v2/__init__.py +0 -0
- ai_review/clients/openai/v2/client.py +46 -0
- ai_review/clients/openai/v2/schema.py +47 -0
- ai_review/clients/openai/v2/types.py +11 -0
- ai_review/clients/openrouter/__init__.py +0 -0
- ai_review/clients/openrouter/client.py +50 -0
- ai_review/clients/openrouter/schema.py +36 -0
- ai_review/clients/openrouter/types.py +11 -0
- ai_review/libs/config/llm/base.py +8 -1
- ai_review/libs/config/llm/meta.py +2 -2
- ai_review/libs/config/llm/openai.py +4 -0
- ai_review/libs/config/llm/openrouter.py +12 -0
- ai_review/libs/constants/llm_provider.py +1 -0
- ai_review/resources/pricing.yaml +39 -1
- ai_review/services/llm/factory.py +3 -0
- ai_review/services/llm/openai/client.py +37 -9
- ai_review/services/llm/openrouter/__init__.py +0 -0
- ai_review/services/llm/openrouter/client.py +31 -0
- ai_review/tests/fixtures/clients/openai.py +84 -12
- ai_review/tests/fixtures/clients/openrouter.py +72 -0
- ai_review/tests/suites/clients/openai/v1/__init__.py +0 -0
- ai_review/tests/suites/clients/openai/v1/test_client.py +12 -0
- ai_review/tests/suites/clients/openai/{test_schema.py → v1/test_schema.py} +1 -1
- ai_review/tests/suites/clients/openai/v2/__init__.py +0 -0
- ai_review/tests/suites/clients/openai/v2/test_client.py +12 -0
- ai_review/tests/suites/clients/openai/v2/test_schema.py +80 -0
- ai_review/tests/suites/clients/openrouter/__init__.py +0 -0
- ai_review/tests/suites/clients/openrouter/test_client.py +12 -0
- ai_review/tests/suites/clients/openrouter/test_schema.py +57 -0
- ai_review/tests/suites/libs/config/llm/__init__.py +0 -0
- ai_review/tests/suites/libs/config/llm/test_openai.py +28 -0
- ai_review/tests/suites/services/llm/openai/test_client.py +23 -6
- ai_review/tests/suites/services/llm/openrouter/__init__.py +0 -0
- ai_review/tests/suites/services/llm/openrouter/test_client.py +22 -0
- ai_review/tests/suites/services/llm/test_factory.py +8 -1
- ai_review/tests/suites/services/review/gateway/test_review_comment_gateway.py +5 -5
- ai_review/tests/suites/services/review/gateway/test_review_dry_run_comment_gateway.py +5 -5
- ai_review/tests/suites/services/review/gateway/test_review_llm_gateway.py +2 -2
- ai_review/tests/suites/services/review/test_service.py +2 -2
- {xai_review-0.33.0.dist-info → xai_review-0.35.0.dist-info}/METADATA +11 -10
- {xai_review-0.33.0.dist-info → xai_review-0.35.0.dist-info}/RECORD +54 -30
- ai_review/clients/openai/types.py +0 -8
- ai_review/tests/suites/clients/openai/test_client.py +0 -12
- {xai_review-0.33.0.dist-info → xai_review-0.35.0.dist-info}/WHEEL +0 -0
- {xai_review-0.33.0.dist-info → xai_review-0.35.0.dist-info}/entry_points.txt +0 -0
- {xai_review-0.33.0.dist-info → xai_review-0.35.0.dist-info}/licenses/LICENSE +0 -0
- {xai_review-0.33.0.dist-info → xai_review-0.35.0.dist-info}/top_level.txt +0 -0
ai_review/tests/fixtures/clients/openai.py

@@ -3,14 +3,22 @@ from typing import Any
 import pytest
 from pydantic import HttpUrl, SecretStr
 
-from ai_review.clients.openai.schema import (
+from ai_review.clients.openai.v1.schema import (
     OpenAIUsageSchema,
     OpenAIChoiceSchema,
     OpenAIMessageSchema,
     OpenAIChatRequestSchema,
     OpenAIChatResponseSchema,
 )
-from ai_review.clients.openai.types import OpenAIHTTPClientProtocol
+from ai_review.clients.openai.v1.types import OpenAIV1HTTPClientProtocol
+from ai_review.clients.openai.v2.schema import (
+    OpenAIResponsesRequestSchema,
+    OpenAIResponsesResponseSchema,
+    OpenAIResponseUsageSchema,
+    OpenAIResponseOutputSchema,
+    OpenAIResponseContentSchema,
+)
+from ai_review.clients.openai.v2.types import OpenAIV2HTTPClientProtocol
 from ai_review.config import settings
 from ai_review.libs.config.llm.base import OpenAILLMConfig
 from ai_review.libs.config.llm.openai import OpenAIMetaConfig, OpenAIHTTPClientConfig
@@ -18,7 +26,7 @@ from ai_review.libs.constants.llm_provider import LLMProvider
 from ai_review.services.llm.openai.client import OpenAILLMClient
 
 
-class FakeOpenAIHTTPClient(OpenAIHTTPClientProtocol):
+class FakeOpenAIV1HTTPClient(OpenAIV1HTTPClientProtocol):
     def __init__(self, responses: dict[str, Any] | None = None) -> None:
         self.calls: list[tuple[str, dict]] = []
         self.responses = responses or {}
@@ -31,7 +39,39 @@ class FakeOpenAIHTTPClient(OpenAIHTTPClientProtocol):
                 usage=OpenAIUsageSchema(total_tokens=12, prompt_tokens=5, completion_tokens=7),
                 choices=[
                     OpenAIChoiceSchema(
-                        message=OpenAIMessageSchema(
+                        message=OpenAIMessageSchema(
+                            role="assistant",
+                            content="FAKE_OPENAI_V1_RESPONSE"
+                        )
+                    )
+                ],
+            ),
+        )
+
+
+class FakeOpenAIV2HTTPClient(OpenAIV2HTTPClientProtocol):
+    def __init__(self, responses: dict[str, Any] | None = None) -> None:
+        self.calls: list[tuple[str, dict]] = []
+        self.responses = responses or {}
+
+    async def chat(self, request: OpenAIResponsesRequestSchema) -> OpenAIResponsesResponseSchema:
+        self.calls.append(("chat", {"request": request}))
+        return self.responses.get(
+            "chat",
+            OpenAIResponsesResponseSchema(
+                usage=OpenAIResponseUsageSchema(
+                    total_tokens=20, input_tokens=10, output_tokens=10
+                ),
+                output=[
+                    OpenAIResponseOutputSchema(
+                        type="message",
+                        role="assistant",
+                        content=[
+                            OpenAIResponseContentSchema(
+                                type="output_text",
+                                text="FAKE_OPENAI_V2_RESPONSE"
+                            )
+                        ],
                     )
                 ],
             ),
@@ -39,31 +79,63 @@ class FakeOpenAIHTTPClient(OpenAIHTTPClientProtocol):
 
 
 @pytest.fixture
-def
-    return
+def fake_openai_v1_http_client() -> FakeOpenAIV1HTTPClient:
+    return FakeOpenAIV1HTTPClient()
+
+
+@pytest.fixture
+def fake_openai_v2_http_client() -> FakeOpenAIV2HTTPClient:
+    return FakeOpenAIV2HTTPClient()
 
 
 @pytest.fixture
 def openai_llm_client(
     monkeypatch: pytest.MonkeyPatch,
-
+    fake_openai_v1_http_client: FakeOpenAIV1HTTPClient,
+    fake_openai_v2_http_client: FakeOpenAIV2HTTPClient,
 ) -> OpenAILLMClient:
     monkeypatch.setattr(
-        "ai_review.services.llm.openai.client.
-        lambda:
+        "ai_review.services.llm.openai.client.get_openai_v1_http_client",
+        lambda: fake_openai_v1_http_client,
+    )
+    monkeypatch.setattr(
+        "ai_review.services.llm.openai.client.get_openai_v2_http_client",
+        lambda: fake_openai_v2_http_client,
     )
     return OpenAILLMClient()
 
 
 @pytest.fixture
-def
+def openai_v1_http_client_config(monkeypatch: pytest.MonkeyPatch):
     fake_config = OpenAILLMConfig(
-        meta=OpenAIMetaConfig(
+        meta=OpenAIMetaConfig(
+            model="gpt-4o-mini",
+            max_tokens=1200,
+            temperature=0.3
+        ),
         provider=LLMProvider.OPENAI,
         http_client=OpenAIHTTPClientConfig(
             timeout=10,
             api_url=HttpUrl("https://api.openai.com/v1"),
             api_token=SecretStr("fake-token"),
-        )
+        ),
+    )
+    monkeypatch.setattr(settings, "llm", fake_config)
+
+
+@pytest.fixture
+def openai_v2_http_client_config(monkeypatch: pytest.MonkeyPatch):
+    fake_config = OpenAILLMConfig(
+        meta=OpenAIMetaConfig(
+            model="gpt-5",
+            max_tokens=2000,
+            temperature=0.2
+        ),
+        provider=LLMProvider.OPENAI,
+        http_client=OpenAIHTTPClientConfig(
+            timeout=10,
+            api_url=HttpUrl("https://api.openai.com/v1"),
+            api_token=SecretStr("fake-token"),
+        ),
     )
     monkeypatch.setattr(settings, "llm", fake_config)
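The fixture above patches both get_openai_v1_http_client and get_openai_v2_http_client inside ai_review.services.llm.openai.client, which implies the client now picks one of two transports from configuration rather than taking one as an argument. A minimal sketch of that kind of config-driven dispatch, with stand-in factories (the real wiring is not shown in this diff):

# Sketch only: selection between two OpenAI transports based on the
# configured model. The factory names come from the diff above; their
# bodies here are illustrative stand-ins.


def get_openai_v1_http_client() -> str:
    return "v1-chat-completions-client"


def get_openai_v2_http_client() -> str:
    return "v2-responses-client"


class OpenAILLMClient:
    def __init__(self, is_v2_model: bool) -> None:
        # Choose the transport once, up front, based on the configured model.
        if is_v2_model:
            self.http_client = get_openai_v2_http_client()
        else:
            self.http_client = get_openai_v1_http_client()


assert OpenAILLMClient(is_v2_model=True).http_client == "v2-responses-client"
assert OpenAILLMClient(is_v2_model=False).http_client == "v1-chat-completions-client"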
ai_review/tests/fixtures/clients/openrouter.py

@@ -0,0 +1,72 @@
+from typing import Any
+
+import pytest
+from pydantic import HttpUrl, SecretStr
+
+from ai_review.clients.openrouter.schema import (
+    OpenRouterUsageSchema,
+    OpenRouterChoiceSchema,
+    OpenRouterMessageSchema,
+    OpenRouterChatRequestSchema,
+    OpenRouterChatResponseSchema,
+)
+from ai_review.clients.openrouter.types import OpenRouterHTTPClientProtocol
+from ai_review.config import settings
+from ai_review.libs.config.llm.base import OpenRouterLLMConfig
+from ai_review.libs.config.llm.openrouter import OpenRouterMetaConfig, OpenRouterHTTPClientConfig
+from ai_review.libs.constants.llm_provider import LLMProvider
+from ai_review.services.llm.openrouter.client import OpenRouterLLMClient
+
+
+class FakeOpenRouterHTTPClient(OpenRouterHTTPClientProtocol):
+    def __init__(self, responses: dict[str, Any] | None = None) -> None:
+        self.calls: list[tuple[str, dict]] = []
+        self.responses = responses or {}
+
+    async def chat(self, request: OpenRouterChatRequestSchema) -> OpenRouterChatResponseSchema:
+        self.calls.append(("chat", {"request": request}))
+        return self.responses.get(
+            "chat",
+            OpenRouterChatResponseSchema(
+                usage=OpenRouterUsageSchema(total_tokens=12, prompt_tokens=5, completion_tokens=7),
+                choices=[
+                    OpenRouterChoiceSchema(
+                        message=OpenRouterMessageSchema(
+                            role="assistant",
+                            content="FAKE_OPENROUTER_RESPONSE"
+                        )
+                    )
+                ],
+            ),
+        )
+
+
+@pytest.fixture
+def fake_openrouter_http_client() -> FakeOpenRouterHTTPClient:
+    return FakeOpenRouterHTTPClient()
+
+
+@pytest.fixture
+def openrouter_llm_client(
+    monkeypatch: pytest.MonkeyPatch,
+    fake_openrouter_http_client: FakeOpenRouterHTTPClient
+) -> OpenRouterLLMClient:
+    monkeypatch.setattr(
+        "ai_review.services.llm.openrouter.client.get_openrouter_http_client",
+        lambda: fake_openrouter_http_client,
+    )
+    return OpenRouterLLMClient()
+
+
+@pytest.fixture
+def openrouter_http_client_config(monkeypatch: pytest.MonkeyPatch):
+    fake_config = OpenRouterLLMConfig(
+        meta=OpenRouterMetaConfig(),
+        provider=LLMProvider.OPENROUTER,
+        http_client=OpenRouterHTTPClientConfig(
+            timeout=10,
+            api_url=HttpUrl("https://openrouter.ai/api/v1"),
+            api_token=SecretStr("fake-token"),
+        )
+    )
+    monkeypatch.setattr(settings, "llm", fake_config)
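All of the *_http_client_config fixtures end the same way: build a complete fake LLM config, then swap it into the global settings object with monkeypatch.setattr, which pytest undoes automatically after each test. A generic illustration of the pattern (the Settings class below is a hypothetical stand-in, not ai_review's):

import pytest


class Settings:
    # Hypothetical stand-in for ai_review.config.settings.
    llm = {"provider": "openai", "api_token": "real-token"}


settings = Settings()


@pytest.fixture
def fake_llm_config(monkeypatch: pytest.MonkeyPatch) -> None:
    # monkeypatch records the original value and restores it after the test.
    monkeypatch.setattr(settings, "llm", {"provider": "openai", "api_token": "fake-token"})


def test_uses_fake_config(fake_llm_config: None) -> None:
    assert settings.llm["api_token"] == "fake-token"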
ai_review/tests/suites/clients/openai/v1/__init__.py
File without changes
ai_review/tests/suites/clients/openai/v1/test_client.py

@@ -0,0 +1,12 @@
+import pytest
+from httpx import AsyncClient
+
+from ai_review.clients.openai.v1.client import get_openai_v1_http_client, OpenAIV1HTTPClient
+
+
+@pytest.mark.usefixtures('openai_v1_http_client_config')
+def test_get_openai_v1_http_client_builds_ok():
+    openai_http_client = get_openai_v1_http_client()
+
+    assert isinstance(openai_http_client, OpenAIV1HTTPClient)
+    assert isinstance(openai_http_client.client, AsyncClient)
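This builder test (like its v2 and OpenRouter counterparts below) asserts two things: the factory returns the package's wrapper type, and the wrapper exposes a configured httpx.AsyncClient as .client. A rough sketch of what such a builder tends to look like; the wrapper name and the Bearer-token header are assumptions, not taken from this diff:

import httpx


class OpenAIV1HTTPClientSketch:
    # Hypothetical wrapper; only the .client attribute mirrors what the
    # tests actually assert.
    def __init__(self, api_url: str, api_token: str, timeout: float) -> None:
        self.client = httpx.AsyncClient(
            base_url=api_url,
            timeout=timeout,
            headers={"Authorization": f"Bearer {api_token}"},
        )


http_client = OpenAIV1HTTPClientSketch("https://api.openai.com/v1", "fake-token", 10)
assert isinstance(http_client.client, httpx.AsyncClient)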
ai_review/tests/suites/clients/openai/v2/__init__.py
File without changes
ai_review/tests/suites/clients/openai/v2/test_client.py

@@ -0,0 +1,12 @@
+import pytest
+from httpx import AsyncClient
+
+from ai_review.clients.openai.v2.client import get_openai_v2_http_client, OpenAIV2HTTPClient
+
+
+@pytest.mark.usefixtures('openai_v2_http_client_config')
+def test_get_openai_v2_http_client_builds_ok():
+    openai_http_client = get_openai_v2_http_client()
+
+    assert isinstance(openai_http_client, OpenAIV2HTTPClient)
+    assert isinstance(openai_http_client.client, AsyncClient)
ai_review/tests/suites/clients/openai/v2/test_schema.py

@@ -0,0 +1,80 @@
+from ai_review.clients.openai.v2.schema import (
+    OpenAIResponseUsageSchema,
+    OpenAIInputMessageSchema,
+    OpenAIResponseContentSchema,
+    OpenAIResponseOutputSchema,
+    OpenAIResponsesRequestSchema,
+    OpenAIResponsesResponseSchema,
+)
+
+
+# ---------- OpenAIResponsesResponseSchema ----------
+
+def test_first_text_returns_combined_text():
+    resp = OpenAIResponsesResponseSchema(
+        usage=OpenAIResponseUsageSchema(total_tokens=42, input_tokens=21, output_tokens=21),
+        output=[
+            OpenAIResponseOutputSchema(
+                type="message",
+                role="assistant",
+                content=[
+                    OpenAIResponseContentSchema(type="output_text", text="Hello"),
+                    OpenAIResponseContentSchema(type="output_text", text=" World"),
+                ],
+            )
+        ],
+    )
+
+    assert resp.first_text == "Hello World"
+
+
+def test_first_text_empty_if_no_output():
+    resp = OpenAIResponsesResponseSchema(
+        usage=OpenAIResponseUsageSchema(total_tokens=0, input_tokens=0, output_tokens=0),
+        output=[],
+    )
+    assert resp.first_text == ""
+
+
+def test_first_text_ignores_non_message_blocks():
+    resp = OpenAIResponsesResponseSchema(
+        usage=OpenAIResponseUsageSchema(total_tokens=5, input_tokens=2, output_tokens=3),
+        output=[
+            OpenAIResponseOutputSchema(
+                type="reasoning",  # ignored
+                role=None,
+                content=None,
+            )
+        ],
+    )
+    assert resp.first_text == ""
+
+
+# ---------- OpenAIResponsesRequestSchema ----------
+
+def test_responses_request_schema_builds_ok():
+    msg = OpenAIInputMessageSchema(role="user", content="hello")
+    req = OpenAIResponsesRequestSchema(
+        model="gpt-5",
+        input=[msg],
+        temperature=0.2,
+        max_output_tokens=512,
+        instructions="You are a helpful assistant.",
+    )
+
+    assert req.model == "gpt-5"
+    assert req.input[0].role == "user"
+    assert req.input[0].content == "hello"
+    assert req.temperature == 0.2
+    assert req.max_output_tokens == 512
+    assert req.instructions == "You are a helpful assistant."
+
+
+def test_responses_request_schema_allows_none_tokens():
+    req = OpenAIResponsesRequestSchema(
+        model="gpt-5",
+        input=[OpenAIInputMessageSchema(role="user", content="test")],
+    )
+
+    dumped = req.model_dump(exclude_none=True)
+    assert "max_output_tokens" not in dumped
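The three first_text tests pin down the property's contract on the Responses-style schema: concatenate the output_text parts of message blocks and ignore everything else (reasoning blocks, blocks without content). One way to satisfy exactly these tests, using simplified stand-in models rather than the package's real schema:

from pydantic import BaseModel


class ContentPart(BaseModel):
    type: str
    text: str


class OutputBlock(BaseModel):
    type: str
    role: str | None = None
    content: list[ContentPart] | None = None


class ResponsesResponse(BaseModel):
    output: list[OutputBlock]

    @property
    def first_text(self) -> str:
        # Join output_text parts of message blocks; skip reasoning blocks
        # and blocks without content.
        return "".join(
            part.text
            for block in self.output
            if block.type == "message" and block.content
            for part in block.content
            if part.type == "output_text"
        )


resp = ResponsesResponse(output=[OutputBlock(
    type="message",
    role="assistant",
    content=[ContentPart(type="output_text", text="Hello"),
             ContentPart(type="output_text", text=" World")],
)])
assert resp.first_text == "Hello World"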
ai_review/tests/suites/clients/openrouter/__init__.py
File without changes
ai_review/tests/suites/clients/openrouter/test_client.py

@@ -0,0 +1,12 @@
+import pytest
+from httpx import AsyncClient
+
+from ai_review.clients.openrouter.client import get_openrouter_http_client, OpenRouterHTTPClient
+
+
+@pytest.mark.usefixtures('openrouter_http_client_config')
+def test_get_openrouter_http_client_builds_ok():
+    openrouter_http_client = get_openrouter_http_client()
+
+    assert isinstance(openrouter_http_client, OpenRouterHTTPClient)
+    assert isinstance(openrouter_http_client.client, AsyncClient)
ai_review/tests/suites/clients/openrouter/test_schema.py

@@ -0,0 +1,57 @@
+from ai_review.clients.openrouter.schema import (
+    OpenRouterUsageSchema,
+    OpenRouterChoiceSchema,
+    OpenRouterMessageSchema,
+    OpenRouterChatRequestSchema,
+    OpenRouterChatResponseSchema,
+)
+
+
+# ---------- OpenRouterChatResponseSchema ----------
+
+def test_first_text_returns_text():
+    resp = OpenRouterChatResponseSchema(
+        usage=OpenRouterUsageSchema(total_tokens=5, prompt_tokens=2, completion_tokens=3),
+        choices=[
+            OpenRouterChoiceSchema(
+                message=OpenRouterMessageSchema(role="assistant", content=" hello world ")
+            )
+        ],
+    )
+    assert resp.first_text == "hello world"
+
+
+def test_first_text_empty_if_no_choices():
+    resp = OpenRouterChatResponseSchema(
+        usage=OpenRouterUsageSchema(total_tokens=1, prompt_tokens=1, completion_tokens=0),
+        choices=[],
+    )
+    assert resp.first_text == ""
+
+
+def test_first_text_strips_and_handles_empty_content():
+    resp = OpenRouterChatResponseSchema(
+        usage=OpenRouterUsageSchema(total_tokens=1, prompt_tokens=1, completion_tokens=0),
+        choices=[
+            OpenRouterChoiceSchema(
+                message=OpenRouterMessageSchema(role="assistant", content="   ")
+            )
+        ],
+    )
+    assert resp.first_text == ""
+
+
+# ---------- OpenRouterChatRequestSchema ----------
+
+def test_chat_request_schema_builds_ok():
+    msg = OpenRouterMessageSchema(role="user", content="hello")
+    req = OpenRouterChatRequestSchema(
+        model="gpt-4o-mini",
+        messages=[msg],
+        max_tokens=100,
+        temperature=0.3,
+    )
+    assert req.model == "gpt-4o-mini"
+    assert req.messages[0].content == "hello"
+    assert req.max_tokens == 100
+    assert req.temperature == 0.3
ai_review/tests/suites/libs/config/llm/__init__.py
File without changes
ai_review/tests/suites/libs/config/llm/test_openai.py

@@ -0,0 +1,28 @@
+import pytest
+
+from ai_review.libs.config.llm.openai import OpenAIMetaConfig
+
+
+@pytest.mark.parametrize(
+    "model, expected",
+    [
+        ("gpt-5", True),
+        ("gpt-5-preview", True),
+        ("gpt-4.1", True),
+        ("gpt-4.1-mini", True),
+        ("gpt-4o", False),
+        ("gpt-4o-mini", False),
+        ("gpt-3.5-turbo", False),
+        ("text-davinci-003", False),
+    ],
+)
+def test_is_v2_model_detection(model: str, expected: bool):
+    meta = OpenAIMetaConfig(model=model)
+    assert meta.is_v2_model is expected, f"Model {model} expected {expected} but got {meta.is_v2_model}"
+
+
+def test_is_v2_model_default_false():
+    meta = OpenAIMetaConfig()
+    assert meta.model == "gpt-4o-mini"
+    assert meta.is_v2_model is False
+    assert meta.max_tokens is None
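Taken together, the parametrized cases say is_v2_model is a model-name check: gpt-5* and gpt-4.1* count as v2, while gpt-4o*, gpt-3.5* and older completion models do not. A prefix rule consistent with every case above (the package's actual implementation may differ):

# Prefix check that satisfies all eight parametrized cases.
def is_v2_model(model: str) -> bool:
    return model.startswith(("gpt-5", "gpt-4.1"))


assert is_v2_model("gpt-5-preview") is True
assert is_v2_model("gpt-4.1-mini") is True
assert is_v2_model("gpt-4o-mini") is False
assert is_v2_model("text-davinci-003") is False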
ai_review/tests/suites/services/llm/openai/test_client.py

@@ -2,21 +2,38 @@ import pytest
 
 from ai_review.services.llm.openai.client import OpenAILLMClient
 from ai_review.services.llm.types import ChatResultSchema
-from ai_review.tests.fixtures.clients.openai import
+from ai_review.tests.fixtures.clients.openai import FakeOpenAIV1HTTPClient, FakeOpenAIV2HTTPClient
 
 
 @pytest.mark.asyncio
-@pytest.mark.usefixtures("
-async def
+@pytest.mark.usefixtures("openai_v1_http_client_config")
+async def test_openai_llm_chat_v1(
     openai_llm_client: OpenAILLMClient,
-
+    fake_openai_v1_http_client: FakeOpenAIV1HTTPClient
 ):
     result = await openai_llm_client.chat("prompt", "prompt_system")
 
     assert isinstance(result, ChatResultSchema)
-    assert result.text == "
+    assert result.text == "FAKE_OPENAI_V1_RESPONSE"
     assert result.total_tokens == 12
     assert result.prompt_tokens == 5
     assert result.completion_tokens == 7
 
-    assert
+    assert fake_openai_v1_http_client.calls[0][0] == "chat"
+
+
+@pytest.mark.asyncio
+@pytest.mark.usefixtures("openai_v2_http_client_config")
+async def test_openai_llm_chat_v2(
+    openai_llm_client: OpenAILLMClient,
+    fake_openai_v2_http_client: FakeOpenAIV2HTTPClient
+):
+    result = await openai_llm_client.chat("prompt", "prompt_system")
+
+    assert isinstance(result, ChatResultSchema)
+    assert result.text == "FAKE_OPENAI_V2_RESPONSE"
+    assert result.total_tokens == 20
+    assert result.prompt_tokens == 10
+    assert result.completion_tokens == 10
+
+    assert fake_openai_v2_http_client.calls[0][0] == "chat"
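Note the field mapping the v2 test implies: the fake v2 usage reports input_tokens/output_tokens, yet the assertions read prompt_tokens/completion_tokens off ChatResultSchema, so the client must normalize the Responses-style names to the common result schema. A hypothetical sketch of that normalization:

# Hypothetical normalization from Responses-style usage names to the
# prompt/completion names exposed by the shared result schema.
from dataclasses import dataclass


@dataclass
class ChatResult:
    total_tokens: int
    prompt_tokens: int
    completion_tokens: int


def result_from_v2_usage(total_tokens: int, input_tokens: int, output_tokens: int) -> ChatResult:
    # input -> prompt, output -> completion; the total passes through unchanged.
    return ChatResult(total_tokens, input_tokens, output_tokens)


assert result_from_v2_usage(20, 10, 10) == ChatResult(20, 10, 10)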
ai_review/tests/suites/services/llm/openrouter/__init__.py
File without changes
ai_review/tests/suites/services/llm/openrouter/test_client.py

@@ -0,0 +1,22 @@
+import pytest
+
+from ai_review.services.llm.openrouter.client import OpenRouterLLMClient
+from ai_review.services.llm.types import ChatResultSchema
+from ai_review.tests.fixtures.clients.openrouter import FakeOpenRouterHTTPClient
+
+
+@pytest.mark.asyncio
+@pytest.mark.usefixtures("openrouter_http_client_config")
+async def test_openrouter_llm_chat(
+    openrouter_llm_client: OpenRouterLLMClient,
+    fake_openrouter_http_client: FakeOpenRouterHTTPClient
+):
+    result = await openrouter_llm_client.chat("prompt", "prompt_system")
+
+    assert isinstance(result, ChatResultSchema)
+    assert result.text == "FAKE_OPENROUTER_RESPONSE"
+    assert result.total_tokens == 12
+    assert result.prompt_tokens == 5
+    assert result.completion_tokens == 7
+
+    assert fake_openrouter_http_client.calls[0][0] == "chat"
ai_review/tests/suites/services/llm/test_factory.py

@@ -5,9 +5,10 @@ from ai_review.services.llm.factory import get_llm_client
 from ai_review.services.llm.gemini.client import GeminiLLMClient
 from ai_review.services.llm.ollama.client import OllamaLLMClient
 from ai_review.services.llm.openai.client import OpenAILLMClient
+from ai_review.services.llm.openrouter.client import OpenRouterLLMClient
 
 
-@pytest.mark.usefixtures("
+@pytest.mark.usefixtures("openai_v1_http_client_config")
 def test_get_llm_client_returns_openai(monkeypatch: pytest.MonkeyPatch):
     client = get_llm_client()
     assert isinstance(client, OpenAILLMClient)
@@ -31,6 +32,12 @@ def test_get_llm_client_returns_ollama(monkeypatch: pytest.MonkeyPatch):
     assert isinstance(client, OllamaLLMClient)
 
 
+@pytest.mark.usefixtures("openrouter_http_client_config")
+def test_get_llm_client_returns_openrouter(monkeypatch: pytest.MonkeyPatch):
+    client = get_llm_client()
+    assert isinstance(client, OpenRouterLLMClient)
+
+
 def test_get_llm_client_unsupported_provider(monkeypatch: pytest.MonkeyPatch):
     monkeypatch.setattr("ai_review.services.llm.factory.settings.llm.provider", "UNSUPPORTED")
     with pytest.raises(ValueError):
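The factory tests outline the dispatch: one branch per LLMProvider value, with ValueError for anything unknown; the new OpenRouter branch slots in beside the existing ones. A simplified stand-in for that dispatch (string results replace the real client classes):

# Simplified stand-in for the provider dispatch the tests describe.
from enum import Enum


class LLMProvider(str, Enum):
    OPENAI = "openai"
    GEMINI = "gemini"
    CLAUDE = "claude"
    OLLAMA = "ollama"
    OPENROUTER = "openrouter"


def get_llm_client(provider: LLMProvider | str) -> str:
    match provider:
        case LLMProvider.OPENAI:
            return "OpenAILLMClient"
        case LLMProvider.OPENROUTER:
            return "OpenRouterLLMClient"
        case LLMProvider.GEMINI | LLMProvider.CLAUDE | LLMProvider.OLLAMA:
            return f"{provider.value} client"
        case _:
            # Mirrors the test expecting ValueError for "UNSUPPORTED".
            raise ValueError(f"Unsupported LLM provider: {provider}")


assert get_llm_client(LLMProvider.OPENROUTER) == "OpenRouterLLMClient"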
ai_review/tests/suites/services/review/gateway/test_review_comment_gateway.py

@@ -72,7 +72,7 @@ async def test_get_summary_threads_filters_by_tag(
 
 @pytest.mark.asyncio
 async def test_has_existing_inline_comments_true(
-    capsys,
+    capsys: pytest.CaptureFixture,
     fake_vcs_client: FakeVCSClient,
     review_comment_gateway: ReviewCommentGateway,
 ):
@@ -118,7 +118,7 @@ async def test_process_inline_reply_happy_path(
 
 @pytest.mark.asyncio
 async def test_process_inline_reply_error(
-    capsys,
+    capsys: pytest.CaptureFixture,
     fake_vcs_client: FakeVCSClient,
     review_comment_gateway: ReviewCommentGateway,
 ):
@@ -151,7 +151,7 @@ async def test_process_summary_reply_success(
 
 @pytest.mark.asyncio
 async def test_process_summary_reply_error(
-    capsys,
+    capsys: pytest.CaptureFixture,
     fake_vcs_client: FakeVCSClient,
     review_comment_gateway: ReviewCommentGateway,
 ):
@@ -184,7 +184,7 @@ async def test_process_inline_comment_happy_path(
 
 @pytest.mark.asyncio
 async def test_process_inline_comment_error_fallback(
-    capsys,
+    capsys: pytest.CaptureFixture,
     fake_vcs_client: FakeVCSClient,
     review_comment_gateway: ReviewCommentGateway,
 ):
@@ -218,7 +218,7 @@ async def test_process_summary_comment_happy_path(
 
 @pytest.mark.asyncio
 async def test_process_summary_comment_error(
-    capsys,
+    capsys: pytest.CaptureFixture,
     fake_vcs_client: FakeVCSClient,
     review_comment_gateway: ReviewCommentGateway,
 ):
ai_review/tests/suites/services/review/gateway/test_review_dry_run_comment_gateway.py

@@ -10,7 +10,7 @@ from ai_review.tests.fixtures.services.vcs import FakeVCSClient
 
 @pytest.mark.asyncio
 async def test_process_inline_reply_dry_run_logs_and_no_vcs_calls(
-    capsys,
+    capsys: pytest.CaptureFixture,
     fake_vcs_client: FakeVCSClient,
     review_dry_run_comment_gateway: ReviewDryRunCommentGateway
 ):
@@ -26,7 +26,7 @@ async def test_process_inline_reply_dry_run_logs_and_no_vcs_calls(
 
 @pytest.mark.asyncio
 async def test_process_summary_reply_dry_run_logs_and_no_vcs_calls(
-    capsys,
+    capsys: pytest.CaptureFixture,
     fake_vcs_client: FakeVCSClient,
     review_dry_run_comment_gateway: ReviewDryRunCommentGateway
 ):
@@ -42,7 +42,7 @@ async def test_process_summary_reply_dry_run_logs_and_no_vcs_calls(
 
 @pytest.mark.asyncio
 async def test_process_inline_comment_dry_run_logs_and_no_vcs_calls(
-    capsys,
+    capsys: pytest.CaptureFixture,
     fake_vcs_client: FakeVCSClient,
     review_dry_run_comment_gateway: ReviewDryRunCommentGateway
 ):
@@ -59,7 +59,7 @@ async def test_process_inline_comment_dry_run_logs_and_no_vcs_calls(
 
 @pytest.mark.asyncio
 async def test_process_summary_comment_dry_run_logs_and_no_vcs_calls(
-    capsys,
+    capsys: pytest.CaptureFixture,
     fake_vcs_client: FakeVCSClient,
     review_dry_run_comment_gateway: ReviewDryRunCommentGateway
 ):
@@ -75,7 +75,7 @@ async def test_process_summary_comment_dry_run_logs_and_no_vcs_calls(
 
 @pytest.mark.asyncio
 async def test_process_inline_comments_iterates_all(
-    capsys,
+    capsys: pytest.CaptureFixture,
     fake_vcs_client: FakeVCSClient,
     review_dry_run_comment_gateway: ReviewDryRunCommentGateway
 ):
ai_review/tests/suites/services/review/gateway/test_review_llm_gateway.py

@@ -27,7 +27,7 @@ async def test_ask_happy_path(
 
 @pytest.mark.asyncio
 async def test_ask_warns_on_empty_response(
-    capsys,
+    capsys: pytest.CaptureFixture,
     review_llm_gateway: ReviewLLMGateway,
     fake_llm_client: FakeLLMClient,
     fake_cost_service: FakeCostService,
@@ -49,7 +49,7 @@ async def test_ask_warns_on_empty_response(
 
 @pytest.mark.asyncio
 async def test_ask_handles_llm_error(
-    capsys,
+    capsys: pytest.CaptureFixture,
     fake_llm_client: FakeLLMClient,
     review_llm_gateway: ReviewLLMGateway,
 ):
ai_review/tests/suites/services/review/test_service.py

@@ -63,7 +63,7 @@ async def test_run_summary_reply_review_invokes_runner(
 
 
 def test_report_total_cost_with_data(
-    capsys,
+    capsys: pytest.CaptureFixture,
     review_service: ReviewService,
     fake_cost_service: FakeCostService
 ):
@@ -87,7 +87,7 @@ def test_report_total_cost_with_data(
     assert "0.006" in output
 
 
-def test_report_total_cost_no_data(capsys, review_service: ReviewService):
+def test_report_total_cost_no_data(capsys: pytest.CaptureFixture, review_service: ReviewService):
     """Should log message when no cost data is available."""
     review_service.report_total_cost()
     output = capsys.readouterr().out