mito-ai 0.1.32__py3-none-any.whl → 0.1.34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mito-ai has been flagged as possibly problematic. See the registry's advisory page for details.

Files changed (58)
  1. mito_ai/_version.py +1 -1
  2. mito_ai/anthropic_client.py +52 -54
  3. mito_ai/app_builder/handlers.py +2 -4
  4. mito_ai/completions/models.py +15 -1
  5. mito_ai/completions/prompt_builders/agent_system_message.py +10 -2
  6. mito_ai/completions/providers.py +79 -39
  7. mito_ai/constants.py +11 -24
  8. mito_ai/gemini_client.py +44 -48
  9. mito_ai/openai_client.py +30 -44
  10. mito_ai/tests/message_history/test_generate_short_chat_name.py +0 -4
  11. mito_ai/tests/open_ai_utils_test.py +18 -22
  12. mito_ai/tests/{test_anthropic_client.py → providers/test_anthropic_client.py} +37 -32
  13. mito_ai/tests/providers/test_azure.py +2 -6
  14. mito_ai/tests/providers/test_capabilities.py +120 -0
  15. mito_ai/tests/{test_gemini_client.py → providers/test_gemini_client.py} +40 -36
  16. mito_ai/tests/providers/test_mito_server_utils.py +448 -0
  17. mito_ai/tests/providers/test_model_resolution.py +130 -0
  18. mito_ai/tests/providers/test_openai_client.py +57 -0
  19. mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
  20. mito_ai/tests/providers/test_provider_limits.py +42 -0
  21. mito_ai/tests/providers/test_providers.py +382 -0
  22. mito_ai/tests/providers/test_retry_logic.py +389 -0
  23. mito_ai/tests/providers/utils.py +85 -0
  24. mito_ai/tests/test_constants.py +15 -2
  25. mito_ai/tests/test_telemetry.py +12 -0
  26. mito_ai/utils/anthropic_utils.py +21 -29
  27. mito_ai/utils/gemini_utils.py +18 -22
  28. mito_ai/utils/mito_server_utils.py +92 -0
  29. mito_ai/utils/open_ai_utils.py +22 -46
  30. mito_ai/utils/provider_utils.py +49 -0
  31. mito_ai/utils/telemetry_utils.py +11 -1
  32. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +1 -1
  33. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  34. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  35. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.42b54cf8f038cc526980.js → mito_ai-0.1.34.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a20772bc113422d0f505.js +785 -351
  36. mito_ai-0.1.34.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a20772bc113422d0f505.js.map +1 -0
  37. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.a711c58b58423173bd24.js → mito_ai-0.1.34.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.51d07439b02aaa830975.js +13 -16
  38. mito_ai-0.1.34.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.51d07439b02aaa830975.js.map +1 -0
  39. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js → mito_ai-0.1.34.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.76efcc5c3be4056457ee.js +6 -2
  40. mito_ai-0.1.34.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.76efcc5c3be4056457ee.js.map +1 -0
  41. {mito_ai-0.1.32.dist-info → mito_ai-0.1.34.dist-info}/METADATA +1 -1
  42. {mito_ai-0.1.32.dist-info → mito_ai-0.1.34.dist-info}/RECORD +52 -43
  43. mito_ai/tests/providers_test.py +0 -438
  44. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.42b54cf8f038cc526980.js.map +0 -1
  45. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.a711c58b58423173bd24.js.map +0 -1
  46. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js.map +0 -1
  47. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js +0 -7842
  48. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js.map +0 -1
  49. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  50. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  51. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  52. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js +0 -0
  53. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -0
  54. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  55. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  56. {mito_ai-0.1.32.dist-info → mito_ai-0.1.34.dist-info}/WHEEL +0 -0
  57. {mito_ai-0.1.32.dist-info → mito_ai-0.1.34.dist-info}/entry_points.txt +0 -0
  58. {mito_ai-0.1.32.dist-info → mito_ai-0.1.34.dist-info}/licenses/LICENSE +0 -0
@@ -1,438 +0,0 @@
1
- # Copyright (c) Saga Inc.
2
- # Distributed under the terms of the GNU Affero General Public License v3.0 License.
3
-
4
- from __future__ import annotations
5
- from datetime import datetime
6
- from typing import Any, List
7
- from unittest.mock import patch, MagicMock, AsyncMock
8
-
9
- import pytest
10
- from traitlets.config import Config
11
- from mito_ai.completions.providers import OpenAIProvider
12
- from mito_ai.completions.models import (
13
- MessageType,
14
- AICapabilities,
15
- CompletionReply
16
- )
17
- from mito_ai.completions.providers import OpenAIProvider
18
- from mito_ai.completions.models import MessageType, AICapabilities
19
- from mito_ai.utils.server_limits import OS_MONTHLY_AI_COMPLETIONS_LIMIT
20
- from openai.types.chat import ChatCompletionMessageParam
21
-
22
- REALLY_OLD_DATE = "2020-01-01"
23
- TODAY = datetime.now().strftime("%Y-%m-%d")
24
- FAKE_API_KEY = "sk-1234567890"
25
-
26
@pytest.fixture
def provider_config() -> Config:
    """Build a traitlets Config with the sections the OpenAIProvider reads.

    Both the ``OpenAIProvider`` and ``OpenAIClient`` sub-configs are created
    empty up front so individual tests can assign attributes on them directly.
    """
    cfg = Config()
    cfg.OpenAIProvider = Config()
    cfg.OpenAIClient = Config()
    return cfg
33
-
34
@pytest.fixture(autouse=True)
def reset_env_vars(monkeypatch: pytest.MonkeyPatch) -> None:
    """Remove every provider-selecting env var so each test starts from a clean slate."""
    provider_vars = (
        "OPENAI_API_KEY",
        "CLAUDE_API_KEY",
        "GEMINI_API_KEY",
        "OLLAMA_MODEL",
        "AZURE_OPENAI_API_KEY",
        "AZURE_OPENAI_ENDPOINT",
        "AZURE_OPENAI_MODEL",
    )
    for name in provider_vars:
        # raising=False: the variable may legitimately be absent already.
        monkeypatch.delenv(name, raising=False)
42
-
43
-
44
def patch_server_limits(is_pro: bool = False, completion_count: int = 1, first_date: str = TODAY) -> Any:
    """Return a patcher that stubs out all server-limit checks at once.

    The returned object (from ``patch.multiple``) is meant to be used as a
    context manager; while active, quota checks always pass and the counters
    report the given ``completion_count`` / ``first_date`` / ``is_pro`` values.
    """
    stubs = {
        "get_chat_completion_count": MagicMock(return_value=completion_count),
        "get_first_completion_date": MagicMock(return_value=first_date),
        "is_pro": MagicMock(return_value=is_pro),
        "check_mito_server_quota": MagicMock(return_value=None),
    }
    return patch.multiple("mito_ai.utils.server_limits", **stubs)
52
-
53
-
54
def patch_openai_model_list() -> Any:
    """Return a patcher replacing ``openai.OpenAI`` with a stub client.

    The stub's ``models.list()`` yields a single fake model id so code that
    enumerates available models sees ``gpt-4o-mini``.

    NOTE(review): no test in this module appears to use this helper — confirm
    before keeping it.
    """
    fake_client = MagicMock()
    fake_client.models.list.return_value = [MagicMock(id="gpt-4o-mini")]
    # Patch the constructor so any instantiation returns the stub.
    return patch("openai.OpenAI", return_value=fake_client)
60
-
61
-
62
def mock_openai_client() -> Any:
    """Return a patcher swapping in a stub OpenAI client with user-key capabilities."""
    client = MagicMock()
    client.key_type = "user"
    client.capabilities = AICapabilities(
        configuration={"model": "gpt-4o-mini"},
        provider="OpenAI with user key",
        type="ai_capabilities",
    )
    client.request_completions = AsyncMock(return_value="Test completion")
    client.stream_completions = AsyncMock(return_value="Test completion")
    return patch("mito_ai.completions.providers.OpenAIClient", return_value=client)
74
-
75
-
76
def mock_gemini_client() -> Any:
    """Return a patcher swapping in a stub Gemini client."""
    client = MagicMock()
    client.key_type = "gemini"
    client.capabilities = AICapabilities(
        configuration={"model": "gemini-2-pro"},
        provider="Gemini",
        type="ai_capabilities",
    )
    client.request_completions = AsyncMock(return_value="Test completion")
    client.stream_completions = AsyncMock(return_value="Test completion")
    return patch("mito_ai.completions.providers.GeminiClient", return_value=client)
88
-
89
-
90
def mock_azure_openai_client() -> Any:
    """Return a patcher swapping in a stub Azure OpenAI client.

    Azure traffic goes through the same OpenAIClient class, so the same
    target is patched as in ``mock_openai_client`` — only the advertised
    capabilities and key type differ.
    """
    client = MagicMock()
    client.key_type = "azure"
    client.capabilities = AICapabilities(
        configuration={"model": "gpt-4o"},
        provider="Azure OpenAI",
        type="ai_capabilities",
    )
    client.request_completions = AsyncMock(return_value="Test completion")
    client.stream_completions = AsyncMock(return_value="Test completion")
    return patch("mito_ai.completions.providers.OpenAIClient", return_value=client)
102
-
103
-
104
-
105
def mock_claude_client() -> Any:
    """Return a patcher swapping in a stub Anthropic (Claude) client.

    Unlike the other stubs this one also provides ``stream_response``, which
    the provider calls for Claude streaming.
    """
    client = MagicMock()
    client.key_type = "claude"
    client.capabilities = AICapabilities(
        configuration={"model": "claude-3-opus-20240229"},
        provider="Claude",
        type="ai_capabilities",
    )
    client.request_completions = AsyncMock(return_value="Test completion")
    client.stream_completions = AsyncMock(return_value="Test completion")
    client.stream_response = AsyncMock(return_value="Test completion")
    return patch("mito_ai.completions.providers.AnthropicClient", return_value=client)
118
-
119
-
120
def test_os_user_openai_key_set_below_limit(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """An open-source user with their own OpenAI key, under the quota, gets the user-key path."""
    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

    with patch_server_limits(is_pro=False, completion_count=1), mock_openai_client():
        provider = OpenAIProvider(config=provider_config)
        caps = provider.capabilities
        assert "user key" in caps.provider
        assert provider.last_error is None
132
-
133
-
134
def test_os_user_openai_key_set_above_limit(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """The user-key path is used even past the OS monthly limit — the quota only gates the Mito server."""
    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

    over_limit = OS_MONTHLY_AI_COMPLETIONS_LIMIT + 1
    with patch_server_limits(is_pro=False, completion_count=over_limit), mock_openai_client():
        provider = OpenAIProvider(config=provider_config)
        caps = provider.capabilities
        assert "user key" in caps.provider
        assert provider.last_error is None
146
-
147
-
148
def test_pro_user_openai_key_set_below_limit(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """A Pro user with their own OpenAI key, under the quota, still gets the user-key path."""
    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

    with patch_server_limits(is_pro=True, completion_count=1), mock_openai_client():
        provider = OpenAIProvider(config=provider_config)
        caps = provider.capabilities
        assert "user key" in caps.provider
        assert provider.last_error is None
160
-
161
-
162
def test_pro_user_openai_key_set_above_limit(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """A Pro user with their own OpenAI key is unaffected by the OS monthly limit."""
    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

    over_limit = OS_MONTHLY_AI_COMPLETIONS_LIMIT + 1
    with patch_server_limits(is_pro=True, completion_count=over_limit), mock_openai_client():
        provider = OpenAIProvider(config=provider_config)
        caps = provider.capabilities
        assert "user key" in caps.provider
        assert provider.last_error is None
174
-
175
-
176
def test_gemini_provider(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """With only a Gemini key configured, the Gemini client is selected."""
    monkeypatch.setenv("GEMINI_API_KEY", "gemini-key")
    monkeypatch.setattr("mito_ai.constants.GEMINI_API_KEY", "gemini-key")
    # Clear the OpenAI constant so it cannot shadow Gemini.
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)

    with mock_gemini_client():
        provider = OpenAIProvider(config=provider_config)
        assert provider.capabilities.provider == "Gemini"
185
-
186
-
187
def test_azure_openai_provider(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """With enterprise enabled and Azure fully configured, Azure OpenAI is selected."""
    monkeypatch.setattr("mito_ai.enterprise.utils.is_enterprise", lambda: True)
    monkeypatch.setattr("mito_ai.enterprise.utils.is_azure_openai_configured", lambda: True)
    monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_API_KEY", FAKE_API_KEY)
    monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_ENDPOINT", "https://example.com")
    monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_MODEL", "gpt-4o")
    monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_API_VERSION", "2024-12-01-preview")
    # No personal OpenAI key in play.
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)

    with mock_azure_openai_client():
        provider = OpenAIProvider(config=provider_config)
        assert provider.capabilities.provider == "Azure OpenAI"
200
-
201
-
202
def test_claude_provider(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """With only a Claude key configured, the Anthropic client is selected."""
    monkeypatch.setenv("CLAUDE_API_KEY", "claude-key")
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", "claude-key")
    # Clear the OpenAI constant so it cannot shadow Claude.
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)

    with mock_claude_client():
        provider = OpenAIProvider(config=provider_config)
        assert provider.capabilities.provider == "Claude"
211
-
212
-
213
def test_provider_priority_order(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """Providers are selected in priority order: Azure (enterprise) > OpenAI user key > Claude.

    Fix: the original set CLAUDE_API_KEY (env + constant) during setup only to
    delete both a few lines later, before the first assertion. That redundant
    set-then-clear is removed; the explicit clearing is kept so the Azure
    assertion is unaffected by any real environment.
    """
    # Enterprise + fully configured Azure OpenAI.
    monkeypatch.setattr("mito_ai.enterprise.utils.is_enterprise", lambda: True)
    monkeypatch.setattr("mito_ai.enterprise.utils.is_azure_openai_configured", lambda: True)
    monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_API_KEY", FAKE_API_KEY)
    monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_ENDPOINT", "https://example.com")
    monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_MODEL", "gpt-4o")
    monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_API_VERSION", "2024-12-01-preview")
    # A personal OpenAI key is also present.
    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

    # Ensure Gemini and Claude are NOT configured so Azure must win on priority.
    monkeypatch.delenv("GEMINI_API_KEY", raising=False)
    monkeypatch.setattr("mito_ai.constants.GEMINI_API_KEY", None)
    monkeypatch.delenv("CLAUDE_API_KEY", raising=False)
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", None)

    # 1) Azure OpenAI has the highest priority when enterprise is enabled.
    with mock_azure_openai_client():
        llm = OpenAIProvider(config=provider_config)
        assert llm.capabilities.provider == "Azure OpenAI"

    # 2) Without enterprise, the user's OpenAI key takes priority.
    monkeypatch.setattr("mito_ai.enterprise.utils.is_enterprise", lambda: False)
    monkeypatch.setattr("mito_ai.enterprise.utils.is_azure_openai_configured", lambda: False)
    with mock_openai_client():
        llm = OpenAIProvider(config=provider_config)
        assert llm.capabilities.provider == "OpenAI with user key"

    # 3) Without an OpenAI key, Claude is chosen (higher priority than Gemini).
    monkeypatch.delenv("OPENAI_API_KEY")
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)
    # The traitlets config must not carry a key either.
    provider_config.OpenAIProvider.api_key = None
    monkeypatch.setenv("CLAUDE_API_KEY", "claude-key")
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", "claude-key")
    with mock_claude_client():
        llm = OpenAIProvider(config=provider_config)
        assert llm.capabilities.provider == "Claude"
258
-
259
-
260
@pytest.mark.asyncio
async def test_completion_request(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """request_completions is delegated to the underlying OpenAI client and its result returned."""
    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

    client = MagicMock()
    client.capabilities = AICapabilities(
        configuration={"model": "gpt-4o-mini"},
        provider="OpenAI with user key",
        type="ai_capabilities",
    )
    client.key_type = "user"
    client.request_completions = AsyncMock(return_value="Test completion")
    client.stream_completions = AsyncMock(return_value="Test completion")

    with patch("mito_ai.completions.providers.OpenAIClient", return_value=client):
        provider = OpenAIProvider(config=provider_config)
        history: List[ChatCompletionMessageParam] = [
            {"role": "user", "content": "Test message"}
        ]

        result = await provider.request_completions(
            message_type=MessageType.CHAT,
            messages=history,
            model="gpt-4o-mini",
        )

    assert result == "Test completion"
    client.request_completions.assert_called_once()
289
-
290
-
291
@pytest.mark.asyncio
async def test_stream_completion(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """stream_completions delegates to the OpenAI client and emits CompletionReply chunks."""
    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

    client = MagicMock()
    client.capabilities = AICapabilities(
        configuration={"model": "gpt-4o-mini"},
        provider="OpenAI with user key",
        type="ai_capabilities",
    )
    client.key_type = "user"
    client.request_completions = AsyncMock(return_value="Test completion")
    client.stream_completions = AsyncMock(return_value="Test completion")

    with patch("mito_ai.completions.providers.OpenAIClient", return_value=client):
        provider = OpenAIProvider(config=provider_config)
        history: List[ChatCompletionMessageParam] = [
            {"role": "user", "content": "Test message"}
        ]

        # Collect every chunk handed to the reply callback.
        collected: List[Any] = []
        result = await provider.stream_completions(
            message_type=MessageType.CHAT,
            messages=history,
            model="gpt-4o-mini",
            message_id="test-id",
            thread_id="test-thread",
            reply_fn=collected.append,
        )

    assert result == "Test completion"
    client.stream_completions.assert_called_once()
    assert len(collected) > 0
    assert isinstance(collected[0], CompletionReply)
329
-
330
-
331
def test_error_handling(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """last_error stays None until a request is actually made, even with a failing client."""
    monkeypatch.setenv("OPENAI_API_KEY", "invalid-key")
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", "invalid-key")

    failing_client = MagicMock()
    failing_client.capabilities = AICapabilities(
        configuration={"model": "gpt-4o-mini"},
        provider="OpenAI with user key",
        type="ai_capabilities",
    )
    failing_client.key_type = "user"
    # Configured to raise, but never invoked by this test.
    failing_client.request_completions.side_effect = Exception("API error")

    with patch("mito_ai.completions.providers.OpenAIClient", return_value=failing_client):
        provider = OpenAIProvider(config=provider_config)
        # No request has been issued yet, so no error should be recorded.
        assert provider.last_error is None
346
-
347
-
348
@pytest.mark.asyncio
async def test_claude_completion_request(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """request_completions is delegated to the Anthropic client when Claude is the provider."""
    monkeypatch.setenv("CLAUDE_API_KEY", "claude-key")
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", "claude-key")
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)

    client = MagicMock()
    client.capabilities = AICapabilities(
        configuration={"model": "claude-3-opus-20240229"},
        provider="Claude",
        type="ai_capabilities",
    )
    client.key_type = "claude"
    client.request_completions = AsyncMock(return_value="Test completion")
    client.stream_completions = AsyncMock(return_value="Test completion")

    with patch("mito_ai.completions.providers.AnthropicClient", return_value=client):
        provider = OpenAIProvider(config=provider_config)
        history: List[ChatCompletionMessageParam] = [
            {"role": "user", "content": "Test message"}
        ]

        result = await provider.request_completions(
            message_type=MessageType.CHAT,
            messages=history,
            model="claude-3-opus-20240229",
        )

    assert result == "Test completion"
    client.request_completions.assert_called_once()
378
-
379
-
380
@pytest.mark.asyncio
async def test_claude_stream_completion(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """Claude streaming goes through the client's stream_response and emits CompletionReply chunks."""
    monkeypatch.setenv("CLAUDE_API_KEY", "claude-key")
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", "claude-key")
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)

    client = MagicMock()
    client.capabilities = AICapabilities(
        configuration={"model": "claude-3-opus-20240229"},
        provider="Claude",
        type="ai_capabilities",
    )
    client.key_type = "claude"
    client.request_completions = AsyncMock(return_value="Test completion")
    client.stream_completions = AsyncMock(return_value="Test completion")
    client.stream_response = AsyncMock(return_value="Test completion")

    with patch("mito_ai.completions.providers.AnthropicClient", return_value=client):
        provider = OpenAIProvider(config=provider_config)
        history: List[ChatCompletionMessageParam] = [
            {"role": "user", "content": "Test message"}
        ]

        # Collect every chunk handed to the reply callback.
        collected: List[Any] = []
        result = await provider.stream_completions(
            message_type=MessageType.CHAT,
            messages=history,
            model="claude-3-opus-20240229",
            message_id="test-id",
            thread_id="test-thread",
            reply_fn=collected.append,
        )

    assert result == "Test completion"
    # Claude streaming uses stream_response, not stream_completions.
    client.stream_response.assert_called_once()
    assert len(collected) > 0
    assert isinstance(collected[0], CompletionReply)
420
-
421
-
422
def test_claude_error_handling(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """With a failing Claude client, last_error stays None until a request is made."""
    monkeypatch.setenv("CLAUDE_API_KEY", "invalid-key")
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", "invalid-key")
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)

    failing_client = MagicMock()
    failing_client.capabilities = AICapabilities(
        configuration={"model": "claude-3-opus-20240229"},
        provider="Claude",
        type="ai_capabilities",
    )
    failing_client.key_type = "claude"
    # Configured to raise, but never invoked by this test.
    failing_client.request_completions.side_effect = Exception("API error")

    with patch("mito_ai.completions.providers.AnthropicClient", return_value=failing_client):
        provider = OpenAIProvider(config=provider_config)
        # No request has been issued yet, so no error should be recorded.
        assert provider.last_error is None