mito-ai 0.1.33__py3-none-any.whl → 0.1.35__py3-none-any.whl

This diff compares the contents of publicly available package versions as published to their respective public registries; it is provided for informational purposes only.

Potentially problematic release: this version of mito-ai might be problematic.

Files changed (58)
  1. mito_ai/_version.py +1 -1
  2. mito_ai/anthropic_client.py +52 -54
  3. mito_ai/app_builder/handlers.py +2 -4
  4. mito_ai/completions/models.py +15 -1
  5. mito_ai/completions/prompt_builders/agent_system_message.py +10 -2
  6. mito_ai/completions/providers.py +79 -39
  7. mito_ai/constants.py +11 -24
  8. mito_ai/gemini_client.py +44 -48
  9. mito_ai/openai_client.py +30 -44
  10. mito_ai/tests/message_history/test_generate_short_chat_name.py +0 -4
  11. mito_ai/tests/open_ai_utils_test.py +18 -22
  12. mito_ai/tests/{test_anthropic_client.py → providers/test_anthropic_client.py} +37 -32
  13. mito_ai/tests/providers/test_azure.py +2 -6
  14. mito_ai/tests/providers/test_capabilities.py +120 -0
  15. mito_ai/tests/{test_gemini_client.py → providers/test_gemini_client.py} +40 -36
  16. mito_ai/tests/providers/test_mito_server_utils.py +448 -0
  17. mito_ai/tests/providers/test_model_resolution.py +130 -0
  18. mito_ai/tests/providers/test_openai_client.py +57 -0
  19. mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
  20. mito_ai/tests/providers/test_provider_limits.py +42 -0
  21. mito_ai/tests/providers/test_providers.py +382 -0
  22. mito_ai/tests/providers/test_retry_logic.py +389 -0
  23. mito_ai/tests/providers/utils.py +85 -0
  24. mito_ai/tests/test_constants.py +15 -2
  25. mito_ai/tests/test_telemetry.py +12 -0
  26. mito_ai/utils/anthropic_utils.py +21 -29
  27. mito_ai/utils/gemini_utils.py +18 -22
  28. mito_ai/utils/mito_server_utils.py +92 -0
  29. mito_ai/utils/open_ai_utils.py +22 -46
  30. mito_ai/utils/provider_utils.py +49 -0
  31. mito_ai/utils/telemetry_utils.py +11 -1
  32. {mito_ai-0.1.33.data → mito_ai-0.1.35.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +1 -1
  33. {mito_ai-0.1.33.data → mito_ai-0.1.35.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  34. {mito_ai-0.1.33.data → mito_ai-0.1.35.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  35. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.281f4b9af60d620c6fb1.js → mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a20772bc113422d0f505.js +737 -319
  36. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a20772bc113422d0f505.js.map +1 -0
  37. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4f1d00fd0c58fcc05d8d.js → mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.d2eea6519fa332d79efb.js +13 -16
  38. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.d2eea6519fa332d79efb.js.map +1 -0
  39. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js → mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.76efcc5c3be4056457ee.js +6 -2
  40. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.76efcc5c3be4056457ee.js.map +1 -0
  41. {mito_ai-0.1.33.dist-info → mito_ai-0.1.35.dist-info}/METADATA +1 -1
  42. {mito_ai-0.1.33.dist-info → mito_ai-0.1.35.dist-info}/RECORD +52 -43
  43. mito_ai/tests/providers_test.py +0 -438
  44. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.281f4b9af60d620c6fb1.js.map +0 -1
  45. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4f1d00fd0c58fcc05d8d.js.map +0 -1
  46. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js.map +0 -1
  47. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js +0 -7842
  48. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js.map +0 -1
  49. {mito_ai-0.1.33.data → mito_ai-0.1.35.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  50. {mito_ai-0.1.33.data → mito_ai-0.1.35.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  51. {mito_ai-0.1.33.data → mito_ai-0.1.35.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  52. {mito_ai-0.1.33.data → mito_ai-0.1.35.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js +0 -0
  53. {mito_ai-0.1.33.data → mito_ai-0.1.35.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -0
  54. {mito_ai-0.1.33.data → mito_ai-0.1.35.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  55. {mito_ai-0.1.33.data → mito_ai-0.1.35.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  56. {mito_ai-0.1.33.dist-info → mito_ai-0.1.35.dist-info}/WHEEL +0 -0
  57. {mito_ai-0.1.33.dist-info → mito_ai-0.1.35.dist-info}/entry_points.txt +0 -0
  58. {mito_ai-0.1.33.dist-info → mito_ai-0.1.35.dist-info}/licenses/LICENSE +0 -0
mito_ai/tests/providers/test_retry_logic.py (new file)
@@ -0,0 +1,389 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ import pytest
+ import asyncio
+ from unittest.mock import AsyncMock, MagicMock, patch
+ from mito_ai.completions.providers import OpenAIProvider
+ from mito_ai.completions.models import MessageType, CompletionError
+ from mito_ai.utils.mito_server_utils import ProviderCompletionException
+ from mito_ai.tests.providers.utils import mock_openai_client, patch_server_limits
+ from traitlets.config import Config
+
+ FAKE_API_KEY = "sk-1234567890"
+
+ @pytest.fixture
+ def provider_config() -> Config:
+     """Create a proper Config object for the OpenAIProvider."""
+     config = Config()
+     config.OpenAIProvider = Config()
+     config.OpenAIClient = Config()
+     return config
+
+ @pytest.fixture
+ def mock_messages():
+     """Sample messages for testing."""
+     return [{"role": "user", "content": "Test message"}]
+
+ @pytest.fixture
+ def mock_sleep():
+     """Mock asyncio.sleep to avoid delays in tests."""
+     with patch("asyncio.sleep", new_callable=AsyncMock) as mock:
+         yield mock
+
+ class TestRetryLogic:
+     """Test retry logic in OpenAIProvider.request_completions."""
+
+     @pytest.mark.parametrize("attempts_before_success,max_retries,expected_call_count", [
+         (0, 3, 1),  # Success on first try
+         (1, 3, 2),  # Success after 1 retry
+         (2, 3, 3),  # Success after 2 retries
+         (3, 3, 4),  # Success on final try
+     ])
+     @pytest.mark.asyncio
+     async def test_success_after_retries(
+         self,
+         attempts_before_success: int,
+         max_retries: int,
+         expected_call_count: int,
+         provider_config: Config,
+         mock_messages,
+         mock_sleep,
+         monkeypatch: pytest.MonkeyPatch
+     ):
+         """Test that request_completions succeeds after a certain number of retries."""
+         monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
+         monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
+
+         # Create side effect: fail N times, then succeed
+         side_effects = [Exception("Test error")] * attempts_before_success + ["Success!"]
+
+         with (
+             patch_server_limits(),
+             mock_openai_client() as mock_client
+         ):
+             mock_client.return_value.request_completions = AsyncMock(side_effect=side_effects)
+
+             provider = OpenAIProvider(config=provider_config)
+
+             # Test successful completion
+             result = await provider.request_completions(
+                 message_type=MessageType.CHAT,
+                 messages=mock_messages,
+                 model="gpt-4o-mini",
+                 max_retries=max_retries
+             )
+
+             assert result == "Success!"
+             assert mock_client.return_value.request_completions.call_count == expected_call_count
+             assert provider.last_error is None  # Should be reset on success
+
+             # Verify sleep was called for retries
+             assert mock_sleep.call_count == attempts_before_success
+
+     @pytest.mark.parametrize("exception_type,max_retries,expected_call_count", [
+         (ProviderCompletionException, 3, 4),  # Retry ProviderCompletionException
+         (Exception, 3, 4),  # Retry generic Exception
+         (RuntimeError, 3, 4),  # Retry RuntimeError
+         (ValueError, 2, 3),  # Retry ValueError with different max_retries
+     ])
+     @pytest.mark.asyncio
+     async def test_failure_after_all_retries(
+         self,
+         exception_type,
+         max_retries: int,
+         expected_call_count: int,
+         provider_config: Config,
+         mock_messages,
+         mock_sleep,
+         monkeypatch: pytest.MonkeyPatch
+     ):
+         """Test that request_completions fails after exhausting all retries."""
+         monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
+         monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
+
+         # Create exception instance
+         if exception_type == ProviderCompletionException:
+             test_exception = ProviderCompletionException("Test provider error")
+         else:
+             test_exception = exception_type("Test error")
+
+         with (
+             patch_server_limits(),
+             mock_openai_client() as mock_client
+         ):
+             mock_client.return_value.request_completions = AsyncMock(side_effect=test_exception)
+
+             provider = OpenAIProvider(config=provider_config)
+
+             # Test failure after all retries
+             with pytest.raises(exception_type):
+                 await provider.request_completions(
+                     message_type=MessageType.CHAT,
+                     messages=mock_messages,
+                     model="gpt-4o-mini",
+                     max_retries=max_retries
+                 )
+
+             assert mock_client.return_value.request_completions.call_count == expected_call_count
+             assert provider.last_error is not None  # Should be set on final failure
+             assert isinstance(provider.last_error, CompletionError)
+
+             # Verify sleep was called for retries (max_retries times)
+             assert mock_sleep.call_count == max_retries
+
+     @pytest.mark.asyncio
+     async def test_mixed_exception_types(
+         self,
+         provider_config: Config,
+         mock_messages,
+         mock_sleep,
+         monkeypatch: pytest.MonkeyPatch
+     ):
+         """Test handling of different exception types across retry attempts."""
+         monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
+         monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
+
+         # Different exceptions on different attempts
+         side_effects = [
+             Exception("Generic error"),
+             ProviderCompletionException("Provider error"),
+             RuntimeError("Runtime error"),
+             "Success!"
+         ]
+
+         with (
+             patch_server_limits(),
+             mock_openai_client() as mock_client
+         ):
+             mock_client.return_value.request_completions = AsyncMock(side_effect=side_effects)
+
+             provider = OpenAIProvider(config=provider_config)
+
+             # Should succeed after 3 retries with mixed exceptions
+             result = await provider.request_completions(
+                 message_type=MessageType.CHAT,
+                 messages=mock_messages,
+                 model="gpt-4o-mini",
+                 max_retries=3
+             )
+
+             assert result == "Success!"
+             assert mock_client.return_value.request_completions.call_count == 4
+             assert provider.last_error is None  # Should be reset on success
+             assert mock_sleep.call_count == 3  # Called for each retry
+
+     @pytest.mark.asyncio
+     async def test_last_error_reset_on_success(
+         self,
+         provider_config: Config,
+         mock_messages,
+         mock_sleep,
+         monkeypatch: pytest.MonkeyPatch
+     ):
+         """Test that last_error is reset when request succeeds after previous failure."""
+         monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
+         monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
+
+         with (
+             patch_server_limits(),
+             mock_openai_client() as mock_client
+         ):
+             provider = OpenAIProvider(config=provider_config)
+
+             # First request fails to set an error
+             mock_client.return_value.request_completions = AsyncMock(side_effect=Exception("First error"))
+
+             with pytest.raises(Exception):
+                 await provider.request_completions(
+                     message_type=MessageType.CHAT,
+                     messages=mock_messages,
+                     model="gpt-4o-mini",
+                     max_retries=0  # No retries to fail quickly
+                 )
+
+             # Verify error was set
+             assert provider.last_error is not None
+
+             # Second request succeeds
+             mock_client.return_value.request_completions = AsyncMock(return_value="Success!")
+
+             result = await provider.request_completions(
+                 message_type=MessageType.CHAT,
+                 messages=mock_messages,
+                 model="gpt-4o-mini",
+                 max_retries=3
+             )
+
+             assert result == "Success!"
+             assert provider.last_error is None  # Should be reset
+
+     @pytest.mark.asyncio
+     async def test_no_retries_when_max_retries_zero(
+         self,
+         provider_config: Config,
+         mock_messages,
+         mock_sleep,
+         monkeypatch: pytest.MonkeyPatch
+     ):
+         """Test that no retries happen when max_retries=0."""
+         monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
+         monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
+
+         with (
+             patch_server_limits(),
+             mock_openai_client() as mock_client
+         ):
+             mock_client.return_value.request_completions = AsyncMock(side_effect=Exception("Test error"))
+
+             provider = OpenAIProvider(config=provider_config)
+
+             # Should fail immediately with no retries
+             with pytest.raises(Exception):
+                 await provider.request_completions(
+                     message_type=MessageType.CHAT,
+                     messages=mock_messages,
+                     model="gpt-4o-mini",
+                     max_retries=0
+                 )
+
+             assert mock_client.return_value.request_completions.call_count == 1
+             assert mock_sleep.call_count == 0  # No retries, no sleep calls
+             assert provider.last_error is not None
+
+     @pytest.mark.asyncio
+     async def test_provider_completion_exception_details(
+         self,
+         provider_config: Config,
+         mock_messages,
+         mock_sleep,
+         monkeypatch: pytest.MonkeyPatch
+     ):
+         """Test that ProviderCompletionException details are preserved in last_error."""
+         monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
+         monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
+
+         error_message = "Server returned an error: Rate limit exceeded"
+         provider_exception = ProviderCompletionException(error_message)
+
+         with (
+             patch_server_limits(),
+             mock_openai_client() as mock_client
+         ):
+             mock_client.return_value.request_completions = AsyncMock(side_effect=provider_exception)
+
+             provider = OpenAIProvider(config=provider_config)
+
+             # Should fail after retries
+             with pytest.raises(ProviderCompletionException):
+                 await provider.request_completions(
+                     message_type=MessageType.CHAT,
+                     messages=mock_messages,
+                     model="gpt-4o-mini",
+                     max_retries=2
+                 )
+
+             assert provider.last_error is not None
+             assert isinstance(provider.last_error, CompletionError)
+             assert error_message in provider.last_error.title
+
+     @pytest.mark.parametrize("scenario,side_effects,max_retries,expected_retry_logs,expected_error_logs,expected_success_logs", [
+         # Scenario 1: Success on first try
+         ("immediate_success", ["Success!"], 3, 0, 0, 1),
+
+         # Scenario 2: Fail once, then succeed
+         ("retry_then_success", [Exception("Test error"), "Success!"], 3, 1, 0, 1),
+
+         # Scenario 3: Fail twice, then succeed
+         ("retry_twice_then_success", [Exception("Test error"), Exception("Test error"), "Success!"], 3, 2, 0, 1),
+
+         # Scenario 4: Fail and never succeed (1 retry)
+         ("fail_after_one_retry", [Exception("Test error"), Exception("Test error")], 1, 1, 1, 0),
+
+         # Scenario 5: Fail and never succeed (2 retries)
+         ("fail_after_two_retries", [Exception("Test error"), Exception("Test error"), Exception("Test error")], 2, 2, 1, 0),
+
+         # Scenario 6: Fail and never succeed (3 retries)
+         ("fail_after_three_retries", [Exception("Test error"), Exception("Test error"), Exception("Test error"), Exception("Test error")], 3, 3, 1, 0),
+     ])
+     @pytest.mark.asyncio
+     async def test_logging_functions_comprehensive(
+         self,
+         scenario: str,
+         side_effects: list,
+         max_retries: int,
+         expected_retry_logs: int,
+         expected_error_logs: int,
+         expected_success_logs: int,
+         provider_config: Config,
+         mock_messages,
+         mock_sleep,
+         monkeypatch: pytest.MonkeyPatch,
+         capsys
+     ):
+         """Test comprehensive logging behavior for all retry scenarios."""
+         monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
+         monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
+
+         # Clear other API keys to ensure OpenAI path is used
+         monkeypatch.delenv("CLAUDE_API_KEY", raising=False)
+         monkeypatch.delenv("GEMINI_API_KEY", raising=False)
+         monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", None)
+         monkeypatch.setattr("mito_ai.constants.GEMINI_API_KEY", None)
+
+         # Enable print logs to capture telemetry output
+         monkeypatch.setattr("mito_ai.utils.telemetry_utils.PRINT_LOGS", True)
+
+         with patch_server_limits():
+             # Create a mock OpenAI client that will be used by the provider
+             mock_client = MagicMock()
+             mock_client.request_completions = AsyncMock(side_effect=side_effects)
+             mock_client.key_type = "user"
+
+             # Create the provider and set the mock client
+             provider = OpenAIProvider(config=provider_config)
+             provider._openai_client = mock_client
+
+             # Determine if we expect success or failure
+             will_succeed = any(isinstance(effect, str) for effect in side_effects)
+
+             if will_succeed:
+                 # Test successful completion
+                 result = await provider.request_completions(
+                     message_type=MessageType.CHAT,
+                     messages=mock_messages,
+                     model="gpt-4o-mini",
+                     max_retries=max_retries
+                 )
+
+                 # Verify we got the expected success result
+                 assert result == "Success!"
+                 assert provider.last_error is None
+             else:
+                 # Test failure after all retries
+                 with pytest.raises(Exception):
+                     await provider.request_completions(
+                         message_type=MessageType.CHAT,
+                         messages=mock_messages,
+                         model="gpt-4o-mini",
+                         max_retries=max_retries
+                     )
+
+                 # Verify error state was set
+                 assert provider.last_error is not None
+                 assert isinstance(provider.last_error, CompletionError)
+
+             # Capture the printed logs
+             captured = capsys.readouterr()
+             log_output = captured.out
+
+             # Count the different types of logs
+             retry_log_count = log_output.count("mito_ai_retry")
+             error_log_count = log_output.count("mito_ai_error")
+             success_log_count = log_output.count("mito_ai_chat_success")
+
+             # Verify logging function calls
+             assert retry_log_count == expected_retry_logs, f"Expected {expected_retry_logs} retry logs for scenario '{scenario}', got {retry_log_count}"
+             assert error_log_count == expected_error_logs, f"Expected {expected_error_logs} error logs for scenario '{scenario}', got {error_log_count}"
+             assert success_log_count == expected_success_logs, f"Expected {expected_success_logs} success logs for scenario '{scenario}', got {success_log_count}"
+
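Taken together, these assertions pin down the retry contract for OpenAIProvider.request_completions: the underlying client is called at most max_retries + 1 times, asyncio.sleep runs exactly once per retry, and last_error is set only when the final attempt fails (and cleared again on success). A minimal sketch of a loop satisfying that contract follows; the function name and delay parameter are hypothetical, and the real provider also emits mito_ai_retry / mito_ai_error / mito_ai_chat_success telemetry around these steps:

import asyncio
from typing import Any

async def request_with_retries(client: Any, max_retries: int, delay_s: float = 1.0, **kwargs: Any) -> Any:
    last_exception: Exception = Exception("unreachable")
    for attempt in range(max_retries + 1):  # max_retries + 1 total attempts
        try:
            # On success the provider clears last_error (and logs success telemetry)
            return await client.request_completions(**kwargs)
        except Exception as e:
            last_exception = e
            if attempt < max_retries:
                await asyncio.sleep(delay_s)  # exactly one sleep per retry, as the tests assert
    # Final failure: the provider records a CompletionError in last_error and re-raises
    raise last_exception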
mito_ai/tests/providers/utils.py (new file)
@@ -0,0 +1,85 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ from datetime import datetime
+ from typing import Any
+ from unittest.mock import AsyncMock, MagicMock, patch
+ from mito_ai.completions.models import AICapabilities
+
+ TODAY = datetime.now().strftime("%Y-%m-%d")
+
+ def patch_server_limits(is_pro: bool = False, completion_count: int = 1, first_date: str = TODAY) -> Any:
+     return patch.multiple(
+         "mito_ai.utils.server_limits",
+         get_chat_completion_count=MagicMock(return_value=completion_count),
+         get_first_completion_date=MagicMock(return_value=first_date),
+         is_pro=MagicMock(return_value=is_pro),
+         check_mito_server_quota=MagicMock(return_value=None),
+         update_mito_server_quota=MagicMock(return_value=None)
+     )
+
+
+ def patch_openai_model_list() -> Any:
+     mock_openai_instance = MagicMock()
+     mock_openai_instance.models.list.return_value = [MagicMock(id="gpt-4o-mini")]
+
+     # Patch the constructor call to return your mock instance
+     return patch("openai.OpenAI", return_value=mock_openai_instance)
+
+
+ def mock_openai_client() -> Any:
+     """Mock the OpenAI client with user key capabilities."""
+     mock_client = MagicMock()
+     mock_client.capabilities = AICapabilities(
+         configuration={"model": "gpt-4o-mini"},
+         provider="OpenAI with user key",
+         type="ai_capabilities"
+     )
+     mock_client.key_type = "user"
+     mock_client.request_completions = AsyncMock(return_value="Test completion")
+     mock_client.stream_completions = AsyncMock(return_value="Test completion")
+     return patch("mito_ai.completions.providers.OpenAIClient", return_value=mock_client)
+
+
+ def mock_gemini_client() -> Any:
+     """Mock the Gemini client capabilities."""
+     mock_client = MagicMock()
+     mock_client.capabilities = AICapabilities(
+         configuration={"model": "gemini-2-pro"},
+         provider="Gemini",
+         type="ai_capabilities"
+     )
+     mock_client.key_type = "gemini"
+     mock_client.request_completions = AsyncMock(return_value="Test completion")
+     mock_client.stream_completions = AsyncMock(return_value="Test completion")
+     return patch("mito_ai.completions.providers.GeminiClient", return_value=mock_client)
+
+
+ def mock_azure_openai_client() -> Any:
+     """Mock the Azure OpenAI client capabilities."""
+     mock_client = MagicMock()
+     mock_client.capabilities = AICapabilities(
+         configuration={"model": "gpt-4o"},
+         provider="Azure OpenAI",
+         type="ai_capabilities"
+     )
+     mock_client.key_type = "azure"
+     mock_client.request_completions = AsyncMock(return_value="Test completion")
+     mock_client.stream_completions = AsyncMock(return_value="Test completion")
+     return patch("mito_ai.completions.providers.OpenAIClient", return_value=mock_client)
+
+
+
+ def mock_claude_client() -> Any:
+     """Mock the Claude client capabilities."""
+     mock_client = MagicMock()
+     mock_client.capabilities = AICapabilities(
+         configuration={"model": "claude-3-opus-20240229"},
+         provider="Claude",
+         type="ai_capabilities"
+     )
+     mock_client.key_type = "claude"
+     mock_client.request_completions = AsyncMock(return_value="Test completion")
+     mock_client.stream_completions = AsyncMock(return_value="Test completion")
+     mock_client.stream_response = AsyncMock(return_value="Test completion")
+     return patch("mito_ai.completions.providers.AnthropicClient", return_value=mock_client)
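Each helper returns an unstarted patcher rather than applying the patch itself, so tests activate them in with-blocks and can re-stub individual methods per test. A minimal usage sketch mirroring the retry tests above (the test name and API key value are placeholders):

import pytest
from traitlets.config import Config
from mito_ai.completions.models import MessageType
from mito_ai.completions.providers import OpenAIProvider
from mito_ai.tests.providers.utils import mock_openai_client, patch_server_limits

@pytest.mark.asyncio
async def test_provider_uses_mocked_client(monkeypatch: pytest.MonkeyPatch) -> None:
    # Mirror the retry tests: configure a fake OpenAI key so the OpenAI path is chosen
    monkeypatch.setenv("OPENAI_API_KEY", "sk-1234567890")
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", "sk-1234567890")
    with patch_server_limits(), mock_openai_client() as mock_client:
        provider = OpenAIProvider(config=Config())
        result = await provider.request_completions(
            message_type=MessageType.CHAT,
            messages=[{"role": "user", "content": "hi"}],
            model="gpt-4o-mini",
        )
        # The stubbed client's AsyncMock default is returned unchanged
        assert result == "Test completion"
        assert mock_client.return_value.request_completions.call_count == 1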
mito_ai/tests/test_constants.py
@@ -4,16 +4,29 @@
  from typing import Any
  import pytest
  from mito_ai.constants import ACTIVE_BASE_URL, MITO_PROD_BASE_URL, MITO_DEV_BASE_URL
+ from mito_ai.constants import MITO_STREAMLIT_DEV_BASE_URL, MITO_STREAMLIT_TEST_BASE_URL, ACTIVE_STREAMLIT_BASE_URL
 
 
  def test_prod_lambda_url() -> Any:
      """Make sure that the lambda urls are correct"""
-     assert MITO_PROD_BASE_URL == "https://yxwyadgaznhavqvgnbfuo2k6ca0jboku.lambda-url.us-east-1.on.aws"
+     assert MITO_PROD_BASE_URL.startswith("https://7eax4i53f5odkshhlry4gw23by0yvnuv.lambda-url.us-east-1.on.aws/")
 
  def test_dev_lambda_url() -> Any:
      """Make sure that the lambda urls are correct"""
-     assert MITO_DEV_BASE_URL == "https://x3rafympznv4abp7phos44gzgu0clbui.lambda-url.us-east-1.on.aws"
+     assert MITO_DEV_BASE_URL.startswith("https://g5vwmogjg7gh7aktqezyrvcq6a0hyfnr.lambda-url.us-east-1.on.aws/")
 
  def test_active_base_url() -> Any:
      """Make sure that the active base url is correct"""
      assert ACTIVE_BASE_URL == MITO_PROD_BASE_URL
+
+ def test_devenv_streamlit_url() -> Any:
+     """Make sure that the streamlit urls are correct"""
+     assert MITO_STREAMLIT_DEV_BASE_URL == "https://fr12uvtfy5.execute-api.us-east-1.amazonaws.com"
+
+ def test_testenv_streamlit_url() -> Any:
+     """Make sure that the streamlit urls are correct"""
+     assert MITO_STREAMLIT_TEST_BASE_URL == "https://iyual08t6d.execute-api.us-east-1.amazonaws.com"
+
+ def test_streamlit_active_base_url() -> Any:
+     """Make sure that the active streamlit base url is correct"""
+     assert ACTIVE_STREAMLIT_BASE_URL == MITO_STREAMLIT_TEST_BASE_URL
mito_ai/tests/test_telemetry.py (new file)
@@ -0,0 +1,12 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ import pytest
+ from mito_ai.utils.telemetry_utils import PRINT_LOGS
+
+ def test_print_logs_is_false():
+     """
+     Test to ensure that PRINT_LOGS is set to False.
+     """
+     assert not PRINT_LOGS, "PRINT_LOGS should be False by default."
+
mito_ai/utils/anthropic_utils.py
@@ -8,6 +8,8 @@ import anthropic
  from typing import Any, Dict, List, Optional, Union, AsyncGenerator, Tuple, Callable, cast
 
  from anthropic.types import MessageParam, Message, TextBlock, ToolUnionParam
+ from mito_ai.utils.mito_server_utils import get_response_from_mito_server
+ from mito_ai.utils.provider_utils import does_message_require_fast_model
  from openai.types.chat import ChatCompletionMessageParam
  from mito_ai.completions.models import AgentResponse, MessageType, ResponseFormatInfo, CompletionReply, CompletionStreamChunk, CompletionItem
  from mito_ai.utils.schema import UJ_STATIC_USER_ID, UJ_USER_EMAIL
@@ -23,7 +25,8 @@ __user_id: Optional[str] = None
 
  timeout = 30
  max_retries = 1
- INLINE_COMPLETION_MODEL = "claude-3-5-haiku-latest"
+
+ FAST_ANTHROPIC_MODEL = "claude-3-5-haiku-latest"
 
  def _prepare_anthropic_request_data_and_headers(
      model: Union[str, None],
@@ -36,7 +39,7 @@ def _prepare_anthropic_request_data_and_headers(
      tool_choice: Optional[dict],
      stream: Optional[bool]
  ) -> Tuple[Dict[str, Any], Dict[str, str]]:
-     check_mito_server_quota(message_type)
+
      global __user_email, __user_id
      if __user_email is None:
          __user_email = get_user_field(UJ_USER_EMAIL)
@@ -78,31 +81,20 @@ async def get_anthropic_completion_from_mito_server(
      tools: Optional[List[ToolUnionParam]],
      tool_choice: Optional[dict],
      message_type: MessageType
- ) -> Message:
+ ) -> str:
      data, headers = _prepare_anthropic_request_data_and_headers(
          model, max_tokens, temperature, system, messages, message_type, tools, tool_choice, None
      )
-     http_client, http_client_timeout = _create_http_client(timeout, max_retries)
-     start_time = time.time()
-     try:
-         res = await http_client.fetch(
-             MITO_ANTHROPIC_URL,
-             method="POST",
-             headers=headers,
-             body=json.dumps(data),
-             request_timeout=http_client_timeout
-         )
-         print(f"Anthropic request completed in {time.time() - start_time:.2f} seconds")
-     except Exception as e:
-         print(f"Anthropic request failed after {time.time() - start_time:.2f} seconds with error: {str(e)}")
-         raise
-     finally:
-         http_client.close()
-     content = json.loads(res.body)
-     # If the response is wrapped in a 'data' field, extract it
-     if isinstance(content, dict) and "data" in content:
-         return cast(Message, content["data"])
-     return cast(Message, content)
+
+     return await get_response_from_mito_server(
+         MITO_ANTHROPIC_URL,
+         headers,
+         data,
+         timeout,
+         max_retries,
+         message_type,
+         provider_name="Claude"
+     )
 
  async def stream_anthropic_completion_from_mito_server(
      model: Union[str, None],
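The inline Tornado fetch removed above now lives in get_response_from_mito_server (see mito_ai/utils/mito_server_utils.py, +92 in the file list). Its body isn't shown in this diff; judging from the code it replaces and the ProviderCompletionException the new tests import from the same module, it plausibly looks something like this sketch (the error-payload handling and exact signature details are assumptions):

import json
import time
from typing import Any, Dict

from tornado.httpclient import AsyncHTTPClient

from mito_ai.completions.models import MessageType
from mito_ai.utils.mito_server_utils import ProviderCompletionException

async def get_response_from_mito_server(
    url: str,
    headers: Dict[str, str],
    data: Dict[str, Any],
    timeout: int,
    max_retries: int,
    message_type: MessageType,
    provider_name: str,
) -> Any:
    http_client = AsyncHTTPClient()
    start_time = time.time()
    try:
        res = await http_client.fetch(
            url, method="POST", headers=headers,
            body=json.dumps(data), request_timeout=timeout,
        )
        print(f"{provider_name} request completed in {time.time() - start_time:.2f} seconds")
    finally:
        http_client.close()
    content = json.loads(res.body)
    if isinstance(content, dict) and "error" in content:
        # Assumption: server-reported errors surface as ProviderCompletionException
        raise ProviderCompletionException(f"{provider_name}: {content['error']}")
    if isinstance(content, dict) and "data" in content:
        # Unwrap the optional 'data' envelope, as the removed code did
        return content["data"]
    return content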
@@ -189,6 +181,7 @@ async def stream_anthropic_completion_from_mito_server(
          http_client.close()
 
  def get_anthropic_completion_function_params(
+     message_type: MessageType,
      model: str,
      messages: List[MessageParam],
      max_tokens: int,
@@ -203,6 +196,10 @@ def get_anthropic_completion_function_params(
      Build the provider_data dict for Anthropic completions, mirroring the OpenAI approach.
      Only includes fields needed for the Anthropic API.
      """
+
+     message_requires_fast_model = does_message_require_fast_model(message_type)
+     model = FAST_ANTHROPIC_MODEL if message_requires_fast_model else model
+
      provider_data = {
          "model": model,
          "max_tokens": max_tokens,
@@ -210,11 +207,6 @@ def get_anthropic_completion_function_params(
          "messages": messages,
          "system": system,
      }
-     if response_format_info is not None:
-         # TODO: This should not be here.. the model is resolved in the anthropic client.
-         # This also means that chat is using the fast model...
-         # I bet the same bug exists in gemini...
-         provider_data["model"] = INLINE_COMPLETION_MODEL
      if tools:
          provider_data["tools"] = tools
      if response_format_info and response_format_info.name == "agent_response":
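does_message_require_fast_model (from the new mito_ai/utils/provider_utils.py, +49 in the file list) replaces the response_format_info check deleted above, so the fast model is now resolved from the message type rather than the response format, which is what the removed TODO asked for. Its implementation is not part of this diff; a plausible sketch, with the set of fast message types assumed:

from mito_ai.completions.models import MessageType

def does_message_require_fast_model(message_type: MessageType) -> bool:
    # Assumption: latency-sensitive traffic (inline completions fired on each
    # keystroke, short chat-name generation) is routed to the fast model,
    # e.g. FAST_ANTHROPIC_MODEL ("claude-3-5-haiku-latest") for Anthropic.
    return message_type in (MessageType.INLINE_COMPLETION, MessageType.CHAT_NAME_GENERATION)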