mito-ai 0.1.33__py3-none-any.whl → 0.1.49__py3-none-any.whl

This diff shows the changes between publicly available package versions as published to a supported public registry. It is provided for informational purposes only.
Files changed (146)
  1. mito_ai/__init__.py +49 -9
  2. mito_ai/_version.py +1 -1
  3. mito_ai/anthropic_client.py +142 -67
  4. mito_ai/{app_builder → app_deploy}/__init__.py +1 -1
  5. mito_ai/app_deploy/app_deploy_utils.py +44 -0
  6. mito_ai/app_deploy/handlers.py +345 -0
  7. mito_ai/{app_builder → app_deploy}/models.py +35 -22
  8. mito_ai/app_manager/__init__.py +4 -0
  9. mito_ai/app_manager/handlers.py +167 -0
  10. mito_ai/app_manager/models.py +71 -0
  11. mito_ai/app_manager/utils.py +24 -0
  12. mito_ai/auth/README.md +18 -0
  13. mito_ai/auth/__init__.py +6 -0
  14. mito_ai/auth/handlers.py +96 -0
  15. mito_ai/auth/urls.py +13 -0
  16. mito_ai/chat_history/handlers.py +63 -0
  17. mito_ai/chat_history/urls.py +32 -0
  18. mito_ai/completions/completion_handlers/agent_execution_handler.py +1 -1
  19. mito_ai/completions/completion_handlers/chat_completion_handler.py +4 -4
  20. mito_ai/completions/completion_handlers/utils.py +99 -37
  21. mito_ai/completions/handlers.py +57 -20
  22. mito_ai/completions/message_history.py +9 -1
  23. mito_ai/completions/models.py +31 -7
  24. mito_ai/completions/prompt_builders/agent_execution_prompt.py +21 -2
  25. mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +8 -0
  26. mito_ai/completions/prompt_builders/agent_system_message.py +115 -42
  27. mito_ai/completions/prompt_builders/chat_name_prompt.py +6 -6
  28. mito_ai/completions/prompt_builders/chat_prompt.py +18 -11
  29. mito_ai/completions/prompt_builders/chat_system_message.py +4 -0
  30. mito_ai/completions/prompt_builders/prompt_constants.py +23 -4
  31. mito_ai/completions/prompt_builders/utils.py +72 -10
  32. mito_ai/completions/providers.py +81 -47
  33. mito_ai/constants.py +25 -24
  34. mito_ai/file_uploads/__init__.py +3 -0
  35. mito_ai/file_uploads/handlers.py +248 -0
  36. mito_ai/file_uploads/urls.py +21 -0
  37. mito_ai/gemini_client.py +44 -48
  38. mito_ai/log/handlers.py +10 -3
  39. mito_ai/log/urls.py +3 -3
  40. mito_ai/openai_client.py +30 -44
  41. mito_ai/path_utils.py +70 -0
  42. mito_ai/streamlit_conversion/agent_utils.py +37 -0
  43. mito_ai/streamlit_conversion/prompts/prompt_constants.py +172 -0
  44. mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
  45. mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +46 -0
  46. mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
  47. mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +45 -0
  48. mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py +56 -0
  49. mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
  50. mito_ai/streamlit_conversion/search_replace_utils.py +94 -0
  51. mito_ai/streamlit_conversion/streamlit_agent_handler.py +144 -0
  52. mito_ai/streamlit_conversion/streamlit_utils.py +85 -0
  53. mito_ai/streamlit_conversion/validate_streamlit_app.py +105 -0
  54. mito_ai/streamlit_preview/__init__.py +6 -0
  55. mito_ai/streamlit_preview/handlers.py +111 -0
  56. mito_ai/streamlit_preview/manager.py +152 -0
  57. mito_ai/streamlit_preview/urls.py +22 -0
  58. mito_ai/streamlit_preview/utils.py +29 -0
  59. mito_ai/tests/chat_history/test_chat_history.py +211 -0
  60. mito_ai/tests/completions/completion_handlers_utils_test.py +190 -0
  61. mito_ai/tests/deploy_app/test_app_deploy_utils.py +89 -0
  62. mito_ai/tests/file_uploads/__init__.py +2 -0
  63. mito_ai/tests/file_uploads/test_handlers.py +282 -0
  64. mito_ai/tests/message_history/test_generate_short_chat_name.py +0 -4
  65. mito_ai/tests/message_history/test_message_history_utils.py +103 -23
  66. mito_ai/tests/open_ai_utils_test.py +18 -22
  67. mito_ai/tests/providers/test_anthropic_client.py +447 -0
  68. mito_ai/tests/providers/test_azure.py +2 -6
  69. mito_ai/tests/providers/test_capabilities.py +120 -0
  70. mito_ai/tests/{test_gemini_client.py → providers/test_gemini_client.py} +40 -36
  71. mito_ai/tests/providers/test_mito_server_utils.py +448 -0
  72. mito_ai/tests/providers/test_model_resolution.py +130 -0
  73. mito_ai/tests/providers/test_openai_client.py +57 -0
  74. mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
  75. mito_ai/tests/providers/test_provider_limits.py +42 -0
  76. mito_ai/tests/providers/test_providers.py +382 -0
  77. mito_ai/tests/providers/test_retry_logic.py +389 -0
  78. mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
  79. mito_ai/tests/providers/utils.py +85 -0
  80. mito_ai/tests/streamlit_conversion/__init__.py +3 -0
  81. mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +240 -0
  82. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +246 -0
  83. mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +193 -0
  84. mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +112 -0
  85. mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +118 -0
  86. mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +292 -0
  87. mito_ai/tests/test_constants.py +31 -3
  88. mito_ai/tests/test_telemetry.py +12 -0
  89. mito_ai/tests/user/__init__.py +2 -0
  90. mito_ai/tests/user/test_user.py +120 -0
  91. mito_ai/tests/utils/test_anthropic_utils.py +6 -6
  92. mito_ai/user/handlers.py +45 -0
  93. mito_ai/user/urls.py +21 -0
  94. mito_ai/utils/anthropic_utils.py +55 -121
  95. mito_ai/utils/create.py +17 -1
  96. mito_ai/utils/error_classes.py +42 -0
  97. mito_ai/utils/gemini_utils.py +39 -94
  98. mito_ai/utils/message_history_utils.py +7 -4
  99. mito_ai/utils/mito_server_utils.py +242 -0
  100. mito_ai/utils/open_ai_utils.py +38 -155
  101. mito_ai/utils/provider_utils.py +49 -0
  102. mito_ai/utils/server_limits.py +1 -1
  103. mito_ai/utils/telemetry_utils.py +137 -5
  104. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -100
  105. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/package.json +4 -2
  106. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +3 -1
  107. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +2 -2
  108. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.281f4b9af60d620c6fb1.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js +15948 -8403
  109. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js.map +1 -0
  110. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
  111. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
  112. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4f1d00fd0c58fcc05d8d.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.8b24b5b3b93f95205b56.js +58 -33
  113. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.8b24b5b3b93f95205b56.js.map +1 -0
  114. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +10 -2
  115. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
  116. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
  117. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
  118. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
  119. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
  120. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
  121. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
  122. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
  123. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
  124. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
  125. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
  126. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2 -240
  127. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
  128. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/METADATA +5 -2
  129. mito_ai-0.1.49.dist-info/RECORD +205 -0
  130. mito_ai/app_builder/handlers.py +0 -218
  131. mito_ai/tests/providers_test.py +0 -438
  132. mito_ai/tests/test_anthropic_client.py +0 -270
  133. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.281f4b9af60d620c6fb1.js.map +0 -1
  134. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4f1d00fd0c58fcc05d8d.js.map +0 -1
  135. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js.map +0 -1
  136. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js +0 -7842
  137. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js.map +0 -1
  138. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -1
  139. mito_ai-0.1.33.dist-info/RECORD +0 -134
  140. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  141. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  142. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  143. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  144. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/WHEEL +0 -0
  145. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/entry_points.txt +0 -0
  146. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/licenses/LICENSE +0 -0
mito_ai/tests/providers/test_anthropic_client.py
@@ -0,0 +1,447 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ import pytest
+ from mito_ai.anthropic_client import get_anthropic_system_prompt_and_messages, get_anthropic_system_prompt_and_messages_with_caching, add_cache_control_to_message, extract_and_parse_anthropic_json_response, AnthropicClient
+ from mito_ai.utils.anthropic_utils import FAST_ANTHROPIC_MODEL
+ from anthropic.types import Message, TextBlock, ToolUseBlock, Usage, ToolUseBlock, Message, Usage, TextBlock
+ from openai.types.chat import ChatCompletionMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam, ChatCompletionSystemMessageParam
+ from mito_ai.completions.models import MessageType
+ from unittest.mock import patch
+ import anthropic
+ from typing import List, Dict, cast
+
+
+ # Dummy base64 image (1x1 PNG)
+ DUMMY_IMAGE_DATA_URL = (
+     "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/wcAAgMBAp9l9AAAAABJRU5ErkJggg=="
+ )
+
+ def test_mixed_text_and_image():
+     messages: List[ChatCompletionMessageParam] = [
+         ChatCompletionSystemMessageParam(role="system", content="You are a helpful assistant."),
+         ChatCompletionUserMessageParam(role="user", content=[
+             {"type": "text", "text": "Here is an image:"},
+             {"type": "image_url", "image_url": {"url": DUMMY_IMAGE_DATA_URL}}
+         ])
+     ]
+     system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages(messages)
+
+     assert system_prompt == "You are a helpful assistant."
+     assert len(anthropic_messages) == 1
+     message = anthropic_messages[0]
+     assert message["role"] == "user"
+     content = message["content"]
+     assert isinstance(content, list)
+     assert len(content) == 2
+
+     # Check text content
+     text_block = cast(Dict[str, str], content[0])
+     assert text_block["type"] == "text"
+     assert text_block["text"] == "Here is an image:"
+
+     # Check image content
+     image_block = cast(Dict[str, Dict[str, str]], content[1])
+     assert image_block["type"] == "image"
+     assert image_block["source"]["type"] == "base64"
+     assert image_block["source"]["media_type"] == "image/png"
+
+ def test_no_system_instructions_only_content():
+     messages: List[ChatCompletionMessageParam] = [
+         ChatCompletionUserMessageParam(role="user", content="Hello!"),
+         ChatCompletionAssistantMessageParam(role="assistant", content="Hi, how can I help you?")
+     ]
+     system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages(messages)
+
+     assert isinstance(system_prompt, anthropic.Omit)
+     assert len(anthropic_messages) == 2
+     assert anthropic_messages[0]["role"] == "user"
+     assert anthropic_messages[0]["content"] == "Hello!"
+     assert anthropic_messages[1]["role"] == "assistant"
+     assert anthropic_messages[1]["content"] == "Hi, how can I help you?"
+
+ def test_system_instructions_and_content():
+     messages: List[ChatCompletionMessageParam] = [
+         ChatCompletionSystemMessageParam(role="system", content="You are a helpful assistant."),
+         ChatCompletionUserMessageParam(role="user", content="What is the weather today?")
+     ]
+     system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages(messages)
+
+     assert system_prompt == "You are a helpful assistant."
+     assert len(anthropic_messages) == 1
+     assert anthropic_messages[0]["role"] == "user"
+     assert anthropic_messages[0]["content"] == "What is the weather today?"
+
+ def test_multiple_system_messages():
+     messages: List[ChatCompletionMessageParam] = [
+         ChatCompletionSystemMessageParam(role="system", content="First system message."),
+         ChatCompletionSystemMessageParam(role="system", content="Second system message."),
+         ChatCompletionUserMessageParam(role="user", content="Hello!")
+     ]
+     system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages(messages)
+
+     # Should take the last system message
+     assert system_prompt == "Second system message."
+     assert len(anthropic_messages) == 1
+     assert anthropic_messages[0]["role"] == "user"
+     assert anthropic_messages[0]["content"] == "Hello!"
+
+ def test_empty_message_content():
+     messages: List[ChatCompletionMessageParam] = [
+         cast(ChatCompletionMessageParam, {"role": "user"}), # Missing content
+         ChatCompletionAssistantMessageParam(role="assistant", content="Hi!")
+     ]
+     system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages(messages)
+
+     assert isinstance(system_prompt, anthropic.Omit)
+     assert len(anthropic_messages) == 1 # Should skip the message with missing content
+     assert anthropic_messages[0]["role"] == "assistant"
+     assert anthropic_messages[0]["content"] == "Hi!"
+
+ def test_extract_json_from_tool_use():
+     # Create a mock response with tool use
+     tool_use_block = ToolUseBlock(
+         type="tool_use",
+         id="test_id",
+         name="agent_response",
+         input={"key": "value"}
+     )
+     response = Message(
+         id="test_id",
+         role="assistant",
+         content=[tool_use_block],
+         model="claude-3-opus-20240229",
+         type="message",
+         usage=Usage(input_tokens=0, output_tokens=0)
+     )
+
+     result = extract_and_parse_anthropic_json_response(response)
+     assert result == {"key": "value"}
+
+ def test_extract_json_from_text():
+     # Create a mock response with JSON in text
+     text_block = TextBlock(
+         type="text",
+         text='Here is some JSON: {"key": "value"}'
+     )
+     response = Message(
+         id="test_id",
+         role="assistant",
+         content=[text_block],
+         model="claude-3-opus-20240229",
+         type="message",
+         usage=Usage(input_tokens=0, output_tokens=0)
+     )
+
+     result = extract_and_parse_anthropic_json_response(response)
+     assert result == {"key": "value"}
+
+ def test_extract_json_from_text_with_multiple_blocks():
+     # Create a mock response with multiple text blocks
+     text_block1 = TextBlock(
+         type="text",
+         text='Here is the JSON: {"key": "value"}' # Put JSON in first block since that's what the implementation checks
+     )
+     text_block2 = TextBlock(
+         type="text",
+         text="Some text after JSON"
+     )
+     response = Message(
+         id="test_id",
+         role="assistant",
+         content=[text_block1, text_block2],
+         model="claude-3-opus-20240229",
+         type="message",
+         usage=Usage(input_tokens=0, output_tokens=0)
+     )
+
+     result = extract_and_parse_anthropic_json_response(response)
+     assert result == {"key": "value"}
+
+ def test_invalid_json_in_text():
+     # Create a mock response with invalid JSON in text
+     text_block = TextBlock(
+         type="text",
+         text='Here is invalid JSON: {"key": value}'
+     )
+     response = Message(
+         id="test_id",
+         role="assistant",
+         content=[text_block],
+         model="claude-3-opus-20240229",
+         type="message",
+         usage=Usage(input_tokens=0, output_tokens=0)
+     )
+
+     with pytest.raises(Exception) as exc_info:
+         extract_and_parse_anthropic_json_response(response)
+     assert "No valid AgentResponse format found" in str(exc_info.value)
+
+ def test_no_json_in_text():
+     # Create a mock response with no JSON in text
+     text_block = TextBlock(
+         type="text",
+         text="This is just plain text with no JSON"
+     )
+     response = Message(
+         id="test_id",
+         role="assistant",
+         content=[text_block],
+         model="claude-3-opus-20240229",
+         type="message",
+         usage=Usage(input_tokens=0, output_tokens=0)
+     )
+
+     with pytest.raises(Exception) as exc_info:
+         extract_and_parse_anthropic_json_response(response)
+     assert "No valid AgentResponse format found" in str(exc_info.value)
+
+ def test_empty_content():
+     # Create a mock response with empty content
+     response = Message(
+         id="test_id",
+         role="assistant",
+         content=[],
+         model="claude-3-opus-20240229",
+         type="message",
+         usage=Usage(input_tokens=0, output_tokens=0)
+     )
+
+     with pytest.raises(Exception) as exc_info:
+         extract_and_parse_anthropic_json_response(response)
+     assert "No valid AgentResponse format found" in str(exc_info.value)
+
+ def test_tool_use_without_agent_response():
+     # Create a mock response with tool use but not agent_response
+     tool_use_block = ToolUseBlock(
+         type="tool_use",
+         id="test_id",
+         name="other_tool",
+         input={"key": "value"}
+     )
+     response = Message(
+         id="test_id",
+         role="assistant",
+         content=[tool_use_block],
+         model="claude-3-opus-20240229",
+         type="message",
+         usage=Usage(input_tokens=0, output_tokens=0)
+     )
+
+     with pytest.raises(Exception) as exc_info:
+         extract_and_parse_anthropic_json_response(response)
+     assert "No valid AgentResponse format found" in str(exc_info.value)
+
+ CUSTOM_MODEL = "smart-anthropic-model"
+ @pytest.mark.parametrize("message_type, expected_model", [
+     (MessageType.CHAT, CUSTOM_MODEL), #
+     (MessageType.SMART_DEBUG, CUSTOM_MODEL), #
+     (MessageType.CODE_EXPLAIN, CUSTOM_MODEL), #
+     (MessageType.AGENT_EXECUTION, CUSTOM_MODEL), #
+     (MessageType.AGENT_AUTO_ERROR_FIXUP, CUSTOM_MODEL), #
+     (MessageType.INLINE_COMPLETION, FAST_ANTHROPIC_MODEL), #
+     (MessageType.CHAT_NAME_GENERATION, FAST_ANTHROPIC_MODEL), #
+ ])
+ @pytest.mark.asyncio
+ async def test_model_selection_based_on_message_type(message_type, expected_model):
+     """
+     Tests that the correct model is selected based on the message type.
+     """
+     client = AnthropicClient(api_key="test_key")
+
+     # Mock the messages.create method directly
+     with patch.object(client.client.messages, 'create') as mock_create: # type: ignore
+         # Create a mock response
+         mock_response = Message(
+             id="test_id",
+             role="assistant",
+             content=[TextBlock(type="text", text="test")],
+             model='anthropic-model-we-do-not-check',
+             type="message",
+             usage=Usage(input_tokens=0, output_tokens=0)
+         )
+         mock_create.return_value = mock_response
+
+         await client.request_completions(
+             messages=[{"role": "user", "content": "Test message"}],
+             model=CUSTOM_MODEL,
+             message_type=message_type,
+             response_format_info=None
+         )
+
+         # Verify that create was called with the expected model
+         mock_create.assert_called_once()
+         call_args = mock_create.call_args
+         assert call_args[1]['model'] == expected_model
+
+
+ # Caching Tests
+
+ @pytest.mark.parametrize("message,expected_role,expected_content_type,expected_content_length,expected_cache_control", [
+     # String content message
+     (
+         {"role": "user", "content": "Hello world"},
+         "user",
+         list,
+         1,
+         True
+     ),
+     # List content message
+     (
+         {
+             "role": "user",
+             "content": [
+                 {"type": "text", "text": "First part"},
+                 {"type": "text", "text": "Second part"}
+             ]
+         },
+         "user",
+         list,
+         2,
+         True
+     ),
+     # Empty content message
+     (
+         {"role": "user", "content": []},
+         "user",
+         list,
+         0,
+         False
+     ),
+     # Assistant message with string content
+     (
+         {"role": "assistant", "content": "I can help you with that."},
+         "assistant",
+         list,
+         1,
+         True
+     ),
+ ])
+ def test_add_cache_control_to_message(message, expected_role, expected_content_type, expected_content_length, expected_cache_control):
+     """Test adding cache control to different types of messages."""
+     result = add_cache_control_to_message(message)
+
+     assert result["role"] == expected_role
+     assert isinstance(result["content"], expected_content_type)
+     assert len(result["content"]) == expected_content_length
+
+     if expected_cache_control and expected_content_length > 0:
+         # Should have cache_control on the last content block
+         last_block = result["content"][-1]
+         assert last_block["cache_control"] == {"type": "ephemeral"}
+
+         # If there are multiple blocks, earlier blocks should not have cache_control
+         if expected_content_length > 1:
+             for i in range(expected_content_length - 1):
+                 assert "cache_control" not in result["content"][i]
+     elif expected_content_length == 0:
+         # Empty content should return unchanged
+         assert result == message
+
+
+ @pytest.mark.parametrize("messages,expected_system_type,expected_system_content", [
+     # With system prompt
+     (
+         [
+             ChatCompletionSystemMessageParam(role="system", content="You are a helpful assistant."),
+             ChatCompletionUserMessageParam(role="user", content="Hello!")
+         ],
+         list,
+         "You are a helpful assistant.",
+     ),
+     # Without system prompt
+     (
+         [
+             ChatCompletionUserMessageParam(role="user", content="Hello!"),
+             ChatCompletionAssistantMessageParam(role="assistant", content="Hi there!")
+         ],
+         anthropic.Omit,
+         None,
+     ),
+     # Multiple system messages (should take last one)
+     (
+         [
+             ChatCompletionSystemMessageParam(role="system", content="First system message."),
+             ChatCompletionSystemMessageParam(role="system", content="Second system message."),
+             ChatCompletionUserMessageParam(role="user", content="Hello!"),
+             ChatCompletionUserMessageParam(role="user", content="Hello!"),
+             ChatCompletionUserMessageParam(role="user", content="Hello!")
+         ],
+         list,
+         "Second system message.",
+     ),
+ ])
+ def test_caching_system_prompt_scenarios(messages, expected_system_type, expected_system_content):
+     """Test caching with different system prompt scenarios."""
+     system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages_with_caching(messages)
+
+     # Check system prompt
+     assert isinstance(system_prompt, expected_system_type)
+     if expected_system_content:
+         assert system_prompt[0]["text"] == expected_system_content
+         assert system_prompt[0]["cache_control"] == {"type": "ephemeral"}
+
+
+ @pytest.mark.parametrize("message_count,expected_cache_boundary", [
+     (1, None), # 1 message, No cache boundary
+     (3, None), # 3 messages, No cache boundary
+     (5, 1), # 5 messages, cache at index 2
+     (10, 6), # 10 messages, cache at index 6
+ ])
+ def test_caching_conversation_history(message_count, expected_cache_boundary):
+     """Test that conversation history is cached at the keep_recent boundary for different message counts."""
+
+     # Create messages based on the parameter
+     messages: List[ChatCompletionMessageParam] = [
+         ChatCompletionSystemMessageParam(role="system", content="You are helpful.")
+     ]
+
+     # Add message pairs
+     for i in range(message_count):
+         messages.append(ChatCompletionUserMessageParam(role="user", content=f"Message {i+1}"))
+
+     system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages_with_caching(messages)
+
+     # System prompt should have cache control
+     assert isinstance(system_prompt, list)
+     assert system_prompt[0]["cache_control"] == {"type": "ephemeral"}
+
+     print(anthropic_messages)
+
+     if expected_cache_boundary is None:
+         # Verify no cache boundry
+         assert all("cache_control" not in str(message) for message in anthropic_messages)
+     else:
+         # Other messages should not have cache control
+         for i, message in enumerate(anthropic_messages):
+             if i == expected_cache_boundary:
+                 assert anthropic_messages[expected_cache_boundary]["content"][0]["cache_control"] == {"type": "ephemeral"}
+             else:
+                 assert "cache_control" not in str(message)
+
+ def test_caching_with_mixed_content():
+     """Test caching with mixed text and image content."""
+     messages: List[ChatCompletionMessageParam] = [
+         ChatCompletionSystemMessageParam(role="system", content="You are a helpful assistant."),
+         ChatCompletionUserMessageParam(role="user", content=[
+             {"type": "text", "text": "Here is an image:"},
+             {"type": "image_url", "image_url": {"url": DUMMY_IMAGE_DATA_URL}}
+         ])
+     ]
+     system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages_with_caching(messages)
+
+     # System prompt should have cache control
+     assert isinstance(system_prompt, list)
+     assert system_prompt[0]["cache_control"] == {"type": "ephemeral"}
+
+     # User message should NOT have cache control (only 1 message, so boundary is invalid)
+     user_message = anthropic_messages[0]
+     assert user_message["role"] == "user"
+     assert isinstance(user_message["content"], list)
+     assert len(user_message["content"]) == 2
+
+     # No content blocks should have cache control (too few messages to cache)
+     assert user_message["content"][0]["type"] == "text"
+     assert "cache_control" not in user_message["content"][0]
+     assert user_message["content"][1]["type"] == "image"
+     assert "cache_control" not in user_message["content"][1]
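The file above centers on two helpers: get_anthropic_system_prompt_and_messages, which converts OpenAI-style ChatCompletion message dicts into the (system prompt, message list) pair the Anthropic SDK expects, and its _with_caching variant, which additionally attaches cache_control markers. A minimal usage sketch follows, mirroring the assertions in the tests above; it assumes mito-ai 0.1.49 plus the openai and anthropic packages are installed, and these helpers are internal to the package rather than a documented public API.

# Minimal sketch of the conversion the tests above exercise. The helper names
# come from the diff; treat the surrounding usage as illustrative only.
from typing import List

import anthropic
from openai.types.chat import (
    ChatCompletionMessageParam,
    ChatCompletionSystemMessageParam,
    ChatCompletionUserMessageParam,
)

from mito_ai.anthropic_client import (
    get_anthropic_system_prompt_and_messages,
    get_anthropic_system_prompt_and_messages_with_caching,
)

messages: List[ChatCompletionMessageParam] = [
    ChatCompletionSystemMessageParam(role="system", content="You are a helpful assistant."),
    ChatCompletionUserMessageParam(role="user", content="What is the weather today?"),
]

# The system message is lifted out; the remaining messages keep their roles.
system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages(messages)
assert system_prompt == "You are a helpful assistant."
assert anthropic_messages[0]["role"] == "user"
assert anthropic_messages[0]["content"] == "What is the weather today?"

# With no system message, the prompt comes back as anthropic.Omit.
system_prompt, _ = get_anthropic_system_prompt_and_messages(
    [ChatCompletionUserMessageParam(role="user", content="Hello!")]
)
assert isinstance(system_prompt, anthropic.Omit)

# The caching variant returns the system prompt as a list of blocks, with
# cache_control on the final block (see the caching tests above).
cached_system, _ = get_anthropic_system_prompt_and_messages_with_caching(messages)
assert cached_system[0]["text"] == "You are a helpful assistant."
assert cached_system[0]["cache_control"] == {"type": "ephemeral"}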
mito_ai/tests/providers/test_azure.py
@@ -176,15 +176,11 @@ class TestAzureOpenAIClientCreation:
          openai_client = OpenAIClient(config=provider_config)

          # Test with gpt-4.1 model
-         resolved_model = openai_client._resolve_model("gpt-4.1")
+         resolved_model = openai_client._adjust_model_for_azure_or_ollama("gpt-4.1")
          assert resolved_model == FAKE_AZURE_MODEL

          # Test with any other model
-         resolved_model = openai_client._resolve_model("gpt-3.5-turbo")
-         assert resolved_model == FAKE_AZURE_MODEL
-
-         # Test with no model specified
-         resolved_model = openai_client._resolve_model()
+         resolved_model = openai_client._adjust_model_for_azure_or_ollama("gpt-3.5-turbo")
          assert resolved_model == FAKE_AZURE_MODEL


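The only behavior this hunk pins down is the rename of _resolve_model to _adjust_model_for_azure_or_ollama and the fact that, with Azure OpenAI configured, every requested model resolves to the configured Azure deployment; the no-argument call from 0.1.33 is dropped. A rough sketch of that contract follows, in which the class and method body are hypothetical and only the method name and the assertions come from the diff.

# Illustrative reconstruction of the contract the updated test asserts;
# this is not the mito_ai OpenAIClient implementation.
class AzureModelAdjusterSketch:
    def __init__(self, azure_deployment: str):
        # Stands in for the deployment behind FAKE_AZURE_MODEL in the test.
        self.azure_deployment = azure_deployment

    def _adjust_model_for_azure_or_ollama(self, requested_model: str) -> str:
        # Whatever model the caller requests ("gpt-4.1", "gpt-3.5-turbo", ...),
        # an Azure-configured client answers with its single configured deployment.
        return self.azure_deployment

sketch = AzureModelAdjusterSketch("my-azure-deployment")
assert sketch._adjust_model_for_azure_or_ollama("gpt-4.1") == "my-azure-deployment"
assert sketch._adjust_model_for_azure_or_ollama("gpt-3.5-turbo") == "my-azure-deployment"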
mito_ai/tests/providers/test_capabilities.py
@@ -0,0 +1,120 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ import pytest
+ from unittest.mock import MagicMock, patch
+ from mito_ai.completions.providers import OpenAIProvider
+ from mito_ai.tests.providers.utils import mock_azure_openai_client, mock_openai_client, patch_server_limits
+ from traitlets.config import Config
+
+ FAKE_API_KEY = "sk-1234567890"
+
+ @pytest.fixture
+ def provider_config() -> Config:
+     """Create a proper Config object for the OpenAIProvider."""
+     config = Config()
+     config.OpenAIProvider = Config()
+     config.OpenAIClient = Config()
+     return config
+
+ @pytest.mark.parametrize("test_case", [
+     {
+         "name": "mito_server_fallback_no_keys",
+         "setup": {
+             "OPENAI_API_KEY": None,
+             "CLAUDE_API_KEY": None,
+             "GEMINI_API_KEY": None,
+             "is_azure_configured": False,
+         },
+         "expected_provider": "Mito server",
+         "expected_key_type": "mito_server_key"
+     },
+     {
+         "name": "claude_when_only_claude_key",
+         "setup": {
+             "OPENAI_API_KEY": None,
+             "CLAUDE_API_KEY": "claude-test-key",
+             "GEMINI_API_KEY": None,
+             "is_azure_configured": False,
+         },
+         "expected_provider": "Claude",
+         "expected_key_type": "claude"
+     },
+     {
+         "name": "gemini_when_only_gemini_key",
+         "setup": {
+             "OPENAI_API_KEY": None,
+             "CLAUDE_API_KEY": None,
+             "GEMINI_API_KEY": "gemini-test-key",
+             "is_azure_configured": False,
+         },
+         "expected_provider": "Gemini",
+         "expected_key_type": "gemini"
+     },
+     {
+         "name": "openai_when_openai_key",
+         "setup": {
+             "OPENAI_API_KEY": 'openai-test-key',
+             "CLAUDE_API_KEY": None,
+             "GEMINI_API_KEY": None,
+             "is_azure_configured": False,
+         },
+         "expected_provider": "OpenAI (user key)",
+         "expected_key_type": "user_key"
+     },
+     {
+         "name": "claude_priority_over_gemini",
+         "setup": {
+             "OPENAI_API_KEY": None,
+             "CLAUDE_API_KEY": "claude-test-key",
+             "GEMINI_API_KEY": "gemini-test-key",
+             "is_azure_configured": False,
+         },
+         "expected_provider": "Claude",
+         "expected_key_type": "claude"
+     },
+ ])
+ def test_provider_capabilities_real_logic(
+     test_case: dict,
+     monkeypatch: pytest.MonkeyPatch,
+     provider_config: Config
+ ) -> None:
+     """Test the actual provider selection logic in OpenAIProvider.capabilities"""
+
+     # Set up the environment based on test case
+     setup = test_case["setup"]
+
+     # CRITICAL: Set up ALL mocks BEFORE creating any clients
+     for key, value in setup.items():
+         if key == "is_azure_configured":
+             if value:
+                 # For Azure case, mock to return True and set required constants
+                 monkeypatch.setattr("mito_ai.enterprise.utils.is_azure_openai_configured", lambda: True)
+                 monkeypatch.setattr("mito_ai.constants.AZURE_OPENAI_MODEL", "gpt-4o")
+             else:
+                 # For non-Azure case, mock to return False
+                 monkeypatch.setattr("mito_ai.enterprise.utils.is_azure_openai_configured", lambda: False)
+         else:
+             monkeypatch.setattr(f"mito_ai.constants.{key}", value)
+
+     # Clear the provider config API key to ensure it uses constants
+     provider_config.OpenAIProvider.api_key = None
+
+     # Mock HTTP calls but let the real logic run
+     with patch("openai.OpenAI") as mock_openai_constructor:
+         with patch("openai.AsyncOpenAI") as mock_async_openai:
+             with patch("openai.AsyncAzureOpenAI") as mock_async_azure_openai:
+                 # Mock successful API key validation for OpenAI
+                 mock_openai_instance = MagicMock()
+                 mock_openai_instance.models.list.return_value = [MagicMock(id="gpt-4o-mini")]
+                 mock_openai_constructor.return_value = mock_openai_instance
+
+                 # Mock server limits for Mito server fallback
+                 with patch_server_limits():
+                     # NOW create the provider after ALL mocks are set up
+                     llm = OpenAIProvider(config=provider_config)
+
+                     # Test capabilities
+                     capabilities = llm.capabilities
+                     assert capabilities.provider == test_case["expected_provider"], f"Test case: {test_case['name']}"
+                     assert llm.key_type == test_case["expected_key_type"], f"Test case: {test_case['name']}"
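Taken together, the parametrized cases encode a provider-selection order: an OpenAI user key selects OpenAI, a Claude key beats a Gemini key, a Gemini key alone selects Gemini, and with no keys the provider falls back to the Mito server. A standalone sketch of that precedence follows; it restates what the cases assert rather than the OpenAIProvider.capabilities implementation, and the cases do not pin down how an OpenAI key ranks against a simultaneously configured Claude or Gemini key.

# Illustrative restatement of the provider-selection behavior asserted above;
# not the mito_ai implementation. Only orderings exercised by the test cases
# are encoded here.
from typing import Optional, Tuple

def select_provider_sketch(
    openai_key: Optional[str],
    claude_key: Optional[str],
    gemini_key: Optional[str],
) -> Tuple[str, str]:
    """Return (provider label, key type) for a given key configuration."""
    if openai_key:
        return "OpenAI (user key)", "user_key"
    if claude_key:  # Claude wins when both Claude and Gemini keys are set
        return "Claude", "claude"
    if gemini_key:
        return "Gemini", "gemini"
    return "Mito server", "mito_server_key"  # fallback when no keys are configured

# The five parametrized cases, restated:
assert select_provider_sketch(None, None, None) == ("Mito server", "mito_server_key")
assert select_provider_sketch(None, "claude-test-key", None) == ("Claude", "claude")
assert select_provider_sketch(None, None, "gemini-test-key") == ("Gemini", "gemini")
assert select_provider_sketch("openai-test-key", None, None) == ("OpenAI (user key)", "user_key")
assert select_provider_sketch(None, "claude-test-key", "gemini-test-key") == ("Claude", "claude")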