mito_ai-0.1.50-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (205)
  1. mito_ai/__init__.py +114 -0
  2. mito_ai/_version.py +4 -0
  3. mito_ai/anthropic_client.py +334 -0
  4. mito_ai/app_deploy/__init__.py +6 -0
  5. mito_ai/app_deploy/app_deploy_utils.py +44 -0
  6. mito_ai/app_deploy/handlers.py +345 -0
  7. mito_ai/app_deploy/models.py +98 -0
  8. mito_ai/app_manager/__init__.py +4 -0
  9. mito_ai/app_manager/handlers.py +167 -0
  10. mito_ai/app_manager/models.py +71 -0
  11. mito_ai/app_manager/utils.py +24 -0
  12. mito_ai/auth/README.md +18 -0
  13. mito_ai/auth/__init__.py +6 -0
  14. mito_ai/auth/handlers.py +96 -0
  15. mito_ai/auth/urls.py +13 -0
  16. mito_ai/chat_history/handlers.py +63 -0
  17. mito_ai/chat_history/urls.py +32 -0
  18. mito_ai/completions/completion_handlers/__init__.py +3 -0
  19. mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +59 -0
  20. mito_ai/completions/completion_handlers/agent_execution_handler.py +66 -0
  21. mito_ai/completions/completion_handlers/chat_completion_handler.py +141 -0
  22. mito_ai/completions/completion_handlers/code_explain_handler.py +113 -0
  23. mito_ai/completions/completion_handlers/completion_handler.py +42 -0
  24. mito_ai/completions/completion_handlers/inline_completer_handler.py +48 -0
  25. mito_ai/completions/completion_handlers/smart_debug_handler.py +160 -0
  26. mito_ai/completions/completion_handlers/utils.py +147 -0
  27. mito_ai/completions/handlers.py +415 -0
  28. mito_ai/completions/message_history.py +401 -0
  29. mito_ai/completions/models.py +404 -0
  30. mito_ai/completions/prompt_builders/__init__.py +3 -0
  31. mito_ai/completions/prompt_builders/agent_execution_prompt.py +57 -0
  32. mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +160 -0
  33. mito_ai/completions/prompt_builders/agent_system_message.py +472 -0
  34. mito_ai/completions/prompt_builders/chat_name_prompt.py +15 -0
  35. mito_ai/completions/prompt_builders/chat_prompt.py +116 -0
  36. mito_ai/completions/prompt_builders/chat_system_message.py +92 -0
  37. mito_ai/completions/prompt_builders/explain_code_prompt.py +32 -0
  38. mito_ai/completions/prompt_builders/inline_completer_prompt.py +197 -0
  39. mito_ai/completions/prompt_builders/prompt_constants.py +170 -0
  40. mito_ai/completions/prompt_builders/smart_debug_prompt.py +199 -0
  41. mito_ai/completions/prompt_builders/utils.py +84 -0
  42. mito_ai/completions/providers.py +284 -0
  43. mito_ai/constants.py +63 -0
  44. mito_ai/db/__init__.py +3 -0
  45. mito_ai/db/crawlers/__init__.py +6 -0
  46. mito_ai/db/crawlers/base_crawler.py +61 -0
  47. mito_ai/db/crawlers/constants.py +43 -0
  48. mito_ai/db/crawlers/snowflake.py +71 -0
  49. mito_ai/db/handlers.py +168 -0
  50. mito_ai/db/models.py +31 -0
  51. mito_ai/db/urls.py +34 -0
  52. mito_ai/db/utils.py +185 -0
  53. mito_ai/docker/mssql/compose.yml +37 -0
  54. mito_ai/docker/mssql/init/setup.sql +21 -0
  55. mito_ai/docker/mysql/compose.yml +18 -0
  56. mito_ai/docker/mysql/init/setup.sql +13 -0
  57. mito_ai/docker/oracle/compose.yml +17 -0
  58. mito_ai/docker/oracle/init/setup.sql +20 -0
  59. mito_ai/docker/postgres/compose.yml +17 -0
  60. mito_ai/docker/postgres/init/setup.sql +13 -0
  61. mito_ai/enterprise/__init__.py +3 -0
  62. mito_ai/enterprise/utils.py +15 -0
  63. mito_ai/file_uploads/__init__.py +3 -0
  64. mito_ai/file_uploads/handlers.py +248 -0
  65. mito_ai/file_uploads/urls.py +21 -0
  66. mito_ai/gemini_client.py +232 -0
  67. mito_ai/log/handlers.py +38 -0
  68. mito_ai/log/urls.py +21 -0
  69. mito_ai/logger.py +37 -0
  70. mito_ai/openai_client.py +382 -0
  71. mito_ai/path_utils.py +70 -0
  72. mito_ai/rules/handlers.py +44 -0
  73. mito_ai/rules/urls.py +22 -0
  74. mito_ai/rules/utils.py +56 -0
  75. mito_ai/settings/handlers.py +41 -0
  76. mito_ai/settings/urls.py +20 -0
  77. mito_ai/settings/utils.py +42 -0
  78. mito_ai/streamlit_conversion/agent_utils.py +37 -0
  79. mito_ai/streamlit_conversion/prompts/prompt_constants.py +172 -0
  80. mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
  81. mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +46 -0
  82. mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
  83. mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +45 -0
  84. mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py +56 -0
  85. mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
  86. mito_ai/streamlit_conversion/search_replace_utils.py +94 -0
  87. mito_ai/streamlit_conversion/streamlit_agent_handler.py +144 -0
  88. mito_ai/streamlit_conversion/streamlit_utils.py +85 -0
  89. mito_ai/streamlit_conversion/validate_streamlit_app.py +105 -0
  90. mito_ai/streamlit_preview/__init__.py +6 -0
  91. mito_ai/streamlit_preview/handlers.py +111 -0
  92. mito_ai/streamlit_preview/manager.py +152 -0
  93. mito_ai/streamlit_preview/urls.py +22 -0
  94. mito_ai/streamlit_preview/utils.py +29 -0
  95. mito_ai/tests/__init__.py +3 -0
  96. mito_ai/tests/chat_history/test_chat_history.py +211 -0
  97. mito_ai/tests/completions/completion_handlers_utils_test.py +190 -0
  98. mito_ai/tests/conftest.py +53 -0
  99. mito_ai/tests/create_agent_system_message_prompt_test.py +22 -0
  100. mito_ai/tests/data/prompt_lg.py +69 -0
  101. mito_ai/tests/data/prompt_sm.py +6 -0
  102. mito_ai/tests/data/prompt_xl.py +13 -0
  103. mito_ai/tests/data/stock_data.sqlite3 +0 -0
  104. mito_ai/tests/db/conftest.py +39 -0
  105. mito_ai/tests/db/connections_test.py +102 -0
  106. mito_ai/tests/db/mssql_test.py +29 -0
  107. mito_ai/tests/db/mysql_test.py +29 -0
  108. mito_ai/tests/db/oracle_test.py +29 -0
  109. mito_ai/tests/db/postgres_test.py +29 -0
  110. mito_ai/tests/db/schema_test.py +93 -0
  111. mito_ai/tests/db/sqlite_test.py +31 -0
  112. mito_ai/tests/db/test_db_constants.py +61 -0
  113. mito_ai/tests/deploy_app/test_app_deploy_utils.py +89 -0
  114. mito_ai/tests/file_uploads/__init__.py +2 -0
  115. mito_ai/tests/file_uploads/test_handlers.py +282 -0
  116. mito_ai/tests/message_history/test_generate_short_chat_name.py +120 -0
  117. mito_ai/tests/message_history/test_message_history_utils.py +469 -0
  118. mito_ai/tests/open_ai_utils_test.py +152 -0
  119. mito_ai/tests/performance_test.py +329 -0
  120. mito_ai/tests/providers/test_anthropic_client.py +447 -0
  121. mito_ai/tests/providers/test_azure.py +631 -0
  122. mito_ai/tests/providers/test_capabilities.py +120 -0
  123. mito_ai/tests/providers/test_gemini_client.py +195 -0
  124. mito_ai/tests/providers/test_mito_server_utils.py +448 -0
  125. mito_ai/tests/providers/test_model_resolution.py +130 -0
  126. mito_ai/tests/providers/test_openai_client.py +57 -0
  127. mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
  128. mito_ai/tests/providers/test_provider_limits.py +42 -0
  129. mito_ai/tests/providers/test_providers.py +382 -0
  130. mito_ai/tests/providers/test_retry_logic.py +389 -0
  131. mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
  132. mito_ai/tests/providers/utils.py +85 -0
  133. mito_ai/tests/rules/conftest.py +26 -0
  134. mito_ai/tests/rules/rules_test.py +117 -0
  135. mito_ai/tests/server_limits_test.py +406 -0
  136. mito_ai/tests/settings/conftest.py +26 -0
  137. mito_ai/tests/settings/settings_test.py +70 -0
  138. mito_ai/tests/settings/test_settings_constants.py +9 -0
  139. mito_ai/tests/streamlit_conversion/__init__.py +3 -0
  140. mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +240 -0
  141. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +246 -0
  142. mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +193 -0
  143. mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +112 -0
  144. mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +118 -0
  145. mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +292 -0
  146. mito_ai/tests/test_constants.py +47 -0
  147. mito_ai/tests/test_telemetry.py +12 -0
  148. mito_ai/tests/user/__init__.py +2 -0
  149. mito_ai/tests/user/test_user.py +120 -0
  150. mito_ai/tests/utils/__init__.py +3 -0
  151. mito_ai/tests/utils/test_anthropic_utils.py +162 -0
  152. mito_ai/tests/utils/test_gemini_utils.py +98 -0
  153. mito_ai/tests/version_check_test.py +169 -0
  154. mito_ai/user/handlers.py +45 -0
  155. mito_ai/user/urls.py +21 -0
  156. mito_ai/utils/__init__.py +3 -0
  157. mito_ai/utils/anthropic_utils.py +168 -0
  158. mito_ai/utils/create.py +94 -0
  159. mito_ai/utils/db.py +74 -0
  160. mito_ai/utils/error_classes.py +42 -0
  161. mito_ai/utils/gemini_utils.py +133 -0
  162. mito_ai/utils/message_history_utils.py +87 -0
  163. mito_ai/utils/mito_server_utils.py +242 -0
  164. mito_ai/utils/open_ai_utils.py +200 -0
  165. mito_ai/utils/provider_utils.py +49 -0
  166. mito_ai/utils/schema.py +86 -0
  167. mito_ai/utils/server_limits.py +152 -0
  168. mito_ai/utils/telemetry_utils.py +480 -0
  169. mito_ai/utils/utils.py +89 -0
  170. mito_ai/utils/version_utils.py +94 -0
  171. mito_ai/utils/websocket_base.py +88 -0
  172. mito_ai/version_check.py +60 -0
  173. mito_ai-0.1.50.data/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +7 -0
  174. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/build_log.json +728 -0
  175. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/package.json +243 -0
  176. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +238 -0
  177. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +37 -0
  178. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js +21602 -0
  179. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js.map +1 -0
  180. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
  181. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
  182. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js +619 -0
  183. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js.map +1 -0
  184. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style.js +4 -0
  185. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +712 -0
  186. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
  187. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
  188. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
  189. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
  190. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
  191. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
  192. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
  193. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
  194. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
  195. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
  196. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
  197. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2792 -0
  198. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
  199. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +4859 -0
  200. mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +1 -0
  201. mito_ai-0.1.50.dist-info/METADATA +221 -0
  202. mito_ai-0.1.50.dist-info/RECORD +205 -0
  203. mito_ai-0.1.50.dist-info/WHEEL +4 -0
  204. mito_ai-0.1.50.dist-info/entry_points.txt +2 -0
  205. mito_ai-0.1.50.dist-info/licenses/LICENSE +3 -0
mito_ai/tests/performance_test.py
@@ -0,0 +1,329 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ # # Copyright (c) Saga Inc.
+ # # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ # """
+ # This test will not run in CI, but you can run it locally with:
+ # python -m pytest mito_ai/tests/performance_test.py -v -s
+
+ # Note that you will also need to edit open_ai_utils.py,
+ # specifically the `is_running_test` condition. I would recommend
+ # copying the code from the else condition into the if condition.
+ # """
+
+ # import time
+ # import os
+ # import pytest
+ # import openai
+ # from typing import Generator, List, Dict, Any, Tuple, Optional, cast
+
+ # from openai.types.chat import ChatCompletionMessageParam
+ # from mito_ai.providers import OpenAIProvider
+ # from mito_ai.completions.models import MessageType
+ # from mito_ai.tests.data.prompt_sm import prompt_sm
+ # from mito_ai.tests.data.prompt_lg import prompt_lg
+ # from mito_ai.tests.data.prompt_xl import prompt_xl
+
+ # TEST_MODEL = "o3-mini"
+ # NUM_ITERATIONS = 10  # Number of requests to make for each test
+
+ # # Environment variable to control whether performance tests run in CI
+ # IS_CI = os.environ.get("CI", "false").lower() == "true"
+
+ # # Test messages for all performance tests
+ # SMALL_TEST_MESSAGE: List[ChatCompletionMessageParam] = [
+ #     {"role": "user", "content": prompt_sm}
+ # ]
+ # LARGE_TEST_MESSAGE: List[ChatCompletionMessageParam] = [
+ #     cast(
+ #         ChatCompletionMessageParam,
+ #         {"role": prompt_lg[i]["role"], "content": prompt_lg[i]["content"]},
+ #     )
+ #     for i in range(len(prompt_lg))
+ # ]
+ # XL_TEST_MESSAGE: List[ChatCompletionMessageParam] = [
+ #     cast(
+ #         ChatCompletionMessageParam,
+ #         {"role": prompt_xl[i]["role"], "content": prompt_xl[i]["content"]},
+ #     )
+ #     for i in range(len(prompt_xl))
+ # ]
+
+ # # Dictionary to store performance metrics for all tests
+ # ALL_METRICS: Dict[str, Dict[str, Any]] = {}
+
+
+ # async def run_llm_requests(
+ #     llm: OpenAIProvider,
+ #     messages: List[ChatCompletionMessageParam],
+ #     n: int = NUM_ITERATIONS,
+ # ) -> Tuple[List[Optional[str]], Dict[str, Any]]:
+ #     """
+ #     Run LLM requests n times and collect performance metrics.
+
+ #     Args:
+ #         llm: The OpenAIProvider instance to use
+ #         messages: The messages to send to the LLM
+ #         n: Number of requests to make (default: NUM_ITERATIONS)
+
+ #     Returns:
+ #         Tuple containing:
+ #         - List of completion responses
+ #         - Dictionary with performance metrics (min, max, avg latency)
+ #     """
+ #     completions: List[Optional[str]] = []
+ #     latencies: List[int] = []
+ #     errors: List[str] = []
+
+ #     for i in range(n):
+ #         try:
+ #             start_time = time.time()
+ #             completion = await llm.request_completions(
+ #                 message_type=MessageType.CHAT, messages=messages, model=TEST_MODEL, thread_id=None
+ #             )
+ #             end_time = time.time()
+
+ #             latency_ms = round((end_time - start_time) * 1000)
+ #             latencies.append(latency_ms)
+ #             completions.append(completion)
+
+ #             print(f"Request {i+1}/{n} latency: {latency_ms} ms")
+ #         except openai.APIError as e:
+ #             error_msg = f"Request {i+1}/{n} failed: {str(e)}"
+ #             errors.append(str(e))
+ #             print(error_msg)
+ #             completions.append(None)  # Add None for failed completions
+ #         except Exception as e:
+ #             error_msg = f"Request {i+1}/{n} failed: {str(e)}"
+ #             errors.append(str(e))
+ #             print(error_msg)
+ #             completions.append(None)  # Add None for failed completions
+
+ #     # Calculate stats on successful requests
+ #     successful_requests = len(latencies)
+ #     failed_requests = len(errors)
+
+ #     metrics: Dict[str, Any] = {
+ #         "total_requests": n,
+ #         "successful_requests": successful_requests,
+ #         "failed_requests": failed_requests,
+ #     }
+
+ #     if successful_requests > 0:
+ #         avg_latency = sum(latencies) / successful_requests
+ #         metrics["avg_latency_ms"] = round(avg_latency)
+ #         metrics["min_latency_ms"] = min(latencies) if latencies else None
+ #         metrics["max_latency_ms"] = max(latencies) if latencies else None
+ #         metrics["all_latencies_ms"] = latencies
+ #     else:
+ #         metrics["avg_latency_ms"] = None
+ #         metrics["min_latency_ms"] = None
+ #         metrics["max_latency_ms"] = None
+ #         metrics["all_latencies_ms"] = []
+
+ #     return completions, metrics
+
+
+ # async def run_direct_openai_requests(
+ #     client: openai.OpenAI,
+ #     messages: List[ChatCompletionMessageParam],
+ #     n: int = NUM_ITERATIONS,
+ # ) -> Tuple[List[Optional[str]], Dict[str, Any]]:
+ #     """
+ #     Run direct OpenAI requests n times and collect performance metrics.
+
+ #     Args:
+ #         client: The OpenAI client instance
+ #         messages: The messages to send to the API
+ #         n: Number of requests to make (default: NUM_ITERATIONS)
+
+ #     Returns:
+ #         Tuple containing:
+ #         - List of completion responses
+ #         - Dictionary with performance metrics (min, max, avg latency)
+ #     """
+ #     completions: List[Optional[str]] = []
+ #     latencies: List[int] = []
+ #     errors: List[str] = []
+
+ #     for i in range(n):
+ #         try:
+ #             start_time = time.time()
+ #             response = client.chat.completions.create(
+ #                 model=TEST_MODEL, messages=messages
+ #             )
+ #             completion = response.choices[0].message.content
+ #             end_time = time.time()
+
+ #             latency_ms = round((end_time - start_time) * 1000)
+ #             latencies.append(latency_ms)
+ #             completions.append(completion)
+
+ #             print(f"Direct OpenAI request {i+1}/{n} latency: {latency_ms} ms")
+ #         except openai.APIError as e:
+ #             error_msg = f"Direct OpenAI request {i+1}/{n} failed: {str(e)}"
+ #             errors.append(str(e))
+ #             print(error_msg)
+ #             completions.append(None)  # Add None for failed completions
+ #         except Exception as e:
+ #             error_msg = f"Direct OpenAI request {i+1}/{n} failed: {str(e)}"
+ #             errors.append(str(e))
+ #             print(error_msg)
+ #             completions.append(None)  # Add None for failed completions
+
+ #     # Calculate stats on successful requests
+ #     successful_requests = len(latencies)
+ #     failed_requests = len(errors)
+
+ #     metrics: Dict[str, Any] = {
+ #         "total_requests": n,
+ #         "successful_requests": successful_requests,
+ #         "failed_requests": failed_requests,
+ #     }
+
+ #     if successful_requests > 0:
+ #         avg_latency = sum(latencies) / successful_requests
+ #         metrics["avg_latency_ms"] = round(avg_latency)
+ #         metrics["min_latency_ms"] = min(latencies) if latencies else None
+ #         metrics["max_latency_ms"] = max(latencies) if latencies else None
+ #         metrics["all_latencies_ms"] = latencies
+ #     else:
+ #         metrics["avg_latency_ms"] = None
+ #         metrics["min_latency_ms"] = None
+ #         metrics["max_latency_ms"] = None
+ #         metrics["all_latencies_ms"] = []
+
+ #     return completions, metrics
+
+
+ # # This fixture runs after all tests and prints the metrics summary
+ # @pytest.fixture(scope="session", autouse=True)
+ # def print_metrics_summary() -> Generator[None, Any, None]:
+ #     """Print a summary of all collected metrics after all tests have run."""
+ #     yield  # This ensures the code below runs after all tests
+
+ #     if ALL_METRICS:
+ #         print("\n\n" + "=" * 100)
+ #         print("PERFORMANCE TEST RESULTS SUMMARY")
+ #         print("=" * 100)
+
+ #         # Print in a table format
+ #         headers = [
+ #             "Test",
+ #             "Success/Total",
+ #             "Success %",
+ #             "Avg Latency (ms)",
+ #             "Min Latency (ms)",
+ #             "Max Latency (ms)",
+ #         ]
+ #         row_format = "{:<25} {:<15} {:<10} {:<18} {:<18} {:<18}"
+
+ #         print(row_format.format(*headers))
+ #         print("-" * 100)
+
+ #         for test_name, metrics in ALL_METRICS.items():
+ #             success_rate = "N/A"
+ #             if "total_requests" in metrics and metrics["total_requests"] > 0:
+ #                 success_rate = f"{(metrics.get('successful_requests', 0) / metrics['total_requests']) * 100:.1f}%"
+
+ #             success_total = f"{metrics.get('successful_requests', 'N/A')}/{metrics.get('total_requests', 'N/A')}"
+
+ #             row = [
+ #                 test_name,
+ #                 success_total,
+ #                 success_rate,
+ #                 metrics.get("avg_latency_ms", "N/A"),
+ #                 metrics.get("min_latency_ms", "N/A"),
+ #                 metrics.get("max_latency_ms", "N/A"),
+ #             ]
+ #             print(row_format.format(*row))
+
+ #         print("=" * 100)
+
+
+ # @pytest.mark.skipif(IS_CI, reason="Performance tests are skipped in CI environments")
+ # @pytest.mark.asyncio
+ # async def test_server_key_performance() -> None:
+ #     """Test the performance of the OpenAI provider when using the server key."""
+ #     # Save the original API key if it exists
+ #     original_api_key = os.environ.get("OPENAI_API_KEY", "")
+
+ #     try:
+ #         # Ensure we're using the server key by clearing the API key in environment
+ #         os.environ["OPENAI_API_KEY"] = ""
+
+ #         # Initialize the provider
+ #         llm = OpenAIProvider()
+
+ #         print("\nRunning small prompt")
+ #         completions_sm, metrics_sm = await run_llm_requests(llm, SMALL_TEST_MESSAGE)
+ #         ALL_METRICS["Server Key (sm prompt)"] = metrics_sm
+
+ #         print("\nRunning large prompt")
+ #         completions_lg, metrics_lg = await run_llm_requests(llm, LARGE_TEST_MESSAGE)
+ #         ALL_METRICS["Server Key (lg prompt)"] = metrics_lg
+
+ #         print("\nRunning xl prompt")
+ #         completions_xl, metrics_xl = await run_llm_requests(llm, XL_TEST_MESSAGE)
+ #         ALL_METRICS["Server Key (xl prompt)"] = metrics_xl
+ #     finally:
+ #         # Restore the original API key
+ #         os.environ["OPENAI_API_KEY"] = original_api_key
+
+
+ # @pytest.mark.skipif(IS_CI, reason="Performance tests are skipped in CI environments")
+ # @pytest.mark.asyncio
+ # async def test_user_key_performance() -> None:
+ #     """Test the performance of the OpenAI provider when using a user key."""
+ #     # Skip test if no API key is available
+ #     api_key = os.environ.get("OPENAI_API_KEY")
+ #     if not api_key:
+ #         pytest.skip("No OpenAI API key available in environment variables")
+
+ #     # Initialize the provider (will use the API key from environment)
+ #     llm = OpenAIProvider()
+
+ #     print("\nRunning sm prompt")
+ #     completions_sm, metrics_sm = await run_llm_requests(llm, SMALL_TEST_MESSAGE)
+ #     ALL_METRICS["User Key (sm prompt)"] = metrics_sm
+
+ #     print("\nRunning lg prompt")
+ #     completions_lg, metrics_lg = await run_llm_requests(llm, LARGE_TEST_MESSAGE)
+ #     ALL_METRICS["User Key (lg prompt)"] = metrics_lg
+
+ #     print("\nRunning xl prompt")
+ #     completions_xl, metrics_xl = await run_llm_requests(llm, XL_TEST_MESSAGE)
+ #     ALL_METRICS["User Key (xl prompt)"] = metrics_xl
+
+
+ # @pytest.mark.skipif(IS_CI, reason="Performance tests are skipped in CI environments")
+ # @pytest.mark.asyncio
+ # async def test_direct_openai_performance() -> None:
+ #     """Test the performance of direct OpenAI API calls (control group)."""
+ #     # Skip test if no API key is available
+ #     api_key = os.environ.get("OPENAI_API_KEY")
+ #     if not api_key:
+ #         pytest.skip("No OpenAI API key available in environment variables")
+
+ #     # Initialize the OpenAI client directly
+ #     client = openai.OpenAI(api_key=api_key)
+
+ #     print("\nRunning sm prompt")
+ #     completions_sm, metrics_sm = await run_direct_openai_requests(
+ #         client, SMALL_TEST_MESSAGE
+ #     )
+ #     ALL_METRICS["Direct OpenAI (sm prompt)"] = metrics_sm
+
+ #     print("\nRunning lg prompt")
+ #     completions_lg, metrics_lg = await run_direct_openai_requests(
+ #         client, LARGE_TEST_MESSAGE
+ #     )
+ #     ALL_METRICS["Direct OpenAI (lg prompt)"] = metrics_lg
+
+ #     print("\nRunning xl prompt")
+ #     completions_xl, metrics_xl = await run_direct_openai_requests(
+ #         client, XL_TEST_MESSAGE
+ #     )
+ #     ALL_METRICS["Direct OpenAI (xl prompt)"] = metrics_xl
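
For reference, the two request loops in the diff above (run_llm_requests and run_direct_openai_requests) share one measurement pattern: time each call, record the per-request latency in milliseconds, and reduce the successful latencies to avg/min/max. The following is a minimal standalone sketch of that aggregation, not part of the package; fake_request is a hypothetical stand-in for the real provider call.

    # Minimal sketch of the latency-aggregation pattern used by the test file above.
    # fake_request is a hypothetical placeholder for the real OpenAI/provider request.
    import time
    from typing import Any, Dict, List


    def fake_request() -> str:
        time.sleep(0.05)  # pretend the call takes ~50 ms
        return "completion"


    def collect_metrics(n: int = 5) -> Dict[str, Any]:
        latencies: List[int] = []
        for _ in range(n):
            start = time.time()
            fake_request()
            # Per-request latency in whole milliseconds, as in the diff above
            latencies.append(round((time.time() - start) * 1000))
        return {
            "total_requests": n,
            "successful_requests": len(latencies),
            "avg_latency_ms": round(sum(latencies) / len(latencies)) if latencies else None,
            "min_latency_ms": min(latencies) if latencies else None,
            "max_latency_ms": max(latencies) if latencies else None,
        }


    print(collect_metrics())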