mito-ai 0.1.35__py3-none-any.whl → 0.1.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mito-ai might be problematic.

Files changed (54)
  1. mito_ai/__init__.py +6 -4
  2. mito_ai/_version.py +1 -1
  3. mito_ai/anthropic_client.py +3 -10
  4. mito_ai/app_builder/handlers.py +89 -11
  5. mito_ai/app_builder/models.py +3 -0
  6. mito_ai/auth/README.md +18 -0
  7. mito_ai/auth/__init__.py +6 -0
  8. mito_ai/auth/handlers.py +96 -0
  9. mito_ai/auth/urls.py +13 -0
  10. mito_ai/completions/completion_handlers/chat_completion_handler.py +2 -2
  11. mito_ai/completions/models.py +7 -6
  12. mito_ai/completions/prompt_builders/agent_execution_prompt.py +8 -3
  13. mito_ai/completions/prompt_builders/agent_system_message.py +21 -7
  14. mito_ai/completions/prompt_builders/chat_prompt.py +18 -11
  15. mito_ai/completions/prompt_builders/utils.py +53 -10
  16. mito_ai/constants.py +11 -1
  17. mito_ai/streamlit_conversion/streamlit_agent_handler.py +112 -0
  18. mito_ai/streamlit_conversion/streamlit_system_prompt.py +42 -0
  19. mito_ai/streamlit_conversion/streamlit_utils.py +96 -0
  20. mito_ai/streamlit_conversion/validate_and_run_streamlit_code.py +207 -0
  21. mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
  22. mito_ai/tests/streamlit_conversion/__init__.py +3 -0
  23. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +265 -0
  24. mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +197 -0
  25. mito_ai/tests/streamlit_conversion/test_validate_and_run_streamlit_code.py +418 -0
  26. mito_ai/tests/test_constants.py +18 -3
  27. mito_ai/utils/anthropic_utils.py +18 -70
  28. mito_ai/utils/gemini_utils.py +22 -73
  29. mito_ai/utils/mito_server_utils.py +147 -4
  30. mito_ai/utils/open_ai_utils.py +18 -107
  31. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +100 -100
  32. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  33. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  34. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a20772bc113422d0f505.js → mito_ai-0.1.37.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.831f63b48760c7119b9b.js +1165 -539
  35. mito_ai-0.1.37.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.831f63b48760c7119b9b.js.map +1 -0
  36. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.d2eea6519fa332d79efb.js → mito_ai-0.1.37.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.93ecc9bc0edba61535cc.js +18 -14
  37. mito_ai-0.1.37.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.93ecc9bc0edba61535cc.js.map +1 -0
  38. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.76efcc5c3be4056457ee.js → mito_ai-0.1.37.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +6 -2
  39. mito_ai-0.1.37.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
  40. {mito_ai-0.1.35.dist-info → mito_ai-0.1.37.dist-info}/METADATA +1 -1
  41. {mito_ai-0.1.35.dist-info → mito_ai-0.1.37.dist-info}/RECORD +51 -38
  42. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a20772bc113422d0f505.js.map +0 -1
  43. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.d2eea6519fa332d79efb.js.map +0 -1
  44. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.76efcc5c3be4056457ee.js.map +0 -1
  45. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  46. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  47. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  48. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js +0 -0
  49. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -0
  50. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  51. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  52. {mito_ai-0.1.35.dist-info → mito_ai-0.1.37.dist-info}/WHEEL +0 -0
  53. {mito_ai-0.1.35.dist-info → mito_ai-0.1.37.dist-info}/entry_points.txt +0 -0
  54. {mito_ai-0.1.35.dist-info → mito_ai-0.1.37.dist-info}/licenses/LICENSE +0 -0
mito_ai/utils/mito_server_utils.py
@@ -1,12 +1,13 @@
  # Copyright (c) Saga Inc.
  # Distributed under the terms of the GNU Affero General Public License v3.0 License.
 
- from mito_ai.completions.models import MessageType
+ import asyncio
+ import json
+ import time
+ from typing import Any, Dict, Optional, Callable, Union, AsyncGenerator
+ from mito_ai.completions.models import MessageType, CompletionReply, CompletionStreamChunk, CompletionItem
  from mito_ai.utils.server_limits import check_mito_server_quota, update_mito_server_quota
  from tornado.httpclient import HTTPResponse
- import time
- import json
- from typing import Any, Dict, Optional
 
  from mito_ai.constants import MITO_GEMINI_URL
  from mito_ai.utils.utils import _create_http_client
 
@@ -88,5 +89,147 @@ async def get_response_from_mito_server(
          pass
 
      http_client.close()
+
+
+ async def stream_response_from_mito_server(
+     url: str,
+     headers: Dict[str, str],
+     data: Dict[str, Any],
+     timeout: int,
+     max_retries: int,
+     message_type: MessageType,
+     reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None],
+     message_id: str,
+     chunk_processor: Optional[Callable[[str], str]] = None,
+     provider_name: str = "Mito Server",
+ ) -> AsyncGenerator[str, None]:
+     """
+     Stream responses from the Mito server.
+
+     This is a unified streaming function that can be used by all providers (OpenAI, Anthropic, Gemini).
+
+     Args:
+         url: The Mito server URL to stream from
+         headers: Request headers
+         data: Request data
+         timeout: Request timeout in seconds
+         max_retries: Maximum number of retries
+         message_type: The message type for quota tracking
+         reply_fn: Function to call with each chunk for streaming replies
+         message_id: The message ID to track the request
+         chunk_processor: Optional function to process chunks before yielding (e.g., for Gemini's special processing)
+         provider_name: Name of the provider for error messages
+
+     Yields:
+         Chunks of text from the streaming response
+     """
+     # Check the Mito server quota
+     check_mito_server_quota(message_type)
+
+     # Create HTTP client with appropriate timeout settings
+     http_client, http_client_timeout = _create_http_client(timeout, max_retries)
+
+     # Set up streaming infrastructure
+     start_time = time.time()
+     chunk_queue: asyncio.Queue[str] = asyncio.Queue()
+     fetch_complete = False
+
+     # Define a callback to process chunks and add them to the queue
+     def chunk_callback(chunk: bytes) -> None:
+         try:
+             chunk_str = chunk.decode('utf-8')
+             asyncio.create_task(chunk_queue.put(chunk_str))
+         except Exception as e:
+             print(f"Error processing {provider_name} streaming chunk: {str(e)}")
+
+     # Execute the streaming request
+     fetch_future = None
+     try:
+         fetch_future = http_client.fetch(
+             url,
+             method="POST",
+             headers=headers,
+             body=json.dumps(data),
+             request_timeout=http_client_timeout,
+             streaming_callback=chunk_callback
+         )
+
+         # Create a task to wait for the fetch to complete
+         async def wait_for_fetch() -> None:
+             try:
+                 await fetch_future
+                 nonlocal fetch_complete
+                 fetch_complete = True
+                 print(f"{provider_name} fetch completed")
+             except Exception as e:
+                 print(f"Error in {provider_name} fetch: {str(e)}")
+                 raise
+
+         # Start the task to wait for fetch completion
+         fetch_task = asyncio.create_task(wait_for_fetch())
+
+         # Yield chunks as they arrive
+         while not (fetch_complete and chunk_queue.empty()):
+             try:
+                 # Wait for a chunk with a timeout to prevent deadlocks
+                 chunk = await asyncio.wait_for(chunk_queue.get(), timeout=0.1)
+
+                 # Process the chunk if a processor is provided
+                 processed_chunk = chunk
+                 if chunk_processor:
+                     processed_chunk = chunk_processor(chunk)
+
+                 if reply_fn is not None and message_id is not None:
+                     # Send the chunk directly to the frontend
+                     reply_fn(CompletionStreamChunk(
+                         parent_id=message_id,
+                         chunk=CompletionItem(
+                             content=processed_chunk,
+                             isIncomplete=True,
+                             token=message_id,
+                         ),
+                         done=False,
+                     ))
+
+                 yield chunk
+             except asyncio.TimeoutError:
+                 # No chunk available within timeout, check if fetch is complete
+                 if fetch_complete and chunk_queue.empty():
+                     break
+
+                 # Otherwise continue waiting for chunks
+                 continue
+
+         print(f"\n{provider_name} stream completed in {time.time() - start_time:.2f} seconds")
+
+         if reply_fn is not None and message_id is not None:
+             # Send a final chunk to indicate completion
+             reply_fn(CompletionStreamChunk(
+                 parent_id=message_id,
+                 chunk=CompletionItem(
+                     content="",
+                     isIncomplete=False,
+                     token=message_id,
+                 ),
+                 done=True,
+             ))
+     except Exception as e:
+         print(f"\n{provider_name} stream failed after {time.time() - start_time:.2f} seconds with error: {str(e)}")
+         # If an exception occurred, ensure the fetch future is awaited to properly clean up
+         if fetch_future:
+             try:
+                 await fetch_future
+             except Exception:
+                 pass
+         raise
+     finally:
+         # Clean up resources
+         try:
+             # We always update the quota, even if there is an error
+             update_mito_server_quota(message_type)
+         except Exception:
+             pass
+
+         http_client.close()
 
 
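The new stream_response_from_mito_server above is the unified entry point that the per-provider wrappers now delegate to. As a rough illustration of its calling convention, here is a minimal consumer sketch: the endpoint URL, the payload shape, and the MessageType.CHAT member are assumptions made for the example, while the function signature itself comes from the hunk above.

# Hedged usage sketch for the unified streaming helper shown above.
# Assumed for illustration: the URL, the payload shape, and MessageType.CHAT.
from typing import Union

from mito_ai.completions.models import (
    CompletionReply,
    CompletionStreamChunk,
    MessageType,
)
from mito_ai.utils.mito_server_utils import stream_response_from_mito_server

async def collect_completion() -> str:
    def reply_fn(msg: Union[CompletionReply, CompletionStreamChunk]) -> None:
        # The real extension forwards these chunks to the JupyterLab
        # frontend; this stand-in simply discards them.
        pass

    text = ""
    async for chunk in stream_response_from_mito_server(
        url="https://example.invalid/completions",  # placeholder, not the real endpoint
        headers={"Content-Type": "application/json"},
        data={"messages": [{"role": "user", "content": "hello"}]},
        timeout=30,
        max_retries=1,
        message_type=MessageType.CHAT,  # assumed enum member
        reply_fn=reply_fn,
        message_id="example-message-id",
    ):
        # The generator yields raw chunks; when a chunk_processor is set,
        # only reply_fn receives the processed version.
        text += chunk
    return text

# Example (inside a running event loop): text = await collect_completion()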
mito_ai/utils/open_ai_utils.py
@@ -10,7 +10,7 @@ import asyncio
  import json
  import time
  from typing import Any, Dict, List, Optional, Final, Union, AsyncGenerator, Tuple, Callable
- from mito_ai.utils.mito_server_utils import get_response_from_mito_server
+ from mito_ai.utils.mito_server_utils import get_response_from_mito_server, stream_response_from_mito_server
  from mito_ai.utils.provider_utils import does_message_require_fast_model
  from tornado.httpclient import AsyncHTTPClient
  from openai.types.chat import ChatCompletionMessageParam
@@ -106,8 +106,8 @@ async def stream_ai_completion_from_mito_server(
      timeout: int,
      max_retries: int,
      message_type: MessageType,
-     reply_fn: Optional[Callable[[Union[CompletionReply, CompletionStreamChunk]], None]] = None,
-     message_id: Optional[str] = None,
+     reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None],
+     message_id: str,
  ) -> AsyncGenerator[str, None]:
      """
      Stream AI completions from the Mito server.
@@ -127,7 +127,7 @@
      Yields:
          Chunks of text from the streaming response
      """
-     # ===== STEP 1: Prepare request data and headers =====
+     # Prepare request data and headers
      data, headers = _prepare_request_data_and_headers(
          last_message_content,
          ai_completion_data,
@@ -136,109 +136,20 @@
          message_type
      )
 
-     # ===== STEP 2: Create HTTP client with appropriate timeout settings =====
-     http_client, http_client_timeout = _create_http_client(timeout, max_retries)
-
-     # ===== STEP 3: Set up streaming infrastructure =====
-     start_time = time.time()
-     chunk_queue: asyncio.Queue[str] = asyncio.Queue()
-     fetch_complete = False
-
-     # Define a callback to process chunks and add them to the queue
-     def chunk_callback(chunk: bytes) -> None:
-         try:
-             chunk_str = chunk.decode('utf-8')
-             asyncio.create_task(chunk_queue.put(chunk_str))
-         except Exception as e:
-             print(f"Error processing streaming chunk: {str(e)}")
-
-     # ===== STEP 4: Execute the streaming request =====
-     fetch_future = None
-     try:
-         # Use fetch with streaming_callback to handle streaming responses.
-         # The streaming_callback is not sent as part of the POST request.
-         # It's a parameter for the Tornado AsyncHTTPClient.fetch() method that specifies
-         # how to handle incoming data chunks as they arrive from the server.
-         # When the server sends data in chunks, this callback function is called each time
-         # a new chunk arrives, allowing for immediate processing without waiting for the
-         # entire response to complete.
-         fetch_future = http_client.fetch(
-             MITO_OPENAI_URL,
-             method="POST",
-             headers=headers,
-             body=json.dumps(data),
-             request_timeout=http_client_timeout,
-             streaming_callback=chunk_callback
-         )
-
-         # Create a task to wait for the fetch to complete
-         async def wait_for_fetch() -> None:
-             try:
-                 await fetch_future
-                 nonlocal fetch_complete
-                 fetch_complete = True
-                 print("Fetch completed")
-             except Exception as e:
-                 print(f"Error in fetch: {str(e)}")
-                 raise
-
-         # Start the task to wait for fetch completion
-         fetch_task = asyncio.create_task(wait_for_fetch())
-
-         # ===== STEP 5: Yield chunks as they arrive =====
-         while not (fetch_complete and chunk_queue.empty()):
-             try:
-                 # Wait for a chunk with a timeout. By setting the timeout, we 1. prevent deadlocks
-                 # which could happen if fetch_complete has not been set to true yet, and 2. it enables
-                 # periodic checking if the queue has a new chunk.
-                 chunk = await asyncio.wait_for(chunk_queue.get(), timeout=0.1)
-
-                 # If reply_fn is provided, send the chunk directly to the frontend
-                 if reply_fn and message_id:
-                     reply_fn(CompletionStreamChunk(
-                         parent_id=message_id,
-                         chunk=CompletionItem(
-                             content=chunk,
-                             isIncomplete=True,
-                             token=message_id,
-                         ),
-                         done=False,
-                     ))
-
-                 yield chunk
-             except asyncio.TimeoutError:
-                 # No chunk available within timeout, check if fetch is complete
-                 if fetch_complete and chunk_queue.empty():
-                     break
-
-                 # Otherwise continue waiting for chunks
-                 continue
-
-         print(f"\nStream completed in {time.time() - start_time:.2f} seconds")
-
-         # Send a final chunk to indicate completion if reply_fn is provided
-         if reply_fn and message_id:
-             reply_fn(CompletionStreamChunk(
-                 parent_id=message_id,
-                 chunk=CompletionItem(
-                     content="",
-                     isIncomplete=False,
-                     token=message_id,
-                 ),
-                 done=True,
-             ))
-     except Exception as e:
-         print(f"\nStream failed after {time.time() - start_time:.2f} seconds with error: {str(e)}")
-         # If an exception occurred, ensure the fetch future is awaited to properly clean up
-         if fetch_future:
-             try:
-                 await fetch_future
-             except Exception:
-                 pass
-         raise
-     finally:
-         # ===== STEP 6: Clean up resources =====
-         http_client.close()
+     # Use the unified streaming function
+     async for chunk in stream_response_from_mito_server(
+         url=MITO_OPENAI_URL,
+         headers=headers,
+         data=data,
+         timeout=timeout,
+         max_retries=max_retries,
+         message_type=message_type,
+         reply_fn=reply_fn,
+         message_id=message_id,
+         chunk_processor=None,
+         provider_name="OpenAI",
+     ):
+         yield chunk
 
 
  def get_open_ai_completion_function_params(
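
The Gemini and Anthropic wrappers (see gemini_utils.py and anthropic_utils.py in the file list) presumably delegate the same way, with Gemini supplying the chunk_processor hook that the docstring mentions. This diff does not show Gemini's actual processor, so the following is only a hypothetical illustration of the hook's contract: a pure str -> str transform applied to each chunk before it is forwarded to the frontend.

# Hypothetical chunk processor illustrating the chunk_processor contract.
# The real Gemini processing lives in mito_ai/utils/gemini_utils.py and is
# not shown in this diff; the "data: " prefix here is an assumption.
def strip_stream_prefix(chunk: str) -> str:
    # Drop a leading server-sent-events style marker, if present.
    return chunk[len("data: "):] if chunk.startswith("data: ") else chunk

Because the hook runs once per chunk, it should tolerate partial payloads; anything that needs to accumulate state across chunks belongs in the caller instead.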