mito-ai 0.1.42__py3-none-any.whl → 0.1.43__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (46)
  1. mito_ai/_version.py +1 -1
  2. mito_ai/app_manager/handlers.py +5 -0
  3. mito_ai/app_manager/models.py +1 -2
  4. mito_ai/app_manager/utils.py +24 -0
  5. mito_ai/completions/completion_handlers/agent_execution_handler.py +1 -1
  6. mito_ai/completions/completion_handlers/chat_completion_handler.py +2 -2
  7. mito_ai/completions/completion_handlers/utils.py +27 -5
  8. mito_ai/completions/models.py +0 -2
  9. mito_ai/completions/prompt_builders/utils.py +1 -2
  10. mito_ai/file_uploads/handlers.py +49 -26
  11. mito_ai/tests/completions/completion_handlers_utils_test.py +156 -17
  12. mito_ai/tests/file_uploads/test_handlers.py +15 -0
  13. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +1 -1
  14. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  15. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  16. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a9a35b6fcc54a7bcb32c.js → mito_ai-0.1.43.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.81703ac2bc645e5c2fc2.js +457 -143
  17. mito_ai-0.1.43.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.81703ac2bc645e5c2fc2.js.map +1 -0
  18. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.c7d9d8635826165de52e.js → mito_ai-0.1.43.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.502aef26f0416fab7435.js +3 -3
  19. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.c7d9d8635826165de52e.js.map → mito_ai-0.1.43.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.502aef26f0416fab7435.js.map +1 -1
  20. {mito_ai-0.1.42.dist-info → mito_ai-0.1.43.dist-info}/METADATA +1 -1
  21. {mito_ai-0.1.42.dist-info → mito_ai-0.1.43.dist-info}/RECORD +45 -44
  22. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a9a35b6fcc54a7bcb32c.js.map +0 -1
  23. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  24. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  25. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
  26. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
  27. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  28. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +0 -0
  29. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +0 -0
  30. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
  31. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
  32. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
  33. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
  34. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
  35. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
  36. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
  37. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
  38. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
  39. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
  40. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
  41. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
  42. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  43. {mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  44. {mito_ai-0.1.42.dist-info → mito_ai-0.1.43.dist-info}/WHEEL +0 -0
  45. {mito_ai-0.1.42.dist-info → mito_ai-0.1.43.dist-info}/entry_points.txt +0 -0
  46. {mito_ai-0.1.42.dist-info → mito_ai-0.1.43.dist-info}/licenses/LICENSE +0 -0
mito_ai/_version.py CHANGED
@@ -1,4 +1,4 @@
  # This file is auto-generated by Hatchling. As such, do not:
  # - modify
  # - track in version control e.g. be sure to add to .gitignore
- __version__ = VERSION = '0.1.42'
+ __version__ = VERSION = '0.1.43'

mito_ai/app_manager/handlers.py CHANGED
@@ -17,6 +17,7 @@ from mito_ai.app_manager.models import (
  )
  from mito_ai.constants import ACTIVE_STREAMLIT_BASE_URL
  from mito_ai.logger import get_logger
+ from mito_ai.app_manager.utils import convert_utc_to_local_time
  import requests


@@ -102,6 +103,10 @@ class AppManagerHandler(BaseWebSocketHandler):

          apps_data = manage_apps_response.json()

+         for app in apps_data:
+             if 'last_deployed_at' in app:
+                 app['last_deployed_at'] = convert_utc_to_local_time(app['last_deployed_at'])
+
          # Create successful response
          reply = ManageAppReply(
              apps=apps_data,

mito_ai/app_manager/models.py CHANGED
@@ -1,5 +1,4 @@
  # Copyright (c) Saga Inc.
-
  # Distributed under the terms of the GNU Affero General Public License v3.0 License.

  from dataclasses import dataclass, field
@@ -24,7 +23,7 @@ class App:
      app_name: str
      url: str
      status: str
-     created_at: str
+     last_deployed_at: str

  @dataclass(frozen=True)
  class AppManagerError:

mito_ai/app_manager/utils.py ADDED
@@ -0,0 +1,24 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ from datetime import datetime, timezone
+
+ def convert_utc_to_local_time(time_str: str) -> str:
+     """Convert UTC time to a user's local time"""
+     try:
+         # Remove the 'Z' suffix and parse the UTC datetime
+         utc_time_str = time_str.rstrip('Z')
+         utc_time = datetime.fromisoformat(utc_time_str)
+
+         # Set timezone to UTC
+         utc_time = utc_time.replace(tzinfo=timezone.utc)
+
+         # Convert to local timezone (system timezone)
+         local_time = utc_time.astimezone()
+
+         # Format as 'MMM DD HH:MM'
+         return local_time.strftime('%m-%d-%Y %H:%M')
+
+     except (ValueError, AttributeError) as e:
+         # Return original string if parsing fails
+         return time_str
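
The new helper converts the UTC timestamps returned for deployed apps into the server's local timezone before they reach the frontend (note that despite the inline comment saying 'MMM DD HH:MM', the strftime pattern actually yields 'MM-DD-YYYY HH:MM'). A minimal usage sketch; the sample timestamp and the Eastern-time output are illustrative only:

    from mito_ai.app_manager.utils import convert_utc_to_local_time

    # ISO-8601 UTC timestamp with a trailing 'Z' (hypothetical example value)
    print(convert_utc_to_local_time("2025-01-15T18:30:00Z"))  # e.g. '01-15-2025 13:30' on a US Eastern machine

    # Unparseable input falls back to the original string
    print(convert_utc_to_local_time("not-a-timestamp"))       # 'not-a-timestamp'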

mito_ai/completions/completion_handlers/agent_execution_handler.py CHANGED
@@ -38,7 +38,7 @@ class AgentExecutionHandler(CompletionHandler[AgentExecutionMetadata]):
          display_prompt = metadata.input

          # Add the prompt to the message history
-         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput, metadata.base64EncodedUploadedImage)
+         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput, metadata.additionalContext)
          new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}

          await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, model, provider, metadata.threadId)

mito_ai/completions/completion_handlers/chat_completion_handler.py CHANGED
@@ -47,7 +47,7 @@ class ChatCompletionHandler(CompletionHandler[ChatMessageMetadata]):
          display_prompt = f"```python{metadata.activeCellCode or ''}```{metadata.input}"

          # Add the prompt to the message history
-         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput, metadata.base64EncodedUploadedImage)
+         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput, metadata.additionalContext)
          new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
          await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, model, provider, metadata.threadId)

@@ -110,7 +110,7 @@ class ChatCompletionHandler(CompletionHandler[ChatMessageMetadata]):
          display_prompt = f"```python{metadata.activeCellCode or ''}```{metadata.input}"

          # Add the prompt to the message history
-         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput, metadata.base64EncodedUploadedImage)
+         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput, metadata.additionalContext)
          new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
          await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, model, provider, metadata.threadId)


mito_ai/completions/completion_handlers/utils.py CHANGED
@@ -1,6 +1,7 @@
  # Copyright (c) Saga Inc.
  # Distributed under the terms of the GNU Affero General Public License v3.0 License.

+ import base64
  from typing import Optional, Union, List, Dict, Any, cast
  from mito_ai.completions.message_history import GlobalMessageHistory
  from mito_ai.completions.models import ThreadID
@@ -75,16 +76,37 @@ async def append_agent_system_message(
  )


+ def extract_and_encode_images_from_additional_context(
+     additional_context: Optional[List[Dict[str, str]]],
+ ) -> List[str]:
+     encoded_images = []
+
+     for context in additional_context or []:
+         if context["type"].startswith("image/"):
+             try:
+                 with open(context["value"], "rb") as image_file:
+                     image_data = image_file.read()
+                     base64_encoded = base64.b64encode(image_data).decode("utf-8")
+                     encoded_images.append(f"data:{context['type']};base64,{base64_encoded}")
+             except (FileNotFoundError, IOError) as e:
+                 print(f"Error reading image file {context['value']}: {e}")
+                 continue
+
+     return encoded_images
+
+
  def create_ai_optimized_message(
      text: str,
      base64EncodedActiveCellOutput: Optional[str] = None,
-     base64EncodedUploadedImage: Optional[str] = None,
+     additional_context: Optional[List[Dict[str, str]]] = None,
  ) -> ChatCompletionMessageParam:

      message_content: Union[str, List[Dict[str, Any]]]
-     has_uploaded_image = (
-         base64EncodedUploadedImage is not None and base64EncodedUploadedImage != ""
+     encoded_images = extract_and_encode_images_from_additional_context(
+         additional_context
      )
+
+     has_uploaded_image = len(encoded_images) > 0
      has_active_cell_output = (
          base64EncodedActiveCellOutput is not None
          and base64EncodedActiveCellOutput != ""
@@ -98,12 +120,12 @@ def create_ai_optimized_message(
          }
      ]

-     if has_uploaded_image:
+     for img in encoded_images:
          message_content.append(
              {
                  "type": "image_url",
                  "image_url": {
-                     "url": f"data:image/png;base64,{base64EncodedUploadedImage}"
+                     "url": img
                  },
              }
          )
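
With this change, handlers no longer pass a single pre-encoded base64 string; they forward metadata.additionalContext, and create_ai_optimized_message reads each image/* entry from disk and inlines it as a data URL. A hedged sketch of the new call shape (the file path is hypothetical):

    from mito_ai.completions.completion_handlers.utils import create_ai_optimized_message

    message = create_ai_optimized_message(
        text="What does this chart show?",
        additional_context=[
            {"type": "image/png", "value": "/path/to/uploaded_chart.png"},  # hypothetical path
        ],
    )
    # message["content"] is a list: one {"type": "text", ...} part, followed by one
    # {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}} part per readable image.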

mito_ai/completions/models.py CHANGED
@@ -83,7 +83,6 @@ class ChatMessageMetadata():
      variables: Optional[List[str]] = None
      files: Optional[List[str]] = None
      base64EncodedActiveCellOutput: Optional[str] = None
-     base64EncodedUploadedImage: Optional[str] = None
      index: Optional[int] = None
      stream: bool = False
      additionalContext: Optional[List[Dict[str, str]]] = None
@@ -97,7 +96,6 @@ class AgentExecutionMetadata():
      aiOptimizedCells: List[AIOptimizedCell]
      isChromeBrowser: bool
      base64EncodedActiveCellOutput: Optional[str] = None
-     base64EncodedUploadedImage: Optional[str] = None
      variables: Optional[List[str]] = None
      files: Optional[List[str]] = None
      index: Optional[int] = None

mito_ai/completions/prompt_builders/utils.py CHANGED
@@ -38,7 +38,7 @@ def get_selected_context_str(additional_context: Optional[List[Dict[str, str]]])
      selected_variables = [context["value"] for context in additional_context if context.get("type") == "variable"]
      selected_files = [context["value"] for context in additional_context if context.get("type") == "file"]
      selected_db_connections = [context["value"] for context in additional_context if context.get("type") == "db"]
-     selected_images = [context["value"] for context in additional_context if context.get("type") == "img"]
+     selected_images = [context["value"] for context in additional_context if context.get("type", "").startswith("image/")]

      # STEP 2: Create a list of strings (instructions) for each context type
      context_parts = []
@@ -68,5 +68,4 @@ def get_selected_context_str(additional_context: Optional[List[Dict[str, str]]])
      )

      # STEP 3: Combine into a single string
-
      return "\n\n".join(context_parts)
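
For reference, get_selected_context_str now keys image entries off MIME-style type strings rather than the old short "img" tag. A sketch of the additional_context list it expects; all values are hypothetical:

    additional_context = [
        {"type": "variable", "value": "sales_df"},           # hypothetical variable name
        {"type": "file", "value": "data/orders.csv"},        # hypothetical file
        {"type": "db", "value": "analytics_warehouse"},      # hypothetical connection
        {"type": "image/png", "value": "plots/revenue.png"}  # any type starting with "image/" is treated as an image
    ]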

mito_ai/file_uploads/handlers.py CHANGED
@@ -6,7 +6,38 @@ import tempfile
  import tornado
  from typing import Dict, Any
  from jupyter_server.base.handlers import APIHandler
- from mito_ai.utils.telemetry_utils import log_file_upload_attempt, log_file_upload_failure
+ from mito_ai.utils.telemetry_utils import (
+     log_file_upload_attempt,
+     log_file_upload_failure,
+ )
+
+ MAX_IMAGE_SIZE_MB = 3
+
+
+ def _is_image_file(filename: str) -> bool:
+     image_extensions = {
+         ".jpg",
+         ".jpeg",
+         ".png",
+         ".gif",
+         ".bmp",
+         ".tiff",
+         ".tif",
+         ".webp",
+         ".svg",
+     }
+     file_extension = os.path.splitext(filename)[1].lower()
+     return file_extension in image_extensions
+
+
+ def _check_image_size_limit(file_data: bytes, filename: str) -> None:
+     if not _is_image_file(filename):
+         return
+
+     file_size_mb = len(file_data) / (1024 * 1024) # Convert bytes to MB
+
+     if file_size_mb > MAX_IMAGE_SIZE_MB:
+         raise ValueError(f"Image exceeded {MAX_IMAGE_SIZE_MB}MB limit.")


  class FileUploadHandler(APIHandler):
@@ -50,7 +81,7 @@ class FileUploadHandler(APIHandler):
              self.finish()

          except Exception as e:
-             self._handle_error(f"Failed to save file: {str(e)}")
+             self._handle_error(str(e))

      def _validate_file_upload(self) -> bool:
          """Validate that a file was uploaded in the request."""
@@ -90,6 +121,9 @@
          self, filename: str, file_data: bytes, notebook_dir: str
      ) -> None:
          """Handle regular (non-chunked) file upload."""
+         # Check image file size limit before saving
+         _check_image_size_limit(file_data, filename)
+
          file_path = os.path.join(notebook_dir, filename)
          with open(file_path, "wb") as f:
              f.write(file_data)
@@ -100,8 +134,6 @@
          self, filename: str, file_data: bytes, chunk_number: int, total_chunks: int
      ) -> None:
          """Save a chunk to a temporary file."""
-         print(f"DEBUG: Saving chunk {chunk_number}/{total_chunks} for file {filename}")
-
          # Initialize temporary directory for this file if it doesn't exist
          if filename not in self._temp_dirs:
              temp_dir = tempfile.mkdtemp(prefix=f"mito_upload_{filename}_")
@@ -110,7 +142,6 @@
                  "total_chunks": total_chunks,
                  "received_chunks": set(),
              }
-             print(f"DEBUG: Created temp dir {temp_dir} for file {filename}")

          # Save the chunk to the temporary directory
          chunk_filename = os.path.join(
@@ -121,28 +152,20 @@

          # Mark this chunk as received
          self._temp_dirs[filename]["received_chunks"].add(chunk_number)
-         print(
-             f"DEBUG: Saved chunk {chunk_number}, total received: {len(self._temp_dirs[filename]['received_chunks'])}/{total_chunks}"
-         )

      def _are_all_chunks_received(self, filename: str, total_chunks: int) -> bool:
          """Check if all chunks for a file have been received."""
          if filename not in self._temp_dirs:
-             print(f"DEBUG: No temp dir found for {filename}")
              return False

          received_chunks = self._temp_dirs[filename]["received_chunks"]
          is_complete = len(received_chunks) == total_chunks
-         print(
-             f"DEBUG: Checking completion for {filename}: {len(received_chunks)}/{total_chunks} chunks received, complete: {is_complete}"
-         )
          return is_complete

      def _reconstruct_file(
          self, filename: str, total_chunks: int, notebook_dir: str
      ) -> None:
          """Reconstruct the final file from all chunks and clean up temporary directory."""
-         print(f"DEBUG: Starting reconstruction for {filename}")

          if filename not in self._temp_dirs:
              raise ValueError(f"No temporary directory found for file: {filename}")
@@ -150,23 +173,23 @@
          temp_dir = self._temp_dirs[filename]["temp_dir"]
          file_path = os.path.join(notebook_dir, filename)

-         print(f"DEBUG: Reconstructing from {temp_dir} to {file_path}")
-
          try:
-             # Reconstruct the file from chunks
+             # First, read all chunks to check total file size for images
+             all_file_data = b""
+             for i in range(1, total_chunks + 1):
+                 chunk_filename = os.path.join(temp_dir, f"chunk_{i}")
+                 with open(chunk_filename, "rb") as chunk_file:
+                     chunk_data = chunk_file.read()
+                     all_file_data += chunk_data
+
+             # Check image file size limit before saving
+             _check_image_size_limit(all_file_data, filename)
+
+             # Write the complete file
              with open(file_path, "wb") as final_file:
-                 for i in range(1, total_chunks + 1):
-                     chunk_filename = os.path.join(temp_dir, f"chunk_{i}")
-                     print(f"DEBUG: Reading chunk {i} from {chunk_filename}")
-                     with open(chunk_filename, "rb") as chunk_file:
-                         chunk_data = chunk_file.read()
-                         final_file.write(chunk_data)
-                         print(f"DEBUG: Wrote {len(chunk_data)} bytes from chunk {i}")
-
-             print(f"DEBUG: Successfully reconstructed {filename}")
+                 final_file.write(all_file_data)
          finally:
              # Clean up the temporary directory
-             print(f"DEBUG: Cleaning up temp dir for {filename}")
              self._cleanup_temp_dir(filename)

      def _cleanup_temp_dir(self, filename: str) -> None:
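
The upload handler now enforces a 3MB cap on image files (detected by extension) for both regular and chunked uploads, raising ValueError before anything is written to the notebook directory. A quick illustrative check of the helper on its own, not how the handler is normally invoked; it assumes the helpers above are importable from mito_ai.file_uploads.handlers:

    from mito_ai.file_uploads.handlers import _check_image_size_limit

    _check_image_size_limit(b"x" * (2 * 1024 * 1024), "photo.png")   # 2MB image: passes silently
    _check_image_size_limit(b"x" * (5 * 1024 * 1024), "notes.txt")   # non-image: size is not checked
    try:
        _check_image_size_limit(b"x" * (5 * 1024 * 1024), "photo.png")  # 5MB image
    except ValueError as e:
        print(e)  # "Image exceeded 3MB limit."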

mito_ai/tests/completions/completion_handlers_utils_test.py CHANGED
@@ -1,7 +1,28 @@
  # Copyright (c) Saga Inc.
  # Distributed under the terms of the GNU Affero General Public License v3.0 License.

- from mito_ai.completions.completion_handlers.utils import create_ai_optimized_message
+ import base64
+ import os
+ import tempfile
+ from contextlib import contextmanager
+ from mito_ai.completions.completion_handlers.utils import (
+     create_ai_optimized_message,
+     extract_and_encode_images_from_additional_context,
+ )
+
+
+ @contextmanager
+ def temporary_image_file(suffix=".png", content=b"fake_image_data"):
+     """Context manager that creates a temporary image file for testing."""
+     with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as temp_file:
+         temp_file.write(content)
+         temp_file_path = temp_file.name
+
+     try:
+         yield temp_file_path
+     finally:
+         # Clean up the temporary file
+         os.unlink(temp_file_path)


  def test_text_only_message():
@@ -14,14 +35,41 @@ def test_text_only_message():

  def test_message_with_uploaded_image():
      """Test scenario where the user uploads an image"""
-     result = create_ai_optimized_message(
-         text="Analyze this", base64EncodedUploadedImage="image_data"
-     )
+     with temporary_image_file() as temp_file_path:
+         result = create_ai_optimized_message(
+             text="Analyze this",
+             additional_context=[{"type": "image/png", "value": temp_file_path}],
+         )
+
+         assert result["role"] == "user"
+         assert isinstance(result["content"], list)
+         assert result["content"][0]["type"] == "text"
+         assert result["content"][1]["type"] == "image_url"

-     assert result["role"] == "user"
-     assert isinstance(result["content"], list)
-     assert result["content"][0]["type"] == "text"
-     assert result["content"][1]["type"] == "image_url"
+
+
+ def test_message_with_multiple_uploaded_images():
+     """Test scenario where the user uploads multiple images"""
+     with temporary_image_file(suffix=".png", content=b"image1_data") as temp_file1:
+         with temporary_image_file(suffix=".jpg", content=b"image2_data") as temp_file2:
+             result = create_ai_optimized_message(
+                 text="Analyze these images",
+                 additional_context=[
+                     {"type": "image/png", "value": temp_file1},
+                     {"type": "image/jpeg", "value": temp_file2},
+                 ],
+             )
+
+             assert result["role"] == "user"
+             assert isinstance(result["content"], list)
+             assert len(result["content"]) == 3 # text + 2 images
+             assert result["content"][0]["type"] == "text"
+             assert result["content"][0]["text"] == "Analyze these images"
+             assert result["content"][1]["type"] == "image_url"
+             assert result["content"][2]["type"] == "image_url"
+
+             # Verify the image URLs are properly formatted
+             assert result["content"][1]["image_url"]["url"].startswith("data:image/png;base64,")
+             assert result["content"][2]["image_url"]["url"].startswith("data:image/jpeg;base64,")


  def test_message_with_active_cell_output():
@@ -38,14 +86,105 @@

  def test_message_with_uploaded_image_and_active_cell_output():
      """Test scenario where the user uploads an image and the active cell has an output"""
-     result = create_ai_optimized_message(
-         text="Analyze this",
-         base64EncodedUploadedImage="image_data",
-         base64EncodedActiveCellOutput="cell_output_data",
+     with temporary_image_file() as temp_file_path:
+         result = create_ai_optimized_message(
+             text="Analyze this",
+             additional_context=[{"type": "image/png", "value": temp_file_path}],
+             base64EncodedActiveCellOutput="cell_output_data",
+         )
+
+         assert result["role"] == "user"
+         assert isinstance(result["content"], list)
+         assert result["content"][0]["type"] == "text"
+         assert result["content"][1]["type"] == "image_url"
+         assert result["content"][2]["type"] == "image_url"
+
+
+ def test_extract_and_encode_images_from_additional_context_valid_image():
+     """Test extracting and encoding a valid image file"""
+     with temporary_image_file() as temp_file_path:
+         additional_context = [{"type": "image/png", "value": temp_file_path}]
+
+         encoded_images = extract_and_encode_images_from_additional_context(
+             additional_context
+         )
+
+         assert len(encoded_images) == 1
+         assert encoded_images[0].startswith("data:image/png;base64,")
+         # Verify it's valid base64 by checking it can be decoded
+         base64_data = encoded_images[0].split(",")[1]
+         decoded_data = base64.b64decode(base64_data)
+         assert decoded_data == b"fake_image_data"
+
+
+ def test_extract_and_encode_images_from_additional_context_multiple_images():
+     """Test extracting and encoding multiple image files"""
+     with temporary_image_file(suffix=".png", content=b"image1_data") as temp_file1:
+         with temporary_image_file(suffix=".jpg", content=b"image2_data") as temp_file2:
+             additional_context = [
+                 {"type": "image/png", "value": temp_file1},
+                 {"type": "image/jpeg", "value": temp_file2},
+             ]
+
+             encoded_images = extract_and_encode_images_from_additional_context(
+                 additional_context
+             )
+
+             assert len(encoded_images) == 2
+             assert encoded_images[0].startswith("data:image/png;base64,")
+             assert encoded_images[1].startswith("data:image/jpeg;base64,")
+
+
+ def test_extract_and_encode_images_from_additional_context_invalid_file():
+     """Test handling of invalid/non-existent image files"""
+     additional_context = [{"type": "image/png", "value": "non_existent_file.png"}]
+
+     encoded_images = extract_and_encode_images_from_additional_context(
+         additional_context
      )

-     assert result["role"] == "user"
-     assert isinstance(result["content"], list)
-     assert result["content"][0]["type"] == "text"
-     assert result["content"][1]["type"] == "image_url"
-     assert result["content"][2]["type"] == "image_url"
+     assert len(encoded_images) == 0
+
+
+ def test_extract_and_encode_images_from_additional_context_non_image_types():
+     """Test that non-image types are ignored"""
+     with temporary_image_file(suffix=".txt", content=b"text_data") as temp_file:
+         additional_context = [
+             {"type": "text/plain", "value": temp_file},
+             {"type": "application/pdf", "value": "document.pdf"},
+         ]
+
+         encoded_images = extract_and_encode_images_from_additional_context(
+             additional_context
+         )
+
+         assert len(encoded_images) == 0
+
+
+ def test_extract_and_encode_images_from_additional_context_mixed_types():
+     """Test handling of mixed image and non-image types"""
+     with temporary_image_file() as temp_image_file:
+         additional_context = [
+             {"type": "image/png", "value": temp_image_file},
+             {"type": "text/plain", "value": "document.txt"},
+             {"type": "image/jpeg", "value": "non_existent.jpg"},
+         ]
+
+         encoded_images = extract_and_encode_images_from_additional_context(
+             additional_context
+         )
+
+         # Should only have the valid PNG image
+         assert len(encoded_images) == 1
+         assert encoded_images[0].startswith("data:image/png;base64,")
+
+
+ def test_extract_and_encode_images_from_additional_context_empty():
+     """Test handling of empty or None additional_context"""
+     # Test with None
+     encoded_images = extract_and_encode_images_from_additional_context(None)
+     assert len(encoded_images) == 0
+
+     # Test with empty list
+     encoded_images = extract_and_encode_images_from_additional_context([])
+     assert len(encoded_images) == 0

mito_ai/tests/file_uploads/test_handlers.py CHANGED
@@ -265,3 +265,18 @@ def test_save_chunk(handler, temp_dir):

      # Clean up
      del handler._temp_dirs[filename]
+
+
+ def test_image_size_limit_exceeded(handler, temp_dir):
+     """Test that image uploads exceeding 3MB are rejected."""
+     filename = "large_image.jpg"
+     # Create 5MB of data (5 * 1024 * 1024 bytes)
+     file_data = b"x" * (5 * 1024 * 1024)
+     notebook_dir = temp_dir
+
+     # The _handle_regular_upload should raise a ValueError for oversized images
+     with pytest.raises(ValueError) as exc_info:
+         handler._handle_regular_upload(filename, file_data, notebook_dir)
+
+     # Verify the error message mentions the size limit
+     assert "exceeded 3MB limit" in str(exc_info.value)

{mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/build_log.json CHANGED
@@ -712,7 +712,7 @@
      "semver": {},
      "vscode-diff": {},
      "mito_ai": {
-         "version": "0.1.42",
+         "version": "0.1.43",
          "singleton": true,
          "import": "/home/runner/work/mito/mito/mito-ai/lib/index.js"
      }

{mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "mito_ai",
-     "version": "0.1.42",
+     "version": "0.1.43",
      "description": "AI chat for JupyterLab",
      "keywords": [
          "jupyter",
@@ -140,7 +140,7 @@
      "outputDir": "mito_ai/labextension",
      "schemaDir": "schema",
      "_build": {
-         "load": "static/remoteEntry.c7d9d8635826165de52e.js",
+         "load": "static/remoteEntry.502aef26f0416fab7435.js",
          "extension": "./extension",
          "style": "./style"
      }

{mito_ai-0.1.42.data → mito_ai-0.1.43.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "mito_ai",
-     "version": "0.1.42",
+     "version": "0.1.43",
      "description": "AI chat for JupyterLab",
      "keywords": [
          "jupyter",