camel-ai 0.2.44__py3-none-any.whl → 0.2.46__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (60)
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +6 -0
  3. camel/configs/bedrock_config.py +73 -0
  4. camel/configs/lmstudio_config.py +94 -0
  5. camel/configs/qwen_config.py +3 -3
  6. camel/models/__init__.py +4 -0
  7. camel/models/aiml_model.py +11 -104
  8. camel/models/anthropic_model.py +11 -76
  9. camel/models/aws_bedrock_model.py +112 -0
  10. camel/models/cohere_model.py +32 -4
  11. camel/models/deepseek_model.py +11 -44
  12. camel/models/gemini_model.py +10 -72
  13. camel/models/groq_model.py +11 -131
  14. camel/models/internlm_model.py +11 -61
  15. camel/models/litellm_model.py +11 -4
  16. camel/models/lmstudio_model.py +82 -0
  17. camel/models/mistral_model.py +14 -2
  18. camel/models/model_factory.py +7 -1
  19. camel/models/modelscope_model.py +11 -122
  20. camel/models/moonshot_model.py +10 -76
  21. camel/models/nemotron_model.py +4 -60
  22. camel/models/nvidia_model.py +11 -111
  23. camel/models/ollama_model.py +12 -205
  24. camel/models/openai_compatible_model.py +51 -12
  25. camel/models/openrouter_model.py +12 -131
  26. camel/models/ppio_model.py +10 -99
  27. camel/models/qwen_model.py +11 -122
  28. camel/models/reka_model.py +12 -4
  29. camel/models/sglang_model.py +5 -3
  30. camel/models/siliconflow_model.py +10 -58
  31. camel/models/togetherai_model.py +10 -177
  32. camel/models/vllm_model.py +11 -218
  33. camel/models/volcano_model.py +8 -17
  34. camel/models/yi_model.py +11 -98
  35. camel/models/zhipuai_model.py +11 -102
  36. camel/runtime/__init__.py +2 -0
  37. camel/runtime/ubuntu_docker_runtime.py +340 -0
  38. camel/toolkits/__init__.py +2 -0
  39. camel/toolkits/audio_analysis_toolkit.py +21 -17
  40. camel/toolkits/browser_toolkit.py +2 -1
  41. camel/toolkits/dalle_toolkit.py +15 -0
  42. camel/toolkits/excel_toolkit.py +14 -1
  43. camel/toolkits/image_analysis_toolkit.py +9 -1
  44. camel/toolkits/mcp_toolkit.py +2 -0
  45. camel/toolkits/networkx_toolkit.py +5 -0
  46. camel/toolkits/openai_agent_toolkit.py +5 -1
  47. camel/toolkits/pyautogui_toolkit.py +428 -0
  48. camel/toolkits/searxng_toolkit.py +7 -0
  49. camel/toolkits/slack_toolkit.py +15 -2
  50. camel/toolkits/video_analysis_toolkit.py +218 -78
  51. camel/toolkits/video_download_toolkit.py +10 -3
  52. camel/toolkits/weather_toolkit.py +14 -1
  53. camel/toolkits/zapier_toolkit.py +6 -2
  54. camel/types/enums.py +73 -0
  55. camel/types/unified_model_type.py +10 -0
  56. camel/verifiers/base.py +14 -0
  57. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/METADATA +6 -5
  58. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/RECORD +60 -54
  59. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/WHEEL +0 -0
  60. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/licenses/LICENSE +0 -0
@@ -12,6 +12,7 @@
12
12
  # limitations under the License.
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
 
15
+ import io
15
16
  import os
16
17
  import tempfile
17
18
  from pathlib import Path
@@ -92,6 +93,15 @@ class VideoAnalysisToolkit(BaseToolkit):
92
93
  transcription using OpenAI's audio models. Requires a valid OpenAI
93
94
  API key. When disabled, video analysis will be based solely on
94
95
  visual content. (default: :obj:`False`)
96
+ frame_interval (float, optional): Interval in seconds between frames
97
+ to extract from the video. (default: :obj:`4.0`)
98
+ output_language (str, optional): The language for output responses.
99
+ (default: :obj:`"English"`)
100
+ cookies_path (Optional[str]): The path to the cookies file
101
+ for the video service in Netscape format. (default: :obj:`None`)
102
+ timeout (Optional[float]): The timeout value for API requests
103
+ in seconds. If None, no timeout is applied.
104
+ (default: :obj:`None`)
95
105
  """
96
106
 
97
107
  @dependencies_required("ffmpeg", "scenedetect")
@@ -100,25 +110,29 @@ class VideoAnalysisToolkit(BaseToolkit):
100
110
  download_directory: Optional[str] = None,
101
111
  model: Optional[BaseModelBackend] = None,
102
112
  use_audio_transcription: bool = False,
113
+ frame_interval: float = 4.0,
114
+ output_language: str = "English",
115
+ cookies_path: Optional[str] = None,
116
+ timeout: Optional[float] = None,
103
117
  ) -> None:
118
+ super().__init__(timeout=timeout)
104
119
  self._cleanup = download_directory is None
105
120
  self._temp_files: list[str] = [] # Track temporary files for cleanup
106
121
  self._use_audio_transcription = use_audio_transcription
122
+ self.output_language = output_language
123
+ self.frame_interval = frame_interval
107
124
 
108
125
  self._download_directory = Path(
109
126
  download_directory or tempfile.mkdtemp()
110
127
  ).resolve()
111
128
 
112
129
  self.video_downloader_toolkit = VideoDownloaderToolkit(
113
- download_directory=str(self._download_directory)
130
+ download_directory=str(self._download_directory),
131
+ cookies_path=cookies_path,
114
132
  )
115
133
 
116
134
  try:
117
135
  self._download_directory.mkdir(parents=True, exist_ok=True)
118
- except FileExistsError:
119
- raise ValueError(
120
- f"{self._download_directory} is not a valid directory."
121
- )
122
136
  except OSError as e:
123
137
  raise ValueError(
124
138
  f"Error creating directory {self._download_directory}: {e}"
@@ -132,16 +146,18 @@ class VideoAnalysisToolkit(BaseToolkit):
132
146
  # Import ChatAgent at runtime to avoid circular imports
133
147
  from camel.agents import ChatAgent
134
148
 
135
- self.vl_agent = ChatAgent(model=self.vl_model)
149
+ self.vl_agent = ChatAgent(
150
+ model=self.vl_model, output_language=self.output_language
151
+ )
136
152
  else:
137
153
  # If no model is provided, use default model in ChatAgent
138
154
  # Import ChatAgent at runtime to avoid circular imports
139
155
  from camel.agents import ChatAgent
140
156
 
141
- self.vl_agent = ChatAgent()
157
+ self.vl_agent = ChatAgent(output_language=self.output_language)
142
158
  logger.warning(
143
- "No vision-language model provided. Using default model in"
144
- " ChatAgent."
159
+ "No vision-language model provided. Using default model in "
160
+ "ChatAgent."
145
161
  )
146
162
 
147
163
  # Initialize audio models only if audio transcription is enabled
@@ -174,16 +190,22 @@ class VideoAnalysisToolkit(BaseToolkit):
174
190
  # Clean up temporary directory if needed
175
191
  if self._cleanup and os.path.exists(self._download_directory):
176
192
  try:
177
- import shutil
193
+ import sys
178
194
 
179
- shutil.rmtree(self._download_directory)
180
- logger.debug(
181
- f"Removed temporary directory: {self._download_directory}"
182
- )
195
+ if getattr(sys, 'modules', None) is not None:
196
+ import shutil
197
+
198
+ shutil.rmtree(self._download_directory)
199
+ logger.debug(
200
+ f"Removed temp directory: {self._download_directory}"
201
+ )
202
+ except (ImportError, AttributeError):
203
+ # Skip cleanup if interpreter is shutting down
204
+ pass
183
205
  except OSError as e:
184
206
  logger.warning(
185
- f"Failed to remove temporary directory"
186
- f" {self._download_directory}: {e}"
207
+ f"Failed to remove temporary directory "
208
+ f"{self._download_directory}: {e}"
187
209
  )
188
210
 
189
211
  def _extract_audio_from_video(
@@ -237,88 +259,217 @@ class VideoAnalysisToolkit(BaseToolkit):
237
259
  logger.error(f"Audio transcription failed: {e}")
238
260
  return "Audio transcription failed."
239
261
 
240
- def _extract_keyframes(
241
- self, video_path: str, num_frames: int, threshold: float = 25.0
242
- ) -> List[Image.Image]:
243
- r"""Extract keyframes from a video based on scene changes
244
- and return them as PIL.Image.Image objects.
262
+ def _extract_keyframes(self, video_path: str) -> List[Image.Image]:
263
+ r"""Extract keyframes from a video based on scene changes and
264
+ regular intervals,and return them as PIL.Image.Image objects.
245
265
 
246
266
  Args:
247
267
  video_path (str): Path to the video file.
248
- num_frames (int): Number of keyframes to extract.
249
- threshold (float): The threshold value for scene change detection.
250
268
 
251
269
  Returns:
252
- list: A list of PIL.Image.Image objects representing
270
+ List[Image.Image]: A list of PIL.Image.Image objects representing
253
271
  the extracted keyframes.
272
+
273
+ Raises:
274
+ ValueError: If no frames could be extracted from the video.
254
275
  """
276
+ import cv2
277
+ import numpy as np
255
278
  from scenedetect import ( # type: ignore[import-untyped]
256
279
  SceneManager,
257
- VideoManager,
280
+ open_video,
258
281
  )
259
282
  from scenedetect.detectors import ( # type: ignore[import-untyped]
260
283
  ContentDetector,
261
284
  )
262
285
 
263
- if num_frames <= 0:
286
+ # Get video information
287
+ cap = cv2.VideoCapture(video_path)
288
+ total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
289
+ fps = cap.get(cv2.CAP_PROP_FPS)
290
+ duration = total_frames / fps if fps > 0 else 0
291
+ cap.release()
292
+
293
+ frame_interval = self.frame_interval # seconds
294
+ # Maximum number of frames to extract to avoid memory issues
295
+ MAX_FRAMES = 100
296
+ # Minimum time difference (in seconds) to consider frames as distinct
297
+ TIME_THRESHOLD = 1.0
298
+
299
+ # Calculate the total number of frames to extract
300
+ if duration <= 0 or fps <= 0:
264
301
  logger.warning(
265
- f"Invalid num_frames: {num_frames}, using default of 1"
302
+ "Invalid video duration or fps, using default frame count"
266
303
  )
267
- num_frames = 1
304
+ num_frames = 10
305
+ else:
306
+ num_frames = max(int(duration / frame_interval), 1)
307
+
308
+ if num_frames > MAX_FRAMES:
309
+ frame_interval = duration / MAX_FRAMES
310
+ num_frames = MAX_FRAMES
268
311
 
269
- video_manager = VideoManager([video_path])
312
+ logger.info(
313
+ f"Video duration: {duration:.2f}s, target frames: {num_frames}"
314
+ f"at {frame_interval:.2f}s intervals"
315
+ )
316
+
317
+ # Use scene detection to extract keyframes
318
+ # Use open_video instead of VideoManager
319
+ video = open_video(video_path)
270
320
  scene_manager = SceneManager()
271
- scene_manager.add_detector(ContentDetector(threshold=threshold))
321
+ scene_manager.add_detector(ContentDetector())
272
322
 
273
- video_manager.set_duration()
274
- video_manager.start()
275
- scene_manager.detect_scenes(video_manager)
323
+ # Detect scenes using the modern API
324
+ scene_manager.detect_scenes(video)
276
325
 
277
326
  scenes = scene_manager.get_scene_list()
278
327
  keyframes: List[Image.Image] = []
279
328
 
280
- # Handle case where no scenes are detected
281
- if not scenes:
329
+ # If scene detection is successful, prioritize scene change points
330
+ if scenes:
331
+ logger.info(f"Detected {len(scenes)} scene changes")
332
+
333
+ if len(scenes) > num_frames:
334
+ scene_indices = np.linspace(
335
+ 0, len(scenes) - 1, num_frames, dtype=int
336
+ )
337
+ selected_scenes = [scenes[i] for i in scene_indices]
338
+ else:
339
+ selected_scenes = scenes
340
+
341
+ # Extract frames from scenes
342
+ for scene in selected_scenes:
343
+ try:
344
+ # Get start time in seconds
345
+ start_time = scene[0].get_seconds()
346
+ frame = _capture_screenshot(video_path, start_time)
347
+ keyframes.append(frame)
348
+ except Exception as e:
349
+ logger.warning(
350
+ f"Failed to capture frame at scene change"
351
+ f" {scene[0].get_seconds()}s: {e}"
352
+ )
353
+
354
+ if len(keyframes) < num_frames and duration > 0:
355
+ logger.info(
356
+ f"Scene detection provided {len(keyframes)} frames, "
357
+ f"supplementing with regular interval frames"
358
+ )
359
+
360
+ existing_times = []
361
+ if scenes:
362
+ existing_times = [scene[0].get_seconds() for scene in scenes]
363
+
364
+ regular_frames = []
365
+ for i in range(num_frames):
366
+ time_sec = i * frame_interval
367
+
368
+ is_duplicate = False
369
+ for existing_time in existing_times:
370
+ if abs(existing_time - time_sec) < TIME_THRESHOLD:
371
+ is_duplicate = True
372
+ break
373
+
374
+ if not is_duplicate:
375
+ try:
376
+ frame = _capture_screenshot(video_path, time_sec)
377
+ regular_frames.append(frame)
378
+ except Exception as e:
379
+ logger.warning(
380
+ f"Failed to capture frame at {time_sec}s: {e}"
381
+ )
382
+
383
+ frames_needed = num_frames - len(keyframes)
384
+ if frames_needed > 0 and regular_frames:
385
+ if len(regular_frames) > frames_needed:
386
+ indices = np.linspace(
387
+ 0, len(regular_frames) - 1, frames_needed, dtype=int
388
+ )
389
+ selected_frames = [regular_frames[i] for i in indices]
390
+ else:
391
+ selected_frames = regular_frames
392
+
393
+ keyframes.extend(selected_frames)
394
+
395
+ if not keyframes:
282
396
  logger.warning(
283
- "No scenes detected in video, capturing frames at "
284
- "regular intervals"
397
+ "No frames extracted, falling back to simple interval"
398
+ "extraction"
285
399
  )
286
- import cv2
287
-
288
- cap = cv2.VideoCapture(video_path)
289
- total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
290
- fps = cap.get(cv2.CAP_PROP_FPS)
291
- duration = total_frames / fps if fps > 0 else 0
292
-
293
- if duration > 0 and total_frames > 0:
294
- # Extract frames at regular intervals
295
- interval = duration / min(num_frames, total_frames)
296
- for i in range(min(num_frames, total_frames)):
297
- time_sec = i * interval
400
+ for i in range(
401
+ min(num_frames, 10)
402
+ ): # Limit to a maximum of 10 frames to avoid infinite loops
403
+ time_sec = i * (duration / 10 if duration > 0 else 6.0)
404
+ try:
298
405
  frame = _capture_screenshot(video_path, time_sec)
299
406
  keyframes.append(frame)
300
-
301
- cap.release()
302
- else:
303
- # Extract frames from detected scenes
304
- for start_time, _ in scenes:
305
- if len(keyframes) >= num_frames:
306
- break
307
- frame = _capture_screenshot(video_path, start_time)
308
- keyframes.append(frame)
407
+ except Exception as e:
408
+ logger.warning(
409
+ f"Failed to capture frame at {time_sec}s: {e}"
410
+ )
309
411
 
310
412
  if not keyframes:
311
- logger.error("Failed to extract any keyframes from video")
312
- raise ValueError("Failed to extract keyframes from video")
413
+ error_msg = (
414
+ f"Failed to extract any keyframes from video: {video_path}"
415
+ )
416
+ logger.error(error_msg)
417
+ raise ValueError(error_msg)
418
+
419
+ # Normalize image sizes
420
+ normalized_keyframes = self._normalize_frames(keyframes)
421
+
422
+ logger.info(
423
+ f"Extracted and normalized {len(normalized_keyframes)} keyframes"
424
+ )
425
+ return normalized_keyframes
426
+
427
+ def _normalize_frames(
428
+ self, frames: List[Image.Image], target_width: int = 512
429
+ ) -> List[Image.Image]:
430
+ r"""Normalize the size of extracted frames.
313
431
 
314
- logger.info(f"Extracted {len(keyframes)} keyframes")
315
- return keyframes
432
+ Args:
433
+ frames (List[Image.Image]): List of frames to normalize.
434
+ target_width (int): Target width for normalized frames.
435
+
436
+ Returns:
437
+ List[Image.Image]: List of normalized frames.
438
+ """
439
+ normalized_frames: List[Image.Image] = []
440
+
441
+ for frame in frames:
442
+ # Get original dimensions
443
+ width, height = frame.size
444
+
445
+ # Calculate new height, maintaining aspect ratio
446
+ aspect_ratio = width / height
447
+ new_height = int(target_width / aspect_ratio)
448
+
449
+ # Resize image
450
+ resized_frame = frame.resize(
451
+ (target_width, new_height), Image.Resampling.LANCZOS
452
+ )
453
+
454
+ # Ensure the image has a proper format
455
+ if resized_frame.mode != 'RGB':
456
+ resized_frame = resized_frame.convert('RGB')
457
+
458
+ # Create a new image with explicit format
459
+ with io.BytesIO() as buffer:
460
+ resized_frame.save(buffer, format='JPEG')
461
+ buffer.seek(0)
462
+ formatted_frame = Image.open(buffer)
463
+ formatted_frame.load() # Load the image data
464
+
465
+ normalized_frames.append(formatted_frame)
466
+
467
+ return normalized_frames
316
468
 
317
469
  def ask_question_about_video(
318
470
  self,
319
471
  video_path: str,
320
472
  question: str,
321
- num_frames: int = 28,
322
473
  ) -> str:
323
474
  r"""Ask a question about the video.
324
475
 
@@ -326,24 +477,12 @@ class VideoAnalysisToolkit(BaseToolkit):
326
477
  video_path (str): The path to the video file.
327
478
  It can be a local file or a URL (such as Youtube website).
328
479
  question (str): The question to ask about the video.
329
- num_frames (int): The number of frames to extract from the video.
330
- To be adjusted based on the length of the video.
331
- (default: :obj:`28`)
332
480
 
333
481
  Returns:
334
482
  str: The answer to the question.
335
483
  """
336
484
  from urllib.parse import urlparse
337
485
 
338
- if not question:
339
- raise ValueError("Question cannot be empty")
340
-
341
- if num_frames <= 0:
342
- logger.warning(
343
- f"Invalid num_frames: {num_frames}, using default of 28"
344
- )
345
- num_frames = 28
346
-
347
486
  parsed_url = urlparse(video_path)
348
487
  is_url = all([parsed_url.scheme, parsed_url.netloc])
349
488
 
@@ -369,7 +508,7 @@ class VideoAnalysisToolkit(BaseToolkit):
369
508
  audio_path = self._extract_audio_from_video(video_path)
370
509
  audio_transcript = self._transcribe_audio(audio_path)
371
510
 
372
- video_frames = self._extract_keyframes(video_path, num_frames)
511
+ video_frames = self._extract_keyframes(video_path)
373
512
  prompt = VIDEO_QA_PROMPT.format(
374
513
  audio_transcription=audio_transcript,
375
514
  question=question,
@@ -380,7 +519,8 @@ class VideoAnalysisToolkit(BaseToolkit):
380
519
  content=prompt,
381
520
  image_list=video_frames,
382
521
  )
383
-
522
+ # Reset the agent to clear previous state
523
+ self.vl_agent.reset()
384
524
  response = self.vl_agent.step(msg)
385
525
  if not response or not response.msgs:
386
526
  logger.error("Model returned empty response")
@@ -393,7 +533,7 @@ class VideoAnalysisToolkit(BaseToolkit):
393
533
  return answer
394
534
 
395
535
  except Exception as e:
396
- error_message = f"Error processing video: {e!s}"
536
+ error_message = f"Error processing video: {e}"
397
537
  logger.error(error_message)
398
538
  return f"Error: {error_message}"
399
539
 
@@ -102,10 +102,17 @@ class VideoDownloaderToolkit(BaseToolkit):
102
102
  Cleans up the downloaded video if they are stored in a temporary
103
103
  directory.
104
104
  """
105
- import shutil
106
-
107
105
  if self._cleanup:
108
- shutil.rmtree(self._download_directory, ignore_errors=True)
106
+ try:
107
+ import sys
108
+
109
+ if getattr(sys, 'modules', None) is not None:
110
+ import shutil
111
+
112
+ shutil.rmtree(self._download_directory, ignore_errors=True)
113
+ except (ImportError, AttributeError):
114
+ # Skip cleanup if interpreter is shutting down
115
+ pass
109
116
 
110
117
  def download_video(self, url: str) -> str:
111
118
  r"""Download the video and optionally split it into chunks.
@@ -12,7 +12,7 @@
12
12
  # limitations under the License.
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
  import os
15
- from typing import List, Literal
15
+ from typing import List, Literal, Optional
16
16
 
17
17
  from camel.toolkits.base import BaseToolkit
18
18
  from camel.toolkits.function_tool import FunctionTool
@@ -27,6 +27,19 @@ class WeatherToolkit(BaseToolkit):
27
27
  using the OpenWeatherMap API.
28
28
  """
29
29
 
30
+ def __init__(
31
+ self,
32
+ timeout: Optional[float] = None,
33
+ ):
34
+ r"""Initializes a new instance of the WeatherToolkit class.
35
+
36
+ Args:
37
+ timeout (Optional[float]): The timeout value for API requests
38
+ in seconds. If None, no timeout is applied.
39
+ (default: :obj:`None`)
40
+ """
41
+ super().__init__(timeout=timeout)
42
+
30
43
  def get_openweathermap_api_key(self) -> str:
31
44
  r"""Retrieve the OpenWeatherMap API key from environment variables.
32
45
 
@@ -13,7 +13,7 @@
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
 
15
15
  import os
16
- from typing import Any, Dict, List
16
+ from typing import Any, Dict, List, Optional
17
17
 
18
18
  import requests
19
19
 
@@ -33,6 +33,9 @@ class ZapierToolkit(BaseToolkit):
33
33
  Attributes:
34
34
  api_key (str): The API key for authenticating with Zapier's API.
35
35
  base_url (str): The base URL for Zapier's API endpoints.
36
+ timeout (Optional[float]): The timeout value for API requests
37
+ in seconds. If None, no timeout is applied.
38
+ (default: :obj:`None`)
36
39
  """
37
40
 
38
41
  @dependencies_required("requests")
@@ -41,7 +44,8 @@ class ZapierToolkit(BaseToolkit):
41
44
  (None, "ZAPIER_NLA_API_KEY"),
42
45
  ]
43
46
  )
44
- def __init__(self) -> None:
47
+ def __init__(self, timeout: Optional[float] = None) -> None:
48
+ super().__init__(timeout=timeout)
45
49
  r"""Initialize the ZapierToolkit with API client. The API key is
46
50
  retrieved from environment variables.
47
51
  """
camel/types/enums.py CHANGED
@@ -39,6 +39,18 @@ class ModelType(UnifiedModelType, Enum):
39
39
  O1_PREVIEW = "o1-preview"
40
40
  O1_MINI = "o1-mini"
41
41
  O3_MINI = "o3-mini"
42
+ GPT_4_1 = "gpt-4.1-2025-04-14"
43
+ GPT_4_1_MINI = "gpt-4.1-mini-2025-04-14"
44
+ GPT_4_1_NANO = "gpt-4.1-nano-2025-04-14"
45
+
46
+ AWS_CLAUDE_3_7_SONNET = "anthropic.claude-3-7-sonnet-20250219-v1:0"
47
+ AWS_CLAUDE_3_5_SONNET = "anthropic.claude-3-5-sonnet-20241022-v2:0"
48
+ AWS_CLAUDE_3_HAIKU = "anthropic.claude-3-haiku-20240307-v1:0"
49
+ AWS_CLAUDE_3_SONNET = "anthropic.claude-3-sonnet-20240229-v1:0"
50
+ AWS_DEEPSEEK_R1 = "us.deepseek.r1-v1:0"
51
+ AWS_LLAMA_3_3_70B_INSTRUCT = "us.meta.llama3-3-70b-instruct-v1:0"
52
+ AWS_LLAMA_3_2_90B_INSTRUCT = "us.meta.llama3-2-90b-instruct-v1:0"
53
+ AWS_LLAMA_3_2_11B_INSTRUCT = "us.meta.llama3-2-11b-instruct-v1:0"
42
54
 
43
55
  GLM_4 = "glm-4"
44
56
  GLM_4V = "glm-4v"
@@ -72,6 +84,12 @@ class ModelType(UnifiedModelType, Enum):
72
84
  OPENROUTER_LLAMA_4_SCOUT_FREE = "meta-llama/llama-4-scout:free"
73
85
  OPENROUTER_OLYMPICODER_7B = "open-r1/olympiccoder-7b:free"
74
86
 
87
+ # LMStudio models
88
+ LMSTUDIO_GEMMA_3_1B = "gemma-3-1b"
89
+ LMSTUDIO_GEMMA_3_4B = "gemma-3-4b"
90
+ LMSTUDIO_GEMMA_3_12B = "gemma-3-12b"
91
+ LMSTUDIO_GEMMA_3_27B = "gemma-3-27b"
92
+
75
93
  # TogetherAI platform models support tool calling
76
94
  TOGETHER_LLAMA_3_1_8B = "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"
77
95
  TOGETHER_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo"
@@ -305,6 +323,7 @@ class ModelType(UnifiedModelType, Enum):
305
323
  self.is_sambanova,
306
324
  self.is_groq,
307
325
  self.is_openrouter,
326
+ self.is_lmstudio,
308
327
  self.is_sglang,
309
328
  self.is_moonshot,
310
329
  self.is_siliconflow,
@@ -329,6 +348,23 @@ class ModelType(UnifiedModelType, Enum):
329
348
  ModelType.O1_MINI,
330
349
  ModelType.O3_MINI,
331
350
  ModelType.GPT_4_5_PREVIEW,
351
+ ModelType.GPT_4_1,
352
+ ModelType.GPT_4_1_MINI,
353
+ ModelType.GPT_4_1_NANO,
354
+ }
355
+
356
+ @property
357
+ def is_aws_bedrock(self) -> bool:
358
+ r"""Returns whether this type of models is an AWS Bedrock model."""
359
+ return self in {
360
+ ModelType.AWS_CLAUDE_3_7_SONNET,
361
+ ModelType.AWS_CLAUDE_3_5_SONNET,
362
+ ModelType.AWS_CLAUDE_3_HAIKU,
363
+ ModelType.AWS_CLAUDE_3_SONNET,
364
+ ModelType.AWS_DEEPSEEK_R1,
365
+ ModelType.AWS_LLAMA_3_3_70B_INSTRUCT,
366
+ ModelType.AWS_LLAMA_3_2_90B_INSTRUCT,
367
+ ModelType.AWS_LLAMA_3_2_11B_INSTRUCT,
332
368
  }
333
369
 
334
370
  @property
@@ -408,6 +444,16 @@ class ModelType(UnifiedModelType, Enum):
408
444
  ModelType.OPENROUTER_OLYMPICODER_7B,
409
445
  }
410
446
 
447
+ @property
448
+ def is_lmstudio(self) -> bool:
449
+ r"""Returns whether this type of models is served by LMStudio."""
450
+ return self in {
451
+ ModelType.LMSTUDIO_GEMMA_3_1B,
452
+ ModelType.LMSTUDIO_GEMMA_3_4B,
453
+ ModelType.LMSTUDIO_GEMMA_3_12B,
454
+ ModelType.LMSTUDIO_GEMMA_3_27B,
455
+ }
456
+
411
457
  @property
412
458
  def is_together(self) -> bool:
413
459
  r"""Returns whether this type of models is served by Together AI."""
@@ -684,6 +730,10 @@ class ModelType(UnifiedModelType, Enum):
684
730
  ModelType.GLM_4V_FLASH,
685
731
  ModelType.GLM_4_AIRX,
686
732
  ModelType.OPENROUTER_OLYMPICODER_7B,
733
+ ModelType.LMSTUDIO_GEMMA_3_1B,
734
+ ModelType.LMSTUDIO_GEMMA_3_4B,
735
+ ModelType.LMSTUDIO_GEMMA_3_12B,
736
+ ModelType.LMSTUDIO_GEMMA_3_27B,
687
737
  }:
688
738
  return 8_192
689
739
  elif self in {
@@ -759,6 +809,7 @@ class ModelType(UnifiedModelType, Enum):
759
809
  ModelType.PPIO_DEEPSEEK_V3_COMMUNITY,
760
810
  ModelType.PPIO_DEEPSEEK_R1,
761
811
  ModelType.PPIO_DEEPSEEK_V3,
812
+ ModelType.AWS_DEEPSEEK_R1,
762
813
  }:
763
814
  return 64_000
764
815
  elif self in {
@@ -805,6 +856,9 @@ class ModelType(UnifiedModelType, Enum):
805
856
  ModelType.GLM_4_AIR_0111,
806
857
  ModelType.GLM_4_FLASHX,
807
858
  ModelType.GLM_4_FLASH,
859
+ ModelType.AWS_LLAMA_3_3_70B_INSTRUCT,
860
+ ModelType.AWS_LLAMA_3_2_90B_INSTRUCT,
861
+ ModelType.AWS_LLAMA_3_2_11B_INSTRUCT,
808
862
  }:
809
863
  return 128_000
810
864
  elif self in {
@@ -835,6 +889,10 @@ class ModelType(UnifiedModelType, Enum):
835
889
  ModelType.CLAUDE_3_5_HAIKU,
836
890
  ModelType.CLAUDE_3_7_SONNET,
837
891
  ModelType.YI_MEDIUM_200K,
892
+ ModelType.AWS_CLAUDE_3_5_SONNET,
893
+ ModelType.AWS_CLAUDE_3_HAIKU,
894
+ ModelType.AWS_CLAUDE_3_SONNET,
895
+ ModelType.AWS_CLAUDE_3_7_SONNET,
838
896
  }:
839
897
  return 200_000
840
898
  elif self in {
@@ -857,6 +915,9 @@ class ModelType(UnifiedModelType, Enum):
857
915
  ModelType.GLM_4_LONG,
858
916
  ModelType.TOGETHER_LLAMA_4_MAVERICK,
859
917
  ModelType.OPENROUTER_LLAMA_4_MAVERICK,
918
+ ModelType.GPT_4_1,
919
+ ModelType.GPT_4_1_MINI,
920
+ ModelType.GPT_4_1_NANO,
860
921
  }:
861
922
  return 1_048_576
862
923
  elif self in {
@@ -1025,12 +1086,14 @@ class ModelPlatformType(Enum):
1025
1086
  DEFAULT = os.getenv("DEFAULT_MODEL_PLATFORM_TYPE", "openai")
1026
1087
 
1027
1088
  OPENAI = "openai"
1089
+ AWS_BEDROCK = "aws-bedrock"
1028
1090
  AZURE = "azure"
1029
1091
  ANTHROPIC = "anthropic"
1030
1092
  GROQ = "groq"
1031
1093
  OPENROUTER = "openrouter"
1032
1094
  OLLAMA = "ollama"
1033
1095
  LITELLM = "litellm"
1096
+ LMSTUDIO = "lmstudio"
1034
1097
  ZHIPU = "zhipuai"
1035
1098
  GEMINI = "gemini"
1036
1099
  VLLM = "vllm"
@@ -1066,6 +1129,11 @@ class ModelPlatformType(Enum):
1066
1129
  r"""Returns whether this platform is openai."""
1067
1130
  return self is ModelPlatformType.OPENAI
1068
1131
 
1132
+ @property
1133
+ def is_aws_bedrock(self) -> bool:
1134
+ r"""Returns whether this platform is aws-bedrock."""
1135
+ return self is ModelPlatformType.AWS_BEDROCK
1136
+
1069
1137
  @property
1070
1138
  def is_azure(self) -> bool:
1071
1139
  r"""Returns whether this platform is azure."""
@@ -1086,6 +1154,11 @@ class ModelPlatformType(Enum):
1086
1154
  r"""Returns whether this platform is openrouter."""
1087
1155
  return self is ModelPlatformType.OPENROUTER
1088
1156
 
1157
+ @property
1158
+ def is_lmstudio(self) -> bool:
1159
+ r"""Returns whether this platform is lmstudio."""
1160
+ return self is ModelPlatformType.LMSTUDIO
1161
+
1089
1162
  @property
1090
1163
  def is_ollama(self) -> bool:
1091
1164
  r"""Returns whether this platform is ollama."""