camel-ai 0.2.45__py3-none-any.whl → 0.2.46__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai has been flagged as possibly problematic.

Files changed (42)
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +6 -0
  3. camel/configs/bedrock_config.py +73 -0
  4. camel/configs/lmstudio_config.py +94 -0
  5. camel/configs/qwen_config.py +3 -3
  6. camel/models/__init__.py +4 -0
  7. camel/models/aiml_model.py +11 -104
  8. camel/models/anthropic_model.py +11 -76
  9. camel/models/aws_bedrock_model.py +112 -0
  10. camel/models/deepseek_model.py +11 -44
  11. camel/models/gemini_model.py +10 -72
  12. camel/models/groq_model.py +11 -131
  13. camel/models/internlm_model.py +11 -61
  14. camel/models/lmstudio_model.py +82 -0
  15. camel/models/model_factory.py +7 -1
  16. camel/models/modelscope_model.py +11 -122
  17. camel/models/moonshot_model.py +10 -76
  18. camel/models/nemotron_model.py +4 -60
  19. camel/models/nvidia_model.py +11 -111
  20. camel/models/ollama_model.py +12 -205
  21. camel/models/openai_compatible_model.py +51 -12
  22. camel/models/openrouter_model.py +12 -131
  23. camel/models/ppio_model.py +10 -99
  24. camel/models/qwen_model.py +11 -122
  25. camel/models/reka_model.py +1 -1
  26. camel/models/sglang_model.py +5 -3
  27. camel/models/siliconflow_model.py +10 -58
  28. camel/models/togetherai_model.py +10 -177
  29. camel/models/vllm_model.py +11 -218
  30. camel/models/volcano_model.py +1 -15
  31. camel/models/yi_model.py +11 -98
  32. camel/models/zhipuai_model.py +11 -102
  33. camel/toolkits/__init__.py +2 -0
  34. camel/toolkits/pyautogui_toolkit.py +428 -0
  35. camel/toolkits/video_analysis_toolkit.py +215 -80
  36. camel/toolkits/video_download_toolkit.py +10 -3
  37. camel/types/enums.py +64 -0
  38. camel/types/unified_model_type.py +10 -0
  39. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/METADATA +2 -1
  40. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/RECORD +42 -37
  41. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/WHEEL +0 -0
  42. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/licenses/LICENSE +0 -0
camel/toolkits/video_analysis_toolkit.py CHANGED
@@ -12,6 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
+import io
 import os
 import tempfile
 from pathlib import Path
@@ -92,9 +93,15 @@ class VideoAnalysisToolkit(BaseToolkit):
             transcription using OpenAI's audio models. Requires a valid OpenAI
             API key. When disabled, video analysis will be based solely on
             visual content. (default: :obj:`False`)
+        frame_interval (float, optional): Interval in seconds between frames
+            to extract from the video. (default: :obj:`4.0`)
+        output_language (str, optional): The language for output responses.
+            (default: :obj:`"English"`)
+        cookies_path (Optional[str]): The path to the cookies file
+            for the video service in Netscape format. (default: :obj:`None`)
         timeout (Optional[float]): The timeout value for API requests
-            in seconds. If None, no timeout is applied.
-            (default: :obj:`None`)
+            in seconds. If None, no timeout is applied.
+            (default: :obj:`None`)
     """
 
     @dependencies_required("ffmpeg", "scenedetect")
@@ -103,27 +110,29 @@ class VideoAnalysisToolkit(BaseToolkit):
         download_directory: Optional[str] = None,
         model: Optional[BaseModelBackend] = None,
         use_audio_transcription: bool = False,
+        frame_interval: float = 4.0,
+        output_language: str = "English",
+        cookies_path: Optional[str] = None,
         timeout: Optional[float] = None,
     ) -> None:
         super().__init__(timeout=timeout)
         self._cleanup = download_directory is None
         self._temp_files: list[str] = []  # Track temporary files for cleanup
         self._use_audio_transcription = use_audio_transcription
+        self.output_language = output_language
+        self.frame_interval = frame_interval
 
         self._download_directory = Path(
             download_directory or tempfile.mkdtemp()
         ).resolve()
 
         self.video_downloader_toolkit = VideoDownloaderToolkit(
-            download_directory=str(self._download_directory)
+            download_directory=str(self._download_directory),
+            cookies_path=cookies_path,
         )
 
         try:
             self._download_directory.mkdir(parents=True, exist_ok=True)
-        except FileExistsError:
-            raise ValueError(
-                f"{self._download_directory} is not a valid directory."
-            )
         except OSError as e:
             raise ValueError(
                 f"Error creating directory {self._download_directory}: {e}"
@@ -137,16 +146,18 @@ class VideoAnalysisToolkit(BaseToolkit):
             # Import ChatAgent at runtime to avoid circular imports
             from camel.agents import ChatAgent
 
-            self.vl_agent = ChatAgent(model=self.vl_model)
+            self.vl_agent = ChatAgent(
+                model=self.vl_model, output_language=self.output_language
+            )
         else:
             # If no model is provided, use default model in ChatAgent
             # Import ChatAgent at runtime to avoid circular imports
             from camel.agents import ChatAgent
 
-            self.vl_agent = ChatAgent()
+            self.vl_agent = ChatAgent(output_language=self.output_language)
             logger.warning(
-                "No vision-language model provided. Using default model in"
-                " ChatAgent."
+                "No vision-language model provided. Using default model in "
+                "ChatAgent."
             )
 
         # Initialize audio models only if audio transcription is enabled
@@ -179,16 +190,22 @@ class VideoAnalysisToolkit(BaseToolkit):
         # Clean up temporary directory if needed
         if self._cleanup and os.path.exists(self._download_directory):
             try:
-                import shutil
+                import sys
 
-                shutil.rmtree(self._download_directory)
-                logger.debug(
-                    f"Removed temporary directory: {self._download_directory}"
-                )
+                if getattr(sys, 'modules', None) is not None:
+                    import shutil
+
+                    shutil.rmtree(self._download_directory)
+                    logger.debug(
+                        f"Removed temp directory: {self._download_directory}"
+                    )
+            except (ImportError, AttributeError):
+                # Skip cleanup if interpreter is shutting down
+                pass
             except OSError as e:
                 logger.warning(
-                    f"Failed to remove temporary directory"
-                    f" {self._download_directory}: {e}"
+                    f"Failed to remove temporary directory "
+                    f"{self._download_directory}: {e}"
                 )
 
     def _extract_audio_from_video(
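
The guarded cleanup above targets destructors that run during interpreter shutdown, when imports inside `__del__` can raise ImportError and module globals may already be None. A standalone sketch of the same idiom, independent of this toolkit:

    import tempfile

    class TempDirOwner:
        def __init__(self) -> None:
            self._dir = tempfile.mkdtemp()

        def __del__(self) -> None:
            try:
                import sys

                # During interpreter teardown sys.modules may be gone;
                # probe it before importing shutil.
                if getattr(sys, 'modules', None) is not None:
                    import shutil

                    shutil.rmtree(self._dir, ignore_errors=True)
            except (ImportError, AttributeError):
                pass  # Interpreter is shutting down; leave the directory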
@@ -242,88 +259,217 @@ class VideoAnalysisToolkit(BaseToolkit):
             logger.error(f"Audio transcription failed: {e}")
             return "Audio transcription failed."
 
-    def _extract_keyframes(
-        self, video_path: str, num_frames: int, threshold: float = 25.0
-    ) -> List[Image.Image]:
-        r"""Extract keyframes from a video based on scene changes
-        and return them as PIL.Image.Image objects.
+    def _extract_keyframes(self, video_path: str) -> List[Image.Image]:
+        r"""Extract keyframes from a video based on scene changes and
+        regular intervals, and return them as PIL.Image.Image objects.
 
         Args:
             video_path (str): Path to the video file.
-            num_frames (int): Number of keyframes to extract.
-            threshold (float): The threshold value for scene change detection.
 
         Returns:
-            list: A list of PIL.Image.Image objects representing
+            List[Image.Image]: A list of PIL.Image.Image objects representing
                 the extracted keyframes.
+
+        Raises:
+            ValueError: If no frames could be extracted from the video.
         """
+        import cv2
+        import numpy as np
         from scenedetect import (  # type: ignore[import-untyped]
             SceneManager,
-            VideoManager,
+            open_video,
         )
         from scenedetect.detectors import (  # type: ignore[import-untyped]
             ContentDetector,
         )
 
-        if num_frames <= 0:
+        # Get video information
+        cap = cv2.VideoCapture(video_path)
+        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+        fps = cap.get(cv2.CAP_PROP_FPS)
+        duration = total_frames / fps if fps > 0 else 0
+        cap.release()
+
+        frame_interval = self.frame_interval  # seconds
+        # Maximum number of frames to extract to avoid memory issues
+        MAX_FRAMES = 100
+        # Minimum time difference (in seconds) to consider frames as distinct
+        TIME_THRESHOLD = 1.0
+
+        # Calculate the total number of frames to extract
+        if duration <= 0 or fps <= 0:
             logger.warning(
-                f"Invalid num_frames: {num_frames}, using default of 1"
+                "Invalid video duration or fps, using default frame count"
             )
-            num_frames = 1
+            num_frames = 10
+        else:
+            num_frames = max(int(duration / frame_interval), 1)
+
+        if num_frames > MAX_FRAMES:
+            frame_interval = duration / MAX_FRAMES
+            num_frames = MAX_FRAMES
 
-        video_manager = VideoManager([video_path])
+        logger.info(
+            f"Video duration: {duration:.2f}s, target frames: {num_frames} "
+            f"at {frame_interval:.2f}s intervals"
+        )
+
+        # Use scene detection to extract keyframes
+        # Use open_video instead of VideoManager
+        video = open_video(video_path)
         scene_manager = SceneManager()
-        scene_manager.add_detector(ContentDetector(threshold=threshold))
+        scene_manager.add_detector(ContentDetector())
 
-        video_manager.set_duration()
-        video_manager.start()
-        scene_manager.detect_scenes(video_manager)
+        # Detect scenes using the modern API
+        scene_manager.detect_scenes(video)
 
         scenes = scene_manager.get_scene_list()
         keyframes: List[Image.Image] = []
 
-        # Handle case where no scenes are detected
-        if not scenes:
+        # If scene detection is successful, prioritize scene change points
+        if scenes:
+            logger.info(f"Detected {len(scenes)} scene changes")
+
+            if len(scenes) > num_frames:
+                scene_indices = np.linspace(
+                    0, len(scenes) - 1, num_frames, dtype=int
+                )
+                selected_scenes = [scenes[i] for i in scene_indices]
+            else:
+                selected_scenes = scenes
+
+            # Extract frames from scenes
+            for scene in selected_scenes:
+                try:
+                    # Get start time in seconds
+                    start_time = scene[0].get_seconds()
+                    frame = _capture_screenshot(video_path, start_time)
+                    keyframes.append(frame)
+                except Exception as e:
+                    logger.warning(
+                        f"Failed to capture frame at scene change"
+                        f" {scene[0].get_seconds()}s: {e}"
+                    )
+
+        if len(keyframes) < num_frames and duration > 0:
+            logger.info(
+                f"Scene detection provided {len(keyframes)} frames, "
+                f"supplementing with regular interval frames"
+            )
+
+            existing_times = []
+            if scenes:
+                existing_times = [scene[0].get_seconds() for scene in scenes]
+
+            regular_frames = []
+            for i in range(num_frames):
+                time_sec = i * frame_interval
+
+                is_duplicate = False
+                for existing_time in existing_times:
+                    if abs(existing_time - time_sec) < TIME_THRESHOLD:
+                        is_duplicate = True
+                        break
+
+                if not is_duplicate:
+                    try:
+                        frame = _capture_screenshot(video_path, time_sec)
+                        regular_frames.append(frame)
+                    except Exception as e:
+                        logger.warning(
+                            f"Failed to capture frame at {time_sec}s: {e}"
+                        )
+
+            frames_needed = num_frames - len(keyframes)
+            if frames_needed > 0 and regular_frames:
+                if len(regular_frames) > frames_needed:
+                    indices = np.linspace(
+                        0, len(regular_frames) - 1, frames_needed, dtype=int
+                    )
+                    selected_frames = [regular_frames[i] for i in indices]
+                else:
+                    selected_frames = regular_frames
+
+                keyframes.extend(selected_frames)
+
+        if not keyframes:
             logger.warning(
-                "No scenes detected in video, capturing frames at "
-                "regular intervals"
+                "No frames extracted, falling back to simple interval "
+                "extraction"
             )
-            import cv2
-
-            cap = cv2.VideoCapture(video_path)
-            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-            fps = cap.get(cv2.CAP_PROP_FPS)
-            duration = total_frames / fps if fps > 0 else 0
-
-            if duration > 0 and total_frames > 0:
-                # Extract frames at regular intervals
-                interval = duration / min(num_frames, total_frames)
-                for i in range(min(num_frames, total_frames)):
-                    time_sec = i * interval
+            for i in range(
+                min(num_frames, 10)
+            ):  # Limit to a maximum of 10 frames to avoid infinite loops
+                time_sec = i * (duration / 10 if duration > 0 else 6.0)
+                try:
                     frame = _capture_screenshot(video_path, time_sec)
                     keyframes.append(frame)
-
-            cap.release()
-        else:
-            # Extract frames from detected scenes
-            for start_time, _ in scenes:
-                if len(keyframes) >= num_frames:
-                    break
-                frame = _capture_screenshot(video_path, start_time)
-                keyframes.append(frame)
+                except Exception as e:
+                    logger.warning(
+                        f"Failed to capture frame at {time_sec}s: {e}"
+                    )
 
         if not keyframes:
-            logger.error("Failed to extract any keyframes from video")
-            raise ValueError("Failed to extract keyframes from video")
+            error_msg = (
+                f"Failed to extract any keyframes from video: {video_path}"
+            )
+            logger.error(error_msg)
+            raise ValueError(error_msg)
+
+        # Normalize image sizes
+        normalized_keyframes = self._normalize_frames(keyframes)
+
+        logger.info(
+            f"Extracted and normalized {len(normalized_keyframes)} keyframes"
+        )
+        return normalized_keyframes
+
+    def _normalize_frames(
+        self, frames: List[Image.Image], target_width: int = 512
+    ) -> List[Image.Image]:
+        r"""Normalize the size of extracted frames.
 
-        logger.info(f"Extracted {len(keyframes)} keyframes")
-        return keyframes
+        Args:
+            frames (List[Image.Image]): List of frames to normalize.
+            target_width (int): Target width for normalized frames.
+
+        Returns:
+            List[Image.Image]: List of normalized frames.
+        """
+        normalized_frames: List[Image.Image] = []
+
+        for frame in frames:
+            # Get original dimensions
+            width, height = frame.size
+
+            # Calculate new height, maintaining aspect ratio
+            aspect_ratio = width / height
+            new_height = int(target_width / aspect_ratio)
+
+            # Resize image
+            resized_frame = frame.resize(
+                (target_width, new_height), Image.Resampling.LANCZOS
+            )
+
+            # Ensure the image has a proper format
+            if resized_frame.mode != 'RGB':
+                resized_frame = resized_frame.convert('RGB')
+
+            # Create a new image with explicit format
+            with io.BytesIO() as buffer:
+                resized_frame.save(buffer, format='JPEG')
+                buffer.seek(0)
+                formatted_frame = Image.open(buffer)
+                formatted_frame.load()  # Load the image data
+
+            normalized_frames.append(formatted_frame)
+
+        return normalized_frames
 
     def ask_question_about_video(
         self,
         video_path: str,
         question: str,
-        num_frames: int = 28,
     ) -> str:
         r"""Ask a question about the video.
 
@@ -331,24 +477,12 @@ class VideoAnalysisToolkit(BaseToolkit):
             video_path (str): The path to the video file.
                 It can be a local file or a URL (such as Youtube website).
             question (str): The question to ask about the video.
-            num_frames (int): The number of frames to extract from the video.
-                To be adjusted based on the length of the video.
-                (default: :obj:`28`)
 
         Returns:
             str: The answer to the question.
         """
         from urllib.parse import urlparse
 
-        if not question:
-            raise ValueError("Question cannot be empty")
-
-        if num_frames <= 0:
-            logger.warning(
-                f"Invalid num_frames: {num_frames}, using default of 28"
-            )
-            num_frames = 28
-
         parsed_url = urlparse(video_path)
         is_url = all([parsed_url.scheme, parsed_url.netloc])
 
@@ -374,7 +508,7 @@ class VideoAnalysisToolkit(BaseToolkit):
                 audio_path = self._extract_audio_from_video(video_path)
                 audio_transcript = self._transcribe_audio(audio_path)
 
-            video_frames = self._extract_keyframes(video_path, num_frames)
+            video_frames = self._extract_keyframes(video_path)
             prompt = VIDEO_QA_PROMPT.format(
                 audio_transcription=audio_transcript,
                 question=question,
@@ -385,7 +519,8 @@ class VideoAnalysisToolkit(BaseToolkit):
                 content=prompt,
                 image_list=video_frames,
             )
-
+            # Reset the agent to clear previous state
+            self.vl_agent.reset()
             response = self.vl_agent.step(msg)
             if not response or not response.msgs:
                 logger.error("Model returned empty response")
@@ -398,7 +533,7 @@ class VideoAnalysisToolkit(BaseToolkit):
             return answer
 
         except Exception as e:
-            error_message = f"Error processing video: {e!s}"
+            error_message = f"Error processing video: {e}"
             logger.error(error_message)
             return f"Error: {error_message}"
 
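With num_frames gone from the public signature, the frame budget is now derived from the video duration and frame_interval, capped at MAX_FRAMES, and the agent is reset before each question. A hedged usage sketch (the file path and question are hypothetical):

    from camel.toolkits import VideoAnalysisToolkit

    toolkit = VideoAnalysisToolkit(frame_interval=4.0)
    answer = toolkit.ask_question_about_video(
        video_path="./videos/demo.mp4",
        question="What happens in this video?",
    )
    # A 600 s video at frame_interval=4.0 targets
    # max(int(600 / 4.0), 1) = 150 frames; the MAX_FRAMES=100 cap reduces
    # that to 100 frames at 600 / 100 = 6.0 s intervals.
    print(answer)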
camel/toolkits/video_download_toolkit.py CHANGED
@@ -102,10 +102,17 @@ class VideoDownloaderToolkit(BaseToolkit):
         Cleans up the downloaded video if they are stored in a temporary
         directory.
         """
-        import shutil
-
         if self._cleanup:
-            shutil.rmtree(self._download_directory, ignore_errors=True)
+            try:
+                import sys
+
+                if getattr(sys, 'modules', None) is not None:
+                    import shutil
+
+                    shutil.rmtree(self._download_directory, ignore_errors=True)
+            except (ImportError, AttributeError):
+                # Skip cleanup if interpreter is shutting down
+                pass
 
     def download_video(self, url: str) -> str:
         r"""Download the video and optionally split it into chunks.
camel/types/enums.py CHANGED
@@ -43,6 +43,15 @@ class ModelType(UnifiedModelType, Enum):
     GPT_4_1_MINI = "gpt-4.1-mini-2025-04-14"
     GPT_4_1_NANO = "gpt-4.1-nano-2025-04-14"
 
+    AWS_CLAUDE_3_7_SONNET = "anthropic.claude-3-7-sonnet-20250219-v1:0"
+    AWS_CLAUDE_3_5_SONNET = "anthropic.claude-3-5-sonnet-20241022-v2:0"
+    AWS_CLAUDE_3_HAIKU = "anthropic.claude-3-haiku-20240307-v1:0"
+    AWS_CLAUDE_3_SONNET = "anthropic.claude-3-sonnet-20240229-v1:0"
+    AWS_DEEPSEEK_R1 = "us.deepseek.r1-v1:0"
+    AWS_LLAMA_3_3_70B_INSTRUCT = "us.meta.llama3-3-70b-instruct-v1:0"
+    AWS_LLAMA_3_2_90B_INSTRUCT = "us.meta.llama3-2-90b-instruct-v1:0"
+    AWS_LLAMA_3_2_11B_INSTRUCT = "us.meta.llama3-2-11b-instruct-v1:0"
+
     GLM_4 = "glm-4"
     GLM_4V = "glm-4v"
     GLM_4V_FLASH = "glm-4v-flash"
@@ -75,6 +84,12 @@
     OPENROUTER_LLAMA_4_SCOUT_FREE = "meta-llama/llama-4-scout:free"
     OPENROUTER_OLYMPICODER_7B = "open-r1/olympiccoder-7b:free"
 
+    # LMStudio models
+    LMSTUDIO_GEMMA_3_1B = "gemma-3-1b"
+    LMSTUDIO_GEMMA_3_4B = "gemma-3-4b"
+    LMSTUDIO_GEMMA_3_12B = "gemma-3-12b"
+    LMSTUDIO_GEMMA_3_27B = "gemma-3-27b"
+
     # TogetherAI platform models support tool calling
     TOGETHER_LLAMA_3_1_8B = "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"
     TOGETHER_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo"
@@ -308,6 +323,7 @@
             self.is_sambanova,
             self.is_groq,
             self.is_openrouter,
+            self.is_lmstudio,
             self.is_sglang,
             self.is_moonshot,
             self.is_siliconflow,
@@ -337,6 +353,20 @@
             ModelType.GPT_4_1_NANO,
         }
 
+    @property
+    def is_aws_bedrock(self) -> bool:
+        r"""Returns whether this type of models is an AWS Bedrock model."""
+        return self in {
+            ModelType.AWS_CLAUDE_3_7_SONNET,
+            ModelType.AWS_CLAUDE_3_5_SONNET,
+            ModelType.AWS_CLAUDE_3_HAIKU,
+            ModelType.AWS_CLAUDE_3_SONNET,
+            ModelType.AWS_DEEPSEEK_R1,
+            ModelType.AWS_LLAMA_3_3_70B_INSTRUCT,
+            ModelType.AWS_LLAMA_3_2_90B_INSTRUCT,
+            ModelType.AWS_LLAMA_3_2_11B_INSTRUCT,
+        }
+
     @property
     def is_azure_openai(self) -> bool:
         r"""Returns whether this type of models is an OpenAI-released model
@@ -414,6 +444,16 @@
             ModelType.OPENROUTER_OLYMPICODER_7B,
         }
 
+    @property
+    def is_lmstudio(self) -> bool:
+        r"""Returns whether this type of models is served by LMStudio."""
+        return self in {
+            ModelType.LMSTUDIO_GEMMA_3_1B,
+            ModelType.LMSTUDIO_GEMMA_3_4B,
+            ModelType.LMSTUDIO_GEMMA_3_12B,
+            ModelType.LMSTUDIO_GEMMA_3_27B,
+        }
+
     @property
     def is_together(self) -> bool:
         r"""Returns whether this type of models is served by Together AI."""
@@ -690,6 +730,10 @@
             ModelType.GLM_4V_FLASH,
             ModelType.GLM_4_AIRX,
             ModelType.OPENROUTER_OLYMPICODER_7B,
+            ModelType.LMSTUDIO_GEMMA_3_1B,
+            ModelType.LMSTUDIO_GEMMA_3_4B,
+            ModelType.LMSTUDIO_GEMMA_3_12B,
+            ModelType.LMSTUDIO_GEMMA_3_27B,
         }:
             return 8_192
         elif self in {
@@ -765,6 +809,7 @@
             ModelType.PPIO_DEEPSEEK_V3_COMMUNITY,
             ModelType.PPIO_DEEPSEEK_R1,
             ModelType.PPIO_DEEPSEEK_V3,
+            ModelType.AWS_DEEPSEEK_R1,
         }:
             return 64_000
         elif self in {
@@ -811,6 +856,9 @@
             ModelType.GLM_4_AIR_0111,
             ModelType.GLM_4_FLASHX,
             ModelType.GLM_4_FLASH,
+            ModelType.AWS_LLAMA_3_3_70B_INSTRUCT,
+            ModelType.AWS_LLAMA_3_2_90B_INSTRUCT,
+            ModelType.AWS_LLAMA_3_2_11B_INSTRUCT,
         }:
             return 128_000
         elif self in {
@@ -841,6 +889,10 @@
             ModelType.CLAUDE_3_5_HAIKU,
             ModelType.CLAUDE_3_7_SONNET,
             ModelType.YI_MEDIUM_200K,
+            ModelType.AWS_CLAUDE_3_5_SONNET,
+            ModelType.AWS_CLAUDE_3_HAIKU,
+            ModelType.AWS_CLAUDE_3_SONNET,
+            ModelType.AWS_CLAUDE_3_7_SONNET,
         }:
             return 200_000
         elif self in {
@@ -1034,12 +1086,14 @@ class ModelPlatformType(Enum):
     DEFAULT = os.getenv("DEFAULT_MODEL_PLATFORM_TYPE", "openai")
 
     OPENAI = "openai"
+    AWS_BEDROCK = "aws-bedrock"
     AZURE = "azure"
     ANTHROPIC = "anthropic"
     GROQ = "groq"
     OPENROUTER = "openrouter"
     OLLAMA = "ollama"
     LITELLM = "litellm"
+    LMSTUDIO = "lmstudio"
     ZHIPU = "zhipuai"
     GEMINI = "gemini"
     VLLM = "vllm"
@@ -1075,6 +1129,11 @@
         r"""Returns whether this platform is openai."""
         return self is ModelPlatformType.OPENAI
 
+    @property
+    def is_aws_bedrock(self) -> bool:
+        r"""Returns whether this platform is aws-bedrock."""
+        return self is ModelPlatformType.AWS_BEDROCK
+
     @property
     def is_azure(self) -> bool:
         r"""Returns whether this platform is azure."""
@@ -1095,6 +1154,11 @@
         r"""Returns whether this platform is openrouter."""
         return self is ModelPlatformType.OPENROUTER
 
+    @property
+    def is_lmstudio(self) -> bool:
+        r"""Returns whether this platform is lmstudio."""
+        return self is ModelPlatformType.LMSTUDIO
+
     @property
     def is_ollama(self) -> bool:
         r"""Returns whether this platform is ollama."""
camel/types/unified_model_type.py CHANGED
@@ -63,6 +63,11 @@ class UnifiedModelType(str):
         r"""Returns whether the model is an OpenAI model."""
         return True
 
+    @property
+    def is_aws_bedrock(self) -> bool:
+        r"""Returns whether the model is an AWS Bedrock model."""
+        return True
+
     @property
     def is_anthropic(self) -> bool:
         r"""Returns whether the model is an Anthropic model."""
@@ -83,6 +88,11 @@ class UnifiedModelType(str):
         r"""Returns whether the model is a OpenRouter served model."""
         return True
 
+    @property
+    def is_lmstudio(self) -> bool:
+        r"""Returns whether the model is a LMStudio served model."""
+        return True
+
     @property
     def is_ppio(self) -> bool:
         r"""Returns whether the model is a PPIO served model."""
{camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: camel-ai
-Version: 0.2.45
+Version: 0.2.46
 Summary: Communicative Agents for AI Society Study
 Project-URL: Homepage, https://www.camel-ai.org/
 Project-URL: Repository, https://github.com/camel-ai/camel
@@ -84,6 +84,7 @@ Requires-Dist: playwright>=1.50.0; extra == 'all'
 Requires-Dist: prance<24,>=23.6.21.0; extra == 'all'
 Requires-Dist: praw<8,>=7.7.1; extra == 'all'
 Requires-Dist: pre-commit<4,>=3; extra == 'all'
+Requires-Dist: pyautogui<0.10,>=0.9.54; extra == 'all'
 Requires-Dist: pydub<0.26,>=0.25.1; extra == 'all'
 Requires-Dist: pygithub<3,>=2.6.0; extra == 'all'
 Requires-Dist: pymilvus<3,>=2.4.0; extra == 'all'