camel-ai 0.2.75a6__py3-none-any.whl → 0.2.76__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic; consult the package registry's advisory page for more details.

Files changed (97)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +1001 -205
  3. camel/agents/mcp_agent.py +30 -27
  4. camel/configs/__init__.py +6 -0
  5. camel/configs/amd_config.py +70 -0
  6. camel/configs/cometapi_config.py +104 -0
  7. camel/data_collectors/alpaca_collector.py +15 -6
  8. camel/environments/tic_tac_toe.py +1 -1
  9. camel/interpreters/__init__.py +2 -0
  10. camel/interpreters/docker/Dockerfile +3 -12
  11. camel/interpreters/microsandbox_interpreter.py +395 -0
  12. camel/loaders/__init__.py +11 -2
  13. camel/loaders/chunkr_reader.py +9 -0
  14. camel/memories/__init__.py +2 -1
  15. camel/memories/agent_memories.py +3 -1
  16. camel/memories/blocks/chat_history_block.py +21 -3
  17. camel/memories/records.py +88 -8
  18. camel/messages/base.py +127 -34
  19. camel/models/__init__.py +4 -0
  20. camel/models/amd_model.py +101 -0
  21. camel/models/azure_openai_model.py +0 -6
  22. camel/models/base_model.py +30 -0
  23. camel/models/cometapi_model.py +83 -0
  24. camel/models/model_factory.py +4 -0
  25. camel/models/openai_compatible_model.py +0 -6
  26. camel/models/openai_model.py +0 -6
  27. camel/models/zhipuai_model.py +61 -2
  28. camel/parsers/__init__.py +18 -0
  29. camel/parsers/mcp_tool_call_parser.py +176 -0
  30. camel/retrievers/auto_retriever.py +1 -0
  31. camel/runtimes/daytona_runtime.py +11 -12
  32. camel/societies/workforce/prompts.py +131 -50
  33. camel/societies/workforce/single_agent_worker.py +434 -49
  34. camel/societies/workforce/structured_output_handler.py +30 -18
  35. camel/societies/workforce/task_channel.py +43 -0
  36. camel/societies/workforce/utils.py +105 -12
  37. camel/societies/workforce/workforce.py +1322 -311
  38. camel/societies/workforce/workforce_logger.py +24 -5
  39. camel/storages/key_value_storages/json.py +15 -2
  40. camel/storages/object_storages/google_cloud.py +1 -1
  41. camel/storages/vectordb_storages/oceanbase.py +10 -11
  42. camel/storages/vectordb_storages/tidb.py +8 -6
  43. camel/tasks/task.py +4 -3
  44. camel/toolkits/__init__.py +18 -5
  45. camel/toolkits/aci_toolkit.py +45 -0
  46. camel/toolkits/code_execution.py +28 -1
  47. camel/toolkits/context_summarizer_toolkit.py +684 -0
  48. camel/toolkits/dingtalk.py +1135 -0
  49. camel/toolkits/edgeone_pages_mcp_toolkit.py +11 -31
  50. camel/toolkits/{file_write_toolkit.py → file_toolkit.py} +194 -34
  51. camel/toolkits/function_tool.py +6 -1
  52. camel/toolkits/google_drive_mcp_toolkit.py +12 -31
  53. camel/toolkits/hybrid_browser_toolkit/config_loader.py +12 -0
  54. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +79 -2
  55. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +95 -59
  56. camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
  57. camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +5 -612
  58. camel/toolkits/hybrid_browser_toolkit/ts/package.json +0 -1
  59. camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +619 -95
  60. camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +7 -2
  61. camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +115 -219
  62. camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
  63. camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
  64. camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
  65. camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +1 -0
  66. camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +39 -6
  67. camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +405 -131
  68. camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +9 -5
  69. camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +98 -31
  70. camel/toolkits/markitdown_toolkit.py +27 -1
  71. camel/toolkits/mcp_toolkit.py +348 -348
  72. camel/toolkits/message_integration.py +3 -0
  73. camel/toolkits/minimax_mcp_toolkit.py +195 -0
  74. camel/toolkits/note_taking_toolkit.py +18 -8
  75. camel/toolkits/notion_mcp_toolkit.py +16 -26
  76. camel/toolkits/origene_mcp_toolkit.py +8 -49
  77. camel/toolkits/playwright_mcp_toolkit.py +12 -31
  78. camel/toolkits/resend_toolkit.py +168 -0
  79. camel/toolkits/slack_toolkit.py +50 -1
  80. camel/toolkits/terminal_toolkit/__init__.py +18 -0
  81. camel/toolkits/terminal_toolkit/terminal_toolkit.py +924 -0
  82. camel/toolkits/terminal_toolkit/utils.py +532 -0
  83. camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
  84. camel/toolkits/video_analysis_toolkit.py +17 -11
  85. camel/toolkits/wechat_official_toolkit.py +483 -0
  86. camel/types/enums.py +124 -1
  87. camel/types/unified_model_type.py +5 -0
  88. camel/utils/commons.py +17 -0
  89. camel/utils/context_utils.py +804 -0
  90. camel/utils/mcp.py +136 -2
  91. camel/utils/token_counting.py +25 -17
  92. {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76.dist-info}/METADATA +158 -59
  93. {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76.dist-info}/RECORD +95 -76
  94. camel/loaders/pandas_reader.py +0 -368
  95. camel/toolkits/terminal_toolkit.py +0 -1788
  96. {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76.dist-info}/WHEEL +0 -0
  97. {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76.dist-info}/licenses/LICENSE +0 -0
camel/messages/base.py CHANGED
@@ -64,8 +64,9 @@ class BaseMessage:
64
64
  content (str): The content of the message.
65
65
  video_bytes (Optional[bytes]): Optional bytes of a video associated
66
66
  with the message. (default: :obj:`None`)
67
- image_list (Optional[List[Image.Image]]): Optional list of PIL Image
68
- objects associated with the message. (default: :obj:`None`)
67
+ image_list (Optional[List[Union[Image.Image, str]]]): Optional list of
68
+ PIL Image objects or image URLs (strings) associated with the
69
+ message. (default: :obj:`None`)
69
70
  image_detail (Literal["auto", "low", "high"]): Detail level of the
70
71
  images associated with the message. (default: :obj:`auto`)
71
72
  video_detail (Literal["auto", "low", "high"]): Detail level of the
@@ -80,7 +81,7 @@ class BaseMessage:
80
81
  content: str
81
82
 
82
83
  video_bytes: Optional[bytes] = None
83
- image_list: Optional[List[Image.Image]] = None
84
+ image_list: Optional[List[Union[Image.Image, str]]] = None
84
85
  image_detail: Literal["auto", "low", "high"] = "auto"
85
86
  video_detail: Literal["auto", "low", "high"] = "auto"
86
87
  parsed: Optional[Union[BaseModel, dict]] = None
@@ -92,7 +93,7 @@ class BaseMessage:
92
93
  content: str,
93
94
  meta_dict: Optional[Dict[str, str]] = None,
94
95
  video_bytes: Optional[bytes] = None,
95
- image_list: Optional[List[Image.Image]] = None,
96
+ image_list: Optional[List[Union[Image.Image, str]]] = None,
96
97
  image_detail: Union[
97
98
  OpenAIVisionDetailType, str
98
99
  ] = OpenAIVisionDetailType.AUTO,
@@ -109,8 +110,9 @@ class BaseMessage:
109
110
  dictionary for the message.
110
111
  video_bytes (Optional[bytes]): Optional bytes of a video
111
112
  associated with the message.
112
- image_list (Optional[List[Image.Image]]): Optional list of PIL
113
- Image objects associated with the message.
113
+ image_list (Optional[List[Union[Image.Image, str]]]): Optional list
114
+ of PIL Image objects or image URLs (strings) associated with
115
+ the message.
114
116
  image_detail (Union[OpenAIVisionDetailType, str]): Detail level of
115
117
  the images associated with the message.
116
118
  video_detail (Union[OpenAIVisionDetailType, str]): Detail level of
@@ -137,7 +139,7 @@ class BaseMessage:
137
139
  content: str,
138
140
  meta_dict: Optional[Dict[str, str]] = None,
139
141
  video_bytes: Optional[bytes] = None,
140
- image_list: Optional[List[Image.Image]] = None,
142
+ image_list: Optional[List[Union[Image.Image, str]]] = None,
141
143
  image_detail: Union[
142
144
  OpenAIVisionDetailType, str
143
145
  ] = OpenAIVisionDetailType.AUTO,
@@ -154,8 +156,9 @@ class BaseMessage:
154
156
  dictionary for the message.
155
157
  video_bytes (Optional[bytes]): Optional bytes of a video
156
158
  associated with the message.
157
- image_list (Optional[List[Image.Image]]): Optional list of PIL
158
- Image objects associated with the message.
159
+ image_list (Optional[List[Union[Image.Image, str]]]): Optional list
160
+ of PIL Image objects or image URLs (strings) associated with
161
+ the message.
159
162
  image_detail (Union[OpenAIVisionDetailType, str]): Detail level of
160
163
  the images associated with the message.
161
164
  video_detail (Union[OpenAIVisionDetailType, str]): Detail level of
@@ -436,31 +439,64 @@ class BaseMessage:
436
439
  )
437
440
  if self.image_list and len(self.image_list) > 0:
438
441
  for image in self.image_list:
439
- if image.format is None:
440
- # Set default format to PNG as fallback
441
- image.format = 'PNG'
442
-
443
- image_type: str = image.format.lower()
444
- if image_type not in OpenAIImageType:
445
- raise ValueError(
446
- f"Image type {image.format} "
447
- f"is not supported by OpenAI vision model"
442
+ # Check if image is a URL string or PIL Image
443
+ if isinstance(image, str):
444
+ # Image is a URL string
445
+ hybrid_content.append(
446
+ {
447
+ "type": "image_url",
448
+ "image_url": {
449
+ "url": image,
450
+ "detail": self.image_detail,
451
+ },
452
+ }
448
453
  )
449
- with io.BytesIO() as buffer:
450
- image.save(fp=buffer, format=image.format)
451
- encoded_image = base64.b64encode(buffer.getvalue()).decode(
452
- "utf-8"
454
+ else:
455
+ # Image is a PIL Image object
456
+ if image.format is None:
457
+ # Set default format to PNG as fallback
458
+ image.format = 'PNG'
459
+
460
+ image_type: str = image.format.lower()
461
+ if image_type not in OpenAIImageType:
462
+ raise ValueError(
463
+ f"Image type {image.format} "
464
+ f"is not supported by OpenAI vision model"
465
+ )
466
+
467
+ # Convert RGBA to RGB for formats that don't support
468
+ # transparency or when the image has transparency channel
469
+ img_to_save = image
470
+ if image.mode in ('RGBA', 'LA', 'P') and image_type in (
471
+ 'jpeg',
472
+ 'jpg',
473
+ ):
474
+ # JPEG doesn't support transparency, convert to RGB
475
+ img_to_save = image.convert('RGB')
476
+ elif (
477
+ image.mode in ('RGBA', 'LA', 'P')
478
+ and image_type == 'png'
479
+ ):
480
+ # For PNG with transparency, convert to RGBA if needed
481
+ if image.mode in ('LA', 'P'):
482
+ img_to_save = image.convert('RGBA')
483
+ # else: RGBA mode, keep as-is
484
+
485
+ with io.BytesIO() as buffer:
486
+ img_to_save.save(fp=buffer, format=image.format)
487
+ encoded_image = base64.b64encode(
488
+ buffer.getvalue()
489
+ ).decode("utf-8")
490
+ image_prefix = f"data:image/{image_type};base64,"
491
+ hybrid_content.append(
492
+ {
493
+ "type": "image_url",
494
+ "image_url": {
495
+ "url": f"{image_prefix}{encoded_image}",
496
+ "detail": self.image_detail,
497
+ },
498
+ }
453
499
  )
454
- image_prefix = f"data:image/{image_type};base64,"
455
- hybrid_content.append(
456
- {
457
- "type": "image_url",
458
- "image_url": {
459
- "url": f"{image_prefix}{encoded_image}",
460
- "detail": self.image_detail,
461
- },
462
- }
463
- )
464
500
 
465
501
  if self.video_bytes:
466
502
  import imageio.v3 as iio
@@ -552,9 +588,66 @@ class BaseMessage:
552
588
  Returns:
553
589
  dict: The converted dictionary.
554
590
  """
555
- return {
591
+ result = {
556
592
  "role_name": self.role_name,
557
- "role_type": self.role_type.name,
593
+ "role_type": self.role_type.value,
558
594
  **(self.meta_dict or {}),
559
595
  "content": self.content,
560
596
  }
597
+
598
+ # Include image/video fields if present
599
+ if self.image_list is not None:
600
+ # Handle both PIL Images and URL strings
601
+ import base64
602
+ from io import BytesIO
603
+
604
+ image_data_list = []
605
+ for img in self.image_list:
606
+ if isinstance(img, str):
607
+ # Image is a URL string, store as-is
608
+ image_data_list.append({"type": "url", "data": img})
609
+ else:
610
+ # Image is a PIL Image, convert to base64
611
+ # Preserve format, default to PNG if not set
612
+ img_format = img.format if img.format else "PNG"
613
+
614
+ # Handle transparency for different formats
615
+ img_to_save = img
616
+ if img.mode in (
617
+ 'RGBA',
618
+ 'LA',
619
+ 'P',
620
+ ) and img_format.upper() in ('JPEG', 'JPG'):
621
+ # JPEG doesn't support transparency, convert to RGB
622
+ img_to_save = img.convert('RGB')
623
+ elif (
624
+ img.mode in ('LA', 'P') and img_format.upper() == 'PNG'
625
+ ):
626
+ # For PNG with transparency, convert to RGBA if needed
627
+ img_to_save = img.convert('RGBA')
628
+ # else: keep as-is for other combinations
629
+
630
+ buffered = BytesIO()
631
+ img_to_save.save(buffered, format=img_format)
632
+ img_str = base64.b64encode(buffered.getvalue()).decode()
633
+ image_data_list.append(
634
+ {
635
+ "type": "base64",
636
+ "data": img_str,
637
+ "format": img_format, # Preserve format
638
+ }
639
+ )
640
+ result["image_list"] = image_data_list
641
+
642
+ if self.video_bytes is not None:
643
+ import base64
644
+
645
+ result["video_bytes"] = base64.b64encode(self.video_bytes).decode()
646
+
647
+ if self.image_detail is not None:
648
+ result["image_detail"] = self.image_detail
649
+
650
+ if self.video_detail is not None:
651
+ result["video_detail"] = self.video_detail
652
+
653
+ return result
camel/models/__init__.py CHANGED
@@ -12,12 +12,14 @@
12
12
  # limitations under the License.
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
  from .aiml_model import AIMLModel
15
+ from .amd_model import AMDModel
15
16
  from .anthropic_model import AnthropicModel
16
17
  from .aws_bedrock_model import AWSBedrockModel
17
18
  from .azure_openai_model import AzureOpenAIModel
18
19
  from .base_audio_model import BaseAudioModel
19
20
  from .base_model import BaseModelBackend
20
21
  from .cohere_model import CohereModel
22
+ from .cometapi_model import CometAPIModel
21
23
  from .crynux_model import CrynuxModel
22
24
  from .deepseek_model import DeepSeekModel
23
25
  from .fish_audio_model import FishAudioModel
@@ -62,11 +64,13 @@ __all__ = [
62
64
  'OpenRouterModel',
63
65
  'AzureOpenAIModel',
64
66
  'AnthropicModel',
67
+ 'AMDModel',
65
68
  'MistralModel',
66
69
  'GroqModel',
67
70
  'StubModel',
68
71
  'ZhipuAIModel',
69
72
  'CohereModel',
73
+ 'CometAPIModel',
70
74
  'ModelFactory',
71
75
  'ModelManager',
72
76
  'LiteLLMModel',
@@ -0,0 +1,101 @@
1
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
+
15
+ import os
16
+ from typing import Any, Dict, Optional, Union
17
+
18
+ from camel.configs import AMD_API_PARAMS, AMDConfig
19
+ from camel.models.openai_compatible_model import OpenAICompatibleModel
20
+ from camel.types import ModelType
21
+ from camel.utils import BaseTokenCounter, api_keys_required
22
+
23
+
24
+ class AMDModel(OpenAICompatibleModel):
25
+ r"""AMD API in a unified OpenAICompatibleModel interface.
26
+
27
+ Args:
28
+ model_type (Union[ModelType, str]): Model for which a backend is
29
+ created, one of AMD series.
30
+ model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
31
+ that will be fed into:obj:`openai.ChatCompletion.create()`. If
32
+ :obj:`None`, :obj:`AMDConfig().as_dict()` will be used.
33
+ (default: :obj:`None`)
34
+ api_key (Optional[str], optional): The API key for authenticating with
35
+ the AMD service. (default: :obj:`None`)
36
+ url (Optional[str], optional): The url to the AMD service.
37
+ (default: :obj:`None`)
38
+ token_counter (Optional[BaseTokenCounter], optional): Token counter to
39
+ use for the model. If not provided, :obj:`OpenAITokenCounter(
40
+ ModelType.GPT_4)` will be used.
41
+ (default: :obj:`None`)
42
+ timeout (Optional[float], optional): The timeout value in seconds for
43
+ API calls. If not provided, will fall back to the MODEL_TIMEOUT
44
+ environment variable or default to 180 seconds.
45
+ (default: :obj:`None`)
46
+ max_retries (int, optional): Maximum number of retries for API calls.
47
+ (default: :obj:`3`)
48
+ **kwargs (Any): Additional arguments to pass to the client
49
+ initialization.
50
+ """
51
+
52
+ @api_keys_required(
53
+ [
54
+ ("api_key", "AMD_API_KEY"),
55
+ ]
56
+ )
57
+ def __init__(
58
+ self,
59
+ model_type: Union[ModelType, str],
60
+ model_config_dict: Optional[Dict[str, Any]] = None,
61
+ api_key: Optional[str] = None,
62
+ url: Optional[str] = None,
63
+ token_counter: Optional[BaseTokenCounter] = None,
64
+ timeout: Optional[float] = None,
65
+ max_retries: int = 3,
66
+ **kwargs: Any,
67
+ ) -> None:
68
+ if model_config_dict is None:
69
+ model_config_dict = AMDConfig().as_dict()
70
+ api_key = api_key or os.environ.get("AMD_API_KEY")
71
+ url = url or os.environ.get(
72
+ "AMD_API_BASE_URL", "https://llm-api.amd.com"
73
+ )
74
+ headers = {'Ocp-Apim-Subscription-Key': api_key}
75
+ kwargs["default_headers"] = headers
76
+ timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
77
+ super().__init__(
78
+ model_type=model_type,
79
+ model_config_dict=model_config_dict,
80
+ api_key=api_key,
81
+ url=url,
82
+ token_counter=token_counter,
83
+ timeout=timeout,
84
+ max_retries=max_retries,
85
+ **kwargs,
86
+ )
87
+
88
+ def check_model_config(self):
89
+ r"""Check whether the model configuration contains any
90
+ unexpected arguments to AMD API.
91
+
92
+ Raises:
93
+ ValueError: If the model configuration dictionary contains any
94
+ unexpected arguments to AMD API.
95
+ """
96
+ for param in self.model_config_dict:
97
+ if param not in AMD_API_PARAMS:
98
+ raise ValueError(
99
+ f"Unexpected argument `{param}` is "
100
+ "input into AMD model backend."
101
+ )
@@ -247,9 +247,6 @@ class AzureOpenAIModel(BaseModelBackend):
247
247
  )
248
248
  is_streaming = self.model_config_dict.get("stream", False)
249
249
  if response_format:
250
- result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
251
- self._request_parse(messages, response_format, tools)
252
- )
253
250
  if is_streaming:
254
251
  return self._request_stream_parse(
255
252
  messages, response_format, tools
@@ -308,9 +305,6 @@ class AzureOpenAIModel(BaseModelBackend):
308
305
  )
309
306
  is_streaming = self.model_config_dict.get("stream", False)
310
307
  if response_format:
311
- result: Union[
312
- ChatCompletion, AsyncStream[ChatCompletionChunk]
313
- ] = await self._arequest_parse(messages, response_format, tools)
314
308
  if is_streaming:
315
309
  return await self._arequest_stream_parse(
316
310
  messages, response_format, tools
@@ -24,6 +24,7 @@ from openai.lib.streaming.chat import (
24
24
  )
25
25
  from pydantic import BaseModel
26
26
 
27
+ from camel.logger import get_logger as camel_get_logger
27
28
  from camel.messages import OpenAIMessage
28
29
  from camel.types import (
29
30
  ChatCompletion,
@@ -34,6 +35,21 @@ from camel.types import (
34
35
  )
35
36
  from camel.utils import BaseTokenCounter
36
37
 
38
+ if os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
39
+ try:
40
+ from traceroot import get_logger # type: ignore[import]
41
+ from traceroot import trace as observe # type: ignore[import]
42
+
43
+ logger = get_logger('base_model')
44
+ except ImportError:
45
+ from camel.utils import observe
46
+
47
+ logger = camel_get_logger('base_model')
48
+ else:
49
+ from camel.utils import observe
50
+
51
+ logger = camel_get_logger('base_model')
52
+
37
53
 
38
54
  class ModelBackendMeta(abc.ABCMeta):
39
55
  r"""Metaclass that automatically preprocesses messages in run method.
@@ -364,6 +380,7 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
364
380
  """
365
381
  pass
366
382
 
383
+ @observe()
367
384
  def run(
368
385
  self,
369
386
  messages: List[OpenAIMessage],
@@ -403,7 +420,13 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
403
420
  elif not tools:
404
421
  tools = None
405
422
 
423
+ logger.info("Running model: %s", self.model_type)
424
+ logger.info("Messages: %s", messages)
425
+ logger.info("Response format: %s", response_format)
426
+ logger.info("Tools: %s", tools)
427
+
406
428
  result = self._run(messages, response_format, tools)
429
+ logger.info("Result: %s", result)
407
430
 
408
431
  # Log the response if logging is enabled
409
432
  if log_path:
@@ -411,6 +434,7 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
411
434
 
412
435
  return result
413
436
 
437
+ @observe()
414
438
  async def arun(
415
439
  self,
416
440
  messages: List[OpenAIMessage],
@@ -448,7 +472,13 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
448
472
  elif not tools:
449
473
  tools = None
450
474
 
475
+ logger.info("Running model: %s", self.model_type)
476
+ logger.info("Messages: %s", messages)
477
+ logger.info("Response format: %s", response_format)
478
+ logger.info("Tools: %s", tools)
479
+
451
480
  result = await self._arun(messages, response_format, tools)
481
+ logger.info("Result: %s", result)
452
482
 
453
483
  # Log the response if logging is enabled
454
484
  if log_path:
@@ -0,0 +1,83 @@
1
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
+ import os
15
+ from typing import Any, Dict, Optional, Union
16
+
17
+ from camel.configs import CometAPIConfig
18
+ from camel.models.openai_compatible_model import OpenAICompatibleModel
19
+ from camel.types import ModelType
20
+ from camel.utils import (
21
+ BaseTokenCounter,
22
+ api_keys_required,
23
+ )
24
+
25
+
26
+ class CometAPIModel(OpenAICompatibleModel):
27
+ r"""LLM API served by CometAPI in a unified OpenAICompatibleModel
28
+ interface.
29
+
30
+ Args:
31
+ model_type (Union[ModelType, str]): Model for which a backend is
32
+ created.
33
+ model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
34
+ that will be fed into:obj:`openai.ChatCompletion.create()`.
35
+ If:obj:`None`, :obj:`CometAPIConfig().as_dict()` will be used.
36
+ (default: :obj:`None`)
37
+ api_key (Optional[str], optional): The API key for authenticating
38
+ with the CometAPI service. (default: :obj:`None`).
39
+ url (Optional[str], optional): The url to the CometAPI service.
40
+ (default: :obj:`None`)
41
+ token_counter (Optional[BaseTokenCounter], optional): Token counter to
42
+ use for the model. If not provided, :obj:`OpenAITokenCounter(
43
+ ModelType.GPT_4O_MINI)` will be used.
44
+ (default: :obj:`None`)
45
+ timeout (Optional[float], optional): The timeout value in seconds for
46
+ API calls. If not provided, will fall back to the MODEL_TIMEOUT
47
+ environment variable or default to 180 seconds.
48
+ (default: :obj:`None`)
49
+ max_retries (int, optional): Maximum number of retries for API calls.
50
+ (default: :obj:`3`)
51
+ **kwargs (Any): Additional arguments to pass to the client
52
+ initialization.
53
+ """
54
+
55
+ @api_keys_required([("api_key", "COMETAPI_KEY")])
56
+ def __init__(
57
+ self,
58
+ model_type: Union[ModelType, str],
59
+ model_config_dict: Optional[Dict[str, Any]] = None,
60
+ api_key: Optional[str] = None,
61
+ url: Optional[str] = None,
62
+ token_counter: Optional[BaseTokenCounter] = None,
63
+ timeout: Optional[float] = None,
64
+ max_retries: int = 3,
65
+ **kwargs: Any,
66
+ ) -> None:
67
+ if model_config_dict is None:
68
+ model_config_dict = CometAPIConfig().as_dict()
69
+ api_key = api_key or os.environ.get("COMETAPI_KEY")
70
+ url = url or os.environ.get(
71
+ "COMETAPI_API_BASE_URL", "https://api.cometapi.com/v1"
72
+ )
73
+ timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
74
+ super().__init__(
75
+ model_type=model_type,
76
+ model_config_dict=model_config_dict,
77
+ api_key=api_key,
78
+ url=url,
79
+ token_counter=token_counter,
80
+ timeout=timeout,
81
+ max_retries=max_retries,
82
+ **kwargs,
83
+ )
@@ -16,11 +16,13 @@ import os
16
16
  from typing import ClassVar, Dict, Optional, Type, Union
17
17
 
18
18
  from camel.models.aiml_model import AIMLModel
19
+ from camel.models.amd_model import AMDModel
19
20
  from camel.models.anthropic_model import AnthropicModel
20
21
  from camel.models.aws_bedrock_model import AWSBedrockModel
21
22
  from camel.models.azure_openai_model import AzureOpenAIModel
22
23
  from camel.models.base_model import BaseModelBackend
23
24
  from camel.models.cohere_model import CohereModel
25
+ from camel.models.cometapi_model import CometAPIModel
24
26
  from camel.models.crynux_model import CrynuxModel
25
27
  from camel.models.deepseek_model import DeepSeekModel
26
28
  from camel.models.gemini_model import GeminiModel
@@ -77,6 +79,7 @@ class ModelFactory:
77
79
  ModelPlatformType.AWS_BEDROCK: AWSBedrockModel,
78
80
  ModelPlatformType.NVIDIA: NvidiaModel,
79
81
  ModelPlatformType.SILICONFLOW: SiliconFlowModel,
82
+ ModelPlatformType.AMD: AMDModel,
80
83
  ModelPlatformType.AIML: AIMLModel,
81
84
  ModelPlatformType.VOLCANO: VolcanoModel,
82
85
  ModelPlatformType.NETMIND: NetmindModel,
@@ -84,6 +87,7 @@ class ModelFactory:
84
87
  ModelPlatformType.AZURE: AzureOpenAIModel,
85
88
  ModelPlatformType.ANTHROPIC: AnthropicModel,
86
89
  ModelPlatformType.GROQ: GroqModel,
90
+ ModelPlatformType.COMETAPI: CometAPIModel,
87
91
  ModelPlatformType.NEBIUS: NebiusModel,
88
92
  ModelPlatformType.LMSTUDIO: LMStudioModel,
89
93
  ModelPlatformType.OPENROUTER: OpenRouterModel,
@@ -190,9 +190,6 @@ class OpenAICompatibleModel(BaseModelBackend):
190
190
  is_streaming = self.model_config_dict.get("stream", False)
191
191
 
192
192
  if response_format:
193
- result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
194
- self._request_parse(messages, response_format, tools)
195
- )
196
193
  if is_streaming:
197
194
  # Use streaming parse for structured output
198
195
  return self._request_stream_parse(
@@ -256,9 +253,6 @@ class OpenAICompatibleModel(BaseModelBackend):
256
253
  is_streaming = self.model_config_dict.get("stream", False)
257
254
 
258
255
  if response_format:
259
- result: Union[
260
- ChatCompletion, AsyncStream[ChatCompletionChunk]
261
- ] = await self._arequest_parse(messages, response_format, tools)
262
256
  if is_streaming:
263
257
  # Use streaming parse for structured output
264
258
  return await self._arequest_stream_parse(
@@ -303,9 +303,6 @@ class OpenAIModel(BaseModelBackend):
303
303
  is_streaming = self.model_config_dict.get("stream", False)
304
304
 
305
305
  if response_format:
306
- result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
307
- self._request_parse(messages, response_format, tools)
308
- )
309
306
  if is_streaming:
310
307
  # Use streaming parse for structured output
311
308
  return self._request_stream_parse(
@@ -377,9 +374,6 @@ class OpenAIModel(BaseModelBackend):
377
374
  is_streaming = self.model_config_dict.get("stream", False)
378
375
 
379
376
  if response_format:
380
- result: Union[
381
- ChatCompletion, AsyncStream[ChatCompletionChunk]
382
- ] = await self._arequest_parse(messages, response_format, tools)
383
377
  if is_streaming:
384
378
  # Use streaming parse for structured output
385
379
  return await self._arequest_stream_parse(
@@ -13,16 +13,26 @@
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
 
15
15
  import os
16
- from typing import Any, Dict, Optional, Union
16
+ from typing import Any, Dict, List, Optional, Type, Union
17
+
18
+ from pydantic import BaseModel
17
19
 
18
20
  from camel.configs import ZhipuAIConfig
21
+ from camel.logger import get_logger
22
+ from camel.messages import OpenAIMessage
23
+ from camel.models._utils import try_modify_message_with_format
19
24
  from camel.models.openai_compatible_model import OpenAICompatibleModel
20
- from camel.types import ModelType
25
+ from camel.types import (
26
+ ChatCompletion,
27
+ ModelType,
28
+ )
21
29
  from camel.utils import (
22
30
  BaseTokenCounter,
23
31
  api_keys_required,
24
32
  )
25
33
 
34
+ logger = get_logger(__name__)
35
+
26
36
 
27
37
  class ZhipuAIModel(OpenAICompatibleModel):
28
38
  r"""ZhipuAI API in a unified OpenAICompatibleModel interface.
@@ -85,3 +95,52 @@ class ZhipuAIModel(OpenAICompatibleModel):
85
95
  max_retries=max_retries,
86
96
  **kwargs,
87
97
  )
98
+
99
+ def _request_parse(
100
+ self,
101
+ messages: List[OpenAIMessage],
102
+ response_format: Type[BaseModel],
103
+ tools: Optional[List[Dict[str, Any]]] = None,
104
+ ) -> ChatCompletion:
105
+ import copy
106
+
107
+ request_config = copy.deepcopy(self.model_config_dict)
108
+ request_config.pop("stream", None)
109
+ if tools is not None:
110
+ request_config["tools"] = tools
111
+
112
+ try_modify_message_with_format(messages[-1], response_format)
113
+ request_config["response_format"] = {"type": "json_object"}
114
+ try:
115
+ return self._client.beta.chat.completions.parse(
116
+ messages=messages,
117
+ model=self.model_type,
118
+ **request_config,
119
+ )
120
+ except Exception as e:
121
+ logger.error(f"Fallback attempt also failed: {e}")
122
+ raise
123
+
124
+ async def _arequest_parse(
125
+ self,
126
+ messages: List[OpenAIMessage],
127
+ response_format: Type[BaseModel],
128
+ tools: Optional[List[Dict[str, Any]]] = None,
129
+ ) -> ChatCompletion:
130
+ import copy
131
+
132
+ request_config = copy.deepcopy(self.model_config_dict)
133
+ request_config.pop("stream", None)
134
+ if tools is not None:
135
+ request_config["tools"] = tools
136
+ try_modify_message_with_format(messages[-1], response_format)
137
+ request_config["response_format"] = {"type": "json_object"}
138
+ try:
139
+ return await self._async_client.beta.chat.completions.parse(
140
+ messages=messages,
141
+ model=self.model_type,
142
+ **request_config,
143
+ )
144
+ except Exception as e:
145
+ logger.error(f"Fallback attempt also failed: {e}")
146
+ raise