camel-ai 0.1.5.9__py3-none-any.whl → 0.1.6.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic.

Files changed (69)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +246 -33
  3. camel/agents/critic_agent.py +17 -1
  4. camel/agents/deductive_reasoner_agent.py +12 -0
  5. camel/agents/embodied_agent.py +19 -5
  6. camel/agents/knowledge_graph_agent.py +22 -3
  7. camel/agents/role_assignment_agent.py +12 -0
  8. camel/agents/search_agent.py +12 -0
  9. camel/agents/task_agent.py +15 -0
  10. camel/configs/__init__.py +2 -9
  11. camel/configs/anthropic_config.py +5 -6
  12. camel/configs/base_config.py +50 -4
  13. camel/configs/gemini_config.py +69 -18
  14. camel/configs/groq_config.py +6 -20
  15. camel/configs/litellm_config.py +2 -8
  16. camel/configs/mistral_config.py +17 -20
  17. camel/configs/ollama_config.py +6 -8
  18. camel/configs/openai_config.py +12 -23
  19. camel/configs/vllm_config.py +7 -8
  20. camel/configs/zhipuai_config.py +5 -11
  21. camel/human.py +1 -1
  22. camel/loaders/__init__.py +2 -0
  23. camel/loaders/firecrawl_reader.py +213 -0
  24. camel/memories/agent_memories.py +1 -4
  25. camel/memories/blocks/chat_history_block.py +6 -2
  26. camel/memories/blocks/vectordb_block.py +3 -1
  27. camel/memories/context_creators/score_based.py +6 -6
  28. camel/memories/records.py +9 -7
  29. camel/messages/base.py +1 -0
  30. camel/models/open_source_model.py +2 -2
  31. camel/prompts/__init__.py +7 -0
  32. camel/prompts/image_craft.py +34 -0
  33. camel/prompts/multi_condition_image_craft.py +34 -0
  34. camel/prompts/task_prompt_template.py +6 -0
  35. camel/responses/agent_responses.py +4 -3
  36. camel/retrievers/auto_retriever.py +0 -2
  37. camel/societies/babyagi_playing.py +6 -4
  38. camel/societies/role_playing.py +16 -8
  39. camel/storages/graph_storages/graph_element.py +10 -14
  40. camel/storages/vectordb_storages/base.py +24 -13
  41. camel/storages/vectordb_storages/milvus.py +1 -1
  42. camel/storages/vectordb_storages/qdrant.py +2 -3
  43. camel/tasks/__init__.py +22 -0
  44. camel/tasks/task.py +408 -0
  45. camel/tasks/task_prompt.py +65 -0
  46. camel/toolkits/__init__.py +3 -0
  47. camel/toolkits/base.py +3 -1
  48. camel/toolkits/dalle_toolkit.py +146 -0
  49. camel/toolkits/github_toolkit.py +16 -32
  50. camel/toolkits/google_maps_toolkit.py +2 -1
  51. camel/toolkits/open_api_toolkit.py +1 -2
  52. camel/toolkits/openai_function.py +2 -7
  53. camel/types/enums.py +6 -2
  54. camel/utils/__init__.py +14 -2
  55. camel/utils/commons.py +167 -2
  56. camel/utils/constants.py +3 -0
  57. camel/workforce/__init__.py +23 -0
  58. camel/workforce/base.py +50 -0
  59. camel/workforce/manager_node.py +299 -0
  60. camel/workforce/role_playing_node.py +168 -0
  61. camel/workforce/single_agent_node.py +77 -0
  62. camel/workforce/task_channel.py +173 -0
  63. camel/workforce/utils.py +97 -0
  64. camel/workforce/worker_node.py +115 -0
  65. camel/workforce/workforce.py +49 -0
  66. camel/workforce/workforce_prompt.py +125 -0
  67. {camel_ai-0.1.5.9.dist-info → camel_ai-0.1.6.2.dist-info}/METADATA +5 -2
  68. {camel_ai-0.1.5.9.dist-info → camel_ai-0.1.6.2.dist-info}/RECORD +69 -52
  69. {camel_ai-0.1.5.9.dist-info → camel_ai-0.1.6.2.dist-info}/WHEEL +0 -0
camel/configs/__init__.py CHANGED
@@ -13,19 +13,12 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
  from .base_config import BaseConfig
- from .gemini_config import (
-     Gemini_API_PARAMS,
-     GeminiConfig,
- )
+ from .gemini_config import Gemini_API_PARAMS, GeminiConfig
  from .groq_config import GROQ_API_PARAMS, GroqConfig
  from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
  from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
  from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
- from .openai_config import (
-     OPENAI_API_PARAMS,
-     ChatGPTConfig,
-     OpenSourceConfig,
- )
+ from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig, OpenSourceConfig
  from .vllm_config import VLLM_API_PARAMS, VLLMConfig
  from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig

camel/configs/anthropic_config.py CHANGED
@@ -13,14 +13,13 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from __future__ import annotations

- from dataclasses import asdict, dataclass
+ from typing import List, Union

  from anthropic import NOT_GIVEN, NotGiven

  from camel.configs.base_config import BaseConfig


- @dataclass(frozen=True)
  class AnthropicConfig(BaseConfig):
      r"""Defines the parameters for generating chat completions using the
      Anthropic API.
@@ -62,12 +61,12 @@ class AnthropicConfig(BaseConfig):
      """

      max_tokens: int = 256
-     stop_sequences: list[str] | NotGiven = NOT_GIVEN
+     stop_sequences: Union[List[str], NotGiven] = NOT_GIVEN
      temperature: float = 1
-     top_p: float | NotGiven = NOT_GIVEN
-     top_k: int | NotGiven = NOT_GIVEN
+     top_p: Union[float, NotGiven] = NOT_GIVEN
+     top_k: Union[int, NotGiven] = NOT_GIVEN
      metadata: NotGiven = NOT_GIVEN
      stream: bool = False


- ANTHROPIC_API_PARAMS = {param for param in asdict(AnthropicConfig()).keys()}
+ ANTHROPIC_API_PARAMS = {param for param in AnthropicConfig.model_fields.keys()}
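The `asdict(Config())` to `Config.model_fields` rewrite at the bottom of this file repeats in every config module below; it is the mechanical consequence of moving the configs from frozen dataclasses to pydantic models. A minimal standalone sketch of the two ways of deriving the parameter set (`DemoConfig` is hypothetical, not part of camel):

from pydantic import BaseModel


class DemoConfig(BaseModel):
    """Hypothetical stand-in for a camel config class."""

    max_tokens: int = 256
    temperature: float = 1.0


# Pydantic v2 exposes declared fields on the class itself, so the API
# parameter set no longer needs a throwaway instance the way
# dataclasses.asdict(DemoConfig()) did.
DEMO_API_PARAMS = set(DemoConfig.model_fields.keys())
print(DEMO_API_PARAMS)  # {'max_tokens', 'temperature'}

Note the release is not fully consistent here: most modules read `model_fields` off the class, while `Gemini_API_PARAMS` and `MISTRAL_API_PARAMS` instantiate first (`GeminiConfig().model_fields`); in pydantic v2 both return the same keys.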
camel/configs/base_config.py CHANGED
@@ -14,9 +14,55 @@
  from __future__ import annotations

  from abc import ABC
- from dataclasses import dataclass
+ from typing import Any, List, Optional

+ from pydantic import BaseModel, ConfigDict, field_validator

- @dataclass(frozen=True)
- class BaseConfig(ABC):  # noqa: B024
-     pass
+
+ class BaseConfig(ABC, BaseModel):
+     model_config = ConfigDict(
+         arbitrary_types_allowed=True,
+         extra="forbid",
+         frozen=True,
+         # UserWarning: conflict with protected namespace "model_"
+         protected_namespaces=(),
+     )
+
+     tools: Optional[List[Any]] = None
+     """A list of tools the model may
+     call. Currently, only functions are supported as a tool. Use this
+     to provide a list of functions the model may generate JSON inputs
+     for. A max of 128 functions are supported.
+     """
+
+     @field_validator("tools", mode="before")
+     @classmethod
+     def fields_type_checking(cls, tools):
+         if tools is not None:
+             from camel.toolkits import OpenAIFunction
+
+             for tool in tools:
+                 if not isinstance(tool, OpenAIFunction):
+                     raise ValueError(
+                         f"The tool {tool} should "
+                         "be an instance of `OpenAIFunction`."
+                     )
+         return tools
+
+     def as_dict(self) -> dict[str, Any]:
+         config_dict = self.model_dump()
+
+         tools_schema = None
+         if self.tools:
+             from camel.toolkits import OpenAIFunction
+
+             tools_schema = []
+             for tool in self.tools:
+                 if not isinstance(tool, OpenAIFunction):
+                     raise ValueError(
+                         f"The tool {tool} should "
+                         "be an instance of `OpenAIFunction`."
+                     )
+                 tools_schema.append(tool.get_openai_tool_schema())
+         config_dict["tools"] = tools_schema
+         return config_dict
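This new BaseConfig is the heart of the refactor: tool validation moves from each subclass's `__post_init__` into a shared pydantic `field_validator` that fails at construction time, and schema conversion moves into an explicit `as_dict()` instead of mutating the frozen instance via `object.__setattr__`. A self-contained sketch of the same pattern, with a hypothetical `Tool` standing in for `OpenAIFunction`:

from typing import Any, List, Optional

from pydantic import BaseModel, ConfigDict, ValidationError, field_validator


class Tool:
    """Hypothetical stand-in for camel.toolkits.OpenAIFunction."""

    def get_openai_tool_schema(self) -> dict:
        return {"type": "function", "function": {"name": "demo"}}


class Config(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)

    tools: Optional[List[Any]] = None

    @field_validator("tools", mode="before")
    @classmethod
    def check_tools(cls, tools):
        # Runs at construction time, so a bad tool list fails fast
        # instead of surfacing later, as the old __post_init__ did.
        if tools is not None:
            for tool in tools:
                if not isinstance(tool, Tool):
                    raise ValueError(f"{tool!r} is not a Tool")
        return tools

    def as_dict(self) -> dict:
        # Serialize tools on demand; the frozen instance keeps the
        # original Tool objects instead of being mutated in place.
        config_dict = self.model_dump()
        if self.tools:
            config_dict["tools"] = [
                t.get_openai_tool_schema() for t in self.tools
            ]
        return config_dict


print(Config(tools=[Tool()]).as_dict())
try:
    Config(tools=["oops"])
except ValidationError as exc:
    print(exc.error_count(), "validation error")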
camel/configs/gemini_config.py CHANGED
@@ -14,22 +14,13 @@


  from collections.abc import Iterable
- from dataclasses import asdict, dataclass
- from typing import TYPE_CHECKING, Optional
+ from typing import Any, Optional

- from camel.configs.base_config import BaseConfig
+ from pydantic import model_validator

- if TYPE_CHECKING:
-     from google.generativeai.protos import Schema
-     from google.generativeai.types.content_types import (
-         FunctionLibraryType,
-         ToolConfigType,
-     )
-     from google.generativeai.types.helper_types import RequestOptionsType
-     from google.generativeai.types.safety_types import SafetySettingOptions
+ from camel.configs.base_config import BaseConfig


- @dataclass(frozen=True)
  class GeminiConfig(BaseConfig):
      r"""A simple dataclass used to configure the generation parameters of
      `GenerativeModel.generate_content`.
@@ -88,11 +79,71 @@ class GeminiConfig(BaseConfig):
      top_p: Optional[float] = None
      top_k: Optional[int] = None
      response_mime_type: Optional[str] = None
-     response_schema: Optional['Schema'] = None
-     safety_settings: Optional['SafetySettingOptions'] = None
-     tools: Optional['FunctionLibraryType'] = None
-     tool_config: Optional['ToolConfigType'] = None
-     request_options: Optional['RequestOptionsType'] = None
+     response_schema: Optional[Any] = None
+     safety_settings: Optional[Any] = None
+     tools: Optional[Any] = None
+     tool_config: Optional[Any] = None
+     request_options: Optional[Any] = None
+
+     @model_validator(mode="before")
+     @classmethod
+     def fields_type_checking(cls, data: Any):
+         if isinstance(data, dict):
+             response_schema = data.get("response_schema")
+             safety_settings = data.get("safety_settings")
+             tools = data.get("tools")
+             tool_config = data.get("tool_config")
+             request_options = data.get("request_options")
+
+             if response_schema:
+                 from google.generativeai.protos import Schema
+                 from google.generativeai.types.content_types import (
+                     FunctionLibraryType,
+                     ToolConfigType,
+                 )
+                 from google.generativeai.types.helper_types import (
+                     RequestOptionsType,
+                 )
+                 from google.generativeai.types.safety_types import (
+                     SafetySettingOptions,
+                 )
+             else:
+                 return data
+
+             if response_schema and not isinstance(response_schema, Schema):
+                 raise ValueError(
+                     "The response_schema should be "
+                     "an instance of `google.generativeai.protos.Schema`."
+                 )
+
+             if safety_settings and not isinstance(
+                 safety_settings, SafetySettingOptions
+             ):
+                 raise ValueError(
+                     "The safety_settings should be an instance of "
+                     "`google.generativeai.types.safety_types.SafetySettingOptions`."
+                 )
+
+             if tools is not None:
+                 for tool in tools:
+                     if not isinstance(tool, FunctionLibraryType):
+                         raise ValueError(
+                             "The tool should be an instance of "
+                             "`google.generativeai.types.content_types.FunctionLibraryType`."
+                         )
+             if tool_config and not isinstance(tool_config, ToolConfigType):
+                 raise ValueError(
+                     "The tool_config should be an instance of "
+                     "`google.generativeai.types.content_types.ToolConfigType`."
+                 )
+             if request_options and not isinstance(
+                 request_options, RequestOptionsType
+             ):
+                 raise ValueError(
+                     "The request_options should be an instance of "
+                     "`google.generativeai.types.helper_types.RequestOptionsType`."
+                 )
+         return data


- Gemini_API_PARAMS = {param for param in asdict(GeminiConfig()).keys()}
+ Gemini_API_PARAMS = {param for param in GeminiConfig().model_fields.keys()}
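GeminiConfig trades precise `TYPE_CHECKING`-only annotations for `Optional[Any]` plus a `model_validator` that imports the `google.generativeai` types only when one of those fields is actually supplied, so the package no longer needs the Gemini SDK installed just to import its config. (MistralConfig below applies the same deferred-import idea per field with a `field_validator`.) A standalone sketch of the pattern, with `decimal.Decimal` as a stand-in for the heavy SDK type:

from typing import Any, Optional

from pydantic import BaseModel, model_validator


class LazyConfig(BaseModel):
    # Typed as Any so the optional dependency is not needed at import time.
    response_schema: Optional[Any] = None

    @model_validator(mode="before")
    @classmethod
    def check_types(cls, data: Any):
        if isinstance(data, dict) and data.get("response_schema") is not None:
            # The real code imports google.generativeai.protos.Schema here;
            # Decimal is only a stand-in heavy import for this sketch.
            from decimal import Decimal as HeavyType

            if not isinstance(data["response_schema"], HeavyType):
                raise ValueError("response_schema has the wrong type")
        return data


from decimal import Decimal

print(LazyConfig(response_schema=Decimal("1")))
print(LazyConfig())  # no field set, so the deferred import never runs

One caveat visible in the hunk above: the validator returns early unless `response_schema` is set, so `safety_settings`, `tools`, `tool_config`, and `request_options` are only type-checked when `response_schema` is also provided.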
camel/configs/groq_config.py CHANGED
@@ -13,18 +13,13 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from __future__ import annotations

- from dataclasses import asdict, dataclass
- from typing import TYPE_CHECKING, Optional, Sequence
+ from typing import Optional, Sequence, Union

  from openai._types import NOT_GIVEN, NotGiven

  from camel.configs.base_config import BaseConfig

- if TYPE_CHECKING:
-     from camel.toolkits import OpenAIFunction

-
- @dataclass(frozen=True)
  class GroqConfig(BaseConfig):
      r"""Defines the parameters for generating chat completions using OpenAI
      compatibility.
@@ -98,22 +93,13 @@ class GroqConfig(BaseConfig):
      top_p: float = 1.0
      n: int = 1
      stream: bool = False
-     stop: str | Sequence[str] | NotGiven = NOT_GIVEN
-     max_tokens: int | NotGiven = NOT_GIVEN
+     stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
+     max_tokens: Union[int, NotGiven] = NOT_GIVEN
      presence_penalty: float = 0.0
-     response_format: dict | NotGiven = NOT_GIVEN
+     response_format: Union[dict, NotGiven] = NOT_GIVEN
      frequency_penalty: float = 0.0
      user: str = ""
-     tools: Optional[list[OpenAIFunction]] = None
-     tool_choice: Optional[dict[str, str] | str] = "none"
-
-     def __post_init__(self):
-         if self.tools is not None:
-             object.__setattr__(
-                 self,
-                 'tools',
-                 [tool.get_openai_tool_schema() for tool in self.tools],
-             )
+     tool_choice: Optional[Union[dict[str, str], str]] = "none"


- GROQ_API_PARAMS = {param for param in asdict(GroqConfig()).keys()}
+ GROQ_API_PARAMS = {param for param in GroqConfig.model_fields.keys()}
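The recurring `str | Sequence[str]` to `Union[str, Sequence[str]]` rewrites in this and the following hunks are likely not cosmetic. Dataclasses never evaluate annotations, but pydantic must resolve them when the class is created, and the PEP 604 `|` syntax only evaluates on Python 3.10 and later, even under `from __future__ import annotations`. A sketch of the distinction (the failing case assumes Python 3.8 or 3.9):

from __future__ import annotations

from typing import Sequence, Union

from pydantic import BaseModel


class StopConfig(BaseModel):
    # Works on Python 3.8+: Union[...] evaluates everywhere.
    stop: Union[str, Sequence[str], None] = None

    # On Python < 3.10 the equivalent PEP 604 annotation would raise
    # "TypeError: unsupported operand type(s) for |" when pydantic
    # evaluates the (stringified) annotation at class-creation time:
    # stop: str | Sequence[str] | None = None


print(StopConfig(stop=["###", "STOP"]))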
camel/configs/litellm_config.py CHANGED
@@ -13,16 +13,11 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from __future__ import annotations

- from dataclasses import asdict, dataclass
- from typing import TYPE_CHECKING, List, Optional, Union
+ from typing import List, Optional, Union

  from camel.configs.base_config import BaseConfig

- if TYPE_CHECKING:
-     from camel.toolkits import OpenAIFunction

-
- @dataclass(frozen=True)
  class LiteLLMConfig(BaseConfig):
      r"""Defines the parameters for generating chat completions using the
      LiteLLM API.
@@ -88,7 +83,6 @@ class LiteLLMConfig(BaseConfig):
      user: Optional[str] = None
      response_format: Optional[dict] = None
      seed: Optional[int] = None
-     tools: Optional[list[OpenAIFunction]] = None
      tool_choice: Optional[Union[str, dict]] = None
      logprobs: Optional[bool] = None
      top_logprobs: Optional[int] = None
@@ -100,4 +94,4 @@ class LiteLLMConfig(BaseConfig):
      max_retries: Optional[int] = None


- LITELLM_API_PARAMS = {param for param in asdict(LiteLLMConfig()).keys()}
+ LITELLM_API_PARAMS = {param for param in LiteLLMConfig.model_fields.keys()}
camel/configs/mistral_config.py CHANGED
@@ -13,20 +13,13 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from __future__ import annotations

- from dataclasses import asdict, dataclass
- from typing import TYPE_CHECKING, Dict, Optional, Union
+ from typing import Any, Dict, Optional, Union

- from camel.configs.base_config import BaseConfig
-
- if TYPE_CHECKING:
-     from mistralai.models.chat_completion import (
-         ResponseFormat,
-     )
+ from pydantic import field_validator

-     from camel.toolkits import OpenAIFunction
+ from camel.configs.base_config import BaseConfig


- @dataclass(frozen=True)
  class MistralConfig(BaseConfig):
      r"""Defines the parameters for generating chat completions using the
      Mistral API.
@@ -65,17 +58,21 @@ class MistralConfig(BaseConfig):
      random_seed: Optional[int] = None
      safe_mode: bool = False
      safe_prompt: bool = False
-     response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None
-     tools: Optional[list[OpenAIFunction]] = None
+     response_format: Optional[Union[Dict[str, str], Any]] = None
      tool_choice: Optional[str] = "auto"

-     def __post_init__(self):
-         if self.tools is not None:
-             object.__setattr__(
-                 self,
-                 'tools',
-                 [tool.get_openai_tool_schema() for tool in self.tools],
-             )
+     @field_validator("response_format", mode="before")
+     @classmethod
+     def fields_type_checking(cls, response_format):
+         if response_format and not isinstance(response_format, dict):
+             from mistralai.models.chat_completion import ResponseFormat
+
+             if not isinstance(response_format, ResponseFormat):
+                 raise ValueError(
+                     f"The response_format {response_format} should be an "
+                     "instance of `mistralai.models.chat_completion.ResponseFormat`."
+                 )
+         return response_format


- MISTRAL_API_PARAMS = {param for param in asdict(MistralConfig()).keys()}
+ MISTRAL_API_PARAMS = {param for param in MistralConfig().model_fields.keys()}
camel/configs/ollama_config.py CHANGED
@@ -13,18 +13,16 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from __future__ import annotations

- from dataclasses import asdict, dataclass
- from typing import Sequence
+ from typing import Sequence, Union

  from openai._types import NOT_GIVEN, NotGiven

  from camel.configs.base_config import BaseConfig


- @dataclass(frozen=True)
  class OllamaConfig(BaseConfig):
      r"""Defines the parameters for generating chat completions using OpenAI
-     compatibility.
+     compatibility

      Reference: https://github.com/ollama/ollama/blob/main/docs/openai.md

@@ -75,11 +73,11 @@ class OllamaConfig(BaseConfig):
      temperature: float = 0.2
      top_p: float = 1.0
      stream: bool = False
-     stop: str | Sequence[str] | NotGiven = NOT_GIVEN
-     max_tokens: int | NotGiven = NOT_GIVEN
+     stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
+     max_tokens: Union[int, NotGiven] = NOT_GIVEN
      presence_penalty: float = 0.0
-     response_format: dict | NotGiven = NOT_GIVEN
+     response_format: Union[dict, NotGiven] = NOT_GIVEN
      frequency_penalty: float = 0.0


- OLLAMA_API_PARAMS = {param for param in asdict(OllamaConfig()).keys()}
+ OLLAMA_API_PARAMS = {param for param in OllamaConfig.model_fields.keys()}
camel/configs/openai_config.py CHANGED
@@ -13,18 +13,14 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from __future__ import annotations

- from dataclasses import asdict, dataclass, field
- from typing import TYPE_CHECKING, Optional, Sequence
+ from typing import Optional, Sequence, Union

  from openai._types import NOT_GIVEN, NotGiven
+ from pydantic import Field

  from camel.configs.base_config import BaseConfig

- if TYPE_CHECKING:
-     from camel.toolkits import OpenAIFunction

-
- @dataclass(frozen=True)
  class ChatGPTConfig(BaseConfig):
      r"""Defines the parameters for generating chat completions using the
      OpenAI API.
@@ -105,29 +101,19 @@ class ChatGPTConfig(BaseConfig):
      top_p: float = 1.0
      n: int = 1
      stream: bool = False
-     stop: str | Sequence[str] | NotGiven = NOT_GIVEN
-     max_tokens: int | NotGiven = NOT_GIVEN
+     stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
+     max_tokens: Union[int, NotGiven] = NOT_GIVEN
      presence_penalty: float = 0.0
-     response_format: dict | NotGiven = NOT_GIVEN
+     response_format: Union[dict, NotGiven] = NOT_GIVEN
      frequency_penalty: float = 0.0
-     logit_bias: dict = field(default_factory=dict)
+     logit_bias: dict = Field(default_factory=dict)
      user: str = ""
-     tools: Optional[list[OpenAIFunction]] = None
-     tool_choice: Optional[dict[str, str] | str] = None
-
-     def __post_init__(self):
-         if self.tools is not None:
-             object.__setattr__(
-                 self,
-                 'tools',
-                 [tool.get_openai_tool_schema() for tool in self.tools],
-             )
+     tool_choice: Optional[Union[dict[str, str], str]] = None


- OPENAI_API_PARAMS = {param for param in asdict(ChatGPTConfig()).keys()}
+ OPENAI_API_PARAMS = {param for param in ChatGPTConfig.model_fields.keys()}


- @dataclass(frozen=True)
  class OpenSourceConfig(BaseConfig):
      r"""Defines parameters for setting up open-source models and includes
      parameters to be passed to chat completion function of OpenAI API.
@@ -141,6 +127,9 @@ class OpenSourceConfig(BaseConfig):
      contain the arguments to be passed to OpenAI API.
      """

+     # Maybe the param needs to be renamed.
+     # Warning: Field "model_path" has conflict with protected namespace
+     # "model_".
      model_path: str
      server_url: str
-     api_params: ChatGPTConfig = field(default_factory=ChatGPTConfig)
+     api_params: ChatGPTConfig = Field(default_factory=ChatGPTConfig)
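Two smaller pydantic details surface in this file: mutable defaults switch from `dataclasses.field(default_factory=dict)` to `pydantic.Field(default_factory=dict)`, and `model_path` on OpenSourceConfig collides with pydantic's reserved `model_` prefix, which is why BaseConfig sets `protected_namespaces=()`. A minimal sketch of both (class and field names are illustrative):

from pydantic import BaseModel, ConfigDict, Field


class PathConfig(BaseModel):
    # Without protected_namespaces=(), pydantic v2 warns that
    # "model_path" shadows its reserved "model_" namespace.
    model_config = ConfigDict(protected_namespaces=())

    model_path: str = ""
    # Field(default_factory=dict) is the pydantic counterpart of
    # dataclasses.field(default_factory=dict) for mutable defaults.
    logit_bias: dict = Field(default_factory=dict)


a, b = PathConfig(), PathConfig()
assert a.logit_bias is not b.logit_bias  # each instance gets its own dict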
camel/configs/vllm_config.py CHANGED
@@ -13,16 +13,15 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from __future__ import annotations

- from dataclasses import asdict, dataclass, field
- from typing import Sequence
+ from typing import Sequence, Union

  from openai._types import NOT_GIVEN, NotGiven
+ from pydantic import Field

  from camel.configs.base_config import BaseConfig


  # flake8: noqa: E501
- @dataclass(frozen=True)
  class VLLMConfig(BaseConfig):
      r"""Defines the parameters for generating chat completions using the
      OpenAI API.
@@ -91,13 +90,13 @@ class VLLMConfig(BaseConfig):
      top_p: float = 1.0
      n: int = 1
      stream: bool = False
-     stop: str | Sequence[str] | NotGiven = NOT_GIVEN
-     max_tokens: int | NotGiven = NOT_GIVEN
+     stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
+     max_tokens: Union[int, NotGiven] = NOT_GIVEN
      presence_penalty: float = 0.0
-     response_format: dict | NotGiven = NOT_GIVEN
+     response_format: Union[dict, NotGiven] = NOT_GIVEN
      frequency_penalty: float = 0.0
-     logit_bias: dict = field(default_factory=dict)
+     logit_bias: dict = Field(default_factory=dict)
      user: str = ""


- VLLM_API_PARAMS = {param for param in asdict(VLLMConfig()).keys()}
+ VLLM_API_PARAMS = {param for param in VLLMConfig.model_fields.keys()}
camel/configs/zhipuai_config.py CHANGED
@@ -13,18 +13,13 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from __future__ import annotations

- from dataclasses import asdict, dataclass
- from typing import TYPE_CHECKING, Optional, Sequence
+ from typing import Optional, Sequence, Union

  from openai._types import NOT_GIVEN, NotGiven

  from camel.configs.base_config import BaseConfig

- if TYPE_CHECKING:
-     from camel.toolkits import OpenAIFunction

-
- @dataclass(frozen=True)
  class ZhipuAIConfig(BaseConfig):
      r"""Defines the parameters for generating chat completions using OpenAI
      compatibility
@@ -69,10 +64,9 @@ class ZhipuAIConfig(BaseConfig):
      temperature: float = 0.2
      top_p: float = 0.6
      stream: bool = False
-     stop: str | Sequence[str] | NotGiven = NOT_GIVEN
-     max_tokens: int | NotGiven = NOT_GIVEN
-     tools: Optional[list[OpenAIFunction]] = None
-     tool_choice: Optional[dict[str, str] | str] = None
+     stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
+     max_tokens: Union[int, NotGiven] = NOT_GIVEN
+     tool_choice: Optional[Union[dict[str, str], str]] = None


- ZHIPUAI_API_PARAMS = {param for param in asdict(ZhipuAIConfig()).keys()}
+ ZHIPUAI_API_PARAMS = {param for param in ZhipuAIConfig.model_fields.keys()}
camel/human.py CHANGED
@@ -135,4 +135,4 @@ class Human:
          human_input = self.get_input()
          content = self.parse_input(human_input)
          message = meta_chat_message.create_new_instance(content)
-         return ChatAgentResponse([message], terminated=False, info={})
+         return ChatAgentResponse(msgs=[message], terminated=False, info={})
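This one-line call-site change follows from ChatAgentResponse becoming a pydantic model (see agent_responses.py in the file list): pydantic's generated `__init__` accepts keyword arguments only, so the positional form now raises a TypeError. A sketch with a hypothetical stand-in model:

from typing import List

from pydantic import BaseModel


class Response(BaseModel):
    """Hypothetical stand-in for camel.responses.ChatAgentResponse."""

    msgs: List[str]
    terminated: bool
    info: dict


# Response(["hi"], False, {})  # TypeError: BaseModel takes no positional args
resp = Response(msgs=["hi"], terminated=False, info={})
print(resp.msgs)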
camel/loaders/__init__.py CHANGED
@@ -13,6 +13,7 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========

  from .base_io import File, read_file
+ from .firecrawl_reader import Firecrawl
  from .jina_url_reader import JinaURLReader
  from .unstructured_io import UnstructuredIO

@@ -21,4 +22,5 @@ __all__ = [
      'read_file',
      'UnstructuredIO',
      'JinaURLReader',
+     'Firecrawl',
  ]
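Only the re-export is visible here; the reader itself is the new camel/loaders/firecrawl_reader.py (+213 lines, not shown in these hunks). Its method surface cannot be confirmed from this diff, so the example stops at the import path the hunk does establish:

# New in 0.1.6.2: Firecrawl is re-exported from camel.loaders.
from camel.loaders import Firecrawl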