camel-ai 0.2.42__py3-none-any.whl → 0.2.44__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (43)
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +3 -0
  3. camel/configs/anthropic_config.py +2 -24
  4. camel/configs/ppio_config.py +102 -0
  5. camel/configs/reka_config.py +1 -7
  6. camel/configs/samba_config.py +1 -7
  7. camel/configs/togetherai_config.py +1 -7
  8. camel/datasets/few_shot_generator.py +1 -0
  9. camel/embeddings/__init__.py +4 -0
  10. camel/embeddings/azure_embedding.py +119 -0
  11. camel/embeddings/together_embedding.py +136 -0
  12. camel/environments/__init__.py +3 -0
  13. camel/environments/multi_step.py +12 -10
  14. camel/environments/single_step.py +14 -2
  15. camel/environments/tic_tac_toe.py +518 -0
  16. camel/extractors/python_strategies.py +14 -5
  17. camel/loaders/__init__.py +2 -0
  18. camel/loaders/crawl4ai_reader.py +230 -0
  19. camel/models/__init__.py +2 -0
  20. camel/models/azure_openai_model.py +10 -2
  21. camel/models/base_model.py +111 -28
  22. camel/models/cohere_model.py +5 -1
  23. camel/models/deepseek_model.py +4 -0
  24. camel/models/gemini_model.py +8 -2
  25. camel/models/model_factory.py +3 -0
  26. camel/models/ollama_model.py +8 -2
  27. camel/models/openai_compatible_model.py +8 -2
  28. camel/models/openai_model.py +16 -4
  29. camel/models/ppio_model.py +184 -0
  30. camel/models/togetherai_model.py +106 -31
  31. camel/models/vllm_model.py +140 -57
  32. camel/societies/workforce/workforce.py +26 -3
  33. camel/toolkits/__init__.py +2 -0
  34. camel/toolkits/browser_toolkit.py +11 -3
  35. camel/toolkits/google_calendar_toolkit.py +432 -0
  36. camel/toolkits/search_toolkit.py +119 -1
  37. camel/types/enums.py +74 -3
  38. camel/types/unified_model_type.py +5 -0
  39. camel/verifiers/python_verifier.py +93 -9
  40. {camel_ai-0.2.42.dist-info → camel_ai-0.2.44.dist-info}/METADATA +21 -2
  41. {camel_ai-0.2.42.dist-info → camel_ai-0.2.44.dist-info}/RECORD +43 -36
  42. {camel_ai-0.2.42.dist-info → camel_ai-0.2.44.dist-info}/WHEEL +0 -0
  43. {camel_ai-0.2.42.dist-info → camel_ai-0.2.44.dist-info}/licenses/LICENSE +0 -0
camel/models/ppio_model.py (new file)
@@ -0,0 +1,184 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from typing import Any, Dict, List, Optional, Type, Union
+
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
+
+from camel.configs import PPIO_API_PARAMS, PPIOConfig
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    api_keys_required,
+)
+
+
+class PPIOModel(BaseModelBackend):
+    r"""Constructor for PPIO backend with OpenAI compatibility.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created; supported models can be found here:
+            https://ppinfra.com/model-api/product/llm-api?utm_source=github_owl
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into :obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`PPIOConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the PPIO service. (default: :obj:`None`)
+        url (Optional[str], optional): The URL to the PPIO service.
+            If not provided, "https://api.ppinfra.com/v3/openai" will be used.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", 'PPIO_API_KEY'),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = PPIOConfig().as_dict()
+        api_key = api_key or os.environ.get("PPIO_API_KEY")
+        url = url or os.environ.get(
+            "PPIO_API_BASE_URL", "https://api.ppinfra.com/v3/openai"
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter, timeout
+        )
+
+        self._client = OpenAI(
+            timeout=self._timeout,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+        self._async_client = AsyncOpenAI(
+            timeout=self._timeout,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            OpenAITokenCounter: The token counter following the model's
+                tokenization style.
+        """
+
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to PPIO API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to PPIO API.
+        """
+        for param in self.model_config_dict:
+            if param not in PPIO_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into PPIO model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
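
For orientation, a minimal usage sketch of the new backend (not an excerpt from the package): the model id is illustrative and PPIO_API_KEY is assumed to be set.

# Minimal usage sketch, assuming PPIO_API_KEY is exported; the model id is
# illustrative -- real ids are listed on ppinfra.com. _run is called directly
# here only to mirror the method defined in the diff above.
from camel.models.ppio_model import PPIOModel

model = PPIOModel(
    model_type="deepseek/deepseek-r1",  # illustrative model id
    model_config_dict={"temperature": 0.2},
)
response = model._run(
    messages=[{"role": "user", "content": "Say hello."}],
)
# With the default stream=False, the result is a ChatCompletion.
print(response.choices[0].message.content)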
camel/models/togetherai_model.py
@@ -96,70 +96,145 @@ class TogetherAIModel(BaseModelBackend):
             base_url=self._url,
         )
 
-    async def _arun(
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            BaseTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of OpenAI chat completion.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
 
         Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+                `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        # Use OpenAI client as interface call Together AI
-        # Reference: https://docs.together.ai/docs/openai-api-compatibility
-        response = await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
         )
-        return response
+        if response_format:
+            return self._request_parse(messages, response_format, tools)
+        else:
+            return self._request_chat_completion(messages, tools)
 
-    def _run(
+    async def _arun(
         self,
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion in async mode.
 
         Args:
            messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
 
         Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
-        # Use OpenAI client as interface call Together AI
-        # Reference: https://docs.together.ai/docs/openai-api-compatibility
-        response = self._client.chat.completions.create(
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        if response_format:
+            return await self._arequest_parse(messages, response_format, tools)
+        else:
+            return await self._arequest_chat_completion(messages, tools)
+
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            request_config["tools"] = tools
+
+        return self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
-            **self.model_config_dict,
+            **request_config,
         )
-        return response
 
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
+    async def _arequest_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
 
-        Returns:
-            OpenAITokenCounter: The token counter following the model's
-                tokenization style.
-        """
+        if tools:
+            request_config["tools"] = tools
 
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
+        return await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    def _request_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        return self._client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        return await self._async_client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
 
     def check_model_config(self):
         r"""Check whether the model configuration contains any
camel/models/vllm_model.py
@@ -119,20 +119,34 @@ class VLLMModel(BaseModelBackend):
             self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
         return self._token_counter
 
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to vLLM API.
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion.
 
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to OpenAI API.
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        for param in self.model_config_dict:
-            if param not in VLLM_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into vLLM model backend."
-                )
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        if response_format:
+            return self._request_parse(messages, response_format, tools)
+        else:
+            return self._request_chat_completion(messages, tools)
 
     async def _arun(
         self,
@@ -140,85 +154,154 @@ class VLLMModel(BaseModelBackend):
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
+        r"""Runs inference of OpenAI chat completion in async mode.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
-            response_format (Optional[Type[BaseModel]], optional): The format
-                to return the response in.
-            tools (Optional[List[Dict[str, Any]]], optional): List of tools
-                the model may call.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
 
         Returns:
             Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        if response_format:
+            return await self._arequest_parse(messages, response_format, tools)
+        else:
+            return await self._arequest_chat_completion(messages, tools)
+
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
 
-        kwargs = self.model_config_dict.copy()
         if tools:
-            kwargs["tools"] = tools
-        if response_format:
-            kwargs["response_format"] = {"type": "json_object"}
+            request_config["tools"] = tools
 
         # Remove additionalProperties from each tool's function parameters
-        if tools and "tools" in kwargs:
-            for tool in kwargs["tools"]:
+        if tools and "tools" in request_config:
+            for tool in request_config["tools"]:
                 if "function" in tool and "parameters" in tool["function"]:
                     tool["function"]["parameters"].pop(
                         "additionalProperties", None
                     )
 
-        response = await self._async_client.chat.completions.create(
+        return self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
-            **kwargs,
+            **request_config,
         )
-        return response
 
-    def _run(
+    async def _arequest_chat_completion(
         self,
         messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
 
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-            response_format (Optional[Type[BaseModel]], optional): The format
-                to return the response in.
-            tools (Optional[List[Dict[str, Any]]], optional): List of tools
-                the model may call.
+        if tools:
+            request_config["tools"] = tools
+            # Remove additionalProperties from each tool's function parameters
+            if "tools" in request_config:
+                for tool in request_config["tools"]:
+                    if "function" in tool and "parameters" in tool["function"]:
+                        tool["function"]["parameters"].pop(
+                            "additionalProperties", None
+                        )
 
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
+        return await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
 
-        kwargs = self.model_config_dict.copy()
-        if tools:
-            kwargs["tools"] = tools
-        if response_format:
-            kwargs["response_format"] = {"type": "json_object"}
+    def _request_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
 
-        # Remove additionalProperties from each tool's function parameters
-        if tools and "tools" in kwargs:
-            for tool in kwargs["tools"]:
-                if "function" in tool and "parameters" in tool["function"]:
-                    tool["function"]["parameters"].pop(
-                        "additionalProperties", None
-                    )
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+        if tools is not None:
+            # Create a deep copy of tools to avoid modifying the original
+            import copy
+
+            request_config["tools"] = copy.deepcopy(tools)
+            # Remove additionalProperties and strict from each tool's function
+            # parameters since vLLM does not support them
+            if "tools" in request_config:
+                for tool in request_config["tools"]:
+                    if "function" in tool and "parameters" in tool["function"]:
+                        tool["function"]["parameters"].pop(
+                            "additionalProperties", None
+                        )
+                    if "strict" in tool.get("function", {}):
+                        tool["function"].pop("strict")
 
-        response = self._client.chat.completions.create(
+        return self._client.beta.chat.completions.parse(
             messages=messages,
             model=self.model_type,
-            **kwargs,
+            **request_config,
         )
-        return response
+
+    async def _arequest_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+        if tools is not None:
+            # Create a deep copy of tools to avoid modifying the original
+            import copy
+
+            request_config["tools"] = copy.deepcopy(tools)
+            # Remove additionalProperties and strict from each tool's function
+            # parameters since vLLM does not support them
+            if "tools" in request_config:
+                for tool in request_config["tools"]:
+                    if "function" in tool and "parameters" in tool["function"]:
+                        tool["function"]["parameters"].pop(
+                            "additionalProperties", None
+                        )
+                    if "strict" in tool.get("function", {}):
+                        tool["function"].pop("strict")
+
+        return await self._async_client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to vLLM API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to OpenAI API.
+        """
+        for param in self.model_config_dict:
+            if param not in VLLM_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into vLLM model backend."
+                )
 
     @property
     def stream(self) -> bool:
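
Both vLLM parse paths repeat the same schema cleanup, which reads more easily as a standalone helper. The following is an equivalent sketch; the function name is hypothetical, not part of the package.

# Hypothetical helper equivalent to the inline cleanup above: vLLM rejects
# `additionalProperties` and `strict` in tool schemas, so both are stripped
# from a deep copy, leaving the caller's schemas untouched.
import copy
from typing import Any, Dict, List


def sanitize_tools_for_vllm(
    tools: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    tools = copy.deepcopy(tools)  # never mutate the caller's schemas
    for tool in tools:
        function = tool.get("function", {})
        if "parameters" in function:
            function["parameters"].pop("additionalProperties", None)
        function.pop("strict", None)
    return tools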
camel/societies/workforce/workforce.py
@@ -15,7 +15,6 @@ from __future__ import annotations
 
 import asyncio
 import json
-import logging
 from collections import deque
 from typing import Deque, Dict, List, Optional
 
@@ -23,6 +22,7 @@ from colorama import Fore
 
 from camel.agents import ChatAgent
 from camel.configs import ChatGPTConfig
+from camel.logger import get_logger
 from camel.messages.base import BaseMessage
 from camel.models import ModelFactory
 from camel.societies.workforce.base import BaseNode
@@ -44,7 +44,7 @@ from camel.tasks.task import Task, TaskState
 from camel.toolkits import GoogleMapsToolkit, SearchToolkit, WeatherToolkit
 from camel.types import ModelPlatformType, ModelType
 
-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)
 
 
 class Workforce(BaseNode):
@@ -60,13 +60,16 @@ class Workforce(BaseNode):
            another workforce node. (default: :obj:`None`)
        coordinator_agent_kwargs (Optional[Dict], optional): Keyword
            arguments for the coordinator agent, e.g. `model`, `api_key`,
-            `tools`, etc. (default: :obj:`None`)
+            `tools`, etc. If not provided, default model settings will be used.
+            (default: :obj:`None`)
        task_agent_kwargs (Optional[Dict], optional): Keyword arguments for
            the task agent, e.g. `model`, `api_key`, `tools`, etc.
+            If not provided, default model settings will be used.
            (default: :obj:`None`)
        new_worker_agent_kwargs (Optional[Dict]): Default keyword arguments
            for the worker agent that will be created during runtime to
            handle failed tasks, e.g. `model`, `api_key`, `tools`, etc.
+            If not provided, default model settings will be used.
            (default: :obj:`None`)
    """
 
@@ -83,6 +86,26 @@ class Workforce(BaseNode):
         self._children = children or []
         self.new_worker_agent_kwargs = new_worker_agent_kwargs
 
+        # Warning messages for default model usage
+        if coordinator_agent_kwargs is None:
+            logger.warning(
+                "No coordinator_agent_kwargs provided. "
+                "Using `ModelPlatformType.DEFAULT` and `ModelType.DEFAULT` "
+                "for coordinator agent."
+            )
+        if task_agent_kwargs is None:
+            logger.warning(
+                "No task_agent_kwargs provided. "
+                "Using `ModelPlatformType.DEFAULT` and `ModelType.DEFAULT` "
+                "for task agent."
+            )
+        if new_worker_agent_kwargs is None:
+            logger.warning(
+                "No new_worker_agent_kwargs provided. "
+                "Using `ModelPlatformType.DEFAULT` and `ModelType.DEFAULT` "
+                "for worker agents created during runtime."
+            )
+
         coord_agent_sys_msg = BaseMessage.make_assistant_message(
             role_name="Workforce Manager",
             content="You are coordinating a group of workers. A worker can be "