camel-ai 0.1.9__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +334 -113
- camel/agents/knowledge_graph_agent.py +4 -6
- camel/bots/__init__.py +34 -0
- camel/bots/discord_app.py +138 -0
- camel/bots/slack/__init__.py +30 -0
- camel/bots/slack/models.py +158 -0
- camel/bots/slack/slack_app.py +255 -0
- camel/bots/telegram_bot.py +82 -0
- camel/configs/__init__.py +1 -2
- camel/configs/anthropic_config.py +2 -5
- camel/configs/base_config.py +6 -6
- camel/configs/gemini_config.py +1 -1
- camel/configs/groq_config.py +2 -3
- camel/configs/ollama_config.py +1 -2
- camel/configs/openai_config.py +2 -23
- camel/configs/samba_config.py +2 -2
- camel/configs/togetherai_config.py +1 -1
- camel/configs/vllm_config.py +1 -1
- camel/configs/zhipuai_config.py +2 -3
- camel/embeddings/openai_embedding.py +2 -2
- camel/loaders/__init__.py +2 -0
- camel/loaders/chunkr_reader.py +163 -0
- camel/loaders/firecrawl_reader.py +13 -45
- camel/loaders/unstructured_io.py +65 -29
- camel/messages/__init__.py +1 -0
- camel/messages/func_message.py +2 -2
- camel/models/__init__.py +2 -4
- camel/models/anthropic_model.py +32 -26
- camel/models/azure_openai_model.py +39 -36
- camel/models/base_model.py +31 -20
- camel/models/gemini_model.py +37 -29
- camel/models/groq_model.py +29 -23
- camel/models/litellm_model.py +44 -61
- camel/models/mistral_model.py +33 -30
- camel/models/model_factory.py +66 -76
- camel/models/nemotron_model.py +33 -23
- camel/models/ollama_model.py +42 -47
- camel/models/{openai_compatibility_model.py → openai_compatible_model.py} +36 -41
- camel/models/openai_model.py +60 -25
- camel/models/reka_model.py +30 -28
- camel/models/samba_model.py +82 -177
- camel/models/stub_model.py +2 -2
- camel/models/togetherai_model.py +37 -43
- camel/models/vllm_model.py +43 -50
- camel/models/zhipuai_model.py +33 -27
- camel/retrievers/auto_retriever.py +28 -10
- camel/retrievers/vector_retriever.py +72 -44
- camel/societies/babyagi_playing.py +6 -3
- camel/societies/role_playing.py +17 -3
- camel/storages/__init__.py +2 -0
- camel/storages/graph_storages/__init__.py +2 -0
- camel/storages/graph_storages/graph_element.py +3 -5
- camel/storages/graph_storages/nebula_graph.py +547 -0
- camel/storages/key_value_storages/json.py +6 -1
- camel/tasks/task.py +11 -4
- camel/tasks/task_prompt.py +4 -0
- camel/toolkits/__init__.py +28 -24
- camel/toolkits/arxiv_toolkit.py +155 -0
- camel/toolkits/ask_news_toolkit.py +653 -0
- camel/toolkits/base.py +2 -3
- camel/toolkits/code_execution.py +6 -7
- camel/toolkits/dalle_toolkit.py +6 -6
- camel/toolkits/{openai_function.py → function_tool.py} +34 -11
- camel/toolkits/github_toolkit.py +9 -10
- camel/toolkits/google_maps_toolkit.py +7 -14
- camel/toolkits/google_scholar_toolkit.py +146 -0
- camel/toolkits/linkedin_toolkit.py +7 -10
- camel/toolkits/math_toolkit.py +8 -8
- camel/toolkits/open_api_toolkit.py +5 -8
- camel/toolkits/reddit_toolkit.py +7 -10
- camel/toolkits/retrieval_toolkit.py +5 -9
- camel/toolkits/search_toolkit.py +9 -9
- camel/toolkits/slack_toolkit.py +11 -14
- camel/toolkits/twitter_toolkit.py +377 -454
- camel/toolkits/weather_toolkit.py +6 -6
- camel/toolkits/whatsapp_toolkit.py +177 -0
- camel/types/__init__.py +6 -1
- camel/types/enums.py +43 -85
- camel/types/openai_types.py +3 -0
- camel/types/unified_model_type.py +104 -0
- camel/utils/__init__.py +0 -2
- camel/utils/async_func.py +7 -7
- camel/utils/commons.py +40 -4
- camel/utils/token_counting.py +38 -214
- camel/workforce/__init__.py +6 -6
- camel/workforce/base.py +9 -5
- camel/workforce/prompts.py +179 -0
- camel/workforce/role_playing_worker.py +181 -0
- camel/workforce/{single_agent_node.py → single_agent_worker.py} +49 -23
- camel/workforce/task_channel.py +7 -8
- camel/workforce/utils.py +20 -50
- camel/workforce/{worker_node.py → worker.py} +15 -12
- camel/workforce/workforce.py +456 -19
- camel_ai-0.2.3.dist-info/LICENSE +201 -0
- {camel_ai-0.1.9.dist-info → camel_ai-0.2.3.dist-info}/METADATA +40 -65
- {camel_ai-0.1.9.dist-info → camel_ai-0.2.3.dist-info}/RECORD +98 -86
- {camel_ai-0.1.9.dist-info → camel_ai-0.2.3.dist-info}/WHEEL +1 -1
- camel/models/open_source_model.py +0 -170
- camel/workforce/manager_node.py +0 -299
- camel/workforce/role_playing_node.py +0 -168
- camel/workforce/workforce_prompt.py +0 -125
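The rename with the widest impact on downstream code is camel/toolkits/{openai_function.py → function_tool.py}: the tool wrapper exported from camel.toolkits becomes FunctionTool (the old OpenAIFunction name is inferred from the renamed module; the new constructor usage appears in the chat_agent.py diff below). A minimal migration sketch, with my_func as a hypothetical user-defined tool:

def my_func(a: int, b: int) -> int:
    r"""Adds two numbers (hypothetical example tool)."""
    return a + b

# camel-ai 0.1.9 (old):
# from camel.toolkits import OpenAIFunction
# tool = OpenAIFunction(my_func)

# camel-ai 0.2.3 (new):
from camel.toolkits import FunctionTool
tool = FunctionTool(my_func)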
camel/__init__.py CHANGED
camel/agents/chat_agent.py CHANGED
@@ -15,6 +15,8 @@ from __future__ import annotations
 
 import json
 import logging
+import re
+import uuid
 from collections import defaultdict
 from typing import (
     TYPE_CHECKING,
@@ -28,10 +30,10 @@ from typing import (
 )
 
 from openai.types.chat import ChatCompletionMessageToolCall
+from openai.types.chat.chat_completion_message_tool_call import Function
 from pydantic import BaseModel
 
 from camel.agents.base import BaseAgent
-from camel.configs import ChatGPTConfig
 from camel.memories import (
     AgentMemory,
     ChatHistoryMemory,
@@ -60,7 +62,7 @@ if TYPE_CHECKING:
     from openai import Stream
 
     from camel.terminators import ResponseTerminator
-    from camel.toolkits import OpenAIFunction
+    from camel.toolkits import FunctionTool
 
 
 logger = logging.getLogger(__name__)
@@ -112,7 +114,8 @@ class ChatAgent(BaseAgent):
     r"""Class for managing conversations of CAMEL Chat Agents.
 
     Args:
-        system_message (BaseMessage): The system message for the chat agent.
+        system_message (Union[BaseMessage, str], optional): The system message
+            for the chat agent.
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
             `GPT_4O_MINI`)
@@ -128,10 +131,10 @@ class ChatAgent(BaseAgent):
             (default: :obj:`None`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
-        tools (List[OpenAIFunction], optional): List of available
-            :obj:`OpenAIFunction`. (default: :obj:`None`)
-        external_tools (List[OpenAIFunction], optional): List of external tools
-            (:obj:`OpenAIFunction`) bind to one chat agent. When these tools
+        tools (List[FunctionTool], optional): List of available
+            :obj:`FunctionTool`. (default: :obj:`None`)
+        external_tools (List[FunctionTool], optional): List of external tools
+            (:obj:`FunctionTool`) bind to one chat agent. When these tools
             are called, the agent will directly return the request instead of
             processing it. (default: :obj:`None`)
         response_terminators (List[ResponseTerminator], optional): List of
@@ -141,34 +144,42 @@ class ChatAgent(BaseAgent):
 
     def __init__(
         self,
-        system_message: BaseMessage,
+        system_message: Optional[Union[BaseMessage, str]] = None,
         model: Optional[BaseModelBackend] = None,
         memory: Optional[AgentMemory] = None,
         message_window_size: Optional[int] = None,
        token_limit: Optional[int] = None,
         output_language: Optional[str] = None,
-        tools: Optional[List[OpenAIFunction]] = None,
-        external_tools: Optional[List[OpenAIFunction]] = None,
+        tools: Optional[List[FunctionTool]] = None,
+        external_tools: Optional[List[FunctionTool]] = None,
         response_terminators: Optional[List[ResponseTerminator]] = None,
     ) -> None:
-        self.orig_sys_message: BaseMessage = system_message
-        self._system_message = system_message
-        self.role_name: str = system_message.role_name
-        self.role_type: RoleType = system_message.role_type
+        if isinstance(system_message, str):
+            system_message = BaseMessage.make_assistant_message(
+                role_name='Assistant', content=system_message
+            )
+
+        self.orig_sys_message: Optional[BaseMessage] = system_message
+        self._system_message: Optional[BaseMessage] = system_message
+        self.role_name: str = (
+            getattr(system_message, 'role_name', None) or "assistant"
+        )
+        self.role_type: RoleType = (
+            getattr(system_message, 'role_type', None) or RoleType.ASSISTANT
+        )
         self.model_backend: BaseModelBackend = (
             model
             if model is not None
             else ModelFactory.create(
-                model_platform=ModelPlatformType.OPENAI,
-                model_type=ModelType.GPT_4O_MINI,
-                model_config_dict=ChatGPTConfig().as_dict(),
+                model_platform=ModelPlatformType.DEFAULT,
+                model_type=ModelType.DEFAULT,
             )
         )
         self.output_language: Optional[str] = output_language
         if self.output_language is not None:
             self.set_output_language(self.output_language)
 
-        self.model_type: ModelType = self.model_backend.model_type
+        self.model_type = self.model_backend.model_type
 
         # tool registration
         external_tools = external_tools or []
@@ -181,6 +192,16 @@ class ChatAgent(BaseAgent):
             tool.get_function_name(): tool.func for tool in all_tools
         }
 
+        # If the user hasn't configured tools in `BaseModelBackend`,
+        # the tools set from `ChatAgent` will be used.
+        # This design simplifies the interface while retaining tool-running
+        # capabilities for `BaseModelBackend`.
+        if all_tools and not self.model_backend.model_config_dict.get("tools"):
+            tool_schema_list = [
+                tool.get_openai_tool_schema() for tool in all_tools
+            ]
+            self.model_backend.model_config_dict['tools'] = tool_schema_list
+            self.tool_schema_list = tool_schema_list
         self.model_config_dict = self.model_backend.model_config_dict
 
         self.model_token_limit = token_limit or self.model_backend.token_limit
@@ -196,6 +217,56 @@ class ChatAgent(BaseAgent):
         self.response_terminators = response_terminators or []
         self.init_messages()
 
+    # ruff: noqa: E501
+    def _generate_tool_prompt(self, tool_schema_list: List[Dict]) -> str:
+        tool_prompts = []
+
+        for tool in tool_schema_list:
+            tool_info = tool['function']
+            tool_name = tool_info['name']
+            tool_description = tool_info['description']
+            tool_json = json.dumps(tool_info, indent=4)
+
+            prompt = f"Use the function '{tool_name}' to '{tool_description}':\n{tool_json}\n"
+            tool_prompts.append(prompt)
+
+        tool_prompt_str = "\n".join(tool_prompts)
+
+        final_prompt = f'''
+# Tool prompt
+TOOL_PROMPT = f"""
+You have access to the following functions:
+
+{tool_prompt_str}
+
+If you choose to call a function ONLY reply in the following format with no prefix or suffix:
+
+<function=example_function_name>{{"example_name": "example_value"}}</function>
+
+Reminder:
+- Function calls MUST follow the specified format, start with <function= and end with </function>
+- Required parameters MUST be specified
+- Only call one function at a time
+- Put the entire function call reply on one line
+- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls
+"""
+'''
+        return final_prompt
+
+    def _parse_tool_response(self, response: str):
+        function_regex = r"<function=(\w+)>(.*?)</function>"
+        match = re.search(function_regex, response)
+
+        if match:
+            function_name, args_string = match.groups()
+            try:
+                args = json.loads(args_string)
+                return {"function": function_name, "arguments": args}
+            except json.JSONDecodeError as error:
+                print(f"Error parsing function arguments: {error}")
+                return None
+        return None
+
     def reset(self):
         r"""Resets the :obj:`ChatAgent` to its initial state and returns the
         stored messages.
@@ -209,11 +280,12 @@ class ChatAgent(BaseAgent):
             terminator.reset()
 
     @property
-    def system_message(self) -> BaseMessage:
+    def system_message(self) -> Optional[BaseMessage]:
         r"""The getter method for the property :obj:`system_message`.
 
         Returns:
-            BaseMessage: The system message of this agent
+            Optional[BaseMessage]: The system message of this agent if set,
+                else :obj:`None`.
         """
         return self._system_message
 
@@ -264,12 +336,22 @@ class ChatAgent(BaseAgent):
             BaseMessage: The updated system message object.
         """
         self.output_language = output_language
-        content = self._system_message.content + (
+        language_prompt = (
             "\nRegardless of the input language, "
             f"you must output text in {output_language}."
         )
-        self._system_message = self._system_message.create_new_instance(content)
-        return self._system_message
+        if self.orig_sys_message is not None:
+            content = self.orig_sys_message.content + language_prompt
+            self._system_message = self.orig_sys_message.create_new_instance(
+                content
+            )
+            return self._system_message
+        else:
+            self._system_message = BaseMessage.make_assistant_message(
+                role_name="Assistant",
+                content=language_prompt,
+            )
+            return self._system_message
 
     def get_info(
         self,
@@ -314,12 +396,15 @@ class ChatAgent(BaseAgent):
         r"""Initializes the stored messages list with the initial system
         message.
         """
-        system_record = MemoryRecord(
-            message=self.orig_sys_message,
-            role_at_backend=OpenAIBackendRole.SYSTEM,
-        )
-        self.memory.clear()
-        self.memory.write_record(system_record)
+        if self.orig_sys_message is not None:
+            system_record = MemoryRecord(
+                message=self.orig_sys_message,
+                role_at_backend=OpenAIBackendRole.SYSTEM,
+            )
+            self.memory.clear()
+            self.memory.write_record(system_record)
+        else:
+            self.memory.clear()
 
     def record_message(self, message: BaseMessage) -> None:
         r"""Records the externally provided message into the agent memory as if
@@ -334,19 +419,19 @@ class ChatAgent(BaseAgent):
 
     def step(
         self,
-        input_message: BaseMessage,
-        output_schema: Optional[Type[BaseModel]] = None,
+        input_message: Union[BaseMessage, str],
+        response_format: Optional[Type[BaseModel]] = None,
     ) -> ChatAgentResponse:
         r"""Performs a single step in the chat session by generating a response
         to the input message.
 
         Args:
-            input_message (BaseMessage): The input message to the agent.
-                Its `role` field that specifies the role at backend may be
-                either `user` or `assistant` but it will be set to `user`
-                anyway since for the self agent any incoming message is
-                external.
-            output_schema (Optional[Type[BaseModel]], optional): A pydantic
+            input_message (Union[BaseMessage, str]): The input message to the
+                agent. For BaseMessage input, its `role` field that specifies
+                the role at backend may be either `user` or `assistant` but it
+                will be set to `user` anyway since for the self agent any
+                incoming message is external. For str input, the `role_name` would be `User`.
+            response_format (Optional[Type[BaseModel]], optional): A pydantic
                 model class that includes value types and field descriptions
                 used to generate a structured response by LLM. This schema
                 helps in defining the expected output format. (default:
@@ -357,104 +442,233 @@ class ChatAgent(BaseAgent):
             a boolean indicating whether the chat session has terminated,
             and information about the chat session.
         """
-        self.update_memory(input_message, OpenAIBackendRole.USER)
+        if isinstance(input_message, str):
+            input_message = BaseMessage.make_user_message(
+                role_name='User', content=input_message
+            )
 
-        tool_call_records: List[FunctionCallingRecord] = []
-        while True:
-            # Check if token has exceeded
-            try:
-                openai_messages, num_tokens = self.memory.get_context()
-            except RuntimeError as e:
-                return self._step_token_exceed(
-                    e.args[1], tool_call_records, "max_tokens_exceeded"
+        if "llama" in self.model_type.lower():
+            if self.model_backend.model_config_dict.get("tools", None):
+                tool_prompt = self._generate_tool_prompt(self.tool_schema_list)
+
+                tool_sys_msg = BaseMessage.make_assistant_message(
+                    role_name="Assistant",
+                    content=tool_prompt,
                 )
 
-            (
-                response,
+                self.update_memory(tool_sys_msg, OpenAIBackendRole.SYSTEM)
+
+            self.update_memory(input_message, OpenAIBackendRole.USER)
+
+            tool_call_records: List[FunctionCallingRecord] = []
+            while True:
+                # Check if token has exceeded
+                try:
+                    openai_messages, num_tokens = self.memory.get_context()
+                except RuntimeError as e:
+                    return self._step_token_exceed(
+                        e.args[1], tool_call_records, "max_tokens_exceeded"
+                    )
+
+                (
+                    response,
+                    output_messages,
+                    finish_reasons,
+                    usage_dict,
+                    response_id,
+                ) = self._step_model_response(openai_messages, num_tokens)
+                # If the model response is not a function call, meaning the
+                # model has generated a message response, break the loop
+                if (
+                    not self.is_tools_added()
+                    or not isinstance(response, ChatCompletion)
+                    or "</function>" not in response.choices[0].message.content  # type: ignore[operator]
+                ):
+                    break
+
+                parsed_content = self._parse_tool_response(
+                    response.choices[0].message.content  # type: ignore[arg-type]
+                )
+
+                response.choices[0].message.tool_calls = [
+                    ChatCompletionMessageToolCall(
+                        id=str(uuid.uuid4()),
+                        function=Function(
+                            arguments=str(parsed_content["arguments"]).replace(
+                                "'", '"'
+                            ),
+                            name=str(parsed_content["function"]),
+                        ),
+                        type="function",
+                    )
+                ]
+
+                # Check for external tool call
+                tool_call_request = response.choices[0].message.tool_calls[0]
+                if tool_call_request.function.name in self.external_tool_names:
+                    # if model calls an external tool, directly return the
+                    # request
+                    info = self._step_get_info(
+                        output_messages,
+                        finish_reasons,
+                        usage_dict,
+                        response_id,
+                        tool_call_records,
+                        num_tokens,
+                        tool_call_request,
+                    )
+                    return ChatAgentResponse(
+                        msgs=output_messages,
+                        terminated=self.terminated,
+                        info=info,
+                    )
+
+                # Normal function calling
+                tool_call_records.append(
+                    self._step_tool_call_and_update(response)
+                )
+
+            if response_format is not None:
+                (
+                    output_messages,
+                    finish_reasons,
+                    usage_dict,
+                    response_id,
+                    tool_call,
+                    num_tokens,
+                ) = self._structure_output_with_function(response_format)
+                tool_call_records.append(tool_call)
+
+            info = self._step_get_info(
                 output_messages,
                 finish_reasons,
                 usage_dict,
                 response_id,
-            ) = self._step_model_response(openai_messages, num_tokens)
+                tool_call_records,
+                num_tokens,
+            )
 
-            # If the model response is not a function call, meaning the
-            # model has generated a message response, break the loop
-            if (
-                not self.is_tools_added()
-                or not isinstance(response, ChatCompletion)
-                or response.choices[0].message.tool_calls is None
-            ):
-                break
+            if len(output_messages) == 1:
+                # Auto record if the output result is a single message
+                self.record_message(output_messages[0])
+            else:
+                logger.warning(
+                    "Multiple messages returned in `step()`, message won't be "
+                    "recorded automatically. Please call `record_message()` "
+                    "to record the selected message manually."
+                )
 
-            # Check for external tool call
-            tool_call_request = response.choices[0].message.tool_calls[0]
-            if tool_call_request.function.name in self.external_tool_names:
-                # if model calls an external tool, directly return the request
-                info = self._step_get_info(
+            return ChatAgentResponse(
+                msgs=output_messages, terminated=self.terminated, info=info
+            )
+
+        else:
+            self.update_memory(input_message, OpenAIBackendRole.USER)
+
+            tool_call_records: List[FunctionCallingRecord] = []  # type: ignore[no-redef]
+            while True:
+                # Check if token has exceeded
+                try:
+                    openai_messages, num_tokens = self.memory.get_context()
+                except RuntimeError as e:
+                    return self._step_token_exceed(
+                        e.args[1], tool_call_records, "max_tokens_exceeded"
+                    )
+
+                (
+                    response,
                     output_messages,
                     finish_reasons,
                     usage_dict,
                     response_id,
-                    tool_call_records,
-                    num_tokens,
-                    tool_call_request,
-                )
-                return ChatAgentResponse(
-                    msgs=output_messages, terminated=self.terminated, info=info
+                ) = self._step_model_response(openai_messages, num_tokens)
+                # If the model response is not a function call, meaning the
+                # model has generated a message response, break the loop
+                if (
+                    not self.is_tools_added()
+                    or not isinstance(response, ChatCompletion)
+                    or response.choices[0].message.tool_calls is None
+                ):
+                    break
+
+                # Check for external tool call
+                tool_call_request = response.choices[0].message.tool_calls[0]
+
+                if tool_call_request.function.name in self.external_tool_names:
+                    # if model calls an external tool, directly return the
+                    # request
+                    info = self._step_get_info(
+                        output_messages,
+                        finish_reasons,
+                        usage_dict,
+                        response_id,
+                        tool_call_records,
+                        num_tokens,
+                        tool_call_request,
+                    )
+                    return ChatAgentResponse(
+                        msgs=output_messages,
+                        terminated=self.terminated,
+                        info=info,
+                    )
+
+                # Normal function calling
+                tool_call_records.append(
+                    self._step_tool_call_and_update(response)
                 )
 
-            # Normal function calling
-            tool_call_records.append(self._step_tool_call_and_update(response))
+            if (
+                response_format is not None
+                and self.model_type.support_native_tool_calling
+            ):
+                (
+                    output_messages,
+                    finish_reasons,
+                    usage_dict,
+                    response_id,
+                    tool_call,
+                    num_tokens,
+                ) = self._structure_output_with_function(response_format)
+                tool_call_records.append(tool_call)
 
-        if output_schema is not None:
-            (
+            info = self._step_get_info(
                 output_messages,
                 finish_reasons,
                 usage_dict,
                 response_id,
-                tool_call,
+                tool_call_records,
                 num_tokens,
-            ) = self._structure_output_with_function(output_schema)
-            tool_call_records.append(tool_call)
+            )
 
-        info = self._step_get_info(
-            output_messages,
-            finish_reasons,
-            usage_dict,
-            response_id,
-            tool_call_records,
-            num_tokens,
-        )
+            if len(output_messages) == 1:
+                # Auto record if the output result is a single message
+                self.record_message(output_messages[0])
+            else:
+                logger.warning(
+                    "Multiple messages returned in `step()`, message won't be "
+                    "recorded automatically. Please call `record_message()` "
+                    "to record the selected message manually."
+                )
 
-        if len(output_messages) == 1:
-            # Auto record if the output result is a single message
-            self.record_message(output_messages[0])
-        else:
-            logger.warning(
-                "Multiple messages returned in `step()`, message won't be "
-                "recorded automatically. Please call `record_message()` to "
-                "record the selected message manually."
+            return ChatAgentResponse(
+                msgs=output_messages, terminated=self.terminated, info=info
             )
 
-        return ChatAgentResponse(
-            msgs=output_messages, terminated=self.terminated, info=info
-        )
-
     async def step_async(
         self,
-        input_message: BaseMessage,
-        output_schema: Optional[Type[BaseModel]] = None,
+        input_message: Union[BaseMessage, str],
+        response_format: Optional[Type[BaseModel]] = None,
    ) -> ChatAgentResponse:
         r"""Performs a single step in the chat session by generating a response
         to the input message. This agent step can call async function calls.
 
         Args:
-            input_message (BaseMessage): The input message to the agent.
-                Its `role` field that specifies the role at backend may be
-                either `user` or `assistant` but it will be set to `user`
-                anyway since for the self agent any incoming message is
-                external.
-            output_schema (Optional[Type[BaseModel]], optional): A pydantic
+            input_message (Union[BaseMessage, str]): The input message to the
+                agent. For BaseMessage input, its `role` field that specifies
+                the role at backend may be either `user` or `assistant` but it
+                will be set to `user` anyway since for the self agent any
+                incoming message is external. For str input, the `role_name` would be `User`.
+            response_format (Optional[Type[BaseModel]], optional): A pydantic
                 model class that includes value types and field descriptions
                 used to generate a structured response by LLM. This schema
                 helps in defining the expected output format. (default:
@@ -465,6 +679,11 @@ class ChatAgent(BaseAgent):
             a boolean indicating whether the chat session has terminated,
             and information about the chat session.
         """
+        if isinstance(input_message, str):
+            input_message = BaseMessage.make_user_message(
+                role_name='User', content=input_message
+            )
+
         self.update_memory(input_message, OpenAIBackendRole.USER)
 
         tool_call_records: List[FunctionCallingRecord] = []
@@ -513,7 +732,10 @@ class ChatAgent(BaseAgent):
                 await self._step_tool_call_and_update_async(response)
             )
 
-        if output_schema is not None:
+        if (
+            response_format is not None
+            and self.model_type.support_native_tool_calling
+        ):
             (
                 output_messages,
                 finish_reasons,
@@ -521,7 +743,7 @@ class ChatAgent(BaseAgent):
                 response_id,
                 tool_call_record,
                 num_tokens,
-            ) = self._structure_output_with_function(output_schema)
+            ) = self._structure_output_with_function(response_format)
             tool_call_records.append(tool_call_record)
 
             info = self._step_get_info(
@@ -588,7 +810,7 @@ class ChatAgent(BaseAgent):
         return func_record
 
     def _structure_output_with_function(
-        self, output_schema: Type[BaseModel]
+        self, response_format: Type[BaseModel]
     ) -> Tuple[
         List[BaseMessage],
         List[str],
@@ -600,21 +822,23 @@ class ChatAgent(BaseAgent):
         r"""Internal function of structuring the output of the agent based on
         the given output schema.
         """
-        from camel.toolkits import OpenAIFunction
+        from camel.toolkits import FunctionTool
 
-        schema_json = get_pydantic_object_schema(output_schema)
+        schema_json = get_pydantic_object_schema(response_format)
         func_str = json_to_function_code(schema_json)
         func_callable = func_string_to_callable(func_str)
-        func = OpenAIFunction(func_callable)
+        func = FunctionTool(func_callable)
 
         original_func_dict = self.func_dict
         original_model_dict = self.model_backend.model_config_dict
 
         # Replace the original tools with the structuring function
         self.func_dict = {func.get_function_name(): func.func}
+        self.model_backend.model_config_dict = original_model_dict.copy()
         self.model_backend.model_config_dict["tools"] = [
             func.get_openai_tool_schema()
         ]
+        self.model_backend.model_config_dict["tool_choice"] = "required"
 
         openai_messages, num_tokens = self.memory.get_context()
         (
@@ -977,10 +1201,7 @@ class ChatAgent(BaseAgent):
         Returns:
             dict: Usage dictionary.
         """
-        if isinstance(self.model_type, ModelType):
-            encoding = get_model_encoding(self.model_type.value_for_tiktoken)
-        else:
-            encoding = get_model_encoding("gpt-4o-mini")
+        encoding = get_model_encoding(self.model_type.value_for_tiktoken)
         completion_tokens = 0
         for message in output_messages:
             completion_tokens += len(encoding.encode(message.content))