camel-ai 0.2.0__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +326 -115
- camel/agents/knowledge_graph_agent.py +4 -6
- camel/bots/__init__.py +34 -0
- camel/bots/discord_app.py +138 -0
- camel/bots/slack/__init__.py +30 -0
- camel/bots/slack/models.py +158 -0
- camel/bots/slack/slack_app.py +255 -0
- camel/bots/telegram_bot.py +82 -0
- camel/configs/__init__.py +1 -2
- camel/configs/anthropic_config.py +2 -5
- camel/configs/base_config.py +6 -6
- camel/configs/gemini_config.py +1 -1
- camel/configs/groq_config.py +2 -3
- camel/configs/ollama_config.py +1 -2
- camel/configs/openai_config.py +2 -23
- camel/configs/samba_config.py +2 -2
- camel/configs/togetherai_config.py +1 -1
- camel/configs/vllm_config.py +1 -1
- camel/configs/zhipuai_config.py +2 -3
- camel/embeddings/openai_embedding.py +2 -2
- camel/loaders/__init__.py +2 -0
- camel/loaders/chunkr_reader.py +163 -0
- camel/loaders/firecrawl_reader.py +13 -45
- camel/loaders/unstructured_io.py +65 -29
- camel/messages/__init__.py +1 -0
- camel/messages/func_message.py +2 -2
- camel/models/__init__.py +2 -4
- camel/models/anthropic_model.py +32 -26
- camel/models/azure_openai_model.py +39 -36
- camel/models/base_model.py +31 -20
- camel/models/gemini_model.py +37 -29
- camel/models/groq_model.py +29 -23
- camel/models/litellm_model.py +44 -61
- camel/models/mistral_model.py +33 -30
- camel/models/model_factory.py +66 -76
- camel/models/nemotron_model.py +33 -23
- camel/models/ollama_model.py +42 -47
- camel/models/{openai_compatibility_model.py → openai_compatible_model.py} +36 -41
- camel/models/openai_model.py +48 -29
- camel/models/reka_model.py +30 -28
- camel/models/samba_model.py +82 -177
- camel/models/stub_model.py +2 -2
- camel/models/togetherai_model.py +37 -43
- camel/models/vllm_model.py +43 -50
- camel/models/zhipuai_model.py +33 -27
- camel/retrievers/auto_retriever.py +28 -10
- camel/retrievers/vector_retriever.py +72 -44
- camel/societies/babyagi_playing.py +6 -3
- camel/societies/role_playing.py +17 -3
- camel/storages/__init__.py +2 -0
- camel/storages/graph_storages/__init__.py +2 -0
- camel/storages/graph_storages/graph_element.py +3 -5
- camel/storages/graph_storages/nebula_graph.py +547 -0
- camel/storages/key_value_storages/json.py +6 -1
- camel/tasks/task.py +11 -4
- camel/tasks/task_prompt.py +4 -0
- camel/toolkits/__init__.py +20 -7
- camel/toolkits/arxiv_toolkit.py +155 -0
- camel/toolkits/ask_news_toolkit.py +653 -0
- camel/toolkits/base.py +2 -3
- camel/toolkits/code_execution.py +6 -7
- camel/toolkits/dalle_toolkit.py +6 -6
- camel/toolkits/{openai_function.py → function_tool.py} +34 -11
- camel/toolkits/github_toolkit.py +9 -10
- camel/toolkits/google_maps_toolkit.py +7 -7
- camel/toolkits/google_scholar_toolkit.py +146 -0
- camel/toolkits/linkedin_toolkit.py +7 -7
- camel/toolkits/math_toolkit.py +8 -8
- camel/toolkits/open_api_toolkit.py +5 -5
- camel/toolkits/reddit_toolkit.py +7 -7
- camel/toolkits/retrieval_toolkit.py +5 -5
- camel/toolkits/search_toolkit.py +9 -9
- camel/toolkits/slack_toolkit.py +11 -11
- camel/toolkits/twitter_toolkit.py +378 -452
- camel/toolkits/weather_toolkit.py +6 -6
- camel/toolkits/whatsapp_toolkit.py +177 -0
- camel/types/__init__.py +6 -1
- camel/types/enums.py +43 -85
- camel/types/openai_types.py +3 -0
- camel/types/unified_model_type.py +104 -0
- camel/utils/__init__.py +0 -2
- camel/utils/async_func.py +7 -7
- camel/utils/commons.py +40 -4
- camel/utils/token_counting.py +30 -212
- camel/workforce/__init__.py +6 -6
- camel/workforce/base.py +9 -5
- camel/workforce/prompts.py +179 -0
- camel/workforce/role_playing_worker.py +181 -0
- camel/workforce/{single_agent_node.py → single_agent_worker.py} +49 -23
- camel/workforce/task_channel.py +7 -8
- camel/workforce/utils.py +20 -50
- camel/workforce/{worker_node.py → worker.py} +15 -12
- camel/workforce/workforce.py +456 -19
- camel_ai-0.2.3.dist-info/LICENSE +201 -0
- {camel_ai-0.2.0.dist-info → camel_ai-0.2.3.dist-info}/METADATA +39 -65
- {camel_ai-0.2.0.dist-info → camel_ai-0.2.3.dist-info}/RECORD +98 -86
- {camel_ai-0.2.0.dist-info → camel_ai-0.2.3.dist-info}/WHEEL +1 -1
- camel/models/open_source_model.py +0 -170
- camel/workforce/manager_node.py +0 -299
- camel/workforce/role_playing_node.py +0 -168
- camel/workforce/workforce_prompt.py +0 -125
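The most visible API change in this release is in camel/agents/chat_agent.py, shown in full below: `ChatAgent` now accepts plain strings for both the system message and `step()` input, and a `response_format` parameter drives structured output. A minimal sketch of the new surface, assuming only the signatures visible in this diff:

    from camel.agents import ChatAgent

    # system_message may now be a plain str (or omitted entirely);
    # the agent wraps it into a BaseMessage internally.
    agent = ChatAgent(system_message="You are a helpful assistant.")

    # step() likewise accepts a raw string, wrapped as a user message.
    response = agent.step("Say hello in French.")
    print(response.msgs[0].content)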
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -15,6 +15,8 @@ from __future__ import annotations
 
 import json
 import logging
+import re
+import uuid
 from collections import defaultdict
 from typing import (
     TYPE_CHECKING,
@@ -28,10 +30,10 @@ from typing import (
 )
 
 from openai.types.chat import ChatCompletionMessageToolCall
+from openai.types.chat.chat_completion_message_tool_call import Function
 from pydantic import BaseModel
 
 from camel.agents.base import BaseAgent
-from camel.configs import ChatGPTConfig
 from camel.memories import (
     AgentMemory,
     ChatHistoryMemory,
@@ -60,7 +62,7 @@ if TYPE_CHECKING:
     from openai import Stream
 
     from camel.terminators import ResponseTerminator
-    from camel.toolkits import OpenAIFunction
+    from camel.toolkits import FunctionTool
 
 
 logger = logging.getLogger(__name__)
@@ -112,7 +114,8 @@ class ChatAgent(BaseAgent):
     r"""Class for managing conversations of CAMEL Chat Agents.
 
     Args:
-        system_message (BaseMessage): The system message
+        system_message (Union[BaseMessage, str], optional): The system message
+            for the chat agent.
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
             `GPT_4O_MINI`)
@@ -128,10 +131,10 @@ class ChatAgent(BaseAgent):
             (default: :obj:`None`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
-        tools (List[OpenAIFunction], optional): List of available
-            :obj:`OpenAIFunction`. (default: :obj:`None`)
-        external_tools (List[OpenAIFunction], optional): List of external tools
-            (:obj:`OpenAIFunction`) bind to one chat agent. When these tools
+        tools (List[FunctionTool], optional): List of available
+            :obj:`FunctionTool`. (default: :obj:`None`)
+        external_tools (List[FunctionTool], optional): List of external tools
+            (:obj:`FunctionTool`) bind to one chat agent. When these tools
             are called, the agent will directly return the request instead of
             processing it. (default: :obj:`None`)
         response_terminators (List[ResponseTerminator], optional): List of
@@ -141,34 +144,42 @@ class ChatAgent(BaseAgent):
 
     def __init__(
         self,
-        system_message: BaseMessage,
+        system_message: Optional[Union[BaseMessage, str]] = None,
         model: Optional[BaseModelBackend] = None,
         memory: Optional[AgentMemory] = None,
         message_window_size: Optional[int] = None,
         token_limit: Optional[int] = None,
         output_language: Optional[str] = None,
-        tools: Optional[List[OpenAIFunction]] = None,
-        external_tools: Optional[List[OpenAIFunction]] = None,
+        tools: Optional[List[FunctionTool]] = None,
+        external_tools: Optional[List[FunctionTool]] = None,
         response_terminators: Optional[List[ResponseTerminator]] = None,
     ) -> None:
-
-
-
-
+        if isinstance(system_message, str):
+            system_message = BaseMessage.make_assistant_message(
+                role_name='Assistant', content=system_message
+            )
+
+        self.orig_sys_message: Optional[BaseMessage] = system_message
+        self._system_message: Optional[BaseMessage] = system_message
+        self.role_name: str = (
+            getattr(system_message, 'role_name', None) or "assistant"
+        )
+        self.role_type: RoleType = (
+            getattr(system_message, 'role_type', None) or RoleType.ASSISTANT
+        )
         self.model_backend: BaseModelBackend = (
             model
             if model is not None
             else ModelFactory.create(
-                model_platform=ModelPlatformType.
-                model_type=ModelType.
-                model_config_dict=ChatGPTConfig().as_dict(),
+                model_platform=ModelPlatformType.DEFAULT,
+                model_type=ModelType.DEFAULT,
             )
         )
         self.output_language: Optional[str] = output_language
         if self.output_language is not None:
             self.set_output_language(self.output_language)
 
-        self.model_type
+        self.model_type = self.model_backend.model_type
 
         # tool registration
         external_tools = external_tools or []
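A note on the constructor change above: every argument is now optional. When `system_message` is omitted, the `getattr(...) or ...` fallbacks still give the agent a usable identity. A sketch, assuming only the defaults shown in this hunk:

    from camel.agents import ChatAgent

    agent = ChatAgent()          # no system message at all
    print(agent.role_name)       # "assistant" (fallback)
    print(agent.system_message)  # None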
@@ -185,12 +196,12 @@ class ChatAgent(BaseAgent):
         # the tools set from `ChatAgent` will be used.
         # This design simplifies the interface while retaining tool-running
         # capabilities for `BaseModelBackend`.
-        if all_tools and not self.model_backend.model_config_dict
+        if all_tools and not self.model_backend.model_config_dict.get("tools"):
             tool_schema_list = [
                 tool.get_openai_tool_schema() for tool in all_tools
             ]
             self.model_backend.model_config_dict['tools'] = tool_schema_list
-
+            self.tool_schema_list = tool_schema_list
         self.model_config_dict = self.model_backend.model_config_dict
 
         self.model_token_limit = token_limit or self.model_backend.token_limit
@@ -206,6 +217,56 @@ class ChatAgent(BaseAgent):
         self.response_terminators = response_terminators or []
         self.init_messages()
 
+    # ruff: noqa: E501
+    def _generate_tool_prompt(self, tool_schema_list: List[Dict]) -> str:
+        tool_prompts = []
+
+        for tool in tool_schema_list:
+            tool_info = tool['function']
+            tool_name = tool_info['name']
+            tool_description = tool_info['description']
+            tool_json = json.dumps(tool_info, indent=4)
+
+            prompt = f"Use the function '{tool_name}' to '{tool_description}':\n{tool_json}\n"
+            tool_prompts.append(prompt)
+
+        tool_prompt_str = "\n".join(tool_prompts)
+
+        final_prompt = f'''
+# Tool prompt
+TOOL_PROMPT = f"""
+You have access to the following functions:
+
+{tool_prompt_str}
+
+If you choose to call a function ONLY reply in the following format with no prefix or suffix:
+
+<function=example_function_name>{{"example_name": "example_value"}}</function>
+
+Reminder:
+- Function calls MUST follow the specified format, start with <function= and end with </function>
+- Required parameters MUST be specified
+- Only call one function at a time
+- Put the entire function call reply on one line
+- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls
+"""
+'''
+        return final_prompt
+
+    def _parse_tool_response(self, response: str):
+        function_regex = r"<function=(\w+)>(.*?)</function>"
+        match = re.search(function_regex, response)
+
+        if match:
+            function_name, args_string = match.groups()
+            try:
+                args = json.loads(args_string)
+                return {"function": function_name, "arguments": args}
+            except json.JSONDecodeError as error:
+                print(f"Error parsing function arguments: {error}")
+                return None
+        return None
+
     def reset(self):
         r"""Resets the :obj:`ChatAgent` to its initial state and returns the
         stored messages.
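The two helpers added above implement prompt-based tool calling for models without native function calling: `_generate_tool_prompt` writes each tool schema into a system prompt, and `_parse_tool_response` extracts `<function=...>` replies. A standalone replica of the parsing step, for illustration only (`get_weather` is a made-up example tool):

    import json
    import re

    def parse_tool_response(response: str):
        # Same regex as _parse_tool_response above.
        match = re.search(r"<function=(\w+)>(.*?)</function>", response)
        if match:
            name, args_string = match.groups()
            try:
                return {"function": name, "arguments": json.loads(args_string)}
            except json.JSONDecodeError:
                return None
        return None

    reply = '<function=get_weather>{"city": "Paris"}</function>'
    print(parse_tool_response(reply))
    # {'function': 'get_weather', 'arguments': {'city': 'Paris'}}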
@@ -219,11 +280,12 @@ class ChatAgent(BaseAgent):
             terminator.reset()
 
     @property
-    def system_message(self) -> BaseMessage:
+    def system_message(self) -> Optional[BaseMessage]:
         r"""The getter method for the property :obj:`system_message`.
 
         Returns:
-            BaseMessage: The system message of this agent
+            Optional[BaseMessage]: The system message of this agent if set,
+                else :obj:`None`.
         """
         return self._system_message
 
@@ -274,12 +336,22 @@ class ChatAgent(BaseAgent):
             BaseMessage: The updated system message object.
         """
         self.output_language = output_language
-
+        language_prompt = (
             "\nRegardless of the input language, "
             f"you must output text in {output_language}."
         )
-        self.
-
+        if self.orig_sys_message is not None:
+            content = self.orig_sys_message.content + language_prompt
+            self._system_message = self.orig_sys_message.create_new_instance(
+                content
+            )
+            return self._system_message
+        else:
+            self._system_message = BaseMessage.make_assistant_message(
+                role_name="Assistant",
+                content=language_prompt,
+            )
+            return self._system_message
 
     def get_info(
         self,
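`set_output_language` now also works without an original system message: it either appends the language instruction to a copy of the existing system message or synthesizes a new one. A sketch of the first case (output abridged):

    from camel.agents import ChatAgent

    agent = ChatAgent("You are a translator.")
    agent.set_output_language("German")
    print(agent.system_message.content)
    # You are a translator.
    # Regardless of the input language, you must output text in German.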
@@ -324,12 +396,15 @@ class ChatAgent(BaseAgent):
         r"""Initializes the stored messages list with the initial system
         message.
         """
-
-
-
-
-
-
+        if self.orig_sys_message is not None:
+            system_record = MemoryRecord(
+                message=self.orig_sys_message,
+                role_at_backend=OpenAIBackendRole.SYSTEM,
+            )
+            self.memory.clear()
+            self.memory.write_record(system_record)
+        else:
+            self.memory.clear()
 
     def record_message(self, message: BaseMessage) -> None:
         r"""Records the externally provided message into the agent memory as if
@@ -344,19 +419,19 @@ class ChatAgent(BaseAgent):
 
     def step(
         self,
-        input_message: BaseMessage,
-
+        input_message: Union[BaseMessage, str],
+        response_format: Optional[Type[BaseModel]] = None,
     ) -> ChatAgentResponse:
         r"""Performs a single step in the chat session by generating a response
         to the input message.
 
         Args:
-            input_message (BaseMessage): The input message to the
-
-                either `user` or `assistant` but it
-                anyway since for the self agent any
-                external.
-
+            input_message (Union[BaseMessage, str]): The input message to the
+                agent. For BaseMessage input, its `role` field that specifies
+                the role at backend may be either `user` or `assistant` but it
+                will be set to `user` anyway since for the self agent any
+                incoming message is external. For str input, the `role_name` would be `User`.
+            response_format (Optional[Type[BaseModel]], optional): A pydantic
             model class that includes value types and field descriptions
             used to generate a structured response by LLM. This schema
             helps in defining the expected output format. (default:
@@ -367,104 +442,233 @@ class ChatAgent(BaseAgent):
             a boolean indicating whether the chat session has terminated,
             and information about the chat session.
         """
-
+        if isinstance(input_message, str):
+            input_message = BaseMessage.make_user_message(
+                role_name='User', content=input_message
+            )
 
-
-
-
-
-
-
-
-                    e.args[1], tool_call_records, "max_tokens_exceeded"
+        if "llama" in self.model_type.lower():
+            if self.model_backend.model_config_dict.get("tools", None):
+                tool_prompt = self._generate_tool_prompt(self.tool_schema_list)
+
+                tool_sys_msg = BaseMessage.make_assistant_message(
+                    role_name="Assistant",
+                    content=tool_prompt,
                 )
 
-
-
+                self.update_memory(tool_sys_msg, OpenAIBackendRole.SYSTEM)
+
+            self.update_memory(input_message, OpenAIBackendRole.USER)
+
+            tool_call_records: List[FunctionCallingRecord] = []
+            while True:
+                # Check if token has exceeded
+                try:
+                    openai_messages, num_tokens = self.memory.get_context()
+                except RuntimeError as e:
+                    return self._step_token_exceed(
+                        e.args[1], tool_call_records, "max_tokens_exceeded"
+                    )
+
+                (
+                    response,
+                    output_messages,
+                    finish_reasons,
+                    usage_dict,
+                    response_id,
+                ) = self._step_model_response(openai_messages, num_tokens)
+                # If the model response is not a function call, meaning the
+                # model has generated a message response, break the loop
+                if (
+                    not self.is_tools_added()
+                    or not isinstance(response, ChatCompletion)
+                    or "</function>" not in response.choices[0].message.content  # type: ignore[operator]
+                ):
+                    break
+
+                parsed_content = self._parse_tool_response(
+                    response.choices[0].message.content  # type: ignore[arg-type]
+                )
+
+                response.choices[0].message.tool_calls = [
+                    ChatCompletionMessageToolCall(
+                        id=str(uuid.uuid4()),
+                        function=Function(
+                            arguments=str(parsed_content["arguments"]).replace(
+                                "'", '"'
+                            ),
+                            name=str(parsed_content["function"]),
+                        ),
+                        type="function",
+                    )
+                ]
+
+                # Check for external tool call
+                tool_call_request = response.choices[0].message.tool_calls[0]
+                if tool_call_request.function.name in self.external_tool_names:
+                    # if model calls an external tool, directly return the
+                    # request
+                    info = self._step_get_info(
+                        output_messages,
+                        finish_reasons,
+                        usage_dict,
+                        response_id,
+                        tool_call_records,
+                        num_tokens,
+                        tool_call_request,
+                    )
+                    return ChatAgentResponse(
+                        msgs=output_messages,
+                        terminated=self.terminated,
+                        info=info,
+                    )
+
+                # Normal function calling
+                tool_call_records.append(
+                    self._step_tool_call_and_update(response)
+                )
+
+            if response_format is not None:
+                (
+                    output_messages,
+                    finish_reasons,
+                    usage_dict,
+                    response_id,
+                    tool_call,
+                    num_tokens,
+                ) = self._structure_output_with_function(response_format)
+                tool_call_records.append(tool_call)
+
+            info = self._step_get_info(
                 output_messages,
                 finish_reasons,
                 usage_dict,
                 response_id,
-
+                tool_call_records,
+                num_tokens,
+            )
 
-
-
-
-
-
-
-
-
+            if len(output_messages) == 1:
+                # Auto record if the output result is a single message
+                self.record_message(output_messages[0])
+            else:
+                logger.warning(
+                    "Multiple messages returned in `step()`, message won't be "
+                    "recorded automatically. Please call `record_message()` "
+                    "to record the selected message manually."
+                )
 
-
-
-
-
+            return ChatAgentResponse(
+                msgs=output_messages, terminated=self.terminated, info=info
+            )
+
+        else:
+            self.update_memory(input_message, OpenAIBackendRole.USER)
+
+            tool_call_records: List[FunctionCallingRecord] = []  # type: ignore[no-redef]
+            while True:
+                # Check if token has exceeded
+                try:
+                    openai_messages, num_tokens = self.memory.get_context()
+                except RuntimeError as e:
+                    return self._step_token_exceed(
+                        e.args[1], tool_call_records, "max_tokens_exceeded"
+                    )
+
+                (
+                    response,
                     output_messages,
                     finish_reasons,
                     usage_dict,
                     response_id,
-
-
-
-
-
-
+                ) = self._step_model_response(openai_messages, num_tokens)
+                # If the model response is not a function call, meaning the
+                # model has generated a message response, break the loop
+                if (
+                    not self.is_tools_added()
+                    or not isinstance(response, ChatCompletion)
+                    or response.choices[0].message.tool_calls is None
+                ):
+                    break
+
+                # Check for external tool call
+                tool_call_request = response.choices[0].message.tool_calls[0]
+
+                if tool_call_request.function.name in self.external_tool_names:
+                    # if model calls an external tool, directly return the
+                    # request
+                    info = self._step_get_info(
+                        output_messages,
+                        finish_reasons,
+                        usage_dict,
+                        response_id,
+                        tool_call_records,
+                        num_tokens,
+                        tool_call_request,
+                    )
+                    return ChatAgentResponse(
+                        msgs=output_messages,
+                        terminated=self.terminated,
+                        info=info,
+                    )
+
+                # Normal function calling
+                tool_call_records.append(
+                    self._step_tool_call_and_update(response)
                 )
 
-
-
+            if (
+                response_format is not None
+                and self.model_type.support_native_tool_calling
+            ):
+                (
+                    output_messages,
+                    finish_reasons,
+                    usage_dict,
+                    response_id,
+                    tool_call,
+                    num_tokens,
+                ) = self._structure_output_with_function(response_format)
+                tool_call_records.append(tool_call)
 
-
-            (
+            info = self._step_get_info(
                 output_messages,
                 finish_reasons,
                 usage_dict,
                 response_id,
-
+                tool_call_records,
                 num_tokens,
-            )
-            tool_call_records.append(tool_call)
+            )
 
-
-
-
-
-
-
-
-
+            if len(output_messages) == 1:
+                # Auto record if the output result is a single message
+                self.record_message(output_messages[0])
+            else:
+                logger.warning(
+                    "Multiple messages returned in `step()`, message won't be "
+                    "recorded automatically. Please call `record_message()` "
+                    "to record the selected message manually."
+                )
 
-
-
-            self.record_message(output_messages[0])
-        else:
-            logger.warning(
-                "Multiple messages returned in `step()`, message won't be "
-                "recorded automatically. Please call `record_message()` to "
-                "record the selected message manually."
+            return ChatAgentResponse(
+                msgs=output_messages, terminated=self.terminated, info=info
             )
 
-        return ChatAgentResponse(
-            msgs=output_messages, terminated=self.terminated, info=info
-        )
-
     async def step_async(
         self,
-        input_message: BaseMessage,
-
+        input_message: Union[BaseMessage, str],
+        response_format: Optional[Type[BaseModel]] = None,
     ) -> ChatAgentResponse:
         r"""Performs a single step in the chat session by generating a response
         to the input message. This agent step can call async function calls.
 
         Args:
-            input_message (BaseMessage): The input message to the
-
-                either `user` or `assistant` but it
-                anyway since for the self agent any
-                external.
-
+            input_message (Union[BaseMessage, str]): The input message to the
+                agent. For BaseMessage input, its `role` field that specifies
+                the role at backend may be either `user` or `assistant` but it
+                will be set to `user` anyway since for the self agent any
+                incoming message is external. For str input, the `role_name` would be `User`.
+            response_format (Optional[Type[BaseModel]], optional): A pydantic
             model class that includes value types and field descriptions
             used to generate a structured response by LLM. This schema
             helps in defining the expected output format. (default:
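The rewritten `step()` above splits into two paths: a prompt-based tool-calling loop for model types containing "llama" (reusing the `<function=...>` protocol and converting matches into `ChatCompletionMessageToolCall` objects), and a native tool-calling loop for everything else; in both paths, a call to an external tool short-circuits and returns the raw request. A sketch of the new `response_format` parameter in use (`CityInfo` is a hypothetical schema):

    from pydantic import BaseModel

    from camel.agents import ChatAgent

    class CityInfo(BaseModel):
        city: str
        population: int

    agent = ChatAgent("You are a geography assistant.")
    response = agent.step(
        "What is the population of Paris?", response_format=CityInfo
    )
    print(response.msgs[0].content)  # structured via a generated function call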
@@ -475,6 +679,11 @@ class ChatAgent(BaseAgent):
             a boolean indicating whether the chat session has terminated,
             and information about the chat session.
         """
+        if isinstance(input_message, str):
+            input_message = BaseMessage.make_user_message(
+                role_name='User', content=input_message
+            )
+
         self.update_memory(input_message, OpenAIBackendRole.USER)
 
         tool_call_records: List[FunctionCallingRecord] = []
@@ -523,7 +732,10 @@ class ChatAgent(BaseAgent):
                 await self._step_tool_call_and_update_async(response)
             )
 
-        if
+        if (
+            response_format is not None
+            and self.model_type.support_native_tool_calling
+        ):
             (
                 output_messages,
                 finish_reasons,
@@ -531,7 +743,7 @@ class ChatAgent(BaseAgent):
                 response_id,
                 tool_call_record,
                 num_tokens,
-            ) = self._structure_output_with_function(
+            ) = self._structure_output_with_function(response_format)
             tool_call_records.append(tool_call_record)
 
             info = self._step_get_info(
@@ -598,7 +810,7 @@ class ChatAgent(BaseAgent):
         return func_record
 
     def _structure_output_with_function(
-        self,
+        self, response_format: Type[BaseModel]
     ) -> Tuple[
         List[BaseMessage],
         List[str],
@@ -610,21 +822,23 @@ class ChatAgent(BaseAgent):
         r"""Internal function of structuring the output of the agent based on
         the given output schema.
         """
-        from camel.toolkits import OpenAIFunction
+        from camel.toolkits import FunctionTool
 
-        schema_json = get_pydantic_object_schema(
+        schema_json = get_pydantic_object_schema(response_format)
         func_str = json_to_function_code(schema_json)
         func_callable = func_string_to_callable(func_str)
-        func = OpenAIFunction(func_callable)
+        func = FunctionTool(func_callable)
 
         original_func_dict = self.func_dict
         original_model_dict = self.model_backend.model_config_dict
 
         # Replace the original tools with the structuring function
         self.func_dict = {func.get_function_name(): func.func}
+        self.model_backend.model_config_dict = original_model_dict.copy()
         self.model_backend.model_config_dict["tools"] = [
             func.get_openai_tool_schema()
         ]
+        self.model_backend.model_config_dict["tool_choice"] = "required"
 
         openai_messages, num_tokens = self.memory.get_context()
         (
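Two fixes stand out in `_structure_output_with_function` above: the model config is now copied before being mutated (so the caller's original dict can be restored intact), and `tool_choice` is pinned to "required" so the model must call the generated structuring function. The copy-before-mutate pattern in isolation, with placeholder values:

    # Work on a copy so the original config survives the temporary swap.
    original = {"temperature": 0.7, "tools": None}

    working = original.copy()
    working["tools"] = ["<generated structuring-function schema>"]
    working["tool_choice"] = "required"  # model must emit a tool call

    # ... run the structured-output request with `working` ...
    assert original == {"temperature": 0.7, "tools": None}  # untouched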
@@ -987,10 +1201,7 @@ class ChatAgent(BaseAgent):
         Returns:
             dict: Usage dictionary.
         """
-
-            encoding = get_model_encoding(self.model_type.value_for_tiktoken)
-        else:
-            encoding = get_model_encoding("gpt-4o-mini")
+        encoding = get_model_encoding(self.model_type.value_for_tiktoken)
         completion_tokens = 0
         for message in output_messages:
             completion_tokens += len(encoding.encode(message.content))
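The hunk above drops the old model-type branch: completion tokens are now always counted with the encoding derived from `self.model_type.value_for_tiktoken`. A rough standalone equivalent using tiktoken directly (the encoding name here is just an example):

    import tiktoken

    encoding = tiktoken.get_encoding("cl100k_base")
    outputs = ["Hello!", "How can I help you today?"]
    completion_tokens = sum(len(encoding.encode(text)) for text in outputs)
    print(completion_tokens)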