camel-ai 0.1.7.2__py3-none-any.whl → 0.1.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic; see the registry page for details.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +223 -244
- camel/types/enums.py +4 -0
- camel/utils/commons.py +6 -5
- camel/utils/constants.py +1 -1
- {camel_ai-0.1.7.2.dist-info → camel_ai-0.1.8.dist-info}/METADATA +2 -2
- {camel_ai-0.1.7.2.dist-info → camel_ai-0.1.8.dist-info}/RECORD +8 -8
- {camel_ai-0.1.7.2.dist-info → camel_ai-0.1.8.dist-info}/WHEEL +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -19,14 +19,15 @@ from collections import defaultdict
 from typing import (
     TYPE_CHECKING,
     Any,
-    Callable,
     Dict,
     List,
     Optional,
     Tuple,
+    Type,
     Union,
 )

+from openai.types.chat import ChatCompletionMessageToolCall
 from pydantic import BaseModel

 from camel.agents.base import BaseAgent
@@ -49,7 +50,6 @@ from camel.types import (
     RoleType,
 )
 from camel.utils import (
-    Constants,
     func_string_to_callable,
     get_model_encoding,
     get_pydantic_object_schema,
@@ -130,6 +130,10 @@ class ChatAgent(BaseAgent):
             agent. (default: :obj:`None`)
         tools (List[OpenAIFunction], optional): List of available
             :obj:`OpenAIFunction`. (default: :obj:`None`)
+        external_tools (List[OpenAIFunction], optional): List of external tools
+            (:obj:`OpenAIFunction`) bind to one chat agent. When these tools
+            are called, the agent will directly return the request instead of
+            processing it. (default: :obj:`None`)
         response_terminators (List[ResponseTerminator], optional): List of
             :obj:`ResponseTerminator` bind to one chat agent.
             (default: :obj:`None`)
@@ -144,6 +148,7 @@ class ChatAgent(BaseAgent):
         token_limit: Optional[int] = None,
         output_language: Optional[str] = None,
         tools: Optional[List[OpenAIFunction]] = None,
+        external_tools: Optional[List[OpenAIFunction]] = None,
         response_terminators: Optional[List[ResponseTerminator]] = None,
     ) -> None:
         self.orig_sys_message: BaseMessage = system_message
@@ -165,10 +170,16 @@ class ChatAgent(BaseAgent):

         self.model_type: ModelType = self.model_backend.model_type

-
-
-
-
+        # tool registration
+        external_tools = external_tools or []
+        tools = tools or []
+        all_tools = tools + external_tools
+        self.external_tool_names = [
+            tool.get_function_name() for tool in external_tools
+        ]
+        self.func_dict = {
+            tool.get_function_name(): tool.func for tool in all_tools
+        }

         self.model_config_dict = self.model_backend.model_config_dict

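The registration above folds internal and external tools into a single `func_dict`, while `external_tool_names` records which calls should be handed back to the caller instead of executed. A minimal construction sketch, assuming the message helper and the `add`/`search_web` functions below, which are illustrative and not part of this diff:

from camel.agents import ChatAgent
from camel.messages import BaseMessage
from camel.toolkits import OpenAIFunction


def add(a: int, b: int) -> int:
    r"""Adds two numbers.

    Args:
        a (int): The first number to add.
        b (int): The second number to add.
    """
    return a + b


def search_web(query: str) -> str:
    r"""Searches the web for a query.

    Args:
        query (str): The query to search for.
    """
    ...  # never runs inside the agent; the caller executes it


agent = ChatAgent(
    system_message=BaseMessage.make_assistant_message(
        role_name="Assistant", content="You are a helpful assistant."
    ),
    tools=[OpenAIFunction(add)],  # executed by the agent itself
    external_tools=[OpenAIFunction(search_web)],  # request returned to caller
)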
@@ -262,34 +273,41 @@ class ChatAgent(BaseAgent):

     def get_info(
         self,
-
+        session_id: Optional[str],
         usage: Optional[Dict[str, int]],
         termination_reasons: List[str],
         num_tokens: int,
         tool_calls: List[FunctionCallingRecord],
+        external_tool_request: Optional[ChatCompletionMessageToolCall] = None,
     ) -> Dict[str, Any]:
         r"""Returns a dictionary containing information about the chat session.

         Args:
-
+            session_id (str, optional): The ID of the chat session.
             usage (Dict[str, int], optional): Information about the usage of
                 the LLM model.
             termination_reasons (List[str]): The reasons for the termination
                 of the chat session.
             num_tokens (int): The number of tokens used in the chat session.
             tool_calls (List[FunctionCallingRecord]): The list of function
-                calling records, containing the information of called
-
+                calling records, containing the information of called tools.
+            external_tool_request
+                (Optional[ChatCompletionMessageToolCall], optional):
+                The tool calling request of external tools from the model.
+                These requests are directly returned to the user instead of
+                being processed by the agent automatically.
+                (default: :obj:`None`)

         Returns:
             Dict[str, Any]: The chat session information.
         """
         return {
-            "id":
+            "id": session_id,
             "usage": usage,
             "termination_reasons": termination_reasons,
             "num_tokens": num_tokens,
             "tool_calls": tool_calls,
+            "external_tool_request": external_tool_request,
         }

     def init_messages(self) -> None:
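On the caller side, an external tool request surfaces in the response's `info` dict rather than being executed. A sketch of handling it, continuing the construction example above (the `request.function.arguments` payload shape is the standard one for `ChatCompletionMessageToolCall`):

import json

user_msg = BaseMessage.make_user_message(
    role_name="User", content="Search the web for CAMEL-AI."
)
response = agent.step(user_msg)

external_request = response.info.get("external_tool_request")
if external_request is not None:
    # The agent stopped instead of executing the tool; run it here.
    args = json.loads(external_request.function.arguments)
    result = search_web(**args)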
@@ -317,7 +335,7 @@ class ChatAgent(BaseAgent):
     def step(
         self,
         input_message: BaseMessage,
-        output_schema: Optional[BaseModel] = None,
+        output_schema: Optional[Type[BaseModel]] = None,
     ) -> ChatAgentResponse:
         r"""Performs a single step in the chat session by generating a response
         to the input message.
@@ -328,10 +346,11 @@ class ChatAgent(BaseAgent):
                 either `user` or `assistant` but it will be set to `user`
                 anyway since for the self agent any incoming message is
                 external.
-            output_schema (Optional[BaseModel]):
-                that includes value types and field descriptions
-                generate a structured response by LLM. This schema
-                in defining the expected output format.
+            output_schema (Optional[Type[BaseModel]], optional): A pydantic
+                model class that includes value types and field descriptions
+                used to generate a structured response by LLM. This schema
+                helps in defining the expected output format. (default:
+                :obj:`None`)

         Returns:
             ChatAgentResponse: A struct containing the output messages,
@@ -340,27 +359,15 @@ class ChatAgent(BaseAgent):
         """
         self.update_memory(input_message, OpenAIBackendRole.USER)

-
-        info: Dict[str, Any]
-        tool_calls: List[FunctionCallingRecord] = []
+        tool_call_records: List[FunctionCallingRecord] = []
         while True:
-            #
-            openai_messages: Optional[List[OpenAIMessage]]
-
+            # Check if token has exceeded
             try:
                 openai_messages, num_tokens = self.memory.get_context()
             except RuntimeError as e:
-                return self.
-                    e.args[1],
+                return self._step_token_exceed(
+                    e.args[1], tool_call_records, "max_tokens_exceeded"
                 )
-            # use structed output response without tools
-            # If the user provides the output_schema parameter and does not
-            # specify the use of tools, then in the model config of the
-            # chatgent, call the model specified by tools with
-            # return_json_response of OpenAIFunction format, and return a
-            # structured response with the user-specified output schema.
-            if output_schema is not None and len(self.func_dict) == 0:
-                self._add_output_schema_to_tool_list(output_schema)

             (
                 response,
@@ -370,97 +377,73 @@ class ChatAgent(BaseAgent):
                 response_id,
             ) = self._step_model_response(openai_messages, num_tokens)

+            # If the model response is not a function call, meaning the model
+            # has generated a message response, break the loop
             if (
-                self.is_tools_added()
-
-
+                not self.is_tools_added()
+                or not isinstance(response, ChatCompletion)
+                or response.choices[0].message.tool_calls is None
             ):
-
-
-                tool_calls, func_assistant_msg, func_result_msg = (
-                    self._add_tools_for_func_call(response, tool_calls)
-                )
-
-                # Update the messages
-                self.update_memory(
-                    func_assistant_msg, OpenAIBackendRole.ASSISTANT
-                )
-                self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)
-
-            else:
-                # If the user specifies tools, it is necessary to wait for the
-                # model to complete all tools' calls. Finally, use the
-                # generated response as the input for the structure,
-                # simultaneously calling the return_json_response function.
-                # Call the model again with return_json_response in the format
-                # of OpenAIFunction as the last tool, returning a structured
-                # response with the user-specified output schema.
-                if output_schema is not None and all(
-                    record.func_name
-                    != Constants.FUNC_NAME_FOR_STRUCTURE_OUTPUT
-                    for record in tool_calls
-                ):
-                    self._add_output_schema_to_tool_list(output_schema)
-
-                    (
-                        response,
-                        output_messages,
-                        finish_reasons,
-                        usage_dict,
-                        response_id,
-                    ) = self._step_model_response(openai_messages, num_tokens)
-
-                if isinstance(response, ChatCompletion):
-                    # Tools added for function calling and not in stream
-                    # mode
-                    tool_calls, func_assistant_msg, func_result_msg = (
-                        self._add_tools_for_func_call(response, tool_calls)
-                    )
-
-                    # Update the messages
-                    self.update_memory(
-                        func_assistant_msg, OpenAIBackendRole.ASSISTANT
-                    )
-                    self.update_memory(
-                        func_result_msg, OpenAIBackendRole.FUNCTION
-                    )
+                break

+            # Check for external tool call
+            tool_call_request = response.choices[0].message.tool_calls[0]
+            if tool_call_request.function.name in self.external_tool_names:
+                # if model calls an external tool, directly return the request
                 info = self._step_get_info(
                     output_messages,
                     finish_reasons,
                     usage_dict,
                     response_id,
-
+                    tool_call_records,
                     num_tokens,
+                    tool_call_request,
+                )
+                return ChatAgentResponse(
+                    msgs=output_messages, terminated=self.terminated, info=info
                 )
-                break

-
-
-            if output_schema and self.model_type.is_openai:
-                for base_message_item in output_messages:
-                    base_message_item.content = str(info['tool_calls'][-1].result)
+            # Normal function calling
+            tool_call_records.append(self._step_tool_call_and_update(response))

-
-
+        if output_schema is not None and self.model_type.supports_tool_calling:
+            (
+                output_messages,
+                finish_reasons,
+                usage_dict,
+                response_id,
+                tool_call,
+                num_tokens,
+            ) = self._structure_output_with_function(output_schema)
+            tool_call_records.append(tool_call)
+
+            info = self._step_get_info(
+                output_messages,
+                finish_reasons,
+                usage_dict,
+                response_id,
+                tool_call_records,
+                num_tokens,
             )

-
-
-
-            self.record_message(chat_agent_response.msg)
+        if len(output_messages) == 1:
+            # Auto record if the output result is a single message
+            self.record_message(output_messages[0])
         else:
             logger.warning(
-                "Multiple messages
-                "
-                "record the selected message."
+                "Multiple messages returned in `step()`, message won't be "
+                "recorded automatically. Please call `record_message()` to "
+                "record the selected message manually."
             )
-
+
+        return ChatAgentResponse(
+            msgs=output_messages, terminated=self.terminated, info=info
+        )

     async def step_async(
         self,
         input_message: BaseMessage,
-        output_schema: Optional[BaseModel] = None,
+        output_schema: Optional[Type[BaseModel]] = None,
     ) -> ChatAgentResponse:
         r"""Performs a single step in the chat session by generating a response
         to the input message. This agent step can call async function calls.
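On the caller side, structured output now rides on the same function-calling machinery: after the tool loop drains, the agent makes one more model call whose only tool returns the schema's JSON. A usage sketch, assuming an OpenAI-backed agent and the invented `JokeResponse` schema:

from pydantic import BaseModel, Field


class JokeResponse(BaseModel):
    joke: str = Field(description="The joke told by the assistant.")
    funny_level: int = Field(description="Funny level from 1 to 10.")


user_msg = BaseMessage.make_user_message(
    role_name="User", content="Tell me a joke."
)
response = agent.step(user_msg, output_schema=JokeResponse)
# The returned message content is the stringified structured result.
print(response.msg.content)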
@@ -471,10 +454,11 @@ class ChatAgent(BaseAgent):
                 either `user` or `assistant` but it will be set to `user`
                 anyway since for the self agent any incoming message is
                 external.
-            output_schema (Optional[BaseModel]):
-                that includes value types and field descriptions
-                generate a structured response by LLM. This schema
-                in defining the expected output format.
+            output_schema (Optional[Type[BaseModel]], optional): A pydantic
+                model class that includes value types and field descriptions
+                used to generate a structured response by LLM. This schema
+                helps in defining the expected output format. (default:
+                :obj:`None`)

         Returns:
             ChatAgentResponse: A struct containing the output messages,
@@ -483,21 +467,14 @@ class ChatAgent(BaseAgent):
         """
         self.update_memory(input_message, OpenAIBackendRole.USER)

-
-        info: Dict[str, Any]
-        tool_calls: List[FunctionCallingRecord] = []
+        tool_call_records: List[FunctionCallingRecord] = []
         while True:
-            # Format messages and get the token number
-            openai_messages: Optional[List[OpenAIMessage]]
-
             try:
                 openai_messages, num_tokens = self.memory.get_context()
             except RuntimeError as e:
-                return self.
-                    e.args[1],
+                return self._step_token_exceed(
+                    e.args[1], tool_call_records, "max_tokens_exceeded"
                 )
-            if output_schema is not None:
-                self._add_output_schema_to_tool_list(output_schema)

             (
                 response,
@@ -508,168 +485,168 @@ class ChatAgent(BaseAgent):
             ) = self._step_model_response(openai_messages, num_tokens)

             if (
-                self.is_tools_added()
-
-
+                not self.is_tools_added()
+                or not isinstance(response, ChatCompletion)
+                or response.choices[0].message.tool_calls is None
             ):
-
-
-
-
-
-
-                    func_record,
-                ) = await self.step_tool_call_async(response)
-
-                # Update the messages
-                self.update_memory(
-                    func_assistant_msg, OpenAIBackendRole.ASSISTANT
-                )
-                self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)
-
-                # Record the function calling
-                tool_calls.append(func_record)
-
-            else:
-                # use structed output response without tools
-                if output_schema is not None and all(
-                    record.func_name
-                    != Constants.FUNC_NAME_FOR_STRUCTURE_OUTPUT
-                    for record in tool_calls
-                ):
-                    self._add_output_schema_to_tool_list(output_schema)
-
-                    (
-                        response,
-                        output_messages,
-                        finish_reasons,
-                        usage_dict,
-                        response_id,
-                    ) = self._step_model_response(openai_messages, num_tokens)
-
-                if isinstance(response, ChatCompletion):
-                    # Tools added for function calling and not in stream
-                    # mode
-                    tool_calls, func_assistant_msg, func_result_msg = (
-                        self._add_tools_for_func_call(response, tool_calls)
-                    )
-
-                    # Update the messages
-                    self.update_memory(
-                        func_assistant_msg, OpenAIBackendRole.ASSISTANT
-                    )
-                    self.update_memory(
-                        func_result_msg, OpenAIBackendRole.FUNCTION
-                    )
-
-                # Function calling disabled or not a function calling
+                break
+
+            # Check for external tool call
+            tool_call_request = response.choices[0].message.tool_calls[0]
+            if tool_call_request.function.name in self.external_tool_names:
+                # if model calls an external tool, directly return the request
                 info = self._step_get_info(
                     output_messages,
                     finish_reasons,
                     usage_dict,
                     response_id,
-
+                    tool_call_records,
                     num_tokens,
+                    tool_call_request,
+                )
+                return ChatAgentResponse(
+                    msgs=output_messages, terminated=self.terminated, info=info
                 )
-                break

-
-
-
-
-                    base_message_item.content = str(info['tool_calls'][0].result)
+            # Normal function calling
+            tool_call_records.append(
+                await self._step_tool_call_and_update_async(response)
+            )

-
-
+        if output_schema is not None and self.model_type.supports_tool_calling:
+            (
+                output_messages,
+                finish_reasons,
+                usage_dict,
+                response_id,
+                tool_call_record,
+                num_tokens,
+            ) = self._structure_output_with_function(output_schema)
+            tool_call_records.append(tool_call_record)
+
+            info = self._step_get_info(
+                output_messages,
+                finish_reasons,
+                usage_dict,
+                response_id,
+                tool_call_records,
+                num_tokens,
             )

-
-
-
-            self.record_message(chat_agent_response.msg)
+        if len(output_messages) == 1:
+            # Auto record if the output result is a single message
+            self.record_message(output_messages[0])
         else:
             logger.warning(
-                "Multiple messages
-                "Please
-                "record the
+                "Multiple messages returned in `step()`, message won't be "
+                "recorded automatically. Please call `record_message()` to "
+                "record the selected message manually."
             )

-            return
+        return ChatAgentResponse(
+            msgs=output_messages, terminated=self.terminated, info=info
+        )
+
+    def _step_tool_call_and_update(
+        self, response: ChatCompletion
+    ) -> FunctionCallingRecord:
+        r"""Processes a function call within the chat completion response,
+        records the function call in the provided list of tool calls and
+        updates the memory of the current agent.

-    def _add_tools_for_func_call(
-        self,
-        response: ChatCompletion,
-        tool_calls: List[FunctionCallingRecord],
-    ) -> tuple[
-        List[FunctionCallingRecord],
-        FunctionCallingMessage,
-        FunctionCallingMessage,
-    ]:
-        r"""
-        Handles adding tools for function calls based on the response.
-        This method processes a function call within the chat completion
-        response, and records the function call in the provided
-        list of tool calls.
         Args:
             response (ChatCompletion): The response object from the chat
                 completion.
-
-                function calls.
+
         Returns:
-
-            - List[FunctionCallingRecord]: The updated list of function
-                call records.
-            - FunctionCallingMessage: The assistant's message regarding the
-                function call.
-            - FunctionCallingMessage: The result message of the function
-                call.
+            FunctionCallingRecord: The record of calling the function.
         """

         # Perform function calling
-        func_assistant_msg, func_result_msg,
-            response
+        func_assistant_msg, func_result_msg, tool_call_record = (
+            self.step_tool_call(response)
         )

-        #
-
+        # Update the messages
+        self.update_memory(func_assistant_msg, OpenAIBackendRole.ASSISTANT)
+        self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)

-
-        # result message
-        return tool_calls, func_assistant_msg, func_result_msg
+        return tool_call_record

-    def
-
-
-
-
-
-
+    async def _step_tool_call_and_update_async(
+        self, response: ChatCompletion
+    ) -> FunctionCallingRecord:
+        (
+            func_assistant_msg,
+            func_result_msg,
+            func_record,
+        ) = await self.step_tool_call_async(response)
+
+        self.update_memory(func_assistant_msg, OpenAIBackendRole.ASSISTANT)
+        self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)
+
+        return func_record
+
+    def _structure_output_with_function(
+        self, output_schema: Type[BaseModel]
+    ) -> Tuple[
+        List[BaseMessage],
+        List[str],
+        Dict[str, int],
+        str,
+        FunctionCallingRecord,
+        int,
+    ]:
+        r"""Internal function of structuring the output of the agent based on
+        the given output schema.
         """
         from camel.toolkits import OpenAIFunction

-        # step 1 extract the output_schema info as json.
         schema_json = get_pydantic_object_schema(output_schema)
-
-        # step 2 convert output schema json as callable string
         func_str = json_to_function_code(schema_json)
-
-        # step 3 get callable function from string
         func_callable = func_string_to_callable(func_str)
-
-        # step 4 add return_json_func into tools
         func = OpenAIFunction(func_callable)
-
-
-
-
-
-
-
-
-
-
-
-
+
+        original_func_dict = self.func_dict
+        original_model_dict = self.model_backend.model_config_dict
+
+        # Replace the original tools with the structuring function
+        self.func_dict = {func.get_function_name(): func.func}
+        self.model_backend.model_config_dict["tools"] = [
+            func.get_openai_tool_schema()
+        ]
+
+        openai_messages, num_tokens = self.memory.get_context()
+        (
+            response,
+            output_messages,
+            finish_reasons,
+            usage_dict,
+            response_id,
+        ) = self._step_model_response(openai_messages, num_tokens)
+
+        if isinstance(response, ChatCompletion):
+            tool_call_record = self._step_tool_call_and_update(response)
+        else:
+            raise ValueError(
+                "Structured output is not supported for stream responses."
+            )
+
+        for base_message_item in output_messages:
+            base_message_item.content = str(tool_call_record.result)
+
+        # Recover the original tools
+        self.func_dict = original_func_dict
+        self.model_backend.model_config_dict = original_model_dict
+
+        return (
+            output_messages,
+            finish_reasons,
+            usage_dict,
+            response_id,
+            tool_call_record,
+            num_tokens,
+        )

     def _step_model_response(
         self,
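Each executed call, including the final structuring call, is appended to `tool_call_records` and surfaces as `info["tool_calls"]`. Continuing the sketches above, and assuming only the `func_name` and `result` fields that this diff itself relies on, a caller can audit what ran:

for record in response.info["tool_calls"]:
    # func_name and result are the FunctionCallingRecord fields used in
    # this diff; other fields are not shown here.
    print(f"{record.func_name} -> {record.result}")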
@@ -710,6 +687,7 @@ class ChatAgent(BaseAgent):
         response_id: str,
         tool_calls: List[FunctionCallingRecord],
         num_tokens: int,
+        external_tool_request: Optional[ChatCompletionMessageToolCall] = None,
     ) -> Dict[str, Any]:
         # Loop over responses terminators, get list of termination
         # tuples with whether the terminator terminates the agent
@@ -737,6 +715,7 @@ class ChatAgent(BaseAgent):
             finish_reasons,
             num_tokens,
             tool_calls,
+            external_tool_request,
         )
         return info

@@ -830,7 +809,7 @@ class ChatAgent(BaseAgent):
         usage_dict = self.get_usage_dict(output_messages, prompt_tokens)
         return output_messages, finish_reasons, usage_dict, response_id

-    def
+    def _step_token_exceed(
         self,
         num_tokens: int,
         tool_calls: List[FunctionCallingRecord],
camel/types/enums.py
CHANGED
@@ -92,6 +92,10 @@ class ModelType(Enum):
             return self.value
         return "gpt-4o-mini"

+    @property
+    def supports_tool_calling(self) -> bool:
+        return any([self.is_openai, self.is_gemini, self.is_mistral])
+
     @property
     def is_openai(self) -> bool:
         r"""Returns whether this type of models is an OpenAI-released model."""
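This property replaces the `is_openai`-only check that `step()` previously used for structured output, widening it to Gemini and Mistral backends. A quick illustration, assuming `GPT_4O_MINI` is a `ModelType` member as the default value in this hunk suggests:

from camel.types import ModelType

print(ModelType.GPT_4O_MINI.supports_tool_calling)  # True for OpenAI models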
camel/utils/commons.py
CHANGED
@@ -29,6 +29,7 @@ from typing import (
     Mapping,
     Optional,
     Set,
+    Type,
     TypeVar,
     cast,
 )
@@ -329,12 +330,12 @@ def get_pydantic_major_version() -> int:
         return 0


-def get_pydantic_object_schema(pydantic_params: BaseModel) -> Dict:
+def get_pydantic_object_schema(pydantic_params: Type[BaseModel]) -> Dict:
     r"""Get the JSON schema of a Pydantic model.

     Args:
-        pydantic_params (BaseModel): The Pydantic model to retrieve
-            for.
+        pydantic_params (Type[BaseModel]): The Pydantic model class to retrieve
+            the schema for.

     Returns:
         dict: The JSON schema of the Pydantic model.
@@ -354,7 +355,7 @@ def func_string_to_callable(code: str):
     """
     local_vars: Mapping[str, object] = {}
     exec(code, globals(), local_vars)
-    func = local_vars.get(Constants.
+    func = local_vars.get(Constants.FUNC_NAME_FOR_STRUCTURED_OUTPUT)
     return func

@@ -397,7 +398,7 @@ def json_to_function_code(json_obj: Dict) -> str:

     # function template
     function_code = f'''
-def {Constants.
+def {Constants.FUNC_NAME_FOR_STRUCTURED_OUTPUT}({args_str}):
    r"""Return response with a specified json format.
    Args:
    {docstring_args_str}
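Taken together, these helpers turn a Pydantic class into source code for a `return_json_response` function and then into a live callable that `_structure_output_with_function` wraps as an `OpenAIFunction`. A sketch of the pipeline using only names shown in this diff (the `Point` model is illustrative, and the `camel.utils` import path for `json_to_function_code` is assumed to follow the other helpers here):

from pydantic import BaseModel

from camel.utils import (
    func_string_to_callable,
    get_pydantic_object_schema,
    json_to_function_code,
)


class Point(BaseModel):
    x: int
    y: int


schema_json = get_pydantic_object_schema(Point)  # JSON schema as a dict
func_str = json_to_function_code(schema_json)  # source for return_json_response
func_callable = func_string_to_callable(func_str)  # the generated callable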
camel/utils/constants.py
CHANGED
@@ -26,7 +26,7 @@ class Constants:
     VIDEO_DEFAULT_PLUG_PYAV = "pyav"

     # Return response with json format
-
+    FUNC_NAME_FOR_STRUCTURED_OUTPUT = "return_json_response"

     # Default top k vaule for RAG
     DEFAULT_TOP_K_RESULTS = 1
{camel_ai-0.1.7.2.dist-info → camel_ai-0.1.8.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.1.7.2
+Version: 0.1.8
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
@@ -213,7 +213,7 @@ conda create --name camel python=3.10
 conda activate camel

 # Clone github repo
-git clone -b v0.1.7.2 https://github.com/camel-ai/camel.git
+git clone -b v0.1.8 https://github.com/camel-ai/camel.git

 # Change directory into project directory
 cd camel
{camel_ai-0.1.7.2.dist-info → camel_ai-0.1.8.dist-info}/RECORD
CHANGED

@@ -1,7 +1,7 @@
-camel/__init__.py,sha256=
+camel/__init__.py,sha256=LIlV3O5R-G_rJo3VJCy1pfsbJTHL2EOHf6G78HpJgqs,778
 camel/agents/__init__.py,sha256=SSU1wbhZXWwQnE0rRxkpyN57kEu72KklsZNcdLkXfTs,1551
 camel/agents/base.py,sha256=X39qWSiT1WnDqaJ9k3gQrTpOQSwUKzNEVpp5AY6fDH8,1130
-camel/agents/chat_agent.py,sha256=
+camel/agents/chat_agent.py,sha256=ikYGo5ahcmubb5OHNcHlehTC0adnGxCMx0iKPnyUYoU,35760
 camel/agents/critic_agent.py,sha256=To-istnO-9Eb0iabdeIDrgfvkxYYfsdX9xIZiSrc3oM,7493
 camel/agents/deductive_reasoner_agent.py,sha256=49vwglWYHgXf-VRftdMN9OFGOwqdsXyTt45PP6z-pbg,13473
 camel/agents/embodied_agent.py,sha256=3ABuiRQXBpplKbuhPY5KNLJyKc6Z8SgXgzIges3ZwVs,7542
@@ -174,12 +174,12 @@ camel/toolkits/slack_toolkit.py,sha256=JdgDJe7iExTmG7dDXOG6v5KpVjZ6_My_d_WFTYSxk
 camel/toolkits/twitter_toolkit.py,sha256=oQw8wRkU7iDxaocsmWvio4pU75pmq6FJAorPdQ2xEAE,19810
 camel/toolkits/weather_toolkit.py,sha256=n4YrUI_jTIH7oqH918IdHbXLgfQ2BPGIWWK8Jp8G1Uw,7054
 camel/types/__init__.py,sha256=ArKXATj3z_Vv4ISmROVeo6Mv3tj5kE1dTkqfgwyxVY4,1975
-camel/types/enums.py,sha256=
+camel/types/enums.py,sha256=Nx35qNmjRTwT_B6uceZ3BtwxthrRqbRRnOk558_vQUk,17458
 camel/types/openai_types.py,sha256=BNQ6iCzKTjSvgcXFsAFIgrUS_YUFZBU6bDoyAp387hI,2045
 camel/utils/__init__.py,sha256=IdI9v0FetNR-nx-Hg4bmNHoYto6Xfcs_uaomksdewmo,2303
 camel/utils/async_func.py,sha256=SLo8KPkrNKdsONvFf3KBb33EgFn4gH2EKSX1aI_LKes,1578
-camel/utils/commons.py,sha256=
-camel/utils/constants.py,sha256=
+camel/utils/commons.py,sha256=y7eng5QF5Hkt5tuNhtEOJycTIq9hXymrUuwIS5nRad4,16481
+camel/utils/constants.py,sha256=8n4F8Y-DZy4z2F0hRvAq6f-d9SbS59kK5FyLrnJ3mkY,1360
 camel/utils/token_counting.py,sha256=G7vBzrxSXm4DzHMOfMXaOYjYf8WJTpxjHjlzmngHlYQ,21004
 camel/workforce/__init__.py,sha256=6jwJWDlESEqcnWCm61WCyjzFUF6KLzXA_fGI86rHfiE,878
 camel/workforce/base.py,sha256=lEHqgOV1tmsy7y4wuuKClcDkoPCRvXVdMrBngsM_6yY,1722
@@ -191,6 +191,6 @@ camel/workforce/utils.py,sha256=Z-kODz5PMPtfeKKVqpcQq-b-B8oqC7XSwi_F3__Ijhs,3526
 camel/workforce/worker_node.py,sha256=wsRqk2rugCvvkcmCzvn-y-gQuyuJGAG8PIr1KtgqJFw,3878
 camel/workforce/workforce.py,sha256=SVJJgSSkYvk05RgL9oaJzHwzziH7u51KLINRuzLB8BI,1773
 camel/workforce/workforce_prompt.py,sha256=cAWYEIA0rau5itEekSoUIFttBzpKM9RzB6x-mfukGSU,4665
-camel_ai-0.1.
-camel_ai-0.1.
-camel_ai-0.1.
+camel_ai-0.1.8.dist-info/METADATA,sha256=ceVYBfVAJD9y8zBVbAMJmFC2omjmZlvftsFxVTHBtHU,24654
+camel_ai-0.1.8.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+camel_ai-0.1.8.dist-info/RECORD,,
{camel_ai-0.1.7.2.dist-info → camel_ai-0.1.8.dist-info}/WHEEL
File without changes