camel-ai 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (99) hide show
  1. camel/__init__.py +1 -11
  2. camel/agents/__init__.py +5 -5
  3. camel/agents/chat_agent.py +124 -63
  4. camel/agents/critic_agent.py +28 -17
  5. camel/agents/deductive_reasoner_agent.py +235 -0
  6. camel/agents/embodied_agent.py +92 -40
  7. camel/agents/role_assignment_agent.py +27 -17
  8. camel/agents/task_agent.py +60 -34
  9. camel/agents/tool_agents/base.py +0 -1
  10. camel/agents/tool_agents/hugging_face_tool_agent.py +7 -4
  11. camel/configs.py +119 -7
  12. camel/embeddings/__init__.py +2 -0
  13. camel/embeddings/base.py +3 -2
  14. camel/embeddings/openai_embedding.py +3 -3
  15. camel/embeddings/sentence_transformers_embeddings.py +65 -0
  16. camel/functions/__init__.py +13 -3
  17. camel/functions/google_maps_function.py +335 -0
  18. camel/functions/math_functions.py +7 -7
  19. camel/functions/openai_function.py +344 -42
  20. camel/functions/search_functions.py +100 -35
  21. camel/functions/twitter_function.py +484 -0
  22. camel/functions/weather_functions.py +36 -23
  23. camel/generators.py +65 -46
  24. camel/human.py +17 -11
  25. camel/interpreters/__init__.py +25 -0
  26. camel/interpreters/base.py +49 -0
  27. camel/{utils/python_interpreter.py → interpreters/internal_python_interpreter.py} +129 -48
  28. camel/interpreters/interpreter_error.py +19 -0
  29. camel/interpreters/subprocess_interpreter.py +190 -0
  30. camel/loaders/__init__.py +22 -0
  31. camel/{functions/base_io_functions.py → loaders/base_io.py} +38 -35
  32. camel/{functions/unstructured_io_fuctions.py → loaders/unstructured_io.py} +199 -110
  33. camel/memories/__init__.py +17 -7
  34. camel/memories/agent_memories.py +156 -0
  35. camel/memories/base.py +97 -32
  36. camel/memories/blocks/__init__.py +21 -0
  37. camel/memories/{chat_history_memory.py → blocks/chat_history_block.py} +34 -34
  38. camel/memories/blocks/vectordb_block.py +101 -0
  39. camel/memories/context_creators/__init__.py +3 -2
  40. camel/memories/context_creators/score_based.py +32 -20
  41. camel/memories/records.py +6 -5
  42. camel/messages/__init__.py +2 -2
  43. camel/messages/base.py +99 -16
  44. camel/messages/func_message.py +7 -4
  45. camel/models/__init__.py +4 -2
  46. camel/models/anthropic_model.py +132 -0
  47. camel/models/base_model.py +3 -2
  48. camel/models/model_factory.py +10 -8
  49. camel/models/open_source_model.py +25 -13
  50. camel/models/openai_model.py +9 -10
  51. camel/models/stub_model.py +6 -5
  52. camel/prompts/__init__.py +7 -5
  53. camel/prompts/ai_society.py +21 -14
  54. camel/prompts/base.py +54 -47
  55. camel/prompts/code.py +22 -14
  56. camel/prompts/evaluation.py +8 -5
  57. camel/prompts/misalignment.py +26 -19
  58. camel/prompts/object_recognition.py +35 -0
  59. camel/prompts/prompt_templates.py +14 -8
  60. camel/prompts/role_description_prompt_template.py +16 -10
  61. camel/prompts/solution_extraction.py +9 -5
  62. camel/prompts/task_prompt_template.py +24 -21
  63. camel/prompts/translation.py +9 -5
  64. camel/responses/agent_responses.py +5 -2
  65. camel/retrievers/__init__.py +24 -0
  66. camel/retrievers/auto_retriever.py +319 -0
  67. camel/retrievers/base.py +64 -0
  68. camel/retrievers/bm25_retriever.py +149 -0
  69. camel/retrievers/vector_retriever.py +166 -0
  70. camel/societies/__init__.py +1 -1
  71. camel/societies/babyagi_playing.py +56 -32
  72. camel/societies/role_playing.py +188 -133
  73. camel/storages/__init__.py +18 -0
  74. camel/storages/graph_storages/__init__.py +23 -0
  75. camel/storages/graph_storages/base.py +82 -0
  76. camel/storages/graph_storages/graph_element.py +74 -0
  77. camel/storages/graph_storages/neo4j_graph.py +582 -0
  78. camel/storages/key_value_storages/base.py +1 -2
  79. camel/storages/key_value_storages/in_memory.py +1 -2
  80. camel/storages/key_value_storages/json.py +8 -13
  81. camel/storages/vectordb_storages/__init__.py +33 -0
  82. camel/storages/vectordb_storages/base.py +202 -0
  83. camel/storages/vectordb_storages/milvus.py +396 -0
  84. camel/storages/vectordb_storages/qdrant.py +371 -0
  85. camel/terminators/__init__.py +1 -1
  86. camel/terminators/base.py +2 -3
  87. camel/terminators/response_terminator.py +21 -12
  88. camel/terminators/token_limit_terminator.py +5 -3
  89. camel/types/__init__.py +12 -6
  90. camel/types/enums.py +86 -13
  91. camel/types/openai_types.py +10 -5
  92. camel/utils/__init__.py +18 -13
  93. camel/utils/commons.py +242 -81
  94. camel/utils/token_counting.py +135 -15
  95. {camel_ai-0.1.1.dist-info → camel_ai-0.1.3.dist-info}/METADATA +116 -74
  96. camel_ai-0.1.3.dist-info/RECORD +101 -0
  97. {camel_ai-0.1.1.dist-info → camel_ai-0.1.3.dist-info}/WHEEL +1 -1
  98. camel/memories/context_creators/base.py +0 -72
  99. camel_ai-0.1.1.dist-info/RECORD +0 -75
camel/types/enums.py CHANGED
@@ -12,7 +12,7 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
  import re
15
- from enum import Enum
15
+ from enum import Enum, EnumMeta
16
16
 
17
17
 
18
18
  class RoleType(Enum):
@@ -24,12 +24,11 @@ class RoleType(Enum):
24
24
 
25
25
 
26
26
  class ModelType(Enum):
27
- GPT_3_5_TURBO = "gpt-3.5-turbo-1106"
28
- GPT_3_5_TURBO_16K = "gpt-3.5-turbo-1106"
27
+ GPT_3_5_TURBO = "gpt-3.5-turbo"
29
28
  GPT_4 = "gpt-4"
30
29
  GPT_4_32K = "gpt-4-32k"
31
- GPT_4_TURBO = "gpt-4-1106-preview"
32
- GPT_4_TURBO_VISION = "gpt-4-vision-preview"
30
+ GPT_4_TURBO = "gpt-4-turbo"
31
+ GPT_4_TURBO_VISION = "gpt-4-turbo"
33
32
 
34
33
  STUB = "stub"
35
34
 
@@ -37,6 +36,17 @@ class ModelType(Enum):
37
36
  VICUNA = "vicuna"
38
37
  VICUNA_16K = "vicuna-16k"
39
38
 
39
+ # Legacy anthropic models
40
+ # NOTE: among Anthropic legacy models, only Claude 2.1 has system prompt support
41
+ CLAUDE_2_1 = "claude-2.1"
42
+ CLAUDE_2_0 = "claude-2.0"
43
+ CLAUDE_INSTANT_1_2 = "claude-instant-1.2"
44
+
45
+ # Claude 3 models
46
+ CLAUDE_3_OPUS = "claude-3-opus-20240229"
47
+ CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
48
+ CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
49
+
40
50
  @property
41
51
  def value_for_tiktoken(self) -> str:
42
52
  return self.value if self is not ModelType.STUB else "gpt-3.5-turbo"
@@ -46,7 +56,6 @@ class ModelType(Enum):
46
56
  r"""Returns whether this type of models is an OpenAI-released model."""
47
57
  return self in {
48
58
  ModelType.GPT_3_5_TURBO,
49
- ModelType.GPT_3_5_TURBO_16K,
50
59
  ModelType.GPT_4,
51
60
  ModelType.GPT_4_32K,
52
61
  ModelType.GPT_4_TURBO,
@@ -62,6 +71,22 @@ class ModelType(Enum):
62
71
  ModelType.VICUNA_16K,
63
72
  }
64
73
 
74
+ @property
75
+ def is_anthropic(self) -> bool:
76
+ r"""Returns whether this type of models is Anthropic-released model.
77
+
78
+ Returns:
79
+ bool: Whether this type of models is anthropic.
80
+ """
81
+ return self in {
82
+ ModelType.CLAUDE_INSTANT_1_2,
83
+ ModelType.CLAUDE_2_0,
84
+ ModelType.CLAUDE_2_1,
85
+ ModelType.CLAUDE_3_OPUS,
86
+ ModelType.CLAUDE_3_SONNET,
87
+ ModelType.CLAUDE_3_HAIKU,
88
+ }
89
+
65
90
  @property
66
91
  def token_limit(self) -> int:
67
92
  r"""Returns the maximum token limit for a given model.
@@ -70,8 +95,6 @@ class ModelType(Enum):
70
95
  """
71
96
  if self is ModelType.GPT_3_5_TURBO:
72
97
  return 16385
73
- elif self is ModelType.GPT_3_5_TURBO_16K:
74
- return 16385
75
98
  elif self is ModelType.GPT_4:
76
99
  return 8192
77
100
  elif self is ModelType.GPT_4_32K:
@@ -89,6 +112,15 @@ class ModelType(Enum):
89
112
  return 2048
90
113
  elif self is ModelType.VICUNA_16K:
91
114
  return 16384
115
+ if self in {ModelType.CLAUDE_2_0, ModelType.CLAUDE_INSTANT_1_2}:
116
+ return 100_000
117
+ elif self in {
118
+ ModelType.CLAUDE_2_1,
119
+ ModelType.CLAUDE_3_OPUS,
120
+ ModelType.CLAUDE_3_SONNET,
121
+ ModelType.CLAUDE_3_HAIKU,
122
+ }:
123
+ return 200_000
92
124
  else:
93
125
  raise ValueError("Unknown model type")
94
126
 
@@ -107,8 +139,10 @@ class ModelType(Enum):
107
139
  pattern = r'^vicuna-\d+b-v\d+\.\d+-16k$'
108
140
  return bool(re.match(pattern, model_name))
109
141
  elif self is ModelType.LLAMA_2:
110
- return (self.value in model_name.lower()
111
- or "llama2" in model_name.lower())
142
+ return (
143
+ self.value in model_name.lower()
144
+ or "llama2" in model_name.lower()
145
+ )
112
146
  else:
113
147
  return self.value in model_name.lower()
114
148
 
@@ -155,13 +189,21 @@ class TaskType(Enum):
155
189
  EVALUATION = "evaluation"
156
190
  SOLUTION_EXTRACTION = "solution_extraction"
157
191
  ROLE_DESCRIPTION = "role_description"
192
+ OBJECT_RECOGNITION = "object_recognition"
158
193
  DEFAULT = "default"
159
194
 
160
195
 
161
196
  class VectorDistance(Enum):
162
- DOT = 1
163
- COSINE = 2
164
- EUCLIDEAN = 3
197
+ r"""Distance metrics used in a vector database."""
198
+
199
+ DOT = "dot"
200
+ r"""Dot product. https://en.wikipedia.org/wiki/Dot_product"""
201
+
202
+ COSINE = "cosine"
203
+ r"""Cosine similarity. https://en.wikipedia.org/wiki/Cosine_similarity"""
204
+
205
+ EUCLIDEAN = "euclidean"
206
+ r"""Euclidean distance. https://en.wikipedia.org/wiki/Euclidean_distance"""
165
207
 
166
208
 
167
209
  class OpenAIBackendRole(Enum):
@@ -174,3 +216,34 @@ class OpenAIBackendRole(Enum):
174
216
  class TerminationMode(Enum):
175
217
  ANY = "any"
176
218
  ALL = "all"
219
+
220
+
221
+ class OpenAIImageTypeMeta(EnumMeta):
222
+ def __contains__(cls, image_type: object) -> bool:
223
+ try:
224
+ cls(image_type)
225
+ except ValueError:
226
+ return False
227
+ return True
228
+
229
+
230
+ class OpenAIImageType(Enum, metaclass=OpenAIImageTypeMeta):
231
+ r"""Image types supported by OpenAI vision model."""
232
+
233
+ # https://platform.openai.com/docs/guides/vision
234
+ PNG = "png"
235
+ JPEG = "jpeg"
236
+ JPG = "jpg"
237
+ WEBP = "webp"
238
+ GIF = "gif"
239
+
240
+
241
+ class OpenAIImageDetailType(Enum):
242
+ AUTO = "auto"
243
+ LOW = "low"
244
+ HIGH = "high"
245
+
246
+
247
+ class StorageType(Enum):
248
+ MILVUS = "milvus"
249
+ QDRANT = "qdrant"
@@ -14,17 +14,22 @@
14
14
  # isort: skip_file
15
15
  from openai.types.chat.chat_completion import ChatCompletion, Choice
16
16
  from openai.types.chat.chat_completion_assistant_message_param import (
17
- ChatCompletionAssistantMessageParam, )
17
+ ChatCompletionAssistantMessageParam,
18
+ )
18
19
  from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
19
20
  from openai.types.chat.chat_completion_function_message_param import (
20
- ChatCompletionFunctionMessageParam, )
21
+ ChatCompletionFunctionMessageParam,
22
+ )
21
23
  from openai.types.chat.chat_completion_message import ChatCompletionMessage
22
24
  from openai.types.chat.chat_completion_message_param import (
23
- ChatCompletionMessageParam, )
25
+ ChatCompletionMessageParam,
26
+ )
24
27
  from openai.types.chat.chat_completion_system_message_param import (
25
- ChatCompletionSystemMessageParam, )
28
+ ChatCompletionSystemMessageParam,
29
+ )
26
30
  from openai.types.chat.chat_completion_user_message_param import (
27
- ChatCompletionUserMessageParam, )
31
+ ChatCompletionUserMessageParam,
32
+ )
28
33
  from openai.types.completion_usage import CompletionUsage
29
34
 
30
35
  Choice = Choice
camel/utils/__init__.py CHANGED
@@ -11,37 +11,42 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- from .python_interpreter import PythonInterpreter
15
14
  from .commons import (
16
- openai_api_key_required,
17
- print_text_animated,
18
- get_prompt_template_key_words,
19
- get_first_int,
15
+ PYDANTIC_V2,
16
+ api_key_required,
17
+ check_server_running,
20
18
  download_tasks,
21
- parse_doc,
19
+ get_first_int,
20
+ get_prompt_template_key_words,
21
+ get_system_information,
22
22
  get_task_list,
23
- check_server_running,
23
+ print_text_animated,
24
+ role_playing_with_function,
25
+ to_pascal,
24
26
  )
25
27
  from .token_counting import (
26
- get_model_encoding,
28
+ AnthropicTokenCounter,
27
29
  BaseTokenCounter,
28
30
  OpenAITokenCounter,
29
31
  OpenSourceTokenCounter,
32
+ get_model_encoding,
30
33
  )
31
34
 
32
35
  __all__ = [
33
- 'count_tokens_openai_chat_models',
34
- 'openai_api_key_required',
36
+ 'api_key_required',
35
37
  'print_text_animated',
36
38
  'get_prompt_template_key_words',
37
39
  'get_first_int',
38
40
  'download_tasks',
39
- 'PythonInterpreter',
40
- 'parse_doc',
41
41
  'get_task_list',
42
- 'get_model_encoding',
43
42
  'check_server_running',
43
+ 'AnthropicTokenCounter',
44
+ 'get_system_information',
45
+ 'to_pascal',
46
+ 'PYDANTIC_V2',
47
+ 'get_model_encoding',
44
48
  'BaseTokenCounter',
45
49
  'OpenAITokenCounter',
46
50
  'OpenSourceTokenCounter',
51
+ 'role_playing_with_function',
47
52
  ]
camel/utils/commons.py CHANGED
@@ -11,26 +11,17 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- import inspect
15
14
  import os
15
+ import platform
16
16
  import re
17
17
  import socket
18
18
  import time
19
19
  import zipfile
20
20
  from functools import wraps
21
- from typing import (
22
- Any,
23
- Callable,
24
- Dict,
25
- List,
26
- Optional,
27
- Set,
28
- Tuple,
29
- TypeVar,
30
- cast,
31
- )
21
+ from typing import Any, Callable, List, Optional, Set, TypeVar, cast
32
22
  from urllib.parse import urlparse
33
23
 
24
+ import pydantic
34
25
  import requests
35
26
 
36
27
  from camel.types import TaskType
@@ -38,7 +29,33 @@ from camel.types import TaskType
38
29
  F = TypeVar('F', bound=Callable[..., Any])
39
30
 
40
31
 
41
- def openai_api_key_required(func: F) -> F:
32
+ # Set lazy import
33
+ def get_lazy_imported_functions_module():
34
+ from camel.functions import (
35
+ MAP_FUNCS,
36
+ MATH_FUNCS,
37
+ SEARCH_FUNCS,
38
+ TWITTER_FUNCS,
39
+ WEATHER_FUNCS,
40
+ )
41
+
42
+ return [
43
+ *MATH_FUNCS,
44
+ *SEARCH_FUNCS,
45
+ *WEATHER_FUNCS,
46
+ *MAP_FUNCS,
47
+ *TWITTER_FUNCS,
48
+ ]
49
+
50
+
51
+ # Set lazy import
52
+ def get_lazy_imported_types_module():
53
+ from camel.types import ModelType
54
+
55
+ return ModelType.GPT_4_TURBO
56
+
57
+
58
+ def api_key_required(func: F) -> F:
42
59
  r"""Decorator that checks if the OpenAI API key is available in the
43
60
  environment variables.
44
61
 
@@ -55,10 +72,16 @@ def openai_api_key_required(func: F) -> F:
55
72
 
56
73
  @wraps(func)
57
74
  def wrapper(self, *args, **kwargs):
58
- if 'OPENAI_API_KEY' in os.environ:
75
+ if self.model_type.is_openai:
76
+ if 'OPENAI_API_KEY' not in os.environ:
77
+ raise ValueError('OpenAI API key not found.')
78
+ return func(self, *args, **kwargs)
79
+ elif self.model_type.is_anthropic:
80
+ if 'ANTHROPIC_API_KEY' not in os.environ:
81
+ raise ValueError('Anthropic API key not found.')
59
82
  return func(self, *args, **kwargs)
60
83
  else:
61
- raise ValueError('OpenAI API key not found.')
84
+ raise ValueError('Unsupported model type.')
62
85
 
63
86
  return cast(F, wrapper)
64
87
 
@@ -116,12 +139,26 @@ def get_first_int(string: str) -> Optional[int]:
116
139
 
117
140
 
118
141
  def download_tasks(task: TaskType, folder_path: str) -> None:
142
+ r"""Downloads task-related files from a specified URL and extracts them.
143
+
144
+ This function downloads a zip file containing tasks based on the specified
145
+ `task` type from a predefined URL, saves it to `folder_path`, and then
146
+ extracts the contents of the zip file into the same folder. After
147
+ extraction, the zip file is deleted.
148
+
149
+ Args:
150
+ task (TaskType): An enum representing the type of task to download.
151
+ folder_path (str): The path of the folder where the zip file will be
152
+ downloaded and extracted.
153
+ """
119
154
  # Define the path to save the zip file
120
155
  zip_file_path = os.path.join(folder_path, "tasks.zip")
121
156
 
122
157
  # Download the zip file from the Google Drive link
123
- response = requests.get("https://huggingface.co/datasets/camel-ai/"
124
- f"metadata/resolve/main/{task.value}_tasks.zip")
158
+ response = requests.get(
159
+ "https://huggingface.co/datasets/camel-ai/"
160
+ f"metadata/resolve/main/{task.value}_tasks.zip"
161
+ )
125
162
 
126
163
  # Save the zip file
127
164
  with open(zip_file_path, "wb") as f:
@@ -134,70 +171,6 @@ def download_tasks(task: TaskType, folder_path: str) -> None:
134
171
  os.remove(zip_file_path)
135
172
 
136
173
 
137
- def parse_doc(func: Callable) -> Dict[str, Any]:
138
- r"""Parse the docstrings of a function to extract the function name,
139
- description and parameters.
140
-
141
- Args:
142
- func (Callable): The function to be parsed.
143
- Returns:
144
- Dict[str, Any]: A dictionary with the function's name,
145
- description, and parameters.
146
- """
147
-
148
- doc = inspect.getdoc(func)
149
- if not doc:
150
- raise ValueError(
151
- f"Invalid function {func.__name__}: no docstring provided.")
152
-
153
- properties = {}
154
- required = []
155
-
156
- parts = re.split(r'\n\s*\n', doc)
157
- func_desc = parts[0].strip()
158
-
159
- args_section = next((p for p in parts if 'Args:' in p), None)
160
- if args_section:
161
- args_descs: List[Tuple[str, str, str, ]] = re.findall(
162
- r'(\w+)\s*\((\w+)\):\s*(.*)', args_section)
163
- properties = {
164
- name.strip(): {
165
- 'type': type,
166
- 'description': desc
167
- }
168
- for name, type, desc in args_descs
169
- }
170
- for name in properties:
171
- required.append(name)
172
-
173
- # Parameters from the function signature
174
- sign_params = list(inspect.signature(func).parameters.keys())
175
- if len(sign_params) != len(required):
176
- raise ValueError(
177
- f"Number of parameters in function signature ({len(sign_params)})"
178
- f" does not match that in docstring ({len(required)}).")
179
-
180
- for param in sign_params:
181
- if param not in required:
182
- raise ValueError(f"Parameter '{param}' in function signature"
183
- " is missing in the docstring.")
184
-
185
- parameters = {
186
- "type": "object",
187
- "properties": properties,
188
- "required": required,
189
- }
190
-
191
- # Construct the function dictionary
192
- function_dict = {
193
- "name": func.__name__,
194
- "description": func_desc,
195
- "parameters": parameters,
196
- }
197
-
198
- return function_dict
199
-
200
-
201
174
  def get_task_list(task_response: str) -> List[str]:
202
175
  r"""Parse the response of the Agent and return task list.
203
176
 
@@ -241,3 +214,191 @@ def check_server_running(server_url: str) -> bool:
241
214
 
242
215
  # if the port is open, the result should be 0.
243
216
  return result == 0
217
+
218
+
219
+ def get_system_information():
220
+ r"""Gathers information about the operating system.
221
+
222
+ Returns:
223
+ dict: A dictionary containing various pieces of OS information.
224
+ """
225
+ sys_info = {
226
+ "OS Name": os.name,
227
+ "System": platform.system(),
228
+ "Release": platform.release(),
229
+ "Version": platform.version(),
230
+ "Machine": platform.machine(),
231
+ "Processor": platform.processor(),
232
+ "Platform": platform.platform(),
233
+ }
234
+
235
+ return sys_info
236
+
237
+
238
+ def to_pascal(snake: str) -> str:
239
+ """Convert a snake_case string to PascalCase.
240
+
241
+ Args:
242
+ snake (str): The snake_case string to be converted.
243
+
244
+ Returns:
245
+ str: The converted PascalCase string.
246
+ """
247
+ # Check if the string is already in PascalCase
248
+ if re.match(r'^[A-Z][a-zA-Z0-9]*([A-Z][a-zA-Z0-9]*)*$', snake):
249
+ return snake
250
+ # Remove leading and trailing underscores
251
+ snake = snake.strip('_')
252
+ # Replace multiple underscores with a single one
253
+ snake = re.sub('_+', '_', snake)
254
+ # Convert to PascalCase
255
+ return re.sub(
256
+ '_([0-9A-Za-z])',
257
+ lambda m: m.group(1).upper(),
258
+ snake.title(),
259
+ )
260
+
261
+
262
+ PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
263
+
264
+
265
+ def role_playing_with_function(
266
+ task_prompt: str = (
267
+ "Assume now is 2024 in the Gregorian calendar, "
268
+ "estimate the current age of University of Oxford "
269
+ "and then add 10 more years to this age, "
270
+ "and get the current weather of the city where "
271
+ "the University is located. And tell me what time "
272
+ "zone University of Oxford is in. And use my twitter "
273
+ "account infomation to create a tweet. "
274
+ ),
275
+ function_list: Optional[List] = None,
276
+ model_type=None,
277
+ chat_turn_limit=10,
278
+ assistant_role_name: str = "Searcher",
279
+ user_role_name: str = "Professor",
280
+ ) -> None:
281
+ r"""Initializes and conducts a `RolePlaying` with `FunctionCallingConfig`
282
+ session. The function creates an interactive and dynamic role-play session
283
+ where the AI Assistant and User engage based on the given task, roles, and
284
+ available functions. It demonstrates the versatility of AI in handling
285
+ diverse tasks and user interactions within a structured `RolePlaying`
286
+ framework.
287
+
288
+ Args:
289
+ task_prompt (str): The initial task or scenario description to start
290
+ the `RolePlaying` session. Defaults to a prompt involving the
291
+ estimation of KAUST's age and weather information.
292
+ function_list (list): A list of functions that the agent can utilize
293
+ during the session. Defaults to a combination of math, search, and
294
+ weather functions.
295
+ model_type (ModelType): The type of chatbot model used for both the
296
+ assistant and the user. Defaults to `GPT-4 Turbo`.
297
+ chat_turn_limit (int): The maximum number of turns (exchanges) in the
298
+ chat session. Defaults to 10.
299
+ assistant_role_name (str): The role name assigned to the AI Assistant.
300
+ Defaults to 'Searcher'.
301
+ user_role_name (str): The role name assigned to the User. Defaults to
302
+ 'Professor'.
303
+
304
+ Returns:
305
+ None: This function does not return any value but prints out the
306
+ session's dialogues and outputs.
307
+ """
308
+
309
+ # Run lazy import
310
+ if function_list is None:
311
+ function_list = get_lazy_imported_functions_module()
312
+ if model_type is None:
313
+ model_type = get_lazy_imported_types_module()
314
+
315
+ from colorama import Fore
316
+
317
+ from camel.agents.chat_agent import FunctionCallingRecord
318
+ from camel.configs import ChatGPTConfig, FunctionCallingConfig
319
+ from camel.societies import RolePlaying
320
+
321
+ task_prompt = task_prompt
322
+ user_model_config = ChatGPTConfig(temperature=0.0)
323
+
324
+ function_list = function_list
325
+ assistant_model_config = FunctionCallingConfig.from_openai_function_list(
326
+ function_list=function_list,
327
+ kwargs=dict(temperature=0.0),
328
+ )
329
+
330
+ role_play_session = RolePlaying(
331
+ assistant_role_name=assistant_role_name,
332
+ user_role_name=user_role_name,
333
+ assistant_agent_kwargs=dict(
334
+ model_type=model_type,
335
+ model_config=assistant_model_config,
336
+ function_list=function_list,
337
+ ),
338
+ user_agent_kwargs=dict(
339
+ model_type=model_type,
340
+ model_config=user_model_config,
341
+ ),
342
+ task_prompt=task_prompt,
343
+ with_task_specify=False,
344
+ )
345
+
346
+ print(
347
+ Fore.GREEN
348
+ + f"AI Assistant sys message:\n{role_play_session.assistant_sys_msg}\n"
349
+ )
350
+ print(
351
+ Fore.BLUE + f"AI User sys message:\n{role_play_session.user_sys_msg}\n"
352
+ )
353
+
354
+ print(Fore.YELLOW + f"Original task prompt:\n{task_prompt}\n")
355
+ print(
356
+ Fore.CYAN
357
+ + f"Specified task prompt:\n{role_play_session.specified_task_prompt}\n"
358
+ )
359
+ print(Fore.RED + f"Final task prompt:\n{role_play_session.task_prompt}\n")
360
+
361
+ n = 0
362
+ input_msg = role_play_session.init_chat()
363
+ while n < chat_turn_limit:
364
+ n += 1
365
+ assistant_response, user_response = role_play_session.step(input_msg)
366
+
367
+ if assistant_response.terminated:
368
+ print(
369
+ Fore.GREEN
370
+ + (
371
+ "AI Assistant terminated. Reason: "
372
+ f"{assistant_response.info['termination_reasons']}."
373
+ )
374
+ )
375
+ break
376
+ if user_response.terminated:
377
+ print(
378
+ Fore.GREEN
379
+ + (
380
+ "AI User terminated. "
381
+ f"Reason: {user_response.info['termination_reasons']}."
382
+ )
383
+ )
384
+ break
385
+
386
+ # Print output from the user
387
+ print_text_animated(
388
+ Fore.BLUE + f"AI User:\n\n{user_response.msg.content}\n"
389
+ )
390
+
391
+ # Print output from the assistant, including any function
392
+ # execution information
393
+ print_text_animated(Fore.GREEN + "AI Assistant:")
394
+ called_functions: List[FunctionCallingRecord] = assistant_response.info[
395
+ 'called_functions'
396
+ ]
397
+ for func_record in called_functions:
398
+ print_text_animated(f"{func_record}")
399
+ print_text_animated(f"{assistant_response.msg.content}\n")
400
+
401
+ if "CAMEL_TASK_DONE" in user_response.msg.content:
402
+ break
403
+
404
+ input_msg = assistant_response.msg