camel-ai 0.1.3__tar.gz → 0.1.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (121) hide show
  1. {camel_ai-0.1.3 → camel_ai-0.1.4}/PKG-INFO +9 -3
  2. {camel_ai-0.1.3 → camel_ai-0.1.4}/README.md +1 -1
  3. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/__init__.py +1 -1
  4. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/agents/__init__.py +2 -0
  5. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/agents/chat_agent.py +40 -53
  6. camel_ai-0.1.4/camel/agents/knowledge_graph_agent.py +221 -0
  7. camel_ai-0.1.4/camel/configs/__init__.py +29 -0
  8. camel_ai-0.1.4/camel/configs/anthropic_config.py +73 -0
  9. camel_ai-0.1.4/camel/configs/base_config.py +22 -0
  10. camel_ai-0.1.4/camel/configs/openai_config.py +132 -0
  11. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/embeddings/openai_embedding.py +7 -2
  12. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/functions/__init__.py +13 -8
  13. camel_ai-0.1.4/camel/functions/open_api_function.py +380 -0
  14. camel_ai-0.1.4/camel/functions/open_api_specs/coursera/__init__.py +13 -0
  15. camel_ai-0.1.4/camel/functions/open_api_specs/coursera/openapi.yaml +82 -0
  16. camel_ai-0.1.4/camel/functions/open_api_specs/klarna/__init__.py +13 -0
  17. camel_ai-0.1.4/camel/functions/open_api_specs/klarna/openapi.yaml +87 -0
  18. camel_ai-0.1.4/camel/functions/open_api_specs/speak/__init__.py +13 -0
  19. camel_ai-0.1.4/camel/functions/open_api_specs/speak/openapi.yaml +151 -0
  20. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/functions/openai_function.py +3 -1
  21. camel_ai-0.1.4/camel/functions/retrieval_functions.py +61 -0
  22. camel_ai-0.1.4/camel/functions/slack_functions.py +275 -0
  23. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/models/__init__.py +2 -0
  24. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/models/anthropic_model.py +16 -2
  25. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/models/base_model.py +8 -2
  26. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/models/model_factory.py +7 -3
  27. camel_ai-0.1.4/camel/models/openai_audio_models.py +251 -0
  28. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/models/openai_model.py +12 -4
  29. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/models/stub_model.py +5 -1
  30. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/retrievers/__init__.py +2 -0
  31. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/retrievers/auto_retriever.py +47 -36
  32. camel_ai-0.1.4/camel/retrievers/base.py +69 -0
  33. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/retrievers/bm25_retriever.py +10 -19
  34. camel_ai-0.1.4/camel/retrievers/cohere_rerank_retriever.py +108 -0
  35. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/retrievers/vector_retriever.py +43 -26
  36. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/storages/vectordb_storages/qdrant.py +3 -1
  37. camel_ai-0.1.4/camel/toolkits/__init__.py +21 -0
  38. camel_ai-0.1.4/camel/toolkits/base.py +22 -0
  39. camel_ai-0.1.4/camel/toolkits/github_toolkit.py +245 -0
  40. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/types/__init__.py +6 -0
  41. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/types/enums.py +44 -3
  42. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/utils/__init__.py +4 -2
  43. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/utils/commons.py +97 -173
  44. {camel_ai-0.1.3 → camel_ai-0.1.4}/pyproject.toml +27 -2
  45. camel_ai-0.1.3/camel/configs.py +0 -271
  46. camel_ai-0.1.3/camel/retrievers/base.py +0 -64
  47. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/agents/base.py +0 -0
  48. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/agents/critic_agent.py +0 -0
  49. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/agents/deductive_reasoner_agent.py +0 -0
  50. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/agents/embodied_agent.py +0 -0
  51. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/agents/role_assignment_agent.py +0 -0
  52. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/agents/task_agent.py +0 -0
  53. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/agents/tool_agents/__init__.py +0 -0
  54. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/agents/tool_agents/base.py +0 -0
  55. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
  56. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/embeddings/__init__.py +0 -0
  57. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/embeddings/base.py +0 -0
  58. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/embeddings/sentence_transformers_embeddings.py +0 -0
  59. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/functions/google_maps_function.py +0 -0
  60. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/functions/math_functions.py +0 -0
  61. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/functions/search_functions.py +0 -0
  62. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/functions/twitter_function.py +0 -0
  63. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/functions/weather_functions.py +0 -0
  64. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/generators.py +0 -0
  65. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/human.py +0 -0
  66. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/interpreters/__init__.py +0 -0
  67. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/interpreters/base.py +0 -0
  68. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/interpreters/internal_python_interpreter.py +0 -0
  69. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/interpreters/interpreter_error.py +0 -0
  70. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/interpreters/subprocess_interpreter.py +0 -0
  71. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/loaders/__init__.py +0 -0
  72. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/loaders/base_io.py +0 -0
  73. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/loaders/unstructured_io.py +0 -0
  74. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/memories/__init__.py +0 -0
  75. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/memories/agent_memories.py +0 -0
  76. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/memories/base.py +0 -0
  77. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/memories/blocks/__init__.py +0 -0
  78. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/memories/blocks/chat_history_block.py +0 -0
  79. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/memories/blocks/vectordb_block.py +0 -0
  80. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/memories/context_creators/__init__.py +0 -0
  81. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/memories/context_creators/score_based.py +0 -0
  82. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/memories/records.py +0 -0
  83. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/messages/__init__.py +0 -0
  84. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/messages/base.py +0 -0
  85. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/messages/func_message.py +0 -0
  86. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/models/open_source_model.py +0 -0
  87. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/prompts/__init__.py +0 -0
  88. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/prompts/ai_society.py +0 -0
  89. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/prompts/base.py +0 -0
  90. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/prompts/code.py +0 -0
  91. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/prompts/evaluation.py +0 -0
  92. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/prompts/misalignment.py +0 -0
  93. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/prompts/object_recognition.py +0 -0
  94. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/prompts/prompt_templates.py +0 -0
  95. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/prompts/role_description_prompt_template.py +0 -0
  96. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/prompts/solution_extraction.py +0 -0
  97. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/prompts/task_prompt_template.py +0 -0
  98. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/prompts/translation.py +0 -0
  99. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/responses/__init__.py +0 -0
  100. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/responses/agent_responses.py +0 -0
  101. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/societies/__init__.py +0 -0
  102. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/societies/babyagi_playing.py +0 -0
  103. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/societies/role_playing.py +0 -0
  104. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/storages/__init__.py +0 -0
  105. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/storages/graph_storages/__init__.py +0 -0
  106. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/storages/graph_storages/base.py +0 -0
  107. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/storages/graph_storages/graph_element.py +0 -0
  108. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/storages/graph_storages/neo4j_graph.py +0 -0
  109. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/storages/key_value_storages/__init__.py +0 -0
  110. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/storages/key_value_storages/base.py +0 -0
  111. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/storages/key_value_storages/in_memory.py +0 -0
  112. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/storages/key_value_storages/json.py +0 -0
  113. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/storages/vectordb_storages/__init__.py +0 -0
  114. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/storages/vectordb_storages/base.py +0 -0
  115. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/storages/vectordb_storages/milvus.py +0 -0
  116. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/terminators/__init__.py +0 -0
  117. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/terminators/base.py +0 -0
  118. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/terminators/response_terminator.py +0 -0
  119. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/terminators/token_limit_terminator.py +0 -0
  120. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/types/openai_types.py +0 -0
  121. {camel_ai-0.1.3 → camel_ai-0.1.4}/camel/utils/token_counting.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: camel-ai
3
- Version: 0.1.3
3
+ Version: 0.1.4
4
4
  Summary: Communicative Agents for AI Society Study
5
5
  Home-page: https://www.camel-ai.org/
6
6
  License: Apache-2.0
@@ -24,6 +24,7 @@ Requires-Dist: PyMuPDF (>=1.22.5,<2.0.0) ; extra == "tools" or extra == "all"
24
24
  Requires-Dist: accelerate (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
25
25
  Requires-Dist: anthropic (>=0.21.3,<0.22.0)
26
26
  Requires-Dist: beautifulsoup4 (>=4,<5) ; extra == "tools" or extra == "all"
27
+ Requires-Dist: cohere (>=4.56,<5.0) ; extra == "retrievers" or extra == "all"
27
28
  Requires-Dist: colorama (>=0,<1)
28
29
  Requires-Dist: datasets (>=2,<3) ; extra == "huggingface-agent" or extra == "all"
29
30
  Requires-Dist: diffusers (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
@@ -35,10 +36,14 @@ Requires-Dist: mock (>=5,<6) ; extra == "test"
35
36
  Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "graph-storages" or extra == "all"
36
37
  Requires-Dist: numpy (>=1,<2)
37
38
  Requires-Dist: openai (>=1.2.3,<2.0.0)
39
+ Requires-Dist: openapi-spec-validator (>=0.7.1,<0.8.0) ; extra == "tools" or extra == "all"
38
40
  Requires-Dist: opencv-python (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
39
41
  Requires-Dist: pathlib (>=1.0.1,<2.0.0)
42
+ Requires-Dist: prance (>=23.6.21.0,<24.0.0.0) ; extra == "tools" or extra == "all"
40
43
  Requires-Dist: protobuf (>=4,<5)
41
44
  Requires-Dist: pydantic (>=1.9,<3)
45
+ Requires-Dist: pydub (>=0.25.1,<0.26.0) ; extra == "tools" or extra == "all"
46
+ Requires-Dist: pygithub (>=2.3.0,<3.0.0) ; extra == "tools" or extra == "all"
42
47
  Requires-Dist: pymilvus (>=2.4.0,<3.0.0) ; extra == "vector-databases" or extra == "all"
43
48
  Requires-Dist: pyowm (>=3.3.0,<4.0.0) ; extra == "tools" or extra == "all"
44
49
  Requires-Dist: pytest (>=7,<8) ; extra == "test"
@@ -47,8 +52,9 @@ Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0) ; extra == "retrievers" or extra == "a
47
52
  Requires-Dist: requests_oauthlib (>=1.3.1,<2.0.0) ; extra == "tools" or extra == "all"
48
53
  Requires-Dist: sentence-transformers (>=2.2.2,<3.0.0) ; extra == "encoders" or extra == "all"
49
54
  Requires-Dist: sentencepiece (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
55
+ Requires-Dist: slack-sdk (>=3.27.2,<4.0.0) ; extra == "tools" or extra == "all"
50
56
  Requires-Dist: soundfile (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
51
- Requires-Dist: tiktoken (>=0,<1)
57
+ Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
52
58
  Requires-Dist: torch (>=1,<2) ; extra == "huggingface-agent" or extra == "all"
53
59
  Requires-Dist: transformers (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
54
60
  Requires-Dist: unstructured[all-docs] (>=0.10.30,<0.11.0) ; extra == "tools" or extra == "all"
@@ -164,7 +170,7 @@ conda create --name camel python=3.10
164
170
  conda activate camel
165
171
 
166
172
  # Clone github repo
167
- git clone -b v0.1.3 https://github.com/camel-ai/camel.git
173
+ git clone -b v0.1.4 https://github.com/camel-ai/camel.git
168
174
 
169
175
  # Change directory into project directory
170
176
  cd camel
@@ -104,7 +104,7 @@ conda create --name camel python=3.10
104
104
  conda activate camel
105
105
 
106
106
  # Clone github repo
107
- git clone -b v0.1.3 https://github.com/camel-ai/camel.git
107
+ git clone -b v0.1.4 https://github.com/camel-ai/camel.git
108
108
 
109
109
  # Change directory into project directory
110
110
  cd camel
@@ -12,7 +12,7 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
- __version__ = '0.1.3'
15
+ __version__ = '0.1.4'
16
16
 
17
17
  __all__ = [
18
18
  '__version__',
@@ -15,6 +15,7 @@ from .base import BaseAgent
15
15
  from .chat_agent import ChatAgent
16
16
  from .critic_agent import CriticAgent
17
17
  from .embodied_agent import EmbodiedAgent
18
+ from .knowledge_graph_agent import KnowledgeGraphAgent
18
19
  from .role_assignment_agent import RoleAssignmentAgent
19
20
  from .task_agent import (
20
21
  TaskCreationAgent,
@@ -37,4 +38,5 @@ __all__ = [
37
38
  'HuggingFaceToolAgent',
38
39
  'EmbodiedAgent',
39
40
  'RoleAssignmentAgent',
41
+ 'KnowledgeGraphAgent',
40
42
  ]
@@ -19,7 +19,7 @@ from dataclasses import dataclass
19
19
  from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
20
20
 
21
21
  from camel.agents.base import BaseAgent
22
- from camel.configs import ChatGPTConfig, ChatGPTVisionConfig
22
+ from camel.configs import ChatGPTConfig
23
23
  from camel.memories import (
24
24
  AgentMemory,
25
25
  ChatHistoryMemory,
@@ -84,6 +84,9 @@ class ChatAgent(BaseAgent):
84
84
  responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
85
85
  model_config (BaseConfig, optional): Configuration options for the
86
86
  LLM model. (default: :obj:`None`)
87
+ api_key (str, optional): The API key for authenticating with the
88
+ LLM service. Only OpenAI and Anthropic model supported (default:
89
+ :obj:`None`)
87
90
  memory (AgentMemory, optional): The agent memory for managing chat
88
91
  messages. If `None`, a :obj:`ChatHistoryMemory` will be used.
89
92
  (default: :obj:`None`)
@@ -96,7 +99,7 @@ class ChatAgent(BaseAgent):
96
99
  (default: :obj:`None`)
97
100
  output_language (str, optional): The language to be output by the
98
101
  agent. (default: :obj:`None`)
99
- function_list (List[OpenAIFunction], optional): List of available
102
+ tools (List[OpenAIFunction], optional): List of available
100
103
  :obj:`OpenAIFunction`. (default: :obj:`None`)
101
104
  response_terminators (List[ResponseTerminator], optional): List of
102
105
  :obj:`ResponseTerminator` bind to one chat agent.
@@ -108,11 +111,12 @@ class ChatAgent(BaseAgent):
108
111
  system_message: BaseMessage,
109
112
  model_type: Optional[ModelType] = None,
110
113
  model_config: Optional[BaseConfig] = None,
114
+ api_key: Optional[str] = None,
111
115
  memory: Optional[AgentMemory] = None,
112
116
  message_window_size: Optional[int] = None,
113
117
  token_limit: Optional[int] = None,
114
118
  output_language: Optional[str] = None,
115
- function_list: Optional[List[OpenAIFunction]] = None,
119
+ tools: Optional[List[OpenAIFunction]] = None,
116
120
  response_terminators: Optional[List[ResponseTerminator]] = None,
117
121
  ) -> None:
118
122
  self.orig_sys_message: BaseMessage = system_message
@@ -128,34 +132,14 @@ class ChatAgent(BaseAgent):
128
132
  )
129
133
 
130
134
  self.func_dict: Dict[str, Callable] = {}
131
- if function_list is not None:
132
- for func in function_list:
135
+ if tools is not None:
136
+ for func in tools:
133
137
  self.func_dict[func.get_function_name()] = func.func
134
138
 
135
- self.model_config: BaseConfig
136
- if self.model_type == ModelType.GPT_4_TURBO_VISION:
137
- if model_config is not None and not isinstance(
138
- model_config, ChatGPTVisionConfig
139
- ):
140
- raise ValueError(
141
- "Please use `ChatGPTVisionConfig` as "
142
- "the `model_config` when `model_type` "
143
- "is `GPT_4_TURBO_VISION`"
144
- )
145
- self.model_config = model_config or ChatGPTVisionConfig()
146
- else:
147
- if model_config is not None and isinstance(
148
- model_config, ChatGPTVisionConfig
149
- ):
150
- raise ValueError(
151
- "Please don't use `ChatGPTVisionConfig` as "
152
- "the `model_config` when `model_type` "
153
- "is not `GPT_4_TURBO_VISION`"
154
- )
155
- self.model_config = model_config or ChatGPTConfig()
156
-
139
+ self.model_config = model_config or ChatGPTConfig()
140
+ self._api_key = api_key
157
141
  self.model_backend: BaseModelBackend = ModelFactory.create(
158
- self.model_type, self.model_config.__dict__
142
+ self.model_type, self.model_config.__dict__, self._api_key
159
143
  )
160
144
  self.model_token_limit = token_limit or self.model_backend.token_limit
161
145
  context_creator = ScoreBasedContextCreator(
@@ -201,12 +185,12 @@ class ChatAgent(BaseAgent):
201
185
  """
202
186
  self._system_message = message
203
187
 
204
- def is_function_calling_enabled(self) -> bool:
188
+ def is_tools_added(self) -> bool:
205
189
  r"""Whether OpenAI function calling is enabled for this agent.
206
190
 
207
191
  Returns:
208
192
  bool: Whether OpenAI function calling is enabled for this
209
- agent, determined by whether the dictionary of functions
193
+ agent, determined by whether the dictionary of tools
210
194
  is empty.
211
195
  """
212
196
  return len(self.func_dict) > 0
@@ -249,7 +233,7 @@ class ChatAgent(BaseAgent):
249
233
  usage: Optional[Dict[str, int]],
250
234
  termination_reasons: List[str],
251
235
  num_tokens: int,
252
- called_funcs: List[FunctionCallingRecord],
236
+ tool_calls: List[FunctionCallingRecord],
253
237
  ) -> Dict[str, Any]:
254
238
  r"""Returns a dictionary containing information about the chat session.
255
239
 
@@ -260,9 +244,9 @@ class ChatAgent(BaseAgent):
260
244
  termination_reasons (List[str]): The reasons for the termination
261
245
  of the chat session.
262
246
  num_tokens (int): The number of tokens used in the chat session.
263
- called_funcs (List[FunctionCallingRecord]): The list of function
247
+ tool_calls (List[FunctionCallingRecord]): The list of function
264
248
  calling records, containing the information of called
265
- functions.
249
+ tools.
266
250
 
267
251
  Returns:
268
252
  Dict[str, Any]: The chat session information.
@@ -272,7 +256,7 @@ class ChatAgent(BaseAgent):
272
256
  "usage": usage,
273
257
  "termination_reasons": termination_reasons,
274
258
  "num_tokens": num_tokens,
275
- "called_functions": called_funcs,
259
+ "tool_calls": tool_calls,
276
260
  }
277
261
 
278
262
  def init_messages(self) -> None:
@@ -305,9 +289,10 @@ class ChatAgent(BaseAgent):
305
289
 
306
290
  Args:
307
291
  input_message (BaseMessage): The input message to the agent.
308
- Its `role` field that specifies the role at backend may be either
309
- `user` or `assistant` but it will be set to `user` anyway since
310
- for the self agent any incoming message is external.
292
+ Its `role` field that specifies the role at backend may be
293
+ either `user` or `assistant` but it will be set to `user`
294
+ anyway since for the self agent any incoming message is
295
+ external.
311
296
 
312
297
  Returns:
313
298
  ChatAgentResponse: A struct containing the output messages,
@@ -318,7 +303,7 @@ class ChatAgent(BaseAgent):
318
303
 
319
304
  output_messages: List[BaseMessage]
320
305
  info: Dict[str, Any]
321
- called_funcs: List[FunctionCallingRecord] = []
306
+ tool_calls: List[FunctionCallingRecord] = []
322
307
  while True:
323
308
  # Format messages and get the token number
324
309
  openai_messages: Optional[List[OpenAIMessage]]
@@ -327,7 +312,7 @@ class ChatAgent(BaseAgent):
327
312
  openai_messages, num_tokens = self.memory.get_context()
328
313
  except RuntimeError as e:
329
314
  return self.step_token_exceed(
330
- e.args[1], called_funcs, "max_tokens_exceeded"
315
+ e.args[1], tool_calls, "max_tokens_exceeded"
331
316
  )
332
317
 
333
318
  # Obtain the model's response
@@ -343,13 +328,15 @@ class ChatAgent(BaseAgent):
343
328
  )
344
329
 
345
330
  if (
346
- self.is_function_calling_enabled()
347
- and finish_reasons[0] == 'function_call'
331
+ self.is_tools_added()
348
332
  and isinstance(response, ChatCompletion)
333
+ and response.choices[0].message.tool_calls is not None
349
334
  ):
335
+ # Tools added for function calling and not in stream mode
336
+
350
337
  # Do function calling
351
338
  func_assistant_msg, func_result_msg, func_record = (
352
- self.step_function_call(response)
339
+ self.step_tool_call(response)
353
340
  )
354
341
 
355
342
  # Update the messages
@@ -359,7 +346,8 @@ class ChatAgent(BaseAgent):
359
346
  self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)
360
347
 
361
348
  # Record the function calling
362
- called_funcs.append(func_record)
349
+ tool_calls.append(func_record)
350
+
363
351
  else:
364
352
  # Function calling disabled or not a function calling
365
353
 
@@ -388,7 +376,7 @@ class ChatAgent(BaseAgent):
388
376
  usage_dict,
389
377
  finish_reasons,
390
378
  num_tokens,
391
- called_funcs,
379
+ tool_calls,
392
380
  )
393
381
  break
394
382
 
@@ -475,7 +463,7 @@ class ChatAgent(BaseAgent):
475
463
  def step_token_exceed(
476
464
  self,
477
465
  num_tokens: int,
478
- called_funcs: List[FunctionCallingRecord],
466
+ tool_calls: List[FunctionCallingRecord],
479
467
  termination_reason: str,
480
468
  ) -> ChatAgentResponse:
481
469
  r"""Return trivial response containing number of tokens and information
@@ -483,7 +471,7 @@ class ChatAgent(BaseAgent):
483
471
 
484
472
  Args:
485
473
  num_tokens (int): Number of tokens in the messages.
486
- called_funcs (List[FunctionCallingRecord]): List of information
474
+ tool_calls (List[FunctionCallingRecord]): List of information
487
475
  objects of functions called in the current step.
488
476
  termination_reason (str): String of termination reason.
489
477
 
@@ -499,7 +487,7 @@ class ChatAgent(BaseAgent):
499
487
  None,
500
488
  [termination_reason],
501
489
  num_tokens,
502
- called_funcs,
490
+ tool_calls,
503
491
  )
504
492
 
505
493
  return ChatAgentResponse(
@@ -508,7 +496,7 @@ class ChatAgent(BaseAgent):
508
496
  info,
509
497
  )
510
498
 
511
- def step_function_call(
499
+ def step_tool_call(
512
500
  self,
513
501
  response: ChatCompletion,
514
502
  ) -> Tuple[
@@ -526,14 +514,13 @@ class ChatAgent(BaseAgent):
526
514
  result, and a struct for logging information about this
527
515
  function call.
528
516
  """
529
- # Note that when function calling is enabled, `n` is set to 1.
530
517
  choice = response.choices[0]
531
- if choice.message.function_call is None:
532
- raise RuntimeError("Function call is None")
533
- func_name = choice.message.function_call.name
518
+ if choice.message.tool_calls is None:
519
+ raise RuntimeError("Tool calls is None")
520
+ func_name = choice.message.tool_calls[0].function.name
534
521
  func = self.func_dict[func_name]
535
522
 
536
- args_str: str = choice.message.function_call.arguments
523
+ args_str: str = choice.message.tool_calls[0].function.arguments
537
524
  args = json.loads(args_str.replace("'", "\""))
538
525
 
539
526
  # Pass the extracted arguments to the indicated function
@@ -0,0 +1,221 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from typing import Any, Optional, Union
15
+
16
+ from unstructured.documents.elements import Element
17
+
18
+ from camel.agents import ChatAgent
19
+ from camel.messages import BaseMessage
20
+ from camel.prompts import TextPrompt
21
+ from camel.storages.graph_storages.graph_element import (
22
+ GraphElement,
23
+ Node,
24
+ Relationship,
25
+ )
26
+ from camel.types import ModelType, RoleType
27
+
28
+ text_prompt = """
29
+ You are tasked with extracting nodes and relationships from given content and structures them into Node and Relationship objects. Here's the outline of what you needs to do:
30
+
31
+ Content Extraction:
32
+ You should be able to process input content and identify entities mentioned within it.
33
+ Entities can be any noun phrases or concepts that represent distinct entities in the context of the given content.
34
+
35
+ Node Extraction:
36
+ For each identified entity, you should create a Node object.
37
+ Each Node object should have a unique identifier (id) and a type (type).
38
+ Additional properties associated with the node can also be extracted and stored.
39
+
40
+ Relationship Extraction:
41
+ You should identify relationships between entities mentioned in the content.
42
+ For each relationship, create a Relationship object.
43
+ A Relationship object should have a subject (subj) and an object (obj) which are Node objects representing the entities involved in the relationship.
44
+ Each relationship should also have a type (type), and additional properties if applicable.
45
+
46
+ Output Formatting:
47
+ The extracted nodes and relationships should be formatted as instances of the provided Node and Relationship classes.
48
+ Ensure that the extracted data adheres to the structure defined by the classes.
49
+ Output the structured data in a format that can be easily validated against the provided code.
50
+
51
+ Instructions for you:
52
+ Read the provided content thoroughly.
53
+ Identify distinct entities mentioned in the content and categorize them as nodes.
54
+ Determine relationships between these entities and represent them as directed relationships.
55
+ Provide the extracted nodes and relationships in the specified format below.
56
+ Example for you:
57
+
58
+ Example Content:
59
+ "John works at XYZ Corporation. He is a software engineer. The company is located in New York City."
60
+
61
+ Expected Output:
62
+
63
+ Nodes:
64
+
65
+ Node(id='John', type='Person', properties={'agent_generated'})
66
+ Node(id='XYZ Corporation', type='Organization', properties={'agent_generated'})
67
+ Node(id='New York City', type='Location', properties={'agent_generated'})
68
+
69
+ Relationships:
70
+
71
+ Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ Corporation', type='Organization'), type='WorksAt', properties={'agent_generated'})
72
+ Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City', type='Location'), type='ResidesIn', properties={'agent_generated'})
73
+
74
+ ===== TASK =====
75
+ Please extracts nodes and relationships from given content and structures them into Node and Relationship objects.
76
+
77
+ {task}
78
+ """
79
+
80
+
81
+ class KnowledgeGraphAgent(ChatAgent):
82
+ r"""An agent that can extract node and relationship information for different entities from given `Element` content.
83
+
84
+ Attributes:
85
+ task_prompt (TextPrompt): A prompt for the agent to extract node and
86
+ relationship information for different entities.
87
+ """
88
+
89
+ def __init__(
90
+ self,
91
+ model_type: ModelType = ModelType.GPT_3_5_TURBO,
92
+ model_config: Optional[Any] = None,
93
+ ) -> None:
94
+ r"""Initialize the `KnowledgeGraphAgent`.
95
+
96
+ Args:
97
+ model_type (ModelType, optional): The type of model to use for the
98
+ agent. Defaults to `ModelType.GPT_3_5_TURBO`.
99
+ model_config (Any, optional): The configuration for the model.
100
+ Defaults to `None`.
101
+ """
102
+ system_message = BaseMessage(
103
+ role_name="Graphify",
104
+ role_type=RoleType.ASSISTANT,
105
+ meta_dict=None,
106
+ content="Your mission is to transform unstructured content "
107
+ "intostructured graph data. Extract nodes and relationships with "
108
+ "precision, and let the connections unfold. Your graphs will "
109
+ "illuminate the hidden connections within the chaos of information.",
110
+ )
111
+ super().__init__(system_message, model_type, model_config)
112
+
113
+ def run(
114
+ self,
115
+ element: Union[str, Element],
116
+ parse_graph_elements: bool = False,
117
+ ) -> Union[str, GraphElement]:
118
+ r"""Run the agent to extract node and relationship information.
119
+
120
+ Args:
121
+ element (Union[str, Element]): The input element or string.
122
+ parse_graph_elements (bool, optional): Whether to parse into
123
+ `GraphElement`. Defaults to `False`.
124
+
125
+ Returns:
126
+ Union[str, GraphElement]: The extracted node and relationship
127
+ information. If `parse_graph_elements` is `True` then return `GraphElement`, else return `str`.
128
+ """
129
+ self.reset()
130
+ self.element = element
131
+
132
+ knowledge_graph_prompt = TextPrompt(text_prompt)
133
+ knowledge_graph_generation = knowledge_graph_prompt.format(
134
+ task=str(element)
135
+ )
136
+
137
+ knowledge_graph_generation_msg = BaseMessage.make_user_message(
138
+ role_name="Graphify", content=knowledge_graph_generation
139
+ )
140
+
141
+ response = self.step(input_message=knowledge_graph_generation_msg)
142
+
143
+ content = response.msg.content
144
+
145
+ if parse_graph_elements:
146
+ content = self._parse_graph_elements(content)
147
+
148
+ return content
149
+
150
+ def _validate_node(self, node: Node) -> bool:
151
+ r"""Validate if the object is a valid Node.
152
+
153
+ Args:
154
+ node (Node): Object to be validated.
155
+
156
+ Returns:
157
+ bool: True if the object is a valid Node, False otherwise.
158
+ """
159
+ return (
160
+ isinstance(node, Node)
161
+ and isinstance(node.id, (str, int))
162
+ and isinstance(node.type, str)
163
+ )
164
+
165
+ def _validate_relationship(self, relationship: Relationship) -> bool:
166
+ r"""Validate if the object is a valid Relationship.
167
+
168
+ Args:
169
+ relationship (Relationship): Object to be validated.
170
+
171
+ Returns:
172
+ bool: True if the object is a valid Relationship, False otherwise.
173
+ """
174
+ return (
175
+ isinstance(relationship, Relationship)
176
+ and self._validate_node(relationship.subj)
177
+ and self._validate_node(relationship.obj)
178
+ and isinstance(relationship.type, str)
179
+ )
180
+
181
+ def _parse_graph_elements(self, input_string: str) -> GraphElement:
182
+ r"""Parses graph elements from given content.
183
+
184
+ Args:
185
+ input_string (str): The input content.
186
+
187
+ Returns:
188
+ GraphElement: The parsed graph elements.
189
+ """
190
+ import re
191
+
192
+ # Regular expressions to extract nodes and relationships
193
+ node_pattern = r"Node\(id='(.*?)', type='(.*?)', properties=(.*?)\)"
194
+ rel_pattern = r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)', properties=\{(.*?)\}\)"
195
+
196
+ nodes = {}
197
+ relationships = []
198
+
199
+ # Extract nodes
200
+ for match in re.finditer(node_pattern, input_string):
201
+ id, type, properties = match.groups()
202
+ properties = eval(properties)
203
+ if id not in nodes:
204
+ node = Node(id, type, properties)
205
+ if self._validate_node(node):
206
+ nodes[id] = node
207
+
208
+ # Extract relationships
209
+ for match in re.finditer(rel_pattern, input_string):
210
+ subj_id, subj_type, obj_id, obj_type, rel_type, properties_str = (
211
+ match.groups()
212
+ )
213
+ properties = eval(properties_str)
214
+ if subj_id in nodes and obj_id in nodes:
215
+ subj = nodes[subj_id]
216
+ obj = nodes[obj_id]
217
+ relationship = Relationship(subj, obj, rel_type, properties)
218
+ if self._validate_relationship(relationship):
219
+ relationships.append(relationship)
220
+
221
+ return GraphElement(list(nodes.values()), relationships, self.element)
@@ -0,0 +1,29 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
15
+ from .base_config import BaseConfig
16
+ from .openai_config import (
17
+ OPENAI_API_PARAMS,
18
+ ChatGPTConfig,
19
+ OpenSourceConfig,
20
+ )
21
+
22
# Public names re-exported by ``camel.configs``: the config dataclasses
# and the per-backend sets of accepted API parameter names.
__all__ = [
    'BaseConfig',
    'ChatGPTConfig',
    'OPENAI_API_PARAMS',
    'AnthropicConfig',
    'ANTHROPIC_API_PARAMS',
    'OpenSourceConfig',
]
@@ -0,0 +1,73 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from __future__ import annotations
15
+
16
+ from dataclasses import asdict, dataclass
17
+
18
+ from anthropic._types import NOT_GIVEN, NotGiven
19
+
20
+ from camel.configs.base_config import BaseConfig
21
+
22
+
23
@dataclass(frozen=True)
class AnthropicConfig(BaseConfig):
    r"""Defines the parameters for generating chat completions using the
    Anthropic API.

    See: https://docs.anthropic.com/claude/reference/complete_post

    Args:
        max_tokens (int, optional): The maximum number of tokens to
            generate before stopping. Note that Anthropic models may stop
            before reaching this maximum. This parameter only specifies the
            absolute maximum number of tokens to generate.
            (default: :obj:`256`)
        stop_sequences (List[str], optional): Sequences that will cause the
            model to stop generating completion text. Anthropic models stop
            on "\n\nHuman:", and may include additional built-in stop
            sequences in the future. By providing the stop_sequences
            parameter, you may include additional strings that will cause
            the model to stop generating.
            (default: :obj:`NOT_GIVEN`)
        temperature (float, optional): Amount of randomness injected into
            the response. Ranges from 0 to 1. Use temp closer to 0 for
            analytical / multiple choice, and closer to 1 for creative
            and generative tasks.
            (default: :obj:`1`)
        top_p (float, optional): Use nucleus sampling. In nucleus sampling,
            we compute the cumulative distribution over all the options for
            each subsequent token in decreasing probability order and cut
            it off once it reaches a particular probability specified by
            `top_p`. You should either alter `temperature` or `top_p`,
            but not both.
            (default: :obj:`NOT_GIVEN`)
        top_k (int, optional): Only sample from the top K options for each
            subsequent token. Used to remove "long tail" low probability
            responses.
            (default: :obj:`NOT_GIVEN`)
        metadata: An object describing metadata about the request.
            (default: :obj:`NOT_GIVEN`)
        stream (bool, optional): Whether to incrementally stream the
            response using server-sent events.
            (default: :obj:`False`)
    """

    # Field defaults below are the source of truth for the docstring; the
    # NOT_GIVEN sentinel means "omit the parameter from the API request".
    max_tokens: int = 256
    stop_sequences: list[str] | NotGiven = NOT_GIVEN
    temperature: float = 1
    top_p: float | NotGiven = NOT_GIVEN
    top_k: int | NotGiven = NOT_GIVEN
    metadata: NotGiven = NOT_GIVEN
    stream: bool = False
71
+
72
+
73
# The set of parameter names accepted by the Anthropic API, derived from
# the dataclass fields so the two can never drift apart. Iterating a dict
# yields its keys, so set(asdict(...)) replaces the redundant
# {param for param in ...keys()} comprehension (ruff C401).
ANTHROPIC_API_PARAMS = set(asdict(AnthropicConfig()))
@@ -0,0 +1,22 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from __future__ import annotations
15
+
16
+ from abc import ABC
17
+ from dataclasses import dataclass
18
+
19
+
20
@dataclass(frozen=True)
class BaseConfig(ABC):  # noqa: B024
    r"""Abstract base class for model-backend configuration dataclasses.

    Concrete configs (e.g. ``AnthropicConfig``) inherit from this frozen
    dataclass so they share a common immutable type. It deliberately
    declares no abstract methods (hence the ``noqa: B024``).
    """

    pass