camel-ai 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai has been flagged as potentially problematic by the registry.
- camel/__init__.py +1 -1
- camel/agents/__init__.py +2 -0
- camel/agents/chat_agent.py +40 -53
- camel/agents/knowledge_graph_agent.py +221 -0
- camel/configs/__init__.py +29 -0
- camel/configs/anthropic_config.py +73 -0
- camel/configs/base_config.py +22 -0
- camel/configs/openai_config.py +132 -0
- camel/embeddings/openai_embedding.py +7 -2
- camel/functions/__init__.py +13 -8
- camel/functions/open_api_function.py +380 -0
- camel/functions/open_api_specs/coursera/__init__.py +13 -0
- camel/functions/open_api_specs/coursera/openapi.yaml +82 -0
- camel/functions/open_api_specs/klarna/__init__.py +13 -0
- camel/functions/open_api_specs/klarna/openapi.yaml +87 -0
- camel/functions/open_api_specs/speak/__init__.py +13 -0
- camel/functions/open_api_specs/speak/openapi.yaml +151 -0
- camel/functions/openai_function.py +3 -1
- camel/functions/retrieval_functions.py +61 -0
- camel/functions/slack_functions.py +275 -0
- camel/models/__init__.py +2 -0
- camel/models/anthropic_model.py +16 -2
- camel/models/base_model.py +8 -2
- camel/models/model_factory.py +7 -3
- camel/models/openai_audio_models.py +251 -0
- camel/models/openai_model.py +12 -4
- camel/models/stub_model.py +5 -1
- camel/retrievers/__init__.py +2 -0
- camel/retrievers/auto_retriever.py +47 -36
- camel/retrievers/base.py +42 -37
- camel/retrievers/bm25_retriever.py +10 -19
- camel/retrievers/cohere_rerank_retriever.py +108 -0
- camel/retrievers/vector_retriever.py +43 -26
- camel/storages/vectordb_storages/qdrant.py +3 -1
- camel/toolkits/__init__.py +21 -0
- camel/toolkits/base.py +22 -0
- camel/toolkits/github_toolkit.py +245 -0
- camel/types/__init__.py +6 -0
- camel/types/enums.py +44 -3
- camel/utils/__init__.py +4 -2
- camel/utils/commons.py +97 -173
- {camel_ai-0.1.3.dist-info → camel_ai-0.1.5.dist-info}/METADATA +9 -3
- {camel_ai-0.1.3.dist-info → camel_ai-0.1.5.dist-info}/RECORD +44 -26
- camel/configs.py +0 -271
- {camel_ai-0.1.3.dist-info → camel_ai-0.1.5.dist-info}/WHEEL +0 -0
camel/__init__.py
CHANGED
camel/agents/__init__.py
CHANGED
@@ -15,6 +15,7 @@ from .base import BaseAgent
 from .chat_agent import ChatAgent
 from .critic_agent import CriticAgent
 from .embodied_agent import EmbodiedAgent
+from .knowledge_graph_agent import KnowledgeGraphAgent
 from .role_assignment_agent import RoleAssignmentAgent
 from .task_agent import (
     TaskCreationAgent,
@@ -37,4 +38,5 @@ __all__ = [
     'HuggingFaceToolAgent',
     'EmbodiedAgent',
     'RoleAssignmentAgent',
+    'KnowledgeGraphAgent',
 ]
camel/agents/chat_agent.py
CHANGED
@@ -19,7 +19,7 @@ from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
 
 from camel.agents.base import BaseAgent
-from camel.configs import ChatGPTConfig, ChatGPTVisionConfig
+from camel.configs import ChatGPTConfig
 from camel.memories import (
     AgentMemory,
     ChatHistoryMemory,
@@ -84,6 +84,9 @@ class ChatAgent(BaseAgent):
             responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
         model_config (BaseConfig, optional): Configuration options for the
             LLM model. (default: :obj:`None`)
+        api_key (str, optional): The API key for authenticating with the
+            LLM service. Only OpenAI and Anthropic model supported (default:
+            :obj:`None`)
         memory (AgentMemory, optional): The agent memory for managing chat
             messages. If `None`, a :obj:`ChatHistoryMemory` will be used.
             (default: :obj:`None`)
@@ -96,7 +99,7 @@ class ChatAgent(BaseAgent):
             (default: :obj:`None`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
-        function_list (List[OpenAIFunction], optional): List of available
+        tools (List[OpenAIFunction], optional): List of available
             :obj:`OpenAIFunction`. (default: :obj:`None`)
         response_terminators (List[ResponseTerminator], optional): List of
             :obj:`ResponseTerminator` bind to one chat agent.
@@ -108,11 +111,12 @@ class ChatAgent(BaseAgent):
         system_message: BaseMessage,
         model_type: Optional[ModelType] = None,
         model_config: Optional[BaseConfig] = None,
+        api_key: Optional[str] = None,
         memory: Optional[AgentMemory] = None,
         message_window_size: Optional[int] = None,
         token_limit: Optional[int] = None,
         output_language: Optional[str] = None,
-        function_list: Optional[List[OpenAIFunction]] = None,
+        tools: Optional[List[OpenAIFunction]] = None,
         response_terminators: Optional[List[ResponseTerminator]] = None,
     ) -> None:
         self.orig_sys_message: BaseMessage = system_message
@@ -128,34 +132,14 @@ class ChatAgent(BaseAgent):
         )
 
         self.func_dict: Dict[str, Callable] = {}
-        if function_list is not None:
-            for func in function_list:
+        if tools is not None:
+            for func in tools:
                 self.func_dict[func.get_function_name()] = func.func
 
-        self.model_config: BaseConfig
-        if self.model_type == ModelType.GPT_4_TURBO_VISION:
-            if model_config is not None and not isinstance(
-                model_config, ChatGPTVisionConfig
-            ):
-                raise ValueError(
-                    "Please use `ChatGPTVisionConfig` as "
-                    "the `model_config` when `model_type` "
-                    "is `GPT_4_TURBO_VISION`"
-                )
-            self.model_config = model_config or ChatGPTVisionConfig()
-        else:
-            if model_config is not None and isinstance(
-                model_config, ChatGPTVisionConfig
-            ):
-                raise ValueError(
-                    "Please don't use `ChatGPTVisionConfig` as "
-                    "the `model_config` when `model_type` "
-                    "is not `GPT_4_TURBO_VISION`"
-                )
-            self.model_config = model_config or ChatGPTConfig()
-
+        self.model_config = model_config or ChatGPTConfig()
+        self._api_key = api_key
         self.model_backend: BaseModelBackend = ModelFactory.create(
-            self.model_type, self.model_config.__dict__
+            self.model_type, self.model_config.__dict__, self._api_key
         )
         self.model_token_limit = token_limit or self.model_backend.token_limit
         context_creator = ScoreBasedContextCreator(
@@ -201,12 +185,12 @@ class ChatAgent(BaseAgent):
         """
         self._system_message = message
 
-    def is_function_calling_enabled(self) -> bool:
+    def is_tools_added(self) -> bool:
         r"""Whether OpenAI function calling is enabled for this agent.
 
         Returns:
             bool: Whether OpenAI function calling is enabled for this
-                agent, determined by whether the dictionary of functions
+                agent, determined by whether the dictionary of tools
                 is empty.
         """
         return len(self.func_dict) > 0
@@ -249,7 +233,7 @@ class ChatAgent(BaseAgent):
         usage: Optional[Dict[str, int]],
         termination_reasons: List[str],
         num_tokens: int,
-        called_funcs: List[FunctionCallingRecord],
+        tool_calls: List[FunctionCallingRecord],
     ) -> Dict[str, Any]:
         r"""Returns a dictionary containing information about the chat session.
 
@@ -260,9 +244,9 @@ class ChatAgent(BaseAgent):
             termination_reasons (List[str]): The reasons for the termination
                 of the chat session.
            num_tokens (int): The number of tokens used in the chat session.
-            called_funcs (List[FunctionCallingRecord]): The list of function
+            tool_calls (List[FunctionCallingRecord]): The list of function
                 calling records, containing the information of called
-                functions.
+                tools.
 
         Returns:
             Dict[str, Any]: The chat session information.
@@ -272,7 +256,7 @@ class ChatAgent(BaseAgent):
             "usage": usage,
             "termination_reasons": termination_reasons,
             "num_tokens": num_tokens,
-            "called_funcs": called_funcs,
+            "tool_calls": tool_calls,
         }
 
     def init_messages(self) -> None:
@@ -305,9 +289,10 @@ class ChatAgent(BaseAgent):
 
         Args:
             input_message (BaseMessage): The input message to the agent.
-                Its `role` field that specifies the role at backend may be either
-                `user` or `assistant` but it will be set to `user` anyway since
-                for the self agent any incoming message is external.
+                Its `role` field that specifies the role at backend may be
+                either `user` or `assistant` but it will be set to `user`
+                anyway since for the self agent any incoming message is
+                external.
 
         Returns:
             ChatAgentResponse: A struct containing the output messages,
@@ -318,7 +303,7 @@ class ChatAgent(BaseAgent):
 
         output_messages: List[BaseMessage]
         info: Dict[str, Any]
-        called_funcs: List[FunctionCallingRecord] = []
+        tool_calls: List[FunctionCallingRecord] = []
         while True:
             # Format messages and get the token number
             openai_messages: Optional[List[OpenAIMessage]]
@@ -327,7 +312,7 @@ class ChatAgent(BaseAgent):
                 openai_messages, num_tokens = self.memory.get_context()
             except RuntimeError as e:
                 return self.step_token_exceed(
-                    e.args[1], called_funcs, "max_tokens_exceeded"
+                    e.args[1], tool_calls, "max_tokens_exceeded"
                 )
 
             # Obtain the model's response
@@ -343,13 +328,15 @@ class ChatAgent(BaseAgent):
             )
 
             if (
-                self.is_function_calling_enabled()
-                and finish_reasons[0] == 'function_call'
+                self.is_tools_added()
                 and isinstance(response, ChatCompletion)
+                and response.choices[0].message.tool_calls is not None
             ):
+                # Tools added for function calling and not in stream mode
+
                 # Do function calling
                 func_assistant_msg, func_result_msg, func_record = (
-                    self.step_function_call(response)
+                    self.step_tool_call(response)
                 )
 
                 # Update the messages
@@ -359,7 +346,8 @@ class ChatAgent(BaseAgent):
                 self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)
 
                 # Record the function calling
-                called_funcs.append(func_record)
+                tool_calls.append(func_record)
+
             else:
                 # Function calling disabled or not a function calling
 
@@ -388,7 +376,7 @@ class ChatAgent(BaseAgent):
                 usage_dict,
                 finish_reasons,
                 num_tokens,
-                called_funcs,
+                tool_calls,
             )
             break
 
@@ -475,7 +463,7 @@ class ChatAgent(BaseAgent):
     def step_token_exceed(
         self,
         num_tokens: int,
-        called_funcs: List[FunctionCallingRecord],
+        tool_calls: List[FunctionCallingRecord],
         termination_reason: str,
     ) -> ChatAgentResponse:
         r"""Return trivial response containing number of tokens and information
@@ -483,7 +471,7 @@ class ChatAgent(BaseAgent):
 
         Args:
             num_tokens (int): Number of tokens in the messages.
-            called_funcs (List[FunctionCallingRecord]): List of information
+            tool_calls (List[FunctionCallingRecord]): List of information
                 objects of functions called in the current step.
             termination_reason (str): String of termination reason.
 
@@ -499,7 +487,7 @@ class ChatAgent(BaseAgent):
             None,
             [termination_reason],
             num_tokens,
-            called_funcs,
+            tool_calls,
         )
 
         return ChatAgentResponse(
@@ -508,7 +496,7 @@ class ChatAgent(BaseAgent):
             info,
         )
 
-    def step_function_call(
+    def step_tool_call(
         self,
         response: ChatCompletion,
     ) -> Tuple[
@@ -526,14 +514,13 @@ class ChatAgent(BaseAgent):
             result, and a struct for logging information about this
             function call.
         """
-        # Note that when function calling is enabled, `n` is set to 1.
         choice = response.choices[0]
-        if choice.message.function_call is None:
-            raise RuntimeError("Function call is None")
-        func_name = choice.message.function_call.name
+        if choice.message.tool_calls is None:
+            raise RuntimeError("Tool calls is None")
+        func_name = choice.message.tool_calls[0].function.name
         func = self.func_dict[func_name]
 
-        args_str: str = choice.message.function_call.arguments
+        args_str: str = choice.message.tool_calls[0].function.arguments
         args = json.loads(args_str.replace("'", "\""))
 
         # Pass the extracted arguments to the indicated function
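Taken together, this diff renames the function-calling surface (`function_list` → `tools`, `is_function_calling_enabled` → `is_tools_added`, `step_function_call` → `step_tool_call`, `called_funcs` → `tool_calls`) and threads a new per-agent `api_key` through to `ModelFactory.create`. A minimal usage sketch of the 0.1.5 constructor, assuming only the signatures visible above; the key string is a placeholder, and exposing the `get_info` dict as `response.info` is an assumption:

from camel.agents import ChatAgent
from camel.configs import ChatGPTConfig
from camel.messages import BaseMessage
from camel.types import ModelType, RoleType

# Build a system message the same way KnowledgeGraphAgent does below.
sys_msg = BaseMessage(
    role_name="Assistant",
    role_type=RoleType.ASSISTANT,
    meta_dict=None,
    content="You are a helpful assistant.",
)

agent = ChatAgent(
    system_message=sys_msg,
    model_type=ModelType.GPT_3_5_TURBO,
    model_config=ChatGPTConfig(),
    api_key="sk-...",  # new in 0.1.5; passed as the third argument to ModelFactory.create
    tools=None,        # renamed from `function_list`
)

user_msg = BaseMessage.make_user_message(role_name="User", content="Hello!")
response = agent.step(input_message=user_msg)
print(response.msg.content)
print(response.info["tool_calls"])  # assumed accessor; info key renamed from "called_funcs"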
camel/agents/knowledge_graph_agent.py
ADDED
@@ -0,0 +1,221 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from typing import Any, Optional, Union
+
+from unstructured.documents.elements import Element
+
+from camel.agents import ChatAgent
+from camel.messages import BaseMessage
+from camel.prompts import TextPrompt
+from camel.storages.graph_storages.graph_element import (
+    GraphElement,
+    Node,
+    Relationship,
+)
+from camel.types import ModelType, RoleType
+
+text_prompt = """
+You are tasked with extracting nodes and relationships from given content and structures them into Node and Relationship objects. Here's the outline of what you needs to do:
+
+Content Extraction:
+You should be able to process input content and identify entities mentioned within it.
+Entities can be any noun phrases or concepts that represent distinct entities in the context of the given content.
+
+Node Extraction:
+For each identified entity, you should create a Node object.
+Each Node object should have a unique identifier (id) and a type (type).
+Additional properties associated with the node can also be extracted and stored.
+
+Relationship Extraction:
+You should identify relationships between entities mentioned in the content.
+For each relationship, create a Relationship object.
+A Relationship object should have a subject (subj) and an object (obj) which are Node objects representing the entities involved in the relationship.
+Each relationship should also have a type (type), and additional properties if applicable.
+
+Output Formatting:
+The extracted nodes and relationships should be formatted as instances of the provided Node and Relationship classes.
+Ensure that the extracted data adheres to the structure defined by the classes.
+Output the structured data in a format that can be easily validated against the provided code.
+
+Instructions for you:
+Read the provided content thoroughly.
+Identify distinct entities mentioned in the content and categorize them as nodes.
+Determine relationships between these entities and represent them as directed relationships.
+Provide the extracted nodes and relationships in the specified format below.
+Example for you:
+
+Example Content:
+"John works at XYZ Corporation. He is a software engineer. The company is located in New York City."
+
+Expected Output:
+
+Nodes:
+
+Node(id='John', type='Person', properties={'agent_generated'})
+Node(id='XYZ Corporation', type='Organization', properties={'agent_generated'})
+Node(id='New York City', type='Location', properties={'agent_generated'})
+
+Relationships:
+
+Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ Corporation', type='Organization'), type='WorksAt', properties={'agent_generated'})
+Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City', type='Location'), type='ResidesIn', properties={'agent_generated'})
+
+===== TASK =====
+Please extracts nodes and relationships from given content and structures them into Node and Relationship objects.
+
+{task}
+"""
+
+
+class KnowledgeGraphAgent(ChatAgent):
+    r"""An agent that can extract node and relationship information for different entities from given `Element` content.
+
+    Attributes:
+        task_prompt (TextPrompt): A prompt for the agent to extract node and
+            relationship information for different entities.
+    """
+
+    def __init__(
+        self,
+        model_type: ModelType = ModelType.GPT_3_5_TURBO,
+        model_config: Optional[Any] = None,
+    ) -> None:
+        r"""Initialize the `KnowledgeGraphAgent`.
+
+        Args:
+            model_type (ModelType, optional): The type of model to use for the
+                agent. Defaults to `ModelType.GPT_3_5_TURBO`.
+            model_config (Any, optional): The configuration for the model.
+                Defaults to `None`.
+        """
+        system_message = BaseMessage(
+            role_name="Graphify",
+            role_type=RoleType.ASSISTANT,
+            meta_dict=None,
+            content="Your mission is to transform unstructured content "
+            "intostructured graph data. Extract nodes and relationships with "
+            "precision, and let the connections unfold. Your graphs will "
+            "illuminate the hidden connections within the chaos of information.",
+        )
+        super().__init__(system_message, model_type, model_config)
+
+    def run(
+        self,
+        element: Union[str, Element],
+        parse_graph_elements: bool = False,
+    ) -> Union[str, GraphElement]:
+        r"""Run the agent to extract node and relationship information.
+
+        Args:
+            element (Union[str, Element]): The input element or string.
+            parse_graph_elements (bool, optional): Whether to parse into
+                `GraphElement`. Defaults to `False`.
+
+        Returns:
+            Union[str, GraphElement]: The extracted node and relationship
+                information. If `parse_graph_elements` is `True` then return `GraphElement`, else return `str`.
+        """
+        self.reset()
+        self.element = element
+
+        knowledge_graph_prompt = TextPrompt(text_prompt)
+        knowledge_graph_generation = knowledge_graph_prompt.format(
+            task=str(element)
+        )
+
+        knowledge_graph_generation_msg = BaseMessage.make_user_message(
+            role_name="Graphify", content=knowledge_graph_generation
+        )
+
+        response = self.step(input_message=knowledge_graph_generation_msg)
+
+        content = response.msg.content
+
+        if parse_graph_elements:
+            content = self._parse_graph_elements(content)
+
+        return content
+
+    def _validate_node(self, node: Node) -> bool:
+        r"""Validate if the object is a valid Node.
+
+        Args:
+            node (Node): Object to be validated.
+
+        Returns:
+            bool: True if the object is a valid Node, False otherwise.
+        """
+        return (
+            isinstance(node, Node)
+            and isinstance(node.id, (str, int))
+            and isinstance(node.type, str)
+        )
+
+    def _validate_relationship(self, relationship: Relationship) -> bool:
+        r"""Validate if the object is a valid Relationship.
+
+        Args:
+            relationship (Relationship): Object to be validated.
+
+        Returns:
+            bool: True if the object is a valid Relationship, False otherwise.
+        """
+        return (
+            isinstance(relationship, Relationship)
+            and self._validate_node(relationship.subj)
+            and self._validate_node(relationship.obj)
+            and isinstance(relationship.type, str)
+        )
+
+    def _parse_graph_elements(self, input_string: str) -> GraphElement:
+        r"""Parses graph elements from given content.
+
+        Args:
+            input_string (str): The input content.
+
+        Returns:
+            GraphElement: The parsed graph elements.
+        """
+        import re
+
+        # Regular expressions to extract nodes and relationships
+        node_pattern = r"Node\(id='(.*?)', type='(.*?)', properties=(.*?)\)"
+        rel_pattern = r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)', properties=\{(.*?)\}\)"
+
+        nodes = {}
+        relationships = []
+
+        # Extract nodes
+        for match in re.finditer(node_pattern, input_string):
+            id, type, properties = match.groups()
+            properties = eval(properties)
+            if id not in nodes:
+                node = Node(id, type, properties)
+                if self._validate_node(node):
+                    nodes[id] = node
+
+        # Extract relationships
+        for match in re.finditer(rel_pattern, input_string):
+            subj_id, subj_type, obj_id, obj_type, rel_type, properties_str = (
+                match.groups()
+            )
+            properties = eval(properties_str)
+            if subj_id in nodes and obj_id in nodes:
+                subj = nodes[subj_id]
+                obj = nodes[obj_id]
+                relationship = Relationship(subj, obj, rel_type, properties)
+                if self._validate_relationship(relationship):
+                    relationships.append(relationship)
+
+        return GraphElement(list(nodes.values()), relationships, self.element)
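A usage sketch for the new agent, relying only on the `run` signature above (passing a plain string; an unstructured `Element` works the same way per the type hint):

from camel.agents import KnowledgeGraphAgent

kg_agent = KnowledgeGraphAgent()  # defaults to ModelType.GPT_3_5_TURBO

text = "John works at XYZ Corporation. The company is located in New York City."

# Default parse_graph_elements=False: returns the model's raw reply as a str.
raw_reply = kg_agent.run(text)

# parse_graph_elements=True: the reply is regex-parsed into a GraphElement.
graph = kg_agent.run(text, parse_graph_elements=True)

Note that `_parse_graph_elements` passes the captured `properties` text from the model's reply straight to `eval`, so untrusted model output reaches `eval`; `ast.literal_eval` would be the cautious substitute.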
camel/configs/__init__.py
ADDED
@@ -0,0 +1,29 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
+from .base_config import BaseConfig
+from .openai_config import (
+    OPENAI_API_PARAMS,
+    ChatGPTConfig,
+    OpenSourceConfig,
+)
+
+__all__ = [
+    'BaseConfig',
+    'ChatGPTConfig',
+    'OPENAI_API_PARAMS',
+    'AnthropicConfig',
+    'ANTHROPIC_API_PARAMS',
+    'OpenSourceConfig',
+]
camel/configs/anthropic_config.py
ADDED
@@ -0,0 +1,73 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass
+
+from anthropic._types import NOT_GIVEN, NotGiven
+
+from camel.configs.base_config import BaseConfig
+
+
+@dataclass(frozen=True)
+class AnthropicConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    Anthropic API.
+
+    See: https://docs.anthropic.com/claude/reference/complete_post
+    Args:
+        max_tokens (int, optional): The maximum number of tokens to
+            generate before stopping. Note that Anthropic models may stop
+            before reaching this maximum. This parameter only specifies the
+            absolute maximum number of tokens to generate.
+            (default: :obj:`256`)
+        stop_sequences (List[str], optional): Sequences that will cause the
+            model to stop generating completion text. Anthropic models stop
+            on "\n\nHuman:", and may include additional built-in stop sequences
+            in the future. By providing the stop_sequences parameter, you may
+            include additional strings that will cause the model to stop
+            generating.
+        temperature (float, optional): Amount of randomness injected into the
+            response. Defaults to 1. Ranges from 0 to 1. Use temp closer to 0
+            for analytical / multiple choice, and closer to 1 for creative
+            and generative tasks.
+            (default: :obj:`1`)
+        top_p (float, optional): Use nucleus sampling. In nucleus sampling, we
+            compute the cumulative distribution over all the options for each
+            subsequent token in decreasing probability order and cut it off
+            once it reaches a particular probability specified by `top_p`.
+            You should either alter `temperature` or `top_p`,
+            but not both.
+            (default: :obj:`0.7`)
+        top_k (int, optional): Only sample from the top K options for each
+            subsequent token. Used to remove "long tail" low probability
+            responses.
+            (default: :obj:`5`)
+        metadata: An object describing metadata about the request.
+        stream (bool, optional): Whether to incrementally stream the response
+            using server-sent events.
+            (default: :obj:`False`)
+
+    """
+
+    max_tokens: int = 256
+    stop_sequences: list[str] | NotGiven = NOT_GIVEN
+    temperature: float = 1
+    top_p: float | NotGiven = NOT_GIVEN
+    top_k: int | NotGiven = NOT_GIVEN
+    metadata: NotGiven = NOT_GIVEN
+    stream: bool = False
+
+
+ANTHROPIC_API_PARAMS = {param for param in asdict(AnthropicConfig()).keys()}
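Since `AnthropicConfig` is a frozen dataclass, `ANTHROPIC_API_PARAMS` ends up as the set of its field names, computed once from a default instance. A sketch of the intended filtering pattern (the filtering step is an assumption about how a backend consumes it, not a quote from `anthropic_model.py`):

from dataclasses import asdict

from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig

config = AnthropicConfig(max_tokens=512, temperature=0.3)

# Keep only keys the Anthropic endpoint accepts; fields left at their
# defaults still carry the NOT_GIVEN sentinel, which the anthropic client
# treats as "parameter omitted".
kwargs = {k: v for k, v in asdict(config).items() if k in ANTHROPIC_API_PARAMS}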
camel/configs/base_config.py
ADDED
@@ -0,0 +1,22 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from abc import ABC
+from dataclasses import dataclass
+
+
+@dataclass(frozen=True)
+class BaseConfig(ABC):  # noqa: B024
+    pass