camel-ai 0.1.5.1__py3-none-any.whl → 0.1.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic; see the registry's advisory page for more details.

Files changed (86)
  1. camel/agents/__init__.py +2 -0
  2. camel/agents/chat_agent.py +237 -52
  3. camel/agents/critic_agent.py +6 -9
  4. camel/agents/deductive_reasoner_agent.py +93 -40
  5. camel/agents/embodied_agent.py +6 -9
  6. camel/agents/knowledge_graph_agent.py +49 -27
  7. camel/agents/role_assignment_agent.py +14 -12
  8. camel/agents/search_agent.py +122 -0
  9. camel/agents/task_agent.py +26 -38
  10. camel/bots/__init__.py +20 -0
  11. camel/bots/discord_bot.py +103 -0
  12. camel/bots/telegram_bot.py +84 -0
  13. camel/configs/__init__.py +3 -0
  14. camel/configs/anthropic_config.py +1 -1
  15. camel/configs/litellm_config.py +113 -0
  16. camel/configs/openai_config.py +14 -0
  17. camel/embeddings/__init__.py +2 -0
  18. camel/embeddings/openai_embedding.py +2 -2
  19. camel/embeddings/sentence_transformers_embeddings.py +6 -5
  20. camel/embeddings/vlm_embedding.py +146 -0
  21. camel/functions/__init__.py +9 -0
  22. camel/functions/open_api_function.py +161 -33
  23. camel/functions/open_api_specs/biztoc/__init__.py +13 -0
  24. camel/functions/open_api_specs/biztoc/ai-plugin.json +34 -0
  25. camel/functions/open_api_specs/biztoc/openapi.yaml +21 -0
  26. camel/functions/open_api_specs/create_qr_code/__init__.py +13 -0
  27. camel/functions/open_api_specs/create_qr_code/openapi.yaml +44 -0
  28. camel/functions/open_api_specs/nasa_apod/__init__.py +13 -0
  29. camel/functions/open_api_specs/nasa_apod/openapi.yaml +72 -0
  30. camel/functions/open_api_specs/outschool/__init__.py +13 -0
  31. camel/functions/open_api_specs/outschool/ai-plugin.json +34 -0
  32. camel/functions/open_api_specs/outschool/openapi.yaml +1 -0
  33. camel/functions/open_api_specs/outschool/paths/__init__.py +14 -0
  34. camel/functions/open_api_specs/outschool/paths/get_classes.py +29 -0
  35. camel/functions/open_api_specs/outschool/paths/search_teachers.py +29 -0
  36. camel/functions/open_api_specs/security_config.py +21 -0
  37. camel/functions/open_api_specs/web_scraper/__init__.py +13 -0
  38. camel/functions/open_api_specs/web_scraper/ai-plugin.json +34 -0
  39. camel/functions/open_api_specs/web_scraper/openapi.yaml +71 -0
  40. camel/functions/open_api_specs/web_scraper/paths/__init__.py +13 -0
  41. camel/functions/open_api_specs/web_scraper/paths/scraper.py +29 -0
  42. camel/functions/openai_function.py +3 -1
  43. camel/functions/search_functions.py +104 -171
  44. camel/functions/slack_functions.py +16 -3
  45. camel/human.py +3 -1
  46. camel/loaders/base_io.py +3 -1
  47. camel/loaders/unstructured_io.py +16 -22
  48. camel/messages/base.py +135 -46
  49. camel/models/__init__.py +8 -0
  50. camel/models/anthropic_model.py +24 -16
  51. camel/models/base_model.py +6 -1
  52. camel/models/litellm_model.py +112 -0
  53. camel/models/model_factory.py +44 -16
  54. camel/models/nemotron_model.py +71 -0
  55. camel/models/ollama_model.py +121 -0
  56. camel/models/open_source_model.py +8 -2
  57. camel/models/openai_model.py +14 -5
  58. camel/models/stub_model.py +3 -1
  59. camel/models/zhipuai_model.py +125 -0
  60. camel/prompts/__init__.py +6 -0
  61. camel/prompts/base.py +2 -1
  62. camel/prompts/descripte_video_prompt.py +33 -0
  63. camel/prompts/generate_text_embedding_data.py +79 -0
  64. camel/prompts/task_prompt_template.py +13 -3
  65. camel/retrievers/auto_retriever.py +20 -11
  66. camel/retrievers/base.py +4 -2
  67. camel/retrievers/bm25_retriever.py +2 -1
  68. camel/retrievers/cohere_rerank_retriever.py +2 -1
  69. camel/retrievers/vector_retriever.py +10 -4
  70. camel/societies/babyagi_playing.py +2 -1
  71. camel/societies/role_playing.py +18 -20
  72. camel/storages/graph_storages/base.py +1 -0
  73. camel/storages/graph_storages/neo4j_graph.py +5 -3
  74. camel/storages/vectordb_storages/base.py +2 -1
  75. camel/storages/vectordb_storages/milvus.py +5 -2
  76. camel/toolkits/github_toolkit.py +120 -26
  77. camel/types/__init__.py +5 -2
  78. camel/types/enums.py +95 -4
  79. camel/utils/__init__.py +11 -2
  80. camel/utils/commons.py +78 -4
  81. camel/utils/constants.py +26 -0
  82. camel/utils/token_counting.py +62 -7
  83. {camel_ai-0.1.5.1.dist-info → camel_ai-0.1.5.3.dist-info}/METADATA +82 -53
  84. camel_ai-0.1.5.3.dist-info/RECORD +151 -0
  85. camel_ai-0.1.5.1.dist-info/RECORD +0 -119
  86. {camel_ai-0.1.5.1.dist-info → camel_ai-0.1.5.3.dist-info}/WHEEL +0 -0
@@ -23,8 +23,8 @@ from camel.interpreters import (
23
23
  SubprocessInterpreter,
24
24
  )
25
25
  from camel.messages import BaseMessage
26
+ from camel.models import BaseModelBackend
26
27
  from camel.responses import ChatAgentResponse
27
- from camel.types import ModelType
28
28
  from camel.utils import print_text_animated
29
29
 
30
30
 
@@ -33,10 +33,9 @@ class EmbodiedAgent(ChatAgent):
33
33
 
34
34
  Args:
35
35
  system_message (BaseMessage): The system message for the chat agent.
36
- model_type (ModelType, optional): The LLM model to use for generating
37
- responses. (default :obj:`ModelType.GPT_4`)
38
- model_config (Any, optional): Configuration options for the LLM model.
39
- (default: :obj:`None`)
36
+ model (BaseModelBackend, optional): The model backend to use for
37
+ generating responses. (default: :obj:`OpenAIModel` with
38
+ `GPT_3_5_TURBO`)
40
39
  message_window_size (int, optional): The maximum number of previous
41
40
  messages to include in the context window. If `None`, no windowing
42
41
  is performed. (default: :obj:`None`)
@@ -55,8 +54,7 @@ class EmbodiedAgent(ChatAgent):
55
54
  def __init__(
56
55
  self,
57
56
  system_message: BaseMessage,
58
- model_type: ModelType = ModelType.GPT_4,
59
- model_config: Optional[Any] = None,
57
+ model: Optional[BaseModelBackend] = None,
60
58
  message_window_size: Optional[int] = None,
61
59
  tool_agents: Optional[List[BaseToolAgent]] = None,
62
60
  code_interpreter: Optional[BaseInterpreter] = None,
@@ -78,8 +76,7 @@ class EmbodiedAgent(ChatAgent):
78
76
  self.logger_color = logger_color
79
77
  super().__init__(
80
78
  system_message=system_message,
81
- model_type=model_type,
82
- model_config=model_config,
79
+ model=model,
83
80
  message_window_size=message_window_size,
84
81
  )
85
82
 
@@ -11,52 +11,65 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- from typing import Any, Optional, Union
14
+ from typing import Optional, Union
15
15
 
16
16
  from unstructured.documents.elements import Element
17
17
 
18
18
  from camel.agents import ChatAgent
19
19
  from camel.messages import BaseMessage
20
+ from camel.models import BaseModelBackend
20
21
  from camel.prompts import TextPrompt
21
22
  from camel.storages.graph_storages.graph_element import (
22
23
  GraphElement,
23
24
  Node,
24
25
  Relationship,
25
26
  )
26
- from camel.types import ModelType, RoleType
27
+ from camel.types import RoleType
27
28
 
28
29
  text_prompt = """
29
- You are tasked with extracting nodes and relationships from given content and structures them into Node and Relationship objects. Here's the outline of what you needs to do:
30
+ You are tasked with extracting nodes and relationships from given content and
31
+ structures them into Node and Relationship objects. Here's the outline of what
32
+ you needs to do:
30
33
 
31
34
  Content Extraction:
32
- You should be able to process input content and identify entities mentioned within it.
33
- Entities can be any noun phrases or concepts that represent distinct entities in the context of the given content.
35
+ You should be able to process input content and identify entities mentioned
36
+ within it.
37
+ Entities can be any noun phrases or concepts that represent distinct entities
38
+ in the context of the given content.
34
39
 
35
40
  Node Extraction:
36
41
  For each identified entity, you should create a Node object.
37
42
  Each Node object should have a unique identifier (id) and a type (type).
38
- Additional properties associated with the node can also be extracted and stored.
43
+ Additional properties associated with the node can also be extracted and
44
+ stored.
39
45
 
40
46
  Relationship Extraction:
41
47
  You should identify relationships between entities mentioned in the content.
42
48
  For each relationship, create a Relationship object.
43
- A Relationship object should have a subject (subj) and an object (obj) which are Node objects representing the entities involved in the relationship.
44
- Each relationship should also have a type (type), and additional properties if applicable.
49
+ A Relationship object should have a subject (subj) and an object (obj) which
50
+ are Node objects representing the entities involved in the relationship.
51
+ Each relationship should also have a type (type), and additional properties if
52
+ applicable.
45
53
 
46
54
  Output Formatting:
47
- The extracted nodes and relationships should be formatted as instances of the provided Node and Relationship classes.
55
+ The extracted nodes and relationships should be formatted as instances of the
56
+ provided Node and Relationship classes.
48
57
  Ensure that the extracted data adheres to the structure defined by the classes.
49
- Output the structured data in a format that can be easily validated against the provided code.
58
+ Output the structured data in a format that can be easily validated against
59
+ the provided code.
50
60
 
51
61
  Instructions for you:
52
62
  Read the provided content thoroughly.
53
- Identify distinct entities mentioned in the content and categorize them as nodes.
54
- Determine relationships between these entities and represent them as directed relationships.
63
+ Identify distinct entities mentioned in the content and categorize them as
64
+ nodes.
65
+ Determine relationships between these entities and represent them as directed
66
+ relationships.
55
67
  Provide the extracted nodes and relationships in the specified format below.
56
68
  Example for you:
57
69
 
58
70
  Example Content:
59
- "John works at XYZ Corporation. He is a software engineer. The company is located in New York City."
71
+ "John works at XYZ Corporation. He is a software engineer. The company is
72
+ located in New York City."
60
73
 
61
74
  Expected Output:
62
75
 
@@ -68,18 +81,23 @@ Node(id='New York City', type='Location', properties={'agent_generated'})
68
81
 
69
82
  Relationships:
70
83
 
71
- Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ Corporation', type='Organization'), type='WorksAt', properties={'agent_generated'})
72
- Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City', type='Location'), type='ResidesIn', properties={'agent_generated'})
84
+ Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ
85
+ Corporation', type='Organization'), type='WorksAt', properties=
86
+ {'agent_generated'})
87
+ Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City',
88
+ type='Location'), type='ResidesIn', properties={'agent_generated'})
73
89
 
74
90
  ===== TASK =====
75
- Please extracts nodes and relationships from given content and structures them into Node and Relationship objects.
91
+ Please extracts nodes and relationships from given content and structures them
92
+ into Node and Relationship objects.
76
93
 
77
94
  {task}
78
95
  """
79
96
 
80
97
 
81
98
  class KnowledgeGraphAgent(ChatAgent):
82
- r"""An agent that can extract node and relationship information for different entities from given `Element` content.
99
+ r"""An agent that can extract node and relationship information for
100
+ different entities from given `Element` content.
83
101
 
84
102
  Attributes:
85
103
  task_prompt (TextPrompt): A prompt for the agent to extract node and
@@ -88,16 +106,14 @@ class KnowledgeGraphAgent(ChatAgent):
88
106
 
89
107
  def __init__(
90
108
  self,
91
- model_type: ModelType = ModelType.GPT_3_5_TURBO,
92
- model_config: Optional[Any] = None,
109
+ model: Optional[BaseModelBackend] = None,
93
110
  ) -> None:
94
111
  r"""Initialize the `KnowledgeGraphAgent`.
95
112
 
96
113
  Args:
97
- model_type (ModelType, optional): The type of model to use for the
98
- agent. Defaults to `ModelType.GPT_3_5_TURBO`.
99
- model_config (Any, optional): The configuration for the model.
100
- Defaults to `None`.
114
+ model (BaseModelBackend, optional): The model backend to use for
115
+ generating responses. (default: :obj:`OpenAIModel` with
116
+ `GPT_3_5_TURBO`)
101
117
  """
102
118
  system_message = BaseMessage(
103
119
  role_name="Graphify",
@@ -106,9 +122,10 @@ class KnowledgeGraphAgent(ChatAgent):
106
122
  content="Your mission is to transform unstructured content "
107
123
  "intostructured graph data. Extract nodes and relationships with "
108
124
  "precision, and let the connections unfold. Your graphs will "
109
- "illuminate the hidden connections within the chaos of information.",
125
+ "illuminate the hidden connections within the chaos of "
126
+ "information.",
110
127
  )
111
- super().__init__(system_message, model_type, model_config)
128
+ super().__init__(system_message, model=model)
112
129
 
113
130
  def run(
114
131
  self,
@@ -124,7 +141,8 @@ class KnowledgeGraphAgent(ChatAgent):
124
141
 
125
142
  Returns:
126
143
  Union[str, GraphElement]: The extracted node and relationship
127
- information. If `parse_graph_elements` is `True` then return `GraphElement`, else return `str`.
144
+ information. If `parse_graph_elements` is `True` then return
145
+ `GraphElement`, else return `str`.
128
146
  """
129
147
  self.reset()
130
148
  self.element = element
@@ -191,7 +209,11 @@ class KnowledgeGraphAgent(ChatAgent):
191
209
 
192
210
  # Regular expressions to extract nodes and relationships
193
211
  node_pattern = r"Node\(id='(.*?)', type='(.*?)', properties=(.*?)\)"
194
- rel_pattern = r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)', properties=\{(.*?)\}\)"
212
+ rel_pattern = (
213
+ r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
214
+ r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)', "
215
+ r"properties=\{(.*?)\}\)"
216
+ )
195
217
 
196
218
  nodes = {}
197
219
  relationships = []
@@ -12,31 +12,31 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
  import re
15
- from typing import Any, Dict, Optional, Union
15
+ from typing import Dict, Optional, Union
16
16
 
17
17
  from camel.agents.chat_agent import ChatAgent
18
18
  from camel.messages import BaseMessage
19
+ from camel.models import BaseModelBackend
19
20
  from camel.prompts import TextPrompt
20
- from camel.types import ModelType, RoleType
21
+ from camel.types import RoleType
21
22
 
22
23
 
23
24
  class RoleAssignmentAgent(ChatAgent):
24
25
  r"""An agent that generates role names based on the task prompt.
26
+
27
+ Args:
28
+ model (BaseModelBackend, optional): The model backend to use for
29
+ generating responses. (default: :obj:`OpenAIModel` with
30
+ `GPT_3_5_TURBO`)
31
+
25
32
  Attributes:
26
33
  role_assignment_prompt (TextPrompt): A prompt for the agent to generate
27
34
  role names.
28
-
29
- Args:
30
- model_type (ModelType, optional): The type of model to use for the
31
- agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
32
- model_config (Any, optional): The configuration for the model.
33
- (default: :obj:`None`)
34
35
  """
35
36
 
36
37
  def __init__(
37
38
  self,
38
- model_type: ModelType = ModelType.GPT_3_5_TURBO,
39
- model_config: Optional[Any] = None,
39
+ model: Optional[BaseModelBackend] = None,
40
40
  ) -> None:
41
41
  system_message = BaseMessage(
42
42
  role_name="Role Assigner",
@@ -44,7 +44,7 @@ class RoleAssignmentAgent(ChatAgent):
44
44
  meta_dict=None,
45
45
  content="You assign roles based on tasks.",
46
46
  )
47
- super().__init__(system_message, model_type, model_config)
47
+ super().__init__(system_message, model=model)
48
48
 
49
49
  def run(
50
50
  self,
@@ -115,7 +115,9 @@ class RoleAssignmentAgent(ChatAgent):
115
115
  ]
116
116
 
117
117
  if len(role_names) != num_roles or len(role_descriptions) != num_roles:
118
- raise RuntimeError("Got None or insufficient information of roles.")
118
+ raise RuntimeError(
119
+ "Got None or insufficient information of roles."
120
+ )
119
121
  if terminated:
120
122
  raise RuntimeError("Role assignment failed.")
121
123
 
@@ -0,0 +1,122 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from typing import Optional
15
+
16
+ from camel.agents.chat_agent import ChatAgent
17
+ from camel.messages import BaseMessage
18
+ from camel.models import BaseModelBackend
19
+ from camel.prompts import TextPrompt
20
+ from camel.types import RoleType
21
+ from camel.utils import create_chunks
22
+
23
+
24
+ class SearchAgent(ChatAgent):
25
+ r"""An agent that summarizes text based on a query and evaluates the
26
+ relevance of an answer.
27
+
28
+ Args:
29
+ model_type (ModelType, optional): The type of model to use for the
30
+ agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
31
+ model_config (Any, optional): The configuration for the model.
32
+ (default: :obj:`None`)
33
+ """
34
+
35
+ def __init__(
36
+ self,
37
+ model: Optional[BaseModelBackend] = None,
38
+ ) -> None:
39
+ system_message = BaseMessage(
40
+ role_name="Assistant",
41
+ role_type=RoleType.ASSISTANT,
42
+ meta_dict=None,
43
+ content="You are a helpful assistant.",
44
+ )
45
+ super().__init__(system_message, model=model)
46
+
47
+ def summarize_text(self, text: str, query: str) -> str:
48
+ r"""Summarize the information from the text, base on the query.
49
+
50
+ Args:
51
+ text (str): Text to summarize.
52
+ query (str): What information you want.
53
+
54
+ Returns:
55
+ str: Strings with information.
56
+ """
57
+ self.reset()
58
+
59
+ summary_prompt = TextPrompt(
60
+ '''Gather information from this text that relative to the
61
+ question, but do not directly answer the question.\nquestion:
62
+ {query}\ntext '''
63
+ )
64
+ summary_prompt = summary_prompt.format(query=query)
65
+ # Max length of each chunk
66
+ max_len = 3000
67
+ results = ""
68
+ chunks = create_chunks(text, max_len)
69
+ # Summarize
70
+ for i, chunk in enumerate(chunks, start=1):
71
+ prompt = summary_prompt + str(i) + ": " + chunk
72
+ user_msg = BaseMessage.make_user_message(
73
+ role_name="User",
74
+ content=prompt,
75
+ )
76
+ result = self.step(user_msg).msg.content
77
+ results += result + "\n"
78
+
79
+ # Final summarise
80
+ final_prompt = TextPrompt(
81
+ '''Here are some summarized texts which split from one text. Using
82
+ the information to answer the question. If can't find the answer,
83
+ you must answer "I can not find the answer to the query" and
84
+ explain why.\n Query:\n{query}.\n\nText:\n'''
85
+ )
86
+ final_prompt = final_prompt.format(query=query)
87
+ prompt = final_prompt + results
88
+
89
+ user_msg = BaseMessage.make_user_message(
90
+ role_name="User",
91
+ content=prompt,
92
+ )
93
+ response = self.step(user_msg).msg.content
94
+
95
+ return response
96
+
97
+ def continue_search(self, query: str, answer: str) -> bool:
98
+ r"""Ask whether to continue search or not based on the provided answer.
99
+
100
+ Args:
101
+ query (str): The question.
102
+ answer (str): The answer to the question.
103
+
104
+ Returns:
105
+ bool: `True` if the user want to continue search, `False`
106
+ otherwise.
107
+ """
108
+ prompt = TextPrompt(
109
+ "Do you think the ANSWER can answer the QUERY? "
110
+ "Use only 'yes' or 'no' to answer.\n"
111
+ "===== QUERY =====\n{query}\n\n"
112
+ "===== ANSWER =====\n{answer}"
113
+ )
114
+ prompt = prompt.format(query=query, answer=answer)
115
+ user_msg = BaseMessage.make_user_message(
116
+ role_name="User",
117
+ content=prompt,
118
+ )
119
+ response = self.step(user_msg).msg.content
120
+ if "yes" in str(response).lower():
121
+ return False
122
+ return True
@@ -14,10 +14,10 @@
14
14
  from typing import Any, Dict, List, Optional, Union
15
15
 
16
16
  from camel.agents.chat_agent import ChatAgent
17
- from camel.configs import ChatGPTConfig
18
17
  from camel.messages import BaseMessage
18
+ from camel.models import BaseModelBackend
19
19
  from camel.prompts import PromptTemplateGenerator, TextPrompt
20
- from camel.types import ModelType, RoleType, TaskType
20
+ from camel.types import RoleType, TaskType
21
21
  from camel.utils import get_task_list
22
22
 
23
23
 
@@ -30,27 +30,25 @@ class TaskSpecifyAgent(ChatAgent):
30
30
  task_specify_prompt (TextPrompt): The prompt for specifying the task.
31
31
 
32
32
  Args:
33
- model_type (ModelType, optional): The type of model to use for the
34
- agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
33
+ model (BaseModelBackend, optional): The model backend to use for
34
+ generating responses. (default: :obj:`OpenAIModel` with
35
+ `GPT_3_5_TURBO`)
35
36
  task_type (TaskType, optional): The type of task for which to generate
36
37
  a prompt. (default: :obj:`TaskType.AI_SOCIETY`)
37
- model_config (Any, optional): The configuration for the model.
38
- (default: :obj:`None`)
39
38
  task_specify_prompt (Union[str, TextPrompt], optional): The prompt for
40
39
  specifying the task. (default: :obj:`None`)
41
40
  word_limit (int, optional): The word limit for the task prompt.
42
41
  (default: :obj:`50`)
43
42
  output_language (str, optional): The language to be output by the
44
- agent. (default: :obj:`None`)
43
+ agent. (default: :obj:`None`)
45
44
  """
46
45
 
47
46
  DEFAULT_WORD_LIMIT = 50
48
47
 
49
48
  def __init__(
50
49
  self,
51
- model_type: Optional[ModelType] = None,
50
+ model: Optional[BaseModelBackend] = None,
52
51
  task_type: TaskType = TaskType.AI_SOCIETY,
53
- model_config: Optional[Any] = None,
54
52
  task_specify_prompt: Optional[Union[str, TextPrompt]] = None,
55
53
  word_limit: int = DEFAULT_WORD_LIMIT,
56
54
  output_language: Optional[str] = None,
@@ -67,8 +65,6 @@ class TaskSpecifyAgent(ChatAgent):
67
65
  else:
68
66
  self.task_specify_prompt = TextPrompt(task_specify_prompt)
69
67
 
70
- model_config = model_config or ChatGPTConfig(temperature=1.0)
71
-
72
68
  system_message = BaseMessage(
73
69
  role_name="Task Specifier",
74
70
  role_type=RoleType.ASSISTANT,
@@ -78,8 +74,7 @@ class TaskSpecifyAgent(ChatAgent):
78
74
 
79
75
  super().__init__(
80
76
  system_message,
81
- model_type=model_type,
82
- model_config=model_config,
77
+ model=model,
83
78
  output_language=output_language,
84
79
  )
85
80
 
@@ -130,18 +125,16 @@ class TaskPlannerAgent(ChatAgent):
130
125
  the task into subtasks.
131
126
 
132
127
  Args:
133
- model_type (ModelType, optional): The type of model to use for the
134
- agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
135
- model_config (Any, optional): The configuration for the model.
136
- (default: :obj:`None`)
128
+ model (BaseModelBackend, optional): The model backend to use for
129
+ generating responses. (default: :obj:`OpenAIModel` with
130
+ `GPT_3_5_TURBO`)
137
131
  output_language (str, optional): The language to be output by the
138
- agent. (default: :obj:`None`)
132
+ agent. (default: :obj:`None`)
139
133
  """
140
134
 
141
135
  def __init__(
142
136
  self,
143
- model_type: Optional[ModelType] = None,
144
- model_config: Optional[Any] = None,
137
+ model: Optional[BaseModelBackend] = None,
145
138
  output_language: Optional[str] = None,
146
139
  ) -> None:
147
140
  self.task_planner_prompt = TextPrompt(
@@ -156,8 +149,7 @@ class TaskPlannerAgent(ChatAgent):
156
149
 
157
150
  super().__init__(
158
151
  system_message,
159
- model_type,
160
- model_config,
152
+ model=model,
161
153
  output_language=output_language,
162
154
  )
163
155
 
@@ -208,10 +200,9 @@ class TaskCreationAgent(ChatAgent):
208
200
  role_name (str): The role name of the Agent to create the task.
209
201
  objective (Union[str, TextPrompt]): The objective of the Agent to
210
202
  perform the task.
211
- model_type (ModelType, optional): The type of model to use for the
212
- agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
213
- model_config (Any, optional): The configuration for the model.
214
- (default: :obj:`None`)
203
+ model (BaseModelBackend, optional): The LLM backend to use for
204
+ generating responses. (default: :obj:`OpenAIModel` with
205
+ `GPT_3_5_TURBO`)
215
206
  output_language (str, optional): The language to be output by the
216
207
  agent. (default: :obj:`None`)
217
208
  message_window_size (int, optional): The maximum number of previous
@@ -225,8 +216,7 @@ class TaskCreationAgent(ChatAgent):
225
216
  self,
226
217
  role_name: str,
227
218
  objective: Union[str, TextPrompt],
228
- model_type: Optional[ModelType] = None,
229
- model_config: Optional[Any] = None,
219
+ model: Optional[BaseModelBackend] = None,
230
220
  output_language: Optional[str] = None,
231
221
  message_window_size: Optional[int] = None,
232
222
  max_task_num: Optional[int] = 3,
@@ -266,8 +256,7 @@ Be concrete.
266
256
 
267
257
  super().__init__(
268
258
  system_message,
269
- model_type,
270
- model_config,
259
+ model=model,
271
260
  output_language=output_language,
272
261
  message_window_size=message_window_size,
273
262
  )
@@ -282,6 +271,7 @@ Be concrete.
282
271
  Args:
283
272
  task_list (List[str]): The completed or in-progress
284
273
  tasks which should not overlap with new created tasks.
274
+
285
275
  Returns:
286
276
  List[str]: The new task list generated by the Agent.
287
277
  """
@@ -321,10 +311,9 @@ class TaskPrioritizationAgent(ChatAgent):
321
311
  Args:
322
312
  objective (Union[str, TextPrompt]): The objective of the Agent to
323
313
  perform the task.
324
- model_type (ModelType, optional): The type of model to use for the
325
- agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
326
- model_config (Any, optional): The configuration for the model.
327
- (default: :obj:`None`)
314
+ model (BaseModelBackend, optional): The LLM backend to use for
315
+ generating responses. (default: :obj:`OpenAIModel` with
316
+ `GPT_3_5_TURBO`)
328
317
  output_language (str, optional): The language to be output by the
329
318
  agent. (default: :obj:`None`)
330
319
  message_window_size (int, optional): The maximum number of previous
@@ -335,8 +324,7 @@ class TaskPrioritizationAgent(ChatAgent):
335
324
  def __init__(
336
325
  self,
337
326
  objective: Union[str, TextPrompt],
338
- model_type: Optional[ModelType] = None,
339
- model_config: Optional[Any] = None,
327
+ model: Optional[BaseModelBackend] = None,
340
328
  output_language: Optional[str] = None,
341
329
  message_window_size: Optional[int] = None,
342
330
  ) -> None:
@@ -372,8 +360,7 @@ with any other output."""
372
360
 
373
361
  super().__init__(
374
362
  system_message,
375
- model_type,
376
- model_config,
363
+ model=model,
377
364
  output_language=output_language,
378
365
  message_window_size=message_window_size,
379
366
  )
@@ -386,6 +373,7 @@ with any other output."""
386
373
 
387
374
  Args:
388
375
  task_list (List[str]): The unprioritized tasks of agent.
376
+
389
377
  Returns:
390
378
  List[str]: The new prioritized task list generated by the Agent.
391
379
  """
camel/bots/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from .discord_bot import DiscordBot
15
+ from .telegram_bot import TelegramBot
16
+
17
+ __all__ = [
18
+ 'DiscordBot',
19
+ 'TelegramBot',
20
+ ]