camel-ai 0.1.9__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic; see the registry's advisory page for more details.

camel/__init__.py CHANGED
@@ -12,7 +12,7 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
- __version__ = '0.1.9'
15
+ __version__ = '0.2.1'
16
16
 
17
17
  __all__ = [
18
18
  '__version__',
@@ -181,6 +181,16 @@ class ChatAgent(BaseAgent):
181
181
  tool.get_function_name(): tool.func for tool in all_tools
182
182
  }
183
183
 
184
+ # If the user hasn't configured tools in `BaseModelBackend`,
185
+ # the tools set from `ChatAgent` will be used.
186
+ # This design simplifies the interface while retaining tool-running
187
+ # capabilities for `BaseModelBackend`.
188
+ if all_tools and not self.model_backend.model_config_dict['tools']:
189
+ tool_schema_list = [
190
+ tool.get_openai_tool_schema() for tool in all_tools
191
+ ]
192
+ self.model_backend.model_config_dict['tools'] = tool_schema_list
193
+
184
194
  self.model_config_dict = self.model_backend.model_config_dict
185
195
 
186
196
  self.model_token_limit = token_limit or self.model_backend.token_limit
@@ -612,9 +622,11 @@ class ChatAgent(BaseAgent):
612
622
 
613
623
  # Replace the original tools with the structuring function
614
624
  self.func_dict = {func.get_function_name(): func.func}
625
+ self.model_backend.model_config_dict = original_model_dict.copy()
615
626
  self.model_backend.model_config_dict["tools"] = [
616
627
  func.get_openai_tool_schema()
617
628
  ]
629
+ self.model_backend.model_config_dict["tool_choice"] = "required"
618
630
 
619
631
  openai_messages, num_tokens = self.memory.get_context()
620
632
  (
@@ -49,7 +49,6 @@ class Firecrawl:
49
49
  self,
50
50
  url: str,
51
51
  params: Optional[Dict[str, Any]] = None,
52
- wait_until_done: bool = True,
53
52
  **kwargs: Any,
54
53
  ) -> Any:
55
54
  r"""Crawl a URL and all accessible subpages. Customize the crawl by
@@ -60,14 +59,12 @@ class Firecrawl:
60
59
  url (str): The URL to crawl.
61
60
  params (Optional[Dict[str, Any]]): Additional parameters for the
62
61
  crawl request. Defaults to `None`.
63
- wait_until_done (bool): Whether to wait until the crawl job is
64
- completed. Defaults to `True`.
65
62
  **kwargs (Any): Additional keyword arguments, such as
66
- `poll_interval`, `idempotency_key`, etc.
63
+ `poll_interval`, `idempotency_key`.
67
64
 
68
65
  Returns:
69
- Any: The list content of the URL if `wait_until_done` is True;
70
- otherwise, a string job ID.
66
+ Any: The crawl job ID or the crawl results if waiting until
67
+ completion.
71
68
 
72
69
  Raises:
73
70
  RuntimeError: If the crawling process fails.
@@ -78,13 +75,8 @@ class Firecrawl:
78
75
  url=url,
79
76
  params=params,
80
77
  **kwargs,
81
- wait_until_done=wait_until_done,
82
- )
83
- return (
84
- crawl_response
85
- if wait_until_done
86
- else crawl_response.get("jobId")
87
78
  )
79
+ return crawl_response
88
80
  except Exception as e:
89
81
  raise RuntimeError(f"Failed to crawl the URL: {e}")
90
82
 
@@ -103,7 +95,10 @@ class Firecrawl:
103
95
  """
104
96
 
105
97
  try:
106
- crawl_result = self.app.crawl_url(url=url)
98
+ crawl_result = self.app.crawl_url(
99
+ url,
100
+ {'formats': ['markdown']},
101
+ )
107
102
  if not isinstance(crawl_result, list):
108
103
  raise ValueError("Unexpected response format")
109
104
  markdown_contents = [
@@ -180,41 +175,14 @@ class Firecrawl:
180
175
  data = self.app.scrape_url(
181
176
  url,
182
177
  {
183
- 'extractorOptions': {
184
- "mode": "llm-extraction",
185
- "extractionPrompt": "Based on the information on "
186
- "the page, extract the information from the schema.",
187
- 'extractionSchema': output_schema.model_json_schema(),
188
- },
189
- 'pageOptions': {'onlyMainContent': True},
178
+ 'formats': ['extract'],
179
+ 'extract': {'schema': output_schema.model_json_schema()},
190
180
  },
191
181
  )
192
- return data.get("llm_extraction", {})
182
+ return data.get("extract", {})
193
183
  except Exception as e:
194
184
  raise RuntimeError(f"Failed to perform structured scrape: {e}")
195
185
 
196
- def tidy_scrape(self, url: str) -> str:
197
- r"""Only return the main content of the page, excluding headers,
198
- navigation bars, footers, etc. in Markdown format.
199
-
200
- Args:
201
- url (str): The URL to read.
202
-
203
- Returns:
204
- str: The markdown content of the URL.
205
-
206
- Raises:
207
- RuntimeError: If the scrape process fails.
208
- """
209
-
210
- try:
211
- scrape_result = self.app.scrape_url(
212
- url, {'pageOptions': {'onlyMainContent': True}}
213
- )
214
- return scrape_result.get("markdown", "")
215
- except Exception as e:
216
- raise RuntimeError(f"Failed to perform tidy scrape: {e}")
217
-
218
186
  def map_site(
219
187
  self, url: str, params: Optional[Dict[str, Any]] = None
220
188
  ) -> list:
@@ -93,7 +93,7 @@ class MistralModel(BaseModelBackend):
93
93
  "name": tool_call.function.name, # type: ignore[union-attr]
94
94
  "arguments": tool_call.function.arguments, # type: ignore[union-attr]
95
95
  },
96
- type=tool_call.TYPE, # type: ignore[union-attr]
96
+ type=tool_call.type, # type: ignore[union-attr]
97
97
  )
98
98
  for tool_call in response.choices[0].message.tool_calls
99
99
  ]
@@ -93,6 +93,22 @@ class OpenAIModel(BaseModelBackend):
93
93
  `ChatCompletion` in the non-stream mode, or
94
94
  `Stream[ChatCompletionChunk]` in the stream mode.
95
95
  """
96
+ # o1-preview and o1-mini have Beta limitations
97
+ # reference: https://platform.openai.com/docs/guides/reasoning
98
+ if self.model_type in [ModelType.O1_MINI, ModelType.O1_PREVIEW]:
99
+ # Remove system message that is not supported in o1 model.
100
+ messages = [msg for msg in messages if msg.get("role") != "system"]
101
+
102
+ # Remove unsupported parameters and reset the fixed parameters
103
+ del self.model_config_dict["stream"]
104
+ del self.model_config_dict["tools"]
105
+ del self.model_config_dict["tool_choice"]
106
+ self.model_config_dict["temperature"] = 1.0
107
+ self.model_config_dict["top_p"] = 1.0
108
+ self.model_config_dict["n"] = 1.0
109
+ self.model_config_dict["presence_penalty"] = 0.0
110
+ self.model_config_dict["frequency_penalty"] = 0.0
111
+
96
112
  response = self._client.chat.completions.create(
97
113
  messages=messages,
98
114
  model=self.model_type.value,
camel/tasks/task.py CHANGED
@@ -93,6 +93,10 @@ class Task(BaseModel):
93
93
 
94
94
  result: Optional[str] = ""
95
95
 
96
+ failure_count: int = 0
97
+
98
+ additional_info: Optional[str] = None
99
+
96
100
  @classmethod
97
101
  def from_message(cls, message: BaseMessage) -> "Task":
98
102
  r"""Create a task from a message.
@@ -193,7 +197,7 @@ class Task(BaseModel):
193
197
  def decompose(
194
198
  self,
195
199
  agent: ChatAgent,
196
- template: TextPrompt = TASK_DECOMPOSE_PROMPT,
200
+ prompt: Optional[str] = None,
197
201
  task_parser: Callable[[str, str], List["Task"]] = parse_response,
198
202
  ) -> List["Task"]:
199
203
  r"""Decompose a task to a list of sub-tasks. It can be used for data
@@ -201,8 +205,8 @@ class Task(BaseModel):
201
205
 
202
206
  Args:
203
207
  agent (ChatAgent): An agent that used to decompose the task.
204
- template (TextPrompt): The prompt template to decompose
205
- task. If not provided, the default template will be used.
208
+ prompt (str, optional): A prompt to decompose the task. If not
209
+ provided, the default prompt will be used.
206
210
  task_parser (Callable[[str, str], List[Task]], optional): A
207
211
  function to extract Task from response. If not provided,
208
212
  the default parse_response will be used.
@@ -212,7 +216,7 @@ class Task(BaseModel):
212
216
  """
213
217
 
214
218
  role_name = agent.role_name
215
- content = template.format(
219
+ content = prompt or TASK_DECOMPOSE_PROMPT.format(
216
220
  role_name=role_name,
217
221
  content=self.content,
218
222
  )
@@ -221,6 +225,8 @@ class Task(BaseModel):
221
225
  )
222
226
  response = agent.step(msg)
223
227
  tasks = task_parser(response.msg.content, self.id)
228
+ for task in tasks:
229
+ task.additional_info = self.additional_info
224
230
  return tasks
225
231
 
226
232
  def compose(
@@ -248,6 +254,7 @@ class Task(BaseModel):
248
254
  content = template.format(
249
255
  role_name=role_name,
250
256
  content=self.content,
257
+ additional_info=self.additional_info,
251
258
  other_results=sub_tasks_result,
252
259
  )
253
260
  msg = BaseMessage.make_user_message(
@@ -39,6 +39,10 @@ The root task is:
39
39
 
40
40
  {content}
41
41
 
42
+ The additional information of the task is:
43
+
44
+ {additional_info}
45
+
42
46
  The related tasks result and status:
43
47
 
44
48
  {other_results}
@@ -19,19 +19,18 @@ from .openai_function import (
19
19
  )
20
20
  from .open_api_specs.security_config import openapi_security_config
21
21
 
22
- from .google_maps_toolkit import MAP_FUNCS, GoogleMapsToolkit
23
- from .math_toolkit import MATH_FUNCS, MathToolkit
24
- from .open_api_toolkit import OPENAPI_FUNCS, OpenAPIToolkit
25
- from .retrieval_toolkit import RETRIEVAL_FUNCS, RetrievalToolkit
26
- from .search_toolkit import SEARCH_FUNCS, SearchToolkit
27
- from .twitter_toolkit import TWITTER_FUNCS, TwitterToolkit
28
- from .weather_toolkit import WEATHER_FUNCS, WeatherToolkit
29
- from .slack_toolkit import SLACK_FUNCS, SlackToolkit
30
- from .dalle_toolkit import DALLE_FUNCS, DalleToolkit
31
- from .linkedin_toolkit import LINKEDIN_FUNCS, LinkedInToolkit
32
- from .reddit_toolkit import REDDIT_FUNCS, RedditToolkit
22
+ from .google_maps_toolkit import GoogleMapsToolkit
23
+ from .math_toolkit import MathToolkit, MATH_FUNCS
24
+ from .open_api_toolkit import OpenAPIToolkit
25
+ from .retrieval_toolkit import RetrievalToolkit
26
+ from .search_toolkit import SearchToolkit, SEARCH_FUNCS
27
+ from .twitter_toolkit import TwitterToolkit
28
+ from .weather_toolkit import WeatherToolkit, WEATHER_FUNCS
29
+ from .slack_toolkit import SlackToolkit
30
+ from .dalle_toolkit import DalleToolkit, DALLE_FUNCS
31
+ from .linkedin_toolkit import LinkedInToolkit
32
+ from .reddit_toolkit import RedditToolkit
33
33
 
34
- from .base import BaseToolkit
35
34
  from .code_execution import CodeExecutionToolkit
36
35
  from .github_toolkit import GithubToolkit
37
36
 
@@ -40,18 +39,6 @@ __all__ = [
40
39
  'get_openai_function_schema',
41
40
  'get_openai_tool_schema',
42
41
  'openapi_security_config',
43
- 'MATH_FUNCS',
44
- 'MAP_FUNCS',
45
- 'OPENAPI_FUNCS',
46
- 'RETRIEVAL_FUNCS',
47
- 'SEARCH_FUNCS',
48
- 'TWITTER_FUNCS',
49
- 'WEATHER_FUNCS',
50
- 'SLACK_FUNCS',
51
- 'DALLE_FUNCS',
52
- 'LINKEDIN_FUNCS',
53
- 'REDDIT_FUNCS',
54
- 'BaseToolkit',
55
42
  'GithubToolkit',
56
43
  'MathToolkit',
57
44
  'GoogleMapsToolkit',
@@ -65,4 +52,8 @@ __all__ = [
65
52
  'LinkedInToolkit',
66
53
  'RedditToolkit',
67
54
  'CodeExecutionToolkit',
55
+ 'MATH_FUNCS',
56
+ 'SEARCH_FUNCS',
57
+ 'WEATHER_FUNCS',
58
+ 'DALLE_FUNCS',
68
59
  ]
@@ -142,10 +142,6 @@ class GoogleMapsToolkit(BaseToolkit):
142
142
  information on address completion, formatted address,
143
143
  geographical coordinates (latitude and longitude), and metadata
144
144
  types true for the address.
145
-
146
- Raises:
147
- ImportError: If the `googlemaps` library is not installed.
148
- Exception: For unexpected errors during the address validation.
149
145
  """
150
146
  addressvalidation_result = self.gmaps.addressvalidation(
151
147
  [address],
@@ -304,6 +300,3 @@ class GoogleMapsToolkit(BaseToolkit):
304
300
  OpenAIFunction(self.get_elevation),
305
301
  OpenAIFunction(self.get_timezone),
306
302
  ]
307
-
308
-
309
- MAP_FUNCS: List[OpenAIFunction] = GoogleMapsToolkit().get_tools()
@@ -225,6 +225,3 @@ class LinkedInToolkit(BaseToolkit):
225
225
  if not token:
226
226
  return "Access token not found. Please set LINKEDIN_ACCESS_TOKEN."
227
227
  return token
228
-
229
-
230
- LINKEDIN_FUNCS: List[OpenAIFunction] = LinkedInToolkit().get_tools()
@@ -542,6 +542,3 @@ class OpenAPIToolkit:
542
542
  OpenAIFunction(a_func, a_schema)
543
543
  for a_func, a_schema in zip(all_funcs_lst, all_schemas_lst)
544
544
  ]
545
-
546
-
547
- OPENAPI_FUNCS: List[OpenAIFunction] = OpenAPIToolkit().get_tools()
@@ -232,6 +232,3 @@ class RedditToolkit(BaseToolkit):
232
232
  OpenAIFunction(self.perform_sentiment_analysis),
233
233
  OpenAIFunction(self.track_keyword_discussions),
234
234
  ]
235
-
236
-
237
- REDDIT_FUNCS: List[OpenAIFunction] = RedditToolkit().get_tools()
@@ -86,7 +86,3 @@ class RetrievalToolkit(BaseToolkit):
86
86
  return [
87
87
  OpenAIFunction(self.information_retrieval),
88
88
  ]
89
-
90
-
91
- # add the function to OpenAIFunction list
92
- RETRIEVAL_FUNCS: List[OpenAIFunction] = RetrievalToolkit().get_tools()
@@ -303,6 +303,3 @@ class SlackToolkit(BaseToolkit):
303
303
  OpenAIFunction(self.send_slack_message),
304
304
  OpenAIFunction(self.delete_slack_message),
305
305
  ]
306
-
307
-
308
- SLACK_FUNCS: List[OpenAIFunction] = SlackToolkit().get_tools()
@@ -410,7 +410,7 @@ class TwitterToolkit(BaseToolkit):
410
410
  return TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET
411
411
 
412
412
  def _get_oauth_session(self) -> requests.Session:
413
- r'''Initiates an OAuth1Session with Twitter's API and returns it.
413
+ r"""Initiates an OAuth1Session with Twitter's API and returns it.
414
414
 
415
415
  The function first fetches a request token, then prompts the user to
416
416
  authorize the application. After the user has authorized the
@@ -431,7 +431,7 @@ class TwitterToolkit(BaseToolkit):
431
431
  Manage-Tweets/create_tweet.py
432
432
  https://github.com/twitterdev/Twitter-API-v2-sample-code/blob/main/
433
433
  User-Lookup/get_users_me_user_context.py
434
- '''
434
+ """
435
435
  try:
436
436
  from requests_oauthlib import OAuth1Session
437
437
  except ImportError:
@@ -517,6 +517,3 @@ class TwitterToolkit(BaseToolkit):
517
517
  return "HTTP Exception"
518
518
  else:
519
519
  return "Unexpected Exception"
520
-
521
-
522
- TWITTER_FUNCS: List[OpenAIFunction] = TwitterToolkit().get_tools()
camel/utils/commons.py CHANGED
@@ -381,10 +381,17 @@ def json_to_function_code(json_obj: Dict) -> str:
381
381
  docstring_args = []
382
382
  return_keys = []
383
383
 
384
+ prop_to_python = {
385
+ 'string': 'str',
386
+ 'number': 'float',
387
+ 'integer': 'int',
388
+ 'boolean': 'bool',
389
+ }
390
+
384
391
  for prop in required:
385
392
  description = properties[prop]['description']
386
393
  prop_type = properties[prop]['type']
387
- python_type = 'str' if prop_type == 'string' else prop_type
394
+ python_type = prop_to_python.get(prop_type, prop_type)
388
395
  args.append(f"{prop}: {python_type}")
389
396
  docstring_args.append(
390
397
  f" {prop} ({python_type}): {description}."
@@ -193,8 +193,14 @@ def get_model_encoding(value_for_tiktoken: str):
193
193
  try:
194
194
  encoding = tiktoken.encoding_for_model(value_for_tiktoken)
195
195
  except KeyError:
196
- print("Model not found. Using cl100k_base encoding.")
197
- encoding = tiktoken.get_encoding("cl100k_base")
196
+ if value_for_tiktoken in [
197
+ ModelType.O1_MINI.value,
198
+ ModelType.O1_PREVIEW.value,
199
+ ]:
200
+ encoding = tiktoken.get_encoding("o200k_base")
201
+ else:
202
+ print("Model not found. Using cl100k_base encoding.")
203
+ encoding = tiktoken.get_encoding("cl100k_base")
198
204
  return encoding
199
205
 
200
206
 
@@ -12,12 +12,12 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
- from .base import BaseNode
16
- from .manager_node import ManagerNode
17
- from .worker_node import WorkerNode
15
+ from .role_playing_worker import RolePlayingWorker
16
+ from .single_agent_worker import SingleAgentWorker
17
+ from .workforce import Workforce
18
18
 
19
19
  __all__ = [
20
- "BaseNode",
21
- "WorkerNode",
22
- "ManagerNode",
20
+ "Workforce",
21
+ "SingleAgentWorker",
22
+ "RolePlayingWorker",
23
23
  ]
camel/workforce/base.py CHANGED
@@ -15,36 +15,40 @@ from abc import ABC, abstractmethod
15
15
  from typing import Any
16
16
 
17
17
  from camel.workforce.task_channel import TaskChannel
18
+ from camel.workforce.utils import check_if_running
18
19
 
19
20
 
20
21
  class BaseNode(ABC):
21
22
  def __init__(self, description: str) -> None:
22
23
  self.node_id = str(id(self))
23
24
  self.description = description
24
- # every node is initialized to use its own channel
25
25
  self._channel: TaskChannel = TaskChannel()
26
26
  self._running = False
27
27
 
28
+ @check_if_running(False)
28
29
  def reset(self, *args: Any, **kwargs: Any) -> Any:
29
30
  """Resets the node to its initial state."""
30
- raise NotImplementedError()
31
+ self._channel = TaskChannel()
32
+ self._running = False
31
33
 
32
34
  @abstractmethod
33
35
  def set_channel(self, channel: TaskChannel):
34
36
  r"""Sets the channel for the node."""
37
+ pass
35
38
 
36
39
  @abstractmethod
37
40
  async def _listen_to_channel(self):
38
41
  r"""Listens to the channel and handle tasks. This method should be
39
42
  the main loop for the node.
40
43
  """
44
+ pass
41
45
 
42
46
  @abstractmethod
43
47
  async def start(self):
44
48
  r"""Start the node."""
49
+ pass
45
50
 
46
51
  @abstractmethod
47
52
  def stop(self):
48
- r"""
49
- Stop the node.
50
- """
53
+ r"""Stop the node."""
54
+ pass
@@ -0,0 +1,175 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from camel.prompts import TextPrompt
15
+
16
+ # ruff: noqa: E501
17
+ CREATE_NODE_PROMPT = TextPrompt(
18
+ """You need to use the given information to create a new worker node that contains a single agent for solving the category of tasks of the given one.
19
+ The content of the given task is:
20
+
21
+ ==============================
22
+ {content}
23
+ ==============================
24
+
25
+ Here are some additional information about the task:
26
+
27
+ ==============================
28
+ {additional_info}
29
+ ==============================
30
+
31
+ Following is the information of the existing worker nodes. The format is <ID>:<description>:<additional_info>.
32
+
33
+ ==============================
34
+ {child_nodes_info}
35
+ ==============================
36
+
37
+ You must return the following information:
38
+ 1. The role of the agent working in the worker node, e.g. "programmer", "researcher", "product owner".
39
+ 2. The system message that will be sent to the agent in the node.
40
+ 3. The description of the new worker node itself.
41
+
42
+ You should ensure that the node created is capable of solving all the tasks in the same category as the given one, don't make it too specific.
43
+ Also, there should be no big overlap between the new work node and the existing ones.
44
+ The information returned should be concise and clear.
45
+ """
46
+ )
47
+
48
+ ASSIGN_TASK_PROMPT = TextPrompt(
49
+ """You need to assign the task to a worker node.
50
+ The content of the task is:
51
+
52
+ ==============================
53
+ {content}
54
+ ==============================
55
+
56
+ Here are some additional information about the task:
57
+
58
+ ==============================
59
+ {additional_info}
60
+ ==============================
61
+
62
+ Following is the information of the existing worker nodes. The format is <ID>:<description>:<additional_info>.
63
+
64
+ ==============================
65
+ {child_nodes_info}
66
+ ==============================
67
+
68
+ You must return the ID of the worker node that you think is most capable of doing the task.
69
+ """
70
+ )
71
+
72
+ PROCESS_TASK_PROMPT = TextPrompt(
73
+ """You need to process one given task.
74
+ Here are results of some prerequisite tasks that you can refer to:
75
+
76
+ ==============================
77
+ {dependency_tasks_info}
78
+ ==============================
79
+
80
+ The content of the task that you need to do is:
81
+
82
+ ==============================
83
+ {content}
84
+ ==============================
85
+
86
+ Here are some additional information about the task:
87
+
88
+ ==============================
89
+ {additional_info}
90
+ ==============================
91
+
92
+ You are asked to return the result of the given task.
93
+ However, if you think you can't finish the task, you MUST set the fail flag and leave the result empty.
94
+ """
95
+ )
96
+
97
+
98
+ ROLEPLAY_PROCESS_TASK_PROMPT = TextPrompt(
99
+ """You need to process the task. It is recommended that tools be actively called when needed.
100
+ Here are results of some prerequisite tasks that you can refer to:
101
+
102
+ ==============================
103
+ {dependency_task_info}
104
+ ==============================
105
+
106
+ The content of the task that you need to do is:
107
+
108
+ ==============================
109
+ {content}
110
+ ==============================
111
+
112
+ Here are some additional information about the task:
113
+
114
+ ==============================
115
+ {additional_info}
116
+ ==============================
117
+
118
+ You must return the result of the given task.
119
+ """
120
+ )
121
+
122
+ ROLEPLAY_SUMMARIZE_PROMPT = TextPrompt(
123
+ """For this scenario, the roles of the user is {user_role} and role of the assistant is {assistant_role}.
124
+ Here is the content of the task they are trying to solve:
125
+
126
+ ==============================
127
+ {task_content}
128
+ ==============================
129
+
130
+ Here are some additional information about the task:
131
+
132
+ ==============================
133
+ {additional_info}
134
+ ==============================
135
+
136
+ Here is their chat history on the task:
137
+
138
+ ==============================
139
+ {chat_history}
140
+ ==============================
141
+
142
+ Now you should summarize the scenario and return the result of the task.
143
+ However, if you think they didn't finish the task, you MUST set the fail flag and leave the result empty.
144
+ """
145
+ )
146
+
147
+ WF_TASK_DECOMPOSE_PROMPT = r"""You need to split the given task into
148
+ subtasks according to the workers available in the group.
149
+ The content of the task is:
150
+
151
+ ==============================
152
+ {content}
153
+ ==============================
154
+
155
+ There are some additional information about the task:
156
+
157
+ ==============================
158
+ {additional_info}
159
+ ==============================
160
+
161
+ Following are the available workers, given in the format <ID>: <description>.
162
+
163
+ ==============================
164
+ {child_nodes_info}
165
+ ==============================
166
+
167
+ You must return the subtasks in the format of a numbered list within <tasks> tags, as shown below:
168
+
169
+ <tasks>
170
+ <task>Subtask 1</task>
171
+ <task>Subtask 2</task>
172
+ </tasks>
173
+
174
+ Though it's not a must, you should try your best effort to make each subtask achievable for a worker. The tasks should be clear and concise.
175
+ """