camel-ai 0.1.5.2__py3-none-any.whl → 0.1.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of camel-ai might be problematic.

@@ -24,8 +24,6 @@ if TYPE_CHECKING:
 
     from slack_sdk import WebClient
 
-    from slack_sdk.errors import SlackApiError
-
     from camel.functions import OpenAIFunction
 
 logger = logging.getLogger(__name__)
@@ -89,6 +87,8 @@ def create_slack_channel(name: str, is_private: Optional[bool] = True) -> str:
         SlackApiError: If there is an error during get slack channel
             information.
     """
+    from slack_sdk.errors import SlackApiError
+
     try:
         slack_client = _login_slack()
         response = slack_client.conversations_create(
@@ -115,6 +115,8 @@ def join_slack_channel(channel_id: str) -> str:
         SlackApiError: If there is an error during get slack channel
             information.
     """
+    from slack_sdk.errors import SlackApiError
+
     try:
         slack_client = _login_slack()
         response = slack_client.conversations_join(channel=channel_id)
@@ -137,6 +139,8 @@ def leave_slack_channel(channel_id: str) -> str:
         SlackApiError: If there is an error during get slack channel
             information.
     """
+    from slack_sdk.errors import SlackApiError
+
     try:
         slack_client = _login_slack()
         response = slack_client.conversations_leave(channel=channel_id)
@@ -155,6 +159,8 @@ def get_slack_channel_information() -> str:
         SlackApiError: If there is an error during get slack channel
             information.
     """
+    from slack_sdk.errors import SlackApiError
+
     try:
         slack_client = _login_slack()
         response = slack_client.conversations_list()
@@ -189,6 +195,8 @@ def get_slack_channel_message(channel_id: str) -> str:
     Raises:
         SlackApiError: If there is an error during get slack channel message.
     """
+    from slack_sdk.errors import SlackApiError
+
     try:
         slack_client = _login_slack()
         result = slack_client.conversations_history(channel=channel_id)
@@ -222,6 +230,8 @@ def send_slack_message(
     Raises:
         SlackApiError: If an error occurs while sending the message.
     """
+    from slack_sdk.errors import SlackApiError
+
     try:
         slack_client = _login_slack()
         if user:
@@ -254,6 +264,8 @@ def delete_slack_message(
     Raises:
         SlackApiError: If an error occurs while sending the message.
     """
+    from slack_sdk.errors import SlackApiError
+
     try:
         slack_client = _login_slack()
         response = slack_client.chat_delete(channel=channel_id, ts=time_stamp)
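Every hunk above applies the same change: the `SlackApiError` import moves out of the module-level `if TYPE_CHECKING:` block and into each function body, so `slack_sdk` only needs to be installed when one of the Slack functions is actually called. A minimal, self-contained sketch of that deferred-import pattern (the function below is illustrative, not part of the diff):

def post_to_slack(channel_id: str, text: str, token: str) -> str:
    # Import inside the function: slack_sdk is only required at call time,
    # not at module import time.
    from slack_sdk import WebClient
    from slack_sdk.errors import SlackApiError

    try:
        client = WebClient(token=token)
        response = client.chat_postMessage(channel=channel_id, text=text)
        return str(response)
    except SlackApiError as e:
        return f"Slack API error: {e.response['error']}"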
camel/models/__init__.py CHANGED
@@ -15,6 +15,8 @@ from .anthropic_model import AnthropicModel
 from .base_model import BaseModelBackend
 from .litellm_model import LiteLLMModel
 from .model_factory import ModelFactory
+from .nemotron_model import NemotronModel
+from .ollama_model import OllamaModel
 from .open_source_model import OpenSourceModel
 from .openai_audio_models import OpenAIAudioModels
 from .openai_model import OpenAIModel
@@ -31,4 +33,6 @@ __all__ = [
     'ModelFactory',
     'LiteLLMModel',
     'OpenAIAudioModels',
+    'NemotronModel',
+    'OllamaModel',
 ]
camel/models/anthropic_model.py CHANGED
@@ -35,6 +35,7 @@ class AnthropicModel(BaseModelBackend):
         model_type: ModelType,
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
+        url: Optional[str] = None,
     ) -> None:
         r"""Constructor for Anthropic backend.
 
@@ -45,10 +46,11 @@ class AnthropicModel(BaseModelBackend):
                 be fed into Anthropic.messages.create().
             api_key (Optional[str]): The API key for authenticating with the
                 Anthropic service. (default: :obj:`None`)
+            url (Optional[str]): The url to the model service.
         """
-        super().__init__(model_type, model_config_dict)
+        super().__init__(model_type, model_config_dict, api_key, url)
         self._api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
-        self.client = Anthropic(api_key=self._api_key)
+        self.client = Anthropic(api_key=self._api_key, base_url=url)
         self._token_counter: Optional[BaseTokenCounter] = None
 
     def _convert_response_from_anthropic_to_openai(self, response):
camel/models/base_model.py CHANGED
@@ -31,6 +31,7 @@ class BaseModelBackend(ABC):
         model_type: ModelType,
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
+        url: Optional[str] = None,
     ) -> None:
         r"""Constructor for the model backend.
 
@@ -38,12 +39,14 @@ class BaseModelBackend(ABC):
             model_type (ModelType): Model for which a backend is created.
             model_config_dict (Dict[str, Any]): A config dictionary.
             api_key (Optional[str]): The API key for authenticating with the
-                LLM service.
+                model service.
+            url (Optional[str]): The url to the model service.
         """
         self.model_type = model_type
 
         self.model_config_dict = model_config_dict
         self._api_key = api_key
+        self._url = url
         self.check_model_config()
 
     @property
camel/models/model_factory.py CHANGED
@@ -11,15 +11,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional, Union
 
 from camel.models.anthropic_model import AnthropicModel
 from camel.models.base_model import BaseModelBackend
+from camel.models.litellm_model import LiteLLMModel
+from camel.models.ollama_model import OllamaModel
 from camel.models.open_source_model import OpenSourceModel
 from camel.models.openai_model import OpenAIModel
 from camel.models.stub_model import StubModel
 from camel.models.zhipuai_model import ZhipuAIModel
-from camel.types import ModelType
+from camel.types import ModelPlatformType, ModelType
 
 
 class ModelFactory:
@@ -31,18 +33,24 @@ class ModelFactory:
 
     @staticmethod
     def create(
-        model_type: ModelType,
+        model_platform: ModelPlatformType,
+        model_type: Union[ModelType, str],
         model_config_dict: Dict,
         api_key: Optional[str] = None,
+        url: Optional[str] = None,
     ) -> BaseModelBackend:
         r"""Creates an instance of `BaseModelBackend` of the specified type.
 
         Args:
-            model_type (ModelType): Model for which a backend is created.
+            model_platform (ModelPlatformType): Platform from which the model
+                originates.
+            model_type (Union[ModelType, str]): Model for which a backend is
+                created can be a `str` for open source platforms.
             model_config_dict (Dict): A dictionary that will be fed into
                 the backend constructor.
             api_key (Optional[str]): The API key for authenticating with the
-                LLM service.
+                model service.
+            url (Optional[str]): The url to the model service.
 
         Raises:
            ValueError: If there is not backend for the model.
@@ -51,22 +59,35 @@ class ModelFactory:
            BaseModelBackend: The initialized backend.
        """
         model_class: Any
-        if model_type.is_openai:
-            model_class = OpenAIModel
-        elif model_type == ModelType.STUB:
-            model_class = StubModel
-        elif model_type.is_open_source:
-            model_class = OpenSourceModel
-        elif model_type.is_anthropic:
-            model_class = AnthropicModel
-        elif model_type.is_zhipuai:
-            model_class = ZhipuAIModel
-        else:
-            raise ValueError(f"Unknown model type `{model_type}` is input")
 
-        if model_type.is_open_source:
-            inst = model_class(model_type, model_config_dict)
+        if isinstance(model_type, ModelType):
+            if model_platform.is_open_source and model_type.is_open_source:
+                model_class = OpenSourceModel
+                return model_class(model_type, model_config_dict, url)
+            if model_platform.is_openai and model_type.is_openai:
+                model_class = OpenAIModel
+            elif model_platform.is_anthropic and model_type.is_anthropic:
+                model_class = AnthropicModel
+            elif model_platform.is_zhipuai and model_type.is_zhipuai:
+                model_class = ZhipuAIModel
+            elif model_type == ModelType.STUB:
+                model_class = StubModel
+            else:
+                raise ValueError(
+                    f"Unknown pair of model platform `{model_platform}` "
+                    f"and model type `{model_type}`."
+                )
+        elif isinstance(model_type, str):
+            if model_platform.is_ollama:
+                model_class = OllamaModel
+            elif model_platform.is_litellm:
+                model_class = LiteLLMModel
+            else:
+                raise ValueError(
+                    f"Unknown pair of model platform `{model_platform}` "
+                    f"and model type `{model_type}`."
+                )
         else:
-            inst = model_class(model_type, model_config_dict, api_key)
+            raise ValueError(f"Invalid model type `{model_type}` provided.")
 
-        return inst
+        return model_class(model_type, model_config_dict, api_key, url)
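The factory now dispatches on a `(model_platform, model_type)` pair and accepts string model types for the Ollama and LiteLLM backends. A hedged usage sketch of the new signature (the enum member names, model identifiers, config values, placeholder key, and local URL below are illustrative assumptions, not taken from the diff):

from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# Enum-typed model on its matching platform (assumes OPENAI_API_KEY is set).
openai_backend = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_3_5_TURBO,
    model_config_dict={"temperature": 0.0},
)

# String-typed model routed to the new OllamaModel backend.
ollama_backend = ModelFactory.create(
    model_platform=ModelPlatformType.OLLAMA,
    model_type="llama3",
    model_config_dict={"temperature": 0.0},
    api_key="ollama",  # Ollama typically ignores the key, but the OpenAI client needs one
    url="http://localhost:11434/v1",
)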
camel/models/nemotron_model.py ADDED
@@ -0,0 +1,71 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the “License”);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an “AS IS” BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+import os
+from typing import List, Optional
+
+from openai import OpenAI
+
+from camel.messages import OpenAIMessage
+from camel.types import ChatCompletion, ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    model_api_key_required,
+)
+
+
+class NemotronModel:
+    r"""Nemotron model API backend with OpenAI compatibility."""
+
+    # NOTE: Nemotron model doesn't support additional model config like OpenAI.
+
+    def __init__(
+        self,
+        model_type: ModelType,
+        api_key: Optional[str] = None,
+    ) -> None:
+        r"""Constructor for Nvidia backend.
+
+        Args:
+            model_type (ModelType): Model for which a backend is created.
+            api_key (Optional[str]): The API key for authenticating with the
+                Nvidia service. (default: :obj:`None`)
+        """
+        self.model_type = model_type
+        url = os.environ.get('NVIDIA_API_BASE_URL', None)
+        self._api_key = api_key or os.environ.get("NVIDIA_API_KEY")
+        if not url or not self._api_key:
+            raise ValueError("The NVIDIA API base url and key should be set.")
+        self._client = OpenAI(
+            timeout=60, max_retries=3, base_url=url, api_key=self._api_key
+        )
+        self._token_counter: Optional[BaseTokenCounter] = None
+
+    @model_api_key_required
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> ChatCompletion:
+        r"""Runs inference of OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list.
+
+        Returns:
+            ChatCompletion.
+        """
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type.value,
+        )
+        return response
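NemotronModel is a thin standalone wrapper (it does not subclass `BaseModelBackend`) around NVIDIA's OpenAI-compatible endpoint and takes no extra model config. A hedged sketch of how it might be used; the endpoint URL and the `ModelType` member are assumptions for illustration:

import os

from camel.models import NemotronModel
from camel.types import ModelType

# The constructor reads both variables from the environment and raises if either is missing.
os.environ["NVIDIA_API_BASE_URL"] = "https://integrate.api.nvidia.com/v1"  # assumed endpoint
os.environ["NVIDIA_API_KEY"] = "nvapi-..."

nemotron = NemotronModel(model_type=ModelType.NEMOTRON_4_REWARD)  # assumed enum member
completion = nemotron.run(
    messages=[
        {"role": "user", "content": "What is the capital of France?"},
        {"role": "assistant", "content": "Paris."},
    ]
)
print(completion.choices[0])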
camel/models/ollama_model.py ADDED
@@ -0,0 +1,121 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the “License”);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an “AS IS” BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+import os
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import OPENAI_API_PARAMS
+from camel.messages import OpenAIMessage
+from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
+from camel.utils import BaseTokenCounter, OpenAITokenCounter
+
+
+class OllamaModel:
+    r"""Ollama service interface."""
+
+    # NOTE: Current `ModelType and `TokenCounter` desigen is not suitable,
+    # stream mode is not supported
+
+    def __init__(
+        self,
+        model_type: str,
+        model_config_dict: Dict[str, Any],
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+    ) -> None:
+        r"""Constructor for Ollama backend with OpenAI compatibility.
+
+        Args:
+            model_type (str): Model for which a backend is created.
+            model_config_dict (Dict[str, Any]): A dictionary that will
+                be fed into openai.ChatCompletion.create().
+            api_key (Optional[str]): The API key for authenticating with the
+                model service. (default: :obj:`None`)
+            url (Optional[str]): The url to the model service.
+        """
+        self.model_type = model_type
+        self.model_config_dict = model_config_dict
+        self._url = url or os.environ.get('OPENAI_API_BASE_URL')
+        self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
+        # Use OpenAI cilent as interface call Ollama
+        # Reference: https://github.com/ollama/ollama/blob/main/docs/openai.md
+        self._client = OpenAI(
+            timeout=60,
+            max_retries=3,
+            base_url=self._url,
+            api_key=self._api_key,
+        )
+        self._token_counter: Optional[BaseTokenCounter] = None
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            BaseTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        # NOTE: Use OpenAITokenCounter temporarily
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_3_5_TURBO)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to OpenAI API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to OpenAI API.
+        """
+        for param in self.model_config_dict:
+            if param not in OPENAI_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into OpenAI model backend."
+                )
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
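OllamaModel reuses the OpenAI Python client against Ollama's OpenAI-compatible endpoint (see the reference link in the file). A hedged sketch of direct use; the model name, URL, and placeholder key are assumptions, and Ollama normally ignores the key even though the OpenAI client requires one:

from camel.models import OllamaModel

model = OllamaModel(
    model_type="mistral",
    model_config_dict={"temperature": 0.2, "stream": False},
    api_key="ollama",  # placeholder; the local server does not check it
    url="http://localhost:11434/v1",
)
response = model.run(messages=[{"role": "user", "content": "Say hello."}])
print(response.choices[0].message.content)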
camel/models/open_source_model.py CHANGED
@@ -31,6 +31,8 @@ class OpenSourceModel(BaseModelBackend):
         self,
         model_type: ModelType,
         model_config_dict: Dict[str, Any],
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
     ) -> None:
         r"""Constructor for model backends of Open-source models.
 
@@ -38,8 +40,11 @@ class OpenSourceModel(BaseModelBackend):
             model_type (ModelType): Model for which a backend is created.
             model_config_dict (Dict[str, Any]): A dictionary that will
                 be fed into :obj:`openai.ChatCompletion.create()`.
+            api_key (Optional[str]): The API key for authenticating with the
+                model service. (ignored for open-source models)
+            url (Optional[str]): The url to the model service.
         """
-        super().__init__(model_type, model_config_dict)
+        super().__init__(model_type, model_config_dict, api_key, url)
         self._token_counter: Optional[BaseTokenCounter] = None
 
         # Check whether the input model type is open-source
@@ -65,7 +70,7 @@ class OpenSourceModel(BaseModelBackend):
             )
 
         # Load the server URL and check whether it is None
-        server_url: Optional[str] = self.model_config_dict.get(
+        server_url: Optional[str] = url or self.model_config_dict.get(
            "server_url", None
        )
        if not server_url:
camel/models/openai_model.py CHANGED
@@ -35,6 +35,7 @@ class OpenAIModel(BaseModelBackend):
         model_type: ModelType,
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
+        url: Optional[str] = None,
     ) -> None:
         r"""Constructor for OpenAI backend.
 
@@ -45,12 +46,16 @@ class OpenAIModel(BaseModelBackend):
                 be fed into openai.ChatCompletion.create().
             api_key (Optional[str]): The API key for authenticating with the
                 OpenAI service. (default: :obj:`None`)
+            url (Optional[str]): The url to the OpenAI service.
         """
-        super().__init__(model_type, model_config_dict)
-        url = os.environ.get('OPENAI_API_BASE_URL', None)
+        super().__init__(model_type, model_config_dict, api_key, url)
+        self._url = url or os.environ.get("OPENAI_API_BASE_URL")
         self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
         self._client = OpenAI(
-            timeout=60, max_retries=3, base_url=url, api_key=self._api_key
+            timeout=60,
+            max_retries=3,
+            base_url=self._url,
+            api_key=self._api_key,
         )
         self._token_counter: Optional[BaseTokenCounter] = None
 
camel/models/stub_model.py CHANGED
@@ -54,11 +54,13 @@ class StubModel(BaseModelBackend):
         model_type: ModelType,
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
+        url: Optional[str] = None,
     ) -> None:
         r"""All arguments are unused for the dummy model."""
-        super().__init__(model_type, model_config_dict)
+        super().__init__(model_type, model_config_dict, api_key, url)
         self._token_counter: Optional[BaseTokenCounter] = None
         self._api_key = api_key
+        self._url = url
 
     @property
     def token_counter(self) -> BaseTokenCounter:
camel/prompts/__init__.py CHANGED
@@ -16,6 +16,9 @@ from .base import CodePrompt, TextPrompt, TextPromptDict
 from .code import CodePromptTemplateDict
 from .descripte_video_prompt import DescriptionVideoPromptTemplateDict
 from .evaluation import EvaluationPromptTemplateDict
+from .generate_text_embedding_data import (
+    GenerateTextEmbeddingDataPromptTemplateDict,
+)
 from .misalignment import MisalignmentPromptTemplateDict
 from .object_recognition import ObjectRecognitionPromptTemplateDict
 from .prompt_templates import PromptTemplateGenerator
@@ -37,6 +40,7 @@ __all__ = [
     'TaskPromptTemplateDict',
     'PromptTemplateGenerator',
     'SolutionExtractionPromptTemplateDict',
+    'GenerateTextEmbeddingDataPromptTemplateDict',
     'ObjectRecognitionPromptTemplateDict',
     'DescriptionVideoPromptTemplateDict',
 ]
camel/prompts/generate_text_embedding_data.py ADDED
@@ -0,0 +1,79 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the “License”);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an “AS IS” BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from typing import Any
+
+from camel.prompts import TextPrompt, TextPromptDict
+from camel.types import RoleType
+
+
+# flake8: noqa :E501
+class GenerateTextEmbeddingDataPromptTemplateDict(TextPromptDict):
+    r"""A :obj:`TextPrompt` dictionary containing text embedding tasks
+    generation, query, positive and hard negative samples generation,
+    from the `"Improving Text Embeddings with Large Language Models"
+    <https://arxiv.org/abs/2401.00368>`_ paper.
+
+
+    Attributes:
+        GENERATE_TASKS (TextPrompt): A prompt to generate a list
+            of :obj:`num_tasks` synthetic text_embedding tasks.
+        ASSISTANT_PROMPT (TextPrompt): A system prompt for the AI assistant
+            to generate synthetic :obj:`user_query`, :obj:`positive document`,
+            and :obj:`hard_negative_document` for a specific :obj:`task` with
+            specified parameters including :obj:`query_type`,
+            :obj:`query_length`, :obj:`clarity`, :obj:`num_words`,
+            :obj:`language` and :obj:`difficulty`.
+    """
+
+    GENERATE_TASKS = TextPrompt(
+        """You are an expert to brainstorm a list of {num_tasks} potentially useful text retrieval tasks
+Here are a few examples for your reference:
+- Provided a scientific claim as query, retrieve documents that help verify or refute the claim.
+- Search for documents that answers a FAQ-style query on children's nutrition.
+Please adhere to the following guidelines:
+- Specify what the query is, and what the desired documents are.
+- Each retrieval task should cover a wide range of queries, and should not be too specific.
+Your output should always be a python list of strings starting with `1.`, `2.` etc.
+And each element corresponds to a distinct retrieval task in one sentence.
+Do not explain yourself or output anything else.
+Be creative!"""
+    )
+
+    ASSISTANT_PROMPT = TextPrompt(
+        """You have been assigned a retrieval task: {task}
+Your mission is to write one text retrieval example for this task in JSON format. The JSON object must
+contain the following keys:
+- "user_query": a string, a random user search query specified by the retrieval task.
+- "positive_document": a string, a relevant document for the user query.
+- "hard_negative_document": a string, a hard negative document that only appears relevant to the query.
+Please adhere to the following guidelines:
+- The "user_query" should be {query_type}, {query_length}, {clarity}, and diverse in topic.
+- All documents must be created independent of the query. Avoid copying the query verbatim.
+It's acceptable if some parts of the "positive_document" are not topically related to the query.
+- All documents should be at least {num_words} words long.
+- The "hard_negative_document" contains some useful information, but it should be less useful or comprehensive compared to the "positive_document".
+- Both the query and documents should be in {language}.
+- Do not provide any explanation in any document on why it is relevant or not relevant to the query.
+- Both the query and documents require {difficulty} level education to understand.
+Your output must always be a JSON object only (starting and ending with curly brackets), do not explain yourself or output anything else. Be creative!"""
+    )
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.update(
+            {
+                "generate_tasks": self.GENERATE_TASKS,
+                RoleType.ASSISTANT: self.ASSISTANT_PROMPT,
+            }
+        )
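The new template dict registers its prompts under the `"generate_tasks"` key and the `RoleType.ASSISTANT` key, and both prompts are `TextPrompt` format strings. A hedged sketch of filling them in; the parameter values below are illustrative assumptions:

from camel.prompts import GenerateTextEmbeddingDataPromptTemplateDict
from camel.types import RoleType

prompts = GenerateTextEmbeddingDataPromptTemplateDict()

# Ask for a handful of synthetic retrieval tasks.
task_prompt = prompts["generate_tasks"].format(num_tasks=5)

# System prompt for generating one (query, positive, hard negative) triple.
assistant_prompt = prompts[RoleType.ASSISTANT].format(
    task="Given a cooking question, retrieve recipes that answer it.",
    query_type="common",
    query_length="short",
    clarity="clear",
    num_words=150,
    language="English",
    difficulty="high school",
)
print(task_prompt)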
@@ -24,6 +24,9 @@ from camel.prompts.descripte_video_prompt import (
 from camel.prompts.evaluation import (
     EvaluationPromptTemplateDict,
 )
+from camel.prompts.generate_text_embedding_data import (
+    GenerateTextEmbeddingDataPromptTemplateDict,
+)
 from camel.prompts.misalignment import MisalignmentPromptTemplateDict
 from camel.prompts.object_recognition import (
     ObjectRecognitionPromptTemplateDict,
@@ -60,6 +63,7 @@ class TaskPromptTemplateDict(Dict[Any, TextPromptDict]):
                 TaskType.SOLUTION_EXTRACTION: SolutionExtractionPromptTemplateDict(),  # noqa: E501
                 TaskType.ROLE_DESCRIPTION: RoleDescriptionPromptTemplateDict(),
                 TaskType.OBJECT_RECOGNITION: ObjectRecognitionPromptTemplateDict(),  # noqa: E501
+                TaskType.GENERATE_TEXT_EMBEDDING_DATA: GenerateTextEmbeddingDataPromptTemplateDict(),  # noqa: E501
                 TaskType.VIDEO_DESCRIPTION: DescriptionVideoPromptTemplateDict(),  # noqa: E501
             }
         )