camel-ai 0.2.12__py3-none-any.whl → 0.2.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (47)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +1 -1
  3. camel/embeddings/openai_compatible_embedding.py +1 -1
  4. camel/embeddings/openai_embedding.py +1 -1
  5. camel/messages/base.py +5 -5
  6. camel/models/__init__.py +2 -0
  7. camel/models/anthropic_model.py +1 -1
  8. camel/models/azure_openai_model.py +1 -1
  9. camel/models/deepseek_model.py +1 -1
  10. camel/models/fish_audio_model.py +146 -0
  11. camel/models/gemini_model.py +1 -1
  12. camel/models/groq_model.py +1 -1
  13. camel/models/nemotron_model.py +1 -1
  14. camel/models/nvidia_model.py +1 -1
  15. camel/models/ollama_model.py +1 -1
  16. camel/models/openai_compatible_model.py +1 -1
  17. camel/models/openai_model.py +24 -12
  18. camel/models/qwen_model.py +1 -1
  19. camel/models/reward/nemotron_model.py +1 -1
  20. camel/models/samba_model.py +1 -1
  21. camel/models/sglang_model.py +2 -2
  22. camel/models/togetherai_model.py +1 -1
  23. camel/models/vllm_model.py +1 -1
  24. camel/models/yi_model.py +1 -1
  25. camel/models/zhipuai_model.py +1 -1
  26. camel/runtime/configs.py +12 -12
  27. camel/runtime/docker_runtime.py +7 -7
  28. camel/runtime/llm_guard_runtime.py +3 -3
  29. camel/runtime/remote_http_runtime.py +5 -5
  30. camel/runtime/utils/function_risk_toolkit.py +1 -1
  31. camel/runtime/utils/ignore_risk_toolkit.py +2 -2
  32. camel/schemas/__init__.py +2 -1
  33. camel/schemas/base.py +2 -4
  34. camel/schemas/outlines_converter.py +249 -0
  35. camel/toolkits/arxiv_toolkit.py +6 -6
  36. camel/toolkits/ask_news_toolkit.py +2 -2
  37. camel/toolkits/github_toolkit.py +3 -3
  38. camel/toolkits/google_scholar_toolkit.py +16 -2
  39. camel/toolkits/meshy_toolkit.py +2 -2
  40. camel/toolkits/search_toolkit.py +2 -2
  41. camel/types/enums.py +3 -0
  42. camel/utils/commons.py +4 -22
  43. camel/utils/token_counting.py +10 -2
  44. {camel_ai-0.2.12.dist-info → camel_ai-0.2.14.dist-info}/METADATA +14 -11
  45. {camel_ai-0.2.12.dist-info → camel_ai-0.2.14.dist-info}/RECORD +47 -45
  46. {camel_ai-0.2.12.dist-info → camel_ai-0.2.14.dist-info}/LICENSE +0 -0
  47. {camel_ai-0.2.12.dist-info → camel_ai-0.2.14.dist-info}/WHEEL +0 -0
camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.12'
+__version__ = '0.2.14'
 
 __all__ = [
     '__version__',
camel/agents/chat_agent.py CHANGED
@@ -1042,7 +1042,7 @@ class ChatAgent(BaseAgent):
             num_tokens (int): The number of tokens used in this step.
             external_tool_request (Optional[ChatCompletionMessageToolCall]):
                 Any external tool request made during this step.
-                (default::obj:`None`)
+                (default: :obj:`None`)
 
         Returns:
             Dict[str, Any]: A dictionary containing information about the chat
camel/embeddings/openai_compatible_embedding.py CHANGED
@@ -46,7 +46,7 @@ class OpenAICompatibleEmbedding(BaseEmbedding[str]):
         )
         self._url = url or os.environ.get("OPENAI_COMPATIBILIY_API_BASE_URL")
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/embeddings/openai_embedding.py CHANGED
@@ -56,7 +56,7 @@ class OpenAIEmbedding(BaseEmbedding[str]):
         assert isinstance(dimensions, int)
         self.output_dim = dimensions
         self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
-        self.client = OpenAI(timeout=60, max_retries=3, api_key=self._api_key)
+        self.client = OpenAI(timeout=180, max_retries=3, api_key=self._api_key)
 
     @api_keys_required("OPENAI_API_KEY")
     def embed_list(
camel/messages/base.py CHANGED
@@ -52,15 +52,15 @@ class BaseMessage:
            for the message.
        content (str): The content of the message.
        video_bytes (Optional[bytes]): Optional bytes of a video associated
-            with the message. (default::obj:`None`)
+            with the message. (default: :obj:`None`)
        image_list (Optional[List[Image.Image]]): Optional list of PIL Image
-            objects associated with the message. (default::obj:`None`)
+            objects associated with the message. (default: :obj:`None`)
        image_detail (Literal["auto", "low", "high"]): Detail level of the
-            images associated with the message. (default::obj:`auto`)
+            images associated with the message. (default: :obj:`auto`)
        video_detail (Literal["auto", "low", "high"]): Detail level of the
-            videos associated with the message. (default::obj:`low`)
+            videos associated with the message. (default: :obj:`low`)
        parsed: Optional[Union[Type[BaseModel], dict]]: Optional object which
-            is parsed from the content. (default::obj:`None`)
+            is parsed from the content. (default: :obj:`None`)
    """
 
    role_name: str
camel/models/__init__.py CHANGED
@@ -16,6 +16,7 @@ from .azure_openai_model import AzureOpenAIModel
 from .base_model import BaseModelBackend
 from .cohere_model import CohereModel
 from .deepseek_model import DeepSeekModel
+from .fish_audio_model import FishAudioModel
 from .gemini_model import GeminiModel
 from .groq_model import GroqModel
 from .litellm_model import LiteLLMModel
@@ -66,4 +67,5 @@ __all__ = [
     'QwenModel',
     'ModelProcessingError',
     'DeepSeekModel',
+    'FishAudioModel',
 ]
camel/models/anthropic_model.py CHANGED
@@ -35,7 +35,7 @@ class AnthropicModel(BaseModelBackend):
        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
            that will be fed into Anthropic.messages.create(). If
            :obj:`None`, :obj:`AnthropicConfig().as_dict()` will be used.
-            (default::obj:`None`)
+            (default: :obj:`None`)
        api_key (Optional[str], optional): The API key for authenticating with
            the Anthropic service. (default: :obj:`None`)
        url (Optional[str], optional): The url to the Anthropic service.
camel/models/azure_openai_model.py CHANGED
@@ -91,7 +91,7 @@ class AzureOpenAIModel(BaseModelBackend):
             azure_deployment=self.azure_deployment_name,
             api_version=self.api_version,
             api_key=self._api_key,
-            timeout=60,
+            timeout=180,
             max_retries=3,
         )
 
camel/models/deepseek_model.py CHANGED
@@ -70,7 +70,7 @@ class DeepSeekModel(BaseModelBackend):
         )
 
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/fish_audio_model.py ADDED
@@ -0,0 +1,146 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from typing import Any, Optional
+
+
+class FishAudioModel:
+    r"""Provides access to FishAudio's Text-to-Speech (TTS) and Speech_to_Text
+    (STT) models.
+    """
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+    ) -> None:
+        r"""Initialize an instance of FishAudioModel.
+
+        Args:
+            api_key (Optional[str]): API key for FishAudio service. If not
+                provided, the environment variable `FISHAUDIO_API_KEY` will be
+                used.
+            url (Optional[str]): Base URL for FishAudio API. If not provided,
+                the environment variable `FISHAUDIO_API_BASE_URL` will be used.
+        """
+        from fish_audio_sdk import Session
+
+        self._api_key = api_key or os.environ.get("FISHAUDIO_API_KEY")
+        self._url = url or os.environ.get(
+            "FISHAUDIO_API_BASE_URL", "https://api.fish.audio"
+        )
+        self.session = Session(apikey=self._api_key, base_url=self._url)
+
+    def text_to_speech(
+        self,
+        input: str,
+        storage_path: str,
+        reference_id: Optional[str] = None,
+        reference_audio: Optional[str] = None,
+        reference_audio_text: Optional[str] = None,
+        **kwargs: Any,
+    ) -> Any:
+        r"""Convert text to speech and save the output to a file.
+
+        Args:
+            input_text (str): The text to convert to speech.
+            storage_path (str): The file path where the resulting speech will
+                be saved.
+            reference_id (Optional[str]): An optional reference ID to
+                associate with the request. (default: :obj:`None`)
+            reference_audio (Optional[str]): Path to an audio file for
+                reference speech. (default: :obj:`None`)
+            reference_audio_text (Optional[str]): Text for the reference audio.
+                (default: :obj:`None`)
+            **kwargs (Any): Additional parameters to pass to the TTS request.
+
+        Raises:
+            FileNotFoundError: If the reference audio file cannot be found.
+        """
+        from fish_audio_sdk import ReferenceAudio, TTSRequest
+
+        directory = os.path.dirname(storage_path)
+        if directory and not os.path.exists(directory):
+            os.makedirs(directory)
+
+        if not reference_audio:
+            with open(f"{storage_path}", "wb") as f:
+                for chunk in self.session.tts(
+                    TTSRequest(reference_id=reference_id, text=input, **kwargs)
+                ):
+                    f.write(chunk)
+        else:
+            if not os.path.exists(reference_audio):
+                raise FileNotFoundError(
+                    f"Reference audio file not found: {reference_audio}"
+                )
+            if not reference_audio_text:
+                raise ValueError("reference_audio_text should be provided")
+            with open(f"{reference_audio}", "rb") as audio_file:
+                with open(f"{storage_path}", "wb") as f:
+                    for chunk in self.session.tts(
+                        TTSRequest(
+                            text=input,
+                            references=[
+                                ReferenceAudio(
+                                    audio=audio_file.read(),
+                                    text=reference_audio_text,
+                                )
+                            ],
+                            **kwargs,
+                        )
+                    ):
+                        f.write(chunk)
+
+    def speech_to_text(
+        self,
+        audio_file_path: str,
+        language: Optional[str] = None,
+        ignore_timestamps: Optional[bool] = None,
+        **kwargs: Any,
+    ) -> str:
+        r"""Convert speech to text from an audio file.
+
+        Args:
+            audio_file_path (str): The path to the audio file to transcribe.
+            language (Optional[str]): The language of the audio. (default:
+                :obj:`None`)
+            ignore_timestamps (Optional[bool]): Whether to ignore timestamps.
+                (default: :obj:`None`)
+            **kwargs (Any): Additional parameters to pass to the STT request.
+
+        Returns:
+            str: The transcribed text from the audio.
+
+        Raises:
+            FileNotFoundError: If the audio file cannot be found.
+        """
+        from fish_audio_sdk import ASRRequest
+
+        if not os.path.exists(audio_file_path):
+            raise FileNotFoundError(f"Audio file not found: {audio_file_path}")
+
+        with open(f"{audio_file_path}", "rb") as audio_file:
+            audio_data = audio_file.read()
+
+        response = self.session.asr(
+            ASRRequest(
+                audio=audio_data,
+                language=language,
+                ignore_timestamps=ignore_timestamps,
+                **kwargs,
+            )
+        )
+        return response.text
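
A minimal usage sketch of this new backend (not part of the diff): it assumes the optional fish_audio_sdk dependency is installed and FISHAUDIO_API_KEY is set in the environment; "speech.mp3" is a hypothetical output path.

# Hedged sketch exercising the FishAudioModel API added in 0.2.14.
# Assumes `pip install fish-audio-sdk` and FISHAUDIO_API_KEY in the env.
from camel.models import FishAudioModel

audio_model = FishAudioModel()  # falls back to FISHAUDIO_API_KEY / default URL

# Text-to-speech: streams the returned audio chunks into a local file.
audio_model.text_to_speech(
    input="CAMEL 0.2.14 adds a FishAudio TTS/STT backend.",
    storage_path="speech.mp3",
)

# Speech-to-text: reads the file back and returns the transcript string.
print(audio_model.speech_to_text(audio_file_path="speech.mp3"))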
camel/models/gemini_model.py CHANGED
@@ -71,7 +71,7 @@ class GeminiModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter
         )
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/groq_model.py CHANGED
@@ -69,7 +69,7 @@ class GroqModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter
         )
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/nemotron_model.py CHANGED
@@ -52,7 +52,7 @@ class NemotronModel(BaseModelBackend):
         api_key = api_key or os.environ.get("NVIDIA_API_KEY")
         super().__init__(model_type, {}, api_key, url)
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             base_url=self._url,
             api_key=self._api_key,
camel/models/nvidia_model.py CHANGED
@@ -66,7 +66,7 @@ class NvidiaModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter
         )
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/ollama_model.py CHANGED
@@ -70,7 +70,7 @@ class OllamaModel(BaseModelBackend):
         self._start_server()
         # Use OpenAI client as interface call Ollama
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             api_key="Set-but-ignored", # required but ignored
             base_url=self._url,
camel/models/openai_compatible_model.py CHANGED
@@ -61,7 +61,7 @@ class OpenAICompatibleModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter
         )
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/openai_model.py CHANGED
@@ -21,6 +21,7 @@ from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import (
+    NOT_GIVEN,
     ChatCompletion,
     ChatCompletionChunk,
     ModelType,
@@ -67,7 +68,7 @@ class OpenAIModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter
         )
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             base_url=self._url,
             api_key=self._api_key,
@@ -103,7 +104,11 @@
         """
         # o1-preview and o1-mini have Beta limitations
         # reference: https://platform.openai.com/docs/guides/reasoning
-        if self.model_type in [ModelType.O1_MINI, ModelType.O1_PREVIEW]:
+        if self.model_type in [
+            ModelType.O1,
+            ModelType.O1_MINI,
+            ModelType.O1_PREVIEW,
+        ]:
             warnings.warn(
                 "Warning: You are using an O1 model (O1_MINI or O1_PREVIEW), "
                 "which has certain limitations, reference: "
@@ -111,22 +116,21 @@
                 UserWarning,
             )
 
-        # Remove system message that is not supported in o1 model.
-        messages = [msg for msg in messages if msg.get("role") != "system"]
-
         # Check and remove unsupported parameters and reset the fixed
         # parameters
-        unsupported_keys = ["stream", "tools", "tool_choice"]
+        unsupported_keys = [
+            "temperature",
+            "top_p",
+            "presence_penalty",
+            "frequency_penalty",
+            "logprobs",
+            "top_logprobs",
+            "logit_bias",
+        ]
         for key in unsupported_keys:
             if key in self.model_config_dict:
                 del self.model_config_dict[key]
 
-        self.model_config_dict["temperature"] = 1.0
-        self.model_config_dict["top_p"] = 1.0
-        self.model_config_dict["n"] = 1
-        self.model_config_dict["presence_penalty"] = 0.0
-        self.model_config_dict["frequency_penalty"] = 0.0
-
         if self.model_config_dict.get("response_format"):
             # stream is not supported in beta.chat.completions.parse
             if "stream" in self.model_config_dict:
@@ -140,6 +144,14 @@
 
             return self._to_chat_completion(response)
 
+        # Removing 'strict': True from the dictionary for
+        # client.chat.completions.create
+        if self.model_config_dict.get('tools') is not NOT_GIVEN:
+            for tool in self.model_config_dict.get('tools', []):
+                function_dict = tool.get('function', {})
+                if 'strict' in function_dict:
+                    del function_dict['strict']
+
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
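
The block added above strips any 'strict' flag from each tool's function schema before the plain client.chat.completions.create call. An isolated sketch of the same transformation, with a made-up tools list (not taken from the diff):

# Sketch of the 'strict' removal added in 0.2.14; `tools` is a made-up
# OpenAI-style tool schema list.
tools = [
    {
        "type": "function",
        "function": {"name": "get_weather", "strict": True, "parameters": {}},
    }
]

for tool in tools:
    function_dict = tool.get("function", {})
    function_dict.pop("strict", None)  # equivalent to the diff's `del`

assert "strict" not in tools[0]["function"]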
camel/models/qwen_model.py CHANGED
@@ -71,7 +71,7 @@ class QwenModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter
         )
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/reward/nemotron_model.py CHANGED
@@ -47,7 +47,7 @@ class NemotronRewardModel(BaseRewardModel):
         api_key = api_key or os.environ.get("NVIDIA_API_KEY")
         super().__init__(model_type, api_key, url)
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             base_url=self.url,
             api_key=self.api_key,
camel/models/samba_model.py CHANGED
@@ -95,7 +95,7 @@ class SambaModel(BaseModelBackend):
 
         if self._url == "https://api.sambanova.ai/v1":
             self._client = OpenAI(
-                timeout=60,
+                timeout=180,
                 max_retries=3,
                 base_url=self._url,
                 api_key=self._api_key,
camel/models/sglang_model.py CHANGED
@@ -80,7 +80,7 @@ class SGLangModel(BaseModelBackend):
         if self._url:
             # Initialize the client if an existing URL is provided
             self._client = OpenAI(
-                timeout=60,
+                timeout=180,
                 max_retries=3,
                 api_key="Set-but-ignored", # required but ignored
                 base_url=self._url,
@@ -113,7 +113,7 @@ class SGLangModel(BaseModelBackend):
         self.last_run_time = time.time()
         # Initialize the client after the server starts
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             api_key="Set-but-ignored", # required but ignored
             base_url=self._url,
camel/models/togetherai_model.py CHANGED
@@ -72,7 +72,7 @@ class TogetherAIModel(BaseModelBackend):
         )
 
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/vllm_model.py CHANGED
@@ -72,7 +72,7 @@ class VLLMModel(BaseModelBackend):
         self._start_server()
         # Use OpenAI cilent as interface call vLLM
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             api_key="EMPTY", # required but ignored
             base_url=self._url,
camel/models/yi_model.py CHANGED
@@ -70,7 +70,7 @@ class YiModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter
         )
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/zhipuai_model.py CHANGED
@@ -70,7 +70,7 @@ class ZhipuAIModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter
         )
         self._client = OpenAI(
-            timeout=60,
+            timeout=180,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/runtime/configs.py CHANGED
@@ -21,23 +21,23 @@ class TaskConfig(BaseModel):
 
    Arttributes:
        cmd (str or list): Command to be executed
-        stdout (bool): Attach to stdout. (default::obj: `True`)
-        stderr (bool): Attach to stderr. (default::obj: `True`)
-        stdin (bool): Attach to stdin. (default::obj: `False`)
-        tty (bool): Allocate a pseudo-TTY. (default::obj: `False`)
-        privileged (bool): Run as privileged. (default::obj: `False`)
-        user (str): User to execute command as. (default::obj: `""`)
+        stdout (bool): Attach to stdout. (default: :obj: `True`)
+        stderr (bool): Attach to stderr. (default: :obj: `True`)
+        stdin (bool): Attach to stdin. (default: :obj: `False`)
+        tty (bool): Allocate a pseudo-TTY. (default: :obj: `False`)
+        privileged (bool): Run as privileged. (default: :obj: `False`)
+        user (str): User to execute command as. (default: :obj: `""`)
        detach (bool): If true, detach from the exec command.
-            (default::obj: `False`)
-        stream (bool): Stream response data. (default::obj: `False`)
+            (default: :obj: `False`)
+        stream (bool): Stream response data. (default: :obj: `False`)
        socket (bool): Return the connection socket to allow custom
-            read/write operations. (default::obj: `False`)
+            read/write operations. (default: :obj: `False`)
        environment (dict or list): A dictionary or a list of strings in
            the following format ``["PASSWORD=xxx"]`` or
-            ``{"PASSWORD": "xxx"}``. (default::obj: `None`)
+            ``{"PASSWORD": "xxx"}``. (default: :obj: `None`)
        workdir (str): Path to working directory for this exec session.
-            (default::obj: `None`)
-        demux (bool): Return stdout and stderr separately. (default::obj:
+            (default: :obj: `None`)
+        demux (bool): Return stdout and stderr separately. (default: :obj:
            `False`)
    """
 
camel/runtime/docker_runtime.py CHANGED
@@ -42,10 +42,10 @@ class DockerRuntime(BaseRuntime):
 
    Args:
        image (str): The name of the Docker image to use for the runtime.
-        port (int): The port number to use for the runtime API. (default::obj:
+        port (int): The port number to use for the runtime API. (default: :obj:
            `8000`)
        remove (bool): Whether to remove the container after stopping it. '
-            (default::obj: `True`)
+            (default: :obj: `True`)
        kwargs (dict): Additional keyword arguments to pass to the
            Docker client.
    """
@@ -170,7 +170,7 @@ class DockerRuntime(BaseRuntime):
 
        Args:
            time_out (int): The number of seconds to wait for the container to
-                start. (default::obj: `15`)
+                start. (default: :obj: `15`)
 
        Returns:
            DockerRuntime: The DockerRuntime instance.
@@ -259,9 +259,9 @@ class DockerRuntime(BaseRuntime):
                list of functions to add.
            entrypoint (str): The entrypoint for the function.
            redirect_stdout (bool): Whether to return the stdout of
-                the function. (default::obj: `False`)
+                the function. (default: :obj: `False`)
            arguments (Optional[Dict[str, Any]]): The arguments for the
-                function. (default::obj: `None`)
+                function. (default: :obj: `None`)
 
        Returns:
            DockerRuntime: The DockerRuntime instance.
@@ -330,7 +330,7 @@ class DockerRuntime(BaseRuntime):
 
        Args:
            remove (Optional[bool]): Whether to remove the container
-                after stopping it. (default::obj: `None`)
+                after stopping it. (default: :obj: `None`)
 
        Returns:
            DockerRuntime: The DockerRuntime instance.
@@ -366,7 +366,7 @@ class DockerRuntime(BaseRuntime):
        r"""Wait for the API Server to be ready.
 
        Args:
-            timeout (int): The number of seconds to wait. (default::obj: `10`)
+            timeout (int): The number of seconds to wait. (default: :obj: `10`)
 
        Returns:
            bool: Whether the API Server is ready.
camel/runtime/llm_guard_runtime.py CHANGED
@@ -68,9 +68,9 @@ class LLMGuardRuntime(BaseRuntime):
    Arguments:
        prompt (str): The prompt to use for the language model. (default:
            :obj:`GUARDPROMPT`)
-        model (BaseModelBackend): The language model to use. (default::obj:
+        model (BaseModelBackend): The language model to use. (default: :obj:
            `None`)
-        verbose (bool): Whether to print verbose output. (default::obj:
+        verbose (bool): Whether to print verbose output. (default: :obj:
            `False`)
    """
 
@@ -114,7 +114,7 @@ class LLMGuardRuntime(BaseRuntime):
            funcs (FunctionTool or List[FunctionTool]): The function or
                list of functions to add.
            threshold (int): The risk threshold for functions.
-                (default::obj:`2`)
+                (default: :obj:`2`)
 
        Returns:
            LLMGuardRuntime: The current runtime.
camel/runtime/remote_http_runtime.py CHANGED
@@ -36,9 +36,9 @@ class RemoteHttpRuntime(BaseRuntime):
 
    Args:
        host (str): The host of the remote server.
-        port (int): The port of the remote server. (default::obj: `8000`)
+        port (int): The port of the remote server. (default: :obj: `8000`)
        python_exec (str): The python executable to run the API server.
-            (default::obj: `python3`)
+            (default: :obj: `python3`)
    """
 
    def __init__(
@@ -90,9 +90,9 @@ class RemoteHttpRuntime(BaseRuntime):
            list of functions to add.
            entrypoint (str): The entrypoint for the function.
            redirect_stdout (bool): Whether to return the stdout of
-                the function. (default::obj: `False`)
+                the function. (default: :obj: `False`)
            arguments (Optional[Dict[str, Any]]): The arguments for the
-                function. (default::obj: `None`)
+                function. (default: :obj: `None`)
 
        Returns:
            RemoteHttpRuntime: The current runtime.
@@ -162,7 +162,7 @@ class RemoteHttpRuntime(BaseRuntime):
        r"""Wait for the API Server to be ready.
 
        Args:
-            timeout (int): The number of seconds to wait. (default::obj: `10`)
+            timeout (int): The number of seconds to wait. (default: :obj: `10`)
 
        Returns:
            bool: Whether the API Server is ready.
camel/runtime/utils/function_risk_toolkit.py CHANGED
@@ -22,7 +22,7 @@ class FunctionRiskToolkit(BaseToolkit):
 
    Args:
        verbose (Optional[bool]): Whether to print verbose output.
-            (default::obj:`False`)
+            (default: :obj:`False`)
    """
 
    def __init__(self, verbose: Optional[bool] = False):
camel/runtime/utils/ignore_risk_toolkit.py CHANGED
@@ -22,9 +22,9 @@ class IgnoreRiskToolkit(BaseToolkit):
 
    Args:
        function_names (Optional[List[str]]): A list of function names to
-            ignore risks for. (default::obj:`None`)
+            ignore risks for. (default: :obj:`None`)
        verbose (Optional[bool]): Whether to print verbose output.
-            (default::obj:`False`)
+            (default: :obj:`False`)
    """
 
    def __init__(
camel/schemas/__init__.py CHANGED
@@ -13,5 +13,6 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 from .openai_converter import OpenAISchemaConverter
+from .outlines_converter import OutlinesConverter
 
-__all__ = ["OpenAISchemaConverter"]
+__all__ = ["OpenAISchemaConverter", "OutlinesConverter"]