camel-ai 0.2.11__py3-none-any.whl → 0.2.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +14 -2
- camel/benchmarks/__init__.py +18 -0
- camel/benchmarks/base.py +152 -0
- camel/benchmarks/gaia.py +478 -0
- camel/configs/__init__.py +3 -0
- camel/configs/ollama_config.py +4 -2
- camel/configs/sglang_config.py +71 -0
- camel/data_collector/__init__.py +19 -0
- camel/data_collector/alpaca_collector.py +127 -0
- camel/data_collector/base.py +211 -0
- camel/data_collector/sharegpt_collector.py +205 -0
- camel/datahubs/__init__.py +23 -0
- camel/datahubs/base.py +136 -0
- camel/datahubs/huggingface.py +433 -0
- camel/datahubs/models.py +22 -0
- camel/embeddings/openai_compatible_embedding.py +1 -1
- camel/embeddings/openai_embedding.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/e2b_interpreter.py +136 -0
- camel/loaders/__init__.py +3 -1
- camel/loaders/base_io.py +41 -41
- camel/messages/__init__.py +2 -0
- camel/messages/base.py +5 -5
- camel/models/__init__.py +4 -0
- camel/models/anthropic_model.py +15 -5
- camel/models/azure_openai_model.py +1 -1
- camel/models/base_model.py +28 -0
- camel/models/deepseek_model.py +1 -1
- camel/models/fish_audio_model.py +146 -0
- camel/models/gemini_model.py +1 -1
- camel/models/groq_model.py +2 -2
- camel/models/model_factory.py +3 -0
- camel/models/nemotron_model.py +1 -1
- camel/models/nvidia_model.py +1 -1
- camel/models/ollama_model.py +13 -1
- camel/models/openai_compatible_model.py +1 -1
- camel/models/openai_model.py +1 -27
- camel/models/qwen_model.py +1 -1
- camel/models/reward/__init__.py +22 -0
- camel/models/reward/base_reward_model.py +58 -0
- camel/models/reward/evaluator.py +63 -0
- camel/models/reward/nemotron_model.py +112 -0
- camel/models/samba_model.py +1 -1
- camel/models/sglang_model.py +225 -0
- camel/models/togetherai_model.py +1 -1
- camel/models/vllm_model.py +2 -2
- camel/models/yi_model.py +1 -1
- camel/models/zhipuai_model.py +1 -1
- camel/personas/persona_hub.py +2 -2
- camel/runtime/configs.py +12 -12
- camel/runtime/docker_runtime.py +7 -7
- camel/runtime/llm_guard_runtime.py +3 -3
- camel/runtime/remote_http_runtime.py +5 -5
- camel/runtime/utils/function_risk_toolkit.py +1 -1
- camel/runtime/utils/ignore_risk_toolkit.py +2 -2
- camel/schemas/openai_converter.py +2 -2
- camel/societies/workforce/role_playing_worker.py +2 -2
- camel/societies/workforce/single_agent_worker.py +2 -2
- camel/societies/workforce/workforce.py +3 -3
- camel/storages/object_storages/amazon_s3.py +2 -2
- camel/storages/object_storages/azure_blob.py +2 -2
- camel/storages/object_storages/google_cloud.py +2 -2
- camel/toolkits/__init__.py +2 -0
- camel/toolkits/arxiv_toolkit.py +6 -6
- camel/toolkits/ask_news_toolkit.py +2 -2
- camel/toolkits/code_execution.py +5 -1
- camel/toolkits/function_tool.py +41 -0
- camel/toolkits/github_toolkit.py +3 -3
- camel/toolkits/google_scholar_toolkit.py +16 -2
- camel/toolkits/math_toolkit.py +47 -16
- camel/toolkits/meshy_toolkit.py +2 -2
- camel/toolkits/search_toolkit.py +155 -3
- camel/toolkits/stripe_toolkit.py +273 -0
- camel/types/__init__.py +2 -0
- camel/types/enums.py +27 -2
- camel/utils/token_counting.py +31 -12
- {camel_ai-0.2.11.dist-info → camel_ai-0.2.13.dist-info}/METADATA +24 -14
- {camel_ai-0.2.11.dist-info → camel_ai-0.2.13.dist-info}/RECORD +81 -61
- {camel_ai-0.2.11.dist-info → camel_ai-0.2.13.dist-info}/LICENSE +0 -0
- {camel_ai-0.2.11.dist-info → camel_ai-0.2.13.dist-info}/WHEEL +0 -0
camel/loaders/base_io.py
CHANGED
```diff
@@ -22,6 +22,47 @@ from typing import Any, Dict, List, Optional
 from camel.utils import dependencies_required
 
 
+def create_file(file: BytesIO, filename: str) -> "File":
+    r"""Reads an uploaded file and returns a File object.
+
+    Args:
+        file (BytesIO): A BytesIO object representing the contents of the
+            file.
+        filename (str): The name of the file.
+
+    Returns:
+        File: A File object.
+    """
+    ext_to_cls = {
+        "docx": DocxFile,
+        "pdf": PdfFile,
+        "txt": TxtFile,
+        "json": JsonFile,
+        "html": HtmlFile,
+    }
+
+    ext = filename.split(".")[-1].lower()
+    if ext not in ext_to_cls:
+        raise NotImplementedError(f"File type {ext} not supported")
+
+    out_file = ext_to_cls[ext].from_bytes(file, filename)
+    return out_file
+
+
+def create_file_from_raw_bytes(raw_bytes: bytes, filename: str) -> "File":
+    r"""Reads raw bytes and returns a File object.
+
+    Args:
+        raw_bytes (bytes): The raw bytes content of the file.
+        filename (str): The name of the file.
+
+    Returns:
+        File: A File object.
+    """
+    file = BytesIO(raw_bytes)
+    return create_file(file, filename)
+
+
 class File(ABC):
     r"""Represents an uploaded file comprised of Documents.
 
@@ -79,47 +120,6 @@ class File(ABC):
         file = BytesIO(raw_bytes)
         return cls.from_bytes(file, filename)
 
-    @staticmethod
-    def create_file(file: BytesIO, filename: str) -> "File":
-        r"""Reads an uploaded file and returns a File object.
-
-        Args:
-            file (BytesIO): A BytesIO object representing the contents of the
-                file.
-            filename (str): The name of the file.
-
-        Returns:
-            File: A File object.
-        """
-        ext_to_cls = {
-            "docx": DocxFile,
-            "pdf": PdfFile,
-            "txt": TxtFile,
-            "json": JsonFile,
-            "html": HtmlFile,
-        }
-
-        ext = filename.split(".")[-1].lower()
-        if ext not in ext_to_cls:
-            raise NotImplementedError(f"File type {ext} not supported")
-
-        out_file = ext_to_cls[ext].from_bytes(file, filename)
-        return out_file
-
-    @staticmethod
-    def create_file_from_raw_bytes(raw_bytes: bytes, filename: str) -> "File":
-        r"""Reads raw bytes and returns a File object.
-
-        Args:
-            raw_bytes (bytes): The raw bytes content of the file.
-            filename (str): The name of the file.
-
-        Returns:
-            File: A File object.
-        """
-        file = BytesIO(raw_bytes)
-        return File.create_file(file, filename)
-
     def __repr__(self) -> str:
         return (
             f"File(name={self.name}, id={self.file_id}, "
```
camel/messages/__init__.py
CHANGED
```diff
@@ -20,6 +20,7 @@ from camel.types import (
 )
 
 from .conversion import (
+    AlpacaItem,
     HermesFunctionFormatter,
     ShareGPTMessage,
 )
@@ -52,4 +53,5 @@ __all__ = [
     'ShareGPTMessage',
     'BaseMessage',
     'FunctionCallingMessage',
+    'AlpacaItem',
 ]
```
camel/messages/base.py
CHANGED
```diff
@@ -52,15 +52,15 @@ class BaseMessage:
             for the message.
         content (str): The content of the message.
         video_bytes (Optional[bytes]): Optional bytes of a video associated
-            with the message. (default
+            with the message. (default: :obj:`None`)
         image_list (Optional[List[Image.Image]]): Optional list of PIL Image
-            objects associated with the message. (default
+            objects associated with the message. (default: :obj:`None`)
         image_detail (Literal["auto", "low", "high"]): Detail level of the
-            images associated with the message. (default
+            images associated with the message. (default: :obj:`auto`)
         video_detail (Literal["auto", "low", "high"]): Detail level of the
-            videos associated with the message. (default
+            videos associated with the message. (default: :obj:`low`)
         parsed: Optional[Union[Type[BaseModel], dict]]: Optional object which
-            is parsed from the content. (default
+            is parsed from the content. (default: :obj:`None`)
     """
 
     role_name: str
```
camel/models/__init__.py
CHANGED
```diff
@@ -16,6 +16,7 @@ from .azure_openai_model import AzureOpenAIModel
 from .base_model import BaseModelBackend
 from .cohere_model import CohereModel
 from .deepseek_model import DeepSeekModel
+from .fish_audio_model import FishAudioModel
 from .gemini_model import GeminiModel
 from .groq_model import GroqModel
 from .litellm_model import LiteLLMModel
@@ -31,6 +32,7 @@ from .openai_model import OpenAIModel
 from .qwen_model import QwenModel
 from .reka_model import RekaModel
 from .samba_model import SambaModel
+from .sglang_model import SGLangModel
 from .stub_model import StubModel
 from .togetherai_model import TogetherAIModel
 from .vllm_model import VLLMModel
@@ -55,6 +57,7 @@ __all__ = [
     'NvidiaModel',
     'OllamaModel',
     'VLLMModel',
+    'SGLangModel',
     'GeminiModel',
     'OpenAICompatibleModel',
     'RekaModel',
@@ -64,4 +67,5 @@ __all__ = [
     'QwenModel',
     'ModelProcessingError',
     'DeepSeekModel',
+    'FishAudioModel',
 ]
```
camel/models/anthropic_model.py
CHANGED
```diff
@@ -12,7 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Literal, Optional, Union
 
 from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig
 from camel.messages import OpenAIMessage
@@ -35,7 +35,7 @@ class AnthropicModel(BaseModelBackend):
         model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
             that will be fed into Anthropic.messages.create(). If
             :obj:`None`, :obj:`AnthropicConfig().as_dict()` will be used.
-            (default
+            (default: :obj:`None`)
         api_key (Optional[str], optional): The API key for authenticating with
             the Anthropic service. (default: :obj:`None`)
         url (Optional[str], optional): The url to the Anthropic service.
@@ -94,19 +94,29 @@ class AnthropicModel(BaseModelBackend):
             tokenization style.
         """
         if not self._token_counter:
-            self._token_counter = AnthropicTokenCounter()
+            self._token_counter = AnthropicTokenCounter(self.model_type)
         return self._token_counter
 
-
+    @dependencies_required('anthropic')
+    def count_tokens_from_prompt(
+        self, prompt: str, role: Literal["user", "assistant"]
+    ) -> int:
         r"""Count the number of tokens from a prompt.
 
         Args:
             prompt (str): The prompt string.
+            role (Literal["user", "assistant"]): The role of the message
+                sender, either "user" or "assistant".
 
         Returns:
             int: The number of tokens in the prompt.
         """
-
+        from anthropic.types.beta import BetaMessageParam
+
+        return self.client.beta.messages.count_tokens(
+            messages=[BetaMessageParam(content=prompt, role=role)],
+            model=self.model_type,
+        ).input_tokens
 
     @api_keys_required("ANTHROPIC_API_KEY")
     def run(
```
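The new `count_tokens_from_prompt` delegates to Anthropic's server-side `client.beta.messages.count_tokens` endpoint rather than counting locally. A minimal sketch, assuming `ANTHROPIC_API_KEY` is set and that `ModelType.CLAUDE_3_5_SONNET` is an available enum member:

```python
from camel.models import AnthropicModel
from camel.types import ModelType

model = AnthropicModel(model_type=ModelType.CLAUDE_3_5_SONNET)

# Token count is computed by the Anthropic API, not locally.
n = model.count_tokens_from_prompt("Hello, Claude!", role="user")
print(n)
```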
camel/models/base_model.py
CHANGED
```diff
@@ -21,6 +21,7 @@ from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
     ModelType,
+    ParsedChatCompletion,
     UnifiedModelType,
 )
 from camel.utils import BaseTokenCounter
@@ -114,6 +115,33 @@ class BaseModelBackend(ABC):
         """
         return self.token_counter.count_tokens_from_messages(messages)
 
+    def _to_chat_completion(
+        self, response: ParsedChatCompletion
+    ) -> ChatCompletion:
+        if len(response.choices) > 1:
+            print("Warning: Multiple response choices detected")
+
+        choice = dict(
+            index=response.choices[0].index,
+            message={
+                "role": response.choices[0].message.role,
+                "content": response.choices[0].message.content,
+                "tool_calls": response.choices[0].message.tool_calls,
+                "parsed": response.choices[0].message.parsed,
+            },
+            finish_reason=response.choices[0].finish_reason,
+        )
+
+        obj = ChatCompletion.construct(
+            id=response.id,
+            choices=[choice],
+            created=response.created,
+            model=response.model,
+            object="chat.completion",
+            usage=response.usage,
+        )
+        return obj
+
     @property
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.
```
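Note that `_to_chat_completion` is hoisted here from `OpenAIModel` (see the `openai_model.py` diff below), so that other OpenAI-compatible backends, notably the new structured-output path in `OllamaModel`, can reuse the conversion from `ParsedChatCompletion` to plain `ChatCompletion`. Only the first choice is kept; a warning is printed when more than one is returned.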
camel/models/deepseek_model.py
CHANGED
camel/models/fish_audio_model.py
ADDED

Note: the diff below is the new 146-line `camel/models/fish_audio_model.py` (listed above as +146 -0); it was misattributed to `deepseek_model.py` (+1 -1), whose body is not shown.

```diff
@@ -0,0 +1,146 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from typing import Any, Optional
+
+
+class FishAudioModel:
+    r"""Provides access to FishAudio's Text-to-Speech (TTS) and Speech_to_Text
+    (STT) models.
+    """
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+    ) -> None:
+        r"""Initialize an instance of FishAudioModel.
+
+        Args:
+            api_key (Optional[str]): API key for FishAudio service. If not
+                provided, the environment variable `FISHAUDIO_API_KEY` will be
+                used.
+            url (Optional[str]): Base URL for FishAudio API. If not provided,
+                the environment variable `FISHAUDIO_API_BASE_URL` will be used.
+        """
+        from fish_audio_sdk import Session
+
+        self._api_key = api_key or os.environ.get("FISHAUDIO_API_KEY")
+        self._url = url or os.environ.get(
+            "FISHAUDIO_API_BASE_URL", "https://api.fish.audio"
+        )
+        self.session = Session(apikey=self._api_key, base_url=self._url)
+
+    def text_to_speech(
+        self,
+        input: str,
+        storage_path: str,
+        reference_id: Optional[str] = None,
+        reference_audio: Optional[str] = None,
+        reference_audio_text: Optional[str] = None,
+        **kwargs: Any,
+    ) -> Any:
+        r"""Convert text to speech and save the output to a file.
+
+        Args:
+            input_text (str): The text to convert to speech.
+            storage_path (str): The file path where the resulting speech will
+                be saved.
+            reference_id (Optional[str]): An optional reference ID to
+                associate with the request. (default: :obj:`None`)
+            reference_audio (Optional[str]): Path to an audio file for
+                reference speech. (default: :obj:`None`)
+            reference_audio_text (Optional[str]): Text for the reference audio.
+                (default: :obj:`None`)
+            **kwargs (Any): Additional parameters to pass to the TTS request.
+
+        Raises:
+            FileNotFoundError: If the reference audio file cannot be found.
+        """
+        from fish_audio_sdk import ReferenceAudio, TTSRequest
+
+        directory = os.path.dirname(storage_path)
+        if directory and not os.path.exists(directory):
+            os.makedirs(directory)
+
+        if not reference_audio:
+            with open(f"{storage_path}", "wb") as f:
+                for chunk in self.session.tts(
+                    TTSRequest(reference_id=reference_id, text=input, **kwargs)
+                ):
+                    f.write(chunk)
+        else:
+            if not os.path.exists(reference_audio):
+                raise FileNotFoundError(
+                    f"Reference audio file not found: {reference_audio}"
+                )
+            if not reference_audio_text:
+                raise ValueError("reference_audio_text should be provided")
+            with open(f"{reference_audio}", "rb") as audio_file:
+                with open(f"{storage_path}", "wb") as f:
+                    for chunk in self.session.tts(
+                        TTSRequest(
+                            text=input,
+                            references=[
+                                ReferenceAudio(
+                                    audio=audio_file.read(),
+                                    text=reference_audio_text,
+                                )
+                            ],
+                            **kwargs,
+                        )
+                    ):
+                        f.write(chunk)
+
+    def speech_to_text(
+        self,
+        audio_file_path: str,
+        language: Optional[str] = None,
+        ignore_timestamps: Optional[bool] = None,
+        **kwargs: Any,
+    ) -> str:
+        r"""Convert speech to text from an audio file.
+
+        Args:
+            audio_file_path (str): The path to the audio file to transcribe.
+            language (Optional[str]): The language of the audio. (default:
+                :obj:`None`)
+            ignore_timestamps (Optional[bool]): Whether to ignore timestamps.
+                (default: :obj:`None`)
+            **kwargs (Any): Additional parameters to pass to the STT request.
+
+        Returns:
+            str: The transcribed text from the audio.
+
+        Raises:
+            FileNotFoundError: If the audio file cannot be found.
+        """
+        from fish_audio_sdk import ASRRequest
+
+        if not os.path.exists(audio_file_path):
+            raise FileNotFoundError(f"Audio file not found: {audio_file_path}")
+
+        with open(f"{audio_file_path}", "rb") as audio_file:
+            audio_data = audio_file.read()
+
+        response = self.session.asr(
+            ASRRequest(
+                audio=audio_data,
+                language=language,
+                ignore_timestamps=ignore_timestamps,
+                **kwargs,
+            )
+        )
+        return response.text
```
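A minimal usage sketch of the new backend, assuming `FISHAUDIO_API_KEY` is set and `fish-audio-sdk` is installed (the paths are illustrative):

```python
from camel.models import FishAudioModel

fish = FishAudioModel()

# TTS: stream synthesized audio chunks to disk.
fish.text_to_speech(input="Hello from CAMEL!", storage_path="out/hello.mp3")

# STT: transcribe the file back to text.
print(fish.speech_to_text(audio_file_path="out/hello.mp3"))
```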
camel/models/gemini_model.py
CHANGED
camel/models/groq_model.py
CHANGED
```diff
@@ -63,13 +63,13 @@ class GroqModel(BaseModelBackend):
             model_config_dict = GroqConfig().as_dict()
         api_key = api_key or os.environ.get("GROQ_API_KEY")
         url = url or os.environ.get(
-            "GROQ_API_BASE_URL"
+            "GROQ_API_BASE_URL", "https://api.groq.com/openai/v1"
         )
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
         self._client = OpenAI(
-            timeout=
+            timeout=180,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
```
camel/models/model_factory.py
CHANGED
```diff
@@ -29,6 +29,7 @@ from camel.models.openai_model import OpenAIModel
 from camel.models.qwen_model import QwenModel
 from camel.models.reka_model import RekaModel
 from camel.models.samba_model import SambaModel
+from camel.models.sglang_model import SGLangModel
 from camel.models.stub_model import StubModel
 from camel.models.togetherai_model import TogetherAIModel
 from camel.models.vllm_model import VLLMModel
@@ -86,6 +87,8 @@ class ModelFactory:
             model_class = OllamaModel
         elif model_platform.is_vllm:
             model_class = VLLMModel
+        elif model_platform.is_sglang:
+            model_class = SGLangModel
         elif model_platform.is_openai_compatible_model:
             model_class = OpenAICompatibleModel
         elif model_platform.is_samba:
```
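With the factory wired up, an SGLang-backed model is created like any other platform. A sketch, assuming `ModelPlatformType.SGLANG` is the enum member added in `camel/types/enums.py` and an SGLang server is running locally (model name and URL are illustrative):

```python
from camel.models import ModelFactory
from camel.types import ModelPlatformType

# Routes to the new SGLangModel via model_platform.is_sglang.
model = ModelFactory.create(
    model_platform=ModelPlatformType.SGLANG,
    model_type="meta-llama/Llama-3.1-8B-Instruct",
    url="http://127.0.0.1:30000/v1",
)
```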
camel/models/nemotron_model.py
CHANGED
```diff
@@ -52,7 +52,7 @@ class NemotronModel(BaseModelBackend):
         api_key = api_key or os.environ.get("NVIDIA_API_KEY")
         super().__init__(model_type, {}, api_key, url)
         self._client = OpenAI(
-            timeout=
+            timeout=180,
             max_retries=3,
             base_url=self._url,
             api_key=self._api_key,
```
camel/models/nvidia_model.py
CHANGED
camel/models/ollama_model.py
CHANGED
```diff
@@ -70,7 +70,7 @@ class OllamaModel(BaseModelBackend):
             self._start_server()
         # Use OpenAI client as interface call Ollama
         self._client = OpenAI(
-            timeout=
+            timeout=180,
             max_retries=3,
             api_key="Set-but-ignored",  # required but ignored
             base_url=self._url,
@@ -134,6 +134,18 @@ class OllamaModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        if self.model_config_dict.get("response_format"):
+            # stream is not supported in beta.chat.completions.parse
+            if "stream" in self.model_config_dict:
+                del self.model_config_dict["stream"]
+
+            response = self._client.beta.chat.completions.parse(
+                messages=messages,
+                model=self.model_type,
+                **self.model_config_dict,
+            )
+
+            return self._to_chat_completion(response)
 
         response = self._client.chat.completions.create(
             messages=messages,
```
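The new branch routes any request carrying a `response_format` through `beta.chat.completions.parse` and then downgrades the result with the `_to_chat_completion` helper added to `BaseModelBackend` above. A sketch, assuming a local Ollama server and that the `response_format` field added in `ollama_config.py` accepts a Pydantic model (the model name is illustrative):

```python
from pydantic import BaseModel

from camel.configs import OllamaConfig
from camel.models import OllamaModel


class Pet(BaseModel):
    name: str
    species: str


model = OllamaModel(
    model_type="llama3.2",
    model_config_dict=OllamaConfig(response_format=Pet).as_dict(),
)
response = model.run(
    [{"role": "user", "content": "Invent a pet and describe it."}]
)
# The single returned choice carries the parsed object in its
# message's "parsed" field, alongside the raw content.
print(response)
```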
camel/models/openai_model.py
CHANGED
```diff
@@ -24,7 +24,6 @@ from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
     ModelType,
-    ParsedChatCompletion,
 )
 from camel.utils import (
     BaseTokenCounter,
@@ -68,7 +67,7 @@ class OpenAIModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter
         )
         self._client = OpenAI(
-            timeout=
+            timeout=180,
             max_retries=3,
             base_url=self._url,
             api_key=self._api_key,
@@ -148,31 +147,6 @@ class OpenAIModel(BaseModelBackend):
         )
         return response
 
-    def _to_chat_completion(
-        self, response: "ParsedChatCompletion"
-    ) -> ChatCompletion:
-        # TODO: Handle n > 1 or warn consumers it's not supported
-        choice = dict(
-            index=response.choices[0].index,
-            message={
-                "role": response.choices[0].message.role,
-                "content": response.choices[0].message.content,
-                "tool_calls": response.choices[0].message.tool_calls,
-                "parsed": response.choices[0].message.parsed,
-            },
-            finish_reason=response.choices[0].finish_reason,
-        )
-
-        obj = ChatCompletion.construct(
-            id=response.id,
-            choices=[choice],
-            created=response.created,
-            model=response.model,
-            object="chat.completion",
-            usage=response.usage,
-        )
-        return obj
-
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to OpenAI API.
```
camel/models/qwen_model.py
CHANGED
camel/models/reward/__init__.py
ADDED

Note: the two diffs below are the new files under `camel/models/reward/` (listed above as +22 -0 and +58 -0); they were misattributed to `qwen_model.py` (+1 -1), whose body is not shown.

```diff
@@ -0,0 +1,22 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from .base_reward_model import BaseRewardModel
+from .evaluator import Evaluator
+from .nemotron_model import NemotronRewardModel
+
+__all__ = [
+    'BaseRewardModel',
+    'NemotronRewardModel',
+    'Evaluator',
+]
```

camel/models/reward/base_reward_model.py
ADDED

```diff
@@ -0,0 +1,58 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from abc import ABC, abstractmethod
+from typing import Dict, List, Optional, Union
+
+from camel.types import ModelType
+
+
+class BaseRewardModel(ABC):
+    r"""Abstract base class for reward models. Reward models are used to
+    evaluate messages and return scores based on different criteria.
+
+    Subclasses should implement the 'evaluate' and 'get_scores_types' methods.
+    """
+
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+    ) -> None:
+        self.model_type = model_type
+        self.api_key = api_key
+        self.url = url
+
+    @abstractmethod
+    def evaluate(self, messages: List[Dict[str, str]]) -> Dict[str, float]:
+        r"""Evaluate the messages and return scores based on different
+        criteria.
+
+        Args:
+            messages (List[Dict[str, str]]): A list of messages where each
+                message is a dictionary with 'role' and 'content'.
+
+        Returns:
+            Dict[str, float]: A dictionary mapping score types to their values.
+        """
+        pass
+
+    @abstractmethod
+    def get_scores_types(self) -> List[str]:
+        r"""Get the list of score types that the reward model can return.
+
+        Returns:
+            List[str]: A list of score types that the reward model can return.
+        """
+        pass
```