camel-ai 0.2.16__py3-none-any.whl → 0.2.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (51)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +30 -6
  3. camel/agents/multi_hop_generator_agent.py +85 -0
  4. camel/agents/programmed_agent_instruction.py +148 -0
  5. camel/benchmarks/__init__.py +2 -0
  6. camel/benchmarks/apibank.py +5 -0
  7. camel/benchmarks/apibench.py +8 -4
  8. camel/benchmarks/gaia.py +2 -2
  9. camel/benchmarks/ragbench.py +333 -0
  10. camel/bots/__init__.py +1 -1
  11. camel/bots/discord/__init__.py +26 -0
  12. camel/bots/discord/discord_app.py +384 -0
  13. camel/bots/discord/discord_installation.py +64 -0
  14. camel/bots/discord/discord_store.py +160 -0
  15. camel/configs/__init__.py +3 -0
  16. camel/configs/anthropic_config.py +17 -15
  17. camel/configs/deepseek_config.py +2 -2
  18. camel/configs/internlm_config.py +60 -0
  19. camel/data_collector/base.py +5 -5
  20. camel/data_collector/sharegpt_collector.py +2 -2
  21. camel/datagen/self_instruct/self_instruct.py +4 -1
  22. camel/datagen/self_instruct/templates.py +12 -14
  23. camel/interpreters/internal_python_interpreter.py +24 -7
  24. camel/loaders/__init__.py +2 -0
  25. camel/loaders/panda_reader.py +337 -0
  26. camel/messages/__init__.py +10 -4
  27. camel/messages/func_message.py +30 -22
  28. camel/models/__init__.py +2 -0
  29. camel/models/anthropic_model.py +1 -22
  30. camel/models/cohere_model.py +8 -0
  31. camel/models/deepseek_model.py +67 -0
  32. camel/models/gemini_model.py +10 -1
  33. camel/models/internlm_model.py +143 -0
  34. camel/models/mistral_model.py +14 -7
  35. camel/models/model_factory.py +3 -0
  36. camel/models/reward/__init__.py +2 -0
  37. camel/models/reward/skywork_model.py +88 -0
  38. camel/synthetic_datagen/source2synth/data_processor.py +373 -0
  39. camel/synthetic_datagen/source2synth/models.py +68 -0
  40. camel/synthetic_datagen/source2synth/user_data_processor_config.py +73 -0
  41. camel/toolkits/google_scholar_toolkit.py +9 -0
  42. camel/types/__init__.py +4 -2
  43. camel/types/enums.py +81 -1
  44. camel/types/openai_types.py +6 -4
  45. camel/types/unified_model_type.py +5 -0
  46. camel/utils/token_counting.py +3 -3
  47. {camel_ai-0.2.16.dist-info → camel_ai-0.2.18.dist-info}/METADATA +158 -187
  48. {camel_ai-0.2.16.dist-info → camel_ai-0.2.18.dist-info}/RECORD +50 -37
  49. {camel_ai-0.2.16.dist-info → camel_ai-0.2.18.dist-info}/WHEEL +1 -1
  50. camel/bots/discord_app.py +0 -138
  51. {camel_ai-0.2.16.dist-info → camel_ai-0.2.18.dist-info}/LICENSE +0 -0
camel/messages/func_message.py CHANGED
@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import json
 from dataclasses import dataclass
 from typing import Any, Dict, Optional

@@ -18,8 +19,8 @@ from camel.messages import (
     BaseMessage,
     HermesFunctionFormatter,
     OpenAIAssistantMessage,
-    OpenAIFunctionMessage,
     OpenAIMessage,
+    OpenAIToolMessageParam,
 )
 from camel.messages.conversion import (
     ShareGPTMessage,
@@ -44,11 +45,14 @@ class FunctionCallingMessage(BaseMessage):
             function. (default: :obj:`None`)
         result (Optional[Any]): The result of function execution.
             (default: :obj:`None`)
+        tool_call_id (Optional[str]): The ID of the tool call, if available.
+            (default: :obj:`None`)
     """

     func_name: Optional[str] = None
     args: Optional[Dict] = None
     result: Optional[Any] = None
+    tool_call_id: Optional[str] = None

     def to_openai_message(
         self,
@@ -66,7 +70,7 @@ class FunctionCallingMessage(BaseMessage):
         if role_at_backend == OpenAIBackendRole.ASSISTANT:
             return self.to_openai_assistant_message()
         elif role_at_backend == OpenAIBackendRole.FUNCTION:
-            return self.to_openai_function_message()
+            return self.to_openai_tool_message()
         else:
             raise ValueError(f"Unsupported role: {role_at_backend}.")

@@ -120,24 +124,29 @@ class FunctionCallingMessage(BaseMessage):
                 " due to missing function name or arguments."
             )

-        msg_dict: OpenAIAssistantMessage = {
+        return {
             "role": "assistant",
-            "content": self.content,
-            "function_call": {
-                "name": self.func_name,
-                "arguments": str(self.args),
-            },
+            "content": self.content or "",
+            "tool_calls": [
+                {
+                    "id": self.tool_call_id or "null",
+                    "type": "function",
+                    "function": {
+                        "name": self.func_name,
+                        "arguments": json.dumps(self.args),
+                    },
+                }
+            ],
         }

-        return msg_dict
-
-    def to_openai_function_message(self) -> OpenAIFunctionMessage:
-        r"""Converts the message to an :obj:`OpenAIMessage` object
-        with the role being "function".
+    def to_openai_tool_message(self) -> OpenAIToolMessageParam:
+        r"""Converts the message to an :obj:`OpenAIToolMessageParam` object
+        with the role being "tool".

         Returns:
-            OpenAIMessage: The converted :obj:`OpenAIMessage` object
-                with its role being "function".
+            OpenAIToolMessageParam: The converted
+                :obj:`OpenAIToolMessageParam` object with its role being
+                "tool".
         """
         if not self.func_name:
             raise ValueError(
@@ -145,11 +154,10 @@ class FunctionCallingMessage(BaseMessage):
                 " due to missing function name."
             )

-        result_content = {"result": {str(self.result)}}
-        msg_dict: OpenAIFunctionMessage = {
-            "role": "function",
-            "name": self.func_name,
-            "content": f'{result_content}',
-        }
+        result_content = json.dumps(self.result)

-        return msg_dict
+        return {
+            "role": "tool",
+            "content": result_content,
+            "tool_call_id": self.tool_call_id or "null",
+        }
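
Taken together, these changes migrate FunctionCallingMessage from OpenAI's deprecated function-call format to the tool-call format. A minimal sketch of the resulting wire format (all constructor fields other than the new tool_call_id come from the existing BaseMessage/FunctionCallingMessage signature; the concrete values are illustrative):

    from camel.messages import FunctionCallingMessage
    from camel.types import OpenAIBackendRole, RoleType

    msg = FunctionCallingMessage(
        role_name="assistant",
        role_type=RoleType.ASSISTANT,
        meta_dict=None,
        content="",
        func_name="get_weather",
        args={"city": "Paris"},
        result={"temp_c": 12},
        tool_call_id="call_abc123",
    )

    # The FUNCTION-role conversion now emits a "tool" message:
    # {'role': 'tool', 'content': '{"temp_c": 12}', 'tool_call_id': 'call_abc123'}
    print(msg.to_openai_message(OpenAIBackendRole.FUNCTION))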
camel/models/__init__.py CHANGED
@@ -19,6 +19,7 @@ from .deepseek_model import DeepSeekModel
 from .fish_audio_model import FishAudioModel
 from .gemini_model import GeminiModel
 from .groq_model import GroqModel
+from .internlm_model import InternLMModel
 from .litellm_model import LiteLLMModel
 from .mistral_model import MistralModel
 from .model_factory import ModelFactory
@@ -68,4 +69,5 @@ __all__ = [
     'ModelProcessingError',
     'DeepSeekModel',
    'FishAudioModel',
+    'InternLMModel',
 ]
camel/models/anthropic_model.py CHANGED
@@ -12,7 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Literal, Optional, Union
+from typing import Any, Dict, List, Optional, Union

 from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig
 from camel.messages import OpenAIMessage
@@ -102,27 +102,6 @@ class AnthropicModel(BaseModelBackend):
             self._token_counter = AnthropicTokenCounter(self.model_type)
         return self._token_counter

-    @dependencies_required('anthropic')
-    def count_tokens_from_prompt(
-        self, prompt: str, role: Literal["user", "assistant"]
-    ) -> int:
-        r"""Count the number of tokens from a prompt.
-
-        Args:
-            prompt (str): The prompt string.
-            role (Literal["user", "assistant"]): The role of the message
-                sender, either "user" or "assistant".
-
-        Returns:
-            int: The number of tokens in the prompt.
-        """
-        from anthropic.types.beta import BetaMessageParam
-
-        return self.client.beta.messages.count_tokens(
-            messages=[BetaMessageParam(content=prompt, role=role)],
-            model=self.model_type,
-        ).input_tokens
-
     def run(
         self,
         messages: List[OpenAIMessage],
camel/models/cohere_model.py CHANGED
@@ -228,6 +228,14 @@ class CohereModel(BaseModelBackend):

         cohere_messages = self._to_cohere_chatmessage(messages)

+        # Removing 'strict': True from the dictionary for
+        # cohere client
+        if self.model_config_dict.get('tools') is not None:
+            for tool in self.model_config_dict.get('tools', []):
+                function_dict = tool.get('function', {})
+                if 'strict' in function_dict:
+                    del function_dict['strict']
+
         try:
             response = self._client.chat(
                 messages=cohere_messages,
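
The Cohere client rejects the OpenAI-specific 'strict' key in tool schemas, which is why the backend now strips it before issuing the request. A standalone sketch of the same stripping logic (the example schema is illustrative, not taken from the package):

    tools = [
        {
            "type": "function",
            "function": {
                "name": "add",
                "parameters": {"type": "object", "properties": {}},
                "strict": True,  # OpenAI-only flag; Cohere rejects it
            },
        }
    ]
    for tool in tools:
        tool.get("function", {}).pop("strict", None)

    assert "strict" not in tools[0]["function"]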
camel/models/deepseek_model.py CHANGED
@@ -13,6 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

 import os
+import warnings
 from typing import Any, Dict, List, Optional, Union

 from openai import OpenAI, Stream
@@ -110,11 +111,77 @@ class DeepSeekModel(BaseModelBackend):
             `ChatCompletion` in the non-stream mode, or
             `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        # deepseek reasoner has limitations
+        # reference: https://api-docs.deepseek.com/guides/reasoning_model#api-parameters
+        if self.model_type in [
+            ModelType.DEEPSEEK_REASONER,
+        ]:
+            warnings.warn(
+                "Warning: You are using an DeepSeek Reasoner model, "
+                "which has certain limitations, reference: "
+                "`https://api-docs.deepseek.com/guides/reasoning_model#api-parameters`.",
+                UserWarning,
+            )
+
+            # Check and remove unsupported parameters and reset the fixed
+            # parameters
+            unsupported_keys = [
+                "temperature",
+                "top_p",
+                "presence_penalty",
+                "frequency_penalty",
+                "logprobs",
+                "top_logprobs",
+                "tools",
+            ]
+            for key in unsupported_keys:
+                if key in self.model_config_dict:
+                    del self.model_config_dict[key]
+
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
             **self.model_config_dict,
         )
+
+        # Temporary solution to handle the case where
+        # deepseek returns a reasoning_content
+        if (
+            self.model_type
+            in [
+                ModelType.DEEPSEEK_REASONER,
+            ]
+            and os.environ.get("GET_REASONING_CONTENT", "false").lower()
+            == "true"
+        ):
+            reasoning_content = response.choices[0].message.reasoning_content
+            combined_content = (
+                response.choices[0].message.content
+                + "\n\nBELOW IS THE REASONING CONTENT:\n\n"
+                + (reasoning_content if reasoning_content else "")
+            )
+
+            response = ChatCompletion.construct(
+                id=response.id,
+                choices=[
+                    dict(
+                        index=response.choices[0].index,
+                        message={
+                            "role": response.choices[0].message.role,
+                            "content": combined_content,
+                            "tool_calls": None,
+                        },
+                        finish_reason=response.choices[0].finish_reason
+                        if response.choices[0].finish_reason
+                        else None,
+                    )
+                ],
+                created=response.created,
+                model=response.model,
+                object="chat.completion",
+                usage=response.usage,
+            )
+
         return response

     def check_model_config(self):
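
The reasoning-content merge is opt-in via an environment variable. A hedged usage sketch (ModelType.DEEPSEEK_REASONER and GET_REASONING_CONTENT come straight from the diff; the ModelFactory call follows its existing interface):

    import os

    os.environ["GET_REASONING_CONTENT"] = "true"

    from camel.models import ModelFactory
    from camel.types import ModelPlatformType, ModelType

    model = ModelFactory.create(
        model_platform=ModelPlatformType.DEEPSEEK,
        model_type=ModelType.DEEPSEEK_REASONER,
    )
    response = model.run([{"role": "user", "content": "What is 17 * 23?"}])
    # response.choices[0].message.content now ends with
    # "BELOW IS THE REASONING CONTENT:" followed by the reasoning text.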
camel/models/gemini_model.py CHANGED
@@ -97,8 +97,17 @@ class GeminiModel(BaseModelBackend):
             `ChatCompletion` in the non-stream mode, or
             `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        # Process messages to ensure no empty content, it's not accepeted by
+        # Gemini
+        processed_messages = []
+        for msg in messages:
+            msg_copy = msg.copy()
+            if 'content' in msg_copy and msg_copy['content'] == '':
+                msg_copy['content'] = 'null'
+            processed_messages.append(msg_copy)
+
         response = self._client.chat.completions.create(
-            messages=messages,
+            messages=processed_messages,
             model=self.model_type,
             **self.model_config_dict,
         )
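
A minimal illustration (not library code) of this preprocessing step, mirroring the empty-string check in the diff:

    messages = [
        {"role": "assistant", "content": ""},
        {"role": "user", "content": "hi"},
    ]

    processed = []
    for msg in messages:
        msg_copy = msg.copy()
        if msg_copy.get("content") == "":
            msg_copy["content"] = "null"
        processed.append(msg_copy)

    # processed[0]["content"] == "null"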
camel/models/internlm_model.py ADDED
@@ -0,0 +1,143 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import INTERNLM_API_PARAMS, InternLMConfig
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    api_keys_required,
+)
+
+
+class InternLMModel(BaseModelBackend):
+    r"""InternLM API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, one of InternLM series.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`InternLMConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the InternLM service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the InternLM service.
+            (default: :obj:`https://internlm-chat.intern-ai.org.cn/puyu/api/v1`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", "INTERNLM_API_KEY"),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = InternLMConfig().as_dict()
+        api_key = api_key or os.environ.get("INTERNLM_API_KEY")
+        url = url or os.environ.get(
+            "INTERNLM_API_BASE_URL",
+            "https://internlm-chat.intern-ai.org.cn/puyu/api/v1",
+        )
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+        self._client = OpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of InternLM chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            OpenAITokenCounter: The token counter following the model's
+                tokenization style.
+        """
+
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to InternLM API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to InternLM API.
+        """
+        for param in self.model_config_dict:
+            if param not in INTERNLM_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into InternLM model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
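
A hedged usage sketch for the new backend. The platform enum member and model identifier string are assumptions based on the enums.py changes listed above (substitute the actual values from camel.types); the ModelFactory call follows its existing interface:

    import os

    os.environ["INTERNLM_API_KEY"] = "<your-key>"  # checked by @api_keys_required

    from camel.models import ModelFactory
    from camel.types import ModelPlatformType

    model = ModelFactory.create(
        model_platform=ModelPlatformType.INTERNLM,  # assumed enum member
        model_type="internlm2.5-latest",            # assumed identifier
    )
    response = model.run([{"role": "user", "content": "Hello"}])
    print(response.choices[0].message.content)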
camel/models/mistral_model.py CHANGED
@@ -147,18 +147,25 @@ class MistralModel(BaseModelBackend):
         new_messages = []
         for msg in messages:
             tool_id = uuid.uuid4().hex[:9]
-            tool_call_id = uuid.uuid4().hex[:9]
+            tool_call_id = msg.get("tool_call_id") or uuid.uuid4().hex[:9]

             role = msg.get("role")
-            function_call = msg.get("function_call")
+            tool_calls = msg.get("tool_calls")
             content = msg.get("content")

             mistral_function_call = None
-            if function_call:
-                mistral_function_call = FunctionCall(
-                    name=function_call.get("name"),  # type: ignore[attr-defined]
-                    arguments=function_call.get("arguments"),  # type: ignore[attr-defined]
+            if tool_calls:
+                # Ensure tool_calls is treated as a list
+                tool_calls_list = (
+                    tool_calls
+                    if isinstance(tool_calls, list)
+                    else [tool_calls]
                 )
+                for tool_call in tool_calls_list:
+                    mistral_function_call = FunctionCall(
+                        name=tool_call["function"].get("name"),  # type: ignore[attr-defined]
+                        arguments=tool_call["function"].get("arguments"),  # type: ignore[attr-defined]
+                    )

             tool_calls = None
             if mistral_function_call:
@@ -178,7 +185,7 @@ class MistralModel(BaseModelBackend):
             new_messages.append(
                 ToolMessage(
                     content=content,  # type: ignore[arg-type]
-                    tool_call_id=tool_call_id,
+                    tool_call_id=tool_call_id,  # type: ignore[arg-type]
                     name=msg.get("name"),  # type: ignore[arg-type]
                 )
             )
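
The practical effect: the converter now reads the OpenAI tool-call message shape, carrying an existing tool_call_id through to Mistral's ToolMessage instead of always generating a fresh one. Illustrative message dicts (not library code):

    # Previously consumed (legacy function-call format):
    legacy = {
        "role": "assistant",
        "content": "",
        "function_call": {"name": "add", "arguments": '{"a": 1, "b": 2}'},
    }

    # Now consumed (tool-call format):
    current = {
        "role": "assistant",
        "content": "",
        "tool_calls": [
            {
                "id": "call_abc123",
                "type": "function",
                "function": {"name": "add", "arguments": '{"a": 1, "b": 2}'},
            }
        ],
    }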
camel/models/model_factory.py CHANGED
@@ -20,6 +20,7 @@ from camel.models.cohere_model import CohereModel
 from camel.models.deepseek_model import DeepSeekModel
 from camel.models.gemini_model import GeminiModel
 from camel.models.groq_model import GroqModel
+from camel.models.internlm_model import InternLMModel
 from camel.models.litellm_model import LiteLLMModel
 from camel.models.mistral_model import MistralModel
 from camel.models.nvidia_model import NvidiaModel
@@ -124,6 +125,8 @@ class ModelFactory:
             model_class = QwenModel
         elif model_platform.is_deepseek:
             model_class = DeepSeekModel
+        elif model_platform.is_internlm and model_type.is_internlm:
+            model_class = InternLMModel
         elif model_type == ModelType.STUB:
             model_class = StubModel

camel/models/reward/__init__.py CHANGED
@@ -14,9 +14,11 @@
 from .base_reward_model import BaseRewardModel
 from .evaluator import Evaluator
 from .nemotron_model import NemotronRewardModel
+from .skywork_model import SkyworkRewardModel

 __all__ = [
     'BaseRewardModel',
     'NemotronRewardModel',
     'Evaluator',
+    'SkyworkRewardModel',
 ]
camel/models/reward/skywork_model.py ADDED
@@ -0,0 +1,88 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from typing import Dict, List, Optional, Union
+
+import torch
+
+from camel.models.reward import BaseRewardModel
+from camel.types import ModelType
+
+
+class SkyworkRewardModel(BaseRewardModel):
+    r"""Reward model based on the transformers, it will download the model
+    from huggingface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        api_key (Optional[str], optional): Not used. (default: :obj:`None`)
+        url (Optional[str], optional): Not used. (default: :obj:`None`)
+        device_map (Optional[str], optional): choose the device map.
+            (default: :obj:`auto`)
+        attn_implementation (Optional[str], optional): choose the attention
+            implementation. (default: :obj:`flash_attention_2`)
+        offload_folder (Optional[str], optional): choose the offload folder.
+            (default: :obj:`offload`)
+    """
+
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        device_map: Optional[str] = "auto",
+        attn_implementation: Optional[str] = "flash_attention_2",
+        offload_folder: Optional[str] = "offload",
+    ) -> None:
+        from transformers import (
+            AutoModelForSequenceClassification,
+            AutoTokenizer,
+        )
+
+        super().__init__(model_type, api_key, url)
+        self._client = AutoModelForSequenceClassification.from_pretrained(
+            model_type,
+            torch_dtype=torch.bfloat16,
+            device_map=device_map,
+            attn_implementation=attn_implementation,
+            offload_folder=offload_folder,
+            num_labels=1,
+        )
+        self._tokenizer = AutoTokenizer.from_pretrained(model_type)
+
+    def evaluate(self, messages: List[Dict[str, str]]) -> Dict[str, float]:
+        r"""Evaluate the messages using the Skywork model.
+
+        Args:
+            messages (List[Dict[str, str]]): A list of messages.
+
+        Returns:
+            ChatCompletion: A ChatCompletion object with the scores.
+        """
+        inputs = self._tokenizer.apply_chat_template(
+            messages,
+            tokenize=True,
+            return_tensors="pt",
+        )
+        with torch.no_grad():
+            score = self._client(inputs).logits[0][0].item()
+            return {"Score": score}
+
+    def get_scores_types(self) -> List[str]:
+        r"""get the scores types
+
+        Returns:
+            List[str]: list of scores types
+        """
+        return ["Score"]
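
A hedged usage sketch: the Hugging Face repo id below is only an example, and any Skywork reward checkpoint with a sequence-classification head should fit. Note that flash_attention_2 requires a compatible GPU; pass a different attn_implementation to fall back to a standard kernel:

    from camel.models.reward import SkyworkRewardModel

    reward_model = SkyworkRewardModel(
        model_type="Skywork/Skywork-Reward-Llama-3.1-8B-v0.2",  # example repo id
    )

    messages = [
        {"role": "user", "content": "What is 2 + 2?"},
        {"role": "assistant", "content": "2 + 2 = 4."},
    ]
    print(reward_model.evaluate(messages))  # -> {"Score": <float>}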