camel-ai 0.2.3__py3-none-any.whl → 0.2.3a0__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (87)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +69 -93
  3. camel/agents/knowledge_graph_agent.py +6 -4
  4. camel/bots/__init__.py +2 -16
  5. camel/bots/discord_bot.py +206 -0
  6. camel/configs/__init__.py +2 -1
  7. camel/configs/anthropic_config.py +5 -2
  8. camel/configs/base_config.py +6 -6
  9. camel/configs/groq_config.py +3 -2
  10. camel/configs/ollama_config.py +2 -1
  11. camel/configs/openai_config.py +23 -2
  12. camel/configs/samba_config.py +2 -2
  13. camel/configs/togetherai_config.py +1 -1
  14. camel/configs/vllm_config.py +1 -1
  15. camel/configs/zhipuai_config.py +3 -2
  16. camel/embeddings/openai_embedding.py +2 -2
  17. camel/loaders/__init__.py +0 -2
  18. camel/loaders/firecrawl_reader.py +3 -3
  19. camel/loaders/unstructured_io.py +33 -35
  20. camel/messages/__init__.py +0 -1
  21. camel/models/__init__.py +4 -2
  22. camel/models/anthropic_model.py +26 -32
  23. camel/models/azure_openai_model.py +36 -39
  24. camel/models/base_model.py +20 -31
  25. camel/models/gemini_model.py +29 -37
  26. camel/models/groq_model.py +23 -29
  27. camel/models/litellm_model.py +61 -44
  28. camel/models/mistral_model.py +29 -32
  29. camel/models/model_factory.py +76 -66
  30. camel/models/nemotron_model.py +23 -33
  31. camel/models/ollama_model.py +47 -42
  32. camel/models/open_source_model.py +170 -0
  33. camel/models/{openai_compatible_model.py → openai_compatibility_model.py} +49 -31
  34. camel/models/openai_model.py +29 -48
  35. camel/models/reka_model.py +28 -30
  36. camel/models/samba_model.py +177 -82
  37. camel/models/stub_model.py +2 -2
  38. camel/models/togetherai_model.py +43 -37
  39. camel/models/vllm_model.py +50 -43
  40. camel/models/zhipuai_model.py +27 -33
  41. camel/retrievers/auto_retriever.py +10 -28
  42. camel/retrievers/vector_retriever.py +47 -58
  43. camel/societies/babyagi_playing.py +3 -6
  44. camel/societies/role_playing.py +3 -5
  45. camel/storages/graph_storages/graph_element.py +5 -3
  46. camel/storages/key_value_storages/json.py +1 -6
  47. camel/toolkits/__init__.py +7 -20
  48. camel/toolkits/base.py +3 -2
  49. camel/toolkits/code_execution.py +7 -6
  50. camel/toolkits/dalle_toolkit.py +6 -6
  51. camel/toolkits/github_toolkit.py +10 -9
  52. camel/toolkits/google_maps_toolkit.py +7 -7
  53. camel/toolkits/linkedin_toolkit.py +7 -7
  54. camel/toolkits/math_toolkit.py +8 -8
  55. camel/toolkits/open_api_toolkit.py +5 -5
  56. camel/toolkits/{function_tool.py → openai_function.py} +11 -34
  57. camel/toolkits/reddit_toolkit.py +7 -7
  58. camel/toolkits/retrieval_toolkit.py +5 -5
  59. camel/toolkits/search_toolkit.py +9 -9
  60. camel/toolkits/slack_toolkit.py +11 -11
  61. camel/toolkits/twitter_toolkit.py +452 -378
  62. camel/toolkits/weather_toolkit.py +6 -6
  63. camel/types/__init__.py +1 -6
  64. camel/types/enums.py +85 -40
  65. camel/types/openai_types.py +0 -3
  66. camel/utils/__init__.py +2 -0
  67. camel/utils/async_func.py +7 -7
  68. camel/utils/commons.py +3 -32
  69. camel/utils/token_counting.py +212 -30
  70. camel/workforce/role_playing_worker.py +1 -1
  71. camel/workforce/single_agent_worker.py +1 -1
  72. camel/workforce/task_channel.py +3 -4
  73. camel/workforce/workforce.py +4 -4
  74. {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a0.dist-info}/METADATA +56 -27
  75. {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a0.dist-info}/RECORD +76 -85
  76. {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a0.dist-info}/WHEEL +1 -1
  77. camel/bots/discord_app.py +0 -138
  78. camel/bots/slack/__init__.py +0 -30
  79. camel/bots/slack/models.py +0 -158
  80. camel/bots/slack/slack_app.py +0 -255
  81. camel/loaders/chunkr_reader.py +0 -163
  82. camel/toolkits/arxiv_toolkit.py +0 -155
  83. camel/toolkits/ask_news_toolkit.py +0 -653
  84. camel/toolkits/google_scholar_toolkit.py +0 -146
  85. camel/toolkits/whatsapp_toolkit.py +0 -177
  86. camel/types/unified_model_type.py +0 -104
  87. camel_ai-0.2.3.dist-info/LICENSE +0 -201
camel/models/base_model.py
@@ -17,49 +17,41 @@ from typing import Any, Dict, List, Optional, Union
 from openai import Stream
 
 from camel.messages import OpenAIMessage
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-    UnifiedModelType,
-)
+from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
 from camel.utils import BaseTokenCounter
 
 
 class BaseModelBackend(ABC):
     r"""Base class for different model backends.
-    It may be OpenAI API, a local LLM, a stub for unit tests, etc.
-
-    Args:
-        model_type (Union[ModelType, str]): Model for which a backend is
-            created.
-        model_config_dict (Optional[Dict[str, Any]], optional): A config
-            dictionary. (default: :obj:`{}`)
-        api_key (Optional[str], optional): The API key for authenticating
-            with the model service. (default: :obj:`None`)
-        url (Optional[str], optional): The url to the model service.
-            (default: :obj:`None`)
-        token_counter (Optional[BaseTokenCounter], optional): Token
-            counter to use for the model. If not provided,
-            :obj:`OpenAITokenCounter` will be used. (default: :obj:`None`)
+    May be OpenAI API, a local LLM, a stub for unit tests, etc.
     """
 
     def __init__(
         self,
-        model_type: Union[ModelType, str],
-        model_config_dict: Optional[Dict[str, Any]] = None,
+        model_type: ModelType,
+        model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        self.model_type: UnifiedModelType = UnifiedModelType(model_type)
-        if model_config_dict is None:
-            model_config_dict = {}
+        r"""Constructor for the model backend.
+
+        Args:
+            model_type (ModelType): Model for which a backend is created.
+            model_config_dict (Dict[str, Any]): A config dictionary.
+            api_key (Optional[str]): The API key for authenticating with the
+                model service.
+            url (Optional[str]): The url to the model service.
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenAITokenCounter` will
+                be used.
+        """
+        self.model_type = model_type
         self.model_config_dict = model_config_dict
         self._api_key = api_key
         self._url = url
-        self._token_counter = token_counter
         self.check_model_config()
+        self._token_counter = token_counter
 
     @property
     @abstractmethod
@@ -118,9 +110,6 @@ class BaseModelBackend(ABC):
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.
 
-        This method retrieves the maximum token limit either from the
-        `model_config_dict` or from the model's default token limit.
-
        Returns:
            int: The maximum token limit for the given model.
        """
@@ -131,8 +120,8 @@ class BaseModelBackend(ABC):
 
     @property
     def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
+        r"""Returns whether the model is in stream mode,
+        which sends partial results each time.
 
         Returns:
             bool: Whether the model is in stream mode.
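
The net effect of the constructor hunk: the `-` side (0.2.3) loosens the contract, accepting a plain string for `model_type` and making `model_config_dict` optional, by normalizing through the new `UnifiedModelType`. A minimal sketch of that normalization under 0.2.3, assuming (as its usage here suggests) that `UnifiedModelType` is a str-subclass wrapper and that `ModelType.GPT_4O_MINI` carries the value "gpt-4o-mini":

    from camel.types import ModelType, UnifiedModelType

    # UnifiedModelType subclasses str, so a raw model name compares equal
    # to the wrapped form; this is what lets 0.2.3 relax model_type to
    # Union[ModelType, str]:
    mt = UnifiedModelType("gpt-4o-mini")
    print(mt == "gpt-4o-mini")  # True

    # Enum members are normalized the same way (assuming the member's
    # string value is "gpt-4o-mini"):
    print(UnifiedModelType(ModelType.GPT_4O_MINI) == mt)
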
camel/models/gemini_model.py
@@ -11,10 +11,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-import os
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
 
-from camel.configs import Gemini_API_PARAMS, GeminiConfig
+from camel.configs import Gemini_API_PARAMS
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import (
@@ -27,7 +26,6 @@ from camel.utils import (
     BaseTokenCounter,
     GeminiTokenCounter,
     api_keys_required,
-    dependencies_required,
 )
 
 if TYPE_CHECKING:
@@ -35,49 +33,43 @@ if TYPE_CHECKING:
 
 
 class GeminiModel(BaseModelBackend):
-    r"""Gemini API in a unified BaseModelBackend interface.
-
-    Args:
-        model_type (Union[ModelType, str]): Model for which a backend is
-            created.
-        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
-            that will be fed into :obj:`genai.GenerativeModel.generate_content()`.
-            If :obj:`None`, :obj:`GeminiConfig().as_dict()` will be used.
-            (default: :obj:`None`)
-        api_key (Optional[str], optional): The API key for authenticating with
-            the gemini service. (default: :obj:`None`)
-        url (Optional[str], optional): The url to the gemini service.
-            (default: :obj:`None`)
-        token_counter (Optional[BaseTokenCounter], optional): Token counter to
-            use for the model. If not provided, :obj:`GeminiTokenCounter` will
-            be used. (default: :obj:`None`)
-
-    Notes:
-        Currently :obj:`"stream": True` is not supported with Gemini due to the
-        limitation of the current camel design.
-    """
-
-    @dependencies_required('google')
+    r"""Gemini API in a unified BaseModelBackend interface."""
+
+    # NOTE: Currently "stream": True is not supported with Gemini due to the
+    # limitation of the current camel design.
+
     def __init__(
         self,
-        model_type: Union[ModelType, str],
-        model_config_dict: Optional[Dict[str, Any]] = None,
+        model_type: ModelType,
+        model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
+        r"""Constructor for Gemini backend.
+
+        Args:
+            model_type (ModelType): Model for which a backend is created.
+            model_config_dict (Dict[str, Any]): A dictionary that will
+                be fed into generate_content().
+            api_key (Optional[str]): The API key for authenticating with the
+                gemini service. (default: :obj:`None`)
+            url (Optional[str]): The url to the gemini service.
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `GeminiTokenCounter` will be
+                used.
+        """
+        import os
+
         import google.generativeai as genai
         from google.generativeai.types.generation_types import GenerationConfig
 
-        if model_config_dict is None:
-            model_config_dict = GeminiConfig().as_dict()
-
-        api_key = api_key or os.environ.get("GOOGLE_API_KEY")
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
+        self._api_key = api_key or os.environ.get("GOOGLE_API_KEY")
         genai.configure(api_key=self._api_key)
-        self._client = genai.GenerativeModel(self.model_type)
+        self._client = genai.GenerativeModel(self.model_type.value)
 
         keys = list(self.model_config_dict.keys())
         generation_config_dict = {
@@ -151,8 +143,8 @@ class GeminiModel(BaseModelBackend):
         return self.model_config_dict.get('stream', False)
 
     def to_gemini_req(self, messages: List[OpenAIMessage]) -> 'ContentsType':
-        r"""Converts the request from the OpenAI API format to the Gemini API
-        request format.
+        r"""Converts the request from the OpenAI API format to
+        the Gemini API request format.
 
         Args:
             messages: The request object from the OpenAI API.
@@ -197,7 +189,7 @@ class GeminiModel(BaseModelBackend):
             id=f"chatcmpl-{uuid.uuid4().hex!s}",
             object="chat.completion",
             created=int(time.time()),
-            model=self.model_type,
+            model=self.model_type.value,
             choices=[],
         )
         for i, candidate in enumerate(response.candidates):
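
Summarizing the constructor hunk: both sides fall back to the `GOOGLE_API_KEY` environment variable, but the `+` side (0.2.3a0) resolves it after calling `super().__init__` and requires an explicit config dict, while the `-` side (0.2.3) resolves it beforehand and defaults the config to `GeminiConfig().as_dict()`. A hedged usage sketch; it assumes `GeminiModel` is re-exported from `camel.models` and that a `ModelType.GEMINI_1_5_FLASH` entry exists in both versions, and the key and temperature values are illustrative:

    import os

    from camel.models import GeminiModel
    from camel.types import ModelType

    os.environ["GOOGLE_API_KEY"] = "<your-key>"  # honored by both versions

    # 0.2.3a0 (`+` side): the config dict is a required argument.
    model = GeminiModel(
        ModelType.GEMINI_1_5_FLASH, model_config_dict={"temperature": 0.2}
    )

    # 0.2.3 (`-` side): it may be omitted; GeminiConfig().as_dict() is used.
    model = GeminiModel(ModelType.GEMINI_1_5_FLASH)
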
camel/models/groq_model.py
@@ -16,7 +16,7 @@ from typing import Any, Dict, List, Optional, Union
 
 from openai import OpenAI, Stream
 
-from camel.configs import GROQ_API_PARAMS, GroqConfig
+from camel.configs import GROQ_API_PARAMS
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import (
@@ -32,48 +32,42 @@ from camel.utils import (
 
 
 class GroqModel(BaseModelBackend):
-    r"""LLM API served by Groq in a unified BaseModelBackend interface.
-
-    Args:
-        model_type (Union[ModelType, str]): Model for which a backend is
-            created.
-        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
-            that will be fed into :obj:`openai.ChatCompletion.create()`.
-            If :obj:`None`, :obj:`GroqConfig().as_dict()` will be used.
-            (default: :obj:`None`)
-        api_key (Optional[str], optional): The API key for authenticating
-            with the Groq service. (default: :obj:`None`).
-        url (Optional[str], optional): The url to the Groq service.
-            (default: :obj:`None`)
-        token_counter (Optional[BaseTokenCounter], optional): Token counter to
-            use for the model. If not provided, :obj:`OpenAITokenCounter(
-            ModelType.GPT_4O_MINI)` will be used.
-            (default: :obj:`None`)
-    """
+    r"""LLM API served by Groq in a unified BaseModelBackend interface."""
 
     def __init__(
         self,
-        model_type: Union[ModelType, str],
-        model_config_dict: Optional[Dict[str, Any]] = None,
+        model_type: ModelType,
+        model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        if model_config_dict is None:
-            model_config_dict = GroqConfig().as_dict()
-        api_key = api_key or os.environ.get("GROQ_API_KEY")
-        url = url or os.environ.get(
-            "GROQ_API_BASE_URL" or "https://api.groq.com/openai/v1"
-        )
+        r"""Constructor for Groq backend.
+
+        Args:
+            model_type (str): Model for which a backend is created.
+            model_config_dict (Dict[str, Any]): A dictionary of parameters for
+                the model configuration.
+            api_key (Optional[str]): The API key for authenticating with the
+                Groq service. (default: :obj:`None`).
+            url (Optional[str]): The url to the Groq service. (default:
+                :obj:`"https://api.groq.com/openai/v1"`)
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenAITokenCounter(ModelType.
+                GPT_4O_MINI)` will be used.
+        """
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
+        self._url = url or os.environ.get("GROQ_API_BASE_URL")
+        self._api_key = api_key or os.environ.get("GROQ_API_KEY")
         self._client = OpenAI(
             timeout=60,
             max_retries=3,
             api_key=self._api_key,
-            base_url=self._url,
+            base_url=self._url or "https://api.groq.com/openai/v1",
         )
+        self._token_counter = token_counter
 
     @property
     def token_counter(self) -> BaseTokenCounter:
@@ -107,7 +101,7 @@
         """
         response = self._client.chat.completions.create(
             messages=messages,
-            model=self.model_type,
+            model=self.model_type.value,
             **self.model_config_dict,
         )
 
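One detail worth flagging on the `-` (0.2.3) side of the constructor hunk: in `os.environ.get("GROQ_API_BASE_URL" or "https://api.groq.com/openai/v1")` the `or` sits inside the `get()` call, and since the string "GROQ_API_BASE_URL" is already truthy, the literal URL can never act as a fallback there; the `+` (0.2.3a0) side instead applies the default when constructing the client. A standalone illustration of the pitfall:

    import os

    os.environ.pop("GROQ_API_BASE_URL", None)  # ensure the variable is unset

    # "GROQ_API_BASE_URL" or "https://..." evaluates to "GROQ_API_BASE_URL"
    # (the first truthy operand), so this prints None:
    print(os.environ.get("GROQ_API_BASE_URL" or "https://api.groq.com/openai/v1"))

    # The conventional spelling passes the fallback as get()'s second argument:
    print(os.environ.get("GROQ_API_BASE_URL", "https://api.groq.com/openai/v1"))
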
camel/models/litellm_model.py
@@ -11,58 +11,49 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional
 
-from camel.configs import LITELLM_API_PARAMS, LiteLLMConfig
+from camel.configs import LITELLM_API_PARAMS
 from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
-from camel.types import ChatCompletion, ModelType
-from camel.utils import (
-    BaseTokenCounter,
-    LiteLLMTokenCounter,
-    dependencies_required,
-)
-
-
-class LiteLLMModel(BaseModelBackend):
-    r"""Constructor for LiteLLM backend with OpenAI compatibility.
-
-    Args:
-        model_type (Union[ModelType, str]): Model for which a backend is
-            created, such as GPT-3.5-turbo, Claude-2, etc.
-        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
-            that will be fed into :obj:`openai.ChatCompletion.create()`.
-            If :obj:`None`, :obj:`LiteLLMConfig().as_dict()` will be used.
-            (default: :obj:`None`)
-        api_key (Optional[str], optional): The API key for authenticating with
-            the model service. (default: :obj:`None`)
-        url (Optional[str], optional): The url to the model service.
-            (default: :obj:`None`)
-        token_counter (Optional[BaseTokenCounter], optional): Token counter to
-            use for the model. If not provided, :obj:`LiteLLMTokenCounter` will
-            be used. (default: :obj:`None`)
-    """
+from camel.types import ChatCompletion
+from camel.utils import BaseTokenCounter, LiteLLMTokenCounter
+
+
+class LiteLLMModel:
+    r"""Constructor for LiteLLM backend with OpenAI compatibility."""
 
     # NOTE: Currently stream mode is not supported.
 
-    @dependencies_required('litellm')
     def __init__(
         self,
-        model_type: Union[ModelType, str],
-        model_config_dict: Optional[Dict[str, Any]] = None,
+        model_type: str,
+        model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        from litellm import completion
+        r"""Constructor for LiteLLM backend.
 
-        if model_config_dict is None:
-            model_config_dict = LiteLLMConfig().as_dict()
-
-        super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
-        )
-        self.client = completion
+        Args:
+            model_type (str): Model for which a backend is created,
+                such as GPT-3.5-turbo, Claude-2, etc.
+            model_config_dict (Dict[str, Any]): A dictionary of parameters for
+                the model configuration.
+            api_key (Optional[str]): The API key for authenticating with the
+                model service. (default: :obj:`None`)
+            url (Optional[str]): The url to the model service. (default:
+                :obj:`None`)
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `LiteLLMTokenCounter` will
+                be used.
+        """
+        self.model_type = model_type
+        self.model_config_dict = model_config_dict
+        self._client = None
+        self._token_counter = token_counter
+        self.check_model_config()
+        self._url = url
+        self._api_key = api_key
 
     def _convert_response_from_litellm_to_openai(
         self, response
@@ -95,16 +86,26 @@ class LiteLLMModel(BaseModelBackend):
         )
 
     @property
-    def token_counter(self) -> BaseTokenCounter:
+    def client(self):
+        if self._client is None:
+            from litellm import completion
+
+            self._client = completion
+        return self._client
+
+    @property
+    def token_counter(self) -> LiteLLMTokenCounter:
         r"""Initialize the token counter for the model backend.
 
         Returns:
-            BaseTokenCounter: The token counter following the model's
+            LiteLLMTokenCounter: The token counter following the model's
                 tokenization style.
         """
         if not self._token_counter:
-            self._token_counter = LiteLLMTokenCounter(self.model_type)
-        return self._token_counter
+            self._token_counter = LiteLLMTokenCounter(  # type: ignore[assignment]
+                self.model_type
+            )
+        return self._token_counter  # type: ignore[return-value]
 
     def run(
         self,
@@ -143,3 +144,19 @@
                 f"Unexpected argument `{param}` is "
                 "input into LiteLLM model backend."
             )
+
+    @property
+    def token_limit(self) -> int:
+        r"""Returns the maximum token limit for the given model.
+
+        Returns:
+            int: The maximum token limit for the given model.
+        """
+        max_tokens = self.model_config_dict.get("max_tokens")
+        if isinstance(max_tokens, int):
+            return max_tokens
+        print(
+            "Must set `max_tokens` as an integer in `model_config_dict` when"
+            " setting up the model. Using 4096 as default value."
+        )
+        return 4096
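
The `token_limit` property added on the `+` (0.2.3a0) side reads `max_tokens` straight from the config dict and otherwise warns and falls back to 4096; note also that this side imports `litellm` lazily via the `client` property, so construction succeeds without the dependency installed. A behavior sketch (the model name and config values are illustrative, and `max_tokens` is assumed to be an accepted LiteLLM parameter):

    from camel.models.litellm_model import LiteLLMModel

    model = LiteLLMModel("gpt-3.5-turbo", model_config_dict={"max_tokens": 8192})
    print(model.token_limit)  # 8192

    model = LiteLLMModel("gpt-3.5-turbo", model_config_dict={})
    print(model.token_limit)  # prints the warning, then returns 4096
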
camel/models/mistral_model.py
@@ -12,7 +12,7 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import os
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
 
 if TYPE_CHECKING:
     from mistralai.models import (
@@ -20,7 +20,7 @@ if TYPE_CHECKING:
         Messages,
     )
 
-from camel.configs import MISTRAL_API_PARAMS, MistralConfig
+from camel.configs import MISTRAL_API_PARAMS
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import ChatCompletion, ModelType
@@ -28,10 +28,11 @@ from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
     api_keys_required,
-    dependencies_required,
 )
 
 try:
+    import os
+
     if os.getenv("AGENTOPS_API_KEY") is not None:
         from agentops import LLMEvent, record
     else:
@@ -41,44 +42,40 @@ except (ImportError, AttributeError):
 
 
 class MistralModel(BaseModelBackend):
-    r"""Mistral API in a unified BaseModelBackend interface.
-
-    Args:
-        model_type (Union[ModelType, str]): Model for which a backend is
-            created, one of MISTRAL_* series.
-        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
-            that will be fed into :obj:`Mistral.chat.complete()`.
-            If :obj:`None`, :obj:`MistralConfig().as_dict()` will be used.
-            (default: :obj:`None`)
-        api_key (Optional[str], optional): The API key for authenticating with
-            the mistral service. (default: :obj:`None`)
-        url (Optional[str], optional): The url to the mistral service.
-            (default: :obj:`None`)
-        token_counter (Optional[BaseTokenCounter], optional): Token counter to
-            use for the model. If not provided, :obj:`OpenAITokenCounter` will
-            be used. (default: :obj:`None`)
-    """
-
-    @dependencies_required('mistralai')
+    r"""Mistral API in a unified BaseModelBackend interface."""
+
     def __init__(
         self,
-        model_type: Union[ModelType, str],
-        model_config_dict: Optional[Dict[str, Any]] = None,
+        model_type: ModelType,
+        model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        from mistralai import Mistral
+        r"""Constructor for Mistral backend.
 
-        if model_config_dict is None:
-            model_config_dict = MistralConfig().as_dict()
-
-        api_key = api_key or os.environ.get("MISTRAL_API_KEY")
-        url = url or os.environ.get("MISTRAL_API_BASE_URL")
+        Args:
+            model_type (ModelType): Model for which a backend is created,
+                one of MISTRAL_* series.
+            model_config_dict (Dict[str, Any]): A dictionary that will
+                be fed into `MistralClient.chat`.
+            api_key (Optional[str]): The API key for authenticating with the
+                mistral service. (default: :obj:`None`)
+            url (Optional[str]): The url to the mistral service.
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenAITokenCounter` will be
+                used.
+        """
        super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
+        self._api_key = api_key or os.environ.get("MISTRAL_API_KEY")
+        self._url = url or os.environ.get("MISTRAL_SERVER_URL")
+
+        from mistralai import Mistral
+
         self._client = Mistral(api_key=self._api_key, server_url=self._url)
+        self._token_counter: Optional[BaseTokenCounter] = None
 
     def _to_openai_response(
         self, response: 'ChatCompletionResponse'
@@ -218,7 +215,7 @@
 
         response = self._client.chat.complete(
             messages=mistral_messages,
-            model=self.model_type,
+            model=self.model_type.value,
             **self.model_config_dict,
         )
 
@@ -234,7 +231,7 @@
             prompt_tokens=openai_response.usage.prompt_tokens,  # type: ignore[union-attr]
             completion=openai_response.choices[0].message.content,
             completion_tokens=openai_response.usage.completion_tokens,  # type: ignore[union-attr]
-            model=self.model_type,
+            model=self.model_type.value,
         )
         record(llm_event)
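
Beyond the docstring reshuffle, the two sides also disagree on which environment variable supplies the Mistral server URL: the `-` (0.2.3) side reads `MISTRAL_API_BASE_URL`, while the `+` (0.2.3a0) side reads `MISTRAL_SERVER_URL`. A configuration sketch (the key placeholder and endpoint value are illustrative):

    import os

    os.environ["MISTRAL_API_KEY"] = "<your-key>"  # read by both versions

    # Read by 0.2.3a0 (`+` side):
    os.environ["MISTRAL_SERVER_URL"] = "https://api.mistral.ai"

    # Read by 0.2.3 (`-` side):
    os.environ["MISTRAL_API_BASE_URL"] = "https://api.mistral.ai"
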