camel-ai 0.2.3a1__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of camel-ai might be problematic.
Files changed (87)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +93 -69
  3. camel/agents/knowledge_graph_agent.py +4 -6
  4. camel/bots/__init__.py +16 -2
  5. camel/bots/discord_app.py +138 -0
  6. camel/bots/slack/__init__.py +30 -0
  7. camel/bots/slack/models.py +158 -0
  8. camel/bots/slack/slack_app.py +255 -0
  9. camel/configs/__init__.py +1 -2
  10. camel/configs/anthropic_config.py +2 -5
  11. camel/configs/base_config.py +6 -6
  12. camel/configs/groq_config.py +2 -3
  13. camel/configs/ollama_config.py +1 -2
  14. camel/configs/openai_config.py +2 -23
  15. camel/configs/samba_config.py +2 -2
  16. camel/configs/togetherai_config.py +1 -1
  17. camel/configs/vllm_config.py +1 -1
  18. camel/configs/zhipuai_config.py +2 -3
  19. camel/embeddings/openai_embedding.py +2 -2
  20. camel/loaders/__init__.py +2 -0
  21. camel/loaders/chunkr_reader.py +163 -0
  22. camel/loaders/firecrawl_reader.py +3 -3
  23. camel/loaders/unstructured_io.py +35 -33
  24. camel/messages/__init__.py +1 -0
  25. camel/models/__init__.py +2 -4
  26. camel/models/anthropic_model.py +32 -26
  27. camel/models/azure_openai_model.py +39 -36
  28. camel/models/base_model.py +31 -20
  29. camel/models/gemini_model.py +37 -29
  30. camel/models/groq_model.py +29 -23
  31. camel/models/litellm_model.py +44 -61
  32. camel/models/mistral_model.py +32 -29
  33. camel/models/model_factory.py +66 -76
  34. camel/models/nemotron_model.py +33 -23
  35. camel/models/ollama_model.py +42 -47
  36. camel/models/{openai_compatibility_model.py → openai_compatible_model.py} +31 -49
  37. camel/models/openai_model.py +48 -29
  38. camel/models/reka_model.py +30 -28
  39. camel/models/samba_model.py +82 -177
  40. camel/models/stub_model.py +2 -2
  41. camel/models/togetherai_model.py +37 -43
  42. camel/models/vllm_model.py +43 -50
  43. camel/models/zhipuai_model.py +33 -27
  44. camel/retrievers/auto_retriever.py +29 -97
  45. camel/retrievers/vector_retriever.py +58 -47
  46. camel/societies/babyagi_playing.py +6 -3
  47. camel/societies/role_playing.py +5 -3
  48. camel/storages/graph_storages/graph_element.py +2 -2
  49. camel/storages/key_value_storages/json.py +6 -1
  50. camel/toolkits/__init__.py +20 -7
  51. camel/toolkits/arxiv_toolkit.py +155 -0
  52. camel/toolkits/ask_news_toolkit.py +653 -0
  53. camel/toolkits/base.py +2 -3
  54. camel/toolkits/code_execution.py +6 -7
  55. camel/toolkits/dalle_toolkit.py +6 -6
  56. camel/toolkits/{openai_function.py → function_tool.py} +34 -11
  57. camel/toolkits/github_toolkit.py +9 -10
  58. camel/toolkits/google_maps_toolkit.py +7 -7
  59. camel/toolkits/google_scholar_toolkit.py +146 -0
  60. camel/toolkits/linkedin_toolkit.py +7 -7
  61. camel/toolkits/math_toolkit.py +8 -8
  62. camel/toolkits/open_api_toolkit.py +5 -5
  63. camel/toolkits/reddit_toolkit.py +7 -7
  64. camel/toolkits/retrieval_toolkit.py +5 -5
  65. camel/toolkits/search_toolkit.py +9 -9
  66. camel/toolkits/slack_toolkit.py +11 -11
  67. camel/toolkits/twitter_toolkit.py +378 -452
  68. camel/toolkits/weather_toolkit.py +6 -6
  69. camel/toolkits/whatsapp_toolkit.py +177 -0
  70. camel/types/__init__.py +6 -1
  71. camel/types/enums.py +40 -85
  72. camel/types/openai_types.py +3 -0
  73. camel/types/unified_model_type.py +104 -0
  74. camel/utils/__init__.py +0 -2
  75. camel/utils/async_func.py +7 -7
  76. camel/utils/commons.py +32 -3
  77. camel/utils/token_counting.py +30 -212
  78. camel/workforce/role_playing_worker.py +1 -1
  79. camel/workforce/single_agent_worker.py +1 -1
  80. camel/workforce/task_channel.py +4 -3
  81. camel/workforce/workforce.py +4 -4
  82. camel_ai-0.2.4.dist-info/LICENSE +201 -0
  83. {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.4.dist-info}/METADATA +27 -56
  84. {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.4.dist-info}/RECORD +85 -76
  85. {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.4.dist-info}/WHEEL +1 -1
  86. camel/bots/discord_bot.py +0 -206
  87. camel/models/open_source_model.py +0 -170
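
Two entries above are module renames: camel/toolkits/openai_function.py becomes function_tool.py, and camel/models/openai_compatibility_model.py becomes openai_compatible_model.py. Code importing these modules by path needs updating on upgrade. A minimal sketch of the affected imports, assuming the renamed modules carry correspondingly renamed public classes (the pre-rename names in the comments are assumptions based on the old module paths):

    # 0.2.3a1 (assumed old public names):
    # from camel.toolkits.openai_function import OpenAIFunction
    # from camel.models.openai_compatibility_model import OpenAICompatibilityModel

    # 0.2.4:
    from camel.toolkits.function_tool import FunctionTool
    from camel.models.openai_compatible_model import OpenAICompatibleModel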
camel/models/base_model.py

@@ -17,41 +17,49 @@ from typing import Any, Dict, List, Optional, Union
 from openai import Stream
 
 from camel.messages import OpenAIMessage
-from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+    UnifiedModelType,
+)
 from camel.utils import BaseTokenCounter
 
 
 class BaseModelBackend(ABC):
     r"""Base class for different model backends.
-    May be OpenAI API, a local LLM, a stub for unit tests, etc.
+    It may be OpenAI API, a local LLM, a stub for unit tests, etc.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A config
+            dictionary. (default: :obj:`{}`)
+        api_key (Optional[str], optional): The API key for authenticating
+            with the model service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the model service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token
+            counter to use for the model. If not provided,
+            :obj:`OpenAITokenCounter` will be used. (default: :obj:`None`)
     """
 
     def __init__(
         self,
-        model_type: ModelType,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        r"""Constructor for the model backend.
-
-        Args:
-            model_type (ModelType): Model for which a backend is created.
-            model_config_dict (Dict[str, Any]): A config dictionary.
-            api_key (Optional[str]): The API key for authenticating with the
-                model service.
-            url (Optional[str]): The url to the model service.
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `OpenAITokenCounter` will
-                be used.
-        """
-        self.model_type = model_type
+        self.model_type: UnifiedModelType = UnifiedModelType(model_type)
+        if model_config_dict is None:
+            model_config_dict = {}
         self.model_config_dict = model_config_dict
         self._api_key = api_key
         self._url = url
-        self.check_model_config()
         self._token_counter = token_counter
+        self.check_model_config()
 
     @property
     @abstractmethod
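
The UnifiedModelType imported here (new in this release; see camel/types/unified_model_type.py in the file list) is what lets model_type be either a ModelType member or a plain string. A minimal sketch of the assumed equivalence, which the later substitutions of `self.model_type.value` with `self.model_type` rely on:

    from camel.types import ModelType, UnifiedModelType

    # Both spellings are assumed to normalize to the same string-like value,
    # so the unified type can be passed directly as a request's `model` field.
    from_enum = UnifiedModelType(ModelType.GPT_4O_MINI)
    from_str = UnifiedModelType("gpt-4o-mini")
    print(from_enum, from_str)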
@@ -110,6 +118,9 @@ class BaseModelBackend(ABC):
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.
 
+        This method retrieves the maximum token limit either from the
+        `model_config_dict` or from the model's default token limit.
+
         Returns:
             int: The maximum token limit for the given model.
         """
@@ -120,8 +131,8 @@
 
     @property
     def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode,
-        which sends partial results each time.
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
 
         Returns:
             bool: Whether the model is in stream mode.
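
In practice these base-class changes surface through ModelFactory (model_factory.py also changes in this release): a backend can now be requested with a plain string model name and no config dict. A minimal sketch, assuming the 0.2.4 factory keeps keyword arguments model_platform and model_type:

    from camel.models import ModelFactory
    from camel.types import ModelPlatformType

    # A plain string is accepted for model_type (wrapped in UnifiedModelType);
    # omitting model_config_dict falls back to the backend's default config.
    model = ModelFactory.create(
        model_platform=ModelPlatformType.OPENAI,
        model_type="gpt-4o-mini",
    )
    print(model.token_limit)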
camel/models/gemini_model.py

@@ -11,9 +11,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from typing import TYPE_CHECKING, Any, Dict, List, Optional
+import os
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
 
-from camel.configs import Gemini_API_PARAMS
+from camel.configs import Gemini_API_PARAMS, GeminiConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import (
@@ -26,6 +27,7 @@ from camel.utils import (
     BaseTokenCounter,
     GeminiTokenCounter,
     api_keys_required,
+    dependencies_required,
 )
 
 if TYPE_CHECKING:
@@ -33,43 +35,49 @@ if TYPE_CHECKING:
 
 
 class GeminiModel(BaseModelBackend):
-    r"""Gemini API in a unified BaseModelBackend interface."""
-
-    # NOTE: Currently "stream": True is not supported with Gemini due to the
-    # limitation of the current camel design.
-
+    r"""Gemini API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into
+            :obj:`genai.GenerativeModel.generate_content()`. If :obj:`None`,
+            :obj:`GeminiConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the gemini service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the gemini service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`GeminiTokenCounter` will
+            be used. (default: :obj:`None`)
+
+    Notes:
+        Currently :obj:`"stream": True` is not supported with Gemini due to
+        the limitation of the current camel design.
+    """
+
+    @dependencies_required('google')
     def __init__(
         self,
-        model_type: ModelType,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        r"""Constructor for Gemini backend.
-
-        Args:
-            model_type (ModelType): Model for which a backend is created.
-            model_config_dict (Dict[str, Any]): A dictionary that will
-                be fed into generate_content().
-            api_key (Optional[str]): The API key for authenticating with the
-                gemini service. (default: :obj:`None`)
-            url (Optional[str]): The url to the gemini service.
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `GeminiTokenCounter` will be
-                used.
-        """
-        import os
-
         import google.generativeai as genai
         from google.generativeai.types.generation_types import GenerationConfig
 
+        if model_config_dict is None:
+            model_config_dict = GeminiConfig().as_dict()
+
+        api_key = api_key or os.environ.get("GOOGLE_API_KEY")
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
-        self._api_key = api_key or os.environ.get("GOOGLE_API_KEY")
         genai.configure(api_key=self._api_key)
-        self._client = genai.GenerativeModel(self.model_type.value)
+        self._client = genai.GenerativeModel(self.model_type)
 
         keys = list(self.model_config_dict.keys())
         generation_config_dict = {
@@ -143,8 +151,8 @@ class GeminiModel(BaseModelBackend):
         return self.model_config_dict.get('stream', False)
 
     def to_gemini_req(self, messages: List[OpenAIMessage]) -> 'ContentsType':
-        r"""Converts the request from the OpenAI API format to
-        the Gemini API request format.
+        r"""Converts the request from the OpenAI API format to the Gemini API
+        request format.
 
         Args:
             messages: The request object from the OpenAI API.
@@ -189,7 +197,7 @@ class GeminiModel(BaseModelBackend):
             id=f"chatcmpl-{uuid.uuid4().hex!s}",
             object="chat.completion",
             created=int(time.time()),
-            model=self.model_type.value,
+            model=self.model_type,
             choices=[],
         )
         for i, candidate in enumerate(response.candidates):
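
With these defaults, a Gemini backend needs no explicit configuration. A minimal usage sketch, assuming the google-generativeai package is installed, GOOGLE_API_KEY is exported, and an illustrative model name:

    from camel.models import GeminiModel

    # api_key=None falls back to the GOOGLE_API_KEY environment variable;
    # model_config_dict=None falls back to GeminiConfig().as_dict().
    model = GeminiModel(model_type="gemini-1.5-flash")
    response = model.run(
        [{"role": "user", "content": "Summarize CAMEL-AI in one sentence."}]
    )
    print(response.choices[0].message.content)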
camel/models/groq_model.py

@@ -16,7 +16,7 @@ from typing import Any, Dict, List, Optional, Union
 
 from openai import OpenAI, Stream
 
-from camel.configs import GROQ_API_PARAMS
+from camel.configs import GROQ_API_PARAMS, GroqConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import (
@@ -32,42 +32,48 @@ from camel.utils import (
 
 
 class GroqModel(BaseModelBackend):
-    r"""LLM API served by Groq in a unified BaseModelBackend interface."""
+    r"""LLM API served by Groq in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into :obj:`openai.ChatCompletion.create()`.
+            If :obj:`None`, :obj:`GroqConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating
+            with the Groq service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the Groq service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter
+            to use for the model. If not provided,
+            :obj:`OpenAITokenCounter(ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+    """
 
     def __init__(
         self,
-        model_type: ModelType,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        r"""Constructor for Groq backend.
-
-        Args:
-            model_type (str): Model for which a backend is created.
-            model_config_dict (Dict[str, Any]): A dictionary of parameters for
-                the model configuration.
-            api_key (Optional[str]): The API key for authenticating with the
-                Groq service. (default: :obj:`None`)
-            url (Optional[str]): The url to the Groq service. (default:
-                :obj:`"https://api.groq.com/openai/v1"`)
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `OpenAITokenCounter(ModelType.
-                GPT_4O_MINI)` will be used.
-        """
+        if model_config_dict is None:
+            model_config_dict = GroqConfig().as_dict()
+        api_key = api_key or os.environ.get("GROQ_API_KEY")
+        url = url or os.environ.get(
+            "GROQ_API_BASE_URL" or "https://api.groq.com/openai/v1"
+        )
        super().__init__(
            model_type, model_config_dict, api_key, url, token_counter
        )
-        self._url = url or os.environ.get("GROQ_API_BASE_URL")
-        self._api_key = api_key or os.environ.get("GROQ_API_KEY")
         self._client = OpenAI(
             timeout=60,
             max_retries=3,
             api_key=self._api_key,
-            base_url=self._url or "https://api.groq.com/openai/v1",
+            base_url=self._url,
         )
-        self._token_counter = token_counter
 
     @property
     def token_counter(self) -> BaseTokenCounter:
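
One detail worth flagging in the new constructor: in os.environ.get("GROQ_API_BASE_URL" or "https://api.groq.com/openai/v1"), the `or` joins the two string literals and always evaluates to "GROQ_API_BASE_URL", so the literal endpoint never acts as a fallback and the url stays None whenever the variable is unset. The presumably intended form passes the fallback as os.environ.get's second argument:

    import os
    from typing import Optional

    def resolve_groq_url(url: Optional[str] = None) -> str:
        # The two-argument form of os.environ.get supplies the fallback
        # correctly: env var when set, else the public Groq endpoint.
        return url or os.environ.get(
            "GROQ_API_BASE_URL", "https://api.groq.com/openai/v1"
        )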
@@ -101,7 +107,7 @@
         """
         response = self._client.chat.completions.create(
             messages=messages,
-            model=self.model_type.value,
+            model=self.model_type,
             **self.model_config_dict,
         )
 
camel/models/litellm_model.py

@@ -11,49 +11,58 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Union
 
-from camel.configs import LITELLM_API_PARAMS
+from camel.configs import LITELLM_API_PARAMS, LiteLLMConfig
 from camel.messages import OpenAIMessage
-from camel.types import ChatCompletion
-from camel.utils import BaseTokenCounter, LiteLLMTokenCounter
-
-
-class LiteLLMModel:
-    r"""Constructor for LiteLLM backend with OpenAI compatibility."""
+from camel.models import BaseModelBackend
+from camel.types import ChatCompletion, ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    LiteLLMTokenCounter,
+    dependencies_required,
+)
+
+
+class LiteLLMModel(BaseModelBackend):
+    r"""Constructor for LiteLLM backend with OpenAI compatibility.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, such as GPT-3.5-turbo, Claude-2, etc.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into :obj:`openai.ChatCompletion.create()`.
+            If :obj:`None`, :obj:`LiteLLMConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the model service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the model service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`LiteLLMTokenCounter`
+            will be used. (default: :obj:`None`)
+    """
 
     # NOTE: Currently stream mode is not supported.
 
+    @dependencies_required('litellm')
     def __init__(
         self,
-        model_type: str,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        r"""Constructor for LiteLLM backend.
+        from litellm import completion
 
-        Args:
-            model_type (str): Model for which a backend is created,
-                such as GPT-3.5-turbo, Claude-2, etc.
-            model_config_dict (Dict[str, Any]): A dictionary of parameters for
-                the model configuration.
-            api_key (Optional[str]): The API key for authenticating with the
-                model service. (default: :obj:`None`)
-            url (Optional[str]): The url to the model service. (default:
-                :obj:`None`)
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `LiteLLMTokenCounter` will
-                be used.
-        """
-        self.model_type = model_type
-        self.model_config_dict = model_config_dict
-        self._client = None
-        self._token_counter = token_counter
-        self.check_model_config()
-        self._url = url
-        self._api_key = api_key
+        if model_config_dict is None:
+            model_config_dict = LiteLLMConfig().as_dict()
+
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+        self.client = completion
 
     def _convert_response_from_litellm_to_openai(
         self, response
@@ -86,26 +95,16 @@ class LiteLLMModel:
         )
 
     @property
-    def client(self):
-        if self._client is None:
-            from litellm import completion
-
-            self._client = completion
-        return self._client
-
-    @property
-    def token_counter(self) -> LiteLLMTokenCounter:
+    def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
 
         Returns:
-            LiteLLMTokenCounter: The token counter following the model's
+            BaseTokenCounter: The token counter following the model's
                 tokenization style.
         """
         if not self._token_counter:
-            self._token_counter = LiteLLMTokenCounter(  # type: ignore[assignment]
-                self.model_type
-            )
-        return self._token_counter  # type: ignore[return-value]
+            self._token_counter = LiteLLMTokenCounter(self.model_type)
+        return self._token_counter
 
     def run(
         self,
@@ -144,19 +143,3 @@
                 f"Unexpected argument `{param}` is "
                 "input into LiteLLM model backend."
             )
-
-    @property
-    def token_limit(self) -> int:
-        r"""Returns the maximum token limit for the given model.
-
-        Returns:
-            int: The maximum token limit for the given model.
-        """
-        max_tokens = self.model_config_dict.get("max_tokens")
-        if isinstance(max_tokens, int):
-            return max_tokens
-        print(
-            "Must set `max_tokens` as an integer in `model_config_dict` when"
-            " setting up the model. Using 4096 as default value."
-        )
-        return 4096
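
Dropping the class-local token_limit (with its print-and-4096 fallback) means LiteLLMModel now inherits BaseModelBackend.token_limit, which, per the docstring added above, prefers max_tokens from the config and otherwise uses the model's default limit. A sketch of that inherited lookup under those stated semantics (the default value is illustrative):

    from typing import Any, Dict

    def inherited_token_limit(
        model_config_dict: Dict[str, Any], model_default: int
    ) -> int:
        # An explicit integer max_tokens wins; otherwise fall back to the
        # model's own default limit rather than a hard-coded 4096.
        max_tokens = model_config_dict.get("max_tokens")
        if isinstance(max_tokens, int):
            return max_tokens
        return model_default

    assert inherited_token_limit({"max_tokens": 2048}, 128_000) == 2048
    assert inherited_token_limit({}, 128_000) == 128_000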
camel/models/mistral_model.py

@@ -12,7 +12,7 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import os
-from typing import TYPE_CHECKING, Any, Dict, List, Optional
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
 
 if TYPE_CHECKING:
     from mistralai.models import (
@@ -20,7 +20,7 @@ if TYPE_CHECKING:
         Messages,
     )
 
-from camel.configs import MISTRAL_API_PARAMS
+from camel.configs import MISTRAL_API_PARAMS, MistralConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import ChatCompletion, ModelType
@@ -28,11 +28,10 @@ from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
     api_keys_required,
+    dependencies_required,
 )
 
 try:
-    import os
-
     if os.getenv("AGENTOPS_API_KEY") is not None:
         from agentops import LLMEvent, record
     else:
@@ -42,40 +41,44 @@ except (ImportError, AttributeError):
 
 
 class MistralModel(BaseModelBackend):
-    r"""Mistral API in a unified BaseModelBackend interface."""
-
+    r"""Mistral API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, one of MISTRAL_* series.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into :obj:`Mistral.chat.complete()`.
+            If :obj:`None`, :obj:`MistralConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the mistral service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the mistral service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter` will
+            be used. (default: :obj:`None`)
+    """
+
+    @dependencies_required('mistralai')
     def __init__(
         self,
-        model_type: ModelType,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        r"""Constructor for Mistral backend.
+        from mistralai import Mistral
 
-        Args:
-            model_type (ModelType): Model for which a backend is created,
-                one of MISTRAL_* series.
-            model_config_dict (Dict[str, Any]): A dictionary that will
-                be fed into `MistralClient.chat`.
-            api_key (Optional[str]): The API key for authenticating with the
-                mistral service. (default: :obj:`None`)
-            url (Optional[str]): The url to the mistral service.
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `OpenAITokenCounter` will be
-                used.
-        """
+        if model_config_dict is None:
+            model_config_dict = MistralConfig().as_dict()
+
+        api_key = api_key or os.environ.get("MISTRAL_API_KEY")
+        url = url or os.environ.get("MISTRAL_API_BASE_URL")
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
-        self._api_key = api_key or os.environ.get("MISTRAL_API_KEY")
-        self._url = url or os.environ.get("MISTRAL_SERVER_URL")
-
-        from mistralai import Mistral
-
         self._client = Mistral(api_key=self._api_key, server_url=self._url)
-        self._token_counter: Optional[BaseTokenCounter] = None
 
     def _to_openai_response(
         self, response: 'ChatCompletionResponse'
@@ -215,7 +218,7 @@
 
         response = self._client.chat.complete(
             messages=mistral_messages,
-            model=self.model_type.value,
+            model=self.model_type,
             **self.model_config_dict,
         )
 
@@ -231,7 +234,7 @@
             prompt_tokens=openai_response.usage.prompt_tokens,  # type: ignore[union-attr]
             completion=openai_response.choices[0].message.content,
             completion_tokens=openai_response.usage.completion_tokens,  # type: ignore[union-attr]
-            model=self.model_type.value,
+            model=self.model_type,
         )
         record(llm_event)
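
Note the base-URL variable is renamed here from MISTRAL_SERVER_URL to MISTRAL_API_BASE_URL, and credential resolution now happens before super().__init__() runs. A minimal usage sketch, assuming the mistralai package is installed, MISTRAL_API_KEY is exported, and an illustrative model name:

    from camel.models import MistralModel

    # model_config_dict=None falls back to MistralConfig().as_dict();
    # api_key=None falls back to the MISTRAL_API_KEY environment variable.
    model = MistralModel(model_type="mistral-large-latest")
    response = model.run(
        [{"role": "user", "content": "Say hello in one word."}]
    )
    print(response.choices[0].message.content)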