camel-ai 0.1.6.6__py3-none-any.whl → 0.1.6.8__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release.


This version of camel-ai might be problematic.

@@ -0,0 +1,232 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ import os
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+ from camel.configs import REKA_API_PARAMS
+ from camel.messages import OpenAIMessage
+ from camel.models import BaseModelBackend
+ from camel.types import ChatCompletion, ModelType
+ from camel.utils import (
+     BaseTokenCounter,
+     OpenAITokenCounter,
+     api_keys_required,
+ )
+
+ if TYPE_CHECKING:
+     from reka.types import ChatMessage, ChatResponse
+
+ try:
+     import os
+
+     if os.getenv("AGENTOPS_API_KEY") is not None:
+         from agentops import LLMEvent, record
+     else:
+         raise ImportError
+ except (ImportError, AttributeError):
+     LLMEvent = None
+
+
+ class RekaModel(BaseModelBackend):
+     r"""Reka API in a unified BaseModelBackend interface."""
+
+     def __init__(
+         self,
+         model_type: ModelType,
+         model_config_dict: Dict[str, Any],
+         api_key: Optional[str] = None,
+         url: Optional[str] = None,
+         token_counter: Optional[BaseTokenCounter] = None,
+     ) -> None:
+         r"""Constructor for Reka backend.
+
+         Args:
+             model_type (ModelType): Model for which a backend is created,
+                 one of the REKA_* series.
+             model_config_dict (Dict[str, Any]): A dictionary that will
+                 be fed into `Reka.chat.create`.
+             api_key (Optional[str]): The API key for authenticating with the
+                 Reka service. (default: :obj:`None`)
+             url (Optional[str]): The url to the Reka service.
+             token_counter (Optional[BaseTokenCounter]): Token counter to use
+                 for the model. If not provided, `OpenAITokenCounter` will be
+                 used.
+         """
+         super().__init__(
+             model_type, model_config_dict, api_key, url, token_counter
+         )
+         self._api_key = api_key or os.environ.get("REKA_API_KEY")
+         self._url = url or os.environ.get("REKA_SERVER_URL")
+
+         from reka.client import Reka
+
+         self._client = Reka(api_key=self._api_key, base_url=self._url)
+         self._token_counter: Optional[BaseTokenCounter] = None
+
+     def _convert_reka_to_openai_response(
+         self, response: 'ChatResponse'
+     ) -> ChatCompletion:
+         r"""Converts a Reka `ChatResponse` to an OpenAI-style `ChatCompletion`
+         response.
+
+         Args:
+             response (ChatResponse): The response object from the Reka API.
+
+         Returns:
+             ChatCompletion: An OpenAI-compatible chat completion response.
+         """
+         openai_response = ChatCompletion.construct(
+             id=response.id,
+             choices=[
+                 dict(
+                     message={
+                         "role": response.responses[0].message.role,
+                         "content": response.responses[0].message.content,
+                     },
+                     finish_reason=response.responses[0].finish_reason
+                     if response.responses[0].finish_reason
+                     else None,
+                 )
+             ],
+             created=None,
+             model=response.model,
+             object="chat.completion",
+             usage=response.usage,
+         )
+
+         return openai_response
+
+     def _convert_openai_to_reka_messages(
+         self,
+         messages: List[OpenAIMessage],
+     ) -> List["ChatMessage"]:
+         r"""Converts OpenAI API messages to Reka API messages.
+
+         Args:
+             messages (List[OpenAIMessage]): A list of messages in OpenAI
+                 format.
+
+         Returns:
+             List[ChatMessage]: A list of messages converted to Reka's format.
+         """
+         from reka.types import ChatMessage
+
+         reka_messages = []
+         for msg in messages:
+             role = msg.get("role")
+             content = str(msg.get("content"))
+
+             if role == "user":
+                 reka_messages.append(ChatMessage(role="user", content=content))
+             elif role == "assistant":
+                 reka_messages.append(
+                     ChatMessage(role="assistant", content=content)
+                 )
+             elif role == "system":
+                 reka_messages.append(ChatMessage(role="user", content=content))
+
+                 # Add one more assistant msg, since Reka requires the
+                 # conversation history to alternate between 'user' and
+                 # 'assistant', starting and ending with 'user'.
+                 reka_messages.append(
+                     ChatMessage(
+                         role="assistant",
+                         content="",
+                     )
+                 )
+             else:
+                 raise ValueError(f"Unsupported message role: {role}")
+
+         return reka_messages
+
+     @property
+     def token_counter(self) -> BaseTokenCounter:
+         r"""Initialize the token counter for the model backend.
+
+         # NOTE: Temporarily using `OpenAITokenCounter`
+
+         Returns:
+             BaseTokenCounter: The token counter following the model's
+                 tokenization style.
+         """
+         if not self._token_counter:
+             self._token_counter = OpenAITokenCounter(
+                 model=ModelType.GPT_4O_MINI
+             )
+         return self._token_counter
+
+     @api_keys_required("REKA_API_KEY")
+     def run(
+         self,
+         messages: List[OpenAIMessage],
+     ) -> ChatCompletion:
+         r"""Runs inference of Reka chat completion.
+
+         Args:
+             messages (List[OpenAIMessage]): Message list with the chat history
+                 in OpenAI API format.
+
+         Returns:
+             ChatCompletion.
+         """
+         reka_messages = self._convert_openai_to_reka_messages(messages)
+
+         response = self._client.chat.create(
+             messages=reka_messages,
+             model=self.model_type.value,
+             **self.model_config_dict,
+         )
+
+         openai_response = self._convert_reka_to_openai_response(response)
+
+         # Add AgentOps LLM Event tracking
+         if LLMEvent:
+             llm_event = LLMEvent(
+                 thread_id=openai_response.id,
+                 prompt=" ".join(
+                     [message.get("content") for message in messages]  # type: ignore[misc]
+                 ),
+                 prompt_tokens=openai_response.usage.input_tokens,  # type: ignore[union-attr]
+                 completion=openai_response.choices[0].message.content,
+                 completion_tokens=openai_response.usage.output_tokens,  # type: ignore[union-attr]
+                 model=self.model_type.value,
+             )
+             record(llm_event)
+
+         return openai_response
+
+     def check_model_config(self):
+         r"""Check whether the model configuration contains any
+         unexpected arguments to Reka API.
+
+         Raises:
+             ValueError: If the model configuration dictionary contains any
+                 unexpected arguments to Reka API.
+         """
+         for param in self.model_config_dict:
+             if param not in REKA_API_PARAMS:
+                 raise ValueError(
+                     f"Unexpected argument `{param}` is "
+                     "input into Reka model backend."
+                 )
+
+     @property
+     def stream(self) -> bool:
+         r"""Returns whether the model is in stream mode, which sends partial
+         results each time.
+
+         Returns:
+             bool: Whether the model is in stream mode.
+         """
+         return self.model_config_dict.get('stream', False)
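For context, here is a minimal usage sketch for the `RekaModel` backend added above. It is an illustration, not part of the release: it assumes the Reka SDK is installed, that `RekaConfig` is the config class paired with `REKA_API_PARAMS` and exposes `as_dict()` (the diff shows neither), that `ModelType` defines a `REKA_FLASH` member (the docstring only says "one of the REKA_* series"), and that the module path matches the class name, since the diff shows no file names.

import os

from camel.configs import RekaConfig  # assumed counterpart of REKA_API_PARAMS
from camel.models.reka_model import RekaModel  # module path assumed
from camel.types import ModelType

os.environ.setdefault("REKA_API_KEY", "<your-key>")  # placeholder credential

# Build the backend; RekaConfig().as_dict() should only contain keys
# accepted by REKA_API_PARAMS, so check_model_config() would pass.
model = RekaModel(
    model_type=ModelType.REKA_FLASH,  # assumed enum member
    model_config_dict=RekaConfig().as_dict(),
)

# Messages use the OpenAI chat format; the backend converts them to Reka's
# alternating user/assistant history internally.
response = model.run([{"role": "user", "content": "Say hello in one sentence."}])
print(response)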
@@ -0,0 +1,291 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ import os
+ from typing import Any, Dict, List, Optional, Union
+
+ from openai import OpenAI, Stream
+
+ from camel.configs import SAMBA_API_PARAMS
+ from camel.messages import OpenAIMessage
+ from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
+ from camel.utils import (
+     BaseTokenCounter,
+     OpenAITokenCounter,
+     api_keys_required,
+ )
+
+
+ class SambaModel:
+     r"""SambaNova service interface."""
+
+     def __init__(
+         self,
+         model_type: ModelType,
+         model_config_dict: Dict[str, Any],
+         api_key: Optional[str] = None,
+         url: Optional[str] = None,
+         token_counter: Optional[BaseTokenCounter] = None,
+     ) -> None:
+         r"""Constructor for SambaNova backend.
+
+         Args:
+             model_type (ModelType): Model for which a SambaNova backend is
+                 created.
+             model_config_dict (Dict[str, Any]): A dictionary that will
+                 be fed into the API request.
+             api_key (Optional[str]): The API key for authenticating with the
+                 SambaNova service. (default: :obj:`None`)
+             url (Optional[str]): The url to the SambaNova service. (default:
+                 :obj:`"https://fast-api.snova.ai/v1/chat/completions"`)
+             token_counter (Optional[BaseTokenCounter]): Token counter to use
+                 for the model. If not provided, `OpenAITokenCounter(ModelType.
+                 GPT_4O_MINI)` will be used.
+         """
+         self.model_type = model_type
+         self._api_key = api_key or os.environ.get("SAMBA_API_KEY")
+         self._url = url or os.environ.get("SAMBA_API_BASE_URL")
+         self._token_counter = token_counter
+         self.model_config_dict = model_config_dict
+         self.check_model_config()
+
+     @property
+     def token_counter(self) -> BaseTokenCounter:
+         r"""Initialize the token counter for the model backend.
+
+         Returns:
+             BaseTokenCounter: The token counter following the model's
+                 tokenization style.
+         """
+         if not self._token_counter:
+             self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+         return self._token_counter
+
+     def check_model_config(self):
+         r"""Check whether the model configuration contains any
+         unexpected arguments to SambaNova API.
+
+         Raises:
+             ValueError: If the model configuration dictionary contains any
+                 unexpected arguments to SambaNova API.
+         """
+         for param in self.model_config_dict:
+             if param not in SAMBA_API_PARAMS:
+                 raise ValueError(
+                     f"Unexpected argument `{param}` is "
+                     "input into SambaNova model backend."
+                 )
+
+     @api_keys_required("SAMBA_API_KEY")
+     def run(  # type: ignore[misc]
+         self, messages: List[OpenAIMessage]
+     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+         r"""Runs SambaNova's FastAPI service.
+
+         Args:
+             messages (List[OpenAIMessage]): Message list with the chat history
+                 in OpenAI API format.
+
+         Returns:
+             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                 `ChatCompletion` in the non-stream mode, or
+                 `Stream[ChatCompletionChunk]` in the stream mode.
+         """
+
+         if self.model_config_dict.get("stream") is True:
+             return self._run_streaming(messages)
+         else:
+             return self._run_non_streaming(messages)
+
+     def _run_streaming(  # type: ignore[misc]
+         self, messages: List[OpenAIMessage]
+     ) -> Stream[ChatCompletionChunk]:
+         r"""Handles streaming inference with SambaNova FastAPI.
+
+         Args:
+             messages (List[OpenAIMessage]): A list of messages representing
+                 the chat history in OpenAI API format.
+
+         Returns:
+             Stream[ChatCompletionChunk]: A generator yielding
+                 `ChatCompletionChunk` objects as they are received from the
+                 API.
+
+         Raises:
+             RuntimeError: If the HTTP request fails.
+         """
+
+         import httpx
+
+         headers = {
+             "Authorization": f"Basic {self._api_key}",
+             "Content-Type": "application/json",
+         }
+
+         data = {
+             "messages": messages,
+             "max_tokens": self.token_limit,
+             "stop": self.model_config_dict.get("stop"),
+             "model": self.model_type.value,
+             "stream": True,
+             "stream_options": self.model_config_dict.get("stream_options"),
+         }
+
+         try:
+             with httpx.stream(
+                 "POST",
+                 self._url or "https://fast-api.snova.ai/v1/chat/completions",
+                 headers=headers,
+                 json=data,
+             ) as api_response:
+                 stream = Stream[ChatCompletionChunk](
+                     cast_to=ChatCompletionChunk,
+                     response=api_response,
+                     client=OpenAI(),
+                 )
+                 for chunk in stream:
+                     yield chunk
+         except httpx.HTTPError as e:
+             raise RuntimeError(f"HTTP request failed: {e!s}")
+
+     def _run_non_streaming(
+         self, messages: List[OpenAIMessage]
+     ) -> ChatCompletion:
+         r"""Handles non-streaming inference with SambaNova FastAPI.
+
+         Args:
+             messages (List[OpenAIMessage]): A list of messages representing
+                 the chat history in OpenAI API format.
+
+         Returns:
+             ChatCompletion: A `ChatCompletion` object containing the complete
+                 response from the API.
+
+         Raises:
+             RuntimeError: If the HTTP request fails.
+             ValueError: If the JSON response cannot be decoded or is missing
+                 expected data.
+         """
+
+         import json
+
+         import httpx
+
+         headers = {
+             "Authorization": f"Basic {self._api_key}",
+             "Content-Type": "application/json",
+         }
+
+         # The endpoint serves streamed responses, so `stream` is set to
+         # `True` here as well; the chunks are aggregated below.
+         data = {
+             "messages": messages,
+             "max_tokens": self.token_limit,
+             "stop": self.model_config_dict.get("stop"),
+             "model": self.model_type.value,
+             "stream": True,
+             "stream_options": self.model_config_dict.get("stream_options"),
+         }
+
+         try:
+             with httpx.stream(
+                 "POST",
+                 self._url or "https://fast-api.snova.ai/v1/chat/completions",
+                 headers=headers,
+                 json=data,
+             ) as api_response:
+                 samba_response = []
+                 for chunk in api_response.iter_text():
+                     if chunk.startswith('data: '):
+                         chunk = chunk[6:]
+                     if '[DONE]' in chunk:
+                         break
+                     json_data = json.loads(chunk)
+                     samba_response.append(json_data)
+                 return self._to_openai_response(samba_response)
+         except httpx.HTTPError as e:
+             raise RuntimeError(f"HTTP request failed: {e!s}")
+         except json.JSONDecodeError as e:
+             raise ValueError(f"Failed to decode JSON response: {e!s}")
+
+     def _to_openai_response(
+         self, samba_response: List[Dict[str, Any]]
+     ) -> ChatCompletion:
+         r"""Converts SambaNova response chunks into an OpenAI-compatible
+         response.
+
+         Args:
+             samba_response (List[Dict[str, Any]]): A list of dictionaries
+                 representing partial responses from the SambaNova API.
+
+         Returns:
+             ChatCompletion: A `ChatCompletion` object constructed from the
+                 aggregated response data.
+
+         Raises:
+             ValueError: If the response data is invalid or incomplete.
+         """
+         # Step 1: Combine the content from each chunk
+         full_content = ""
+         for chunk in samba_response:
+             if chunk['choices']:
+                 for choice in chunk['choices']:
+                     delta_content = choice['delta'].get('content', '')
+                     full_content += delta_content
+
+         # Step 2: Create the ChatCompletion object
+         # Extract relevant information from the first chunk
+         first_chunk = samba_response[0]
+
+         choices = [
+             dict(
+                 index=0,  # type: ignore[index]
+                 message={
+                     "role": 'assistant',
+                     "content": full_content.strip(),
+                 },
+                 finish_reason=samba_response[-1]['choices'][0]['finish_reason']
+                 or None,
+             )
+         ]
+
+         obj = ChatCompletion.construct(
+             id=first_chunk['id'],
+             choices=choices,
+             created=first_chunk['created'],
+             model=first_chunk['model'],
+             object="chat.completion",
+             usage=None,
+         )
+
+         return obj
+
+     @property
+     def token_limit(self) -> int:
+         r"""Returns the maximum token limit for a given model.
+
+         Returns:
+             int: The maximum token limit for the given model.
+         """
+         return (
+             self.model_config_dict.get("max_tokens")
+             or self.model_type.token_limit
+         )
+
+     @property
+     def stream(self) -> bool:
+         r"""Returns whether the model is in stream mode, which sends partial
+         results each time.
+
+         Returns:
+             bool: Whether the model is in stream mode.
+         """
+         return self.model_config_dict.get('stream', False)
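A minimal usage sketch for `SambaModel`, again illustrative rather than part of the release: the module path is assumed (the diff shows no file names), the config keys are assumed to be members of `SAMBA_API_PARAMS` (they mirror the keys the backend reads out of `model_config_dict`), and the `ModelType` member is a placeholder; pick one whose `.value` names a model actually served by the endpoint.

import os

from camel.models.samba_model import SambaModel  # module path assumed
from camel.types import ModelType

os.environ.setdefault("SAMBA_API_KEY", "<your-key>")  # placeholder credential

model = SambaModel(
    model_type=ModelType.GPT_4O_MINI,  # placeholder enum member only
    model_config_dict={"max_tokens": 256, "stream": False},  # keys assumed valid
)

# stream=False routes to _run_non_streaming(), which still streams over HTTP
# but aggregates the chunks into a single ChatCompletion.
response = model.run([{"role": "user", "content": "Say hello."}])
print(response)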
@@ -0,0 +1,148 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+ import os
+ from typing import Any, Dict, List, Optional, Union
+
+ from openai import OpenAI, Stream
+
+ from camel.configs import TOGETHERAI_API_PARAMS
+ from camel.messages import OpenAIMessage
+ from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
+ from camel.utils import (
+     BaseTokenCounter,
+     OpenAITokenCounter,
+     api_keys_required,
+ )
+
+
+ class TogetherAIModel:
+     r"""Together AI backend with OpenAI compatibility.
+     TODO: Add function calling support
+     """
+
+     def __init__(
+         self,
+         model_type: str,
+         model_config_dict: Dict[str, Any],
+         api_key: Optional[str] = None,
+         url: Optional[str] = None,
+         token_counter: Optional[BaseTokenCounter] = None,
+     ) -> None:
+         r"""Constructor for TogetherAI backend.
+
+         Args:
+             model_type (str): Model for which a backend is created; supported
+                 models can be found here: https://docs.together.ai/docs/chat-models
+             model_config_dict (Dict[str, Any]): A dictionary that will
+                 be fed into `openai.ChatCompletion.create()`.
+             api_key (Optional[str]): The API key for authenticating with the
+                 Together service. (default: :obj:`None`)
+             url (Optional[str]): The url to the Together AI service. (default:
+                 :obj:`"https://api.together.xyz/v1"`)
+             token_counter (Optional[BaseTokenCounter]): Token counter to use
+                 for the model. If not provided, `OpenAITokenCounter(ModelType.
+                 GPT_4O_MINI)` will be used.
+         """
+         self.model_type = model_type
+         self.model_config_dict = model_config_dict
+         self._token_counter = token_counter
+         self._api_key = api_key or os.environ.get("TOGETHER_API_KEY")
+         self._url = url or os.environ.get("TOGETHER_API_BASE_URL")
+
+         self._client = OpenAI(
+             timeout=60,
+             max_retries=3,
+             api_key=self._api_key,
+             base_url=self._url or "https://api.together.xyz/v1",
+         )
+
+     @api_keys_required("TOGETHER_API_KEY")
+     def run(
+         self,
+         messages: List[OpenAIMessage],
+     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+         r"""Runs inference of OpenAI chat completion.
+
+         Args:
+             messages (List[OpenAIMessage]): Message list with the chat history
+                 in OpenAI API format.
+
+         Returns:
+             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                 `ChatCompletion` in the non-stream mode, or
+                 `Stream[ChatCompletionChunk]` in the stream mode.
+         """
+         # Use the OpenAI client as the interface to call Together AI.
+         # Reference: https://docs.together.ai/docs/openai-api-compatibility
+         response = self._client.chat.completions.create(
+             messages=messages,
+             model=self.model_type,
+             **self.model_config_dict,
+         )
+         return response
+
+     @property
+     def token_counter(self) -> BaseTokenCounter:
+         r"""Initialize the token counter for the model backend.
+
+         Returns:
+             OpenAITokenCounter: The token counter following the model's
+                 tokenization style.
+         """
+
+         if not self._token_counter:
+             self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+         return self._token_counter
+
+     def check_model_config(self):
+         r"""Check whether the model configuration contains any
+         unexpected arguments to TogetherAI API.
+
+         Raises:
+             ValueError: If the model configuration dictionary contains any
+                 unexpected arguments to TogetherAI API.
+         """
+         for param in self.model_config_dict:
+             if param not in TOGETHERAI_API_PARAMS:
+                 raise ValueError(
+                     f"Unexpected argument `{param}` is "
+                     "input into TogetherAI model backend."
+                 )
+
+     @property
+     def stream(self) -> bool:
+         r"""Returns whether the model is in stream mode, which sends partial
+         results each time.
+
+         Returns:
+             bool: Whether the model is in stream mode.
+         """
+         return self.model_config_dict.get('stream', False)
+
+     @property
+     def token_limit(self) -> int:
+         r"""Returns the maximum token limit for the given model.
+
+         Returns:
+             int: The maximum token limit for the given model.
+         """
+         max_tokens = self.model_config_dict.get("max_tokens")
+         if isinstance(max_tokens, int):
+             return max_tokens
+         print(
+             "Must set `max_tokens` as an integer in `model_config_dict` when"
+             " setting up the model. Using 4096 as default value."
+         )
+         return 4096
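And a minimal sketch for `TogetherAIModel`, with the same caveats: the module path is assumed, and the model name is one example from the Together chat-model list linked in the docstring. Because `run()` simply forwards `model_config_dict` to the OpenAI-compatible `chat.completions.create()` call, any valid `create()` keyword works here.

import os

from camel.models.togetherai_model import TogetherAIModel  # module path assumed

os.environ.setdefault("TOGETHER_API_KEY", "<your-key>")  # placeholder credential

model = TogetherAIModel(
    model_type="meta-llama/Llama-3-8b-chat-hf",  # example model name
    model_config_dict={"max_tokens": 256, "stream": False},
)

# Non-stream mode returns a regular OpenAI ChatCompletion object.
response = model.run([{"role": "user", "content": "Say hello."}])
print(response.choices[0].message.content)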