quantalogic 0.33.4__py3-none-any.whl → 0.40.0__py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that public registry.
Files changed (107)
  1. quantalogic/__init__.py +0 -4
  2. quantalogic/agent.py +603 -362
  3. quantalogic/agent_config.py +260 -28
  4. quantalogic/agent_factory.py +43 -17
  5. quantalogic/coding_agent.py +20 -12
  6. quantalogic/config.py +7 -4
  7. quantalogic/console_print_events.py +4 -8
  8. quantalogic/console_print_token.py +2 -2
  9. quantalogic/docs_cli.py +15 -10
  10. quantalogic/event_emitter.py +258 -83
  11. quantalogic/flow/__init__.py +23 -0
  12. quantalogic/flow/flow.py +595 -0
  13. quantalogic/flow/flow_extractor.py +672 -0
  14. quantalogic/flow/flow_generator.py +89 -0
  15. quantalogic/flow/flow_manager.py +407 -0
  16. quantalogic/flow/flow_manager_schema.py +169 -0
  17. quantalogic/flow/flow_yaml.md +419 -0
  18. quantalogic/generative_model.py +109 -77
  19. quantalogic/get_model_info.py +6 -6
  20. quantalogic/interactive_text_editor.py +100 -73
  21. quantalogic/main.py +36 -23
  22. quantalogic/model_info_list.py +12 -0
  23. quantalogic/model_info_litellm.py +14 -14
  24. quantalogic/prompts.py +2 -1
  25. quantalogic/{llm.py → quantlitellm.py} +29 -39
  26. quantalogic/search_agent.py +4 -4
  27. quantalogic/server/models.py +4 -1
  28. quantalogic/task_file_reader.py +5 -5
  29. quantalogic/task_runner.py +21 -20
  30. quantalogic/tool_manager.py +10 -21
  31. quantalogic/tools/__init__.py +98 -68
  32. quantalogic/tools/composio/composio.py +416 -0
  33. quantalogic/tools/{generate_database_report_tool.py → database/generate_database_report_tool.py} +4 -9
  34. quantalogic/tools/database/sql_query_tool_advanced.py +261 -0
  35. quantalogic/tools/document_tools/markdown_to_docx_tool.py +620 -0
  36. quantalogic/tools/document_tools/markdown_to_epub_tool.py +438 -0
  37. quantalogic/tools/document_tools/markdown_to_html_tool.py +362 -0
  38. quantalogic/tools/document_tools/markdown_to_ipynb_tool.py +319 -0
  39. quantalogic/tools/document_tools/markdown_to_latex_tool.py +420 -0
  40. quantalogic/tools/document_tools/markdown_to_pdf_tool.py +623 -0
  41. quantalogic/tools/document_tools/markdown_to_pptx_tool.py +319 -0
  42. quantalogic/tools/duckduckgo_search_tool.py +2 -4
  43. quantalogic/tools/finance/alpha_vantage_tool.py +440 -0
  44. quantalogic/tools/finance/ccxt_tool.py +373 -0
  45. quantalogic/tools/finance/finance_llm_tool.py +387 -0
  46. quantalogic/tools/finance/google_finance.py +192 -0
  47. quantalogic/tools/finance/market_intelligence_tool.py +520 -0
  48. quantalogic/tools/finance/technical_analysis_tool.py +491 -0
  49. quantalogic/tools/finance/tradingview_tool.py +336 -0
  50. quantalogic/tools/finance/yahoo_finance.py +236 -0
  51. quantalogic/tools/git/bitbucket_clone_repo_tool.py +181 -0
  52. quantalogic/tools/git/bitbucket_operations_tool.py +326 -0
  53. quantalogic/tools/git/clone_repo_tool.py +189 -0
  54. quantalogic/tools/git/git_operations_tool.py +532 -0
  55. quantalogic/tools/google_packages/google_news_tool.py +480 -0
  56. quantalogic/tools/grep_app_tool.py +123 -186
  57. quantalogic/tools/{dalle_e.py → image_generation/dalle_e.py} +37 -27
  58. quantalogic/tools/jinja_tool.py +6 -10
  59. quantalogic/tools/language_handlers/__init__.py +22 -9
  60. quantalogic/tools/list_directory_tool.py +131 -42
  61. quantalogic/tools/llm_tool.py +45 -15
  62. quantalogic/tools/llm_vision_tool.py +59 -7
  63. quantalogic/tools/markitdown_tool.py +17 -5
  64. quantalogic/tools/nasa_packages/models.py +47 -0
  65. quantalogic/tools/nasa_packages/nasa_apod_tool.py +232 -0
  66. quantalogic/tools/nasa_packages/nasa_neows_tool.py +147 -0
  67. quantalogic/tools/nasa_packages/services.py +82 -0
  68. quantalogic/tools/presentation_tools/presentation_llm_tool.py +396 -0
  69. quantalogic/tools/product_hunt/product_hunt_tool.py +258 -0
  70. quantalogic/tools/product_hunt/services.py +63 -0
  71. quantalogic/tools/rag_tool/__init__.py +48 -0
  72. quantalogic/tools/rag_tool/document_metadata.py +15 -0
  73. quantalogic/tools/rag_tool/query_response.py +20 -0
  74. quantalogic/tools/rag_tool/rag_tool.py +566 -0
  75. quantalogic/tools/rag_tool/rag_tool_beta.py +264 -0
  76. quantalogic/tools/read_html_tool.py +24 -38
  77. quantalogic/tools/replace_in_file_tool.py +10 -10
  78. quantalogic/tools/safe_python_interpreter_tool.py +10 -24
  79. quantalogic/tools/search_definition_names.py +2 -2
  80. quantalogic/tools/sequence_tool.py +14 -23
  81. quantalogic/tools/sql_query_tool.py +17 -19
  82. quantalogic/tools/tool.py +39 -15
  83. quantalogic/tools/unified_diff_tool.py +1 -1
  84. quantalogic/tools/utilities/csv_processor_tool.py +234 -0
  85. quantalogic/tools/utilities/download_file_tool.py +179 -0
  86. quantalogic/tools/utilities/mermaid_validator_tool.py +661 -0
  87. quantalogic/tools/utils/__init__.py +1 -4
  88. quantalogic/tools/utils/create_sample_database.py +24 -38
  89. quantalogic/tools/utils/generate_database_report.py +74 -82
  90. quantalogic/tools/wikipedia_search_tool.py +17 -21
  91. quantalogic/utils/ask_user_validation.py +1 -1
  92. quantalogic/utils/async_utils.py +35 -0
  93. quantalogic/utils/check_version.py +3 -5
  94. quantalogic/utils/get_all_models.py +2 -1
  95. quantalogic/utils/git_ls.py +21 -7
  96. quantalogic/utils/lm_studio_model_info.py +9 -7
  97. quantalogic/utils/python_interpreter.py +113 -43
  98. quantalogic/utils/xml_utility.py +178 -0
  99. quantalogic/version_check.py +1 -1
  100. quantalogic/welcome_message.py +7 -7
  101. quantalogic/xml_parser.py +0 -1
  102. {quantalogic-0.33.4.dist-info → quantalogic-0.40.0.dist-info}/METADATA +44 -1
  103. quantalogic-0.40.0.dist-info/RECORD +148 -0
  104. quantalogic-0.33.4.dist-info/RECORD +0 -102
  105. {quantalogic-0.33.4.dist-info → quantalogic-0.40.0.dist-info}/LICENSE +0 -0
  106. {quantalogic-0.33.4.dist-info → quantalogic-0.40.0.dist-info}/WHEEL +0 -0
  107. {quantalogic-0.33.4.dist-info → quantalogic-0.40.0.dist-info}/entry_points.txt +0 -0
quantalogic/generative_model.py
@@ -1,24 +1,20 @@
 """Generative model module for AI-powered text generation."""
 
+import asyncio
 from datetime import datetime
-from typing import Any, Dict, List
+from typing import Any, AsyncGenerator, Dict, List
 
-import litellm
 import openai
-from litellm import exceptions
 from loguru import logger
 from pydantic import BaseModel, Field, field_validator
 
 from quantalogic.event_emitter import EventEmitter  # Importing the EventEmitter class
 from quantalogic.get_model_info import get_max_input_tokens, get_max_output_tokens, get_max_tokens
-from quantalogic.llm import count_tokens, generate_completion, generate_image
+from quantalogic.quantlitellm import acompletion, aimage_generation, exceptions, token_counter
 
 MIN_RETRIES = 1
 
 
-litellm.suppress_debug_info = True  # Very important to suppress prints don't remove
-
-
 # Define the Message class for conversation handling
 class Message(BaseModel):
     """Represents a message in a conversation with a specific role and content."""
@@ -76,7 +72,7 @@ class ResponseStats(BaseModel):
 
 
 class GenerativeModel:
-    """Generative model for AI-powered text generation and image generation."""
+    """Generative model for AI-powered text and image generation with async support."""
 
     def __init__(
         self,
@@ -121,26 +117,25 @@ class GenerativeModel:
         exceptions.PermissionDeniedError,
     )
 
-    # Generate a response with conversation history and optional streaming
-    def generate_with_history(
+    async def async_generate_with_history(
         self,
         messages_history: list[Message],
         prompt: str,
         image_url: str | None = None,
         streaming: bool = False,
         stop_words: list[str] | None = None,
-    ) -> ResponseStats:
-        """Generate a response with conversation history and optional image.
+    ) -> ResponseStats | AsyncGenerator[str, None]:
+        """Asynchronously generate a response with conversation history and optional image.
 
         Args:
             messages_history: Previous conversation messages.
             prompt: Current user prompt.
             image_url: Optional image URL for visual queries.
             streaming: Whether to stream the response.
-            stop_words: Optional list of stop words for streaming
+            stop_words: Optional list of stop words for streaming.
 
         Returns:
-            Detailed response statistics or a generator in streaming mode.
+            ResponseStats if streaming=False, or an AsyncGenerator for streaming=True.
         """
         messages = [{"role": msg.role, "content": str(msg.content)} for msg in messages_history]
 
@@ -158,59 +153,79 @@ class GenerativeModel:
         messages.append({"role": "user", "content": str(prompt)})
 
         if streaming:
-            self.event_emitter.emit("stream_start")  # Emit stream start event
-            return self._stream_response(messages)  # Return generator
+            self.event_emitter.emit("stream_start")
+            return self._async_stream_response(messages, stop_words)
 
         try:
-            logger.debug(f"Generating response for prompt: {prompt}")
-
-            response = generate_completion(
-                temperature=self.temperature,
+            logger.debug(f"Async generating response for prompt: {prompt} with messages: {messages}")
+            response = await acompletion(
                 model=self.model,
                 messages=messages,
+                temperature=self.temperature,
                 num_retries=MIN_RETRIES,
                 stop=stop_words,
                 extra_headers={"X-Title": "quantalogic"},
             )
+            logger.debug(f"Raw response from {self.model}: {response}")
+
+            # Check for error in response
+            if hasattr(response, "error") and response.error:
+                error_msg = response.error.get("message", "Unknown error")
+                logger.warning(f"API returned error: {error_msg}")
+                raise openai.APIError(
+                    message=f"API error: {error_msg}",
+                    request={"model": self.model, "messages": messages},
+                    body={"error": response.error},
+                )
 
             token_usage = TokenUsage(
                 prompt_tokens=response.usage.prompt_tokens,
                 completion_tokens=response.usage.completion_tokens,
                 total_tokens=response.usage.total_tokens,
             )
+            # Get the content with a check for None
+            content = response.choices[0].message.content
+            if content is None:
+                logger.warning(f"Received None content from {self.model}. Raw response: {response}")
+                raise ValueError(f"Model {self.model} returned no content for the given input.")
 
             return ResponseStats(
-                response=response.choices[0].message.content,
+                response=content,
                 usage=token_usage,
                 model=self.model,
                 finish_reason=response.choices[0].finish_reason,
             )
-
         except Exception as e:
             self._handle_generation_exception(e)
+            # We should never reach here as _handle_generation_exception always raises
 
-    def _stream_response(self, messages, stop_words: list[str] | None = None):
-        """Private method to handle streaming responses."""
+    async def _async_stream_response(self, messages, stop_words: list[str] | None = None):
+        """Private method to handle asynchronous streaming responses."""
         try:
-            for chunk in generate_completion(
-                temperature=self.temperature,
+            response = await acompletion(
                 model=self.model,
                 messages=messages,
-                num_retries=MIN_RETRIES,
-                stream=True,  # Enable streaming,
+                temperature=self.temperature,
+                stream=True,
                 stop=stop_words,
-            ):
+                num_retries=MIN_RETRIES,
+            )
+            async for chunk in response:
                 if chunk.choices[0].delta.content is not None:
                     self.event_emitter.emit("stream_chunk", chunk.choices[0].delta.content)
-                    yield chunk.choices[0].delta.content  # Yield each chunk of content
-
-            self.event_emitter.emit("stream_end")  # Emit stream end event
+                    yield chunk.choices[0].delta.content
+            self.event_emitter.emit("stream_end")
         except Exception as e:
-            logger.error(f"Streaming error: {str(e)}")
+            logger.error(f"Async streaming error: {str(e)}")
            raise
 
-    def generate(self, prompt: str, image_url: str | None = None, streaming: bool = False) -> ResponseStats:
-        """Generate a response without conversation history.
+    async def async_generate(
+        self,
+        prompt: str,
+        image_url: str | None = None,
+        streaming: bool = False,
+    ) -> ResponseStats | AsyncGenerator[str, None]:
+        """Asynchronously generate a response without conversation history.
 
         Args:
             prompt: User prompt.
@@ -218,9 +233,9 @@ class GenerativeModel:
             streaming: Whether to stream the response.
 
         Returns:
-            Detailed response statistics or a generator in streaming mode.
+            ResponseStats if streaming=False, or an AsyncGenerator for streaming=True.
         """
-        return self.generate_with_history([], prompt, image_url, streaming)
+        return await self.async_generate_with_history([], prompt, image_url, streaming)
 
     def _handle_generation_exception(self, e):
         """Handle exceptions during generation."""
@@ -238,35 +253,39 @@ class GenerativeModel:
 
         if isinstance(e, self.AUTH_EXCEPTIONS):
             logger.debug("Authentication error occurred")
-            raise openai.AuthenticationError(f"Authentication failed with provider {error_details['provider']}") from e
+            raise openai.AuthenticationError(
+                message=f"Authentication failed with provider {error_details['provider']}",
+                request={"model": self.model, "temperature": self.temperature},
+                body={"error": {"message": str(e), "type": "authentication_error"}},
+            ) from e
 
         if isinstance(e, self.CONTEXT_EXCEPTIONS):
-            raise openai.InvalidRequestError(f"Context window exceeded or invalid request: {str(e)}") from e
+            raise openai.InvalidRequestError(
+                message=f"Context window exceeded or invalid request: {str(e)}",
+                request={"model": self.model, "temperature": self.temperature},
+                body={"error": {"message": str(e), "type": "invalid_request_error"}},
+            ) from e
 
         if isinstance(e, self.POLICY_EXCEPTIONS):
-            raise openai.APIError(f"Content policy violation: {str(e)}") from e
+            raise openai.APIError(
+                message=f"Content policy violation: {str(e)}",
+                request={"model": self.model, "temperature": self.temperature},
+                body={"error": {"message": str(e), "type": "policy_violation"}},
+            ) from e
 
         if isinstance(e, openai.OpenAIError):
             raise
 
-        raise openai.APIError(f"Unexpected error during generation: {str(e)}") from e
+        raise openai.APIError(
+            message=f"Unexpected error during generation: {str(e)}",
+            request={"model": self.model, "temperature": self.temperature},
+            body={"error": {"message": str(e), "type": "unexpected_error"}},
+        ) from e
 
     def get_max_tokens(self) -> int:
         """Get the maximum number of tokens that can be generated by the model."""
         return get_max_tokens(self.model)
 
-    def token_counter(self, messages: list[Message]) -> int:
-        """Count the number of tokens in a list of messages."""
-        logger.debug(f"Counting tokens for {len(messages)} messages using model {self.model}")
-        litellm_messages = [{"role": msg.role, "content": str(msg.content)} for msg in messages]
-        return count_tokens(model=self.model, messages=litellm_messages)
-
-    def token_counter_with_history(self, messages_history: list[Message], prompt: str) -> int:
-        """Count the number of tokens in a list of messages and a prompt."""
-        litellm_messages = [{"role": msg.role, "content": str(msg.content)} for msg in messages_history]
-        litellm_messages.append({"role": "user", "content": str(prompt)})
-        return count_tokens(model=self.model, messages=litellm_messages)
-
     def get_model_max_input_tokens(self) -> int | None:
         """Get the maximum number of input tokens for the model."""
         return get_max_input_tokens(self.model)
@@ -275,35 +294,26 @@ class GenerativeModel:
         """Get the maximum number of output tokens for the model."""
         return get_max_output_tokens(self.model)
 
-    def generate_image(self, prompt: str, params: Dict[str, Any]) -> ResponseStats:
-        """Generate an image using the specified model and parameters.
+    async def async_generate_image(self, prompt: str, params: Dict[str, Any]) -> ResponseStats:
+        """Asynchronously generate an image using the specified model and parameters.
 
         Args:
-            prompt: Text description of the image to generate
-            params: Dictionary of parameters for image generation including:
-                - model: Name of the image generation model
-                - size: Size of the generated image
-                - quality: Quality level (DALL-E only)
-                - style: Style preference (DALL-E only)
-                - response_format: Format of the response (url/base64)
-                - negative_prompt: What to avoid in the image (SD only)
-                - cfg_scale: Classifier Free Guidance scale (SD only)
+            prompt: Text description of the image to generate.
+            params: Dictionary of parameters for image generation.
 
         Returns:
-            ResponseStats containing the image generation results
+            ResponseStats containing the image generation results.
 
         Raises:
-            Exception: If there's an error during image generation
+            Exception: If there's an error during image generation.
         """
         try:
-            logger.debug(f"Generating image with params: {params}")
-
-            # Ensure prompt is in params
-            generation_params = {**params}
-            generation_params["prompt"] = prompt
+            logger.debug(f"Async generating image with params: {params}")
+            generation_params = {**params, "prompt": prompt}
+            model = generation_params.pop("model")
 
-            # Call litellm's image generation function
-            response = generate_image(model=generation_params.pop("model"), **generation_params)
+            # Check if litellm provides an async image generation method; if not, adapt sync
+            response = await aimage_generation(model=model, **generation_params)
 
             # Convert response data to list of dictionaries with string values
             if hasattr(response, "data"):
@@ -329,15 +339,37 @@ class GenerativeModel:
             else:
                 created = None
 
-            # Convert response to our ResponseStats format
             return ResponseStats(
-                response="",  # Empty for image generation
+                response="",
                 usage=TokenUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0),
                 model=str(params["model"]),
                 data=data,
                 created=created,
             )
-
         except Exception as e:
-            logger.error(f"Error in image generation: {str(e)}")
+            logger.error(f"Error in async image generation: {str(e)}")
             raise
+
+    def token_counter(self, messages: list[Message]) -> int:
+        """Count the number of tokens in a list of messages."""
+        logger.debug(f"Counting tokens for {len(messages)} messages using model {self.model}")
+        litellm_messages = [{"role": msg.role, "content": str(msg.content)} for msg in messages]
+        return token_counter(model=self.model, messages=litellm_messages)
+
+    def token_counter_with_history(self, messages_history: list[Message], prompt: str) -> int:
+        """Count the number of tokens in a list of messages and a prompt."""
+        litellm_messages = [{"role": msg.role, "content": str(msg.content)} for msg in messages_history]
+        litellm_messages.append({"role": "user", "content": str(prompt)})
+        return token_counter(model=self.model, messages=litellm_messages)
+
+    async def async_token_counter(self, messages: list[Message]) -> int:
+        """Asynchronously count the number of tokens in a list of messages."""
+        logger.debug(f"Async counting tokens for {len(messages)} messages using model {self.model}")
+        litellm_messages = [{"role": msg.role, "content": str(msg.content)} for msg in messages]
+        return await asyncio.to_thread(token_counter, model=self.model, messages=litellm_messages)
+
+    async def async_token_counter_with_history(self, messages_history: list[Message], prompt: str) -> int:
+        """Asynchronously count the number of tokens in a list of messages and a prompt."""
+        litellm_messages = [{"role": msg.role, "content": str(msg.content)} for msg in messages_history]
+        litellm_messages.append({"role": "user", "content": str(prompt)})
+        return await asyncio.to_thread(token_counter, model=self.model, messages=litellm_messages)
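The diff above replaces the synchronous generate / generate_with_history / generate_image methods with async_generate, async_generate_with_history, and async_generate_image. A minimal usage sketch of that new surface follows; it is not taken from the package documentation, and the GenerativeModel constructor keywords (model=, temperature=), the Message field names, and the example model name are assumptions inferred from the attributes the diff itself references (self.model, self.temperature, msg.role, msg.content).

# Sketch only: exercising the 0.40.0 async API shown in the diff above.
# Assumed (not confirmed by this diff): constructor kwargs and the placeholder
# model name "gpt-4o-mini".
import asyncio

from quantalogic.generative_model import GenerativeModel, Message


async def main() -> None:
    model = GenerativeModel(model="gpt-4o-mini", temperature=0.7)  # assumed signature

    # Non-streaming call: returns a ResponseStats with .response and .usage.
    stats = await model.async_generate("Summarize unified diffs in one sentence.")
    print(stats.response, stats.usage.total_tokens)

    # Streaming call: the awaited method hands back an async generator of text chunks.
    history = [Message(role="user", content="Hello"), Message(role="assistant", content="Hi!")]
    stream = await model.async_generate_with_history(history, "Keep going.", streaming=True)
    async for chunk in stream:
        print(chunk, end="", flush=True)


asyncio.run(main())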
quantalogic/get_model_info.py
@@ -2,7 +2,7 @@ import loguru
 
 from quantalogic.model_info_list import model_info
 from quantalogic.model_info_litellm import litellm_get_model_max_input_tokens, litellm_get_model_max_output_tokens
-from quantalogic.utils.lm_studio_model_info import ModelInfo, get_model_list
+from quantalogic.utils.lm_studio_model_info import get_model_list
 
 DEFAULT_MAX_OUTPUT_TOKENS = 4 * 1024  # Reasonable default for most models
 DEFAULT_MAX_INPUT_TOKENS = 32 * 1024  # Reasonable default for most models
@@ -24,11 +24,11 @@ def get_max_output_tokens(model_name: str) -> int:
     """Get max output tokens with safe fallback"""
     validate_model_name(model_name)
 
-    if model_name.startswith('lm_studio/'):
+    if model_name.startswith("lm_studio/"):
         try:
             models = get_model_list()
             for model in models.data:
-                if model.id == model_name[len('lm_studio/'):]:
+                if model.id == model_name[len("lm_studio/") :]:
                     return model.max_context_length
         except Exception:
             loguru.logger.warning(f"Could not fetch LM Studio model info for {model_name}, using default")
@@ -38,7 +38,7 @@ def get_max_output_tokens(model_name: str) -> int:
 
     try:
         return litellm_get_model_max_output_tokens(model_name)
-    except Exception as e:
+    except Exception:
         loguru.logger.warning(f"Model {model_name} not found in LiteLLM registry, using default")
         return DEFAULT_MAX_OUTPUT_TOKENS
 
@@ -47,11 +47,11 @@ def get_max_input_tokens(model_name: str) -> int:
     """Get max input tokens with safe fallback"""
     validate_model_name(model_name)
 
-    if model_name.startswith('lm_studio/'):
+    if model_name.startswith("lm_studio/"):
         try:
             models = get_model_list()
             for model in models.data:
-                if model.id == model_name[len('lm_studio/'):]:
+                if model.id == model_name[len("lm_studio/") :]:
                     return model.max_context_length
         except Exception:
             loguru.logger.warning(f"Could not fetch LM Studio model info for {model_name}, using default")