quantalogic-0.31.1-py3-none-any.whl → quantalogic-0.32.0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
quantalogic/agent.py CHANGED
@@ -751,6 +751,8 @@ class Agent(BaseModel):
             str: Generated task summary
         """
         try:
+            if len(content) < 200:
+                return content
             prompt = (
                 "Create an ultra-concise task summary that captures ONLY: \n"
                 "1. Primary objective/purpose\n"
quantalogic/get_model_info.py CHANGED
@@ -1,44 +1,83 @@
-model_info = {
-    "dashscope/qwen-max": {"max_output_tokens": 8 * 1024, "max_input_tokens": 32 * 1024},
-    "dashscope/qwen-plus": {"max_output_tokens": 8 * 1024, "max_input_tokens": 131072},
-    "dashscope/qwen-turbo": {"max_output_tokens": 8 * 1024, "max_input_tokens": 1000000},
-    "deepseek-reasoner": {"max_output_tokens": 8 * 1024, "max_input_tokens": 1024 * 128},
-    "openrouter/deepseek/deepseek-r1": {"max_output_tokens": 8 * 1024, "max_input_tokens": 1024 * 128},
-    "openrouter/mistralai/mistral-large-2411": {"max_output_tokens": 128 * 1024, "max_input_tokens": 1024 * 128},
-    "mistralai/mistral-large-2411": {"max_output_tokens": 128 * 1024, "max_input_tokens": 1024 * 128},
-    "deepseek/deepseek-chat": {"max_output_tokens": 8 * 1024, "max_input_tokens": 1024 * 64},
-    "deepseek/deepseek-reasoner": {"max_output_tokens": 8 * 1024, "max_input_tokens": 1024 * 64, "max_cot_tokens": 1024 * 32},
-}
+import loguru
+
+from quantalogic.model_info_list import model_info
+from quantalogic.model_info_litellm import litellm_get_model_max_input_tokens, litellm_get_model_max_output_tokens
+from quantalogic.utils.lm_studio_model_info import ModelInfo, get_model_list
+
+DEFAULT_MAX_OUTPUT_TOKENS = 4 * 1024  # Reasonable default for most models
+DEFAULT_MAX_INPUT_TOKENS = 32 * 1024  # Reasonable default for most models
+
+
+def validate_model_name(model_name: str) -> None:
+    if not isinstance(model_name, str) or not model_name.strip():
+        raise ValueError(f"Invalid model name: {model_name}")
 
 
 def print_model_info():
-    for model, info in model_info.items():
-        print(f"\n{model}:")
-        print(f"  Max Input Tokens: {info['max_input_tokens']:,}")
-        print(f"  Max Output Tokens: {info['max_output_tokens']:,}")
+    for info in model_info.values():
+        print(f"\n{info.model_name}:")
+        print(f"  Max Input Tokens: {info.max_input_tokens:,}")
+        print(f"  Max Output Tokens: {info.max_output_tokens:,}")
 
 
-if __name__ == "__main__":
-    print_model_info()
+def get_max_output_tokens(model_name: str) -> int:
+    """Get max output tokens with safe fallback."""
+    validate_model_name(model_name)
+
+    if model_name.startswith('lm_studio/'):
+        try:
+            models = get_model_list()
+            for model in models.data:
+                if model.id == model_name[len('lm_studio/'):]:
+                    return model.max_context_length
+        except Exception:
+            loguru.logger.warning(f"Could not fetch LM Studio model info for {model_name}, using default")
 
+    if model_name in model_info:
+        return model_info[model_name].max_output_tokens
 
-def get_max_output_tokens(model_name: str) -> int | None:
-    """Get the maximum output tokens for a given model name."""
-    return model_info.get(model_name, {}).get("max_output_tokens", None)
+    try:
+        return litellm_get_model_max_output_tokens(model_name)
+    except Exception as e:
+        loguru.logger.warning(f"Model {model_name} not found in LiteLLM registry, using default")
+        return DEFAULT_MAX_OUTPUT_TOKENS
 
 
-def get_max_input_tokens(model_name: str) -> int | None:
-    """Get the maximum input tokens for a given model name."""
-    return model_info.get(model_name, {}).get("max_input_tokens", None)
+def get_max_input_tokens(model_name: str) -> int:
+    """Get max input tokens with safe fallback."""
+    validate_model_name(model_name)
 
+    if model_name.startswith('lm_studio/'):
+        try:
+            models = get_model_list()
+            for model in models.data:
+                if model.id == model_name[len('lm_studio/'):]:
+                    return model.max_context_length
+        except Exception:
+            loguru.logger.warning(f"Could not fetch LM Studio model info for {model_name}, using default")
 
-def get_max_tokens(model_name: str) -> int | None:
-    """Get the maximum total tokens (input + output) for a given model name."""
-    model_data = model_info.get(model_name, {})
-    max_input = model_data.get("max_input_tokens")
-    max_output = model_data.get("max_output_tokens")
+    if model_name in model_info:
+        return model_info[model_name].max_input_tokens
 
-    if max_input is None or max_output is None:
-        return None
+    try:
+        return litellm_get_model_max_input_tokens(model_name)
+    except Exception:
+        loguru.logger.warning(f"Model {model_name} not found in LiteLLM registry, using default")
+        return DEFAULT_MAX_INPUT_TOKENS
 
-    return max_input + max_output
+
+def get_max_tokens(model_name: str) -> int:
+    """Get total maximum tokens (input + output)."""
+    validate_model_name(model_name)
+
+    # Get input and output tokens separately
+    input_tokens = get_max_input_tokens(model_name)
+    output_tokens = get_max_output_tokens(model_name)
+
+    return input_tokens + output_tokens
+
+
+if __name__ == "__main__":
+    print_model_info()
+    print(get_max_input_tokens("gpt-4o-mini"))
+    print(get_max_output_tokens("openrouter/openai/gpt-4o-mini"))
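
Taken together, the rewrite changes the contract of these helpers: they now always return an `int` rather than `int | None`, resolving limits from the local registry first, then LM Studio (for `lm_studio/`-prefixed models), then LiteLLM, and finally the hard-coded defaults. A minimal usage sketch, assuming the package is importable (the unknown model name below is hypothetical):

```python
from quantalogic.get_model_info import get_max_input_tokens, get_max_tokens

# Present in the local registry: resolved without any network lookup.
print(get_max_input_tokens("dashscope/qwen-plus"))  # 131072

# Unknown everywhere: falls through to DEFAULT_MAX_INPUT_TOKENS (32 * 1024)
# instead of returning None as in 0.31.1.
print(get_max_input_tokens("acme/not-a-real-model"))  # hypothetical name

# get_max_tokens is now just the sum of the two lookups.
print(get_max_tokens("dashscope/qwen-plus"))  # 131072 + 8192
```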
quantalogic/llm.py CHANGED
@@ -30,18 +30,50 @@ def get_model_info(model_name: str) -> dict | None:
     return model_info.get(model_name, None)
 
 
+class ModelProviderConfig:
+    def __init__(self, prefix: str, provider: str, base_url: str, env_var: str):
+        self.prefix = prefix
+        self.provider = provider
+        self.base_url = base_url
+        self.env_var = env_var
+
+    def configure(self, model: str, kwargs: Dict[str, Any]) -> None:
+        kwargs["model"] = model.replace(self.prefix, "")
+        kwargs["custom_llm_provider"] = self.provider
+        kwargs["base_url"] = self.base_url
+        api_key = os.getenv(self.env_var)
+        if not api_key:
+            raise ValueError(f"{self.env_var} is not set in the environment variables.")
+        kwargs["api_key"] = api_key
+
+
+# Default provider configurations
+PROVIDERS = {
+    "dashscope": ModelProviderConfig(
+        prefix="dashscope/",
+        provider="openai",
+        base_url="https://dashscope-intl.aliyuncs.com/compatible-mode/v1",
+        env_var="DASHSCOPE_API_KEY",
+    ),
+    "nvidia": ModelProviderConfig(
+        prefix="nvidia/",
+        provider="openai",
+        base_url="https://integrate.api.nvidia.com/v1",
+        env_var="NVIDIA_API_KEY",
+    ),
+}
+
+
 def generate_completion(**kwargs: Dict[str, Any]) -> Any:
     """Wraps litellm completion with proper type hints."""
     model = kwargs.get("model", "")
-    if model.startswith("dashscope/"):
-        # Remove prefix and configure for OpenAI-compatible endpoint
-        kwargs["model"] = model.replace("dashscope/", "")
-        kwargs["custom_llm_provider"] = "openai"  # Explicitly specify OpenAI provider
-        kwargs["base_url"] = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
-        api_key = os.getenv("DASHSCOPE_API_KEY")
-        if not api_key:
-            raise ValueError("DASHSCOPE_API_KEY is not set in the environment variables.")
-        kwargs["api_key"] = api_key
+
+    # Find matching provider
+    for provider_name, provider_config in PROVIDERS.items():
+        if model.startswith(provider_config.prefix):
+            provider_config.configure(model, kwargs)
+            break
+
     return completion(**kwargs)
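
Provider handling in `generate_completion` is now table-driven, so adding another OpenAI-compatible endpoint no longer requires touching the function body. A sketch of registering one, where the provider name, URL, and environment variable are hypothetical:

```python
import quantalogic.llm as llm

# Hypothetical OpenAI-compatible provider, wired the same way as the
# bundled dashscope and nvidia entries: the prefix is stripped from the
# model name, and base_url/api_key are injected into the completion kwargs.
llm.PROVIDERS["acme"] = llm.ModelProviderConfig(
    prefix="acme/",
    provider="openai",
    base_url="https://api.acme.example/v1",  # hypothetical endpoint
    env_var="ACME_API_KEY",                  # hypothetical variable
)
```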
quantalogic/model_info.py ADDED
@@ -0,0 +1,12 @@
+from pydantic import BaseModel
+
+
+class ModelInfo(BaseModel):
+    model_name: str
+    max_input_tokens: int
+    max_output_tokens: int
+    max_cot_tokens: int | None = None
+
+
+class ModelNotFoundError(Exception):
+    """Raised when a model is not found in local registry"""
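
For reference, a small sketch of constructing one of these records; Pydantic validates the field types at construction time, and `max_cot_tokens` stays `None` unless the model has a separate chain-of-thought budget (values below are taken from the registry that follows):

```python
from quantalogic.model_info import ModelInfo

info = ModelInfo(
    model_name="deepseek/deepseek-reasoner",
    max_input_tokens=1024 * 64,
    max_output_tokens=8 * 1024,
    max_cot_tokens=1024 * 32,  # only reasoning models set this
)
print(info.max_input_tokens + info.max_output_tokens)  # 73728
```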
quantalogic/model_info_list.py ADDED
@@ -0,0 +1,57 @@
+from quantalogic.model_info import ModelInfo
+
+model_info = {
+    "dashscope/qwen-max": ModelInfo(
+        model_name="dashscope/qwen-max",
+        max_output_tokens=8 * 1024,
+        max_input_tokens=32 * 1024,
+    ),
+    "dashscope/qwen-plus": ModelInfo(
+        model_name="dashscope/qwen-plus",
+        max_output_tokens=8 * 1024,
+        max_input_tokens=131072,
+    ),
+    "dashscope/qwen-turbo": ModelInfo(
+        model_name="dashscope/qwen-turbo",
+        max_output_tokens=8 * 1024,
+        max_input_tokens=1000000,
+    ),
+    "deepseek-reasoner": ModelInfo(
+        model_name="deepseek-reasoner",
+        max_output_tokens=8 * 1024,
+        max_input_tokens=1024 * 128,
+    ),
+    "openrouter/deepseek/deepseek-r1": ModelInfo(
+        model_name="openrouter/deepseek/deepseek-r1",
+        max_output_tokens=8 * 1024,
+        max_input_tokens=1024 * 128,
+    ),
+    "openrouter/mistralai/mistral-large-2411": ModelInfo(
+        model_name="openrouter/mistralai/mistral-large-2411",
+        max_output_tokens=128 * 1024,
+        max_input_tokens=1024 * 128,
+    ),
+    "mistralai/mistral-large-2411": ModelInfo(
+        model_name="mistralai/mistral-large-2411",
+        max_output_tokens=128 * 1024,
+        max_input_tokens=1024 * 128,
+    ),
+    "deepseek/deepseek-chat": ModelInfo(
+        model_name="deepseek/deepseek-chat",
+        max_output_tokens=8 * 1024,
+        max_input_tokens=1024 * 64,
+    ),
+    "deepseek/deepseek-reasoner": ModelInfo(
+        model_name="deepseek/deepseek-reasoner",
+        max_output_tokens=8 * 1024,
+        max_input_tokens=1024 * 64,
+        max_cot_tokens=1024 * 32,
+    ),
+    "nvidia/deepseek-ai/deepseek-r1": ModelInfo(
+        model_name="nvidia/deepseek-ai/deepseek-r1",
+        max_output_tokens=8 * 1024,
+        max_input_tokens=1024 * 64,
+    ),
+}
quantalogic/model_info_litellm.py ADDED
@@ -0,0 +1,70 @@
+import functools
+
+import litellm
+
+
+@functools.lru_cache(maxsize=32)
+def litellm_get_model_info(model_name: str) -> dict | None:
+    """Get model information with prefix fallback logic using only litellm.
+
+    Args:
+        model_name: The model identifier to get information for
+
+    Returns:
+        Dictionary containing model information, or None if no match is
+        found after prefix fallbacks
+    """
+    tried_models = [model_name]
+
+    while True:
+        try:
+            # Attempt to get model info through litellm
+            info = litellm.get_model_info(model_name)
+            if info:
+                return info
+        except Exception:
+            pass
+
+        # Try removing one prefix level
+        parts = model_name.split("/")
+        if len(parts) <= 1:
+            break
+
+        model_name = "/".join(parts[1:])
+        tried_models.append(model_name)
+
+    return None
+
+
+def litellm_get_model_max_input_tokens(model_name: str) -> int | None:
+    """Get maximum input tokens for a model using litellm.
+
+    Args:
+        model_name: The model identifier
+
+    Returns:
+        Maximum input tokens, or a default of 8192 if not found
+    """
+    try:
+        info = litellm_get_model_info(model_name)
+        return info.get("max_input_tokens", 8192)
+    except Exception as e:
+        return 8192  # Default for many modern models
+
+
+def litellm_get_model_max_output_tokens(model_name: str) -> int | None:
+    """Get maximum output tokens for a model using litellm.
+
+    Args:
+        model_name: The model identifier
+
+    Returns:
+        Maximum output tokens, or a conservative default of 4096 if not found
+    """
+    try:
+        info = litellm_get_model_info(model_name)
+        return info.get("max_output_tokens", 4096)
+    except Exception as e:
+        return 4096  # Conservative default
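
The fallback strips one prefix segment per iteration, so a fully qualified router name degrades gracefully. A usage sketch, assuming LiteLLM's registry recognizes the final name:

```python
from quantalogic.model_info_litellm import litellm_get_model_info

# For "openrouter/openai/gpt-4o-mini" the lookup tries, in order:
#   openrouter/openai/gpt-4o-mini -> openai/gpt-4o-mini -> gpt-4o-mini
# and stops at the first name litellm.get_model_info() accepts. Results
# are memoized via functools.lru_cache(maxsize=32).
info = litellm_get_model_info("openrouter/openai/gpt-4o-mini")
if info is not None:
    print(info.get("max_input_tokens"), info.get("max_output_tokens"))
```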
quantalogic/task_runner.py CHANGED
@@ -233,6 +233,12 @@ def task_runner(
         "memory_summary",
     ]
 
+    #def ask_continue(event: str, data: any) -> None:
+    #    ## Ask for ctrl+return
+    #    if event == "task_think_end":
+    #        ## Wait return on the keyboard
+    #        input("Press [Enter] to continue...")
+
     # Add spinner control to event handlers
     def handle_task_think_start(*args, **kwargs):
         start_spinner(console)
@@ -246,6 +252,11 @@ def task_runner(
         if data is not None:
             console.print(data, end="", markup=False)
 
+    #agent.event_emitter.on(
+    #    event="task_think_end",
+    #    listener=ask_continue,
+    #)
+
     agent.event_emitter.on(
         event=events,
         listener=console_print_events,
quantalogic/utils/__init__.py CHANGED
@@ -5,6 +5,7 @@ from .git_ls import git_ls
 from .get_environment import get_environment
 from .get_coding_environment import get_coding_environment
 from .get_quantalogic_rules_content import get_quantalogic_rules_file_content
+from .lm_studio_model_info import get_model_list
 
 __all__ = [
     "download_http_file",
@@ -14,4 +15,5 @@ __all__ = [
     "get_environment",
     "get_coding_environment",
     "get_quantalogic_rules_file_content",
+    "get_model_list",
 ]
quantalogic/utils/get_all_models.py CHANGED
@@ -1,8 +1,8 @@
 import litellm
 
-
 from quantalogic.get_model_info import model_info
 
+
 def get_all_models() -> list[str]:
     """
     Retrieves a unified list of all model names supported by LiteLLM and Quantalogic.
quantalogic/utils/lm_studio_model_info.py ADDED
@@ -0,0 +1,48 @@
+from enum import Enum
+from typing import List, Literal, Optional
+
+from pydantic import BaseModel, Field
+
+
+class ModelType(str, Enum):
+    LLM = "llm"
+    EMBEDDINGS = "embeddings"
+    VLM = "vlm"
+
+class CompatibilityType(str, Enum):
+    MLX = "mlx"
+    GGUF = "gguf"
+
+class ModelState(str, Enum):
+    LOADED = "loaded"
+    NOT_LOADED = "not-loaded"
+
+class ModelInfo(BaseModel):
+    id: str = Field(..., description="Unique model identifier in LM Studio's namespace")
+    object: Literal["model"] = Field("model", description="Always 'model' for model objects")
+    type: ModelType = Field(..., description="Type of AI model")
+    publisher: str = Field(..., description="Organization or user who published the model")
+    arch: str = Field(..., description="Base architecture family")
+    compatibility_type: CompatibilityType = Field(..., alias="compatibility_type")
+    quantization: Optional[str] = Field(None, description="Quantization method if applicable")
+    state: ModelState = Field(..., description="Current loading state in LM Studio")
+    max_context_length: int = Field(..., alias="max_context_length", ge=0)
+    loaded_context_length: Optional[int] = Field(
+        None,
+        alias="loaded_context_length",
+        description="Currently allocated context length (only when loaded)",
+        ge=0,
+    )
+
+class ModelListResponse(BaseModel):
+    data: List[ModelInfo] = Field(..., description="List of available models")
+    object: Literal["list"] = Field("list", description="Always 'list' for list responses")
+
+def get_model_list() -> ModelListResponse:
+    """Fetch and validate model information from LM Studio's API"""
+    import requests
+
+    response = requests.get("http://localhost:1234/api/v0/models")
+    response.raise_for_status()
+
+    return ModelListResponse(**response.json())
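
A usage sketch; this only works with an LM Studio server listening on `localhost:1234`, and otherwise raises a `requests` connection error or an `HTTPError` from `raise_for_status()`:

```python
from quantalogic.utils.lm_studio_model_info import get_model_list

models = get_model_list()
for m in models.data:
    # e.g. "mistral-small-24b-instruct-2501 llm loaded 32768"
    print(m.id, m.type.value, m.state.value, m.max_context_length)
```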
quantalogic-0.31.1.dist-info/METADATA → quantalogic-0.32.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: quantalogic
-Version: 0.31.1
+Version: 0.32.0
 Summary: QuantaLogic ReAct Agents
 Author: Raphaël MANSUY
 Author-email: raphael.mansuy@gmail.com
@@ -184,12 +184,25 @@ See our [Release Notes](RELEASE_NOTES.MD) for detailed version history and chang
 | openrouter/openai/gpt-4o | OPENROUTER_API_KEY | OpenAI's GPT-4o model accessible through OpenRouter platform. |
 | openrouter/mistralai/mistral-large-2411 | OPENROUTER_API_KEY | Mistral's large model optimized for complex reasoning tasks, available through OpenRouter with enhanced multilingual capabilities. |
 | mistral/mistral-large-2407 | MISTRAL_API_KEY | Mistral's high-performance model designed for enterprise-grade applications, offering advanced reasoning and multilingual support. |
+| nvidia/deepseek-ai/deepseek-r1 | NVIDIA_API_KEY | NVIDIA's DeepSeek R1 model optimized for high-performance AI tasks and advanced reasoning capabilities. |
+| lm_studio/mistral-small-24b-instruct-2501 | LM_STUDIO_API_KEY | LM Studio's Mistral Small model optimized for local inference with advanced reasoning capabilities. |
 | dashscope/qwen-max | DASHSCOPE_API_KEY | Alibaba's Qwen-Max model optimized for maximum performance and extensive reasoning capabilities. |
 | dashscope/qwen-plus | DASHSCOPE_API_KEY | Alibaba's Qwen-Plus model offering balanced performance and cost-efficiency for a variety of tasks. |
 | dashscope/qwen-turbo | DASHSCOPE_API_KEY | Alibaba's Qwen-Turbo model designed for fast and efficient responses, ideal for high-throughput scenarios. |
 
 To configure the environment API key for Quantalogic using LiteLLM, set the required environment variable for your chosen provider and any optional variables like `OPENAI_API_BASE` or `OPENROUTER_REFERRER`. Use a `.env` file or a secrets manager to securely store these keys, and load them in your code using `python-dotenv`. For advanced configurations, refer to the [LiteLLM documentation](https://docs.litellm.ai/docs/).
 
+### LM Studio Local Setup
+
+To use LM Studio with the Mistral model locally, set the following environment variables:
+
+```bash
+export LM_STUDIO_API_BASE="http://localhost:1234/v1"
+export LM_STUDIO_API_KEY="your-api-key-here"
+```
+
+Replace `http://localhost:1234/v1` with your LM Studio server URL and `your-api-key-here` with your actual API key.
+
 
 ## 📦 Installation
quantalogic-0.31.1.dist-info/RECORD → quantalogic-0.32.0.dist-info/RECORD RENAMED
@@ -1,5 +1,5 @@
 quantalogic/__init__.py,sha256=Su8CnOEdqKu4zTytjiP9P5olg-oIDuUA3fMWM1WUdRY,925
-quantalogic/agent.py,sha256=5TscD78hIQBaF9pfxIRysC3HjnTJ6e0nyKDwrZMXHak,33223
+quantalogic/agent.py,sha256=VqxM_VizDpwH3SLAnoSFMjH4a1ijBVOHaI6rXahHP7M,33289
 quantalogic/agent_config.py,sha256=SIRVSF0kkrYfvtyHiMCJhnm_nYqJCD2p1pN-reMIy24,7868
 quantalogic/agent_factory.py,sha256=HWKwN_DN57EPmME-hoCD2uJE0DqsPCzGU_V7nq54XzI,5284
 quantalogic/coding_agent.py,sha256=Z7ik6LUvLKDnaW9Ax1iZGC7p1WMnlYEUIlE5lkBP414,4975
@@ -9,11 +9,14 @@ quantalogic/console_print_token.py,sha256=qSU-3kmoZk4T5-1ybrEBi8tIXDPcz7eyWKhGh3
 quantalogic/docs_cli.py,sha256=3giVbUpespB9ZdTSJ955A3BhcOaBl5Lwsn1AVy9XAeY,1663
 quantalogic/event_emitter.py,sha256=jqot2g4JRXc88K6PW837Oqxbf7shZfO-xdPaUWmzupk,7901
 quantalogic/generative_model.py,sha256=ut_BFy4BqDxNqUXVbM8e_C_CzwNuJkvGWRsbpbKaees,13423
-quantalogic/get_model_info.py,sha256=f64GpkpzeWXKRMBGG9edfAVP1-S-TclmxSaQC91vglw,1976
+quantalogic/get_model_info.py,sha256=_9Nb9JQ09HZzT-_gZUSvl4Er7uCXs5ys36sIBa-8DXA,3005
 quantalogic/interactive_text_editor.py,sha256=1vW4poJl7SItRGEeGQgtCFcmRDXmfCM8PE-uBtDBJuE,16658
-quantalogic/llm.py,sha256=yhuC1b5TCXojDXZEJK_PMcWUUxzrpI-gwzlIszAdJMM,4677
+quantalogic/llm.py,sha256=Nk2Dn1lJh1-323Fs7ADfR9ov_eAoJOEEnzyGswZSbJI,5460
 quantalogic/main.py,sha256=__-4pX2pgoSFvt-aLdp6Qlrq55_SrwP_l8u2uTaQbjg,9262
 quantalogic/memory.py,sha256=zbtRuM05jaS2lJll-92dt5JfYVLERnF_m_9xqp2x-k0,6304
+quantalogic/model_info.py,sha256=j7QqvjEFQDGpDOgQs8uTkVyI3a50Oa_nrsQjyxizTLc,272
+quantalogic/model_info_list.py,sha256=bJu2ohQFgZOwmcqydfh5oi5R1G8ZY7jtQlERsR1Z47s,1816
+quantalogic/model_info_litellm.py,sha256=m1Yt4SIiOBRWLx7S8f8k4fcTiKJZKtOvcPN_QvQ_Oxk,1880
 quantalogic/model_names.py,sha256=UZlz25zG9B2dpfwdw_e1Gw5qFsKQ7iME9FJh9Ts4u6s,938
 quantalogic/prompts.py,sha256=M-7rCaQoylnwxedhvy7VmQdgBG6TT1vmcf8_UzPTyY0,4035
 quantalogic/search_agent.py,sha256=EA_FAPP0dVuUbJ_lAGKfYq1FIJ6oLYzGMgKLMvBL4ZQ,2472
@@ -26,7 +29,7 @@ quantalogic/server/static/js/event_visualizer.js,sha256=eFkkWyNZw3zOZlF18kxbfsWq
 quantalogic/server/static/js/quantalogic.js,sha256=x7TrlZGR1Y0WLK2DWl1xY847BhEWMPnL0Ua7KtOldUc,22311
 quantalogic/server/templates/index.html,sha256=nDnXJoQEm1vXbhXtgaYk0G5VXj0wwzE6KrqEDhHFpj4,7773
 quantalogic/task_file_reader.py,sha256=AMIJoeVY9Hhu0dBJ-C5EyaOFsXLkhn2oBhVs-WTnnLk,1460
-quantalogic/task_runner.py,sha256=Loa0hr-Bge-kVnTpoGuV7RotFdiAKQL_zj4OB4S6gko,9643
+quantalogic/task_runner.py,sha256=6kL7o0br2YU8FNKyq-rKJo1oGKZKl73vtATpHusPIWQ,10038
 quantalogic/tool_manager.py,sha256=Uh-ufrJPufHqDUrFwKlXw3MOsVGc_4lQxuc6cRvZ7wU,7186
 quantalogic/tools/__init__.py,sha256=pTirT5UBynuTkAzFYebu7ttGAMP3_A0idFvDp6lGZJQ,2146
 quantalogic/tools/agent_tool.py,sha256=MXCXxWHRch7VK4UWhtRP1jeI8Np9Ne2CUGo8vm1oZiM,3064
@@ -72,15 +75,16 @@ quantalogic/tools/utils/create_sample_database.py,sha256=Aus9xRLGfQfsYnxsAkJ5CW-
 quantalogic/tools/utils/generate_database_report.py,sha256=0D-5fWOfpAh1jEcld5OTQP5x6XkJE5jpNY6FyHv1L2s,10345
 quantalogic/tools/wikipedia_search_tool.py,sha256=bdZ_0dYTxpEfU04tBFsatnLM5P9Z3kAZgKQEjsopJLA,5405
 quantalogic/tools/write_file_tool.py,sha256=_mx9_Zjg2oMAAVzlcHEKjZVZUxQVgbRfcoMKgWnoZcg,3764
-quantalogic/utils/__init__.py,sha256=Ltq7tzLuHCl9BpCvfRVA9Sjrtp1RJesrn7G980lbl_c,563
+quantalogic/utils/__init__.py,sha256=E442CJQuTohKzgI0Wrd4NZEpKascFjz6F4Vy8Y1c_0Y,634
 quantalogic/utils/ask_user_validation.py,sha256=F0jkbFJVXAImcSSP7op6dov5i80hRvZGRvBHbfcZrxg,340
 quantalogic/utils/check_version.py,sha256=grxTfJE85GMue1OAk8z8_q8tjEJxQ8RO6fN3fJ_qedg,1136
 quantalogic/utils/download_http_file.py,sha256=FTN3brq9WvCFvuBX-lYAhjsdYTzQT4m9m2vqlcyjkNk,3472
-quantalogic/utils/get_all_models.py,sha256=GGhonVHUS7MeS6eogmkEwZETSVgeGU9pWCLTzR0XxAU,544
+quantalogic/utils/get_all_models.py,sha256=Ol4e60MwZiJhu8HZ2i_RpIumLmFYYrncB1X9q1KEQh0,544
 quantalogic/utils/get_coding_environment.py,sha256=oMK5ZanOqX_SFaJxUZQGlsAAaiLUgJufCJYDrHnHPuQ,584
 quantalogic/utils/get_environment.py,sha256=7wWruSHYTUlnQWW27qU3WFYZnncqqqdofsxAsUU7lhw,875
 quantalogic/utils/get_quantalogic_rules_content.py,sha256=fnEFTyClXzpI0MLaM-gB9R6l4CJlu2NnaYiR09ciJC8,673
 quantalogic/utils/git_ls.py,sha256=_k6QIQtc0aM1bsG340jBp4VrdevbcH8Pg2CV4r9oHok,5264
+quantalogic/utils/lm_studio_model_info.py,sha256=1eDvZ-I9W8AZbCch1l5rdiSpUxL7qMnfZItdFZkmAWs,1819
 quantalogic/utils/read_file.py,sha256=tSRVHk8dIP4nNLL89v5kRki4hOTjVyjbmuEb2zwvwCY,2077
 quantalogic/utils/read_http_text_content.py,sha256=n3IayT5KcqctIVVF2gOQQAMf3Ow6eenlVgfXTpLcQbw,4410
 quantalogic/version.py,sha256=ea_cRutaQk5_lwlLbUUvPFuOT7Of7-gAsDl7wdveS-g,107
@@ -88,8 +92,8 @@ quantalogic/version_check.py,sha256=cttR1lR3OienGLl7NrK1Te1fhDkqSjCci7HC1vFUTSY,
 quantalogic/welcome_message.py,sha256=IXMhem8h7srzNUwvw8G_lmEkHU8PFfote021E_BXmVk,3039
 quantalogic/xml_parser.py,sha256=8yDxvKzAEnefNwUAR-wjerMDOj5T5cxak4WPIA83SBw,11516
 quantalogic/xml_tool_parser.py,sha256=Vz4LEgDbelJynD1siLOVkJ3gLlfHsUk65_gCwbYJyGc,3784
-quantalogic-0.31.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-quantalogic-0.31.1.dist-info/METADATA,sha256=pLu3KI4tGKodQWubeVQm1G2P7tC9yZp22ZpOl35N074,22789
-quantalogic-0.31.1.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-quantalogic-0.31.1.dist-info/entry_points.txt,sha256=h74O_Q3qBRCrDR99qvwB4BpBGzASPUIjCfxHq6Qnups,183
-quantalogic-0.31.1.dist-info/RECORD,,
+quantalogic-0.32.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+quantalogic-0.32.0.dist-info/METADATA,sha256=w5kJUS98ou0kVeY9fy-WImrW-W9wWZFOXmIQx4xK-_M,23461
+quantalogic-0.32.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+quantalogic-0.32.0.dist-info/entry_points.txt,sha256=h74O_Q3qBRCrDR99qvwB4BpBGzASPUIjCfxHq6Qnups,183
+quantalogic-0.32.0.dist-info/RECORD,,