quantalogic 0.2.10__py3-none-any.whl → 0.2.12__py3-none-any.whl

This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
quantalogic/agent.py CHANGED
@@ -1,7 +1,5 @@
  """Enhanced QuantaLogic agent implementing the ReAct framework."""
 
- import os
- import sys
  from collections.abc import Callable
  from datetime import datetime
  from typing import Any
@@ -583,7 +581,9 @@ class Agent(BaseModel):
  "\n"
  "Available variables:\n"
  "\n"
- f"{', '.join(self.variable_store.keys())}\n" if len(self.variable_store.keys()) > 0 else "None\n"
+ f"{', '.join(self.variable_store.keys())}\n"
+ if len(self.variable_store.keys()) > 0
+ else "None\n"
  )
  return prompt_use_variables
 
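Note: the change in this hunk is formatting only. Adjacent string literals (the header lines and the f-string) are concatenated at parse time, and the trailing conditional expression applies to the whole concatenated literal, so wrapping it across lines does not change behavior. A minimal standalone sketch of that Python behavior, with a hypothetical empty `keys` value standing in for the variable store:

    # Sketch: implicit string concatenation binds tighter than the conditional
    # expression, so the ternary chooses between the entire concatenated prompt
    # and the "None\n" fallback.
    keys = []  # hypothetical: pretend the variable store is empty
    prompt_use_variables = (
        "\n"
        "Available variables:\n"
        "\n"
        f"{', '.join(keys)}\n" if len(keys) > 0 else "None\n"
    )
    print(repr(prompt_use_variables))  # 'None\n' -- the header lines are folded into the ternary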
@@ -630,10 +630,10 @@ class Agent(BaseModel):
 
  def _generate_task_summary(self, content: str) -> str:
  """Generate a concise summary of the given content using the generative model.
-
+
  Args:
  content (str): The content to summarize
-
+
  Returns:
  str: Generated summary
  """
@@ -670,5 +670,3 @@ class Agent(BaseModel):
  "session_add_message",
  {"role": "assistant", "content": assistant_content},
  )
-
-
quantalogic/agent_config.py CHANGED
@@ -107,7 +107,7 @@ def create_full_agent(model_name: str, vision_model_name: str | None) -> Agent:
  Agent: An agent with the specified model and tools
 
  """
- tools=[
+ tools = [
  TaskCompleteTool(),
  ReadFileTool(),
  ReadFileBlockTool(),
quantalogic/coding_agent.py CHANGED
@@ -18,7 +18,7 @@ from quantalogic.utils import get_coding_environment
  from quantalogic.utils.get_quantalogic_rules_content import get_quantalogic_rules_file_content
 
 
- def create_coding_agent(model_name: str,vision_model_name: str | None = None, basic: bool = False) -> Agent:
+ def create_coding_agent(model_name: str, vision_model_name: str | None = None, basic: bool = False) -> Agent:
  """Creates and configures a coding agent with a comprehensive set of tools.
 
  Args:
@@ -69,7 +69,7 @@ def create_coding_agent(model_name: str,vision_model_name: str | None = None, ba
  LLMTool(
  model_name=model_name,
  system_prompt="You are a software expert, your role is to answer coding questions.",
- name="coding_consultant", # Handles implementation-level coding questions
+ name="coding_consultant",  # Handles implementation-level coding questions
  )
  )
  tools.append(
quantalogic/generative_model.py CHANGED
@@ -107,7 +107,9 @@ class GenerativeModel:
  )
 
  # Retry on specific retriable exceptions
- def generate_with_history(self, messages_history: list[Message], prompt: str, image_url: str | None = None) -> ResponseStats:
+ def generate_with_history(
+ self, messages_history: list[Message], prompt: str, image_url: str | None = None
+ ) -> ResponseStats:
  """Generate a response with conversation history and optional image.
 
  Generates a response based on previous conversation messages,
@@ -128,20 +130,17 @@ class GenerativeModel:
  Exception: For other unexpected errors.
  """
  messages = [{"role": msg.role, "content": str(msg.content)} for msg in messages_history]
-
+
  if image_url:
- messages.append({
- "role": "user",
- "content": [
- {"type": "text", "text": str(prompt)},
- {
- "type": "image_url",
- "image_url": {
- "url": image_url
- }
- }
- ]
- })
+ messages.append(
+ {
+ "role": "user",
+ "content": [
+ {"type": "text", "text": str(prompt)},
+ {"type": "image_url", "image_url": {"url": image_url}},
+ ],
+ }
+ )
  else:
  messages.append({"role": "user", "content": str(prompt)})
 
@@ -249,7 +248,7 @@ class GenerativeModel:
  logger.debug(f"Model info retrieved: {model_info.keys()}")
  else:
  logger.debug("No model info available")
-
+
  return model_info
 
  def get_model_max_input_tokens(self) -> int:
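The reformatted branch builds the same OpenAI-style multimodal user message as before; only the layout changed. A minimal standalone sketch of the structure it appends (placeholder prompt and URL, not values from the package):

    # Sketch of the user message appended when image_url is provided.
    prompt = "Describe this image."            # hypothetical prompt
    image_url = "https://example.com/cat.jpg"  # hypothetical URL
    message = {
        "role": "user",
        "content": [
            {"type": "text", "text": str(prompt)},
            {"type": "image_url", "image_url": {"url": image_url}},
        ],
    }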
quantalogic/main.py CHANGED
@@ -2,6 +2,7 @@
  """Main module for the QuantaLogic agent."""
 
  # Standard library imports
+ import random
  import sys
  from typing import Optional
 
@@ -9,6 +10,9 @@ from typing import Optional
  import click
  from loguru import logger
 
+ from quantalogic.utils.check_version import check_if_is_latest_version
+ from quantalogic.version import get_version
+
  # Configure logger
  logger.remove() # Remove default logger
 
@@ -50,6 +54,30 @@ def create_agent_for_mode(mode: str, model_name: str, vision_model_name: str | N
  else:
  raise ValueError(f"Unknown agent mode: {mode}")
 
+ def check_new_version():
+ # Randomly check for updates (1 in 10 chance)
+ if random.randint(1, 10) == 1:
+ try:
+ current_version = get_version()
+ has_new_version, latest_version = check_if_is_latest_version()
+
+ if has_new_version:
+ console = Console()
+ console.print(
+ Panel.fit(
+ f"[yellow]⚠️ Update Available![/yellow]\n\n"
+ f"Current version: [bold]{current_version}[/bold]\n"
+ f"Latest version: [bold]{latest_version}[/bold]\n\n"
+ "To update, run:\n"
+ "[bold]pip install --upgrade quantalogic[/bold]\n"
+ "or if using pipx:\n"
+ "[bold]pipx upgrade quantalogic[/bold]",
+ title="[bold]Update Available[/bold]",
+ border_style="yellow",
+ )
+ )
+ except Exception:
+ return
 
  def configure_logger(log_level: str) -> None:
  """Configure the logger with the specified log level and format."""
@@ -59,7 +87,7 @@ def configure_logger(log_level: str) -> None:
  level=log_level.upper(),
  format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{process}</cyan> | <magenta>{file}:{line}</magenta> | {message}",
  )
- logger.info(f"Log level set to: {log_level}")
+ logger.debug(f"Log level set to: {log_level}")
 
 
  def set_litellm_verbose(verbose_mode: bool) -> None:
@@ -192,17 +220,20 @@ def task(
  console = Console()
  switch_verbose(verbose, log)
 
+
  try:
  if file:
  task_content = get_task_from_file(file)
  else:
  if task:
+ check_new_version()
  task_content = task
  else:
  display_welcome_message(console, model_name, vision_model_name)
- logger.info("Waiting for user input...")
+ check_new_version()
+ logger.debug("Waiting for user input...")
  task_content = get_multiline_input(console).strip()
- logger.info(f"User input received. Task content: {task_content}")
+ logger.debug(f"User input received. Task content: {task_content}")
  if not task_content:
  logger.info("No task provided. Exiting...")
  console.print("[yellow]No task provided. Exiting...[/yellow]")
@@ -216,9 +247,17 @@ def task(
  border_style="blue",
  )
  )
- if not Confirm.ask("[bold]Are you sure you want to submit this task?[/bold]"):
- console.print("[yellow]Task submission cancelled. Exiting...[/yellow]")
- sys.exit(0)
+ if not Confirm.ask("[bold]Are you sure you want to submit this task?[/bold]"):
+ console.print("[yellow]Task submission cancelled. Exiting...[/yellow]")
+ sys.exit(0)
+
+ console.print(
+ Panel.fit(
+ "[green]✓ Task successfully submitted! Processing...[/green]",
+ title="[bold]Status[/bold]",
+ border_style="green",
+ )
+ )
 
  logger.debug(f"Creating agent for mode: {mode} with model: {model_name}")
  agent = create_agent_for_mode(mode, model_name, vision_model_name=vision_model_name)
@@ -259,7 +298,7 @@ def task(
 
 
  def main():
- """Entry point for the quantalogic CLI."""
+ """Main Entry point"""
  cli()
 
 
quantalogic/print_event.py CHANGED
@@ -1,12 +1,14 @@
  """Print events with rich formatting."""
 
+ from typing import Any
+
  from rich import box
  from rich.console import Console
  from rich.panel import Panel
  from rich.tree import Tree
 
 
- def console_print_events(event: str, data: dict[str, any] = None):
+ def console_print_events(event: str, data: dict[str, Any] | None = None):
  """Print events with rich formatting.
 
  Args:
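The signature fix matters because lowercase `any` is the builtin function rather than a type, so `dict[str, any]` is rejected by static type checkers, and the added `| None` now matches the `None` default. A minimal sketch of the corrected signature in isolation (placeholder body, not the package's implementation):

    # Sketch: typing.Any is the annotation; builtins.any is a function.
    from typing import Any

    def console_print_events(event: str, data: dict[str, Any] | None = None):
        """Placeholder body; the real function renders the event with rich."""
        print(event, data)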
quantalogic/server/agent_server.py CHANGED
@@ -47,7 +47,7 @@ VALIDATION_TIMEOUT = 30.0 # seconds
 
  def handle_sigterm(signum, frame):
  """Handle SIGTERM signal."""
- logger.info("Received SIGTERM signal")
+ logger.debug("Received SIGTERM signal")
  raise SystemExit(0)
 
 
@@ -74,7 +74,7 @@ class ServerState:
  async def initiate_shutdown(self, force: bool = False):
  """Initiate the shutdown process."""
  if not self.is_shutting_down or force:
- logger.info("Initiating server shutdown...")
+ logger.debug("Initiating server shutdown...")
  self.is_shutting_down = True
  self.force_exit = force
  self.shutdown_initiated.set()
@@ -88,7 +88,7 @@ class ServerState:
  """Handle interrupt signal."""
  self.interrupt_count += 1
  if self.interrupt_count == 1:
- logger.info("Graceful shutdown initiated (press Ctrl+C again to force)")
+ logger.debug("Graceful shutdown initiated (press Ctrl+C again to force)")
  asyncio.create_task(self.initiate_shutdown(force=False))
  else:
  logger.warning("Forced shutdown initiated...")
@@ -277,7 +277,7 @@ class AgentState:
  # Override ask_for_user_validation with SSE-based method
  self.agent.ask_for_user_validation = self.sse_ask_for_user_validation
 
- logger.info(f"Agent initialized with model: {model_name}")
+ logger.debug(f"Agent initialized with model: {model_name}")
  except Exception as e:
  logger.error(f"Failed to initialize agent: {e}", exc_info=True)
  raise
@@ -316,7 +316,7 @@ class AgentState:
  console_print_events(event_type, data)
 
  # Log event details
- logger.info(f"Agent Event: {event_type}")
+ logger.debug(f"Agent Event: {event_type}")
  logger.debug(f"Event Data: {data}")
 
  # Broadcast to clients
@@ -334,7 +334,7 @@ class AgentState:
  async def cleanup(self):
  """Clean up resources during shutdown."""
  try:
- logger.info("Cleaning up resources...")
+ logger.debug("Cleaning up resources...")
  if server_state.force_exit:
  logger.warning("Forced cleanup - skipping graceful shutdown")
  return
@@ -349,7 +349,7 @@ class AgentState:
  self.validation_responses.clear()
  # Clear agent
  self.agent = None
- logger.info("Cleanup completed")
+ logger.debug("Cleanup completed")
  except TimeoutError:
  logger.warning(f"Cleanup timed out after {SHUTDOWN_TIMEOUT} seconds")
  except Exception as e:
@@ -429,7 +429,7 @@ class AgentState:
  with self.queue_lock:
  if task_id in self.task_queues:
  del self.task_queues[task_id]
- logger.info(f"Removed event queue for task_id: {task_id}")
+ logger.debug(f"Removed event queue for task_id: {task_id}")
 
 
  # Initialize global states
@@ -448,11 +448,11 @@ async def lifespan(app: FastAPI):
  loop.add_signal_handler(sig, lambda s=sig: asyncio.create_task(handle_shutdown(s)))
  yield
  finally:
- logger.info("Shutting down server gracefully...")
+ logger.debug("Shutting down server gracefully...")
  await server_state.initiate_shutdown()
  await agent_state.cleanup()
  server_state.shutdown_complete.set()
- logger.info("Server shutdown complete")
+ logger.debug("Server shutdown complete")
 
 
  async def handle_shutdown(sig):
@@ -527,7 +527,7 @@ async def event_stream(request: Request, task_id: Optional[str] = None) -> Strea
  async def event_generator() -> AsyncGenerator[str, None]:
  # Ensure unique client-task combination
  client_id = agent_state.add_client(task_id)
- logger.info(f"Client {client_id} subscribed to {'task_id: ' + task_id if task_id else 'all events'}")
+ logger.debug(f"Client {client_id} subscribed to {'task_id: ' + task_id if task_id else 'all events'}")
 
  try:
  while not server_state.is_shutting_down:
@@ -557,7 +557,7 @@ async def event_stream(request: Request, task_id: Optional[str] = None) -> Strea
  finally:
  # Clean up the client's event queue
  agent_state.remove_client(client_id, task_id)
- logger.info(f"Client {client_id} {'unsubscribed from task_id: ' + task_id if task_id else 'disconnected'}")
+ logger.debug(f"Client {client_id} {'unsubscribed from task_id: ' + task_id if task_id else 'disconnected'}")
 
  return StreamingResponse(
  event_generator(),
@@ -629,5 +629,5 @@ if __name__ == "__main__":
  try:
  server.run()
  except KeyboardInterrupt:
- logger.info("Received keyboard interrupt")
+ logger.debug("Received keyboard interrupt")
  sys.exit(1)
quantalogic/server/state.py CHANGED
@@ -35,7 +35,7 @@ class ServerState:
  async def initiate_shutdown(self, force: bool = False):
  """Initiate the shutdown process."""
  if not self.is_shutting_down or force:
- logger.info("Initiating server shutdown...")
+ logger.debug("Initiating server shutdown...")
  self.is_shutting_down = True
  self.force_exit = force
  self.shutdown_initiated.set()
@@ -48,7 +48,7 @@ class ServerState:
  """Handle interrupt signal."""
  self.interrupt_count += 1
  if self.interrupt_count == 1:
- logger.info("Graceful shutdown initiated (press Ctrl+C again to force)")
+ logger.debug("Graceful shutdown initiated (press Ctrl+C again to force)")
  asyncio.create_task(self.initiate_shutdown(force=False))
  else:
  logger.warning("Forced shutdown initiated...")
@@ -95,7 +95,7 @@ class AgentState:
  if task_id not in self.agents:
  self.agents[task_id] = self.create_agent_for_task(task_id)
 
- logger.info(f"New client connected: {client_id} for task: {task_id}")
+ logger.debug(f"New client connected: {client_id} for task: {task_id}")
  return client_id
 
  def create_agent_for_task(self, task_id: str) -> Any:
@@ -109,7 +109,7 @@ class AgentState:
  """
  # Placeholder for agent creation logic
  agent = ... # Replace with actual agent creation logic
- logger.info(f"Agent created for task: {task_id}")
+ logger.debug(f"Agent created for task: {task_id}")
  return agent
 
  def get_agent_for_task(self, task_id: str) -> Optional[Any]:
@@ -128,7 +128,7 @@ class AgentState:
  with self.queue_lock:
  if client_id in self.event_queues:
  del self.event_queues[client_id]
- logger.info(f"Client disconnected: {client_id}")
+ logger.debug(f"Client disconnected: {client_id}")
 
  def _format_data_for_client(self, data: Dict[str, Any]) -> Dict[str, Any]:
  """Format data for client consumption."""
quantalogic/tool_manager.py CHANGED
@@ -18,18 +18,18 @@ class ToolManager(BaseModel):
 
  def add(self, tool: Tool):
  """Add a tool to the tool dictionary."""
- logger.info(f"Adding tool: {tool.name} to tool dictionary")
+ logger.debug(f"Adding tool: {tool.name} to tool dictionary")
  self.tools[tool.name] = tool
 
  def add_list(self, tools: list[Tool]):
  """Add a list of tools to the tool dictionary."""
- logger.info(f"Adding {len(tools)} tools to tool dictionary")
+ logger.debug(f"Adding {len(tools)} tools to tool dictionary")
  for tool in tools:
  self.add(tool)
 
  def remove(self, tool_name: str) -> bool:
  """Remove a tool from the tool dictionary."""
- logger.info(f"Removing tool: {tool_name} from tool dictionary")
+ logger.debug(f"Removing tool: {tool_name} from tool dictionary")
  del self.tools[tool_name]
  return True
 
@@ -45,7 +45,7 @@ class ToolManager(BaseModel):
 
  def execute(self, tool_name: str, **kwargs) -> str:
  """Execute a tool from the tool dictionary."""
- logger.info(f"Executing tool: {tool_name} with arguments: {kwargs}")
+ logger.debug(f"Executing tool: {tool_name} with arguments: {kwargs}")
  try:
  result = self.tools[tool_name].execute(**kwargs)
  logger.debug(f"Tool {tool_name} execution completed successfully")
quantalogic/tools/agent_tool.py CHANGED
@@ -49,7 +49,7 @@ class AgentTool(Tool):
  )
 
  @model_validator(mode="before")
- def validate_agent(cls, values: dict[str, Any]) -> dict[str, Any]:
+ def validate_agent(cls, values: dict[str, Any]) -> dict[str, Any]: # noqa: N805
  """Validate the provided agent and its role."""
  agent = values.get("agent")
  # Lazy import to avoid circular dependency
quantalogic/tools/elixir_tool.py CHANGED
@@ -122,7 +122,7 @@ end
 
  # Execute the main function
  ElixirScript.main()
- """ % script.strip()
+ """ % script.strip() # noqa: UP031
 
  with open(script_path, "w", encoding="utf-8") as f:
  f.write(wrapped_script.strip())
quantalogic/tools/llm_tool.py CHANGED
@@ -54,6 +54,7 @@ class LLMTool(Tool):
 
  model_name: str = Field(..., description="The name of the language model to use")
  generative_model: GenerativeModel | None = Field(default=None)
+ system_prompt: str | None = Field(default=None)
 
  def model_post_init(self, __context):
  """Initialize the generative model after model initialization."""
@@ -61,7 +62,10 @@ class LLMTool(Tool):
  self.generative_model = GenerativeModel(model=self.model_name)
  logging.debug(f"Initialized LLMTool with model: {self.model_name}")
 
- def execute(self, system_prompt: str, prompt: str, temperature: str = "0.7") -> str:
+
+ def execute(
+ self, system_prompt: str | None = None, prompt: str | None = None, temperature: str | None = None
+ ) -> str:
  """Execute the tool to generate an answer based on the provided question.
 
  Args:
@@ -84,9 +88,11 @@ class LLMTool(Tool):
  logging.error(f"Invalid temperature value: {temperature}")
  raise ValueError(f"Invalid temperature value: {temperature}") from ve
 
+ used_system_prompt = self.system_prompt if self.system_prompt else system_prompt
+
  # Prepare the messages history
  messages_history = [
- Message(role="system", content=system_prompt),
+ Message(role="system", content=used_system_prompt),
  Message(role="user", content=prompt),
  ]
 
@@ -111,9 +117,12 @@ class LLMTool(Tool):
 
  if __name__ == "__main__":
  # Example usage of LLMTool
- tool = LLMTool(model_name="gpt-4o-mini")
+ tool = LLMTool(model_name="openrouter/openai/gpt-4o-mini")
  system_prompt = 'Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the context, say "I don\'t know".'
  question = "What is the meaning of life?"
  temperature = "0.7"
  answer = tool.execute(system_prompt=system_prompt, prompt=question, temperature=temperature)
  print(answer)
+ pirate = LLMTool(model_name="openrouter/openai/gpt-4o-mini", system_prompt="You are a pirate.")
+ pirate_answer = pirate.execute(system_prompt=system_prompt, prompt=question, temperature=temperature)
+ print(pirate_answer)
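The practical effect of the new class-level `system_prompt` field is precedence: when it is set on the instance, `execute()` uses it and ignores the per-call `system_prompt` argument (`used_system_prompt = self.system_prompt if self.system_prompt else system_prompt`). A minimal sketch of that behavior, reusing the model name from the example above; the import path and the "historian" prompt are assumptions for illustration:

    # Sketch: the instance-level system_prompt wins over the execute() argument.
    from quantalogic.tools.llm_tool import LLMTool  # assumed import path

    pirate = LLMTool(model_name="openrouter/openai/gpt-4o-mini", system_prompt="You are a pirate.")
    answer = pirate.execute(
        system_prompt="You are a historian.",  # hypothetical; overridden by "You are a pirate."
        prompt="What is the meaning of life?",
        temperature="0.7",
    )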
quantalogic/tools/llm_vision_tool.py CHANGED
@@ -8,7 +8,7 @@ from pydantic import ConfigDict, Field
  from quantalogic.generative_model import GenerativeModel, Message
  from quantalogic.tools.tool import Tool, ToolArgument
 
- #DEFAULT_MODEL_NAME = "ollama/llama3.2-vision"
+ # DEFAULT_MODEL_NAME = "ollama/llama3.2-vision"
  DEFAULT_MODEL_NAME = "openrouter/openai/gpt-4o-mini"
 
 
@@ -67,13 +67,7 @@ class LLMVisionTool(Tool):
  self.generative_model = GenerativeModel(model=self.model_name)
  logging.debug(f"Initialized LLMVisionTool with model: {self.model_name}")
 
- def execute(
- self,
- system_prompt: str,
- prompt: str,
- image_url: str,
- temperature: str = "0.7"
- ) -> str:
+ def execute(self, system_prompt: str, prompt: str, image_url: str, temperature: str = "0.7") -> str:
  """Execute the tool to analyze an image and generate a response.
 
  Args:
@@ -112,9 +106,7 @@ class LLMVisionTool(Tool):
 
  try:
  response_stats = self.generative_model.generate_with_history(
- messages_history=messages_history,
- prompt=prompt,
- image_url=image_url
+ messages_history=messages_history, prompt=prompt, image_url=image_url
  )
  response = response_stats.response.strip()
  logging.info(f"Generated response: {response}")
@@ -131,10 +123,5 @@ if __name__ == "__main__":
  question = "What is shown in this image? Describe it with details."
  image_url = "https://fastly.picsum.photos/id/767/200/300.jpg?hmac=j5YA1cRw-jS6fK3Mx2ooPwl2_TS3RSyLmFmiM9TqLC4"
  temperature = "0.7"
- answer = tool.execute(
- system_prompt=system_prompt,
- prompt=question,
- image_url=image_url,
- temperature=temperature
- )
+ answer = tool.execute(system_prompt=system_prompt, prompt=question, image_url=image_url, temperature=temperature)
  print(answer)
quantalogic/tools/nodejs_tool.py CHANGED
@@ -342,14 +342,14 @@ class NodeJsTool(Tool):
  RuntimeError: If pulling the Docker image fails.
  """
  try:
- logger.info(f"Pulling Docker image: {docker_image}")
+ logger.debug(f"Pulling Docker image: {docker_image}")
  subprocess.run(
  ["docker", "pull", docker_image],
  check=True,
  capture_output=True,
  text=True,
  )
- logger.info(f"Successfully pulled Docker image '{docker_image}'.")
+ logger.debug(f"Successfully pulled Docker image '{docker_image}'.")
  except subprocess.CalledProcessError as e:
  error_msg = f"Failed to pull Docker image '{docker_image}': {e.stderr.strip()}"
  logger.error(error_msg)
quantalogic/tools/python_tool.py CHANGED
@@ -266,7 +266,7 @@ class PythonTool(Tool):
  capture_output=True,
  text=True,
  )
- logger.info(f"Successfully pulled Docker image '{docker_image}'.")
+ logger.debug(f"Successfully pulled Docker image '{docker_image}'.")
  except subprocess.CalledProcessError as e:
  error_msg = f"Failed to pull Docker image '{docker_image}': {e.stderr.strip()}"
  logger.error(error_msg)
@@ -365,7 +365,7 @@ class PythonTool(Tool):
  docker_run_cmd += ["bash", "-c", venv_and_run]
  logger.debug("Added script execution command to Docker run command.")
 
- logger.info(f"Executing Docker command: {' '.join(docker_run_cmd)}")
+ logger.debug(f"Executing Docker command: {' '.join(docker_run_cmd)}")
  try:
  result = subprocess.run(
  docker_run_cmd,
quantalogic/tools/replace_in_file_tool.py CHANGED
@@ -204,7 +204,7 @@ class ReplaceInFileTool(Tool):
  if not block.search:
  if block.replace:
  content += f"\n{block.replace}"
- logger.info(f"Block {idx}: Appended content")
+ logger.debug(f"Block {idx}: Appended content")
  continue
 
  match_found = False
@@ -218,7 +218,7 @@ class ReplaceInFileTool(Tool):
  content = f"{content[:start]}{content[end:]}"
  changes.append((start, start + len(block.replace) if block.replace else start))
  match_found = True
- logger.info(f"Block {idx}: Exact match {'replaced' if block.replace else 'deleted'}")
+ logger.debug(f"Block {idx}: Exact match {'replaced' if block.replace else 'deleted'}")
 
  if not match_found:
  similarity, matched_str = self.find_similar_match(block.search, content)
@@ -232,7 +232,7 @@ class ReplaceInFileTool(Tool):
  else:
  content = f"{content[:start]}{content[end:]}"
  changes.append((start, start + len(block.replace) if block.replace else start))
- logger.info(
+ logger.debug(
  f"Block {idx}: Similar match (similarity={similarity:.1%}) "
  f"{'replaced' if block.replace else 'deleted'}"
  )
quantalogic/tools/ripgrep_tool.py CHANGED
@@ -136,7 +136,7 @@ class RipgrepTool(Tool):
  return "No files matching the pattern (after .gitignore filtering)"
 
  try:
- logger.info(f"Executing ripgrep with args: {args}")
+ logger.debug(f"Executing ripgrep with args: {args}")
  # Add filtered files to ripgrep command
  args.extend(filtered_files)
  output = subprocess.check_output([rg_path] + args, text=True, cwd=cwd)
@@ -194,7 +194,7 @@ class RipgrepTool(Tool):
  for path in system_paths + node_paths:
  full_path = Path(__file__).parent.parent / path if str(path).startswith("node_modules") else path
  if full_path.exists():
- logger.info(f"Found ripgrep at: {full_path}")
+ logger.debug(f"Found ripgrep at: {full_path}")
  return str(full_path)
 
  # Check system PATH using which/where
@@ -202,7 +202,7 @@ class RipgrepTool(Tool):
  command = "where" if os.name == "nt" else "which"
  rg_path = subprocess.check_output([command, bin_name], text=True).strip()
  if rg_path:
- logger.info(f"Found ripgrep in PATH at: {rg_path}")
+ logger.debug(f"Found ripgrep in PATH at: {rg_path}")
  return rg_path
  except subprocess.CalledProcessError:
  logger.debug("Ripgrep not found in system PATH")
quantalogic/tools/tool.py CHANGED
@@ -25,10 +25,10 @@ class ToolArgument(BaseModel):
  arg_type: Literal["string", "int", "float", "boolean"] = Field(
  ..., description="The type of the argument. Must be one of: string, integer, float, boolean."
  )
- description: str | None = Field(None, description="A brief description of the argument.")
+ description: str | None = Field(default=None, description="A brief description of the argument.")
  required: bool = Field(default=False, description="Indicates if the argument is required.")
- default: str | None = Field(None, description="The default value for the argument.")
- example: str | None = Field(None, description="An example value to illustrate the argument's usage.")
+ default: str | None = Field(default=None, description="The default value for the argument. This parameter is required.")
+ example: str | None = Field(default=None, description="An example value to illustrate the argument's usage.")
  need_validation: bool = Field(default=False, description="Indicates if the argument needs validation.")
 
 
quantalogic/tools/unified_diff_tool.py CHANGED
@@ -153,6 +153,7 @@ class PatchError(Exception):
  super().__init__(message)
 
  def __str__(self):
+ """Override the default exception string to include context."""
  msg = [super().__str__()]
  if self.context:
  for key, value in self.context.items():
quantalogic/tools/write_file_tool.py CHANGED
@@ -43,6 +43,7 @@ class WriteFileTool(Tool):
  description="Overwrite mode. If true, existing files can be overwritten. Defaults to False.",
  required=False,
  example="False",
+ default="False",
  ),
  ]
 
quantalogic/utils/check_version.py ADDED
@@ -0,0 +1,37 @@
+
+ import requests
+ from packaging import version
+
+ from quantalogic.version import get_version
+
+
+ def check_if_is_latest_version() -> (bool,str|None):
+ """Check if the current version is the latest version on PyPI.
+
+ Returns:
+ bool: True if the current version is the latest, False otherwise
+ """
+ try:
+ current_version = get_version()
+ response = requests.get("https://pypi.org/pypi/quantalogic/json", timeout=5)
+ response.raise_for_status()
+ latest_version = response.json()["info"]["version"]
+ return version.parse(current_version) <= version.parse(latest_version), latest_version
+ except (requests.RequestException, KeyError):
+ return False, None
+
+
+ def main():
+ """Test the version checking functionality."""
+ is_latest, latest_version = check_if_is_latest_version()
+ if is_latest:
+ print("✅ You're running the latest version")
+ elif latest_version:
+ print(f"⚠️ Update available: {latest_version}")
+ else:
+ print("❌ Could not check version")
+
+ if __name__ == "__main__":
+ main()
+
+
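The comparison in the new helper relies on packaging's version ordering, which compares release numbers rather than raw strings. A minimal standalone sketch of that ordering (illustrative version strings only):

    # Sketch: packaging.version compares release numbers, not strings.
    from packaging import version

    print("0.2.9" < "0.2.10")                                  # False (lexical string compare)
    print(version.parse("0.2.9") < version.parse("0.2.10"))    # True (numeric compare)
    print(version.parse("0.2.10") <= version.parse("0.2.12"))  # True, as in the check above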
quantalogic/utils/download_http_file.py CHANGED
@@ -35,18 +35,18 @@ def download_http_file(
 
  for attempt in range(max_retries):
  try:
- logger.info(f"Attempt {attempt + 1} of {max_retries} to download {url}")
+ logger.debug(f"Attempt {attempt + 1} of {max_retries} to download {url}")
  response = requests.get(url, headers=headers, stream=True, timeout=timeout)
  response.raise_for_status()
 
  content_type = response.headers.get("Content-Type", "unknown")
- logger.info(f"Downloading content with Content-Type: {content_type}")
+ logger.debug(f"Downloading content with Content-Type: {content_type}")
 
  with open(local_path, "wb") as file:
  for chunk in response.iter_content(chunk_size=chunk_size):
  file.write(chunk)
 
- logger.info(f"File successfully downloaded and saved to {local_path}")
+ logger.debug(f"File successfully downloaded and saved to {local_path}")
  return local_path
 
  except HTTPError as http_err:
@@ -70,7 +70,7 @@ def download_http_file(
 
  if attempt < max_retries - 1:
  sleep_duration = delay * (2**attempt) # Exponential backoff
- logger.info(f"Retrying in {sleep_duration} seconds...")
+ logger.debug(f"Retrying in {sleep_duration} seconds...")
  sleep(sleep_duration)
 
  logger.error("Max retries reached. Download failed.")
quantalogic/utils/read_http_text_content.py CHANGED
@@ -45,7 +45,7 @@ def read_http_text_content(
 
  for attempt in range(retries):
  try:
- logger.info(f"Attempt {attempt + 1} of {retries} to fetch {url}")
+ logger.debug(f"Attempt {attempt + 1} of {retries} to fetch {url}")
  response = requests.get(url, headers=headers, timeout=timeout)
  response.raise_for_status() # Raise an HTTPError for bad responses (4xx and 5xx)
 
@@ -87,7 +87,7 @@ def read_http_text_content(
 
  if attempt < retries - 1:
  sleep_duration = delay * (2**attempt) # Exponential backoff
- logger.info(f"Retrying in {sleep_duration} seconds...")
+ logger.debug(f"Retrying in {sleep_duration} seconds...")
  sleep(sleep_duration)
 
  return None, error_msg
quantalogic/version.py CHANGED
@@ -1,4 +1,5 @@
+ import importlib.metadata
+
 
- VERSION = "0.2.10"
  def get_version() -> str:
- return VERSION
+ return importlib.metadata.version("quantalogic")
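With this change the version string is read from the installed distribution's metadata instead of a hard-coded constant, so it always matches the wheel that is actually installed. A minimal sketch of the stdlib call in isolation (only works where the package is installed):

    # Sketch: importlib.metadata looks up the installed distribution's version.
    import importlib.metadata

    print(importlib.metadata.version("quantalogic"))  # e.g. "0.2.12" when that wheel is installed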
quantalogic-0.2.12.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: quantalogic
- Version: 0.2.10
+ Version: 0.2.12
  Summary: QuantaLogic ReAct Agents
  Author: Raphaël MANSUY
  Author-email: raphael.mansuy@gmail.com
@@ -31,6 +31,7 @@ Requires-Dist: tree-sitter-python (>=0.23.6,<0.24.0)
  Requires-Dist: tree-sitter-rust (>=0.23.2,<0.24.0)
  Requires-Dist: tree-sitter-scala (>=0.23.4,<0.24.0)
  Requires-Dist: tree-sitter-typescript (>=0.23.2,<0.24.0)
+ Requires-Dist: types-requests (>=2.32.0.20241016,<3.0.0.0)
  Requires-Dist: uvicorn (>=0.34.0,<0.35.0)
  Requires-Dist: websocket (>=0.2.1,<0.3.0)
  Description-Content-Type: text/markdown
quantalogic-0.2.12.dist-info/RECORD CHANGED
@@ -1,29 +1,29 @@
  quantalogic/__init__.py,sha256=HFk7_19UzHzYwvPzb9QTQ4w_lPwTTPda61AYb8qggZY,686
- quantalogic/agent.py,sha256=s6kmgH2XLhtg1ZBl1SckV7ayFXkR_acs305Z2l1O2Bg,26680
- quantalogic/agent_config.py,sha256=Gm-i84dQF7CC1NUEtXpdw-qgZh5Jp1q4ni_SxMIxm-U,4518
- quantalogic/coding_agent.py,sha256=qT24jneNLUH1zMQUWBBTYDIVjCLPNm1iCaXB0v3w9yc,3469
+ quantalogic/agent.py,sha256=ROE9hJe9b2eRhhuH64-rxWWHsXO4lnjV-PdjqqdxS_g,26653
+ quantalogic/agent_config.py,sha256=iPJjY8OZDOoV7lYuL5TAj5JXPcRL4VUOzu5CZVeB3Sg,4520
+ quantalogic/coding_agent.py,sha256=FrodyypgtOOV-AvJiQP8PLkUIDQkDrye26dbOxIEKjM,3486
  quantalogic/event_emitter.py,sha256=jqot2g4JRXc88K6PW837Oqxbf7shZfO-xdPaUWmzupk,7901
- quantalogic/generative_model.py,sha256=N5FjE0kMpKKdpP9_QEOZJ0rkOpYPaJWG91jzEyyovXA,10725
+ quantalogic/generative_model.py,sha256=LKuv_aRpEwMgeEUzHg4r3XkQdPHAr4Mu-ZfWIAqR4Ik,10651
  quantalogic/interactive_text_editor.py,sha256=kYeTA2qej5kxtPvAUHy_Dr2MhrGQAyenLFpW9mU9Rmw,6855
- quantalogic/main.py,sha256=0zsr8-BnDPaR3roagL-3RX71hf4N0PqcgnCnJDO-gII,9626
+ quantalogic/main.py,sha256=CdQVjs_5EMEpXUHoqfW5MyfOnQosERRJx1-M7jSsMXU,11061
  quantalogic/memory.py,sha256=zbtRuM05jaS2lJll-92dt5JfYVLERnF_m_9xqp2x-k0,6304
  quantalogic/model_names.py,sha256=UZlz25zG9B2dpfwdw_e1Gw5qFsKQ7iME9FJh9Ts4u6s,938
- quantalogic/print_event.py,sha256=-4qZmFI2BTkXuGE9DoKm6Vs-GzK1F9WJGt9GqpRQlQQ,2175
+ quantalogic/print_event.py,sha256=nl1aRdYnsU72SRezafePF82zKtrqGfY8OoTx2QfbdE8,2206
  quantalogic/prompts.py,sha256=BHIST57DYcTeTb7rvV1QkGLt0_B8Wk8a_9tsnsN6suk,3547
  quantalogic/server/__init__.py,sha256=8sz_PYAUCrkM6JM5EAUeIzNM4NPW6j6UT72JVkc21WQ,91
- quantalogic/server/agent_server.py,sha256=GmglYf-LeVQQOdikMFDvPq1R0wKt6wIBpufW8XzP-iE,22489
+ quantalogic/server/agent_server.py,sha256=38GEK_MpLp--CX_dCkopTFOU7KcGuOw4-GciwmRJyyg,22502
  quantalogic/server/models.py,sha256=nVUGWElOsUw8QnRCGJylk25wCew_5gohe6nldYighUA,1322
  quantalogic/server/routes.py,sha256=00nFe6s0T4Gv8vCp0wQFjWGo1tC8FViH8h0koAJdWs4,4216
- quantalogic/server/state.py,sha256=UuG4VZTzo3RgQzkgHvnV6Qb1oZWs3fpFXyjVfwFKx3s,7364
+ quantalogic/server/state.py,sha256=TwtL0BTp_LT-fynF1IR4k8WVXuxXWtSv3NgWG9fuUME,7369
  quantalogic/server/static/js/event_visualizer.js,sha256=eFkkWyNZw3zOZlF18kxbfsWql8a2C13qBFEOAPzrj88,19646
  quantalogic/server/static/js/quantalogic.js,sha256=x7TrlZGR1Y0WLK2DWl1xY847BhEWMPnL0Ua7KtOldUc,22311
  quantalogic/server/templates/index.html,sha256=nDnXJoQEm1vXbhXtgaYk0G5VXj0wwzE6KrqEDhHFpj4,7773
- quantalogic/tool_manager.py,sha256=FyghX2M_yGmdL7ovJR4ZGYIiBwkxA-bPjpI-y4IFx4Y,2421
+ quantalogic/tool_manager.py,sha256=JAC5E5kLfYzYJx0QRIWbG14q1hlkOcwJFBG7HE8twpU,2425
  quantalogic/tools/__init__.py,sha256=OxZp1nWZC5EewMJJVEvP6Fd2RMFlpaIYLHqChXPG_6s,1495
- quantalogic/tools/agent_tool.py,sha256=qeRp74EBqPSGu6JNZMATGyDoSCzPo7EnB2rmCP5wsBE,3050
+ quantalogic/tools/agent_tool.py,sha256=MXCXxWHRch7VK4UWhtRP1jeI8Np9Ne2CUGo8vm1oZiM,3064
  quantalogic/tools/download_http_file_tool.py,sha256=wTfanbXjIRi5-qrbluuLvNmDNhvmYAnlMVb3dO8C2ss,2210
  quantalogic/tools/edit_whole_content_tool.py,sha256=nXmpAvojvqvAcqNMy1kUKZ1ocboky_ZcnCR4SNCSPgw,2360
- quantalogic/tools/elixir_tool.py,sha256=rGWP6lsfSw0pyZi9I4Fcj4k7Q8Ie6BAFaPtC9YiPYfc,7637
+ quantalogic/tools/elixir_tool.py,sha256=fzPPtAW-Koy9KB0r5k2zV1f1U0WphL-LXPPOBkeNkug,7652
  quantalogic/tools/execute_bash_command_tool.py,sha256=fnx-zSPpxR2EofaleygAorrR21gRs43jBWh7IBAoNKw,4131
  quantalogic/tools/input_question_tool.py,sha256=UoTlNhdmdr-eyiVtVCG2qJe_R4bU_ag-DzstSdmYkvM,1848
  quantalogic/tools/language_handlers/__init__.py,sha256=5GD6TYsMqRni0nwePp2KOjNQ04GnT5wihT6YAuvx43c,699
@@ -37,34 +37,35 @@ quantalogic/tools/language_handlers/rust_handler.py,sha256=t_AqKVa3KVk6SVkq_UjUU
  quantalogic/tools/language_handlers/scala_handler.py,sha256=wr-cWOIFOc0UYwODmEtT6rV63Qf1NyNB_BLo23GLrvk,1281
  quantalogic/tools/language_handlers/typescript_handler.py,sha256=L4vuJMYxKO3_83dQhdwZ9fogauIV7rwoicRT0xLGfkQ,1738
  quantalogic/tools/list_directory_tool.py,sha256=8Hy38DelSh-mRqS_uDLpeBYoHLtEy5ji77xI-TJu3Ms,4176
- quantalogic/tools/llm_tool.py,sha256=ktwsg1LI3rSPS5XCwp-GAFiYCaXPSXRzUaqjOexPfEg,5011
- quantalogic/tools/llm_vision_tool.py,sha256=S99pKpIi8WUPREXXPArsuA7iTdXzUtiXg32J9NKCvlc,5028
+ quantalogic/tools/llm_tool.py,sha256=9SNApqvNT2qF9GWxEVzMKq8XiVtc-UoyamhsIWvgZQI,5456
+ quantalogic/tools/llm_vision_tool.py,sha256=OP2B6bYjnKvO7TjSG0gEtwvV5L0m4uFDWAaU54cOG3w,4913
  quantalogic/tools/markitdown_tool.py,sha256=GHJMPdWmwF-CBu3vHWhy-kXJYRDluFkh18KN06yNHOc,4101
- quantalogic/tools/nodejs_tool.py,sha256=2VTkZgtyXmv2E18CVaml3CKZE28WL9Tbv2IVdziv8wA,19903
- quantalogic/tools/python_tool.py,sha256=t66ge3xXS55-wJkddnVU9210TuDVoRI0Y-rsZwWxYIk,18154
+ quantalogic/tools/nodejs_tool.py,sha256=zdnE0VFj_5786uR2L0o-SKR0Gk8L-U7rdj7xGHJYIq0,19905
+ quantalogic/tools/python_tool.py,sha256=70HLbfU2clOBgj4axDOtIKzXwEBMNGEAX1nGSf-KNNQ,18156
  quantalogic/tools/read_file_block_tool.py,sha256=FTcDAUOOPQOvWRjnRI6nMI1Upus90klR4PC0pbPP_S8,5266
  quantalogic/tools/read_file_tool.py,sha256=bOWJbA0GU-hYbFOJ-tQVlSVz0r6WrVAfzy4aXOnAcBw,2757
- quantalogic/tools/replace_in_file_tool.py,sha256=6nbW3v0yzo3YDh-w30oW_kulmYzn0xsIfUw1jcpJh_c,12950
- quantalogic/tools/ripgrep_tool.py,sha256=a0V5a-HozZqWPBxmIq8BoRvcFd7UXm1Z7TuE7l73H0Y,14288
+ quantalogic/tools/replace_in_file_tool.py,sha256=n63s09Y8RXOKGjxfWw0D6F6JpQ6ERSJxVJOzmceVXLk,12953
+ quantalogic/tools/ripgrep_tool.py,sha256=sRzHaWac9fa0cCGhECJN04jw_Ko0O3u45KDWzMIYcvY,14291
  quantalogic/tools/search_definition_names.py,sha256=qolDbRUssE5EyghWqgs69Kmu_dhzeO9GliqgP9pkUHM,16704
  quantalogic/tools/task_complete_tool.py,sha256=L8tuyVoN07Q2hOsxx17JTW0C5Jd_N-C0i_0PtCUQUKU,929
- quantalogic/tools/tool.py,sha256=pC9aLMv4R0pDbuQsWHOUQG1EVaDeAEABIoH0djznCH8,5598
- quantalogic/tools/unified_diff_tool.py,sha256=cDWZjtIVodvhWxV479u0RRs98GKn13b3I7z9anFyvK0,14075
- quantalogic/tools/write_file_tool.py,sha256=7zbXbjZDKDu6ZY1N1mH2w0oyVz5FMxROk1JmKtdfpTA,3735
+ quantalogic/tools/tool.py,sha256=ixouKOcmyYMP4YnzXANhIxixh4xgBVCRqvnBccqrAY0,5650
+ quantalogic/tools/unified_diff_tool.py,sha256=wTKXIoBEPcC_EcQmpJZVi95vq0Ncvsw1Kyc7XqPO6dU,14147
+ quantalogic/tools/write_file_tool.py,sha256=_mx9_Zjg2oMAAVzlcHEKjZVZUxQVgbRfcoMKgWnoZcg,3764
  quantalogic/utils/__init__.py,sha256=Ltq7tzLuHCl9BpCvfRVA9Sjrtp1RJesrn7G980lbl_c,563
  quantalogic/utils/ask_user_validation.py,sha256=F0jkbFJVXAImcSSP7op6dov5i80hRvZGRvBHbfcZrxg,340
- quantalogic/utils/download_http_file.py,sha256=cm0Jr2PWLoXxJL5-zMOYxJtYvugZSLWurEWt2HlPryw,3468
+ quantalogic/utils/check_version.py,sha256=LZDU78EwMSDw3cv-sXQK_3wfTGZeFAyPR4OsepM8aIU,1095
+ quantalogic/utils/download_http_file.py,sha256=FTN3brq9WvCFvuBX-lYAhjsdYTzQT4m9m2vqlcyjkNk,3472
  quantalogic/utils/get_coding_environment.py,sha256=ujZ2_nDryrLWe6ZUalSu9WDG6t53UJFn7FJ_ER7Jixc,389
  quantalogic/utils/get_environment.py,sha256=7wWruSHYTUlnQWW27qU3WFYZnncqqqdofsxAsUU7lhw,875
  quantalogic/utils/get_quantalogic_rules_content.py,sha256=fnEFTyClXzpI0MLaM-gB9R6l4CJlu2NnaYiR09ciJC8,673
  quantalogic/utils/git_ls.py,sha256=_aXg2TwqYv9CoOrhQ1gqHCqu1j8wOVigQNWbGncSDlM,4361
  quantalogic/utils/read_file.py,sha256=tSRVHk8dIP4nNLL89v5kRki4hOTjVyjbmuEb2zwvwCY,2077
- quantalogic/utils/read_http_text_content.py,sha256=1nRLQ9DHP_fKrm0rIEJBF0ROmB78e4lct2hUzD2PAUk,4408
- quantalogic/version.py,sha256=rbj6GCYHf5rUvES_yzIMSiI_7I2h-zkrmZOjre8NI7A,65
+ quantalogic/utils/read_http_text_content.py,sha256=n3IayT5KcqctIVVF2gOQQAMf3Ow6eenlVgfXTpLcQbw,4410
+ quantalogic/version.py,sha256=ea_cRutaQk5_lwlLbUUvPFuOT7Of7-gAsDl7wdveS-g,107
  quantalogic/xml_parser.py,sha256=cTRorr5sVfkIzH72M0C-GQ9ROGPiz2FTT66U9ndjzhE,9538
  quantalogic/xml_tool_parser.py,sha256=lsVzClZBrZan7wjCuCKnGHWzksXI3VMy_vWthxu2_bo,3738
- quantalogic-0.2.10.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- quantalogic-0.2.10.dist-info/METADATA,sha256=Hu3vVRYMVGIPJpoCxP_6S4XKbw6R5rKPxcvS5y4Xfnw,39747
- quantalogic-0.2.10.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
- quantalogic-0.2.10.dist-info/entry_points.txt,sha256=wgSq5SRU98yvlRHGEZD1Xn7sS5CSjH2RfUtTa6Qy28Q,52
- quantalogic-0.2.10.dist-info/RECORD,,
+ quantalogic-0.2.12.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ quantalogic-0.2.12.dist-info/METADATA,sha256=gqelTtdz_LdjDImTJ4lAhTUFqTQ3VNf_2Wa0OjEAY84,39806
+ quantalogic-0.2.12.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+ quantalogic-0.2.12.dist-info/entry_points.txt,sha256=wgSq5SRU98yvlRHGEZD1Xn7sS5CSjH2RfUtTa6Qy28Q,52
+ quantalogic-0.2.12.dist-info/RECORD,,