quantalogic 0.58.0__py3-none-any.whl → 0.59.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,7 +2,7 @@
 
 import sys
 from threading import Lock
-from typing import Optional
+from typing import Any, Callable, Optional, Set
 
 from loguru import logger
 from rich.console import Console
@@ -17,9 +17,11 @@ from quantalogic.task_file_reader import get_task_from_file
 from quantalogic.version_check import check_new_version, get_version
 from quantalogic.welcome_message import display_welcome_message
 
-# Spinner control
+# Spinner and console output control
 spinner_lock = Lock()
+console_lock = Lock()
 current_spinner = None
+processed_chunks: Set[str] = set()
 
 
 def configure_logger(log_level: str) -> None:
@@ -35,7 +37,6 @@ def configure_logger(log_level: str) -> None:
 def set_litellm_verbose(verbose_mode: bool) -> None:
     """Set the verbosity of the litellm library."""
     import litellm
-
     litellm.set_verbose = verbose_mode
 
 
@@ -45,16 +46,23 @@ def switch_verbose(verbose_mode: bool, log_level: str = "info") -> None:
         configure_logger("DEBUG")
     else:
         configure_logger(log_level)
-
+
+    verbose_mode = verbose_mode if verbose_mode is not None else False
     set_litellm_verbose(verbose_mode)
+    logger.debug(f"litellm verbose mode set to: {verbose_mode}")
 
 
-def start_spinner(console: Console) -> None:
-    """Start the thinking spinner."""
+def start_spinner(console: Console, message: str = "[yellow]Thinking...[/yellow]") -> None:
+    """Start the spinner with a custom message.
+
+    Args:
+        console: The Rich console instance
+        message: Custom message to display with the spinner (default: "Thinking...")
+    """
     global current_spinner
     with spinner_lock:
         if current_spinner is None:
-            current_spinner = console.status("[yellow]Thinking...", spinner="dots")
+            current_spinner = console.status(message, spinner="dots")
             current_spinner.start()
 
 
@@ -67,20 +75,271 @@ def stop_spinner(console: Console) -> None:
             current_spinner = None
 
 
+def register_spinner_handlers(agent, console: Console) -> None:
+    """Register common spinner control handlers."""
+    def handle_think_start(*args, **kwargs):
+        if current_spinner:
+            stop_spinner(console)
+        start_spinner(console)
+
+    def handle_think_end(*args, **kwargs):
+        stop_spinner(console)
+
+    agent.event_emitter.on("task_think_start", handle_think_start)
+    agent.event_emitter.on("task_think_end", handle_think_end)
+
+
+def create_stream_handler(console: Console) -> Callable:
+    """Create a handler for streaming chunks that works across all modes."""
+    def handle_stream_chunk(event: str, data: Any) -> None:
+        if current_spinner:
+            stop_spinner(console)
+
+        if data is None:
+            return
+
+        # Extract content from various data formats
+        if isinstance(data, str):
+            content = data
+        elif isinstance(data, dict) and "data" in data:
+            content = data["data"]
+        elif isinstance(data, dict):
+            logger.debug(f"Stream chunk data without 'data' key: {data}")
+            content = str(data)
+        else:
+            try:
+                content = str(data)
+            except Exception as e:
+                logger.error(f"Error processing stream chunk: {e}")
+                return
+
+        # Print content with thread safety
+        with console_lock:
+            console.print(content, end="", markup=False)
+
+    return handle_stream_chunk
+
+
+def run_chat_mode(agent, console: Console, config: QLConfig) -> None:
+    """Run the assistant in chat mode."""
+    console.print(f"[green]Entering chat mode with persona: {config.chat_system_prompt}[/green]")
+    if config.tool_mode:
+        console.print(f"[green]Tool mode: {config.tool_mode}[/green]")
+    console.print("[yellow]Type '/exit' to quit or '/clear' to reset memory.[/yellow]")
+
+    # Event handlers specific to chat mode
+    def handle_chat_start(*args, **kwargs):
+        start_spinner(console)  # Uses default "Thinking..." message
+        console.print("Assistant: ", end="")
+
+    def handle_chat_response(*args, **kwargs):
+        # Always stop any active spinner (whether from tool execution or thinking)
+        stop_spinner(console)
+
+        if "response" in kwargs:
+            # Get the response from the kwargs
+            response_text = kwargs["response"]
+
+            # Simply print the response text without special tool call handling
+            console.print(response_text)
+
+    def handle_chat_end(*args, **kwargs):
+        # This function is intentionally empty as we're handling the prompt in the main loop
+        pass
+
+    def handle_tool_execution_start(*args, **kwargs):
+        # Print a newline before tool execution starts
+        console.print("\n")
+        # Start a spinner to indicate tool execution is in progress
+        tool_name = kwargs.get("tool_name", "tool")
+        start_spinner(console, f"[yellow]Executing {tool_name}...[/yellow]")
+
+    def handle_tool_execution_end(*args, **kwargs):
+        # Stop the tool execution spinner
+        stop_spinner(console)
+        # Start a thinking spinner to indicate the agent is processing the tool results
+        start_spinner(console, "[yellow]Processing tool results...[/yellow]")
+
+    # Register chat-specific handlers
+    agent.event_emitter.on("chat_start", handle_chat_start)
+    agent.event_emitter.on("chat_end", handle_chat_end)
+    agent.event_emitter.on("chat_response", handle_chat_response)
+
+    # Register tool execution handlers for newlines
+    agent.event_emitter.on("tool_execution_start", handle_tool_execution_start)
+    agent.event_emitter.on("tool_execution_end", handle_tool_execution_end)
+
+    # First clear any existing handlers to prevent duplicates
+    agent.event_emitter.clear("stream_chunk")
+
+    # Register ONLY ONE stream handler for chat mode (fix for token duplication)
+    agent.event_emitter.on("stream_chunk", create_stream_handler(console))
+
+    try:
+        while True:
+            # Add a newline before the prompt for better readability, except on the first iteration
+            if agent.memory.memory and len(agent.memory.memory) > 1:  # Check if we have any conversation history
+                console.print("")
+
+            user_input = console.input("You: ")
+            if user_input.lower() == "/exit":
+                console.print("[yellow]Exiting chat mode.[/yellow]")
+                # Emit the chat_end event before exiting
+                agent.event_emitter.emit("chat_end")
+                break
+            elif user_input.lower() == "/clear":
+                agent.clear_memory()
+                console.print("[green]Chat memory cleared.[/green]")
+                continue
+
+            try:
+                response = agent.chat(user_input, streaming=not config.no_stream)
+                # For non-streaming mode, we need to manually emit the chat_response event
+                # since it won't be triggered by the streaming handler
+                if config.no_stream and response:
+                    agent.event_emitter.emit("chat_response", response=response)
+            except Exception as e:
+                stop_spinner(console)
+                console.print(f"[red]Error: {str(e)}[/red]")
+                logger.error(f"Chat error: {e}", exc_info=True)
+    except KeyboardInterrupt:
+        console.print("\n[yellow]Chat interrupted. Exiting chat mode.[/yellow]")
+        # Emit the chat_end event when interrupted with Ctrl+C
+        agent.event_emitter.emit("chat_end")
+
+
+def run_file_mode(agent, console: Console, file: str, config: QLConfig) -> None:
+    """Run a task from a file."""
+    task_content = get_task_from_file(file)
+
+    # Clear any existing handlers to prevent duplicates
+    agent.event_emitter.clear("stream_chunk")
+
+    # Register stream handler
+    agent.event_emitter.on("stream_chunk", create_stream_handler(console))
+
+    # Execute task from file
+    logger.debug(f"Solving task with agent: {task_content}")
+    if config.max_iterations < 1:
+        raise ValueError("max_iterations must be greater than 0")
+
+    result = agent.solve_task(
+        task=task_content,
+        max_iterations=config.max_iterations,
+        streaming=not config.no_stream
+    )
+
+    logger.debug(f"Task solved with result: {result} using {config.max_iterations} iterations")
+    console.print(
+        Panel.fit(
+            f"[bold]Task Result:[/bold]\n{result}",
+            title="[bold]Execution Output[/bold]",
+            border_style="green"
+        )
+    )
+
+
+def run_task_mode(agent, console: Console, task: str, config: QLConfig) -> None:
+    """Run a single task from command line."""
+    check_new_version()
+
+    # Clear any existing handlers to prevent duplicates
+    agent.event_emitter.clear("stream_chunk")
+
+    # Register stream handler
+    agent.event_emitter.on("stream_chunk", create_stream_handler(console))
+
+    # Execute task from command line
+    logger.debug(f"Solving task with agent: {task}")
+    if config.max_iterations < 1:
+        raise ValueError("max_iterations must be greater than 0")
+
+    result = agent.solve_task(
+        task=task,
+        max_iterations=config.max_iterations,
+        streaming=not config.no_stream
+    )
+
+    logger.debug(f"Task solved with result: {result} using {config.max_iterations} iterations")
+    console.print(
+        Panel.fit(
+            f"[bold]Task Result:[/bold]\n{result}",
+            title="[bold]Execution Output[/bold]",
+            border_style="green"
+        )
+    )
+
+
+def process_interactive_command(
+    command: str,
+    agent,
+    console: Console
+) -> bool:
+    """Process interactive commands and return whether to continue."""
+    if command == "/clear":
+        logger.info("Clearing agent memory...")
+        console.print("[yellow]Clearing agent memory...[/yellow]")
+        agent.clear_memory()
+        console.print("[green]Memory cleared successfully![/green]")
+        return True
+    else:
+        console.print(f"[red]Unknown command: {command}[/red]")
+        return True
+
+
+def handle_interactive_task(
+    agent,
+    console: Console,
+    task_content: str,
+    max_iterations: int,
+    no_stream: bool
+) -> None:
+    """Handle a single interactive task."""
+    console.print(
+        Panel.fit(
+            f"[bold]Task to be submitted:[/bold]\n{task_content}",
+            title="[bold]Task Preview[/bold]",
+            border_style="blue",
+        )
+    )
+
+    if not Confirm.ask("[bold]Are you sure you want to submit this task?[/bold]"):
+        console.print("[yellow]Task submission cancelled.[/yellow]")
+        return
+
+    console.print(
+        Panel.fit(
+            "[green]✓ Task successfully submitted! Processing...[/green]",
+            title="[bold]Status[/bold]",
+            border_style="green",
+        )
+    )
+
+    logger.debug(f"Solving task with agent: {task_content}")
+    result = agent.solve_task(
+        task=task_content,
+        max_iterations=max_iterations,
+        streaming=not no_stream,
+        clear_memory=False
+    )
+
+    logger.debug(f"Task solved with result: {result} using {max_iterations} iterations")
+    console.print(
+        Panel.fit(
+            f"[bold]Task Result:[/bold]\n{result}",
+            title="[bold]Execution Output[/bold]",
+            border_style="green"
+        )
+    )
+
+
 def interactive_task_runner(
     agent,
     console: Console,
     max_iterations: int,
     no_stream: bool,
 ) -> None:
-    """Run tasks interactively, asking the user if they want to continue after each task.
-
-    Args:
-        agent: The agent instance to use for solving tasks
-        console: Rich console instance for output
-        max_iterations: Maximum number of iterations per task
-        no_stream: Disable streaming output
-    """
+    """Run tasks interactively, asking the user if they want to continue after each task."""
     while True:
         logger.debug("Waiting for user input...")
         task_content = get_multiline_input(console).strip()
@@ -90,56 +349,55 @@ def interactive_task_runner(
             console.print("[yellow]No task provided. Exiting...[/yellow]")
             break
 
-        # Handle commands with single return
+        # Handle commands
         if task_content.startswith("/"):
-            command = task_content.lower()
-            if command == "/clear":
-                logger.info("Clearing agent memory...")
-                console.print("[yellow]Clearing agent memory...[/yellow]")
-                agent.clear_memory()
-                console.print("[green]Memory cleared successfully![/green]")
-                continue
-            else:
-                console.print(f"[red]Unknown command: {command}[/red]")
-                continue
-
-        # For non-commands, ask for confirmation
-        console.print(
-            Panel.fit(
-                f"[bold]Task to be submitted:[/bold]\n{task_content}",
-                title="[bold]Task Preview[/bold]",
-                border_style="blue",
-            )
-        )
-
-        if not Confirm.ask("[bold]Are you sure you want to submit this task?[/bold]"):
-            console.print("[yellow]Task submission cancelled.[/yellow]")
-            if not Confirm.ask("[bold]Would you like to ask another question?[/bold]"):
+            if not process_interactive_command(task_content.lower(), agent, console):
                 break
             continue
 
-        console.print(
-            Panel.fit(
-                "[green]✓ Task successfully submitted! Processing...[/green]",
-                title="[bold]Status[/bold]",
-                border_style="green",
-            )
-        )
+        # Handle regular task
+        handle_interactive_task(agent, console, task_content, max_iterations, no_stream)
+
+        if not Confirm.ask("[bold]Would you like to ask another question?[/bold]"):
+            break
 
-        logger.debug(f"Solving task with agent: {task_content}")
-        result = agent.solve_task(
-            task=task_content, max_iterations=max_iterations, streaming=not no_stream, clear_memory=False
-        )
-        logger.debug(f"Task solved with result: {result} using {max_iterations} iterations")
 
-        console.print(
-            Panel.fit(
-                f"[bold]Task Result:[/bold]\n{result}", title="[bold]Execution Output[/bold]", border_style="green"
-            )
-        )
+def run_interactive_mode(agent, console: Console, config: QLConfig) -> None:
+    """Run the assistant in interactive mode."""
+    display_welcome_message(
+        console=console,
+        model_name=config.model_name,
+        version=get_version(),
+        vision_model_name=config.vision_model_name,
+        max_iterations=config.max_iterations,
+        compact_every_n_iteration=config.compact_every_n_iteration,
+        max_tokens_working_memory=config.max_tokens_working_memory,
+        mode=config.mode,
+    )
+    check_new_version()
+
+    logger.debug(
+        f"Created agent for mode: {config.mode} with model: {config.model_name}, "
+        f"vision model: {config.vision_model_name}, no_stream: {config.no_stream}"
+    )
 
-        if not Confirm.ask("[bold]Would you like to ask another question?[/bold]"):
-            break
+    # Register event handlers for interactive mode
+    events = [
+        "task_start", "task_think_start", "task_think_end", "task_complete",
+        "tool_execution_start", "tool_execution_end", "error_max_iterations_reached",
+        "memory_full", "memory_compacted", "memory_summary",
+    ]
+
+    agent.event_emitter.on(event=events, listener=console_print_events)
+
+    # Clear any existing handlers to prevent duplicates
+    agent.event_emitter.clear("stream_chunk")
+
+    # Register stream handler
+    agent.event_emitter.on("stream_chunk", create_stream_handler(console))
+
+    logger.debug(f"Registered event handlers for agent events with events: {events}")
+    interactive_task_runner(agent, console, config.max_iterations, config.no_stream)
 
 
 def task_runner(
@@ -148,15 +406,11 @@ def task_runner(
     config: QLConfig,
     task: Optional[str],
 ) -> None:
-    """Execute a task with the QuantaLogic AI Assistant.
-
-    Args:
-        console: Rich console instance for output
-        file: Optional path to task file
-        config: QuantaLogic configuration object
-        task: Optional task string
-    """
-    switch_verbose(config.verbose, config.log)
+    """Execute a task or chat with the QuantaLogic AI Assistant."""
+    switch_verbose(
+        config.verbose if hasattr(config, 'verbose') else False,
+        config.log
+    )
 
     # Create the agent instance with the specified configuration
     agent = create_agent_for_mode(
@@ -166,118 +420,21 @@ def task_runner(
         thinking_model_name=config.thinking_model_name,
         compact_every_n_iteration=config.compact_every_n_iteration,
         max_tokens_working_memory=config.max_tokens_working_memory,
+        chat_system_prompt=config.chat_system_prompt,
+        tool_mode=config.tool_mode,
     )
 
     AgentRegistry.register_agent("main_agent", agent)
-
-    if file:
-        task_content = get_task_from_file(file)
-        # Execute single task from file
-        logger.debug(f"Solving task with agent: {task_content}")
-        if config.max_iterations < 1:
-            raise ValueError("max_iterations must be greater than 0")
-        result = agent.solve_task(
-            task=task_content, max_iterations=config.max_iterations, streaming=not config.no_stream
-        )
-        logger.debug(f"Task solved with result: {result} using {config.max_iterations} iterations")
-
-        console.print(
-            Panel.fit(
-                f"[bold]Task Result:[/bold]\n{result}", title="[bold]Execution Output[/bold]", border_style="green"
-            )
-        )
+    register_spinner_handlers(agent, console)
+
+    # Dispatch to the appropriate mode runner
+    if config.mode == "chat":
+        run_chat_mode(agent, console, config)
+        # Print the final prompt with backslash when exiting chat mode
+        print("You:\\")
+    elif file:
+        run_file_mode(agent, console, file, config)
+    elif task:
+        run_task_mode(agent, console, task, config)
     else:
-        if task:
-            check_new_version()
-            task_content = task
-            # Execute single task from command line
-            logger.debug(f"Solving task with agent: {task_content}")
-            if config.max_iterations < 1:
-                raise ValueError("max_iterations must be greater than 0")
-            result = agent.solve_task(
-                task=task_content, max_iterations=config.max_iterations, streaming=not config.no_stream
-            )
-            logger.debug(f"Task solved with result: {result} using {config.max_iterations} iterations")
-
-            console.print(
-                Panel.fit(
-                    f"[bold]Task Result:[/bold]\n{result}", title="[bold]Execution Output[/bold]", border_style="green"
-                )
-            )
-        else:
-            # Interactive mode
-            display_welcome_message(
-                console=console,
-                model_name=config.model_name,
-                version=get_version(),
-                vision_model_name=config.vision_model_name,
-                max_iterations=config.max_iterations,
-                compact_every_n_iteration=config.compact_every_n_iteration,
-                max_tokens_working_memory=config.max_tokens_working_memory,
-                mode=config.mode,
-            )
-            check_new_version()
-            logger.debug(
-                f"Created agent for mode: {config.mode} with model: {config.model_name}, vision model: {config.vision_model_name}, no_stream: {config.no_stream}"
-            )
-
-            events = [
-                "task_start",
-                "task_think_start",
-                "task_think_end",
-                "task_complete",
-                "tool_execution_start",
-                "tool_execution_end",
-                "error_max_iterations_reached",
-                "memory_full",
-                "memory_compacted",
-                "memory_summary",
-            ]
-
-            # def ask_continue(event: str, data: any) -> None:
-            #     ## Ask for ctrl+return
-            #     if event == "task_think_end":
-            #         ## Wait return on the keyboard
-            #         input("Press [Enter] to continue...")
-
-            # Add spinner control to event handlers
-            def handle_task_think_start(*args, **kwargs):
-                start_spinner(console)
-
-            def handle_task_think_end(*args, **kwargs):
-                stop_spinner(console)
-
-            def handle_stream_chunk(event: str, data: str) -> None:
-                if current_spinner:
-                    stop_spinner(console)
-                if data is not None:
-                    console.print(data, end="", markup=False)
-
-            # agent.event_emitter.on(
-            #     event="task_think_end",
-            #     listener=ask_continue,
-            # )
-
-            agent.event_emitter.on(
-                event=events,
-                listener=console_print_events,
-            )
-
-            agent.event_emitter.on(
-                event="task_think_start",
-                listener=handle_task_think_start,
-            )
-
-            agent.event_emitter.on(
-                event="task_think_end",
-                listener=handle_task_think_end,
-            )
-
-            agent.event_emitter.on(
-                event="stream_chunk",
-                listener=handle_stream_chunk,
-            )
-
-            logger.debug("Registered event handlers for agent events with events: {events}")
-
-            interactive_task_runner(agent, console, config.max_iterations, config.no_stream)
+        run_interactive_mode(agent, console, config)
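
Usage sketch: a minimal way to exercise the new mode dispatch added to task_runner in 0.59.0. The config attribute names below (mode, model_name, chat_system_prompt, tool_mode, no_stream, max_iterations, verbose, log) and the task_runner(console, file, config, task) signature come from the diff above; the import path and the QLConfig constructor keywords are assumptions for illustration only, not confirmed by this diff.

    # Sketch only: import location and QLConfig constructor are assumed.
    from rich.console import Console
    from quantalogic.main import QLConfig, task_runner  # assumed module path

    config = QLConfig(                  # field names taken from the diff; keyword form assumed
        mode="chat",                    # routes task_runner() to run_chat_mode()
        model_name="gpt-4o-mini",       # placeholder model identifier
        chat_system_prompt="You are a concise assistant.",
        tool_mode=None,
        no_stream=False,                # keep streaming so the stream_chunk handler prints tokens
        max_iterations=10,
        verbose=False,
        log="info",
    )
    task_runner(console=Console(), file=None, config=config, task=None)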