quantalogic 0.59.3__py3-none-any.whl → 0.60.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. quantalogic/agent.py +268 -24
  2. quantalogic/create_custom_agent.py +26 -78
  3. quantalogic/prompts/chat_system_prompt.j2 +10 -7
  4. quantalogic/prompts/code_2_system_prompt.j2 +190 -0
  5. quantalogic/prompts/code_system_prompt.j2 +142 -0
  6. quantalogic/prompts/doc_system_prompt.j2 +178 -0
  7. quantalogic/prompts/legal_2_system_prompt.j2 +218 -0
  8. quantalogic/prompts/legal_system_prompt.j2 +140 -0
  9. quantalogic/prompts/system_prompt.j2 +6 -2
  10. quantalogic/prompts/tools_prompt.j2 +2 -4
  11. quantalogic/prompts.py +23 -4
  12. quantalogic/server/agent_server.py +1 -1
  13. quantalogic/tools/__init__.py +2 -0
  14. quantalogic/tools/duckduckgo_search_tool.py +1 -0
  15. quantalogic/tools/execute_bash_command_tool.py +114 -57
  16. quantalogic/tools/file_tracker_tool.py +49 -0
  17. quantalogic/tools/google_packages/google_news_tool.py +3 -0
  18. quantalogic/tools/image_generation/dalle_e.py +89 -137
  19. quantalogic/tools/rag_tool/__init__.py +2 -9
  20. quantalogic/tools/rag_tool/document_rag_sources_.py +728 -0
  21. quantalogic/tools/rag_tool/ocr_pdf_markdown.py +144 -0
  22. quantalogic/tools/replace_in_file_tool.py +1 -1
  23. quantalogic/tools/terminal_capture_tool.py +293 -0
  24. quantalogic/tools/tool.py +4 -0
  25. quantalogic/tools/utilities/__init__.py +2 -0
  26. quantalogic/tools/utilities/download_file_tool.py +3 -5
  27. quantalogic/tools/utilities/llm_tool.py +283 -0
  28. quantalogic/tools/utilities/selenium_tool.py +296 -0
  29. quantalogic/tools/utilities/vscode_tool.py +1 -1
  30. quantalogic/tools/web_navigation/__init__.py +5 -0
  31. quantalogic/tools/web_navigation/web_tool.py +145 -0
  32. quantalogic/tools/write_file_tool.py +72 -36
  33. {quantalogic-0.59.3.dist-info → quantalogic-0.60.0.dist-info}/METADATA +1 -1
  34. {quantalogic-0.59.3.dist-info → quantalogic-0.60.0.dist-info}/RECORD +37 -28
  35. quantalogic/tools/rag_tool/document_metadata.py +0 -15
  36. quantalogic/tools/rag_tool/query_response.py +0 -20
  37. quantalogic/tools/rag_tool/rag_tool.py +0 -566
  38. quantalogic/tools/rag_tool/rag_tool_beta.py +0 -264
  39. {quantalogic-0.59.3.dist-info → quantalogic-0.60.0.dist-info}/LICENSE +0 -0
  40. {quantalogic-0.59.3.dist-info → quantalogic-0.60.0.dist-info}/WHEEL +0 -0
  41. {quantalogic-0.59.3.dist-info → quantalogic-0.60.0.dist-info}/entry_points.txt +0 -0
quantalogic/agent.py CHANGED
@@ -23,6 +23,7 @@ from quantalogic.utils import get_environment
  from quantalogic.utils.ask_user_validation import console_ask_for_user_validation
  from quantalogic.xml_parser import ToleranceXMLParser
  from quantalogic.xml_tool_parser import ToolParser
+ import uuid

  # Maximum ratio occupancy of the occupied memory
  MAX_OCCUPANCY = 90.0
@@ -78,7 +79,7 @@ class Agent(BaseModel):
  config: AgentConfig
  task_to_solve: str
  task_to_solve_summary: str = ""
- ask_for_user_validation: Callable[[str, str], Awaitable[bool]] = console_ask_for_user_validation
+ ask_for_user_validation: Callable[[str, str], bool] = console_ask_for_user_validation
  last_tool_call: dict[str, Any] = {}  # Stores the last tool call information
  total_tokens: int = 0  # Total tokens in the conversation
  current_iteration: int = 0
@@ -91,6 +92,8 @@ class Agent(BaseModel):
  _model_name: str = PrivateAttr(default="")
  chat_system_prompt: str  # Base persona prompt for chat mode
  tool_mode: Optional[str] = None  # Tool or toolset to prioritize in chat mode
+ tracked_files: list[str] = []  # List to track files created or modified during execution
+ agent_mode: str = "react"  # Default mode is ReAct

  def __init__(
      self,
@@ -98,7 +101,7 @@ class Agent(BaseModel):
      memory: AgentMemory = AgentMemory(),
      variable_store: VariableMemory = VariableMemory(),
      tools: list[Tool] = [TaskCompleteTool()],
-     ask_for_user_validation: Callable[[str, str], Awaitable[bool]] = console_ask_for_user_validation,
+     ask_for_user_validation: Callable[[str, str], bool] = console_ask_for_user_validation,
      task_to_solve: str = "",
      specific_expertise: str = "General AI assistant with coding and problem-solving capabilities",
      get_environment: Callable[[], str] = get_environment,
@@ -107,6 +110,7 @@ class Agent(BaseModel):
      event_emitter: EventEmitter | None = None,
      chat_system_prompt: str | None = None,
      tool_mode: Optional[str] = None,
+     agent_mode: str = "react",
  ):
      """Initialize the agent with model, memory, tools, and configurations.

@@ -124,6 +128,7 @@ class Agent(BaseModel):
          event_emitter: EventEmitter instance for event handling
          chat_system_prompt: Optional base system prompt for chat mode persona
          tool_mode: Optional tool or toolset to prioritize in chat mode
+         agent_mode: Mode to use ("react" or "chat")
      """
      try:
          logger.debug("Initializing agent...")
@@ -140,8 +145,9 @@ class Agent(BaseModel):
          tools_markdown = tool_manager.to_markdown()
          logger.debug(f"Tools Markdown: {tools_markdown}")

+         logger.info(f"Agent mode: {agent_mode}")
          system_prompt_text = system_prompt(
-             tools=tools_markdown, environment=environment, expertise=specific_expertise
+             tools=tools_markdown, environment=environment, expertise=specific_expertise, agent_mode=agent_mode
          )
          logger.debug(f"System prompt: {system_prompt_text}")

@@ -151,7 +157,7 @@ class Agent(BaseModel):
              system_prompt=system_prompt_text,
          )

-         chat_system_prompt = chat_system_prompt or (
+         chat_system_prompt = chat_system_prompt or specific_expertise or (
              "You are a friendly, helpful AI assistant. Engage in natural conversation, "
              "answer questions, and use tools when explicitly requested or when they enhance your response."
          )
@@ -178,6 +184,7 @@ class Agent(BaseModel):
              max_tokens_working_memory=max_tokens_working_memory,
              chat_system_prompt=chat_system_prompt,
              tool_mode=tool_mode,
+             agent_mode=agent_mode,
          )

          self._model_name = model_name
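The constructor hunks above narrow the `ask_for_user_validation` annotation to `Callable[[str, str], bool]` and introduce `agent_mode`. A minimal construction sketch follows; import paths and the model name are assumptions, and the always-approve callback is illustrative. Note that the agent still awaits the validation callback (see the tool-validation hunk further down), so an async callable remains the safe choice despite the narrower annotation.

```python
# Sketch: constructing an Agent against the 0.60.0 signature shown above.
# Assumptions: import paths and the model name; the always-approve callback is illustrative.
from quantalogic.agent import Agent
from quantalogic.tools import TaskCompleteTool

async def approve_everything(validation_id: str, question: str) -> bool:
    # The annotation is now Callable[[str, str], bool], but the agent still awaits this
    # callback (see the tool-validation hunk below), so keeping it async is the safe choice.
    print(f"[{validation_id}] {question}")
    return True

agent = Agent(
    model_name="openrouter/openai/gpt-4o-mini",
    tools=[TaskCompleteTool()],
    ask_for_user_validation=approve_everything,
    agent_mode="react",  # new in 0.60.0; the docstring also documents "chat"
)
```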
@@ -301,7 +308,11 @@
  current_prompt = result.next_prompt

  if result.executed_tool == "task_complete":
-     self._emit_event("task_complete", {"response": result.answer})
+     self._emit_event("task_complete", {
+         "response": result.answer,
+         "message": "Task execution completed",
+         "tracked_files": self.tracked_files if self.tracked_files else []
+     })
  answer = result.answer or ""
  done = True

@@ -316,7 +327,12 @@
  answer = f"Error: {str(e)}"
  done = True

- self._emit_event("task_solve_end")
+ task_solve_end_data = {
+     "result": answer,
+     "message": "Task execution completed",
+     "tracked_files": self.tracked_files if self.tracked_files else []
+ }
+ self._emit_event("task_solve_end", task_solve_end_data)
  return answer

  def chat(
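The `task_complete` and `task_solve_end` events now carry `message` and `tracked_files` alongside the answer. A hedged listener sketch, assuming listeners receive `(event_name, data)` in the style of `console_print_events`:

```python
# Sketch: reading the enriched payloads emitted above.
# Assumption: listeners receive (event_name, data) as with console_print_events.
def report_tracked_files(event: str, data: dict | None = None) -> None:
    data = data or {}
    print(f"[{event}] {data.get('message', '')}")
    for path in data.get("tracked_files", []):
        print(f"  file created/modified: {path}")

agent.event_emitter.on(event=["task_complete", "task_solve_end"], listener=report_tracked_files)
answer = agent.solve_task("Write a short report to report.md", max_iterations=10)
```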
@@ -374,6 +390,7 @@

  # Prepare chat system prompt with tool information
  tools_prompt = self._get_tools_names_prompt()
+ logger.debug(tools_prompt)
  if self.tool_mode:
      tools_prompt += f"\nPrioritized tool mode: {self.tool_mode}. Prefer tools related to {self.tool_mode} when applicable."

@@ -472,6 +489,170 @@
  self._emit_event("chat_response", {"response": response_content})
  return response_content

+
+ def chat_news_specific(
+     self,
+     message: str,
+     streaming: bool = False,
+     clear_memory: bool = False,
+     auto_tool_call: bool = True,
+ ) -> str:
+     """Engage in a conversational chat_news_specific with the user (synchronous version).
+
+     Ideal for synchronous applications. For asynchronous contexts, use `async_chat_news_specific`.
+
+     Args:
+         message: The user's input message
+         streaming: Whether to stream the response
+         clear_memory: Whether to clear memory before starting
+         auto_tool_call: Whether to automatically execute detected tool calls and interpret results
+
+     Returns:
+         The assistant's response
+     """
+     logger.debug(f"chat_news_specificting synchronously with message: {message}, auto_tool_call: {auto_tool_call}")
+     try:
+         loop = asyncio.get_event_loop()
+     except RuntimeError:
+         loop = asyncio.new_event_loop()
+         asyncio.set_event_loop(loop)
+
+     return loop.run_until_complete(self.async_chat_news_specific(message, streaming, clear_memory, auto_tool_call))
+
+ async def async_chat_news_specific(
+     self,
+     message: str,
+     streaming: bool = False,
+     clear_memory: bool = False,
+     auto_tool_call: bool = True,
+ ) -> str:
+     """Engage in a conversational chat with the user (asynchronous version).
+
+     Ideal for asynchronous applications. For synchronous contexts, use `chat`.
+
+     Args:
+         message: The user's input message
+         streaming: Whether to stream the response
+         clear_memory: Whether to clear memory before starting
+         auto_tool_call: Whether to automatically execute detected tool calls and interpret results
+
+     Returns:
+         The assistant's response
+     """
+     logger.debug(f"Chatting asynchronously with message: {message}, auto_tool_call: {auto_tool_call}")
+     if clear_memory:
+         self.clear_memory()
+
+     # Prepare chat system prompt with tool information
+     tools_prompt = self._get_tools_names_prompt()
+     logger.debug(tools_prompt)
+     if self.tool_mode:
+         tools_prompt += f"\nPrioritized tool mode: {self.tool_mode}. Prefer tools related to {self.tool_mode} when applicable."
+
+     full_chat_prompt = self._render_template(
+         'chat_system_prompt.j2',
+         persona=self.chat_system_prompt,
+         tools_prompt=tools_prompt
+     )
+
+     if not self.memory.memory or self.memory.memory[0].role != "system":
+         self.memory.add(Message(role="system", content=full_chat_prompt))
+
+     self._emit_event("chat_start", {"message": message})
+
+     # Add user message to memory
+     self.memory.add(Message(role="user", content=message))
+     self._update_total_tokens(self.memory.memory, "")
+
+     # Iterative tool usage with auto-execution
+     current_prompt = message
+     response_content = ""
+     max_tool_iterations = 5  # Prevent infinite tool loops
+     tool_iteration = 0
+
+     while tool_iteration < max_tool_iterations:
+         try:
+             if streaming:
+                 content = ""
+                 # When streaming is enabled, the GenerativeModel._async_stream_response method
+                 # already emits the stream_chunk events, so we don't need to emit them again here
+                 async_stream = await self.model.async_generate_with_history(
+                     messages_history=self.memory.memory,
+                     prompt=current_prompt,
+                     streaming=True,
+                 )
+                 # Just collect the chunks without re-emitting events
+                 async for chunk in async_stream:
+                     content += chunk
+                 response = ResponseStats(
+                     response=content,
+                     usage=TokenUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0),
+                     model=self.model.model,
+                     finish_reason="stop",
+                 )
+             else:
+                 response = await self.model.async_generate_with_history(
+                     messages_history=self.memory.memory,
+                     prompt=current_prompt,
+                     streaming=False,
+                 )
+                 content = response.response
+
+             self.total_tokens = response.usage.total_tokens if not streaming else self.total_tokens
+
+             # Observe response for tool calls
+             observation = await self._async_observe_response(content)
+             if observation.executed_tool and auto_tool_call:
+                 print("observation.executed_tool : ", observation.executed_tool)
+                 # If any news tool is used, return immediately
+                 if "googlenews" in observation.executed_tool.lower() or \
+                    "duckduckgo" in observation.executed_tool.lower() or \
+                    "duckduckgosearch" in observation.executed_tool.lower():
+                     self._emit_event("chat_response", {"response": observation.next_prompt})
+                     return observation.next_prompt
+                 # Tool was executed; process result and continue
+                 current_prompt = observation.next_prompt
+
+                 # In chat mode, format the response with clear tool call visualization
+                 if not self.task_to_solve.strip():  # We're in chat mode
+                     # Format the response to clearly show the tool call and result
+                     # Use a format that task_runner.py can parse and display nicely
+
+                     # For a cleaner look, insert a special delimiter that task_runner.py can recognize
+                     # to separate tool call from result
+                     response_content = f"{content}\n\n__TOOL_RESULT_SEPARATOR__{observation.executed_tool}__\n{observation.next_prompt}"
+                 else:
+                     # In task mode, keep the original behavior
+                     response_content = observation.next_prompt
+
+                 tool_iteration += 1
+                 self.memory.add(Message(role="assistant", content=content))  # Original tool call
+                 self.memory.add(Message(role="user", content=observation.next_prompt))  # Tool result
+                 logger.debug(f"Tool executed: {observation.executed_tool}, iteration: {tool_iteration}")
+             elif not observation.executed_tool and "<action>" in content and auto_tool_call:
+                 # Detected malformed tool call attempt; provide feedback and exit loop
+                 response_content = (
+                     f"{content}\n\n⚠️ Error: Invalid tool call format detected. "
+                     "Please use the exact XML structure as specified in the system prompt:\n"
+                     "```xml\n<action>\n<tool_name>\n <parameter_name>value</parameter_name>\n</tool_name>\n</action>\n```"
+                 )
+                 break
+             else:
+                 # No tool executed or auto_tool_call is False; final response
+                 response_content = content
+                 break
+
+         except Exception as e:
+             logger.error(f"Error during async chat: {str(e)}")
+             response_content = f"Error: {str(e)}"
+             break
+
+     self._update_session_memory(message, response_content)
+     self._emit_event("chat_response", {"response": response_content})
+     return response_content
+
+
+
  def _observe_response(self, content: str, iteration: int = 1) -> ObserveResponseResult:
      """Analyze the assistant's response and determine next steps (synchronous wrapper).

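A hedged usage sketch for the new `chat_news_specific` entry point added above; the signature comes from the hunk, while the agent instance and the question are illustrative:

```python
# Sketch: the new news-oriented chat entry point (signature taken from the hunk above).
# Assumption: `agent` was built with a GoogleNews or DuckDuckGo search tool registered,
# since those tool results are returned directly by the early-return branch above.
reply = agent.chat_news_specific(
    "Latest news about open-source LLM releases",
    streaming=False,
    clear_memory=True,
    auto_tool_call=True,
)
print(reply)
```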
@@ -535,11 +716,16 @@
  if not executed_tool:
      return self._handle_tool_execution_failure(response)

+ # Track files when write_file_tool or writefile is used
+ if (tool_name in ["write_file_tool", "writefile", "edit_whole_content", "replace_in_file", "replaceinfile", "EditWholeContent"]) and "file_path" in arguments_with_values:
+     self._track_file(arguments_with_values["file_path"], tool_name)
+
  variable_name = self.variable_store.add(response)
  new_prompt = self._format_observation_response(response, executed_tool, variable_name, iteration)

  # In chat mode, don't set answer; in task mode, set answer only for task_complete
  is_task_complete_answer = executed_tool == "task_complete" and not is_chat_mode
+
  return ObserveResponseResult(
      next_prompt=new_prompt,
      executed_tool=executed_tool,
@@ -586,12 +772,12 @@
  logger.info(f"Tool '{tool_name}' requires validation.")
  validation_id = str(uuid.uuid4())
  logger.info(f"Validation ID: {validation_id}")
-
+
  self._emit_event(
      "tool_execute_validation_start",
      {
          "validation_id": validation_id,
-         "tool_name": tool_name,
+         "tool_name": tool_name,
          "arguments": arguments_with_values
      },
  )
@@ -602,7 +788,7 @@
      + "\n".join([f" <{key}>{value}</{key}>" for key, value in arguments_with_values.items()])
      + "\n</arguments>\nYes or No"
  )
- permission_granted = await self.ask_for_user_validation(validation_id=validation_id, question=question_validation)
+ permission_granted = await self.ask_for_user_validation(validation_id, question_validation)

  self._emit_event(
      "tool_execute_validation_end",
@@ -639,13 +825,14 @@
  response = tool.execute(**converted_args)

  # Post-process tool response if needed
- response = self._post_process_tool_response(tool_name, response)
+ if (tool.need_post_process):
+     response = self._post_process_tool_response(tool_name, response)

  executed_tool = tool.name
  except Exception as e:
      response = f"Error executing tool: {tool_name}: {str(e)}\n"
      executed_tool = ""
-
+
  self._emit_event(
      "tool_execution_end", {"tool_name": tool_name, "arguments": arguments_with_values, "response": response}
  )
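The change above gates `_post_process_tool_response` on a per-tool `need_post_process` flag (added to `quantalogic/tools/tool.py` in this release). A sketch of a custom tool opting out, assuming the flag is a plain boolean field that defaults to True:

```python
# Sketch: a custom tool that opts out of agent-side post-processing.
# Assumption: need_post_process is a plain boolean field on Tool defaulting to True.
from quantalogic.tools.tool import Tool

class RawOutputTool(Tool):  # hypothetical example tool
    name: str = "raw_output_tool"
    description: str = "Returns its input unchanged; the agent skips _post_process_tool_response."
    need_post_process: bool = False

    def execute(self, text: str) -> str:
        return text
```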
@@ -1311,21 +1498,45 @@

  required = "(required)" if param_info.get("required", False) else "(optional)"
  default = f" default: {param_info['default']}" if "default" in param_info else ""
- param_desc = f"{param_name} {required}{default}"
+ param_type = param_info.get("type", "string")
+ param_desc = f"{param_name} ({param_type}) {required}{default}"
  params.append(param_desc)
  except Exception as e:
      logger.debug(f"Error parsing schema for {tool_name}: {str(e)}")
-
- # Special case for duckduckgo_tool
- if tool_name == "duckduckgo_tool" and not any(p.startswith("max_results ") for p in params):
-     params.append("max_results (required) default: 5")
-
- # Special case for other search tools that might need max_results
- if "search" in tool_name.lower() and not any(p.startswith("max_results ") for p in params):
-     params.append("max_results (optional) default: 5")
-
- param_str = ", ".join(params) if params else "No parameters required"
- tool_descriptions.append(f"{tool_name}: {param_str}")
+
+ # Enhanced tool-specific parameter descriptions
+ if tool_name == "googlenews":
+     params = [
+         "query (string, required) - The search query string",
+         "language (string, optional) default: en - Language code (e.g., en, fr, es)",
+         "period (string, optional) default: 7d - Time period (1d, 7d, 30d)",
+         "max_results (integer, required) default: 5 - Number of results to return",
+         "country (string, optional) default: US - Country code (e.g., US, GB, FR)",
+         "sort_by (string, optional) default: relevance - Sort by (relevance, date)",
+         "analyze (boolean, optional) default: False - Whether to analyze results"
+     ]
+ elif tool_name == "duckduckgosearch":
+     params = [
+         "query (string, required) - The search query string",
+         "max_results (integer, required) default: 5 - Number of results to return",
+         "time_period (string, optional) default: d - Time period (d: day, w: week, m: month)",
+         "region (string, optional) default: wt-wt - Region code for search results"
+     ]
+ elif tool_name == "llm":
+     params = [
+         "system_prompt (string, required) - The persona or system prompt to guide the language model's behavior",
+         "prompt (string, required) - The question to ask the language model. Supports interpolation with $var$ syntax",
+         "temperature (float, required) default: 0.5 - Sampling temperature between 0.0 (no creativity) and 1.0 (full creativity)"
+     ]
+ elif tool_name == "task_complete":
+     params = [
+         "answer (string, required) - Your final answer or response to complete the task"
+     ]
+ elif "search" in tool_name.lower() and not params:
+     params.append("max_results (integer, optional) default: 5 - Number of results to return")
+
+ param_str = "\n - ".join(params) if params else "No parameters required"
+ tool_descriptions.append(f"{tool_name}:\n - {param_str}")
  except Exception as e:
      logger.debug(f"Error processing tool {tool_name}: {str(e)}")
      # Still include the tool in the list, but with minimal info
@@ -1468,4 +1679,37 @@
  return template.render(**kwargs)
  except Exception as e:
      logger.error(f"Error rendering template {template_name}: {str(e)}")
- raise
+ raise
+
+ def _track_file(self, file_path: str, tool_name: str) -> None:
+     """Track files created or modified by tools.
+
+     Args:
+         file_path: Path to the file to track
+         tool_name: Name of the tool that created/modified the file
+     """
+     try:
+         # Handle /tmp directory for write tools
+         if tool_name in ["write_file_tool", "writefile", "edit_whole_content", "replace_in_file", "replaceinfile", "EditWholeContent"]:
+             if not file_path.startswith("/tmp/"):
+                 file_path = os.path.join("/tmp", file_path.lstrip("/"))
+
+         # For other tools, ensure we have absolute path
+         elif not os.path.isabs(file_path):
+             file_path = os.path.abspath(os.path.join(os.getcwd(), file_path))
+
+         # Resolve any . or .. in the path
+         tracked_path = os.path.realpath(file_path)
+
+         # For write tools, ensure path is in /tmp
+         if tool_name in ["write_file_tool", "writefile"] and not tracked_path.startswith("/tmp/"):
+             logger.warning(f"Attempted to track file outside /tmp: {tracked_path}")
+             return
+
+         # Add to tracked files if not already present
+         if tracked_path not in self.tracked_files:
+             self.tracked_files.append(tracked_path)
+             logger.debug(f"Added {tracked_path} to tracked files")
+
+     except Exception as e:
+         logger.error(f"Error tracking file {file_path}: {str(e)}")
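A hedged illustration of the path handling in `_track_file` above, using stdlib calls only; the concrete file names are examples:

```python
# Sketch: the normalization applied by _track_file above (illustrative inputs, stdlib only).
import os

# Write tools: relative paths are rebased under /tmp before tracking.
write_path = os.path.join("/tmp", "notes/report.md".lstrip("/"))  # -> /tmp/notes/report.md

# Other tools: relative paths are resolved against the current working directory,
# then realpath collapses "."/".." segments and resolves symlinks.
other_path = os.path.realpath(os.path.abspath(os.path.join(os.getcwd(), "data/../data/input.csv")))
print(write_path, other_path)
```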
quantalogic/create_custom_agent.py CHANGED
@@ -10,11 +10,15 @@ from quantalogic.console_print_token import console_print_token
  from quantalogic.event_emitter import EventEmitter
  from quantalogic.tools.tool import Tool

- # Configure loguru to output debug messages
+ # Configure loguru to output only INFO and above
  logger.remove()  # Remove default handler
- logger.add(sink=lambda msg: print(msg, end=""), level="DEBUG")  # Add a new handler that prints to console
-
+ logger.add(
+     sink=lambda msg: print(msg, end=""),
+     level="INFO",
+     format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>"
+ )

+ # Helper function to import tool classes
  def _import_tool(module_path: str, class_name: str) -> Type[Tool]:
      """
      Import a tool class from a module path using standard Python imports.
@@ -78,6 +82,7 @@ TOOL_IMPORTS = {
  # File Tools
  "download_http_file": lambda: _import_tool("quantalogic.tools.utilities", "PrepareDownloadTool"),
  "write_file": lambda: _import_tool("quantalogic.tools.write_file_tool", "WriteFileTool"),
+ "file_tracker": lambda: _import_tool("quantalogic.tools.file_tracker_tool", "FileTrackerTool"),
  "edit_whole_content": lambda: _import_tool("quantalogic.tools", "EditWholeContentTool"),
  "read_file_block": lambda: _import_tool("quantalogic.tools", "ReadFileBlockTool"),
  "read_file": lambda: _import_tool("quantalogic.tools", "ReadFileTool"),
@@ -128,14 +133,15 @@ TOOL_IMPORTS = {
  # Product Hunt Tools
  "product_hunt_tool": lambda: _import_tool("quantalogic.tools.product_hunt", "ProductHuntTool"),

- # RAG Tools
- "rag_tool": lambda: _import_tool("quantalogic.tools.rag_tool", "RagTool"),
+ # RAG Tools
+ "rag_tool_hf": lambda: _import_tool("quantalogic.tools.rag_tool", "RagToolHf"),

  # Utility Tools
  "task_complete": lambda: _import_tool("quantalogic.tools.task_complete_tool", "TaskCompleteTool"),
  "input_question": lambda: _import_tool("quantalogic.tools.utilities", "InputQuestionTool"),
  "markitdown": lambda: _import_tool("quantalogic.tools.utilities", "MarkitdownTool"),
- "read_html": lambda: _import_tool("quantalogic.tools.utilities", "ReadHTMLTool"),
+ "read_html": lambda: _import_tool("quantalogic.tools.read_html_tool", "ReadHTMLTool"),
+ "oriented_llm_tool": lambda: _import_tool("quantalogic.tools.utilities", "OrientedLLMTool"),
  "presentation_llm": lambda: _import_tool("quantalogic.tools.presentation_tools", "PresentationLLMTool"),
  "sequence": lambda: _import_tool("quantalogic.tools.utilities", "SequenceTool"),
  "csv_processor": lambda: _import_tool("quantalogic.tools.utilities", "CSVProcessorTool"),
@@ -152,7 +158,8 @@ def create_custom_agent(
  max_tokens_working_memory: Optional[int] = None,
  specific_expertise: str = "",
  tools: Optional[list[dict[str, Any]]] = None,
- memory: Optional[AgentMemory] = None
+ memory: Optional[AgentMemory] = None,
+ agent_mode: str = "react"
  ) -> Agent:
  """Create an agent with lazy-loaded tools and graceful error handling.

@@ -169,6 +176,8 @@
  Returns:
      Agent: Configured agent instance
  """
+ logger.info("Creating custom agent with model: {}".format(model_name))
+ logger.info("tools: {}".format(tools))
  # Create storage directory for RAG
  storage_dir = os.path.join(os.path.dirname(__file__), "storage", "rag")
  os.makedirs(storage_dir, exist_ok=True)
@@ -188,8 +197,9 @@
  tool_configs = {
      # LLM Tools with shared parameters
      "llm": lambda params: create_tool_instance(TOOL_IMPORTS["llm"](), **get_llm_params(params)),
+     "oriented_llm_tool": lambda params: create_tool_instance(TOOL_IMPORTS["oriented_llm_tool"](), **get_llm_params(params)),
      "llm_vision": lambda params: create_tool_instance(TOOL_IMPORTS["llm_vision"](),
-         model_name=params.get("vision_model_name") or vision_model_name,
+         model_name=params.get("vision_model_name") or "gpt-4-vision",
          on_token=console_print_token if not no_stream else None,
          event_emitter=event_emitter
      ) if vision_model_name else None,
@@ -203,6 +213,7 @@
  "download_http_file": lambda _: create_tool_instance(TOOL_IMPORTS["download_http_file"]()),
  "duck_duck_go_search": lambda _: create_tool_instance(TOOL_IMPORTS["duck_duck_go_search"]()),
  "write_file": lambda _: create_tool_instance(TOOL_IMPORTS["write_file"]()),
+ "file_tracker": lambda _: create_tool_instance(TOOL_IMPORTS["file_tracker"]()),
  "task_complete": lambda _: create_tool_instance(TOOL_IMPORTS["task_complete"]()),
  "edit_whole_content": lambda _: create_tool_instance(TOOL_IMPORTS["edit_whole_content"]()),
  "execute_bash_command": lambda _: create_tool_instance(TOOL_IMPORTS["execute_bash_command"]()),
@@ -296,11 +307,12 @@
  "nasa_apod_tool": lambda _: create_tool_instance(TOOL_IMPORTS["nasa_apod_tool"]()),
  "product_hunt_tool": lambda _: create_tool_instance(TOOL_IMPORTS["product_hunt_tool"]()),

- # RAG tool
- "rag_tool": lambda params: create_tool_instance(TOOL_IMPORTS["rag_tool"](),
-     vector_store=params.get("vector_store", "chroma"),
-     embedding_model=params.get("embedding_model", "openai"),
-     persist_dir=storage_dir,
+ # Multilingual RAG tool
+ "rag_tool_hf": lambda params: create_tool_instance(TOOL_IMPORTS["rag_tool_hf"](),
+     persist_dir=params.get("persist_dir", "./storage/multilingual_rag"),
+     use_ocr_for_pdfs=params.get("use_ocr_for_pdfs", False),
+     ocr_model=params.get("ocr_model", "openai/gpt-4o-mini"),
+     embed_model=params.get("embed_model", "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"),
      document_paths=params.get("document_paths", [])
  ),
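A hedged configuration sketch for the new `rag_tool_hf` entry above; the keys mirror the parameters read via `params.get(...)` in the hunk, the document path is illustrative, and it assumes tool parameters are passed through from the `tools` list as in the example removed further down:

```python
# Sketch: requesting the new multilingual RAG tool from create_custom_agent.
# The parameter keys mirror the params.get(...) calls above; the document path is illustrative.
tools_config = [
    {
        "type": "rag_tool_hf",
        "parameters": {
            "persist_dir": "./storage/multilingual_rag",
            "use_ocr_for_pdfs": False,
            "ocr_model": "openai/gpt-4o-mini",
            "embed_model": "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
            "document_paths": ["docs/handbook.pdf"],
        },
    },
]
```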
@@ -357,26 +369,6 @@
  else:
      logger.warning(f"Unknown tool type: {tool_type} - Skipping")

- # Add download tool if any write tool is present
- if has_write_tool:
-     try:
-         # Get the tool class first
-         download_tool_class = TOOL_IMPORTS["download_file_tool"]()
-         if download_tool_class:
-             # Create an instance with the name 'download'
-             download_tool = create_tool_instance(download_tool_class, name="download")
-             if download_tool:
-                 agent_tools.append(download_tool)
-                 logger.info("Added download tool automatically due to write tool presence")
-             else:
-                 logger.warning("Failed to instantiate download tool")
-         else:
-             logger.warning("Download tool class not found")
-     except ImportError as e:
-         logger.warning(f"Failed to load download tool: Required library missing - {str(e)}")
-     except Exception as e:
-         logger.error(f"Failed to add download tool: {str(e)}")
-

  # Create and return the agent
  try:
@@ -388,52 +380,8 @@
  max_tokens_working_memory=max_tokens_working_memory,
  specific_expertise=specific_expertise,
  memory=memory if memory else AgentMemory(),
+ agent_mode=agent_mode
  )
  except Exception as e:
      logger.error(f"Failed to create agent: {str(e)}")
      raise
-
- if __name__ == "__main__":
-     # Example usage
-     tools_config = [
-         {"type": "duck_duck_go_search", "parameters": {}},
-     ]
-
-     agent = create_custom_agent(
-         model_name="openrouter/openai/gpt-4o-mini",
-         specific_expertise="General purpose assistant",
-         tools=tools_config
-     )
-     print(f"Created agent with {len(agent.tools.tool_names())} tools")
-
-     # Display all tool names
-     print("Agent Tools:")
-     for tool_name in agent.tools.tool_names():
-         print(f"- {tool_name}")
-
-     # Set up event monitoring to track agent's lifecycle
-     # The event system provides:
-     # 1. Real-time observability into the agent's operations
-     # 2. Debugging and performance monitoring
-     # 3. Support for future analytics and optimization efforts
-     agent.event_emitter.on(
-         event=[
-             "task_complete",
-             "task_think_start",
-             "task_think_end",
-             "tool_execution_start",
-             "tool_execution_end",
-             "error_max_iterations_reached",
-             "memory_full",
-             "memory_compacted",
-             "memory_summary",
-         ],
-         listener=console_print_events,
-     )
-
-     # Enable token streaming for detailed output
-     agent.event_emitter.on(event=["stream_chunk"], listener=console_print_token)
-
-     # Solve task with streaming enabled
-     result = agent.solve_task("Who is the Prime Minister of France in 2025 ?", max_iterations=10, streaming=True)
-     print(result)
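The in-module `__main__` example was removed above. A hedged standalone equivalent using the updated signature (the new `agent_mode` argument comes from this diff; import paths, the model name, and the task are assumptions or placeholders):

```python
# Sketch: standalone replacement for the removed in-module example, using the 0.60.0 signature.
# Assumptions: import paths for create_custom_agent and console_print_events; model and task are placeholders.
from quantalogic.console_print_events import console_print_events
from quantalogic.create_custom_agent import create_custom_agent

agent = create_custom_agent(
    model_name="openrouter/openai/gpt-4o-mini",
    specific_expertise="General purpose assistant",
    tools=[{"type": "duck_duck_go_search", "parameters": {}}],
    agent_mode="react",  # new parameter introduced in this release
)

# Reuse the lifecycle events from the removed example, plus the enriched task_solve_end payload.
agent.event_emitter.on(
    event=["task_think_start", "tool_execution_start", "tool_execution_end", "task_complete", "task_solve_end"],
    listener=console_print_events,
)

result = agent.solve_task("Summarize today's AI news", max_iterations=10, streaming=False)
print(result)
```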
quantalogic/prompts/chat_system_prompt.j2 CHANGED
@@ -22,13 +22,17 @@ Tools must be called using the following XML format, with NO additional text:

  #### Examples of Correct Tool Usage

- **Example 1 - Search Tool:**
+ **Example 1 - Duck Duck Go search tool:**
  ```xml
  <action>
- <duckduckgo_tool>
-     <query>search query text</query>
-     <max_results>5</max_results> <!-- Always include max_results for search -->
- </duckduckgo_tool>
+ <duckduckgosearch>
+     <query>machine learning</query>
+     <max_results>20</max_results>
+     <search_type>images</search_type>
+     <region>us-en</region>
+     <safesearch>moderate</safesearch>
+     <timelimit>d</timelimit>
+ </duckduckgosearch>
  </action>
  ```

@@ -50,5 +54,4 @@ Tools must be called using the following XML format, with NO additional text:
  **Tools Available**: None
  {% endif %}

- Don't invent tools that are not listed.
-
+ Don't invent tools that are not listed.