quantalogic 0.2.15__py3-none-any.whl → 0.2.17__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
@@ -7,9 +7,12 @@ from litellm import completion, exceptions, get_max_tokens, get_model_info, toke
 from loguru import logger
 from pydantic import BaseModel, Field, field_validator
 
+from quantalogic.event_emitter import EventEmitter  # Importing the EventEmitter class
+
 MIN_RETRIES = 1
 
 
+# Define the Message class for conversation handling
 class Message(BaseModel):
     """Represents a message in a conversation with a specific role and content."""
 
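Before the class definitions resume, it helps to see the emitter contract this import brings in. A minimal sketch, assuming EventEmitter passes the event name plus the emitted payload to each listener, as the handle_stream_chunk(event, data) handler in main.py below suggests:

    from quantalogic.event_emitter import EventEmitter

    emitter = EventEmitter()

    def on_chunk(event: str, data: str) -> None:
        # Listener signature mirrors handle_stream_chunk in main.py below.
        print(f"[{event}] {data}")

    emitter.on(event="stream_chunk", listener=on_chunk)
    emitter.emit("stream_chunk", "partial text")  # prints: [stream_chunk] partial text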
@@ -70,21 +73,22 @@ class GenerativeModel:
         self,
         model: str = "ollama/qwen2.5-coder:14b",
         temperature: float = 0.7,
+        event_emitter: EventEmitter = None,  # EventEmitter instance
     ) -> None:
         """Initialize a generative model with configurable parameters.
 
-        Configure the generative model with specified model,
-        temperature, and maximum token settings.
-
         Args:
-            model: Model identifier.
-                Defaults to "ollama/qwen2.5-coder:14b".
-            temperature: Sampling temperature between 0 and 1.
-                Defaults to 0.7.
+            model: Model identifier. Defaults to "ollama/qwen2.5-coder:14b".
+            temperature: Temperature parameter for controlling randomness in generation.
+                Higher values (e.g. 0.8) make output more random, lower values (e.g. 0.2)
+                make it more deterministic. Defaults to 0.7.
+            event_emitter: Optional event emitter instance for handling asynchronous events
+                and callbacks during text generation. Defaults to None.
         """
         logger.debug(f"Initializing GenerativeModel with model={model}, temperature={temperature}")
         self.model = model
         self.temperature = temperature
+        self.event_emitter = event_emitter or EventEmitter()  # Initialize event emitter
         self._get_model_info_cached = functools.lru_cache(maxsize=32)(self._get_model_info_impl)
 
         # Define retriable exceptions based on LiteLLM's exception mapping
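End to end, the constructor change lets callers share one emitter between the model and UI code. A hedged sketch; the module path quantalogic.generative_model is assumed, and the model name is just the documented default:

    from quantalogic.event_emitter import EventEmitter
    from quantalogic.generative_model import GenerativeModel, Message  # module path assumed

    emitter = EventEmitter()
    model = GenerativeModel(
        model="ollama/qwen2.5-coder:14b",
        temperature=0.2,        # lower temperature -> more deterministic output
        event_emitter=emitter,  # omit to get a private EventEmitter()
    )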
@@ -109,28 +113,20 @@ class GenerativeModel:
             exceptions.PermissionDeniedError,
         )
 
-    # Retry on specific retriable exceptions
+    # Generate a response with conversation history and optional streaming
     def generate_with_history(
-        self, messages_history: list[Message], prompt: str, image_url: str | None = None
+        self, messages_history: list[Message], prompt: str, image_url: str | None = None, streaming: bool = False
     ) -> ResponseStats:
         """Generate a response with conversation history and optional image.
 
-        Generates a response based on previous conversation messages,
-        a new user prompt, and an optional image URL.
-
         Args:
             messages_history: Previous conversation messages.
             prompt: Current user prompt.
             image_url: Optional image URL for visual queries.
+            streaming: Whether to stream the response.
 
         Returns:
-            Detailed response statistics.
-
-        Raises:
-            openai.AuthenticationError: If authentication fails.
-            openai.InvalidRequestError: If the request is invalid (e.g., context length exceeded).
-            openai.APIError: For content policy violations or other API errors.
-            Exception: For other unexpected errors.
+            Detailed response statistics or a generator in streaming mode.
         """
         messages = [{"role": msg.role, "content": str(msg.content)} for msg in messages_history]
 
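For the non-streaming path nothing changes for existing callers. An illustrative call, reusing the model instance from the sketch above (the conversation content is invented):

    history = [
        Message(role="user", content="What is QuantaLogic?"),
        Message(role="assistant", content="An AI assistant framework."),
    ]
    # streaming defaults to False, so this still returns ResponseStats
    stats = model.generate_with_history(history, prompt="List its agent modes.")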
@@ -147,6 +143,10 @@ class GenerativeModel:
         else:
             messages.append({"role": "user", "content": str(prompt)})
 
+        if streaming:
+            self.event_emitter.emit("stream_start")  # Emit stream start event
+            return self._stream_response(messages)  # Return generator
+
         try:
             logger.debug(f"Generating response for prompt: {prompt}")
 
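With streaming=True the method short-circuits here and hands back the generator from _stream_response, even though the annotated return type is still ResponseStats, so callers have to branch on the flag themselves. A speculative consumption sketch:

    # Chunks are yielded and simultaneously mirrored on the "stream_chunk" event.
    for chunk in model.generate_with_history([], prompt="Tell me a joke.", streaming=True):
        print(chunk, end="", flush=True)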
@@ -171,54 +171,68 @@ class GenerativeModel:
             )
 
         except Exception as e:
-            error_details = {
-                "error_type": type(e).__name__,
-                "message": str(e),
-                "model": self.model,
-                "provider": getattr(e, "llm_provider", "unknown"),
-                "status_code": getattr(e, "status_code", None),
-            }
-
-            logger.error("LLM Generation Error: {}", error_details)
-            logger.debug(f"Error details: {error_details}")
-            logger.debug(f"Model: {self.model}, Temperature: {self.temperature}")
-
-            # Handle authentication and permission errors
-            if isinstance(e, self.AUTH_EXCEPTIONS):
-                logger.debug("Authentication error occurred")
-                raise openai.AuthenticationError(
-                    f"Authentication failed with provider {error_details['provider']}"
-                ) from e
-
-            # Handle context window errors
-            if isinstance(e, self.CONTEXT_EXCEPTIONS):
-                raise openai.InvalidRequestError(f"Context window exceeded or invalid request: {str(e)}") from e
-
-            # Handle content policy violations
-            if isinstance(e, self.POLICY_EXCEPTIONS):
-                raise openai.APIError(f"Content policy violation: {str(e)}") from e
-
-            # For other exceptions, preserve the original error type if it's from OpenAI
-            if isinstance(e, openai.OpenAIError):
-                raise
-
-            # Wrap unknown errors in APIError
-            raise openai.APIError(f"Unexpected error during generation: {str(e)}") from e
-
-    def generate(self, prompt: str, image_url: str | None = None) -> ResponseStats:
-        """Generate a response without conversation history.
+            self._handle_generation_exception(e)
+
+    def _stream_response(self, messages):
+        """Private method to handle streaming responses."""
+        try:
+            for chunk in completion(
+                temperature=self.temperature,
+                model=self.model,
+                messages=messages,
+                num_retries=MIN_RETRIES,
+                stream=True,  # Enable streaming
+            ):
+                if chunk.choices[0].delta.content is not None:
+                    self.event_emitter.emit("stream_chunk", chunk.choices[0].delta.content)
+                    yield chunk.choices[0].delta.content  # Yield each chunk of content
 
-        Generates a response for a single user prompt without
-        any previous conversation context.
+            self.event_emitter.emit("stream_end")  # Emit stream end event
+        except Exception as e:
+            logger.error(f"Streaming error: {str(e)}")
+            raise
+
+    def generate(self, prompt: str, image_url: str | None = None, streaming: bool = False) -> ResponseStats:
+        """Generate a response without conversation history.
 
         Args:
             prompt: User prompt.
             image_url: Optional image URL for visual queries.
+            streaming: Whether to stream the response.
 
         Returns:
-            Detailed response statistics.
+            Detailed response statistics or a generator in streaming mode.
         """
-        return self.generate_with_history([], prompt, image_url)
+        return self.generate_with_history([], prompt, image_url, streaming)
+
+    def _handle_generation_exception(self, e):
+        """Handle exceptions during generation."""
+        error_details = {
+            "error_type": type(e).__name__,
+            "message": str(e),
+            "model": self.model,
+            "provider": getattr(e, "llm_provider", "unknown"),
+            "status_code": getattr(e, "status_code", None),
+        }
+
+        logger.error("LLM Generation Error: {}", error_details)
+        logger.debug(f"Error details: {error_details}")
+        logger.debug(f"Model: {self.model}, Temperature: {self.temperature}")
+
+        if isinstance(e, self.AUTH_EXCEPTIONS):
+            logger.debug("Authentication error occurred")
+            raise openai.AuthenticationError(f"Authentication failed with provider {error_details['provider']}") from e
+
+        if isinstance(e, self.CONTEXT_EXCEPTIONS):
+            raise openai.InvalidRequestError(f"Context window exceeded or invalid request: {str(e)}") from e
+
+        if isinstance(e, self.POLICY_EXCEPTIONS):
+            raise openai.APIError(f"Content policy violation: {str(e)}") from e
+
+        if isinstance(e, openai.OpenAIError):
+            raise
+
+        raise openai.APIError(f"Unexpected error during generation: {str(e)}") from e
 
     def get_max_tokens(self) -> int:
         """Get the maximum number of tokens that can be generated by the model."""
@@ -239,17 +253,9 @@ class GenerativeModel:
         return token_counter(model=self.model, messages=litellm_messages)
 
     def _get_model_info_impl(self, model_name: str) -> dict:
-        """Get information about the model with prefix fallback logic.
-
-        Attempts to find model info by progressively removing provider prefixes.
-        Raises ValueError if no valid model configuration is found.
-        Results are cached to improve performance.
-
-        Example:
-        openrouter/openai/gpt-4o-mini → openai/gpt-4o-mini → gpt-4o-mini
-        """
+        """Get information about the model with prefix fallback logic."""
         original_model = model_name
-
+
         while True:
             try:
                 logger.debug(f"Attempting to retrieve model info for: {model_name}")
@@ -259,22 +265,19 @@ class GenerativeModel:
                 return model_info
             except Exception:
                 pass
-
+
             # Try removing one prefix level
-            parts = model_name.split('/')
+            parts = model_name.split("/")
             if len(parts) <= 1:
                 break
-            model_name = '/'.join(parts[1:])
-
+            model_name = "/".join(parts[1:])
+
         error_msg = f"Could not find model info for {original_model} after trying: {self.model} → {model_name}"
         logger.error(error_msg)
         raise ValueError(error_msg)
 
     def get_model_info(self, model_name: str = None) -> dict:
-        """Get cached information about the model.
-
-        If no model name is provided, uses the current model.
-        """
+        """Get cached information about the model."""
         if model_name is None:
             model_name = self.model
         return self._get_model_info_cached(model_name)
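The loop strips one provider prefix per attempt, e.g. openrouter/openai/gpt-4o-mini → openai/gpt-4o-mini → gpt-4o-mini (the worked example dropped from the old docstring). A standalone sketch of the same candidate ordering, with a hypothetical helper name:

    def prefix_candidates(model_name: str) -> list[str]:
        # Mirrors the while-loop above: full name first, then drop one prefix at a time.
        candidates = [model_name]
        parts = model_name.split("/")
        while len(parts) > 1:
            parts = parts[1:]
            candidates.append("/".join(parts))
        return candidates

    assert prefix_candidates("openrouter/openai/gpt-4o-mini") == [
        "openrouter/openai/gpt-4o-mini",
        "openai/gpt-4o-mini",
        "gpt-4o-mini",
    ]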
quantalogic/main.py CHANGED
@@ -10,15 +10,20 @@ from typing import Optional
 import click
 from loguru import logger
 
+from quantalogic.console_print_events import console_print_events
+from quantalogic.console_print_token import console_print_token
 from quantalogic.utils.check_version import check_if_is_latest_version
 from quantalogic.version import get_version
 
 # Configure logger
 logger.remove()  # Remove default logger
 
+from threading import Lock  # noqa: E402
+
 from rich.console import Console  # noqa: E402
 from rich.panel import Panel  # noqa: E402
 from rich.prompt import Confirm  # noqa: E402
+from rich.spinner import Spinner  # noqa: E402
 
 from quantalogic.agent import Agent  # noqa: E402
 
@@ -31,31 +36,35 @@ from quantalogic.agent_config import (  # noqa: E402
     create_orchestrator_agent,
 )
 from quantalogic.interactive_text_editor import get_multiline_input  # noqa: E402
-from quantalogic.print_event import console_print_events  # noqa: E402
-from quantalogic.search_agent import create_search_agent
+from quantalogic.search_agent import create_search_agent  # noqa: E402
 
-AGENT_MODES = ["code", "basic", "interpreter", "full", "code-basic","search"]
+AGENT_MODES = ["code", "basic", "interpreter", "full", "code-basic", "search", "search-full"]
 
 
-def create_agent_for_mode(mode: str, model_name: str, vision_model_name: str | None) -> Agent:
+def create_agent_for_mode(mode: str, model_name: str, vision_model_name: str | None, no_stream: bool = False) -> Agent:
     """Create an agent based on the specified mode."""
     logger.debug(f"Creating agent for mode: {mode} with model: {model_name}")
+    logger.debug(f"Using vision model: {vision_model_name}")
+    logger.debug(f"Using no_stream: {no_stream}")
     if mode == "code":
         logger.debug("Creating code agent without basic mode")
-        return create_coding_agent(model_name, vision_model_name, basic=False)
+        return create_coding_agent(model_name, vision_model_name, basic=False, no_stream=no_stream)
     if mode == "code-basic":
-        return create_coding_agent(model_name, vision_model_name, basic=True)
+        return create_coding_agent(model_name, vision_model_name, basic=True, no_stream=no_stream)
     elif mode == "basic":
-        return create_orchestrator_agent(model_name, vision_model_name)
+        return create_orchestrator_agent(model_name, vision_model_name, no_stream=no_stream)
     elif mode == "full":
-        return create_full_agent(model_name, vision_model_name)
+        return create_full_agent(model_name, vision_model_name, no_stream=no_stream)
     elif mode == "interpreter":
-        return create_interpreter_agent(model_name, vision_model_name)
+        return create_interpreter_agent(model_name, vision_model_name, no_stream=no_stream)
     elif mode == "search":
-        return create_search_agent(model_name)
+        return create_search_agent(model_name, no_stream=no_stream)
+    if mode == "search-full":
+        return create_search_agent(model_name, mode_full=True, no_stream=no_stream)
     else:
         raise ValueError(f"Unknown agent mode: {mode}")
 
+
 def check_new_version():
     # Randomly check for updates (1 in 10 chance)
     if random.randint(1, 10) == 1:
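The new search-full mode is reachable through the existing --mode option. An illustrative invocation, assuming the installed console script is named quantalogic (the task text is invented):

    quantalogic task --mode search-full "Find recent papers on agent frameworks"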
@@ -81,6 +90,7 @@ def check_new_version():
         except Exception:
             return
 
+
 def configure_logger(log_level: str) -> None:
     """Configure the logger with the specified log level and format."""
     logger.remove()
@@ -122,7 +132,30 @@ def get_task_from_file(file_path: str) -> str:
         raise Exception(f"Unexpected error reading file: {e}")
 
 
-def display_welcome_message(console: Console, model_name: str, vision_model_name: str | None) -> None:
+# Spinner control
+spinner_lock = Lock()
+current_spinner = None
+
+def start_spinner(console: Console) -> None:
+    """Start the thinking spinner."""
+    global current_spinner
+    with spinner_lock:
+        if current_spinner is None:
+            current_spinner = console.status("[yellow]Thinking...", spinner="dots")
+            current_spinner.start()
+
+def stop_spinner(console: Console) -> None:
+    """Stop the thinking spinner."""
+    global current_spinner
+    with spinner_lock:
+        if current_spinner is not None:
+            current_spinner.stop()
+            current_spinner = None
+
+
+def display_welcome_message(
+    console: Console, model_name: str, vision_model_name: str | None, max_iterations: int = 50
+) -> None:
     """Display the welcome message and instructions."""
     version = get_version()
     console.print(
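The lock plus module-level handle make start/stop idempotent when event callbacks fire from different threads. A small usage sketch, assuming rich is installed (the sleep is a stand-in for waiting on the model):

    import time
    from rich.console import Console

    console = Console()
    start_spinner(console)      # shows "Thinking..." once, even if called twice
    try:
        time.sleep(2)           # stand-in for model latency
    finally:
        stop_spinner(console)   # safe no-op when nothing is spinning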
@@ -135,7 +168,8 @@ def display_welcome_message(console: Console, model_name: str, vision_model_name
         f"[yellow] 🤖 System Info:[/yellow]\n\n"
         "\n"
         f"- Model: {model_name}\n"
-        f"- Vision Model: {vision_model_name}\n\n"
+        f"- Vision Model: {vision_model_name}\n"
+        f"- Max Iterations: {max_iterations}\n\n"
         "[bold magenta]💡 Pro Tips:[/bold magenta]\n\n"
         "- Be as specific as possible in your task description to get the best results!\n"
         "- Use clear and concise language when describing your task\n"
@@ -152,7 +186,7 @@
 @click.option(
     "--model-name",
     default=MODEL_NAME,
-    help='Specify the model to use (litellm format, e.g. "openrouter/deepseek-chat").',
+    help='Specify the model to use (litellm format, e.g. "openrouter/deepseek/deepseek-chat").',
 )
 @click.option(
     "--log",
@@ -167,6 +201,12 @@ def display_welcome_message(console: Console, model_name: str, vision_model_name
     default=None,
     help='Specify the vision model to use (litellm format, e.g. "openrouter/A/gpt-4o-mini").',
 )
+@click.option(
+    "--max-iterations",
+    type=int,
+    default=30,
+    help="Maximum number of iterations for task solving (default: 30).",
+)
 @click.pass_context
 def cli(
     ctx: click.Context,
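Because the group-level value is forwarded via ctx.invoke below, the iteration cap can be set either on the group or on the task subcommand, which declares the same option. An illustrative run (console script name assumed, task text invented):

    quantalogic task --max-iterations 50 "Refactor the parser module"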
@@ -176,6 +216,7 @@ def cli(
     mode: str,
     log: str,
     vision_model_name: str | None,
+    max_iterations: int,
 ) -> None:
     """QuantaLogic AI Assistant - A powerful AI tool for various tasks."""
     if version:
@@ -184,7 +225,13 @@
         sys.exit(0)
     if ctx.invoked_subcommand is None:
         ctx.invoke(
-            task, model_name=model_name, verbose=verbose, mode=mode, log=log, vision_model_name=vision_model_name
+            task,
+            model_name=model_name,
+            verbose=verbose,
+            mode=mode,
+            log=log,
+            vision_model_name=vision_model_name,
+            max_iterations=max_iterations,
         )
 
 
@@ -193,7 +240,7 @@ def cli(
 @click.option(
     "--model-name",
     default=MODEL_NAME,
-    help='Specify the model to use (litellm format, e.g. "openrouter/deepseek-chat").',
+    help='Specify the model to use (litellm format, e.g. "openrouter/deepseek/deepseek-chat").',
 )
 @click.option("--verbose", is_flag=True, help="Enable verbose output.")
 @click.option("--mode", type=click.Choice(AGENT_MODES), default="code", help="Agent mode (code/search/full).")
@@ -208,6 +255,17 @@
     default=None,
     help='Specify the vision model to use (litellm format, e.g. "openrouter/openai/gpt-4o-mini").',
 )
+@click.option(
+    "--max-iterations",
+    type=int,
+    default=30,
+    help="Maximum number of iterations for task solving (default: 30).",
+)
+@click.option(
+    "--no-stream",
+    is_flag=True,
+    help="Disable streaming output (default: streaming enabled).",
+)
 @click.argument("task", required=False)
 def task(
     file: Optional[str],
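Streaming is now on by default, so the flag is a kill switch rather than an opt-in. An illustrative run with streaming disabled (names invented as above):

    quantalogic task --no-stream --max-iterations 20 "Explain the event emitter wiring"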
@@ -217,12 +275,13 @@
     log: str,
     vision_model_name: str | None,
     task: Optional[str],
+    max_iterations: int,
+    no_stream: bool,
 ) -> None:
     """Execute a task with the QuantaLogic AI Assistant."""
     console = Console()
     switch_verbose(verbose, log)
 
-
     try:
         if file:
             task_content = get_task_from_file(file)
@@ -231,7 +290,7 @@
             check_new_version()
             task_content = task
         else:
-            display_welcome_message(console, model_name, vision_model_name)
+            display_welcome_message(console, model_name, vision_model_name, max_iterations=max_iterations)
             check_new_version()
             logger.debug("Waiting for user input...")
             task_content = get_multiline_input(console).strip()
@@ -241,14 +300,13 @@
             console.print("[yellow]No task provided. Exiting...[/yellow]")
             sys.exit(2)
 
-        if model_name != MODEL_NAME:
-            console.print(
-                Panel.fit(
-                    f"[bold]Task to be submitted:[/bold]\n{task_content}",
-                    title="[bold]Task Preview[/bold]",
-                    border_style="blue",
-                )
+        console.print(
+            Panel.fit(
+                f"[bold]Task to be submitted:[/bold]\n{task_content}",
+                title="[bold]Task Preview[/bold]",
+                border_style="blue",
             )
+        )
         if not Confirm.ask("[bold]Are you sure you want to submit this task?[/bold]"):
             console.print("[yellow]Task submission cancelled. Exiting...[/yellow]")
             sys.exit(0)
@@ -261,9 +319,13 @@
             )
         )
 
-        logger.debug(f"Creating agent for mode: {mode} with model: {model_name}")
-        agent = create_agent_for_mode(mode, model_name, vision_model_name=vision_model_name)
-        logger.debug(f"Created agent for mode: {mode} with model: {model_name}")
+        logger.debug(
+            f"Creating agent for mode: {mode} with model: {model_name}, vision model: {vision_model_name}, no_stream: {no_stream}"
+        )
+        agent = create_agent_for_mode(mode, model_name, vision_model_name=vision_model_name, no_stream=no_stream)
+        logger.debug(
+            f"Created agent for mode: {mode} with model: {model_name}, vision model: {vision_model_name}, no_stream: {no_stream}"
+        )
 
         events = [
             "task_start",
@@ -277,15 +339,46 @@
             "memory_compacted",
             "memory_summary",
         ]
+        # Add spinner control to event handlers
+        def handle_task_think_start(*args, **kwargs):
+            start_spinner(console)
+
+        def handle_task_think_end(*args, **kwargs):
+            stop_spinner(console)
+
+        def handle_stream_chunk(event: str, data: str) -> None:
+            if current_spinner:
+                stop_spinner(console)
+            if data is not None:
+                console.print(data, end="", markup=False)
+
         agent.event_emitter.on(
             event=events,
             listener=console_print_events,
         )
+
+        agent.event_emitter.on(
+            event="task_think_start",
+            listener=handle_task_think_start,
+        )
+
+        agent.event_emitter.on(
+            event="task_think_end",
+            listener=handle_task_think_end,
+        )
+
+        agent.event_emitter.on(
+            event="stream_chunk",
+            listener=handle_stream_chunk,
+        )
+
         logger.debug("Registered event handlers for agent events with events: {events}")
 
         logger.debug(f"Solving task with agent: {task_content}")
-        result = agent.solve_task(task=task_content, max_iterations=300)
-        logger.debug(f"Task solved with result: {result}")
+        if max_iterations < 1:
+            raise ValueError("max_iterations must be greater than 0")
+        result = agent.solve_task(task=task_content, max_iterations=max_iterations, streaming=not no_stream)
+        logger.debug(f"Task solved with result: {result} using {max_iterations} iterations")
 
         console.print(
             Panel.fit(
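Stripped of the CLI plumbing, the wiring pattern is: stop the spinner on the first streamed token, print tokens raw, and route everything else through console_print_events. A condensed, hypothetical sketch of the same flow, assuming the handlers above are in scope:

    agent = create_agent_for_mode("code", "ollama/qwen2.5-coder:14b", vision_model_name=None)
    agent.event_emitter.on(event="task_think_start", listener=handle_task_think_start)
    agent.event_emitter.on(event="stream_chunk", listener=handle_stream_chunk)
    result = agent.solve_task(task="Add type hints to utils.py", max_iterations=30, streaming=True)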
quantalogic/prompts.py CHANGED
@@ -90,4 +90,5 @@ Every response must contain exactly two XML blocks:
 
 ### Environment Details
 {environment}
+
 """
quantalogic/search_agent.py CHANGED
@@ -1,22 +1,23 @@
 from quantalogic.agent import Agent
 from quantalogic.tools import (
+    DuckDuckGoSearchTool,
     InputQuestionTool,
+    MarkitdownTool,
+    ReadFileBlockTool,
+    ReadFileTool,
+    RipgrepTool,
     SerpApiSearchTool,
-    DuckDuckGoSearchTool,
     TaskCompleteTool,
     WikipediaSearchTool,
-    ReadFileBlockTool,
-    ReadFileTool,
-    MarkitdownTool,
-    RipgrepTool
 )
 
 
-def create_search_agent(model_name: str) -> Agent:
+def create_search_agent(model_name: str, mode_full: bool = False) -> Agent:
     """Creates and configures a search agent with web, knowledge, and privacy-focused search tools.
 
     Args:
         model_name (str): Name of the language model to use for the agent's core capabilities
+        mode_full (bool, optional): If True, the agent will be configured with a full set of tools.
 
     Returns:
         Agent: A fully configured search agent instance with:
@@ -31,7 +32,6 @@ def create_search_agent(model_name: str) -> Agent:
 
     tools = [
         # Search tools
-        SerpApiSearchTool(),  # Web search capabilities
         DuckDuckGoSearchTool(),  # Privacy-focused web search
         WikipediaSearchTool(),  # Knowledge search capabilities
         # Basic interaction tools
@@ -45,6 +45,14 @@
         RipgrepTool(),  # Code search capabilities
     ]
 
+    if mode_full:
+        tools.extend(
+            [
+                # Search tools
+                SerpApiSearchTool(),  # Web search capabilities
+            ]
+        )
+
     return Agent(
         model_name=model_name,
         tools=tools,
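SerpApiSearchTool is now opt-in, so the default search agent runs without a SerpApi key. A hedged usage sketch (model name illustrative):

    from quantalogic.search_agent import create_search_agent

    agent = create_search_agent("openrouter/deepseek/deepseek-chat")  # DuckDuckGo + Wikipedia only
    full_agent = create_search_agent("openrouter/deepseek/deepseek-chat", mode_full=True)  # adds SerpApi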
@@ -30,7 +30,7 @@ from quantalogic.agent_config import (
     create_coding_agent,  # noqa: F401
     create_orchestrator_agent,  # noqa: F401
 )
-from quantalogic.print_event import console_print_events
+from quantalogic.console_print_events import console_print_events
 
 # Configure logger
 logger.remove()
@@ -246,7 +246,7 @@ class AgentState:
     def initialize_agent_with_sse_validation(self, model_name: str = MODEL_NAME):
         """Initialize agent with SSE-based user validation."""
         try:
-            self.agent = create_agent(model_name)
+            self.agent = create_agent(model_name, None)
 
             # Comprehensive list of agent events to track
             agent_events = [
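The extra positional argument is presumably the vision model, matching the (model_name, vision_model_name, ...) shape of the agent_config factories above; passing None keeps the server agent text-only. A sketch of that assumed signature:

    # Assumed shape, mirroring the agent_config factories; not confirmed by this diff.
    agent = create_agent(MODEL_NAME, None)  # text-only server agent
    vision_agent = create_agent(MODEL_NAME, "openrouter/openai/gpt-4o-mini")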