praisonaiagents 0.0.91__py3-none-any.whl → 0.0.93__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
praisonaiagents/agent/agent.py CHANGED
@@ -864,7 +864,11 @@ Your Goal: {self.goal}
         if self._using_custom_llm:
             try:
                 # Special handling for MCP tools when using provider/model format
-                tool_param = self.tools if tools is None else tools
+                # Fix: Handle empty tools list properly - use self.tools if tools is None or empty
+                if tools is None or (isinstance(tools, list) and len(tools) == 0):
+                    tool_param = self.tools
+                else:
+                    tool_param = tools
 
                 # Convert MCP tool objects to OpenAI format if needed
                 if tool_param is not None:
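
The minus line treated an empty list as a valid override, because the old expression only checked for None; `[]` is falsy but not None, so it silently replaced the agent's configured tools with nothing. A minimal standalone sketch of the corrected selection logic (function and variable names here are illustrative, not the package's API):

    def select_tools(agent_tools, call_tools):
        """Fall back to the agent's own tools when the caller passes None or []."""
        if call_tools is None or (isinstance(call_tools, list) and len(call_tools) == 0):
            return agent_tools
        return call_tools

    assert select_tools(["search"], None) == ["search"]    # no override
    assert select_tools(["search"], []) == ["search"]      # empty list no longer wipes tools
    assert select_tools(["search"], ["calc"]) == ["calc"]  # explicit override wins
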
praisonaiagents/agents/agents.py CHANGED
@@ -443,12 +443,12 @@ Context:
         if task.callback:
             try:
                 if asyncio.iscoroutinefunction(task.callback):
-                    if asyncio.get_event_loop().is_running():
-                        asyncio.create_task(task.callback(task_output))
-                    else:
-                        loop = asyncio.new_event_loop()
-                        asyncio.set_event_loop(loop)
-                        loop.run_until_complete(task.callback(task_output))
+                    try:
+                        loop = asyncio.get_running_loop()
+                        loop.create_task(task.callback(task_output))
+                    except RuntimeError:
+                        # No event loop running, create new one
+                        asyncio.run(task.callback(task_output))
                 else:
                     task.callback(task_output)
             except Exception as e:
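
The replacement swaps the `asyncio.get_event_loop().is_running()` probe, which is unreliable outside a running loop on modern Python, for `asyncio.get_running_loop()`, which simply raises `RuntimeError` when no loop is active. A self-contained sketch of the pattern, assuming a sync caller and an async callback (names are illustrative):

    import asyncio

    async def on_done(result):
        print(f"callback received: {result}")

    def fire_callback(callback, result):
        """Dispatch an async callback from sync code, with or without a running loop."""
        if asyncio.iscoroutinefunction(callback):
            try:
                loop = asyncio.get_running_loop()
                loop.create_task(callback(result))  # already inside a loop: schedule it
            except RuntimeError:
                asyncio.run(callback(result))       # no loop: run to completion
        else:
            callback(result)

    fire_callback(on_done, "task output")  # no loop here, so asyncio.run is used

Inside a running loop the coroutine is scheduled fire-and-forget; the surrounding code does not await it, which matches the diff.
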
@@ -765,12 +765,12 @@ Context:
         if task.callback:
             try:
                 if asyncio.iscoroutinefunction(task.callback):
-                    if asyncio.get_event_loop().is_running():
-                        asyncio.create_task(task.callback(task_output))
-                    else:
-                        loop = asyncio.new_event_loop()
-                        asyncio.set_event_loop(loop)
-                        loop.run_until_complete(task.callback(task_output))
+                    try:
+                        loop = asyncio.get_running_loop()
+                        loop.create_task(task.callback(task_output))
+                    except RuntimeError:
+                        # No event loop running, create new one
+                        asyncio.run(task.callback(task_output))
                 else:
                     task.callback(task_output)
             except Exception as e:
praisonaiagents/llm/llm.py CHANGED
@@ -205,6 +205,72 @@ class LLM:
         }
         logging.debug(f"LLM instance initialized with: {json.dumps(debug_info, indent=2, default=str)}")
 
+    def _is_ollama_provider(self) -> bool:
+        """Detect if this is an Ollama provider regardless of naming convention"""
+        if not self.model:
+            return False
+
+        # Direct ollama/ prefix
+        if self.model.startswith("ollama/"):
+            return True
+
+        # Check environment variables for Ollama base URL
+        base_url = os.getenv("OPENAI_BASE_URL", "")
+        api_base = os.getenv("OPENAI_API_BASE", "")
+
+        # Common Ollama endpoints
+        ollama_endpoints = ["localhost:11434", "127.0.0.1:11434", ":11434"]
+
+        return any(endpoint in base_url or endpoint in api_base for endpoint in ollama_endpoints)
+
+    def _parse_tool_call_arguments(self, tool_call: Dict, is_ollama: bool = False) -> tuple:
+        """
+        Safely parse tool call arguments with proper error handling
+
+        Returns:
+            tuple: (function_name, arguments, tool_call_id)
+        """
+        try:
+            if is_ollama:
+                # Special handling for Ollama provider which may have different structure
+                if "function" in tool_call and isinstance(tool_call["function"], dict):
+                    function_name = tool_call["function"]["name"]
+                    arguments = json.loads(tool_call["function"]["arguments"])
+                else:
+                    # Try alternative format that Ollama might return
+                    function_name = tool_call.get("name", "unknown_function")
+                    arguments_str = tool_call.get("arguments", "{}")
+                    arguments = json.loads(arguments_str) if arguments_str else {}
+                tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
+            else:
+                # Standard format for other providers with error handling
+                function_name = tool_call["function"]["name"]
+                arguments_str = tool_call["function"]["arguments"]
+                arguments = json.loads(arguments_str) if arguments_str else {}
+                tool_call_id = tool_call["id"]
+
+        except (KeyError, json.JSONDecodeError, TypeError) as e:
+            logging.error(f"Error parsing tool call arguments: {e}")
+            function_name = tool_call.get("name", "unknown_function")
+            arguments = {}
+            tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
+
+        return function_name, arguments, tool_call_id
+
+    def _needs_system_message_skip(self) -> bool:
+        """Check if this model requires skipping system messages"""
+        if not self.model:
+            return False
+
+        # Only skip for specific legacy o1 models that don't support system messages
+        legacy_o1_models = [
+            "o1-preview",          # 2024-09-12 version
+            "o1-mini",             # 2024-09-12 version
+            "o1-mini-2024-09-12"   # Explicit dated version
+        ]
+
+        return self.model in legacy_o1_models
+
     def get_response(
         self,
         prompt: Union[str, List[Dict]],
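
`_parse_tool_call_arguments` consolidates parsing that previously lived inline at two call sites, and narrows the caught exceptions to `KeyError`, `json.JSONDecodeError`, and `TypeError` instead of a bare `Exception`. A standalone sketch of the same defensive pattern for the standard (non-Ollama) shape, with illustrative names:

    import json
    import logging

    def parse_tool_call(tool_call: dict) -> tuple:
        """Extract (name, args, id) from a tool-call dict, degrading gracefully."""
        try:
            name = tool_call["function"]["name"]
            raw = tool_call["function"]["arguments"]
            args = json.loads(raw) if raw else {}
            call_id = tool_call["id"]
        except (KeyError, json.JSONDecodeError, TypeError) as e:
            logging.error(f"Error parsing tool call: {e}")
            name = tool_call.get("name", "unknown_function")
            args = {}
            call_id = tool_call.get("id", f"tool_{id(tool_call)}")
        return name, args, call_id

    # A well-formed call parses normally; a malformed one falls back to safe defaults.
    ok = {"id": "call_1", "function": {"name": "add", "arguments": '{"a": 1}'}}
    bad = {"name": "add"}  # missing "function" and "id"
    print(parse_tool_call(ok))   # ('add', {'a': 1}, 'call_1')
    print(parse_tool_call(bad))  # ('add', {}, 'tool_...')
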
@@ -320,7 +386,9 @@ class LLM:
             system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
         elif output_pydantic:
             system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-        messages.append({"role": "system", "content": system_prompt})
+        # Skip system messages for legacy o1 models as they don't support them
+        if not self._needs_system_message_skip():
+            messages.append({"role": "system", "content": system_prompt})
 
         if chat_history:
            messages.extend(chat_history)
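
The guard drops the system message only for the three legacy o1 identifiers listed above, since those models reject the `system` role. A minimal sketch of the message-building guard under that assumption (names are illustrative):

    LEGACY_O1_MODELS = {"o1-preview", "o1-mini", "o1-mini-2024-09-12"}

    def build_messages(model: str, system_prompt: str, user_prompt: str) -> list:
        """Omit the system message for models that do not support the system role."""
        messages = []
        if system_prompt and model not in LEGACY_O1_MODELS:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": user_prompt})
        return messages

    print(build_messages("gpt-4o-mini", "You are terse.", "Hi"))  # two messages
    print(build_messages("o1-mini", "You are terse.", "Hi"))      # system dropped
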
@@ -470,32 +538,19 @@ class LLM:
                     for tool_call in tool_calls:
                         # Handle both object and dict access patterns
                         if isinstance(tool_call, dict):
-                            # Special handling for Ollama provider which may have a different structure
-                            if self.model and self.model.startswith("ollama/"):
-                                try:
-                                    # Try standard format first
-                                    if "function" in tool_call and isinstance(tool_call["function"], dict):
-                                        function_name = tool_call["function"]["name"]
-                                        arguments = json.loads(tool_call["function"]["arguments"])
-                                    else:
-                                        # Try alternative format that Ollama might return
-                                        function_name = tool_call.get("name", "unknown_function")
-                                        arguments = json.loads(tool_call.get("arguments", "{}"))
-                                    tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
-                                except Exception as e:
-                                    logging.error(f"Error processing Ollama tool call: {e}")
-                                    function_name = "unknown_function"
-                                    arguments = {}
-                                    tool_call_id = f"tool_{id(tool_call)}"
-                            else:
-                                # Standard format for other providers
-                                function_name = tool_call["function"]["name"]
-                                arguments = json.loads(tool_call["function"]["arguments"])
-                                tool_call_id = tool_call["id"]
+                            is_ollama = self._is_ollama_provider()
+                            function_name, arguments, tool_call_id = self._parse_tool_call_arguments(tool_call, is_ollama)
                         else:
-                            function_name = tool_call.function.name
-                            arguments = json.loads(tool_call.function.arguments)
-                            tool_call_id = tool_call.id
+                            # Handle object-style tool calls
+                            try:
+                                function_name = tool_call.function.name
+                                arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
+                                tool_call_id = tool_call.id
+                            except (json.JSONDecodeError, AttributeError) as e:
+                                logging.error(f"Error parsing object-style tool call: {e}")
+                                function_name = "unknown_function"
+                                arguments = {}
+                                tool_call_id = f"tool_{id(tool_call)}"
 
                         logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
                         tool_result = execute_tool_fn(function_name, arguments)
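
Object-style tool calls (attribute access rather than dict keys) now get the same guarded parsing. A tiny sketch using a stand-in object instead of a real SDK response:

    import json
    import logging
    from types import SimpleNamespace

    def parse_object_call(tool_call) -> tuple:
        """Parse attribute-style tool calls, falling back to safe defaults."""
        try:
            name = tool_call.function.name
            args = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
            call_id = tool_call.id
        except (json.JSONDecodeError, AttributeError) as e:
            logging.error(f"Error parsing object-style tool call: {e}")
            name, args, call_id = "unknown_function", {}, f"tool_{id(tool_call)}"
        return name, args, call_id

    call = SimpleNamespace(id="call_1", function=SimpleNamespace(name="add", arguments='{"a": 2}'))
    print(parse_object_call(call))  # ('add', {'a': 2}, 'call_1')
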
@@ -867,7 +922,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
         elif output_pydantic:
             system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-        messages.append({"role": "system", "content": system_prompt})
+        # Skip system messages for legacy o1 models as they don't support them
+        if not self._needs_system_message_skip():
+            messages.append({"role": "system", "content": system_prompt})
 
         if chat_history:
             messages.extend(chat_history)
@@ -1065,32 +1122,19 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     for tool_call in tool_calls:
                         # Handle both object and dict access patterns
                         if isinstance(tool_call, dict):
-                            # Special handling for Ollama provider which may have a different structure
-                            if self.model and self.model.startswith("ollama/"):
-                                try:
-                                    # Try standard format first
-                                    if "function" in tool_call and isinstance(tool_call["function"], dict):
-                                        function_name = tool_call["function"]["name"]
-                                        arguments = json.loads(tool_call["function"]["arguments"])
-                                    else:
-                                        # Try alternative format that Ollama might return
-                                        function_name = tool_call.get("name", "unknown_function")
-                                        arguments = json.loads(tool_call.get("arguments", "{}"))
-                                    tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
-                                except Exception as e:
-                                    logging.error(f"Error processing Ollama tool call: {e}")
-                                    function_name = "unknown_function"
-                                    arguments = {}
-                                    tool_call_id = f"tool_{id(tool_call)}"
-                            else:
-                                # Standard format for other providers
-                                function_name = tool_call["function"]["name"]
-                                arguments = json.loads(tool_call["function"]["arguments"])
-                                tool_call_id = tool_call["id"]
+                            is_ollama = self._is_ollama_provider()
+                            function_name, arguments, tool_call_id = self._parse_tool_call_arguments(tool_call, is_ollama)
                         else:
-                            function_name = tool_call.function.name
-                            arguments = json.loads(tool_call.function.arguments)
-                            tool_call_id = tool_call.id
+                            # Handle object-style tool calls
+                            try:
+                                function_name = tool_call.function.name
+                                arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
+                                tool_call_id = tool_call.id
+                            except (json.JSONDecodeError, AttributeError) as e:
+                                logging.error(f"Error parsing object-style tool call: {e}")
+                                function_name = "unknown_function"
+                                arguments = {}
+                                tool_call_id = f"tool_{id(tool_call)}"
 
                         tool_result = await execute_tool_fn(function_name, arguments)
 
@@ -1111,7 +1155,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     response_text = ""
 
                     # Special handling for Ollama models that don't automatically process tool results
-                    if self.model and self.model.startswith("ollama/") and tool_result:
+                    if self._is_ollama_provider() and tool_result:
                         # For Ollama models, we need to explicitly ask the model to process the tool results
                         # First, check if the response is just a JSON tool call
                         try:
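
Every Ollama special case now routes through `_is_ollama_provider()`, which matches both the `ollama/` model prefix and Ollama-style endpoints in `OPENAI_BASE_URL`/`OPENAI_API_BASE`. The heuristic in isolation (function name is illustrative):

    import os

    OLLAMA_ENDPOINTS = ("localhost:11434", "127.0.0.1:11434", ":11434")

    def looks_like_ollama(model: str) -> bool:
        """Heuristic: ollama/ model prefix, or an Ollama-style base URL in the env."""
        if model.startswith("ollama/"):
            return True
        base_url = os.getenv("OPENAI_BASE_URL", "")
        api_base = os.getenv("OPENAI_API_BASE", "")
        return any(ep in base_url or ep in api_base for ep in OLLAMA_ENDPOINTS)

    os.environ["OPENAI_BASE_URL"] = "http://localhost:11434/v1"
    print(looks_like_ollama("llama3"))         # True via the base URL
    print(looks_like_ollama("ollama/llama3"))  # True via the prefix
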
@@ -1517,7 +1561,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         # Build messages list
         messages = []
         if system_prompt:
-            messages.append({"role": "system", "content": system_prompt})
+            # Skip system messages for legacy o1 models as they don't support them
+            if not self._needs_system_message_skip():
+                messages.append({"role": "system", "content": system_prompt})
 
         # Add prompt to messages
         if isinstance(prompt, list):
@@ -1623,7 +1669,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         # Build messages list
         messages = []
         if system_prompt:
-            messages.append({"role": "system", "content": system_prompt})
+            # Skip system messages for legacy o1 models as they don't support them
+            if not self._needs_system_message_skip():
+                messages.append({"role": "system", "content": system_prompt})
 
         # Add prompt to messages
         if isinstance(prompt, list):
praisonaiagents/main.py CHANGED
@@ -362,7 +362,24 @@ class ReflectionOutput(BaseModel):
     reflection: str
     satisfactory: Literal["yes", "no"]
 
-client = OpenAI(api_key=(os.environ["OPENAI_API_KEY"] if os.environ.get("OPENAI_API_KEY") else "xxxx"))
+# Constants
+LOCAL_SERVER_API_KEY_PLACEHOLDER = "not-needed"
+
+# Initialize OpenAI client with proper API key handling
+api_key = os.environ.get("OPENAI_API_KEY")
+base_url = os.environ.get("OPENAI_API_BASE") or os.environ.get("OPENAI_BASE_URL")
+
+# For local servers like LM Studio, allow minimal API key
+if base_url and not api_key:
+    api_key = LOCAL_SERVER_API_KEY_PLACEHOLDER
+elif not api_key:
+    raise ValueError(
+        "OPENAI_API_KEY environment variable is required for the default OpenAI service. "
+        "If you are targeting a local server (e.g., LM Studio), ensure OPENAI_API_BASE is set "
+        f"(e.g., 'http://localhost:1234/v1') and you can use a placeholder API key by setting OPENAI_API_KEY='{LOCAL_SERVER_API_KEY_PLACEHOLDER}'"
+    )
+
+client = OpenAI(api_key=api_key, base_url=base_url)
 
 class TaskOutput(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
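
The rewritten initialization lets local OpenAI-compatible servers work without a real key while still failing fast when neither a key nor a base URL is set. A hedged usage sketch; the endpoint and the commented model name are examples, not package defaults:

    import os
    from openai import OpenAI

    # Point at a local OpenAI-compatible server such as LM Studio or Ollama.
    os.environ["OPENAI_API_BASE"] = "http://localhost:1234/v1"  # example endpoint
    os.environ["OPENAI_API_KEY"] = "not-needed"                 # placeholder accepted by local servers

    client = OpenAI(
        api_key=os.environ["OPENAI_API_KEY"],
        base_url=os.environ["OPENAI_API_BASE"],
    )
    # client.chat.completions.create(model="local-model", messages=[...])  # example call
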
praisonaiagents/process/process.py CHANGED
@@ -1,5 +1,6 @@
 import logging
 import asyncio
+import json
 from typing import Dict, Optional, List, Any, AsyncGenerator
 from pydantic import BaseModel, ConfigDict
 from ..agent.agent import Agent
@@ -7,6 +8,7 @@ from ..task.task import Task
 from ..main import display_error, client
 import csv
 import os
+from openai import AsyncOpenAI
 
 class LoopItems(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
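
The new import backs the async helper methods added below, which construct their own `AsyncOpenAI` client per call instead of reusing the module-level sync `client`. A rough sketch of that split, assuming `OPENAI_API_KEY` is set; the model name is an example:

    import asyncio
    import os
    from openai import AsyncOpenAI

    async def ask(prompt: str) -> str:
        # A fresh async client for the async path; the sync client stays untouched.
        async_client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY", "not-needed"))
        resp = await async_client.chat.completions.create(
            model="gpt-4o-mini",  # example model name
            messages=[{"role": "user", "content": prompt}],
        )
        return resp.choices[0].message.content

    # asyncio.run(ask("hello"))  # example invocation (requires a valid key/endpoint)
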
@@ -106,6 +108,128 @@ class Process:
         logging.debug(f"Fallback attempt {fallback_attempts}: No 'not started' task found within retry limit.")
         return None  # Return None if no task found after all attempts
 
+    async def _get_manager_instructions_with_fallback_async(self, manager_task, manager_prompt, ManagerInstructions):
+        """Async version of getting manager instructions with fallback"""
+        try:
+            # First try structured output (OpenAI compatible)
+            logging.info("Attempting structured output...")
+            return await self._get_structured_response_async(manager_task, manager_prompt, ManagerInstructions)
+        except Exception as e:
+            logging.info(f"Structured output failed: {e}, falling back to JSON mode...")
+            # Fallback to regular JSON mode
+            try:
+                # Generate JSON structure description from Pydantic model
+                try:
+                    schema = ManagerInstructions.model_json_schema()
+                    props_desc = ", ".join([f'"{k}": <{v.get("type", "any")}>' for k, v in schema.get('properties', {}).items()])
+                    required_props = schema.get('required', [])
+                    required_props_str = ', '.join(f'"{p}"' for p in required_props)
+                    required_desc = f" (required: {required_props_str})" if required_props else ""
+                    json_structure_desc = "{" + props_desc + "}"
+                    enhanced_prompt = manager_prompt + f"\n\nIMPORTANT: Respond with valid JSON only, using this exact structure: {json_structure_desc}{required_desc}"
+                except Exception as schema_error:
+                    logging.warning(f"Could not generate schema for ManagerInstructions: {schema_error}. Using hardcoded prompt.")
+                    # Fallback to hardcoded prompt if schema generation fails
+                    enhanced_prompt = manager_prompt + "\n\nIMPORTANT: Respond with valid JSON only, using this exact structure: {\"task_id\": <int>, \"agent_name\": \"<string>\", \"action\": \"<execute or stop>\"}"
+
+                return await self._get_json_response_async(manager_task, enhanced_prompt, ManagerInstructions)
+            except Exception as fallback_error:
+                error_msg = f"Both structured output and JSON fallback failed: {fallback_error}"
+                logging.error(error_msg, exc_info=True)
+                raise Exception(error_msg) from fallback_error
+
+    def _get_manager_instructions_with_fallback(self, manager_task, manager_prompt, ManagerInstructions):
+        """Sync version of getting manager instructions with fallback"""
+        try:
+            # First try structured output (OpenAI compatible)
+            logging.info("Attempting structured output...")
+            manager_response = client.beta.chat.completions.parse(
+                model=self.manager_llm,
+                messages=[
+                    {"role": "system", "content": manager_task.description},
+                    {"role": "user", "content": manager_prompt}
+                ],
+                temperature=0.7,
+                response_format=ManagerInstructions
+            )
+            return manager_response.choices[0].message.parsed
+        except Exception as e:
+            logging.info(f"Structured output failed: {e}, falling back to JSON mode...")
+            # Fallback to regular JSON mode
+            try:
+                # Generate JSON structure description from Pydantic model
+                try:
+                    schema = ManagerInstructions.model_json_schema()
+                    props_desc = ", ".join([f'"{k}": <{v.get("type", "any")}>' for k, v in schema.get('properties', {}).items()])
+                    required_props = schema.get('required', [])
+                    required_props_str = ', '.join(f'"{p}"' for p in required_props)
+                    required_desc = f" (required: {required_props_str})" if required_props else ""
+                    json_structure_desc = "{" + props_desc + "}"
+                    enhanced_prompt = manager_prompt + f"\n\nIMPORTANT: Respond with valid JSON only, using this exact structure: {json_structure_desc}{required_desc}"
+                except Exception as schema_error:
+                    logging.warning(f"Could not generate schema for ManagerInstructions: {schema_error}. Using hardcoded prompt.")
+                    # Fallback to hardcoded prompt if schema generation fails
+                    enhanced_prompt = manager_prompt + "\n\nIMPORTANT: Respond with valid JSON only, using this exact structure: {\"task_id\": <int>, \"agent_name\": \"<string>\", \"action\": \"<execute or stop>\"}"
+
+                manager_response = client.chat.completions.create(
+                    model=self.manager_llm,
+                    messages=[
+                        {"role": "system", "content": manager_task.description},
+                        {"role": "user", "content": enhanced_prompt}
+                    ],
+                    temperature=0.7,
+                    response_format={"type": "json_object"}
+                )
+
+                # Parse JSON and validate with Pydantic
+                try:
+                    json_content = manager_response.choices[0].message.content
+                    parsed_json = json.loads(json_content)
+                    return ManagerInstructions(**parsed_json)
+                except (json.JSONDecodeError, ValueError) as e:
+                    raise Exception(f"Failed to parse JSON response: {json_content}") from e
+            except Exception as fallback_error:
+                error_msg = f"Both structured output and JSON fallback failed: {fallback_error}"
+                logging.error(error_msg, exc_info=True)
+                raise Exception(error_msg) from fallback_error
+
+    async def _get_structured_response_async(self, manager_task, manager_prompt, ManagerInstructions):
+        """Async version of structured response"""
+        # Create an async client instance for this async method
+        async_client = AsyncOpenAI()
+        manager_response = await async_client.beta.chat.completions.parse(
+            model=self.manager_llm,
+            messages=[
+                {"role": "system", "content": manager_task.description},
+                {"role": "user", "content": manager_prompt}
+            ],
+            temperature=0.7,
+            response_format=ManagerInstructions
+        )
+        return manager_response.choices[0].message.parsed
+
+    async def _get_json_response_async(self, manager_task, enhanced_prompt, ManagerInstructions):
+        """Async version of JSON fallback response"""
+        # Create an async client instance for this async method
+        async_client = AsyncOpenAI()
+        manager_response = await async_client.chat.completions.create(
+            model=self.manager_llm,
+            messages=[
+                {"role": "system", "content": manager_task.description},
+                {"role": "user", "content": enhanced_prompt}
+            ],
+            temperature=0.7,
+            response_format={"type": "json_object"}
+        )
+
+        # Parse JSON and validate with Pydantic
+        try:
+            json_content = manager_response.choices[0].message.content
+            parsed_json = json.loads(json_content)
+            return ManagerInstructions(**parsed_json)
+        except (json.JSONDecodeError, ValueError) as e:
+            raise Exception(f"Failed to parse JSON response: {json_content}") from e
+
 
     async def aworkflow(self) -> AsyncGenerator[str, None]:
         """Async version of workflow method"""
@@ -496,26 +620,13 @@ Provide a JSON with the structure:
                 try:
                     logging.info("Requesting manager instructions...")
                     if manager_task.async_execution:
-                        manager_response = await client.beta.chat.completions.parse(
-                            model=self.manager_llm,
-                            messages=[
-                                {"role": "system", "content": manager_task.description},
-                                {"role": "user", "content": manager_prompt}
-                            ],
-                            temperature=0.7,
-                            response_format=ManagerInstructions
+                        parsed_instructions = await self._get_manager_instructions_with_fallback_async(
+                            manager_task, manager_prompt, ManagerInstructions
                         )
                     else:
-                        manager_response = client.beta.chat.completions.parse(
-                            model=self.manager_llm,
-                            messages=[
-                                {"role": "system", "content": manager_task.description},
-                                {"role": "user", "content": manager_prompt}
-                            ],
-                            temperature=0.7,
-                            response_format=ManagerInstructions
+                        parsed_instructions = self._get_manager_instructions_with_fallback(
+                            manager_task, manager_prompt, ManagerInstructions
                         )
-                    parsed_instructions = manager_response.choices[0].message.parsed
                     logging.info(f"Manager instructions: {parsed_instructions}")
                 except Exception as e:
                     display_error(f"Manager parse error: {e}")
@@ -1110,16 +1221,9 @@ Provide a JSON with the structure:
 
             try:
                 logging.info("Requesting manager instructions...")
-                manager_response = client.beta.chat.completions.parse(
-                    model=self.manager_llm,
-                    messages=[
-                        {"role": "system", "content": manager_task.description},
-                        {"role": "user", "content": manager_prompt}
-                    ],
-                    temperature=0.7,
-                    response_format=ManagerInstructions
+                parsed_instructions = self._get_manager_instructions_with_fallback(
+                    manager_task, manager_prompt, ManagerInstructions
                 )
-                parsed_instructions = manager_response.choices[0].message.parsed
                 logging.info(f"Manager instructions: {parsed_instructions}")
             except Exception as e:
                 display_error(f"Manager parse error: {e}")
praisonaiagents/task/task.py CHANGED
@@ -47,11 +47,11 @@ class Task:
             try:
                 from ..memory.memory import Memory
                 MEMORY_AVAILABLE = True
-            except ImportError:
-                raise ImportError(
-                    "Memory features requested in Task but memory dependencies not installed. "
-                    "Please install with: pip install \"praisonaiagents[memory]\""
-                )
+            except ImportError as e:
+                logger.warning(f"Memory dependency missing: {e}")
+                logger.warning("Some memory features may not work. Install with: pip install \"praisonaiagents[memory]\"")
+                MEMORY_AVAILABLE = False
+                # Don't raise - let it continue with limited functionality
 
         self.input_file = input_file
         self.id = str(uuid.uuid4()) if id is None else str(id)
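
Task construction now degrades to warnings instead of raising when the optional memory extra is missing. The general optional-import pattern as a standalone sketch; the import target follows the package layout listed in the RECORD section below:

    import logging

    logger = logging.getLogger(__name__)

    try:
        from praisonaiagents.memory.memory import Memory  # optional extra
        MEMORY_AVAILABLE = True
    except ImportError as e:
        logger.warning(f"Memory dependency missing: {e}")
        logger.warning('Install with: pip install "praisonaiagents[memory]"')
        MEMORY_AVAILABLE = False

    if MEMORY_AVAILABLE:
        pass  # wire up memory features here
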
praisonaiagents/tools/__init__.py CHANGED
@@ -7,6 +7,8 @@ TOOL_MAPPINGS = {
     # Direct functions
     'internet_search': ('.duckduckgo_tools', None),
     'duckduckgo': ('.duckduckgo_tools', None),
+    'searxng_search': ('.searxng_tools', None),
+    'searxng': ('.searxng_tools', None),
 
     # arXiv Tools
     'search_arxiv': ('.arxiv_tools', None),
@@ -171,7 +173,7 @@ def __getattr__(name: str) -> Any:
         # Direct function import
         module = import_module(module_path, __package__)
         if name in [
-            'duckduckgo', 'internet_search',
+            'duckduckgo', 'internet_search', 'searxng_search', 'searxng',
             'search_arxiv', 'get_arxiv_paper', 'get_papers_by_author', 'get_papers_by_category',
             'wiki_search', 'wiki_summary', 'wiki_page', 'wiki_random', 'wiki_language',
             'get_article', 'get_news_sources', 'get_articles_from_source', 'get_trending_topics',
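
Registering SearxNG requires both the `TOOL_MAPPINGS` entry and the name in the direct-import list consumed by the module-level `__getattr__`. A minimal sketch of that PEP 562 lazy-import mechanism, meant to live inside a package's `__init__.py` (the entries shown are a subset, for illustration):

    # Module-level lazy imports (PEP 562): attributes resolve on first access.
    from importlib import import_module
    from typing import Any

    TOOL_MAPPINGS = {
        'internet_search': ('.duckduckgo_tools', None),
        'searxng_search': ('.searxng_tools', None),  # newly registered tool
    }

    def __getattr__(name: str) -> Any:
        if name in TOOL_MAPPINGS:
            module_path, _ = TOOL_MAPPINGS[name]
            module = import_module(module_path, __package__)
            return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
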
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.91
+Version: 0.0.93
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
@@ -1,25 +1,25 @@
 praisonaiagents/__init__.py,sha256=Z2_rSA6mYozz0r3ioUgKzl3QV8uWRDS_QaqPg2oGjqg,1324
-praisonaiagents/main.py,sha256=EsMRCT1tYjHH7hgoXov5s1caIBeRkpIPK8EZQsMKlw4,15138
+praisonaiagents/main.py,sha256=D6XzpqdfglCQiWaH5LjRSv-bB3QkJso-i0h1uTFkPQI,15844
 praisonaiagents/agent/__init__.py,sha256=j0T19TVNbfZcClvpbZDDinQxZ0oORgsMrMqx16jZ-bA,128
-praisonaiagents/agent/agent.py,sha256=-zENKxcaAWH5KJOed4KmcpAeBDNtRlxqG58QHdLH6RA,86334
+praisonaiagents/agent/agent.py,sha256=DPpTgobDVZw2qlPzvNS-Xi-OF3RlM2cil6y15cbXIy8,86553
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
-praisonaiagents/agents/agents.py,sha256=lFWJDZeWQcr6RttV-pxvny-jfAM3UWiYjMnYo8pZYe0,59429
+praisonaiagents/agents/agents.py,sha256=-cWRgok0X_4Mk-L7dW6bFdX7JVpxfe7R6aLmukktwKc,59381
 praisonaiagents/agents/autoagents.py,sha256=Lc_b9mO2MeefBrsHkHoqFxEr5iRGrYuzDhslyybXwdw,13649
 praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=Po0JZsgjYJrXdNSggmUGOWidZEF0f8xo4nhsZZfh8tY,13217
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=Y8z7mfzL_OMhoPSIr7k7Demk8HvHmJZv80EXFY6SUEU,91863
+praisonaiagents/llm/llm.py,sha256=9wHmf0aGKf4a7YZ4JONmD7Ela8JBYVrkMFF2ei8Ivpk,93400
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
 praisonaiagents/mcp/mcp.py,sha256=-U6md6zHoJZCWF8XFq921Yy5CcSNaGqvjg3aRT737LM,16765
 praisonaiagents/mcp/mcp_sse.py,sha256=DLh3F_aoVRM1X-7hgIOWOw4FQ1nGmn9YNbQTesykzn4,6792
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
-praisonaiagents/process/process.py,sha256=7nGV-d9lDlQO6d7X4nMb7f6pMKCYNfoFzTrVvPrUefo,60179
+praisonaiagents/process/process.py,sha256=gxhMXG3s4CzaREyuwE5zxCMx2Wp_b_Wd53tDfkj8Qk8,66567
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
-praisonaiagents/task/task.py,sha256=JShYyMkAC1_bhEt0s0CXwJtQFXY6bNu-rlb0mHyyoXM,15034
-praisonaiagents/tools/__init__.py,sha256=CWOYV9SudYY82r45LnNgaVRV3cmsAFdasNRkPrLsgmI,9198
+praisonaiagents/task/task.py,sha256=03Vcz3TaKIYnryFnKAuuQ7Ly5nTaxysFpem6sgn4gJA,15112
+praisonaiagents/tools/__init__.py,sha256=Rrgi7_3-yLHpfBB81WUi0-wD_wb_BsukwHVdjDYAF-0,9316
 praisonaiagents/tools/arxiv_tools.py,sha256=1stb31zTjLTon4jCnpZG5de9rKc9QWgC0leLegvPXWo,10528
 praisonaiagents/tools/calculator_tools.py,sha256=S1xPT74Geurvjm52QMMIG29zDXVEWJmM6nmyY7yF298,9571
 praisonaiagents/tools/csv_tools.py,sha256=4Yr0QYwBXt-1BDXGLalB2eSsFR2mB5rH3KdHmRBQY6E,10036
@@ -40,7 +40,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.91.dist-info/METADATA,sha256=AVYx1DidbzoSBWeh23X-8LuX7gAEqOas0mCe90yVWQA,1273
-praisonaiagents-0.0.91.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-praisonaiagents-0.0.91.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.91.dist-info/RECORD,,
+praisonaiagents-0.0.93.dist-info/METADATA,sha256=HlnkZm2D8lKJPxXbT6ODUIbyzhJ1LMArMStjh1vCXlY,1273
+praisonaiagents-0.0.93.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.93.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.93.dist-info/RECORD,,