praisonaiagents 0.0.90__py3-none-any.whl → 0.0.92__py3-none-any.whl

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as published in that registry.
praisonaiagents/agents/agents.py CHANGED
@@ -443,12 +443,12 @@ Context:
         if task.callback:
             try:
                 if asyncio.iscoroutinefunction(task.callback):
-                    if asyncio.get_event_loop().is_running():
-                        asyncio.create_task(task.callback(task_output))
-                    else:
-                        loop = asyncio.new_event_loop()
-                        asyncio.set_event_loop(loop)
-                        loop.run_until_complete(task.callback(task_output))
+                    try:
+                        loop = asyncio.get_running_loop()
+                        loop.create_task(task.callback(task_output))
+                    except RuntimeError:
+                        # No event loop running, create new one
+                        asyncio.run(task.callback(task_output))
                 else:
                     task.callback(task_output)
             except Exception as e:
@@ -765,12 +765,12 @@ Context:
         if task.callback:
             try:
                 if asyncio.iscoroutinefunction(task.callback):
-                    if asyncio.get_event_loop().is_running():
-                        asyncio.create_task(task.callback(task_output))
-                    else:
-                        loop = asyncio.new_event_loop()
-                        asyncio.set_event_loop(loop)
-                        loop.run_until_complete(task.callback(task_output))
+                    try:
+                        loop = asyncio.get_running_loop()
+                        loop.create_task(task.callback(task_output))
+                    except RuntimeError:
+                        # No event loop running, create new one
+                        asyncio.run(task.callback(task_output))
                 else:
                     task.callback(task_output)
             except Exception as e:
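
Both hunks above replace the deprecated asyncio.get_event_loop().is_running() check with the asyncio.get_running_loop() / RuntimeError idiom. A minimal standalone sketch of that dispatch pattern (the function and callback names here are illustrative, not part of the package):

import asyncio

async def my_callback(result):
    # Stand-in for an async task callback; in the package this receives a TaskOutput.
    print(f"callback received: {result}")

def dispatch_callback(callback, result):
    """Schedule a coroutine callback on the running loop, or run it to completion if there is none."""
    if asyncio.iscoroutinefunction(callback):
        try:
            loop = asyncio.get_running_loop()
            # Already inside a loop: schedule without blocking the caller.
            loop.create_task(callback(result))
        except RuntimeError:
            # No running loop (plain synchronous context): drive the coroutine ourselves.
            asyncio.run(callback(result))
    else:
        callback(result)

if __name__ == "__main__":
    dispatch_callback(my_callback, "demo result")
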
praisonaiagents/main.py CHANGED
@@ -362,7 +362,24 @@ class ReflectionOutput(BaseModel):
     reflection: str
     satisfactory: Literal["yes", "no"]
 
-client = OpenAI(api_key=(os.environ["OPENAI_API_KEY"] if os.environ.get("OPENAI_API_KEY") else "xxxx"))
+# Constants
+LOCAL_SERVER_API_KEY_PLACEHOLDER = "not-needed"
+
+# Initialize OpenAI client with proper API key handling
+api_key = os.environ.get("OPENAI_API_KEY")
+base_url = os.environ.get("OPENAI_API_BASE") or os.environ.get("OPENAI_BASE_URL")
+
+# For local servers like LM Studio, allow minimal API key
+if base_url and not api_key:
+    api_key = LOCAL_SERVER_API_KEY_PLACEHOLDER
+elif not api_key:
+    raise ValueError(
+        "OPENAI_API_KEY environment variable is required for the default OpenAI service. "
+        "If you are targeting a local server (e.g., LM Studio), ensure OPENAI_API_BASE is set "
+        f"(e.g., 'http://localhost:1234/v1') and you can use a placeholder API key by setting OPENAI_API_KEY='{LOCAL_SERVER_API_KEY_PLACEHOLDER}'"
+    )
+
+client = OpenAI(api_key=api_key, base_url=base_url)
 
 class TaskOutput(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
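
The new initialization resolves the key and base URL from the environment before constructing the client. A distilled sketch of the same resolution logic, assuming only the openai package (the build_client function name is illustrative):

import os
from openai import OpenAI

LOCAL_SERVER_API_KEY_PLACEHOLDER = "not-needed"

def build_client() -> OpenAI:
    # Resolve credentials the same way the new module-level code does.
    api_key = os.environ.get("OPENAI_API_KEY")
    base_url = os.environ.get("OPENAI_API_BASE") or os.environ.get("OPENAI_BASE_URL")
    if base_url and not api_key:
        # Local servers such as LM Studio (e.g. http://localhost:1234/v1) accept any key.
        api_key = LOCAL_SERVER_API_KEY_PLACEHOLDER
    elif not api_key:
        raise ValueError("Set OPENAI_API_KEY, or set OPENAI_API_BASE when using a local server.")
    return OpenAI(api_key=api_key, base_url=base_url)

# Example environment for a local server (shell):
#   OPENAI_API_BASE=http://localhost:1234/v1 python your_script.py
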
praisonaiagents/process/process.py CHANGED
@@ -1,5 +1,6 @@
 import logging
 import asyncio
+import json
 from typing import Dict, Optional, List, Any, AsyncGenerator
 from pydantic import BaseModel, ConfigDict
 from ..agent.agent import Agent
@@ -7,6 +8,7 @@ from ..task.task import Task
 from ..main import display_error, client
 import csv
 import os
+from openai import AsyncOpenAI
 
 class LoopItems(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -106,6 +108,128 @@ class Process:
         logging.debug(f"Fallback attempt {fallback_attempts}: No 'not started' task found within retry limit.")
         return None # Return None if no task found after all attempts
 
+    async def _get_manager_instructions_with_fallback_async(self, manager_task, manager_prompt, ManagerInstructions):
+        """Async version of getting manager instructions with fallback"""
+        try:
+            # First try structured output (OpenAI compatible)
+            logging.info("Attempting structured output...")
+            return await self._get_structured_response_async(manager_task, manager_prompt, ManagerInstructions)
+        except Exception as e:
+            logging.info(f"Structured output failed: {e}, falling back to JSON mode...")
+            # Fallback to regular JSON mode
+            try:
+                # Generate JSON structure description from Pydantic model
+                try:
+                    schema = ManagerInstructions.model_json_schema()
+                    props_desc = ", ".join([f'"{k}": <{v.get("type", "any")}>' for k, v in schema.get('properties', {}).items()])
+                    required_props = schema.get('required', [])
+                    required_props_str = ', '.join(f'"{p}"' for p in required_props)
+                    required_desc = f" (required: {required_props_str})" if required_props else ""
+                    json_structure_desc = "{" + props_desc + "}"
+                    enhanced_prompt = manager_prompt + f"\n\nIMPORTANT: Respond with valid JSON only, using this exact structure: {json_structure_desc}{required_desc}"
+                except Exception as schema_error:
+                    logging.warning(f"Could not generate schema for ManagerInstructions: {schema_error}. Using hardcoded prompt.")
+                    # Fallback to hardcoded prompt if schema generation fails
+                    enhanced_prompt = manager_prompt + "\n\nIMPORTANT: Respond with valid JSON only, using this exact structure: {\"task_id\": <int>, \"agent_name\": \"<string>\", \"action\": \"<execute or stop>\"}"
+
+                return await self._get_json_response_async(manager_task, enhanced_prompt, ManagerInstructions)
+            except Exception as fallback_error:
+                error_msg = f"Both structured output and JSON fallback failed: {fallback_error}"
+                logging.error(error_msg, exc_info=True)
+                raise Exception(error_msg) from fallback_error
+
+    def _get_manager_instructions_with_fallback(self, manager_task, manager_prompt, ManagerInstructions):
+        """Sync version of getting manager instructions with fallback"""
+        try:
+            # First try structured output (OpenAI compatible)
+            logging.info("Attempting structured output...")
+            manager_response = client.beta.chat.completions.parse(
+                model=self.manager_llm,
+                messages=[
+                    {"role": "system", "content": manager_task.description},
+                    {"role": "user", "content": manager_prompt}
+                ],
+                temperature=0.7,
+                response_format=ManagerInstructions
+            )
+            return manager_response.choices[0].message.parsed
+        except Exception as e:
+            logging.info(f"Structured output failed: {e}, falling back to JSON mode...")
+            # Fallback to regular JSON mode
+            try:
+                # Generate JSON structure description from Pydantic model
+                try:
+                    schema = ManagerInstructions.model_json_schema()
+                    props_desc = ", ".join([f'"{k}": <{v.get("type", "any")}>' for k, v in schema.get('properties', {}).items()])
+                    required_props = schema.get('required', [])
+                    required_props_str = ', '.join(f'"{p}"' for p in required_props)
+                    required_desc = f" (required: {required_props_str})" if required_props else ""
+                    json_structure_desc = "{" + props_desc + "}"
+                    enhanced_prompt = manager_prompt + f"\n\nIMPORTANT: Respond with valid JSON only, using this exact structure: {json_structure_desc}{required_desc}"
+                except Exception as schema_error:
+                    logging.warning(f"Could not generate schema for ManagerInstructions: {schema_error}. Using hardcoded prompt.")
+                    # Fallback to hardcoded prompt if schema generation fails
+                    enhanced_prompt = manager_prompt + "\n\nIMPORTANT: Respond with valid JSON only, using this exact structure: {\"task_id\": <int>, \"agent_name\": \"<string>\", \"action\": \"<execute or stop>\"}"
+
+                manager_response = client.chat.completions.create(
+                    model=self.manager_llm,
+                    messages=[
+                        {"role": "system", "content": manager_task.description},
+                        {"role": "user", "content": enhanced_prompt}
+                    ],
+                    temperature=0.7,
+                    response_format={"type": "json_object"}
+                )
+
+                # Parse JSON and validate with Pydantic
+                try:
+                    json_content = manager_response.choices[0].message.content
+                    parsed_json = json.loads(json_content)
+                    return ManagerInstructions(**parsed_json)
+                except (json.JSONDecodeError, ValueError) as e:
+                    raise Exception(f"Failed to parse JSON response: {json_content}") from e
+            except Exception as fallback_error:
+                error_msg = f"Both structured output and JSON fallback failed: {fallback_error}"
+                logging.error(error_msg, exc_info=True)
+                raise Exception(error_msg) from fallback_error
+
+    async def _get_structured_response_async(self, manager_task, manager_prompt, ManagerInstructions):
+        """Async version of structured response"""
+        # Create an async client instance for this async method
+        async_client = AsyncOpenAI()
+        manager_response = await async_client.beta.chat.completions.parse(
+            model=self.manager_llm,
+            messages=[
+                {"role": "system", "content": manager_task.description},
+                {"role": "user", "content": manager_prompt}
+            ],
+            temperature=0.7,
+            response_format=ManagerInstructions
+        )
+        return manager_response.choices[0].message.parsed
+
+    async def _get_json_response_async(self, manager_task, enhanced_prompt, ManagerInstructions):
+        """Async version of JSON fallback response"""
+        # Create an async client instance for this async method
+        async_client = AsyncOpenAI()
+        manager_response = await async_client.chat.completions.create(
+            model=self.manager_llm,
+            messages=[
+                {"role": "system", "content": manager_task.description},
+                {"role": "user", "content": enhanced_prompt}
+            ],
+            temperature=0.7,
+            response_format={"type": "json_object"}
+        )
+
+        # Parse JSON and validate with Pydantic
+        try:
+            json_content = manager_response.choices[0].message.content
+            parsed_json = json.loads(json_content)
+            return ManagerInstructions(**parsed_json)
+        except (json.JSONDecodeError, ValueError) as e:
+            raise Exception(f"Failed to parse JSON response: {json_content}") from e
+
 
     async def aworkflow(self) -> AsyncGenerator[str, None]:
         """Async version of workflow method"""
@@ -496,26 +620,13 @@ Provide a JSON with the structure:
             try:
                 logging.info("Requesting manager instructions...")
                 if manager_task.async_execution:
-                    manager_response = await client.beta.chat.completions.parse(
-                        model=self.manager_llm,
-                        messages=[
-                            {"role": "system", "content": manager_task.description},
-                            {"role": "user", "content": manager_prompt}
-                        ],
-                        temperature=0.7,
-                        response_format=ManagerInstructions
+                    parsed_instructions = await self._get_manager_instructions_with_fallback_async(
+                        manager_task, manager_prompt, ManagerInstructions
                     )
                 else:
-                    manager_response = client.beta.chat.completions.parse(
-                        model=self.manager_llm,
-                        messages=[
-                            {"role": "system", "content": manager_task.description},
-                            {"role": "user", "content": manager_prompt}
-                        ],
-                        temperature=0.7,
-                        response_format=ManagerInstructions
+                    parsed_instructions = self._get_manager_instructions_with_fallback(
+                        manager_task, manager_prompt, ManagerInstructions
                     )
-                parsed_instructions = manager_response.choices[0].message.parsed
                 logging.info(f"Manager instructions: {parsed_instructions}")
             except Exception as e:
                 display_error(f"Manager parse error: {e}")
@@ -1110,16 +1221,9 @@ Provide a JSON with the structure:
 
             try:
                 logging.info("Requesting manager instructions...")
-                manager_response = client.beta.chat.completions.parse(
-                    model=self.manager_llm,
-                    messages=[
-                        {"role": "system", "content": manager_task.description},
-                        {"role": "user", "content": manager_prompt}
-                    ],
-                    temperature=0.7,
-                    response_format=ManagerInstructions
+                parsed_instructions = self._get_manager_instructions_with_fallback(
+                    manager_task, manager_prompt, ManagerInstructions
                 )
-                parsed_instructions = manager_response.choices[0].message.parsed
                 logging.info(f"Manager instructions: {parsed_instructions}")
             except Exception as e:
                 display_error(f"Manager parse error: {e}")
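
Both call sites now delegate to the helpers added above, which try structured output first and fall back to JSON mode. A minimal sketch of that two-step call against the OpenAI Python client (assumes the openai and pydantic packages, a reachable endpoint, and credentials; the model name and prompts are illustrative):

import json
from openai import OpenAI
from pydantic import BaseModel

class ManagerInstructions(BaseModel):
    task_id: int
    agent_name: str
    action: str

client = OpenAI()  # needs OPENAI_API_KEY (or a base_url pointing at a compatible server)
messages = [
    {"role": "system", "content": "You decide which agent runs the next task."},
    {"role": "user", "content": "Pick the next task and agent. Respond in JSON."},
]

try:
    # Preferred path: structured outputs parse straight into the Pydantic model.
    response = client.beta.chat.completions.parse(
        model="gpt-4o-mini",  # illustrative model name
        messages=messages,
        response_format=ManagerInstructions,
    )
    instructions = response.choices[0].message.parsed
except Exception:
    # Fallback path: plain JSON mode, then validate by hand.
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
        response_format={"type": "json_object"},
    )
    instructions = ManagerInstructions(**json.loads(response.choices[0].message.content))

print(instructions)
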
praisonaiagents/task/task.py CHANGED
@@ -47,11 +47,11 @@ class Task:
             try:
                 from ..memory.memory import Memory
                 MEMORY_AVAILABLE = True
-            except ImportError:
-                raise ImportError(
-                    "Memory features requested in Task but memory dependencies not installed. "
-                    "Please install with: pip install \"praisonaiagents[memory]\""
-                )
+            except ImportError as e:
+                logger.warning(f"Memory dependency missing: {e}")
+                logger.warning("Some memory features may not work. Install with: pip install \"praisonaiagents[memory]\"")
+                MEMORY_AVAILABLE = False
+                # Don't raise - let it continue with limited functionality
 
         self.input_file = input_file
         self.id = str(uuid.uuid4()) if id is None else str(id)
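
Instead of raising, the Task constructor now degrades gracefully when the memory extra is absent. A generic sketch of that optional-dependency pattern (the remember helper is illustrative; chromadb is the dependency the memory extra actually installs):

import logging

logger = logging.getLogger(__name__)

try:
    import chromadb  # installed by: pip install "praisonaiagents[memory]"
    MEMORY_AVAILABLE = True
except ImportError as e:
    logger.warning(f"Memory dependency missing: {e}")
    logger.warning('Some memory features may not work. Install with: pip install "praisonaiagents[memory]"')
    MEMORY_AVAILABLE = False

def remember(item: str) -> None:
    if not MEMORY_AVAILABLE:
        logger.warning("Memory not available; skipping persistence for: %s", item)
        return
    # Real code would write the item to the vector store here.
    ...
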
praisonaiagents/tools/__init__.py CHANGED
@@ -7,6 +7,8 @@ TOOL_MAPPINGS = {
     # Direct functions
     'internet_search': ('.duckduckgo_tools', None),
     'duckduckgo': ('.duckduckgo_tools', None),
+    'searxng_search': ('.searxng_tools', None),
+    'searxng': ('.searxng_tools', None),
 
     # arXiv Tools
     'search_arxiv': ('.arxiv_tools', None),
@@ -171,7 +173,7 @@ def __getattr__(name: str) -> Any:
         # Direct function import
         module = import_module(module_path, __package__)
         if name in [
-            'duckduckgo', 'internet_search',
+            'duckduckgo', 'internet_search', 'searxng_search', 'searxng',
             'search_arxiv', 'get_arxiv_paper', 'get_papers_by_author', 'get_papers_by_category',
             'wiki_search', 'wiki_summary', 'wiki_page', 'wiki_random', 'wiki_language',
             'get_article', 'get_news_sources', 'get_articles_from_source', 'get_trending_topics',
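
The two hunks above register the new SearxNG tools in TOOL_MAPPINGS and in the list of direct function imports handled by the module-level __getattr__ (PEP 562), so .searxng_tools is imported only on first access. A self-contained sketch of that lazy-loading mechanism, using stdlib modules as stand-ins for the real tool modules:

from importlib import import_module
from typing import Any

# tool name -> (module path, attribute name or None when the function shares the tool's name)
TOOL_MAPPINGS = {
    "sqrt": ("math", None),
    "urlencode": ("urllib.parse", None),
}

def __getattr__(name: str) -> Any:
    # Module-level __getattr__ (PEP 562): imports the backing module only when the name is first used.
    if name in TOOL_MAPPINGS:
        module_path, attr = TOOL_MAPPINGS[name]
        module = import_module(module_path)
        return getattr(module, attr or name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# Usage (from another module): `from lazy_tools import sqrt` triggers the import of `math`.
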
praisonaiagents-0.0.90.dist-info/METADATA → praisonaiagents-0.0.92.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.90
+Version: 0.0.92
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
@@ -13,11 +13,11 @@ Requires-Dist: mcp>=1.6.0; extra == "mcp"
 Requires-Dist: fastapi>=0.115.0; extra == "mcp"
 Requires-Dist: uvicorn>=0.34.0; extra == "mcp"
 Provides-Extra: memory
-Requires-Dist: chromadb>=0.5.23; extra == "memory"
+Requires-Dist: chromadb>=1.0.0; extra == "memory"
 Provides-Extra: knowledge
 Requires-Dist: mem0ai>=0.1.0; extra == "knowledge"
-Requires-Dist: chromadb==0.5.23; extra == "knowledge"
-Requires-Dist: markitdown[all]; extra == "knowledge"
+Requires-Dist: chromadb>=1.0.0; extra == "knowledge"
+Requires-Dist: markitdown[all]>=0.1.0; extra == "knowledge"
 Requires-Dist: chonkie>=1.0.2; extra == "knowledge"
 Provides-Extra: llm
 Requires-Dist: litellm>=1.50.0; extra == "llm"
praisonaiagents-0.0.90.dist-info/RECORD → praisonaiagents-0.0.92.dist-info/RECORD RENAMED
@@ -1,10 +1,10 @@
 praisonaiagents/__init__.py,sha256=Z2_rSA6mYozz0r3ioUgKzl3QV8uWRDS_QaqPg2oGjqg,1324
-praisonaiagents/main.py,sha256=EsMRCT1tYjHH7hgoXov5s1caIBeRkpIPK8EZQsMKlw4,15138
+praisonaiagents/main.py,sha256=D6XzpqdfglCQiWaH5LjRSv-bB3QkJso-i0h1uTFkPQI,15844
 praisonaiagents/agent/__init__.py,sha256=j0T19TVNbfZcClvpbZDDinQxZ0oORgsMrMqx16jZ-bA,128
 praisonaiagents/agent/agent.py,sha256=-zENKxcaAWH5KJOed4KmcpAeBDNtRlxqG58QHdLH6RA,86334
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
-praisonaiagents/agents/agents.py,sha256=lFWJDZeWQcr6RttV-pxvny-jfAM3UWiYjMnYo8pZYe0,59429
+praisonaiagents/agents/agents.py,sha256=-cWRgok0X_4Mk-L7dW6bFdX7JVpxfe7R6aLmukktwKc,59381
 praisonaiagents/agents/autoagents.py,sha256=Lc_b9mO2MeefBrsHkHoqFxEr5iRGrYuzDhslyybXwdw,13649
 praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
@@ -16,10 +16,10 @@ praisonaiagents/mcp/mcp.py,sha256=-U6md6zHoJZCWF8XFq921Yy5CcSNaGqvjg3aRT737LM,16
 praisonaiagents/mcp/mcp_sse.py,sha256=DLh3F_aoVRM1X-7hgIOWOw4FQ1nGmn9YNbQTesykzn4,6792
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
-praisonaiagents/process/process.py,sha256=7nGV-d9lDlQO6d7X4nMb7f6pMKCYNfoFzTrVvPrUefo,60179
+praisonaiagents/process/process.py,sha256=gxhMXG3s4CzaREyuwE5zxCMx2Wp_b_Wd53tDfkj8Qk8,66567
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
-praisonaiagents/task/task.py,sha256=JShYyMkAC1_bhEt0s0CXwJtQFXY6bNu-rlb0mHyyoXM,15034
-praisonaiagents/tools/__init__.py,sha256=CWOYV9SudYY82r45LnNgaVRV3cmsAFdasNRkPrLsgmI,9198
+praisonaiagents/task/task.py,sha256=03Vcz3TaKIYnryFnKAuuQ7Ly5nTaxysFpem6sgn4gJA,15112
+praisonaiagents/tools/__init__.py,sha256=Rrgi7_3-yLHpfBB81WUi0-wD_wb_BsukwHVdjDYAF-0,9316
 praisonaiagents/tools/arxiv_tools.py,sha256=1stb31zTjLTon4jCnpZG5de9rKc9QWgC0leLegvPXWo,10528
 praisonaiagents/tools/calculator_tools.py,sha256=S1xPT74Geurvjm52QMMIG29zDXVEWJmM6nmyY7yF298,9571
 praisonaiagents/tools/csv_tools.py,sha256=4Yr0QYwBXt-1BDXGLalB2eSsFR2mB5rH3KdHmRBQY6E,10036
@@ -40,7 +40,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.90.dist-info/METADATA,sha256=HrNS7WT3LB14TVihEpeYTvisD7XGxmZve7dmkiN-GzU,1268
-praisonaiagents-0.0.90.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-praisonaiagents-0.0.90.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.90.dist-info/RECORD,,
+praisonaiagents-0.0.92.dist-info/METADATA,sha256=nzF23q2sAFXQ4-TWyV3klRP3yyBUQyA7PwZ_8uvI6z8,1273
+praisonaiagents-0.0.92.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.92.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.92.dist-info/RECORD,,