praisonaiagents 0.0.156__tar.gz → 0.0.158__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/PKG-INFO +1 -1
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/agent/agent.py +9 -9
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/agent/context_agent.py +17 -17
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/agent/router_agent.py +1 -1
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/llm/llm.py +85 -1
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/llm/model_capabilities.py +2 -2
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/llm/model_router.py +1 -1
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/llm/openai_client.py +85 -23
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/mcp/mcp.py +4 -4
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/memory/memory.py +2 -2
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/task/task.py +1 -1
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/train/data/generatecot.py +1 -1
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents.egg-info/PKG-INFO +1 -1
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/pyproject.toml +1 -1
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test.py +2 -2
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_context_agent.py +2 -2
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_fix_comprehensive.py +1 -1
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_llm_self_reflection_direct.py +1 -1
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_ollama_async_fix.py +2 -2
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_ollama_fix.py +1 -1
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_ollama_sequential_fix.py +1 -1
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_self_reflection_fix_simple.py +1 -1
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/README.md +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/__init__.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/_logging.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/_warning_patch.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/agent/handoff.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/agent/image_agent.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/agents/agents.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/agents/autoagents.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/approval.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/flow_display.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/guardrails/__init__.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/guardrails/guardrail_result.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/knowledge/__init__.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/knowledge/chunking.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/knowledge/knowledge.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/llm/__init__.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/mcp/__init__.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/mcp/mcp_sse.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/memory/__init__.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/process/__init__.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/process/process.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/session.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/telemetry/__init__.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/telemetry/integration.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/telemetry/performance_cli.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/telemetry/performance_monitor.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/telemetry/performance_utils.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/telemetry/telemetry.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/telemetry/token_collector.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/telemetry/token_telemetry.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/README.md +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/__init__.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/arxiv_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/calculator_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/csv_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/duckdb_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/excel_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/file_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/json_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/mongodb_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/newspaper_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/pandas_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/python_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/searxng_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/shell_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/spider_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/test.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/wikipedia_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/xml_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/yaml_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/tools/yfinance_tools.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents.egg-info/SOURCES.txt +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents.egg-info/requires.txt +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/setup.cfg +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test-graph-memory.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_basic_agents_demo.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_embedding_logging.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_gemini_streaming_fix.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_handoff_compatibility.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_http_stream_basic.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_posthog_fixed.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_self_reflection_comprehensive.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_self_reflection_fix_verification.py +0 -0
- {praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_validation_feedback.py +0 -0
{praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/agent/agent.py

```diff
@@ -716,7 +716,7 @@ Your Goal: {self.goal}
                     error=f"Agent guardrail validation error: {str(e)}"
                 )
 
-    def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0…
+    def _apply_guardrail_with_retry(self, response_text, prompt, temperature=1.0, tools=None, task_name=None, task_description=None, task_id=None):
         """Apply guardrail validation with retry logic.
 
         Args:
@@ -859,7 +859,7 @@ Your Goal: {self.goal}"""
         self._system_prompt_cache[cache_key] = system_prompt
         return system_prompt
 
-    def _build_messages(self, prompt, temperature=0…
+    def _build_messages(self, prompt, temperature=1.0, output_json=None, output_pydantic=None, tools=None):
         """Build messages list for chat completion.
 
         Args:
@@ -1172,7 +1172,7 @@ Your Goal: {self.goal}"""
             reasoning_steps=reasoning_steps
         )
 
-    def _chat_completion(self, messages, temperature=0…
+    def _chat_completion(self, messages, temperature=1.0, tools=None, stream=True, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
         start_time = time.time()
         logging.debug(f"{self.name} sending messages to LLM: {messages}")
 
@@ -1336,7 +1336,7 @@ Your Goal: {self.goal}"""
         #     expand=False
         # )
 
-    def chat(self, prompt, temperature=0…
+    def chat(self, prompt, temperature=1.0, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=None, task_name=None, task_description=None, task_id=None):
         # Reset the final display flag for each new conversation
         self._final_display_shown = False
 
@@ -1694,7 +1694,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             cleaned = cleaned[:-3].strip()
         return cleaned
 
-    async def achat(self, prompt: str, temperature=0…
+    async def achat(self, prompt: str, temperature=1.0, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
         """Async version of chat method with self-reflection support."""
         # Reset the final display flag for each new conversation
         self._final_display_shown = False
@@ -2046,7 +2046,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 final_response = await self._openai_client.async_client.chat.completions.create(
                     model=self.llm,
                     messages=messages,
-                    temperature=0…
+                    temperature=1.0,
                     stream=True
                 )
                 full_response_text = ""
@@ -2169,7 +2169,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 prompt=actual_prompt,
                 system_prompt=self._build_system_prompt(tool_param),
                 chat_history=self.chat_history,
-                temperature=kwargs.get('temperature', 0…
+                temperature=kwargs.get('temperature', 1.0),
                 tools=tool_param,
                 output_json=kwargs.get('output_json'),
                 output_pydantic=kwargs.get('output_pydantic'),
@@ -2220,7 +2220,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             tool_param = tools
 
         # Build messages using the helper method
-        messages, original_prompt = self._build_messages(actual_prompt, kwargs.get('temperature', 0…
+        messages, original_prompt = self._build_messages(actual_prompt, kwargs.get('temperature', 1.0),
                                                          kwargs.get('output_json'), kwargs.get('output_pydantic'))
 
         # Store chat history length for potential rollback
@@ -2249,7 +2249,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         completion_args = {
             "model": self.llm,
             "messages": messages,
-            "temperature": kwargs.get('temperature', 0…
+            "temperature": kwargs.get('temperature', 1.0),
             "stream": True
         }
         if formatted_tools:
```
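Every `agent.py` hunk above is the same change: the default sampling temperature becomes `1.0` throughout the call chain (`chat`, `achat`, `_chat_completion`, `_build_messages`, `_apply_guardrail_with_retry`). A hedged sketch of what that means for callers; the import path is assumed from the package layout, and since the old default is truncated in this diff, the previous value is not shown here:

```python
from praisonaiagents import Agent  # import path assumed from the package layout

agent = Agent(instructions="You are a helpful assistant", llm="gpt-5-nano")

# In 0.0.158, omitting temperature samples at the new default of 1.0.
creative = agent.chat("Name three plausible causes of this test failure")

# Callers that relied on the old, lower default can pin a value explicitly:
focused = agent.chat("Name three plausible causes of this test failure",
                     temperature=0.2)
```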
{praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/agent/context_agent.py

```diff
@@ -434,7 +434,7 @@ This report contains all agent interactions and outputs from a complete ContextA
 
             Provide comprehensive analysis that follows the PRD template principles and enables
             AI assistants to implement features that perfectly match existing codebase patterns.""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -515,7 +515,7 @@ codebase style and architecture following PRD template principles."""
             role="Expert Manual Codebase Analysis Specialist",
             goal="Perform comprehensive manual codebase analysis following PRD methodology",
             instructions="""Analyze the codebase samples following PRD template methodology for complete understanding.""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -566,7 +566,7 @@ Analyze following PRD principles to extract patterns, conventions, and architect
             6. Design pattern implementations
             7. Code complexity metrics
             8. API and interface patterns""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -624,7 +624,7 @@ Extract comprehensive patterns that follow PRD template principles for implement
 
             For each pattern, provide the pattern name, where it's used, and how to replicate it
             following PRD template principles.""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -674,7 +674,7 @@ patterns and best practices for first-try success."""
             goal="Analyze testing patterns for comprehensive validation framework design",
             instructions="""Analyze testing patterns to understand validation approaches and create
             comprehensive test frameworks following PRD methodology.""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -801,7 +801,7 @@ Extract testing patterns for validation framework creation following PRD princip
             Confidence level for one-pass implementation
 
             Generate PRPs following this EXACT structure for first-try implementation success.""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -850,7 +850,7 @@ on the first try following PRD template principles."""
             6. CODE QUALITY: Complexity analysis, maintainability
             7. DOCUMENTATION VALIDATION: Documentation completeness
             8. DEPENDENCY VALIDATION: Dependency analysis and security""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -897,7 +897,7 @@ following PRD template principles."""
             instructions="""Compile all available documentation following PRD methodology including:
             README files, API documentation, setup guides, architecture docs, and any other
             relevant documentation that provides context for implementation.""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -943,7 +943,7 @@ following PRD template principles."""
             instructions="""Analyze integration points following PRD methodology including:
             APIs, databases, external services, configuration points, and any other
             integration requirements that affect implementation.""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -1004,7 +1004,7 @@ following PRD template principles."""
             8. DOCUMENTATION UPDATES: Documentation to create/update
             9. INTEGRATION STEPS: How to integrate with existing systems
             10. VALIDATION CHECKPOINTS: Validation steps at each phase""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -1302,7 +1302,7 @@ Every agent interaction has been saved for full audit trail and reproducibility.
             GOAL: [extracted implementation goal]
 
             Be precise and extract only what is explicitly mentioned or clearly implied.""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -1536,7 +1536,7 @@ Note: Detailed function/class metadata not available due to content access limit
             5. Documentation topics
 
             Make the output easy for a file selection agent to understand which files contain what functionality.""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -1770,7 +1770,7 @@ Focus on creating clear, structured metadata that will help with intelligent fil
             ["README.md", "src/auth/login.py", "config/settings.py", ...]
 
             Maximum 50 files for efficient analysis.""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -1923,7 +1923,7 @@ Maximum 50 files.""".format(goal=goal)
             8. EXAMPLES: Similar features that can guide {goal} implementation
 
             Since these files were pre-selected for relevance, provide deep analysis of how each contributes to implementing: {goal}""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -2024,7 +2024,7 @@ Since these files were pre-selected for relevance, explain how each contributes
             - Success criteria for {goal}
 
             Focus everything on successfully implementing: {goal}""",
-            llm=self.llm if hasattr(self, 'llm') else "gpt-…
+            llm=self.llm if hasattr(self, 'llm') else "gpt-5-nano",
             verbose=getattr(self, 'verbose', True)
         )
 
@@ -2303,13 +2303,13 @@ def create_context_agent(llm: Optional[Union[str, Any]] = None, **kwargs) -> Con
     Factory function to create a ContextAgent following Context Engineering and PRD methodology.
 
     Args:
-        llm: Language model to use (e.g., "gpt-…
+        llm: Language model to use (e.g., "gpt-5-nano", "claude-3-haiku")
        **kwargs: Additional arguments to pass to ContextAgent constructor
 
     Returns:
         ContextAgent: Configured ContextAgent for comprehensive context generation following PRD principles
     """
     if llm is None:
-        llm = "gpt-…
+        llm = "gpt-5-nano"
 
     return ContextAgent(llm=llm, **kwargs)
```
{praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/agent/router_agent.py

```diff
@@ -44,7 +44,7 @@ class RouterAgent(Agent):
         # Initialize model router
         self.model_router = model_router or ModelRouter()
         self.routing_strategy = routing_strategy
-        self.fallback_model = fallback_model or os.getenv('OPENAI_MODEL_NAME', 'gpt-…
+        self.fallback_model = fallback_model or os.getenv('OPENAI_MODEL_NAME', 'gpt-5-nano')
 
         # Process models configuration
         self.available_models = self._process_models_config(models)
```
{praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/llm/llm.py

```diff
@@ -7,6 +7,7 @@ from typing import Any, Dict, List, Optional, Union, Literal, Callable
 from pydantic import BaseModel
 import time
 import json
+import xml.etree.ElementTree as ET
 from ..main import (
     display_error,
     display_tool_call,
@@ -61,7 +62,7 @@ class LLM:
         # OpenAI
         "gpt-4": 6144,  # 8,192 actual
         "gpt-4o": 96000,  # 128,000 actual
-        "gpt-…
+        "gpt-5-nano": 96000,  # 128,000 actual
         "gpt-4-turbo": 96000,  # 128,000 actual
         "o1-preview": 96000,  # 128,000 actual
         "o1-mini": 96000,  # 128,000 actual
@@ -281,6 +282,8 @@ class LLM:
         self.min_reflect = extra_settings.get('min_reflect', 1)
         self.reasoning_steps = extra_settings.get('reasoning_steps', False)
         self.metrics = extra_settings.get('metrics', False)
+        # Auto-detect XML tool format for known models, or allow manual override
+        self.xml_tool_format = extra_settings.get('xml_tool_format', 'auto')
 
         # Token tracking
         self.last_token_metrics: Optional[TokenMetrics] = None
@@ -359,6 +362,25 @@ class LLM:
 
         return False
 
+    def _is_qwen_provider(self) -> bool:
+        """Detect if this is a Qwen provider"""
+        if not self.model:
+            return False
+
+        # Check for Qwen patterns in model name
+        model_lower = self.model.lower()
+        return any(pattern in model_lower for pattern in ["qwen", "qwen2", "qwen2.5"])
+
+    def _supports_xml_tool_format(self) -> bool:
+        """Check if the model should use XML tool format"""
+        if self.xml_tool_format == 'auto':
+            # Auto-detect based on known models that use XML format
+            return self._is_qwen_provider()
+        elif self.xml_tool_format in [True, 'true', 'True']:
+            return True
+        else:
+            return False
+
     def _generate_ollama_tool_summary(self, tool_results: List[Any], response_text: str) -> Optional[str]:
         """
         Generate a summary from tool results for Ollama to prevent infinite loops.
```
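The new `xml_tool_format` setting defaults to `'auto'`, which enables the XML tool path only for Qwen-family model names; passing a truthy value forces it on for any model. A standalone sketch of that selection logic (the free function and the sample model strings are illustrative; in the package this lives on `LLM` and reads `self.model`):

```python
def supports_xml_tool_format(model: str, xml_tool_format='auto') -> bool:
    """Mirror of LLM._supports_xml_tool_format / _is_qwen_provider."""
    if xml_tool_format == 'auto':
        # "qwen2"/"qwen2.5" are redundant with "qwen" but mirror the source list
        return any(p in model.lower() for p in ("qwen", "qwen2", "qwen2.5"))
    return xml_tool_format in (True, 'true', 'True')

assert supports_xml_tool_format("qwen2.5-coder-32b")                 # auto-detected
assert not supports_xml_tool_format("gpt-5-nano")                    # not a Qwen model
assert supports_xml_tool_format("gpt-5-nano", xml_tool_format=True)  # manual override
```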
```diff
@@ -658,6 +680,10 @@ class LLM:
         if any(self.model.startswith(prefix) for prefix in ["gemini-", "gemini/"]):
             return True
 
+        # Models with XML tool format support streaming with tools
+        if self._supports_xml_tool_format():
+            return True
+
         # For other providers, default to False to be safe
         # This ensures we make a single non-streaming call rather than risk
         # missing tool calls or making duplicate calls
@@ -1427,6 +1453,64 @@ class LLM:
         except (json.JSONDecodeError, KeyError) as e:
             logging.debug(f"Could not parse Ollama tool call from response: {e}")
 
+        # Parse tool calls from XML format in response text
+        # Try for known XML models first, or fallback for any model that might output XML
+        if not tool_calls and response_text and formatted_tools:
+            # Check if this model is known to use XML format, or try as fallback
+            should_try_xml = (self._supports_xml_tool_format() or
+                              # Fallback: try XML if response contains XML-like tool call tags
+                              '<tool_call>' in response_text)
+
+            if should_try_xml:
+                tool_calls = []
+
+                # Try proper XML parsing first
+                try:
+                    # Wrap in root element if multiple tool_call tags exist
+                    xml_content = f"<root>{response_text}</root>"
+                    root = ET.fromstring(xml_content)
+                    tool_call_elements = root.findall('.//tool_call')
+
+                    for idx, element in enumerate(tool_call_elements):
+                        if element.text:
+                            try:
+                                tool_json = json.loads(element.text.strip())
+                                if isinstance(tool_json, dict) and "name" in tool_json:
+                                    tool_calls.append({
+                                        "id": f"tool_{iteration_count}_{idx}",
+                                        "type": "function",
+                                        "function": {
+                                            "name": tool_json["name"],
+                                            "arguments": json.dumps(tool_json.get("arguments", {}))
+                                        }
+                                    })
+                            except (json.JSONDecodeError, KeyError) as e:
+                                logging.debug(f"Could not parse tool call JSON: {e}")
+                                continue
+                except ET.ParseError:
+                    # Fallback to regex if XML parsing fails
+                    tool_call_pattern = r'<tool_call>\s*(\{(?:[^{}]|{[^{}]*})*\})\s*</tool_call>'
+                    matches = re.findall(tool_call_pattern, response_text, re.DOTALL)
+
+                    for idx, match in enumerate(matches):
+                        try:
+                            tool_json = json.loads(match.strip())
+                            if isinstance(tool_json, dict) and "name" in tool_json:
+                                tool_calls.append({
+                                    "id": f"tool_{iteration_count}_{idx}",
+                                    "type": "function",
+                                    "function": {
+                                        "name": tool_json["name"],
+                                        "arguments": json.dumps(tool_json.get("arguments", {}))
+                                    }
+                                })
+                        except (json.JSONDecodeError, KeyError) as e:
+                            logging.debug(f"Could not parse XML tool call: {e}")
+                            continue
+
+                if tool_calls:
+                    logging.debug(f"Parsed {len(tool_calls)} tool call(s) from XML format")
+
         # For Ollama, if response is empty but we have tools, prompt for tool usage
         if self._is_ollama_provider() and (not response_text or response_text.strip() == "") and formatted_tools and iteration_count == 0:
             messages.append({
```
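The large hunk above gives any model whose text output embeds `<tool_call>{...}</tool_call>` tags a second chance at tool execution: strict `ElementTree` parsing first, then a regex fallback when the surrounding text is not well-formed XML. A trimmed, self-contained sketch of that two-stage parse; the sample response string is invented for illustration:

```python
import json
import re
import xml.etree.ElementTree as ET

def parse_xml_tool_calls(response_text: str, iteration_count: int = 0) -> list:
    """Extract OpenAI-style tool_call dicts from <tool_call>{...}</tool_call> tags."""
    tool_calls = []
    try:
        # Wrap in a root element so multiple tags still parse as one document
        root = ET.fromstring(f"<root>{response_text}</root>")
        payloads = [el.text for el in root.findall('.//tool_call') if el.text]
    except ET.ParseError:
        # Regex fallback for responses that are not well-formed XML overall
        payloads = re.findall(r'<tool_call>\s*(\{(?:[^{}]|{[^{}]*})*\})\s*</tool_call>',
                              response_text, re.DOTALL)
    for idx, payload in enumerate(payloads):
        try:
            tool_json = json.loads(payload.strip())
        except json.JSONDecodeError:
            continue  # skip tags whose body is not valid JSON
        if isinstance(tool_json, dict) and "name" in tool_json:
            tool_calls.append({
                "id": f"tool_{iteration_count}_{idx}",
                "type": "function",
                "function": {
                    "name": tool_json["name"],
                    "arguments": json.dumps(tool_json.get("arguments", {})),
                },
            })
    return tool_calls

sample = ('Sure, let me check.\n'
          '<tool_call>{"name": "get_stock_price", "arguments": {"ticker": "AAPL"}}</tool_call>')
print(parse_xml_tool_calls(sample))  # one OpenAI-shaped function call dict
```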
{praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/llm/model_capabilities.py
RENAMED

```diff
@@ -7,7 +7,7 @@ This module defines which models support specific features like structured outpu
 MODELS_SUPPORTING_STRUCTURED_OUTPUTS = {
     # OpenAI models
     "gpt-4o",
-    "gpt-…
+    "gpt-5-nano",
     "gpt-4-turbo",
     "gpt-4-turbo-preview",
     "gpt-4-turbo-2024-04-09",
@@ -46,7 +46,7 @@ MODELS_SUPPORTING_STRUCTURED_OUTPUTS = {
 MODELS_NOT_SUPPORTING_STRUCTURED_OUTPUTS = {
     # Audio preview models
     "gpt-4o-audio-preview",
-    "gpt-…
+    "gpt-5-nano-audio-preview",
 
     # Legacy o1 models (don't support system messages either)
     "o1-preview-2024-09-12",
```
{praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/llm/model_router.py

```diff
@@ -51,7 +51,7 @@ class ModelRouter:
     DEFAULT_MODELS = [
         # Lightweight/cheap models for simple tasks
         ModelProfile(
-            name="gpt-…
+            name="gpt-5-nano",
             provider="openai",
             complexity_range=(TaskComplexity.SIMPLE, TaskComplexity.MODERATE),
             cost_per_1k_tokens=0.00075,  # Average of $0.00015 input, $0.0006 output
```
{praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/llm/openai_client.py

```diff
@@ -230,19 +230,34 @@ class OpenAIClient:
                 f"(e.g., 'http://localhost:1234/v1') and you can use a placeholder API key by setting OPENAI_API_KEY='{LOCAL_SERVER_API_KEY_PLACEHOLDER}'"
             )
 
-        # Initialize…
-        self._sync_client =…
+        # Initialize clients lazily
+        self._sync_client = None
         self._async_client = None
 
         # Set up logging
         self.logger = logging.getLogger(__name__)
 
-        # Initialize console…
-        self.…
+        # Initialize console lazily
+        self._console = None
+
+        # Cache for formatted tools and fixed schemas
+        self._formatted_tools_cache = {}
+        self._fixed_schema_cache = {}
+        self._max_cache_size = 100
+
+    @property
+    def console(self):
+        """Lazily initialize Rich Console only when needed."""
+        if self._console is None:
+            from rich.console import Console
+            self._console = Console()
+        return self._console
 
     @property
     def sync_client(self) -> OpenAI:
-        """Get the synchronous OpenAI client."""
+        """Get the synchronous OpenAI client (lazy initialization)."""
+        if self._sync_client is None:
+            self._sync_client = OpenAI(api_key=self.api_key, base_url=self.base_url)
         return self._sync_client
 
     @property
```
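The constructor now only records configuration; the `OpenAI` client and the Rich `Console` are built on first property access. A minimal sketch of the same lazy-construction pattern, assuming only the standard `openai` package constructor (which validates configuration but makes no network call); the class and attribute names are illustrative stand-ins:

```python
from openai import OpenAI

class LazyClientHolder:
    """Illustrative stand-in for OpenAIClient's lazy attributes."""

    def __init__(self, api_key=None, base_url=None):
        self.api_key = api_key
        self.base_url = base_url
        self._sync_client = None  # nothing heavy built at __init__ time

    @property
    def sync_client(self) -> OpenAI:
        # First access constructs the client; later accesses reuse it
        if self._sync_client is None:
            self._sync_client = OpenAI(api_key=self.api_key, base_url=self.base_url)
        return self._sync_client

holder = LazyClientHolder(api_key="sk-placeholder")  # cheap to create
client = holder.sync_client                          # built here, on demand
assert client is holder.sync_client                  # cached thereafter
```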
```diff
@@ -350,6 +365,35 @@ class OpenAIClient:
 
         return fixed_schema
 
+    def _get_tools_cache_key(self, tools: List[Any]) -> str:
+        """Generate a cache key for tools."""
+        parts = []
+        for tool in tools:
+            if isinstance(tool, dict):
+                # For dict tools, use sorted JSON representation
+                parts.append(json.dumps(tool, sort_keys=True))
+            elif callable(tool):
+                # For functions, use module.name
+                parts.append(f"{tool.__module__}.{tool.__name__}")
+            elif isinstance(tool, str):
+                # For string tools, use as-is
+                parts.append(tool)
+            elif isinstance(tool, list):
+                # For lists, recursively process
+                subparts = []
+                for subtool in tool:
+                    if isinstance(subtool, dict):
+                        subparts.append(json.dumps(subtool, sort_keys=True))
+                    elif callable(subtool):
+                        subparts.append(f"{subtool.__module__}.{subtool.__name__}")
+                    else:
+                        subparts.append(str(subtool))
+                parts.append(f"[{','.join(subparts)}]")
+            else:
+                # For other types, use string representation
+                parts.append(str(tool))
+        return "|".join(parts)
+
     def format_tools(self, tools: Optional[List[Any]]) -> Optional[List[Dict]]:
         """
         Format tools for OpenAI API.
```
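`format_tools` is pure with respect to its input, so its output can be memoized once tools are reduced to a stable string key; the `_max_cache_size = 100` guard added in the constructor keeps the memo bounded. A condensed sketch of the keying idea (nested-list handling from the real method omitted; `get_weather` is a made-up tool):

```python
import json

def get_tools_cache_key(tools) -> str:
    """Condensed version of _get_tools_cache_key."""
    parts = []
    for tool in tools:
        if isinstance(tool, dict):
            parts.append(json.dumps(tool, sort_keys=True))  # key-order-insensitive
        elif callable(tool):
            parts.append(f"{tool.__module__}.{tool.__name__}")
        else:
            parts.append(str(tool))
    return "|".join(parts)

def get_weather(city: str) -> str:
    return f"sunny in {city}"

# Same tools in the same order yield the same key, even if dict key order differs,
# so a later format_tools call can return the cached formatted list.
assert (get_tools_cache_key([get_weather, {"b": 2, "a": 1}])
        == get_tools_cache_key([get_weather, {"a": 1, "b": 2}]))
```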
```diff
@@ -370,6 +414,11 @@ class OpenAIClient:
         """
         if not tools:
             return None
+
+        # Check cache first
+        cache_key = self._get_tools_cache_key(tools)
+        if cache_key in self._formatted_tools_cache:
+            return self._formatted_tools_cache[cache_key]
 
         formatted_tools = []
         for tool in tools:
@@ -424,8 +473,13 @@ class OpenAIClient:
             except (TypeError, ValueError) as e:
                 logging.error(f"Tools are not JSON serializable: {e}")
                 return None
+
+        # Cache the result
+        result = formatted_tools if formatted_tools else None
+        if result is not None and len(self._formatted_tools_cache) < self._max_cache_size:
+            self._formatted_tools_cache[cache_key] = result
 
-        return…
+        return result
 
     def _generate_tool_definition(self, func: Callable) -> Optional[Dict]:
         """Generate a tool definition from a callable function."""
@@ -513,7 +567,7 @@ class OpenAIClient:
         self,
         messages: List[Dict],
         model: str,
-        temperature: float = 0…
+        temperature: float = 1.0,
         tools: Optional[List[Dict]] = None,
         start_time: Optional[float] = None,
         console: Optional[Console] = None,
@@ -546,7 +600,7 @@ class OpenAIClient:
             console = self.console
 
         # Create the response stream
-        response_stream = self.…
+        response_stream = self.sync_client.chat.completions.create(
             model=model,
             messages=messages,
             temperature=temperature,
@@ -600,7 +654,7 @@ class OpenAIClient:
         self,
         messages: List[Dict],
         model: str,
-        temperature: float = 0…
+        temperature: float = 1.0,
         tools: Optional[List[Dict]] = None,
         start_time: Optional[float] = None,
         console: Optional[Console] = None,
@@ -687,7 +741,7 @@ class OpenAIClient:
         self,
         messages: List[Dict[str, Any]],
         model: str = "gpt-4o",
-        temperature: float = 0…
+        temperature: float = 1.0,
         stream: bool = False,
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
@@ -723,7 +777,7 @@ class OpenAIClient:
             params["tool_choice"] = tool_choice
 
         try:
-            return self.…
+            return self.sync_client.chat.completions.create(**params)
         except Exception as e:
             self.logger.error(f"Error creating completion: {e}")
             raise
@@ -732,7 +786,7 @@ class OpenAIClient:
         self,
         messages: List[Dict[str, Any]],
         model: str = "gpt-4o",
-        temperature: float = 0…
+        temperature: float = 1.0,
         stream: bool = False,
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
@@ -777,7 +831,7 @@ class OpenAIClient:
         self,
         messages: List[Dict[str, Any]],
         model: str = "gpt-4o",
-        temperature: float = 0…
+        temperature: float = 1.0,
         tools: Optional[List[Any]] = None,
         execute_tool_fn: Optional[Callable] = None,
         stream: bool = True,
@@ -955,7 +1009,7 @@ class OpenAIClient:
         self,
         messages: List[Dict[str, Any]],
         model: str = "gpt-4o",
-        temperature: float = 0…
+        temperature: float = 1.0,
         tools: Optional[List[Any]] = None,
         execute_tool_fn: Optional[Callable] = None,
         stream: bool = True,
@@ -1136,7 +1190,7 @@ class OpenAIClient:
         self,
         messages: List[Dict[str, Any]],
         model: str = "gpt-4o",
-        temperature: float = 0…
+        temperature: float = 1.0,
         tools: Optional[List[Any]] = None,
         execute_tool_fn: Optional[Callable] = None,
         reasoning_steps: bool = False,
@@ -1173,7 +1227,7 @@ class OpenAIClient:
         while iteration_count < max_iterations:
             try:
                 # Create streaming response
-                response_stream = self.…
+                response_stream = self.sync_client.chat.completions.create(
                     model=model,
                     messages=messages,
                     temperature=temperature,
@@ -1281,7 +1335,7 @@ class OpenAIClient:
         messages: List[Dict[str, Any]],
         response_format: BaseModel,
         model: str = "gpt-4o",
-        temperature: float = 0…
+        temperature: float = 1.0,
         **kwargs
     ) -> Any:
         """
@@ -1298,7 +1352,7 @@ class OpenAIClient:
             Parsed response according to the response_format
         """
         try:
-            response = self.…
+            response = self.sync_client.beta.chat.completions.parse(
                 model=model,
                 messages=messages,
                 temperature=temperature,
@@ -1315,7 +1369,7 @@ class OpenAIClient:
         messages: List[Dict[str, Any]],
         response_format: BaseModel,
         model: str = "gpt-4o",
-        temperature: float = 0…
+        temperature: float = 1.0,
         **kwargs
     ) -> Any:
         """
@@ -1346,14 +1400,14 @@ class OpenAIClient:
 
     def close(self):
         """Close the OpenAI clients."""
-        if hasattr(self._sync_client, 'close'):
+        if self._sync_client and hasattr(self._sync_client, 'close'):
             self._sync_client.close()
         if self._async_client and hasattr(self._async_client, 'close'):
             self._async_client.close()
 
     async def aclose(self):
         """Asynchronously close the OpenAI clients."""
-        if hasattr(self._sync_client, 'close'):
+        if self._sync_client and hasattr(self._sync_client, 'close'):
             await asyncio.to_thread(self._sync_client.close)
         if self._async_client and hasattr(self._async_client, 'aclose'):
             await self._async_client.aclose()
@@ -1361,6 +1415,7 @@ class OpenAIClient:
 
 # Global client instance (similar to main.py pattern)
 _global_client = None
+_global_client_params = None
 
 def get_openai_client(api_key: Optional[str] = None, base_url: Optional[str] = None) -> OpenAIClient:
     """
@@ -1373,9 +1428,16 @@ def get_openai_client(api_key: Optional[str] = None, base_url: Optional[str] = N
     Returns:
         OpenAIClient instance
     """
-    global _global_client
+    global _global_client, _global_client_params
+
+    # Normalize parameters for comparison
+    normalized_api_key = api_key or os.getenv("OPENAI_API_KEY")
+    normalized_base_url = base_url
+    current_params = (normalized_api_key, normalized_base_url)
 
-    if…
+    # Only create new client if parameters changed or first time
+    if _global_client is None or _global_client_params != current_params:
         _global_client = OpenAIClient(api_key=api_key, base_url=base_url)
+        _global_client_params = current_params
 
     return _global_client
```
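With the companion `_global_client_params` added above, `get_openai_client` now behaves as a parameter-aware singleton: repeated calls with the same credentials reuse one client, while changed parameters rebuild it. A hedged usage sketch (module path taken from the file list; running it needs `OPENAI_API_KEY` set, since the constructor validates credentials):

```python
import os
from praisonaiagents.llm.openai_client import get_openai_client

os.environ.setdefault("OPENAI_API_KEY", "sk-placeholder")  # placeholder for the sketch

a = get_openai_client()   # first call constructs the client
b = get_openai_client()   # same parameters -> cached instance
assert a is b

# Changing a parameter invalidates the cached instance and builds a new one
c = get_openai_client(base_url="http://localhost:1234/v1")
assert c is not b
```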
{praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/mcp/mcp.py

```diff
@@ -155,7 +155,7 @@ class MCP:
     # Method 1: Using command and args separately
     agent = Agent(
         instructions="You are a helpful assistant...",
-        llm="gpt-…
+        llm="gpt-5-nano",
         tools=MCP(
             command="/path/to/python",
             args=["/path/to/app.py"]
@@ -165,14 +165,14 @@ class MCP:
     # Method 2: Using a single command string
     agent = Agent(
         instructions="You are a helpful assistant...",
-        llm="gpt-…
+        llm="gpt-5-nano",
         tools=MCP("/path/to/python /path/to/app.py")
     )
 
     # Method 3: Using an SSE endpoint
     agent = Agent(
         instructions="You are a helpful assistant...",
-        llm="gpt-…
+        llm="gpt-5-nano",
         tools=MCP("http://localhost:8080/sse")
     )
 
@@ -514,7 +514,7 @@ class MCP:
     """Convert the MCP tool to an OpenAI-compatible tool definition.
 
     This method is specifically invoked by the Agent class when using
-    provider/model format (e.g., "openai/gpt-…
+    provider/model format (e.g., "openai/gpt-5-nano").
 
     Returns:
         dict or list: OpenAI-compatible tool definition(s)
```
{praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/memory/memory.py

```diff
@@ -1442,7 +1442,7 @@ class Memory:
             import litellm
 
             # Convert model name if it's in litellm format
-            model_name = llm or "gpt-…
+            model_name = llm or "gpt-5-nano"
 
             response = litellm.completion(
                 model=model_name,
@@ -1459,7 +1459,7 @@ class Memory:
             client = OpenAI()
 
             response = client.chat.completions.create(
-                model=llm or "gpt-…
+                model=llm or "gpt-5-nano",
                 messages=[{
                     "role": "user",
                     "content": custom_prompt or default_prompt
```
{praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/praisonaiagents/task/task.py

```diff
@@ -322,7 +322,7 @@ class Task:
                 if hasattr(self.agent.llm_instance, 'model'):
                     llm_model = self.agent.llm_instance.model
                 else:
-                    llm_model = "gpt-…
+                    llm_model = "gpt-5-nano"  # Default fallback
             elif hasattr(self.agent, 'llm') and self.agent.llm:
                 # For standard model strings
                 llm_model = self.agent.llm
```
The remaining hunks apply the same default-model update across the test files listed above:

```diff
@@ -26,7 +26,7 @@ planner_agent = Agent(
     goal="Create detailed story outlines with chapter breakdowns",
     backstory="Expert storyteller skilled in narrative structure and plot development",
     verbose=True,
-    llm="gpt-…
+    llm="gpt-5-nano",
     self_reflect=False
 )
 
@@ -37,7 +37,7 @@ writer_agent = Agent(
     goal="Write engaging and cohesive story chapters",
     backstory="Experienced writer skilled in bringing stories to life with vivid details and engaging narrative",
     verbose=True,
-    llm="gpt-…
+    llm="gpt-5-nano",
     self_reflect=False
 )
 
@@ -56,12 +56,12 @@ def test_basic_instantiation():
         name="Test Context Engineer",
         role="Test Role",
         goal="Test Goal",
-        llm="gpt-…
+        llm="gpt-5-nano"
     )
     print("✅ Successfully created ContextAgent with custom parameters")
 
     # Test factory function
-    factory_agent = create_context_agent(llm="gpt-…
+    factory_agent = create_context_agent(llm="gpt-5-nano")
     print("✅ Successfully created ContextAgent using factory function")
 
     return True, [context_agent, custom_agent, factory_agent]
@@ -57,7 +57,7 @@ print("=" * 60)
 
 agent_gpt4 = Agent(
     instructions="You are a helpful assistant. You can use the tools provided to you to help the user. When asked to multiply a stock price, first get the stock price, then multiply it.",
-    llm="gpt-…
+    llm="gpt-5-nano",
     tools=[get_stock_price, multiply],
     verbose=True
 )
@@ -135,12 +135,12 @@ async def main():
 
     # Test sync methods
     print("\n1. Testing SYNC methods:")
-    openai_sync_success = test_model_sync("openai/gpt-…
+    openai_sync_success = test_model_sync("openai/gpt-5-nano")
     ollama_sync_success = test_model_sync("ollama/llama3.2")
 
     # Test async methods
     print("\n2. Testing ASYNC methods:")
-    openai_async_success = await test_model_async("openai/gpt-…
+    openai_async_success = await test_model_async("openai/gpt-5-nano")
     ollama_async_success = await test_model_async("ollama/llama3.2")
 
     # Summary
@@ -82,7 +82,7 @@ if __name__ == "__main__":
 
     # Test with OpenAI first (as baseline)
     print("\n1. Testing with OpenAI (baseline):")
-    openai_success = test_model("openai/gpt-…
+    openai_success = test_model("openai/gpt-5-nano")
 
     # Test with Ollama
     print("\n2. Testing with Ollama:")
@@ -103,7 +103,7 @@ def test_provider_detection():
     print("✅ Ollama prefix detection works")
 
     # Test non-Ollama provider
-    openai_llm = LLM(model="gpt-…
+    openai_llm = LLM(model="gpt-5-nano")
     assert not openai_llm._is_ollama_provider(), "Should not detect OpenAI as Ollama"
     print("✅ Non-Ollama provider detection works")
 
```
{praisonaiagents-0.0.156 → praisonaiagents-0.0.158}/tests/test_self_reflection_fix_simple.py
RENAMED

```diff
@@ -14,7 +14,7 @@ def test_self_reflection_fix():
     goal="Solve math problems accurately",
     backstory="You are a helpful math assistant",
     self_reflect=True,
-    llm="gpt-…
+    llm="gpt-5-nano",  # Use a more widely available model
     verbose=True,
     tools=[calculator],
     min_reflect=1,
```