praisonaiagents 0.0.123__tar.gz → 0.0.124__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/PKG-INFO +1 -1
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/agent/agent.py +329 -192
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/agents/autoagents.py +1 -1
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/llm/llm.py +17 -1
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents.egg-info/PKG-INFO +1 -1
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/pyproject.toml +2 -2
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/README.md +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/__init__.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/agent/handoff.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/agent/image_agent.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/agents/agents.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/approval.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/guardrails/__init__.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/guardrails/guardrail_result.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/knowledge/__init__.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/knowledge/chunking.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/knowledge/knowledge.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/llm/__init__.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/llm/model_capabilities.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/llm/openai_client.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/mcp/__init__.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/mcp/mcp.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/mcp/mcp_sse.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/memory/__init__.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/memory/memory.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/process/__init__.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/process/process.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/session.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/task/task.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/telemetry/__init__.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/telemetry/integration.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/telemetry/telemetry.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/README.md +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/__init__.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/arxiv_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/calculator_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/csv_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/duckdb_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/excel_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/file_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/json_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/newspaper_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/pandas_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/python_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/searxng_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/shell_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/spider_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/test.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/train/data/generatecot.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/wikipedia_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/xml_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/yaml_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents/tools/yfinance_tools.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents.egg-info/SOURCES.txt +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents.egg-info/requires.txt +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/setup.cfg +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/tests/test-graph-memory.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/tests/test.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/tests/test_handoff_compatibility.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/tests/test_ollama_async_fix.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/tests/test_ollama_fix.py +0 -0
- {praisonaiagents-0.0.123 → praisonaiagents-0.0.124}/tests/test_posthog_fixed.py +0 -0
praisonaiagents/agent/agent.py

@@ -444,6 +444,8 @@ class Agent:
         self.embedder_config = embedder_config
         self.knowledge = knowledge
         self.use_system_prompt = use_system_prompt
+        # NOTE: chat_history is not thread-safe. If concurrent access is needed,
+        # consider using threading.Lock or other synchronization mechanisms
         self.chat_history = []
         self.markdown = markdown
         self.stream = stream
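The new comment only documents a limitation; this release does not add any locking itself. As a rough illustration of the kind of caller-side synchronization the note points at, here is a minimal sketch built around a hypothetical wrapper (nothing below is part of praisonaiagents):

```python
import threading

class LockedHistory:
    """Hypothetical helper that serializes access to a shared chat_history list."""

    def __init__(self, agent):
        self.agent = agent
        self._lock = threading.Lock()  # assumption: praisonaiagents does not provide this

    def append(self, role: str, content: str) -> None:
        # Guard writes so concurrent threads cannot interleave partial turns
        with self._lock:
            self.agent.chat_history.append({"role": role, "content": content})

    def snapshot(self) -> list:
        # Copy under the lock so readers never observe a half-updated list
        with self._lock:
            return list(self.agent.chat_history)
```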
@@ -711,8 +713,55 @@ Your Goal: {self.goal}
         )

         return current_response
+
+    def _build_system_prompt(self, tools=None):
+        """Build the system prompt with tool information.
+
+        Args:
+            tools: Optional list of tools to use (defaults to self.tools)
+
+        Returns:
+            str: The system prompt or None if use_system_prompt is False
+        """
+        if not self.use_system_prompt:
+            return None
+
+        system_prompt = f"""{self.backstory}\n
+Your Role: {self.role}\n
+Your Goal: {self.goal}"""
+
+        # Add tool usage instructions if tools are available
+        # Use provided tools or fall back to self.tools
+        tools_to_use = tools if tools is not None else self.tools
+        if tools_to_use:
+            tool_names = []
+            for tool in tools_to_use:
+                try:
+                    if callable(tool) and hasattr(tool, '__name__'):
+                        tool_names.append(tool.__name__)
+                    elif isinstance(tool, dict) and isinstance(tool.get('function'), dict) and 'name' in tool['function']:
+                        tool_names.append(tool['function']['name'])
+                    elif isinstance(tool, str):
+                        tool_names.append(tool)
+                    elif hasattr(tool, "to_openai_tool"):
+                        # Handle MCP tools
+                        openai_tools = tool.to_openai_tool()
+                        if isinstance(openai_tools, list):
+                            for t in openai_tools:
+                                if isinstance(t, dict) and 'function' in t and 'name' in t['function']:
+                                    tool_names.append(t['function']['name'])
+                        elif isinstance(openai_tools, dict) and 'function' in openai_tools:
+                            tool_names.append(openai_tools['function']['name'])
+                except (AttributeError, KeyError, TypeError) as e:
+                    logging.warning(f"Could not extract tool name from {tool}: {e}")
+                    continue
+
+            if tool_names:
+                system_prompt += f"\n\nYou have access to the following tools: {', '.join(tool_names)}. Use these tools when appropriate to help complete your tasks. Always use tools when they can help provide accurate information or perform actions."
+
+        return system_prompt

-    def _build_messages(self, prompt, temperature=0.2, output_json=None, output_pydantic=None):
+    def _build_messages(self, prompt, temperature=0.2, output_json=None, output_pydantic=None, tools=None):
        """Build messages list for chat completion.

        Args:
@@ -720,17 +769,13 @@ Your Goal: {self.goal}
            temperature: Temperature for the chat
            output_json: Optional Pydantic model for JSON output
            output_pydantic: Optional Pydantic model for JSON output (alias)
+            tools: Optional list of tools to use (defaults to self.tools)

        Returns:
            tuple: (messages list, original prompt)
        """
-        # Build system prompt
-        system_prompt =
-        if self.use_system_prompt:
-            system_prompt = f"""{self.backstory}\n
-Your Role: {self.role}\n
-Your Goal: {self.goal}
-"""
+        # Build system prompt using the helper method
+        system_prompt = self._build_system_prompt(tools)

        # Use openai_client's build_messages method if available
        if self._openai_client is not None:
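The helper accepts tools in several shapes: plain callables, OpenAI-style dicts, bare strings, and MCP objects exposing to_openai_tool(). The sketch below re-implements just the name-extraction idea for the three simplest shapes so it can be run outside the Agent class; `web_search` and the sample specs are made up for illustration:

```python
def extract_tool_names(tools):
    """Collect display names from mixed tool specs (standalone sketch of the diff's logic)."""
    names = []
    for tool in tools:
        if callable(tool) and hasattr(tool, "__name__"):
            names.append(tool.__name__)
        elif isinstance(tool, dict) and isinstance(tool.get("function"), dict) and "name" in tool["function"]:
            names.append(tool["function"]["name"])
        elif isinstance(tool, str):
            names.append(tool)
    return names


def web_search(query: str) -> str:  # hypothetical callable tool
    return f"results for {query}"


print(extract_tool_names([web_search, {"function": {"name": "calculator"}}, "wiki_lookup"]))
# ['web_search', 'calculator', 'wiki_lookup']
```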
@@ -1154,10 +1199,27 @@ Your Goal: {self.goal}
                            tool_param = [openai_tool]
                        logging.debug(f"Converted MCP tool: {tool_param}")

-                #
-
+                # Store chat history length for potential rollback
+                chat_history_length = len(self.chat_history)
+
+                # Normalize prompt content for consistent chat history storage
+                normalized_content = prompt
+                if isinstance(prompt, list):
+                    # Extract text from multimodal prompts
+                    normalized_content = next((item["text"] for item in prompt if item.get("type") == "text"), "")
+
+                # Prevent duplicate messages
+                if not (self.chat_history and
+                        self.chat_history[-1].get("role") == "user" and
+                        self.chat_history[-1].get("content") == normalized_content):
+                    # Add user message to chat history BEFORE LLM call so handoffs can access it
+                    self.chat_history.append({"role": "user", "content": normalized_content})
+
+                try:
+                    # Pass everything to LLM class
+                    response_text = self.llm_instance.get_response(
                        prompt=prompt,
-                        system_prompt=
+                        system_prompt=self._build_system_prompt(tools),
                        chat_history=self.chat_history,
                        temperature=temperature,
                        tools=tool_param,
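Both the custom-LLM and OpenAI paths now reduce multimodal prompts to their text part and skip appending a user turn that would duplicate the previous one. A standalone sketch of that normalize-and-dedupe step, using a throwaway history list:

```python
def normalize_prompt(prompt):
    """Reduce a multimodal prompt (a list of parts) to its first text part."""
    if isinstance(prompt, list):
        return next((item["text"] for item in prompt if item.get("type") == "text"), "")
    return prompt


def append_user_turn(chat_history, prompt):
    """Append a user message unless it repeats the last stored user turn."""
    content = normalize_prompt(prompt)
    last = chat_history[-1] if chat_history else None
    if not (last and last.get("role") == "user" and last.get("content") == content):
        chat_history.append({"role": "user", "content": content})


history = []
append_user_turn(history, [{"type": "text", "text": "Hi"}, {"type": "image_url", "image_url": {"url": "img.png"}}])
append_user_turn(history, "Hi")  # skipped: identical to the last user turn
print(history)  # [{'role': 'user', 'content': 'Hi'}]
```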
@@ -1174,22 +1236,28 @@ Your Goal: {self.goal}
                        agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
                        execute_tool_fn=self.execute_tool, # Pass tool execution function
                        reasoning_steps=reasoning_steps
-                )
+                    )

-
-                self.chat_history.append({"role": "assistant", "content": response_text})
+                    self.chat_history.append({"role": "assistant", "content": response_text})

-
-
-
-
+                    # Log completion time if in debug mode
+                    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                        total_time = time.time() - start_time
+                        logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")

-
-
-
-
+                    # Apply guardrail validation for custom LLM response
+                    try:
+                        validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                        return validated_response
+                    except Exception as e:
+                        logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
+                        # Rollback chat history on guardrail failure
+                        self.chat_history = self.chat_history[:chat_history_length]
+                        return None
                except Exception as e:
-
+                    # Rollback chat history if LLM call fails
+                    self.chat_history = self.chat_history[:chat_history_length]
+                    display_error(f"Error in LLM chat: {e}")
                    return None
            except Exception as e:
                display_error(f"Error in LLM chat: {e}")
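The recurring pattern in this and the following hunks is: record len(self.chat_history) before the call, then truncate back to that length when the LLM call or the guardrail check fails, so a failed turn leaves no stray user/assistant messages behind. A minimal standalone sketch of that snapshot-and-rollback idea (`flaky_llm` is a made-up stand-in):

```python
chat_history = [{"role": "user", "content": "earlier turn"}]


def flaky_llm(prompt: str) -> str:
    """Made-up stand-in for an LLM call that can fail."""
    if "fail" in prompt:
        raise RuntimeError("simulated LLM failure")
    return f"answer to: {prompt}"


def chat_once(prompt: str):
    rollback_to = len(chat_history)     # snapshot before touching the history
    chat_history.append({"role": "user", "content": prompt})
    try:
        reply = flaky_llm(prompt)
        chat_history.append({"role": "assistant", "content": reply})
        return reply
    except Exception:
        del chat_history[rollback_to:]  # drop everything appended by the failed turn
        return None


print(chat_once("hello"), len(chat_history))        # reply, history grew by two messages
print(chat_once("please fail"), len(chat_history))  # None, history unchanged
```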
@@ -1197,191 +1265,209 @@ Your Goal: {self.goal}
        else:
            # Use the new _build_messages helper method
            messages, original_prompt = self._build_messages(prompt, temperature, output_json, output_pydantic)
+
+            # Store chat history length for potential rollback
+            chat_history_length = len(self.chat_history)
+
+            # Normalize original_prompt for consistent chat history storage
+            normalized_content = original_prompt
+            if isinstance(original_prompt, list):
+                # Extract text from multimodal prompts
+                normalized_content = next((item["text"] for item in original_prompt if item.get("type") == "text"), "")
+
+            # Prevent duplicate messages
+            if not (self.chat_history and
+                    self.chat_history[-1].get("role") == "user" and
+                    self.chat_history[-1].get("content") == normalized_content):
+                # Add user message to chat history BEFORE LLM call so handoffs can access it
+                self.chat_history.append({"role": "user", "content": normalized_content})

-            final_response_text = None
            reflection_count = 0
            start_time = time.time()
-
-            while
-
-
-                # Handle both string and list prompts for instruction display
-                display_text = prompt
-                if isinstance(prompt, list):
-                    # Extract text content from multimodal prompt
-                    display_text = next((item["text"] for item in prompt if item["type"] == "text"), "")
-
-                if display_text and str(display_text).strip():
-                    # Pass agent information to display_instruction
-                    agent_tools = [t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools]
-                    display_instruction(
-                        f"Agent {self.name} is processing prompt: {display_text}",
-                        console=self.console,
-                        agent_name=self.name,
-                        agent_role=self.role,
-                        agent_tools=agent_tools
-                    )
-
-                response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream)
-                if not response:
-                    return None
-
-                response_text = response.choices[0].message.content.strip()
-
-                # Handle output_json or output_pydantic if specified
-                if output_json or output_pydantic:
-                    # Add to chat history and return raw response
-                    self.chat_history.append({"role": "user", "content": original_prompt})
-                    self.chat_history.append({"role": "assistant", "content": response_text})
-                    # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
-                    if self.verbose and not self._using_custom_llm:
-                        display_interaction(original_prompt, response_text, markdown=self.markdown,
-                                            generation_time=time.time() - start_time, console=self.console)
-                    return response_text
-
-                if not self.self_reflect:
-                    self.chat_history.append({"role": "user", "content": original_prompt})
-                    self.chat_history.append({"role": "assistant", "content": response_text})
-                    if self.verbose:
-                        logging.debug(f"Agent {self.name} final response: {response_text}")
-                    # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
-                    if self.verbose and not self._using_custom_llm:
-                        display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
-                    # Return only reasoning content if reasoning_steps is True
-                    if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
-                        # Apply guardrail to reasoning content
-                        try:
-                            validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools)
-                            return validated_reasoning
-                        except Exception as e:
-                            logging.error(f"Agent {self.name}: Guardrail validation failed for reasoning content: {e}")
-                            return None
-                    # Apply guardrail to regular response
-                    try:
-                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
-                        return validated_response
-                    except Exception as e:
-                        logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
-                        return None
-
-                reflection_prompt = f"""
-Reflect on your previous response: '{response_text}'.
-{self.reflect_prompt if self.reflect_prompt else "Identify any flaws, improvements, or actions."}
-Provide a "satisfactory" status ('yes' or 'no').
-Output MUST be JSON with 'reflection' and 'satisfactory'.
-"""
-                logging.debug(f"{self.name} reflection attempt {reflection_count+1}, sending prompt: {reflection_prompt}")
-                messages.append({"role": "user", "content": reflection_prompt})
-
+
+            # Wrap entire while loop in try-except for rollback on any failure
+            try:
+                while True:
                    try:
-                    # Check if we're using a custom LLM (like Gemini)
-                    if self._using_custom_llm or self._openai_client is None:
-                        # For custom LLMs, we need to handle reflection differently
-                        # Use non-streaming to get complete JSON response
-                        reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False)
-
-                        if not reflection_response or not reflection_response.choices:
-                            raise Exception("No response from reflection request")
-
-                        reflection_text = reflection_response.choices[0].message.content.strip()
-
-                        # Clean the JSON output
-                        cleaned_json = self.clean_json_output(reflection_text)
-
-                        # Parse the JSON manually
-                        reflection_data = json.loads(cleaned_json)
-
-                        # Create a reflection output object manually
-                        class CustomReflectionOutput:
-                            def __init__(self, data):
-                                self.reflection = data.get('reflection', '')
-                                self.satisfactory = data.get('satisfactory', 'no').lower()
-
-                        reflection_output = CustomReflectionOutput(reflection_data)
-                    else:
-                        # Use OpenAI's structured output for OpenAI models
-                        reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
-                            model=self.reflect_llm if self.reflect_llm else self.llm,
-                            messages=messages,
-                            temperature=temperature,
-                            response_format=ReflectionOutput
-                        )
-
-                        reflection_output = reflection_response.choices[0].message.parsed
-
                        if self.verbose:
-
+                            # Handle both string and list prompts for instruction display
+                            display_text = prompt
+                            if isinstance(prompt, list):
+                                # Extract text content from multimodal prompt
+                                display_text = next((item["text"] for item in prompt if item["type"] == "text"), "")
+
+                            if display_text and str(display_text).strip():
+                                # Pass agent information to display_instruction
+                                agent_tools = [t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools]
+                                display_instruction(
+                                    f"Agent {self.name} is processing prompt: {display_text}",
+                                    console=self.console,
+                                    agent_name=self.name,
+                                    agent_role=self.role,
+                                    agent_tools=agent_tools
+                                )
+
+                        response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream)
+                        if not response:
+                            # Rollback chat history on response failure
+                            self.chat_history = self.chat_history[:chat_history_length]
+                            return None

-
+                        response_text = response.choices[0].message.content.strip()

-                        #
-                        if
-
-
-                            self.chat_history.append({"role": "user", "content": prompt})
+                        # Handle output_json or output_pydantic if specified
+                        if output_json or output_pydantic:
+                            # Add to chat history and return raw response
+                            # User message already added before LLM call via _build_messages
                            self.chat_history.append({"role": "assistant", "content": response_text})
                            # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
                            if self.verbose and not self._using_custom_llm:
-                                display_interaction(
-
-
-                                validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
-                                return validated_response
-                            except Exception as e:
-                                logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
-                                return None
+                                display_interaction(original_prompt, response_text, markdown=self.markdown,
+                                                    generation_time=time.time() - start_time, console=self.console)
+                            return response_text

-
-
-                            if self.verbose:
-                                display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
-                            self.chat_history.append({"role": "user", "content": prompt})
+                        if not self.self_reflect:
+                            # User message already added before LLM call via _build_messages
                            self.chat_history.append({"role": "assistant", "content": response_text})
+                            if self.verbose:
+                                logging.debug(f"Agent {self.name} final response: {response_text}")
                            # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
                            if self.verbose and not self._using_custom_llm:
-                                display_interaction(
-                            #
+                                display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+                            # Return only reasoning content if reasoning_steps is True
+                            if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
+                                # Apply guardrail to reasoning content
+                                try:
+                                    validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools)
+                                    return validated_reasoning
+                                except Exception as e:
+                                    logging.error(f"Agent {self.name}: Guardrail validation failed for reasoning content: {e}")
+                                    # Rollback chat history on guardrail failure
+                                    self.chat_history = self.chat_history[:chat_history_length]
+                                    return None
+                            # Apply guardrail to regular response
                            try:
-                                validated_response = self._apply_guardrail_with_retry(response_text,
+                                validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
                                return validated_response
                            except Exception as e:
-                                logging.error(f"Agent {self.name}: Guardrail validation failed
+                                logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
+                                # Rollback chat history on guardrail failure
+                                self.chat_history = self.chat_history[:chat_history_length]
                                return None

-
-
-
-
-
-
-                        reflection_count
-
+                        reflection_prompt = f"""
+Reflect on your previous response: '{response_text}'.
+{self.reflect_prompt if self.reflect_prompt else "Identify any flaws, improvements, or actions."}
+Provide a "satisfactory" status ('yes' or 'no').
+Output MUST be JSON with 'reflection' and 'satisfactory'.
+"""
+                        logging.debug(f"{self.name} reflection attempt {reflection_count+1}, sending prompt: {reflection_prompt}")
+                        messages.append({"role": "user", "content": reflection_prompt})

-
-
-
-
-
-
-
-
-
-
+                        try:
+                            # Check if we're using a custom LLM (like Gemini)
+                            if self._using_custom_llm or self._openai_client is None:
+                                # For custom LLMs, we need to handle reflection differently
+                                # Use non-streaming to get complete JSON response
+                                reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False)
+
+                                if not reflection_response or not reflection_response.choices:
+                                    raise Exception("No response from reflection request")
+
+                                reflection_text = reflection_response.choices[0].message.content.strip()
+
+                                # Clean the JSON output
+                                cleaned_json = self.clean_json_output(reflection_text)
+
+                                # Parse the JSON manually
+                                reflection_data = json.loads(cleaned_json)
+
+                                # Create a reflection output object manually
+                                class CustomReflectionOutput:
+                                    def __init__(self, data):
+                                        self.reflection = data.get('reflection', '')
+                                        self.satisfactory = data.get('satisfactory', 'no').lower()
+
+                                reflection_output = CustomReflectionOutput(reflection_data)
+                            else:
+                                # Use OpenAI's structured output for OpenAI models
+                                reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
+                                    model=self.reflect_llm if self.reflect_llm else self.llm,
+                                    messages=messages,
+                                    temperature=temperature,
+                                    response_format=ReflectionOutput
+                                )

-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                                reflection_output = reflection_response.choices[0].message.parsed
+
+                            if self.verbose:
+                                display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'", console=self.console)
+
+                            messages.append({"role": "assistant", "content": f"Self Reflection: {reflection_output.reflection} Satisfactory?: {reflection_output.satisfactory}"})
+
+                            # Only consider satisfactory after minimum reflections
+                            if reflection_output.satisfactory == "yes" and reflection_count >= self.min_reflect - 1:
+                                if self.verbose:
+                                    display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
+                                # User message already added before LLM call via _build_messages
+                                self.chat_history.append({"role": "assistant", "content": response_text})
+                                # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+                                if self.verbose and not self._using_custom_llm:
+                                    display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+                                # Apply guardrail validation after satisfactory reflection
+                                try:
+                                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                                    return validated_response
+                                except Exception as e:
+                                    logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
+                                    # Rollback chat history on guardrail failure
+                                    self.chat_history = self.chat_history[:chat_history_length]
+                                    return None
+
+                            # Check if we've hit max reflections
+                            if reflection_count >= self.max_reflect - 1:
+                                if self.verbose:
+                                    display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
+                                # User message already added before LLM call via _build_messages
+                                self.chat_history.append({"role": "assistant", "content": response_text})
+                                # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+                                if self.verbose and not self._using_custom_llm:
+                                    display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+                                # Apply guardrail validation after max reflections
+                                try:
+                                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                                    return validated_response
+                                except Exception as e:
+                                    logging.error(f"Agent {self.name}: Guardrail validation failed after max reflections: {e}")
+                                    # Rollback chat history on guardrail failure
+                                    self.chat_history = self.chat_history[:chat_history_length]
+                                    return None
+
+                            # If not satisfactory and not at max reflections, continue with regeneration
+                            logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
+                            messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
+                            # For custom LLMs during reflection, always use non-streaming to ensure complete responses
+                            use_stream = self.stream if not self._using_custom_llm else False
+                            response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream)
+                            response_text = response.choices[0].message.content.strip()
+                            reflection_count += 1
+                            continue  # Continue the loop for more reflections
+
+                        except Exception as e:
+                            display_error(f"Error in parsing self-reflection json {e}. Retrying", console=self.console)
+                            logging.error("Reflection parsing failed.", exc_info=True)
+                            messages.append({"role": "assistant", "content": "Self Reflection failed."})
+                            reflection_count += 1
+                            continue  # Continue even after error to try again
+                    except Exception:
+                        # Catch any exception from the inner try block and re-raise to outer handler
+                        raise
+            except Exception as e:
+                # Catch any exceptions that escape the while loop
+                display_error(f"Unexpected error in chat: {e}", console=self.console)
+                # Rollback chat history
+                self.chat_history = self.chat_history[:chat_history_length]
+                return None

    def clean_json_output(self, output: str) -> str:
        """Clean and extract JSON from response text."""
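The reworked loop keeps the same reflection contract: ask the model to critique its own answer, parse a JSON object with 'reflection' and 'satisfactory', and only stop once min_reflect is met or max_reflect is hit. A compact standalone sketch of that control flow, with a scripted fake model in place of a real LLM call:

```python
import json


def fake_reflect(attempt: int) -> str:
    """Scripted stand-in for the reflection call (illustrative only)."""
    return json.dumps({"reflection": f"attempt {attempt} critique",
                       "satisfactory": "yes" if attempt >= 2 else "no"})


def reflect_loop(response_text: str, min_reflect: int = 2, max_reflect: int = 4) -> str:
    reflection_count = 0
    while True:
        data = json.loads(fake_reflect(reflection_count + 1))
        satisfied = data.get("satisfactory", "no").lower() == "yes"
        # Accept only after the minimum number of reflections
        if satisfied and reflection_count >= min_reflect - 1:
            return response_text
        # Give up and return the current answer at the maximum
        if reflection_count >= max_reflect - 1:
            return response_text
        # Otherwise regenerate (here we just tag the answer) and loop again
        response_text = f"{response_text} (revised {reflection_count + 1})"
        reflection_count += 1


print(reflect_loop("draft answer"))
```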
@@ -1430,10 +1516,26 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
            prompt = f"{prompt}\n\nKnowledge: {knowledge_content}"

        if self._using_custom_llm:
+            # Store chat history length for potential rollback
+            chat_history_length = len(self.chat_history)
+
+            # Normalize prompt content for consistent chat history storage
+            normalized_content = prompt
+            if isinstance(prompt, list):
+                # Extract text from multimodal prompts
+                normalized_content = next((item["text"] for item in prompt if item.get("type") == "text"), "")
+
+            # Prevent duplicate messages
+            if not (self.chat_history and
+                    self.chat_history[-1].get("role") == "user" and
+                    self.chat_history[-1].get("content") == normalized_content):
+                # Add user message to chat history BEFORE LLM call so handoffs can access it
+                self.chat_history.append({"role": "user", "content": normalized_content})
+
            try:
                response_text = await self.llm_instance.get_response_async(
                    prompt=prompt,
-                    system_prompt=
+                    system_prompt=self._build_system_prompt(tools),
                    chat_history=self.chat_history,
                    temperature=temperature,
                    tools=tools,
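The async path (achat) applies the same dedupe and rollback discipline before awaiting the LLM. A small asyncio sketch of that shape, with a made-up async stub instead of get_response_async:

```python
import asyncio

chat_history = []


async def fake_llm(prompt: str) -> str:
    """Stand-in for an async LLM call (illustrative only)."""
    await asyncio.sleep(0.01)
    return f"echo: {prompt}"


async def achat_once(prompt: str):
    rollback_to = len(chat_history)
    # Same dedupe rule as the sync path: skip if the last turn is an identical user message
    if not (chat_history and chat_history[-1] == {"role": "user", "content": prompt}):
        chat_history.append({"role": "user", "content": prompt})
    try:
        reply = await fake_llm(prompt)
        chat_history.append({"role": "assistant", "content": reply})
        return reply
    except Exception:
        del chat_history[rollback_to:]  # undo the partial turn on failure
        return None


print(asyncio.run(achat_once("hello")))
```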
@@ -1447,19 +1549,29 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                    console=self.console,
                    agent_name=self.name,
                    agent_role=self.role,
-                    agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
+                    agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
                    execute_tool_fn=self.execute_tool_async,
                    reasoning_steps=reasoning_steps
                )

-                self.chat_history.append({"role": "user", "content": prompt})
                self.chat_history.append({"role": "assistant", "content": response_text})

                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                    total_time = time.time() - start_time
                    logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
-
+
+                # Apply guardrail validation for custom LLM response
+                try:
+                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    return validated_response
+                except Exception as e:
+                    logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
+                    # Rollback chat history on guardrail failure
+                    self.chat_history = self.chat_history[:chat_history_length]
+                    return None
            except Exception as e:
+                # Rollback chat history if LLM call fails
+                self.chat_history = self.chat_history[:chat_history_length]
                display_error(f"Error in LLM chat: {e}")
                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                    total_time = time.time() - start_time
@@ -1469,6 +1581,22 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
        # For OpenAI client
        # Use the new _build_messages helper method
        messages, original_prompt = self._build_messages(prompt, temperature, output_json, output_pydantic)
+
+        # Store chat history length for potential rollback
+        chat_history_length = len(self.chat_history)
+
+        # Normalize original_prompt for consistent chat history storage
+        normalized_content = original_prompt
+        if isinstance(original_prompt, list):
+            # Extract text from multimodal prompts
+            normalized_content = next((item["text"] for item in original_prompt if item.get("type") == "text"), "")
+
+        # Prevent duplicate messages
+        if not (self.chat_history and
+                self.chat_history[-1].get("role") == "user" and
+                self.chat_history[-1].get("content") == normalized_content):
+            # Add user message to chat history BEFORE LLM call so handoffs can access it
+            self.chat_history.append({"role": "user", "content": normalized_content})

        reflection_count = 0
        start_time = time.time()
@@ -1615,7 +1743,16 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                    total_time = time.time() - start_time
                    logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
-
+
+                # Apply guardrail validation for OpenAI client response
+                try:
+                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                    return validated_response
+                except Exception as e:
+                    logging.error(f"Agent {self.name}: Guardrail validation failed for OpenAI client: {e}")
+                    # Rollback chat history on guardrail failure
+                    self.chat_history = self.chat_history[:chat_history_length]
+                    return None
            except Exception as e:
                display_error(f"Error in chat completion: {e}")
                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
praisonaiagents/llm/llm.py

@@ -406,7 +406,7 @@ class LLM:
        # missing tool calls or making duplicate calls
        return False

-    def _build_messages(self, prompt, system_prompt=None, chat_history=None, output_json=None, output_pydantic=None):
+    def _build_messages(self, prompt, system_prompt=None, chat_history=None, output_json=None, output_pydantic=None, tools=None):
        """Build messages list for LLM completion. Works for both sync and async.

        Args:
@@ -415,6 +415,7 @@ class LLM:
            chat_history: Optional list of previous messages
            output_json: Optional Pydantic model for JSON output
            output_pydantic: Optional Pydantic model for JSON output (alias)
+            tools: Optional list of tools available

        Returns:
            tuple: (messages list, original prompt)
@@ -1858,6 +1859,21 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
        # Override with any provided parameters
        params.update(override_params)

+        # Add tool_choice="auto" when tools are provided (unless already specified)
+        if 'tools' in params and params['tools'] and 'tool_choice' not in params:
+            # For Gemini models, use tool_choice to encourage tool usage
+            # More comprehensive Gemini model detection
+            if any(prefix in self.model.lower() for prefix in ['gemini', 'gemini/', 'google/gemini']):
+                try:
+                    import litellm
+                    # Check if model supports function calling before setting tool_choice
+                    if litellm.supports_function_calling(model=self.model):
+                        params['tool_choice'] = 'auto'
+                except Exception as e:
+                    # If check fails, still set tool_choice for known Gemini models
+                    logging.debug(f"Could not verify function calling support: {e}. Setting tool_choice anyway.")
+                    params['tool_choice'] = 'auto'
+
        return params

    def _prepare_response_logging(self, temperature: float, stream: bool, verbose: bool, markdown: bool, **kwargs) -> Optional[Dict[str, Any]]:
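The new block only nudges Gemini-style models toward calling tools and leaves tool_choice alone when the caller has already set it. A standalone sketch of the same decision with a made-up params dict; litellm.supports_function_calling is an existing litellm helper, and, as in the diff, the check is wrapped defensively so a failed check still falls back to setting the flag:

```python
import logging


def add_tool_choice(params: dict, model: str) -> dict:
    """Set tool_choice='auto' for Gemini-style models when tools are present (sketch)."""
    if params.get("tools") and "tool_choice" not in params:
        if "gemini" in model.lower():
            try:
                import litellm
                if litellm.supports_function_calling(model=model):
                    params["tool_choice"] = "auto"
            except Exception as exc:
                # If the capability check itself fails, fall back to setting it anyway
                logging.debug("Could not verify function calling support: %s", exc)
                params["tool_choice"] = "auto"
    return params


params = {"tools": [{"type": "function", "function": {"name": "search"}}]}
print(add_tool_choice(params, "gemini/gemini-2.0-flash").get("tool_choice"))  # typically 'auto'
```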
pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "praisonaiagents"
-version = "0.0.123"
+version = "0.0.124"
description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
requires-python = ">=3.10"
authors = [
@@ -73,4 +73,4 @@ all = [

[tool.setuptools.packages.find]
where = ["."]
-include = ["praisonaiagents*"]
+include = ["praisonaiagents*"]