praisonaiagents 0.0.118__tar.gz → 0.0.123__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/agent/agent.py +47 -23
  3. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/agents/autoagents.py +188 -34
  4. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/llm/__init__.py +7 -1
  5. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/llm/llm.py +384 -327
  6. praisonaiagents-0.0.123/praisonaiagents/llm/model_capabilities.py +90 -0
  7. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents.egg-info/PKG-INFO +1 -1
  8. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents.egg-info/SOURCES.txt +1 -0
  9. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/pyproject.toml +1 -1
  10. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/README.md +0 -0
  11. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/__init__.py +0 -0
  12. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/agent/__init__.py +0 -0
  13. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/agent/handoff.py +0 -0
  14. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/agent/image_agent.py +0 -0
  15. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/agents/__init__.py +0 -0
  16. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/agents/agents.py +0 -0
  17. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/approval.py +0 -0
  18. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/guardrails/__init__.py +0 -0
  19. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  20. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  21. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/knowledge/__init__.py +0 -0
  22. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/knowledge/chunking.py +0 -0
  23. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/knowledge/knowledge.py +0 -0
  24. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/llm/openai_client.py +0 -0
  25. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/main.py +0 -0
  26. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/mcp/__init__.py +0 -0
  27. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/mcp/mcp.py +0 -0
  28. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/mcp/mcp_sse.py +0 -0
  29. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/memory/__init__.py +0 -0
  30. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/memory/memory.py +0 -0
  31. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/process/__init__.py +0 -0
  32. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/process/process.py +0 -0
  33. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/session.py +0 -0
  34. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/task/__init__.py +0 -0
  35. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/task/task.py +0 -0
  36. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/telemetry/__init__.py +0 -0
  37. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/telemetry/integration.py +0 -0
  38. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/telemetry/telemetry.py +0 -0
  39. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/README.md +0 -0
  40. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/__init__.py +0 -0
  41. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/arxiv_tools.py +0 -0
  42. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/calculator_tools.py +0 -0
  43. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/csv_tools.py +0 -0
  44. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/duckdb_tools.py +0 -0
  45. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  46. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/excel_tools.py +0 -0
  47. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/file_tools.py +0 -0
  48. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/json_tools.py +0 -0
  49. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/newspaper_tools.py +0 -0
  50. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/pandas_tools.py +0 -0
  51. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/python_tools.py +0 -0
  52. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/searxng_tools.py +0 -0
  53. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/shell_tools.py +0 -0
  54. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/spider_tools.py +0 -0
  55. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/test.py +0 -0
  56. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/tools.py +0 -0
  57. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  58. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  59. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/xml_tools.py +0 -0
  60. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/yaml_tools.py +0 -0
  61. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents/tools/yfinance_tools.py +0 -0
  62. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  63. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents.egg-info/requires.txt +0 -0
  64. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/praisonaiagents.egg-info/top_level.txt +0 -0
  65. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/setup.cfg +0 -0
  66. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/tests/test-graph-memory.py +0 -0
  67. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/tests/test.py +0 -0
  68. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/tests/test_handoff_compatibility.py +0 -0
  69. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/tests/test_ollama_async_fix.py +0 -0
  70. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/tests/test_ollama_fix.py +0 -0
  71. {praisonaiagents-0.0.118 → praisonaiagents-0.0.123}/tests/test_posthog_fixed.py +0 -0
PKG-INFO:
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.118
+ Version: 0.0.123
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
praisonaiagents/agent/agent.py:
@@ -1233,7 +1233,8 @@ Your Goal: {self.goal}
  # Add to chat history and return raw response
  self.chat_history.append({"role": "user", "content": original_prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- if self.verbose:
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
  display_interaction(original_prompt, response_text, markdown=self.markdown,
  generation_time=time.time() - start_time, console=self.console)
  return response_text
@@ -1243,7 +1244,9 @@ Your Goal: {self.goal}
  self.chat_history.append({"role": "assistant", "content": response_text})
  if self.verbose:
  logging.debug(f"Agent {self.name} final response: {response_text}")
- display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  # Return only reasoning content if reasoning_steps is True
  if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
  # Apply guardrail to reasoning content
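The same `verbose and not _using_custom_llm` guard now protects every return path in `agent.py`. A minimal sketch of the pattern, assuming a hypothetical `maybe_display` helper (the `_using_custom_llm` flag and the `display_interaction` signature are taken from the diff; everything else is illustrative):

```python
import time
from praisonaiagents.main import display_interaction  # import path as used elsewhere in this package

def maybe_display(agent, prompt, response_text, start_time):
    # Custom LLM providers render their own output, so printing here again
    # would duplicate it; only the OpenAI code path displays the interaction.
    if agent.verbose and not agent._using_custom_llm:
        display_interaction(prompt, response_text,
                            markdown=agent.markdown,
                            generation_time=time.time() - start_time,
                            console=agent.console)
```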
@@ -1271,25 +1274,40 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  messages.append({"role": "user", "content": reflection_prompt})

  try:
- # Check if OpenAI client is available
- if self._openai_client is None:
- # For custom LLMs, self-reflection with structured output is not supported
- if self.verbose:
- display_self_reflection(f"Agent {self.name}: Self-reflection with structured output is not supported for custom LLM providers. Skipping reflection.", console=self.console)
- # Return the original response without reflection
- self.chat_history.append({"role": "user", "content": prompt})
- self.chat_history.append({"role": "assistant", "content": response_text})
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
- return response_text
-
- reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
- model=self.reflect_llm if self.reflect_llm else self.llm,
- messages=messages,
- temperature=temperature,
- response_format=ReflectionOutput
- )
+ # Check if we're using a custom LLM (like Gemini)
+ if self._using_custom_llm or self._openai_client is None:
+ # For custom LLMs, we need to handle reflection differently
+ # Use non-streaming to get complete JSON response
+ reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False)
+
+ if not reflection_response or not reflection_response.choices:
+ raise Exception("No response from reflection request")
+
+ reflection_text = reflection_response.choices[0].message.content.strip()
+
+ # Clean the JSON output
+ cleaned_json = self.clean_json_output(reflection_text)
+
+ # Parse the JSON manually
+ reflection_data = json.loads(cleaned_json)
+
+ # Create a reflection output object manually
+ class CustomReflectionOutput:
+ def __init__(self, data):
+ self.reflection = data.get('reflection', '')
+ self.satisfactory = data.get('satisfactory', 'no').lower()
+
+ reflection_output = CustomReflectionOutput(reflection_data)
+ else:
+ # Use OpenAI's structured output for OpenAI models
+ reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
+ model=self.reflect_llm if self.reflect_llm else self.llm,
+ messages=messages,
+ temperature=temperature,
+ response_format=ReflectionOutput
+ )

- reflection_output = reflection_response.choices[0].message.parsed
+ reflection_output = reflection_response.choices[0].message.parsed

  if self.verbose:
  display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'", console=self.console)
@@ -1302,7 +1320,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
  self.chat_history.append({"role": "user", "content": prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  # Apply guardrail validation after satisfactory reflection
  try:
  validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
@@ -1317,7 +1337,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
  self.chat_history.append({"role": "user", "content": prompt})
  self.chat_history.append({"role": "assistant", "content": response_text})
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+ if self.verbose and not self._using_custom_llm:
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
  # Apply guardrail validation after max reflections
  try:
  validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
@@ -1328,7 +1350,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
  messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
- response = self._chat_completion(messages, temperature=temperature, tools=None, stream=self.stream)
+ # For custom LLMs during reflection, always use non-streaming to ensure complete responses
+ use_stream = self.stream if not self._using_custom_llm else False
+ response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream)
  response_text = response.choices[0].message.content.strip()
  reflection_count += 1
  continue # Continue the loop for more reflections
praisonaiagents/agents/autoagents.py:
@@ -13,7 +13,8 @@ import logging
  import os
  from pydantic import BaseModel, ConfigDict
  from ..main import display_instruction, display_tool_call, display_interaction
- from ..llm import get_openai_client
+ from ..llm import get_openai_client, LLM, OpenAIClient
+ import json

  # Define Pydantic models for structured output
  class TaskConfig(BaseModel):
@@ -108,6 +109,8 @@ class AutoAgents(PraisonAIAgents):
  self.max_execution_time = max_execution_time
  self.max_iter = max_iter
  self.reflect_llm = reflect_llm
+ self.base_url = base_url
+ self.api_key = api_key

  # Display initial instruction
  if self.verbose:
@@ -211,9 +214,37 @@ Tools: {', '.join(agent_tools)}"""

  return assigned_tools

+ def _validate_config(self, config: AutoAgentsConfig) -> tuple[bool, str]:
+ """
+ Validate that the configuration has proper TaskConfig objects.
+
+ Returns:
+ Tuple of (is_valid, error_message)
+ """
+ for agent_idx, agent in enumerate(config.agents):
+ if not hasattr(agent, 'tasks') or not agent.tasks:
+ return False, f"Agent '{agent.name}' has no tasks defined"
+
+ for task_idx, task in enumerate(agent.tasks):
+ # Check if task is a proper TaskConfig instance
+ if not isinstance(task, TaskConfig):
+ return False, f"Task at index {task_idx} for agent '{agent.name}' is not a proper TaskConfig object"
+
+ # Check required fields
+ if not task.name:
+ return False, f"Task at index {task_idx} for agent '{agent.name}' has no name"
+ if not task.description:
+ return False, f"Task at index {task_idx} for agent '{agent.name}' has no description"
+ if not task.expected_output:
+ return False, f"Task at index {task_idx} for agent '{agent.name}' has no expected_output"
+ if task.tools is None:
+ return False, f"Task at index {task_idx} for agent '{agent.name}' has no tools field"
+
+ return True, ""
+
  def _generate_config(self) -> AutoAgentsConfig:
- """Generate the configuration for agents and tasks"""
- prompt = f"""
+ """Generate the configuration for agents and tasks with retry logic"""
+ base_prompt = f"""
  Generate a configuration for AI agents to accomplish this task: "{self.instructions}"

  The configuration should include:
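Note that `_validate_config` reports problems as an `(is_valid, error_message)` tuple instead of raising, which lets the retry loop below feed the message back into the next prompt. A hedged usage sketch; the field sets here are inferred from the JSON schema embedded in the generation prompt, and `AgentConfig` is assumed to look roughly like this:

```python
from typing import List
from pydantic import BaseModel

# Field sets inferred from the schema in the prompt; the shipped models may differ.
class TaskConfig(BaseModel):
    name: str
    description: str
    expected_output: str
    tools: List[str]

class AgentConfig(BaseModel):
    name: str
    role: str
    goal: str
    backstory: str
    tools: List[str]
    tasks: List[TaskConfig]

class AutoAgentsConfig(BaseModel):
    main_instruction: str
    process_type: str
    agents: List[AgentConfig]

config = AutoAgentsConfig(
    main_instruction="Research and summarise a topic",
    process_type="sequential",
    agents=[AgentConfig(
        name="Researcher", role="Research analyst",
        goal="Collect facts", backstory="Experienced web researcher",
        tools=["internet_search"],
        tasks=[TaskConfig(name="Search", description="Find relevant sources",
                          expected_output="A list of sources", tools=["internet_search"])],
    )],
)
# On the real class: is_valid, error_msg = auto_agents._validate_config(config)
# A task supplied as a bare string would fail the isinstance(task, TaskConfig) check.
```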
@@ -234,40 +265,161 @@ Requirements:
  4. The process type should match the task requirements
  5. Generate maximum {self.max_agents} agents to handle this task efficiently

- Return the configuration in a structured JSON format matching the AutoAgentsConfig schema.
+ Return the configuration in a structured JSON format matching this exact schema:
+ {{
+ "main_instruction": "Overall goal description",
+ "process_type": "sequential|workflow|hierarchical",
+ "agents": [
+ {{
+ "name": "Agent Name",
+ "role": "Agent Role",
+ "goal": "Agent Goal",
+ "backstory": "Agent Backstory",
+ "tools": ["tool1", "tool2"],
+ "tasks": [
+ {{
+ "name": "Task Name",
+ "description": "Detailed task description",
+ "expected_output": "What the task should produce",
+ "tools": ["tool1", "tool2"]
+ }}
+ ]
+ }}
+ ]
+ }}
+
+ IMPORTANT: Each task MUST be an object with name, description, expected_output, and tools fields, NOT a simple string.
  """

- try:
- # Get OpenAI client
- try:
- client = get_openai_client()
- except ValueError as e:
- # AutoAgents requires OpenAI for structured output generation
- raise ValueError(
- "AutoAgents requires OpenAI API for automatic agent generation. "
- "Please set OPENAI_API_KEY environment variable or use PraisonAIAgents class directly "
- "with manually configured agents for non-OpenAI providers."
- ) from e
-
- response = client.beta.chat.completions.parse(
- model=self.llm,
- response_format=AutoAgentsConfig,
- messages=[
- {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
- {"role": "user", "content": prompt}
- ]
- )
+ max_retries = 3
+ last_response = None
+ last_error = None
+
+ for attempt in range(max_retries):
+ # Initialize variables for this attempt
+ use_openai_structured = False
+ client = None
+
+ # Prepare prompt for this attempt
+ if attempt > 0 and last_response and last_error:
+ # On retry, include the previous response and error
+ prompt = f"""{base_prompt}
+
+ PREVIOUS ATTEMPT FAILED!
+ Your previous response was:
+ ```json
+ {last_response}
+ ```
+
+ Error: {last_error}
+
+ REMEMBER: Tasks MUST be objects with the following structure:
+ {{
+ "name": "Task Name",
+ "description": "Task Description",
+ "expected_output": "Expected Output",
+ "tools": ["tool1", "tool2"]
+ }}
+
+ DO NOT use strings for tasks. Each task MUST be a complete object with all four fields."""
+ else:
+ prompt = base_prompt

- # Ensure we have exactly max_agents number of agents
- if len(response.choices[0].message.parsed.agents) > self.max_agents:
- response.choices[0].message.parsed.agents = response.choices[0].message.parsed.agents[:self.max_agents]
- elif len(response.choices[0].message.parsed.agents) < self.max_agents:
- logging.warning(f"Generated {len(response.choices[0].message.parsed.agents)} agents, expected {self.max_agents}")
+ try:
+ # Check if we have OpenAI API and the model supports structured output
+ from ..llm import supports_structured_outputs
+ if self.llm and supports_structured_outputs(self.llm):
+ client = get_openai_client()
+ use_openai_structured = True
+ except:
+ # If OpenAI client is not available, we'll use the LLM class
+ pass

- return response.choices[0].message.parsed
- except Exception as e:
- logging.error(f"Error generating configuration: {e}")
- raise
+ try:
+ if use_openai_structured and client:
+ # Use OpenAI's structured output for OpenAI models (backward compatibility)
+ config = client.parse_structured_output(
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
+ {"role": "user", "content": prompt}
+ ],
+ response_format=AutoAgentsConfig,
+ model=self.llm
+ )
+ # Store the response for potential retry
+ last_response = json.dumps(config.model_dump(), indent=2)
+ else:
+ # Use LLM class for all other providers (Gemini, Anthropic, etc.)
+ llm_instance = LLM(
+ model=self.llm,
+ base_url=self.base_url,
+ api_key=self.api_key
+ )
+
+ response_text = llm_instance.get_response(
+ prompt=prompt,
+ system_prompt="You are a helpful assistant designed to generate AI agent configurations.",
+ output_pydantic=AutoAgentsConfig,
+ temperature=0.7,
+ stream=False,
+ verbose=False
+ )
+
+ # Store the raw response for potential retry
+ last_response = response_text
+
+ # Parse the JSON response
+ try:
+ # First try to parse as is
+ config_dict = json.loads(response_text)
+ config = AutoAgentsConfig(**config_dict)
+ except json.JSONDecodeError:
+ # If that fails, try to extract JSON from the response
+ # Handle cases where the model might wrap JSON in markdown blocks
+ cleaned_response = response_text.strip()
+ if cleaned_response.startswith("```json"):
+ cleaned_response = cleaned_response[7:]
+ if cleaned_response.startswith("```"):
+ cleaned_response = cleaned_response[3:]
+ if cleaned_response.endswith("```"):
+ cleaned_response = cleaned_response[:-3]
+ cleaned_response = cleaned_response.strip()
+
+ config_dict = json.loads(cleaned_response)
+ config = AutoAgentsConfig(**config_dict)
+
+ # Validate the configuration
+ is_valid, error_msg = self._validate_config(config)
+ if not is_valid:
+ last_error = error_msg
+ if attempt < max_retries - 1:
+ logging.warning(f"Configuration validation failed (attempt {attempt + 1}/{max_retries}): {error_msg}")
+ continue
+ else:
+ raise ValueError(f"Configuration validation failed after {max_retries} attempts: {error_msg}")
+
+ # Ensure we have exactly max_agents number of agents
+ if len(config.agents) > self.max_agents:
+ config.agents = config.agents[:self.max_agents]
+ elif len(config.agents) < self.max_agents:
+ logging.warning(f"Generated {len(config.agents)} agents, expected {self.max_agents}")
+
+ return config
+
+ except ValueError as e:
+ # Re-raise validation errors
+ raise
+ except Exception as e:
+ last_error = str(e)
+ if attempt < max_retries - 1:
+ logging.warning(f"Error generating configuration (attempt {attempt + 1}/{max_retries}): {e}")
+ continue
+ else:
+ logging.error(f"Error generating configuration after {max_retries} attempts: {e}")
+ raise
+
+ # This should never be reached due to the raise statements above
+ raise RuntimeError(f"Failed to generate valid configuration after {max_retries} attempts")

  def _create_agents_and_tasks(self, config: AutoAgentsConfig) -> Tuple[List[Agent], List[Task]]:
  """Create agents and tasks from configuration"""
@@ -308,7 +460,9 @@ Return the configuration in a structured JSON format matching the AutoAgentsConf
  max_rpm=self.max_rpm,
  max_execution_time=self.max_execution_time,
  max_iter=self.max_iter,
- reflect_llm=self.reflect_llm
+ reflect_llm=self.reflect_llm,
+ base_url=self.base_url,
+ api_key=self.api_key
  )
  agents.append(agent)

praisonaiagents/llm/__init__.py:
@@ -32,6 +32,10 @@ from .openai_client import (
  ToolCall,
  process_stream_chunks
  )
+ from .model_capabilities import (
+ supports_structured_outputs,
+ supports_streaming_with_tools
+ )

  # Ensure telemetry is disabled after import as well
  try:
@@ -52,5 +56,7 @@ __all__ = [
  "CompletionUsage",
  "ChatCompletion",
  "ToolCall",
- "process_stream_chunks"
+ "process_stream_chunks",
+ "supports_structured_outputs",
+ "supports_streaming_with_tools"
  ]
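The body of the new `praisonaiagents/llm/model_capabilities.py` (+90 lines) is not shown in this diff; only its two exports are visible. One plausible shape, offered purely as an assumption, is a capability table keyed on model-name prefixes:

```python
# Hypothetical sketch -- NOT the shipped model_capabilities.py, whose body
# this diff does not include. Model names and matching rule are guesses.
STRUCTURED_OUTPUT_MODELS = ("gpt-4o", "gpt-4o-mini")  # assumed, not exhaustive

def supports_structured_outputs(model: str) -> bool:
    # Prefix match so dated variants such as "gpt-4o-2024-08-06" also qualify.
    return bool(model) and model.startswith(STRUCTURED_OUTPUT_MODELS)

def supports_streaming_with_tools(model: str) -> bool:
    # Placeholder policy: assume models with structured outputs can also
    # stream while calling tools.
    return supports_structured_outputs(model)
```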