praisonaiagents 0.0.120__py3-none-any.whl → 0.0.122__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
praisonaiagents/agent/agent.py

@@ -1274,27 +1274,40 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 messages.append({"role": "user", "content": reflection_prompt})

                 try:
-                    # Check if OpenAI client is available
-                    if self._openai_client is None:
-                        # For custom LLMs, self-reflection with structured output is not supported
-                        if self.verbose:
-                            display_self_reflection(f"Agent {self.name}: Self-reflection with structured output is not supported for custom LLM providers. Skipping reflection.", console=self.console)
-                        # Return the original response without reflection
-                        self.chat_history.append({"role": "user", "content": prompt})
-                        self.chat_history.append({"role": "assistant", "content": response_text})
-                        # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
-                        if self.verbose and not self._using_custom_llm:
-                            display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
-                        return response_text
-
-                    reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
-                        model=self.reflect_llm if self.reflect_llm else self.llm,
-                        messages=messages,
-                        temperature=temperature,
-                        response_format=ReflectionOutput
-                    )
+                    # Check if we're using a custom LLM (like Gemini)
+                    if self._using_custom_llm or self._openai_client is None:
+                        # For custom LLMs, we need to handle reflection differently
+                        # Use non-streaming to get complete JSON response
+                        reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False)
+
+                        if not reflection_response or not reflection_response.choices:
+                            raise Exception("No response from reflection request")
+
+                        reflection_text = reflection_response.choices[0].message.content.strip()
+
+                        # Clean the JSON output
+                        cleaned_json = self.clean_json_output(reflection_text)
+
+                        # Parse the JSON manually
+                        reflection_data = json.loads(cleaned_json)
+
+                        # Create a reflection output object manually
+                        class CustomReflectionOutput:
+                            def __init__(self, data):
+                                self.reflection = data.get('reflection', '')
+                                self.satisfactory = data.get('satisfactory', 'no').lower()
+
+                        reflection_output = CustomReflectionOutput(reflection_data)
+                    else:
+                        # Use OpenAI's structured output for OpenAI models
+                        reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
+                            model=self.reflect_llm if self.reflect_llm else self.llm,
+                            messages=messages,
+                            temperature=temperature,
+                            response_format=ReflectionOutput
+                        )

-                    reflection_output = reflection_response.choices[0].message.parsed
+                        reflection_output = reflection_response.choices[0].message.parsed

                 if self.verbose:
                     display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'", console=self.console)
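Note (not part of the diff): a minimal standalone sketch of what the custom-LLM branch above does with a raw reflection reply: strip any markdown fences, parse the JSON, and expose reflection/satisfactory attributes. The fence-stripping mirrors what the clean_json_output helper is assumed to do; the parse_reflection and ReflectionResult names below are illustrative only.

import json

def parse_reflection(raw_reply: str):
    # Strip markdown code fences the model may wrap around its JSON
    cleaned = raw_reply.strip()
    if cleaned.startswith("```json"):
        cleaned = cleaned[7:]
    if cleaned.startswith("```"):
        cleaned = cleaned[3:]
    if cleaned.endswith("```"):
        cleaned = cleaned[:-3]
    data = json.loads(cleaned.strip())

    # Same shape as the CustomReflectionOutput object built in the diff
    class ReflectionResult:
        def __init__(self, data):
            self.reflection = data.get('reflection', '')
            self.satisfactory = data.get('satisfactory', 'no').lower()

    return ReflectionResult(data)

result = parse_reflection('```json\n{"reflection": "Needs more detail", "satisfactory": "no"}\n```')
print(result.reflection, result.satisfactory)  # Needs more detail no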
@@ -1337,7 +1350,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

                     logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
                     messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
-                    response = self._chat_completion(messages, temperature=temperature, tools=None, stream=self.stream)
+                    # For custom LLMs during reflection, always use non-streaming to ensure complete responses
+                    use_stream = self.stream if not self._using_custom_llm else False
+                    response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream)
                     response_text = response.choices[0].message.content.strip()
                     reflection_count += 1
                     continue # Continue the loop for more reflections
praisonaiagents/agents/autoagents.py

@@ -13,7 +13,7 @@ import logging
 import os
 from pydantic import BaseModel, ConfigDict
 from ..main import display_instruction, display_tool_call, display_interaction
-from ..llm import get_openai_client, LLM
+from ..llm import get_openai_client, LLM, OpenAIClient
 import json

 # Define Pydantic models for structured output
@@ -109,6 +109,8 @@ class AutoAgents(PraisonAIAgents):
         self.max_execution_time = max_execution_time
         self.max_iter = max_iter
         self.reflect_llm = reflect_llm
+        self.base_url = base_url
+        self.api_key = api_key

         # Display initial instruction
         if self.verbose:
@@ -212,9 +214,37 @@ Tools: {', '.join(agent_tools)}"""

         return assigned_tools

+    def _validate_config(self, config: AutoAgentsConfig) -> tuple[bool, str]:
+        """
+        Validate that the configuration has proper TaskConfig objects.
+
+        Returns:
+            Tuple of (is_valid, error_message)
+        """
+        for agent_idx, agent in enumerate(config.agents):
+            if not hasattr(agent, 'tasks') or not agent.tasks:
+                return False, f"Agent '{agent.name}' has no tasks defined"
+
+            for task_idx, task in enumerate(agent.tasks):
+                # Check if task is a proper TaskConfig instance
+                if not isinstance(task, TaskConfig):
+                    return False, f"Task at index {task_idx} for agent '{agent.name}' is not a proper TaskConfig object"
+
+                # Check required fields
+                if not task.name:
+                    return False, f"Task at index {task_idx} for agent '{agent.name}' has no name"
+                if not task.description:
+                    return False, f"Task at index {task_idx} for agent '{agent.name}' has no description"
+                if not task.expected_output:
+                    return False, f"Task at index {task_idx} for agent '{agent.name}' has no expected_output"
+                if task.tools is None:
+                    return False, f"Task at index {task_idx} for agent '{agent.name}' has no tools field"
+
+        return True, ""
+
     def _generate_config(self) -> AutoAgentsConfig:
-        """Generate the configuration for agents and tasks"""
-        prompt = f"""
+        """Generate the configuration for agents and tasks with retry logic"""
+        base_prompt = f"""
 Generate a configuration for AI agents to accomplish this task: "{self.instructions}"

 The configuration should include:
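Note (not part of the diff): the _validate_config helper added in the hunk above guards against the common failure where the model returns tasks as bare strings instead of schema objects. A standalone sketch of that check follows; the dataclass here is a simplified stand-in for the package's Pydantic TaskConfig model, used only to illustrate the string-vs-object test.

from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class TaskConfig:
    name: str
    description: str
    expected_output: str
    tools: Optional[List[str]] = field(default_factory=list)

good_task = TaskConfig(name="Research", description="Collect sources", expected_output="A source list", tools=[])
bad_task = "Research the topic"  # what a model returns when it ignores the schema

for task in (good_task, bad_task):
    if not isinstance(task, TaskConfig):
        print(f"invalid: {task!r} is not a TaskConfig object")
    elif not task.name or not task.description or not task.expected_output:
        print(f"invalid: {task.name!r} is missing required fields")
    else:
        print(f"valid: {task.name}")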
@@ -235,81 +265,164 @@ Requirements:
 4. The process type should match the task requirements
 5. Generate maximum {self.max_agents} agents to handle this task efficiently

-Return the configuration in a structured JSON format matching the AutoAgentsConfig schema.
+Return the configuration in a structured JSON format matching this exact schema:
+{{
+    "main_instruction": "Overall goal description",
+    "process_type": "sequential|workflow|hierarchical",
+    "agents": [
+        {{
+            "name": "Agent Name",
+            "role": "Agent Role",
+            "goal": "Agent Goal",
+            "backstory": "Agent Backstory",
+            "tools": ["tool1", "tool2"],
+            "tasks": [
+                {{
+                    "name": "Task Name",
+                    "description": "Detailed task description",
+                    "expected_output": "What the task should produce",
+                    "tools": ["tool1", "tool2"]
+                }}
+            ]
+        }}
+    ]
+}}
+
+IMPORTANT: Each task MUST be an object with name, description, expected_output, and tools fields, NOT a simple string.
 """

-        try:
-            # Try to use OpenAI's structured output if available
-            use_openai_structured = False
-            client = None
+        max_retries = 3
+        last_response = None
+        last_error = None
+
+        for attempt in range(max_retries):
+            # Prepare prompt for this attempt
+            if attempt > 0 and last_response and last_error:
+                # On retry, include the previous response and error
+                prompt = f"""{base_prompt}
+
+PREVIOUS ATTEMPT FAILED!
+Your previous response was:
+```json
+{last_response}
+```
+
+Error: {last_error}
+
+REMEMBER: Tasks MUST be objects with the following structure:
+{{
+    "name": "Task Name",
+    "description": "Task Description",
+    "expected_output": "Expected Output",
+    "tools": ["tool1", "tool2"]
+}}
+
+DO NOT use strings for tasks. Each task MUST be a complete object with all four fields."""
+            else:
+                prompt = base_prompt

             try:
-                # Check if we have OpenAI API and the model supports structured output
-                if self.llm and (self.llm.startswith('gpt-') or self.llm.startswith('o1-') or self.llm.startswith('o3-')):
-                    client = get_openai_client()
-                    use_openai_structured = True
-            except:
-                # If OpenAI client is not available, we'll use the LLM class
-                pass
-
-            if use_openai_structured and client:
-                # Use OpenAI's structured output for OpenAI models (backward compatibility)
-                response = client.beta.chat.completions.parse(
-                    model=self.llm,
-                    response_format=AutoAgentsConfig,
-                    messages=[
-                        {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
-                        {"role": "user", "content": prompt}
-                    ]
-                )
-                config = response.choices[0].message.parsed
-            else:
-                # Use LLM class for all other providers (Gemini, Anthropic, etc.)
-                llm_instance = LLM(
-                    model=self.llm,
-                    base_url=self.base_url,
-                    api_key=self.api_key
-                )
+                # Try to use OpenAI's structured output if available
+                use_openai_structured = False
+                client = None

-                response_text = llm_instance.response(
-                    prompt=prompt,
-                    system_prompt="You are a helpful assistant designed to generate AI agent configurations.",
-                    output_pydantic=AutoAgentsConfig,
-                    temperature=0.7,
-                    stream=False,
-                    verbose=False
-                )
-
-                # Parse the JSON response
                 try:
-                    # First try to parse as is
-                    config_dict = json.loads(response_text)
-                    config = AutoAgentsConfig(**config_dict)
-                except json.JSONDecodeError:
-                    # If that fails, try to extract JSON from the response
-                    # Handle cases where the model might wrap JSON in markdown blocks
-                    cleaned_response = response_text.strip()
-                    if cleaned_response.startswith("```json"):
-                        cleaned_response = cleaned_response[7:]
-                    if cleaned_response.startswith("```"):
-                        cleaned_response = cleaned_response[3:]
-                    if cleaned_response.endswith("```"):
-                        cleaned_response = cleaned_response[:-3]
-                    cleaned_response = cleaned_response.strip()
+                    # Check if we have OpenAI API and the model supports structured output
+                    if self.llm and (self.llm.startswith('gpt-') or self.llm.startswith('o1-') or self.llm.startswith('o3-')):
+                        # Create a new client instance if custom parameters are provided
+                        if self.api_key or self.base_url:
+                            client = OpenAIClient(api_key=self.api_key, base_url=self.base_url)
+                        else:
+                            client = get_openai_client()
+                        use_openai_structured = True
+                except:
+                    # If OpenAI client is not available, we'll use the LLM class
+                    pass
+
+                if use_openai_structured and client:
+                    # Use OpenAI's structured output for OpenAI models (backward compatibility)
+                    config = client.parse_structured_output(
+                        messages=[
+                            {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
+                            {"role": "user", "content": prompt}
+                        ],
+                        response_format=AutoAgentsConfig,
+                        model=self.llm
+                    )
+                    # Store the response for potential retry
+                    last_response = json.dumps(config.model_dump(), indent=2)
+                else:
+                    # Use LLM class for all other providers (Gemini, Anthropic, etc.)
+                    llm_instance = LLM(
+                        model=self.llm,
+                        base_url=self.base_url,
+                        api_key=self.api_key
+                    )

-                    config_dict = json.loads(cleaned_response)
-                    config = AutoAgentsConfig(**config_dict)
-
-            # Ensure we have exactly max_agents number of agents
-            if len(config.agents) > self.max_agents:
-                config.agents = config.agents[:self.max_agents]
-            elif len(config.agents) < self.max_agents:
-                logging.warning(f"Generated {len(config.agents)} agents, expected {self.max_agents}")
-
-            return config
-        except Exception as e:
-            logging.error(f"Error generating configuration: {e}")
-            raise
+                    response_text = llm_instance.response(
+                        prompt=prompt,
+                        system_prompt="You are a helpful assistant designed to generate AI agent configurations.",
+                        output_pydantic=AutoAgentsConfig,
+                        temperature=0.7,
+                        stream=False,
+                        verbose=False
+                    )
+
+                    # Store the raw response for potential retry
+                    last_response = response_text
+
+                    # Parse the JSON response
+                    try:
+                        # First try to parse as is
+                        config_dict = json.loads(response_text)
+                        config = AutoAgentsConfig(**config_dict)
+                    except json.JSONDecodeError:
+                        # If that fails, try to extract JSON from the response
+                        # Handle cases where the model might wrap JSON in markdown blocks
+                        cleaned_response = response_text.strip()
+                        if cleaned_response.startswith("```json"):
+                            cleaned_response = cleaned_response[7:]
+                        if cleaned_response.startswith("```"):
+                            cleaned_response = cleaned_response[3:]
+                        if cleaned_response.endswith("```"):
+                            cleaned_response = cleaned_response[:-3]
+                        cleaned_response = cleaned_response.strip()
+
+                        config_dict = json.loads(cleaned_response)
+                        config = AutoAgentsConfig(**config_dict)
+
+                # Validate the configuration
+                is_valid, error_msg = self._validate_config(config)
+                if not is_valid:
+                    last_error = error_msg
+                    if attempt < max_retries - 1:
+                        logging.warning(f"Configuration validation failed (attempt {attempt + 1}/{max_retries}): {error_msg}")
+                        continue
+                    else:
+                        raise ValueError(f"Configuration validation failed after {max_retries} attempts: {error_msg}")
+
+                # Ensure we have exactly max_agents number of agents
+                if len(config.agents) > self.max_agents:
+                    config.agents = config.agents[:self.max_agents]
+                elif len(config.agents) < self.max_agents:
+                    logging.warning(f"Generated {len(config.agents)} agents, expected {self.max_agents}")
+
+                return config
+
+            except ValueError as e:
+                # Re-raise validation errors
+                raise
+            except Exception as e:
+                last_error = str(e)
+                if attempt < max_retries - 1:
+                    logging.warning(f"Error generating configuration (attempt {attempt + 1}/{max_retries}): {e}")
+                    continue
+                else:
+                    logging.error(f"Error generating configuration after {max_retries} attempts: {e}")
+                    raise
+
+        # This should never be reached due to the raise statements above
+        raise RuntimeError(f"Failed to generate valid configuration after {max_retries} attempts")

     def _create_agents_and_tasks(self, config: AutoAgentsConfig) -> Tuple[List[Agent], List[Task]]:
         """Create agents and tasks from configuration"""
@@ -350,7 +463,9 @@ Return the configuration in a structured JSON format matching the AutoAgentsConf
                 max_rpm=self.max_rpm,
                 max_execution_time=self.max_execution_time,
                 max_iter=self.max_iter,
-                reflect_llm=self.reflect_llm
+                reflect_llm=self.reflect_llm,
+                base_url=self.base_url,
+                api_key=self.api_key
             )
             agents.append(agent)

praisonaiagents/llm/llm.py

@@ -746,6 +746,7 @@ class LLM:
                                     )
                                     if delta.content:
                                         live.update(display_generating(response_text, current_time))
+
                     else:
                         # Non-verbose streaming
                         for chunk in litellm.completion(
@@ -759,9 +760,12 @@ class LLM:
                         ):
                             if chunk and chunk.choices and chunk.choices[0].delta:
                                 delta = chunk.choices[0].delta
-                                response_text, tool_calls = self._process_stream_delta(
-                                    delta, response_text, tool_calls, formatted_tools
-                                )
+                                if delta.content:
+                                    response_text += delta.content
+
+                                # Capture tool calls from streaming chunks if provider supports it
+                                if formatted_tools and self._supports_streaming_tools():
+                                    tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)

                     response_text = response_text.strip()

@@ -802,20 +806,7 @@
                 # Handle tool calls - Sequential tool calling logic
                 if tool_calls and execute_tool_fn:
                     # Convert tool_calls to a serializable format for all providers
-                    serializable_tool_calls = []
-                    for tc in tool_calls:
-                        if isinstance(tc, dict):
-                            serializable_tool_calls.append(tc) # Already a dict
-                        else:
-                            # Convert object to dict
-                            serializable_tool_calls.append({
-                                "id": tc.id,
-                                "type": getattr(tc, 'type', "function"),
-                                "function": {
-                                    "name": tc.function.name,
-                                    "arguments": tc.function.arguments
-                                }
-                            })
+                    serializable_tool_calls = self._serialize_tool_calls(tool_calls)
                     messages.append({
                         "role": "assistant",
                         "content": response_text,
@@ -826,20 +817,8 @@
                     tool_results = [] # Store all tool results
                     for tool_call in tool_calls:
                         # Handle both object and dict access patterns
-                        if isinstance(tool_call, dict):
-                            is_ollama = self._is_ollama_provider()
-                            function_name, arguments, tool_call_id = self._parse_tool_call_arguments(tool_call, is_ollama)
-                        else:
-                            # Handle object-style tool calls
-                            try:
-                                function_name = tool_call.function.name
-                                arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
-                                tool_call_id = tool_call.id
-                            except (json.JSONDecodeError, AttributeError) as e:
-                                logging.error(f"Error parsing object-style tool call: {e}")
-                                function_name = "unknown_function"
-                                arguments = {}
-                                tool_call_id = f"tool_{id(tool_call)}"
+                        is_ollama = self._is_ollama_provider()
+                        function_name, arguments, tool_call_id = self._extract_tool_call_info(tool_call, is_ollama)

                         logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
                         tool_result = execute_tool_fn(function_name, arguments)
@@ -1367,6 +1346,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                                     if delta.content:
                                         print("\033[K", end="\r")
                                         print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
+
                     else:
                         # Non-verbose streaming
                         async for chunk in await litellm.acompletion(
@@ -1380,9 +1360,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         ):
                             if chunk and chunk.choices and chunk.choices[0].delta:
                                 delta = chunk.choices[0].delta
-                                response_text, tool_calls = self._process_stream_delta(
-                                    delta, response_text, tool_calls, formatted_tools
-                                )
+                                if delta.content:
+                                    response_text += delta.content
+
+                                # Capture tool calls from streaming chunks if provider supports it
+                                if formatted_tools and self._supports_streaming_tools():
+                                    tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)

                     response_text = response_text.strip()

@@ -1417,20 +1400,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

                 if tool_calls:
                     # Convert tool_calls to a serializable format for all providers
-                    serializable_tool_calls = []
-                    for tc in tool_calls:
-                        if isinstance(tc, dict):
-                            serializable_tool_calls.append(tc) # Already a dict
-                        else:
-                            # Convert object to dict
-                            serializable_tool_calls.append({
-                                "id": tc.id,
-                                "type": getattr(tc, 'type', "function"),
-                                "function": {
-                                    "name": tc.function.name,
-                                    "arguments": tc.function.arguments
-                                }
-                            })
+                    serializable_tool_calls = self._serialize_tool_calls(tool_calls)
                     messages.append({
                         "role": "assistant",
                         "content": response_text,
@@ -1440,20 +1410,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     tool_results = [] # Store all tool results
                     for tool_call in tool_calls:
                         # Handle both object and dict access patterns
-                        if isinstance(tool_call, dict):
-                            is_ollama = self._is_ollama_provider()
-                            function_name, arguments, tool_call_id = self._parse_tool_call_arguments(tool_call, is_ollama)
-                        else:
-                            # Handle object-style tool calls
-                            try:
-                                function_name = tool_call.function.name
-                                arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
-                                tool_call_id = tool_call.id
-                            except (json.JSONDecodeError, AttributeError) as e:
-                                logging.error(f"Error parsing object-style tool call: {e}")
-                                function_name = "unknown_function"
-                                arguments = {}
-                                tool_call_id = f"tool_{id(tool_call)}"
+                        is_ollama = self._is_ollama_provider()
+                        function_name, arguments, tool_call_id = self._extract_tool_call_info(tool_call, is_ollama)

                         tool_result = await execute_tool_fn(function_name, arguments)
                         tool_results.append(tool_result) # Store the result
@@ -1899,6 +1857,90 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         return params

+    def _prepare_response_logging(self, temperature: float, stream: bool, verbose: bool, markdown: bool, **kwargs) -> Optional[Dict[str, Any]]:
+        """Prepare debug logging information for response methods"""
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            debug_info = {
+                "model": self.model,
+                "timeout": self.timeout,
+                "temperature": temperature,
+                "top_p": self.top_p,
+                "n": self.n,
+                "max_tokens": self.max_tokens,
+                "presence_penalty": self.presence_penalty,
+                "frequency_penalty": self.frequency_penalty,
+                "stream": stream,
+                "verbose": verbose,
+                "markdown": markdown,
+                "kwargs": str(kwargs)
+            }
+            return debug_info
+        return None
+
+    def _process_streaming_chunk(self, chunk) -> Optional[str]:
+        """Extract content from a streaming chunk"""
+        if chunk and chunk.choices and chunk.choices[0].delta.content:
+            return chunk.choices[0].delta.content
+        return None
+
+    def _process_tool_calls_from_stream(self, delta, tool_calls: List[Dict]) -> List[Dict]:
+        """Process tool calls from streaming delta chunks.
+
+        This handles the accumulation of tool call data from streaming chunks,
+        building up the complete tool call information incrementally.
+        """
+        if hasattr(delta, 'tool_calls') and delta.tool_calls:
+            for tc in delta.tool_calls:
+                if tc.index >= len(tool_calls):
+                    tool_calls.append({
+                        "id": tc.id,
+                        "type": "function",
+                        "function": {"name": "", "arguments": ""}
+                    })
+                if tc.function.name:
+                    tool_calls[tc.index]["function"]["name"] = tc.function.name
+                if tc.function.arguments:
+                    tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
+        return tool_calls
+
+    def _serialize_tool_calls(self, tool_calls) -> List[Dict]:
+        """Convert tool calls to a serializable format for all providers."""
+        serializable_tool_calls = []
+        for tc in tool_calls:
+            if isinstance(tc, dict):
+                serializable_tool_calls.append(tc) # Already a dict
+            else:
+                # Convert object to dict
+                serializable_tool_calls.append({
+                    "id": tc.id,
+                    "type": getattr(tc, 'type', "function"),
+                    "function": {
+                        "name": tc.function.name,
+                        "arguments": tc.function.arguments
+                    }
+                })
+        return serializable_tool_calls
+
+    def _extract_tool_call_info(self, tool_call, is_ollama: bool = False) -> tuple:
+        """Extract function name, arguments, and tool_call_id from a tool call.
+
+        Handles both dict and object formats for tool calls.
+        """
+        if isinstance(tool_call, dict):
+            return self._parse_tool_call_arguments(tool_call, is_ollama)
+        else:
+            # Handle object-style tool calls
+            try:
+                function_name = tool_call.function.name
+                arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
+                tool_call_id = tool_call.id
+            except (json.JSONDecodeError, AttributeError) as e:
+                logging.error(f"Error parsing object-style tool call: {e}")
+                function_name = "unknown_function"
+                arguments = {}
+                tool_call_id = f"tool_{id(tool_call)}"
+            return function_name, arguments, tool_call_id
+
     # Response without tool calls
     def response(
         self,
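Note (not part of the diff): a standalone sketch of how the new _process_tool_calls_from_stream helper above accumulates partial tool-call deltas across streaming chunks. The fake_delta objects below only imitate the delta structure a streaming provider emits (index, id, function.name, function.arguments) and are not part of the package.

from types import SimpleNamespace

def accumulate(delta, tool_calls):
    # Same accumulation logic as the helper: create a slot on first sight of an index,
    # then keep appending argument fragments until the stream ends.
    if getattr(delta, 'tool_calls', None):
        for tc in delta.tool_calls:
            if tc.index >= len(tool_calls):
                tool_calls.append({"id": tc.id, "type": "function",
                                   "function": {"name": "", "arguments": ""}})
            if tc.function.name:
                tool_calls[tc.index]["function"]["name"] = tc.function.name
            if tc.function.arguments:
                tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
    return tool_calls

def fake_delta(index, call_id, name, args):
    fn = SimpleNamespace(name=name, arguments=args)
    return SimpleNamespace(tool_calls=[SimpleNamespace(index=index, id=call_id, function=fn)])

calls = []
calls = accumulate(fake_delta(0, "call_1", "get_weather", '{"city": '), calls)
calls = accumulate(fake_delta(0, None, "", '"Paris"}'), calls)
print(calls[0]["function"])  # {'name': 'get_weather', 'arguments': '{"city": "Paris"}'}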
@@ -1946,42 +1988,29 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         )

         # Get response from LiteLLM
+        response_text = ""
+        completion_params = self._build_completion_params(
+            messages=messages,
+            temperature=temperature,
+            stream=stream,
+            **kwargs
+        )
+
         if stream:
-            response_text = ""
             if verbose:
                 with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
-                    for chunk in litellm.completion(
-                        **self._build_completion_params(
-                            messages=messages,
-                            temperature=temperature,
-                            stream=True,
-                            **kwargs
-                        )
-                    ):
-                        if chunk and chunk.choices and chunk.choices[0].delta.content:
-                            content = chunk.choices[0].delta.content
+                    for chunk in litellm.completion(**completion_params):
+                        content = self._process_streaming_chunk(chunk)
+                        if content:
                             response_text += content
                             live.update(display_generating(response_text, start_time))
             else:
-                for chunk in litellm.completion(
-                    **self._build_completion_params(
-                        messages=messages,
-                        temperature=temperature,
-                        stream=True,
-                        **kwargs
-                    )
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                        response_text += chunk.choices[0].delta.content
+                for chunk in litellm.completion(**completion_params):
+                    content = self._process_streaming_chunk(chunk)
+                    if content:
+                        response_text += content
         else:
-            response = litellm.completion(
-                **self._build_completion_params(
-                    messages=messages,
-                    temperature=temperature,
-                    stream=False,
-                    **kwargs
-                )
-            )
+            response = litellm.completion(**completion_params)
             response_text = response.choices[0].message.content.strip()

         if verbose:
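Note (not part of the diff): the refactor above builds the LiteLLM completion parameters once and reuses them in every branch instead of rebuilding them per call. A minimal sketch of the same pattern outside the class, using plain litellm calls rather than the package's _build_completion_params helper; the get_text function name is illustrative only.

import litellm

def get_text(model, messages, stream=False, temperature=0.7):
    # Assemble the parameters a single time, then reuse them for streaming and non-streaming
    params = {"model": model, "messages": messages, "temperature": temperature, "stream": stream}
    if stream:
        text = ""
        for chunk in litellm.completion(**params):
            if chunk and chunk.choices and chunk.choices[0].delta.content:
                text += chunk.choices[0].delta.content
        return text.strip()
    response = litellm.completion(**params)
    return response.choices[0].message.content.strip()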
@@ -2022,6 +2051,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         logger.debug("Using asynchronous response function")

+
         # Log all self values when in debug mode
         self._log_llm_config(
             'Async response method',
@@ -2046,42 +2076,29 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         )

         # Get response from LiteLLM
+        response_text = ""
+        completion_params = self._build_completion_params(
+            messages=messages,
+            temperature=temperature,
+            stream=stream,
+            **kwargs
+        )
+
         if stream:
-            response_text = ""
             if verbose:
                 with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
-                    async for chunk in await litellm.acompletion(
-                        **self._build_completion_params(
-                            messages=messages,
-                            temperature=temperature,
-                            stream=True,
-                            **kwargs
-                        )
-                    ):
-                        if chunk and chunk.choices and chunk.choices[0].delta.content:
-                            content = chunk.choices[0].delta.content
+                    async for chunk in await litellm.acompletion(**completion_params):
+                        content = self._process_streaming_chunk(chunk)
+                        if content:
                             response_text += content
                             live.update(display_generating(response_text, start_time))
             else:
-                async for chunk in await litellm.acompletion(
-                    **self._build_completion_params(
-                        messages=messages,
-                        temperature=temperature,
-                        stream=True,
-                        **kwargs
-                    )
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                        response_text += chunk.choices[0].delta.content
+                async for chunk in await litellm.acompletion(**completion_params):
+                    content = self._process_streaming_chunk(chunk)
+                    if content:
+                        response_text += content
         else:
-            response = await litellm.acompletion(
-                **self._build_completion_params(
-                    messages=messages,
-                    temperature=temperature,
-                    stream=False,
-                    **kwargs
-                )
-            )
+            response = await litellm.acompletion(**completion_params)
             response_text = response.choices[0].message.content.strip()

         if verbose:
praisonaiagents-0.0.120.dist-info/METADATA → praisonaiagents-0.0.122.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.120
+Version: 0.0.122
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
praisonaiagents-0.0.120.dist-info/RECORD → praisonaiagents-0.0.122.dist-info/RECORD

@@ -3,12 +3,12 @@ praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9
 praisonaiagents/main.py,sha256=bamnEu5PaekloGi52VqAFclm-HzjEVeKtWF0Zpdmfzs,15479
 praisonaiagents/session.py,sha256=d-CZPYikOHb0q-H9f_IWKJsypnQfz1YKeLLkyxs6oDo,15532
 praisonaiagents/agent/__init__.py,sha256=IhIDtAkfJ99cxbttwou52coih_AejS2-jpazsX6LbDY,350
-praisonaiagents/agent/agent.py,sha256=_ROVyOTPBMB5Porv4YvZ4-kKWr4-tGMbSN7V8uDWZgk,109619
+praisonaiagents/agent/agent.py,sha256=BZx0iCP4hHfKFlfGlkZtanBJDDBDZe54dhf4Oskhmhw,110427
 praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=WnptTEMSDMAM30Ka6rOAu6rBD-ZLev3qphb1a3BbP1g,63301
-praisonaiagents/agents/autoagents.py,sha256=NNSlqEsWf4up4lmdQwNl5_iTgodZ5aODUnjlXdp9vEQ,16127
+praisonaiagents/agents/autoagents.py,sha256=1stF8z94eyVg6hyfrLgdArlarftz_OFvEDtRMsGZFvg,21094
 praisonaiagents/guardrails/__init__.py,sha256=HA8zhp-KRHTxo0194MUwXOUJjPyjOu7E3d7xUIKYVVY,310
 praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXCFIyZXZVY4f0hnQyWc,1352
 praisonaiagents/guardrails/llm_guardrail.py,sha256=MTTqmYDdZX-18QN9T17T5P_6H2qnV8GVgymJufW1WuM,3277
@@ -16,7 +16,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=OKPar-XGyAp1ndmbOOdCgqFnTCqpOThYVSIZRxZyP58,15683
 praisonaiagents/llm/__init__.py,sha256=6lTeQ8jWi1-KiwjCDCmkHo2e-bRLq2dP0s5iJWqjO3s,1421
-praisonaiagents/llm/llm.py,sha256=I08T3Du9PQndEzIEjDjacHqVkBpwg_AumcO4TsG85b8,107317
+praisonaiagents/llm/llm.py,sha256=8cDahPVMPI882J0psA1cXreJGXvO33eSOpMNy7FLCS4,107383
 praisonaiagents/llm/openai_client.py,sha256=0JvjCDHoH8I8kIt5vvObARkGdVaPWdTIv_FoEQ5EQPA,48973
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
 praisonaiagents/mcp/mcp.py,sha256=-fFx4MHffnN2woLnnV7Pzx3-1SFkn2j8Gp5F5ZIwKJ0,19698
@@ -53,7 +53,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.120.dist-info/METADATA,sha256=0degpExWB64MNrEFvMEKEkopgjJp_UlALiDV8tFvxmk,1669
-praisonaiagents-0.0.120.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-praisonaiagents-0.0.120.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.120.dist-info/RECORD,,
+praisonaiagents-0.0.122.dist-info/METADATA,sha256=pU6W0akH1O1raC15FOsFQW3GXuflwpznV2ij10vYUP4,1669
+praisonaiagents-0.0.122.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.122.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.122.dist-info/RECORD,,