praisonaiagents 0.0.51__tar.gz → 0.0.53__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/agent/agent.py +21 -10
  3. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/agents/agents.py +4 -18
  4. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/llm/llm.py +324 -124
  5. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/task/task.py +18 -1
  6. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents.egg-info/PKG-INFO +1 -1
  7. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/pyproject.toml +1 -1
  8. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/__init__.py +0 -0
  9. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/agent/__init__.py +0 -0
  10. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/agents/__init__.py +0 -0
  11. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/agents/autoagents.py +0 -0
  12. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/knowledge/__init__.py +0 -0
  13. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/knowledge/chunking.py +0 -0
  14. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/knowledge/knowledge.py +0 -0
  15. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/llm/__init__.py +0 -0
  16. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/main.py +0 -0
  17. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/memory/memory.py +0 -0
  18. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/process/__init__.py +0 -0
  19. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/process/process.py +0 -0
  20. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/task/__init__.py +0 -0
  21. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/__init__.py +0 -0
  22. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/arxiv_tools.py +0 -0
  23. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/calculator_tools.py +0 -0
  24. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/csv_tools.py +0 -0
  25. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/duckdb_tools.py +0 -0
  26. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  27. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/excel_tools.py +0 -0
  28. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/file_tools.py +0 -0
  29. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/json_tools.py +0 -0
  30. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/newspaper_tools.py +0 -0
  31. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/pandas_tools.py +0 -0
  32. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/python_tools.py +0 -0
  33. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/shell_tools.py +0 -0
  34. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/spider_tools.py +0 -0
  35. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/test.py +0 -0
  36. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/tools.py +0 -0
  37. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  38. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/xml_tools.py +0 -0
  39. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/yaml_tools.py +0 -0
  40. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents/tools/yfinance_tools.py +0 -0
  41. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  42. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  43. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents.egg-info/requires.txt +0 -0
  44. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/praisonaiagents.egg-info/top_level.txt +0 -0
  45. {praisonaiagents-0.0.51 → praisonaiagents-0.0.53}/setup.cfg +0 -0

File: PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: praisonaiagents
- Version: 0.0.51
+ Version: 0.0.53
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Dist: pydantic

File: praisonaiagents/agent/agent.py
@@ -317,7 +317,8 @@ class Agent:
  max_reflect: int = 3,
  min_reflect: int = 1,
  reflect_llm: Optional[str] = None,
- user_id: Optional[str] = None
+ user_id: Optional[str] = None,
+ reasoning_steps: bool = False
  ):
  # Add check at start if memory is requested
  if memory is not None:
@@ -425,6 +426,7 @@ Your Goal: {self.goal}

  # Store user_id
  self.user_id = user_id or "praison"
+ self.reasoning_steps = reasoning_steps

  # Check if knowledge parameter has any values
  if not knowledge:
@@ -526,7 +528,7 @@ Your Goal: {self.goal}
  def __str__(self):
  return f"Agent(name='{self.name}', role='{self.role}', goal='{self.goal}')"

- def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, show_reasoning=False):
+ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
  start_time = time.time()
  logging.debug(f"{self.name} sending messages to LLM: {messages}")

@@ -615,7 +617,7 @@ Your Goal: {self.goal}
  live.update(display_generating(full_response_text, start_time))

  # Update live display with reasoning content if enabled
- if show_reasoning and hasattr(chunk.choices[0].delta, "reasoning_content"):
+ if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
  rc = chunk.choices[0].delta.reasoning_content
  if rc:
  reasoning_content += rc
@@ -642,7 +644,8 @@ Your Goal: {self.goal}
  display_error(f"Error in chat completion: {e}")
  return None

- def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, show_reasoning=False):
+ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
+ reasoning_steps = reasoning_steps or self.reasoning_steps
  # Search for existing knowledge if any knowledge is provided
  if self.knowledge:
  search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
@@ -678,7 +681,8 @@ Your Goal: {self.goal}
  agent_name=self.name,
  agent_role=self.role,
  agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
- execute_tool_fn=self.execute_tool # Pass tool execution function
+ execute_tool_fn=self.execute_tool, # Pass tool execution function
+ reasoning_steps=reasoning_steps
  )

  self.chat_history.append({"role": "user", "content": prompt})
@@ -748,7 +752,7 @@ Your Goal: {self.goal}
  agent_tools=agent_tools
  )

- response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, show_reasoning=show_reasoning)
+ response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps)
  if not response:
  return None

@@ -807,6 +811,9 @@ Your Goal: {self.goal}
  if self.verbose:
  logging.debug(f"Agent {self.name} final response: {response_text}")
  display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Return only reasoning content if reasoning_steps is True
+ if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
+ return response.choices[0].message.reasoning_content
  return response_text

  reflection_prompt = f"""
@@ -881,8 +888,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  cleaned = cleaned[:-3].strip()
  return cleaned

- async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, show_reasoning=False):
+ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
  """Async version of chat method. TODO: Requires Syncing with chat method."""
+ reasoning_steps = reasoning_steps or self.reasoning_steps
  try:
  # Search for existing knowledge if any knowledge is provided
  if self.knowledge:
@@ -914,7 +922,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  agent_role=self.role,
  agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
  execute_tool_fn=self.execute_tool_async,
- show_reasoning=show_reasoning
+ reasoning_steps=reasoning_steps
  )

  self.chat_history.append({"role": "user", "content": prompt})
@@ -1029,7 +1037,7 @@ Your Goal: {self.goal}
  display_error(f"Error in achat: {e}")
  return None

- async def _achat_completion(self, response, tools, show_reasoning=False):
+ async def _achat_completion(self, response, tools, reasoning_steps=False):
  """Async version of _chat_completion method"""
  try:
  message = response.choices[0].message
@@ -1097,7 +1105,7 @@ Your Goal: {self.goal}
  full_response_text += chunk.choices[0].delta.content
  live.update(display_generating(full_response_text, start_time))

- if show_reasoning and hasattr(chunk.choices[0].delta, "reasoning_content"):
+ if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
  rc = chunk.choices[0].delta.reasoning_content
  if rc:
  reasoning_content += rc
@@ -1106,6 +1114,9 @@ Your Goal: {self.goal}
  self.console.print()

  final_response = process_stream_chunks(chunks)
+ # Return only reasoning content if reasoning_steps is True
+ if reasoning_steps and hasattr(final_response.choices[0].message, 'reasoning_content'):
+ return final_response.choices[0].message.reasoning_content
  return final_response.choices[0].message.content if final_response else full_response_text

  except Exception as e:
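
The agent-level change above renames show_reasoning to reasoning_steps, exposes it on the constructor, and returns the provider's reasoning_content instead of the final answer when the flag is set. A minimal usage sketch, assuming the public Agent constructor otherwise matches the signature shown above (the model name and prompt are placeholders, not taken from this diff):

# Hypothetical usage of the new reasoning_steps flag; names other than reasoning_steps are illustrative.
from praisonaiagents import Agent

agent = Agent(
    name="Researcher",
    role="Analyst",
    goal="Answer questions and expose the model's reasoning",
    llm="deepseek/deepseek-reasoner",  # placeholder: any model that emits reasoning_content
    reasoning_steps=True,              # new in 0.0.53; can also be passed per call
)

# chat() also accepts reasoning_steps; when the response carries reasoning_content,
# that content is returned instead of the final answer text.
print(agent.chat("Why is the sky blue?", reasoning_steps=True))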

File: praisonaiagents/agents/agents.py
@@ -390,15 +390,8 @@ Context:
  task.status = "completed"
  # Run execute_callback for memory operations
  try:
- try:
- # If a loop is already running, just create the task
- loop = asyncio.get_running_loop()
- loop.create_task(task.execute_callback(task_output))
- except RuntimeError:
- # Otherwise, create and set a new loop, and run the callback
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- loop.create_task(task.execute_callback(task_output))
+ # Use the new sync wrapper to avoid pending coroutine issues
+ task.execute_callback_sync(task_output)
  except Exception as e:
  logger.error(f"Error executing memory callback for task {task_id}: {e}")
  logger.exception(e)
@@ -720,15 +713,8 @@ Context:
  task.status = "completed"
  # Run execute_callback for memory operations
  try:
- try:
- # If a loop is already running, just create the task
- loop = asyncio.get_running_loop()
- loop.create_task(task.execute_callback(task_output))
- except RuntimeError:
- # Otherwise, create and set a new loop, and run the callback
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- loop.create_task(task.execute_callback(task_output))
+ # Use the new sync wrapper to avoid pending coroutine issues
+ task.execute_callback_sync(task_output)
  except Exception as e:
  logger.error(f"Error executing memory callback for task {task_id}: {e}")
  logger.exception(e)
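
Both hunks in agents.py swap the inline event-loop handling for the new task.execute_callback_sync() wrapper defined later in this diff (praisonaiagents/task/task.py). The removed fallback branch created a fresh event loop and only called create_task without ever running that loop, so the callback coroutine could be destroyed while still pending; that is the "pending coroutine" issue the new comment refers to.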

File: praisonaiagents/llm/llm.py
@@ -113,6 +113,7 @@ class LLM:
  litellm.callbacks = []
  # Additional logging suppression
  litellm.suppress_debug_messages = True
+ litellm._logging._disable_debugging()
  logging.getLogger("litellm.utils").setLevel(logging.WARNING)
  logging.getLogger("litellm.main").setLevel(logging.WARNING)
  except ImportError:
@@ -147,6 +148,7 @@ class LLM:
  self.self_reflect = extra_settings.get('self_reflect', False)
  self.max_reflect = extra_settings.get('max_reflect', 3)
  self.min_reflect = extra_settings.get('min_reflect', 1)
+ self.reasoning_steps = extra_settings.get('reasoning_steps', False)

  # Enable error dropping for cleaner output
  litellm.drop_params = True
@@ -176,7 +178,7 @@ class LLM:
  """Enhanced get_response with all OpenAI-like features"""
  try:
  import litellm
-
+ reasoning_steps = kwargs.get('reasoning_steps', self.reasoning_steps)
  # Disable litellm debug messages
  litellm.set_verbose = False

@@ -230,8 +232,55 @@ class LLM:

  # Get response from LiteLLM
  start_time = time.time()
- if verbose:
- with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+
+ # If reasoning_steps is True, do a single non-streaming call
+ if reasoning_steps:
+ resp = litellm.completion(
+ model=self.model,
+ messages=messages,
+ temperature=temperature,
+ stream=False, # force non-streaming
+ **kwargs
+ )
+ reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+ response_text = resp["choices"][0]["message"]["content"]
+
+ # Optionally display reasoning if present
+ if verbose and reasoning_content:
+ display_interaction(
+ original_prompt,
+ f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+ else:
+ display_interaction(
+ original_prompt,
+ response_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+
+ # Otherwise do the existing streaming approach
+ else:
+ if verbose:
+ with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+ response_text = ""
+ for chunk in litellm.completion(
+ model=self.model,
+ messages=messages,
+ temperature=temperature,
+ stream=True,
+ **kwargs
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ content = chunk.choices[0].delta.content
+ response_text += content
+ live.update(display_generating(response_text, start_time))
+ else:
+ # Non-verbose mode, just collect the response
  response_text = ""
  for chunk in litellm.completion(
  model=self.model,
@@ -241,23 +290,9 @@ class LLM:
  **kwargs
  ):
  if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
- response_text += content
- live.update(display_generating(response_text, start_time))
- else:
- # Non-verbose mode, just collect the response
- response_text = ""
- for chunk in litellm.completion(
- model=self.model,
- messages=messages,
- temperature=temperature,
- stream=True,
- **kwargs
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- response_text += chunk.choices[0].delta.content
+ response_text += chunk.choices[0].delta.content

- response_text = response_text.strip()
+ response_text = response_text.strip()

  # Get final completion to check for tool calls
  final_response = litellm.completion(
@@ -302,9 +337,53 @@ class LLM:
  "content": "Function returned an empty output"
  })

- # Get response after tool calls with streaming
- if verbose:
- with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+ # If reasoning_steps is True, do a single non-streaming call
+ if reasoning_steps:
+ resp = litellm.completion(
+ model=self.model,
+ messages=messages,
+ temperature=temperature,
+ stream=False, # force non-streaming
+ **kwargs
+ )
+ reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+ response_text = resp["choices"][0]["message"]["content"]
+
+ # Optionally display reasoning if present
+ if verbose and reasoning_content:
+ display_interaction(
+ original_prompt,
+ f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+ else:
+ display_interaction(
+ original_prompt,
+ response_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+
+ # Otherwise do the existing streaming approach
+ else:
+ # Get response after tool calls with streaming
+ if verbose:
+ with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+ response_text = ""
+ for chunk in litellm.completion(
+ model=self.model,
+ messages=messages,
+ temperature=temperature,
+ stream=True
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ content = chunk.choices[0].delta.content
+ response_text += content
+ live.update(display_generating(response_text, start_time))
+ else:
  response_text = ""
  for chunk in litellm.completion(
  model=self.model,
@@ -313,21 +392,9 @@ class LLM:
  stream=True
  ):
  if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
- response_text += content
- live.update(display_generating(response_text, start_time))
- else:
- response_text = ""
- for chunk in litellm.completion(
- model=self.model,
- messages=messages,
- temperature=temperature,
- stream=True
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- response_text += chunk.choices[0].delta.content
+ response_text += chunk.choices[0].delta.content

- response_text = response_text.strip()
+ response_text = response_text.strip()

  # Handle output formatting
  if output_json or output_pydantic:
@@ -342,6 +409,9 @@ class LLM:
  if verbose:
  display_interaction(original_prompt, response_text, markdown=markdown,
  generation_time=time.time() - start_time, console=console)
+ # Return reasoning content if reasoning_steps is True
+ if reasoning_steps and reasoning_content:
+ return reasoning_content
  return response_text

  # Handle self-reflection
@@ -357,32 +427,66 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  {"role": "user", "content": reflection_prompt}
  ]

- # Get reflection response with streaming
- if verbose:
- with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+ # If reasoning_steps is True, do a single non-streaming call to capture reasoning
+ if reasoning_steps:
+ reflection_resp = litellm.completion(
+ model=self.model,
+ messages=reflection_messages,
+ temperature=temperature,
+ stream=False, # Force non-streaming
+ response_format={"type": "json_object"},
+ **kwargs
+ )
+ # Grab reflection text and optional reasoning
+ reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+ reflection_text = reflection_resp["choices"][0]["message"]["content"]
+
+ # Optionally display reasoning if present
+ if verbose and reasoning_content:
+ display_interaction(
+ "Reflection reasoning:",
+ f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+ elif verbose:
+ display_interaction(
+ "Self-reflection (non-streaming):",
+ reflection_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+ else:
+ # Existing streaming approach
+ if verbose:
+ with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+ reflection_text = ""
+ for chunk in litellm.completion(
+ model=self.model,
+ messages=reflection_messages,
+ temperature=temperature,
+ stream=True,
+ response_format={"type": "json_object"},
+ **kwargs
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ content = chunk.choices[0].delta.content
+ reflection_text += content
+ live.update(display_generating(reflection_text, start_time))
+ else:
  reflection_text = ""
  for chunk in litellm.completion(
  model=self.model,
  messages=reflection_messages,
  temperature=temperature,
  stream=True,
- response_format={"type": "json_object"}
+ response_format={"type": "json_object"},
+ **kwargs
  ):
  if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
- reflection_text += content
- live.update(display_generating(reflection_text, start_time))
- else:
- reflection_text = ""
- for chunk in litellm.completion(
- model=self.model,
- messages=reflection_messages,
- temperature=temperature,
- stream=True,
- response_format={"type": "json_object"}
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- reflection_text += chunk.choices[0].delta.content
+ reflection_text += chunk.choices[0].delta.content

  try:
  reflection_data = json.loads(reflection_text)
@@ -453,6 +557,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  """Async version of get_response with identical functionality."""
  try:
  import litellm
+ reasoning_steps = kwargs.get('reasoning_steps', self.reasoning_steps)
  litellm.set_verbose = False

  # Build messages list
@@ -490,10 +595,10 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  # Format tools for LiteLLM
  formatted_tools = None
  if tools:
- logging.info(f"Starting tool formatting for {len(tools)} tools")
+ logging.debug(f"Starting tool formatting for {len(tools)} tools")
  formatted_tools = []
  for tool in tools:
- logging.info(f"Processing tool: {tool.__name__ if hasattr(tool, '__name__') else str(tool)}")
+ logging.debug(f"Processing tool: {tool.__name__ if hasattr(tool, '__name__') else str(tool)}")
  if hasattr(tool, '__name__'):
  tool_name = tool.__name__
  tool_doc = tool.__doc__ or "No description available"
@@ -539,10 +644,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  }
  }
  # Ensure tool definition is JSON serializable
- print(f"Generated tool definition: {tool_def}")
  try:
  json.dumps(tool_def) # Test serialization
- logging.info(f"Generated tool definition: {tool_def}")
+ logging.debug(f"Generated tool definition: {tool_def}")
  formatted_tools.append(tool_def)
  except TypeError as e:
  logging.error(f"Tool definition not JSON serializable: {e}")
@@ -552,38 +656,67 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  if formatted_tools:
  try:
  json.dumps(formatted_tools) # Final serialization check
- logging.info(f"Final formatted tools: {json.dumps(formatted_tools, indent=2)}")
+ logging.debug(f"Final formatted tools: {json.dumps(formatted_tools, indent=2)}")
  except TypeError as e:
  logging.error(f"Final tools list not JSON serializable: {e}")
  formatted_tools = None

  response_text = ""
- if verbose:
- # ----------------------------------------------------
- # 1) Make the streaming call WITHOUT tools
- # ----------------------------------------------------
- async for chunk in await litellm.acompletion(
+ if reasoning_steps:
+ # Non-streaming call to capture reasoning
+ resp = await litellm.acompletion(
  model=self.model,
  messages=messages,
  temperature=temperature,
- stream=True,
+ stream=False, # force non-streaming
  **kwargs
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- response_text += chunk.choices[0].delta.content
- print("\033[K", end="\r")
- print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
+ )
+ reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+ response_text = resp["choices"][0]["message"]["content"]
+
+ if verbose and reasoning_content:
+ display_interaction(
+ "Initial reasoning:",
+ f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+ elif verbose:
+ display_interaction(
+ "Initial response:",
+ response_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
  else:
- # Non-verbose streaming call, still no tools
- async for chunk in await litellm.acompletion(
- model=self.model,
- messages=messages,
- temperature=temperature,
- stream=True,
- **kwargs
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- response_text += chunk.choices[0].delta.content
+ if verbose:
+ # ----------------------------------------------------
+ # 1) Make the streaming call WITHOUT tools
+ # ----------------------------------------------------
+ async for chunk in await litellm.acompletion(
+ model=self.model,
+ messages=messages,
+ temperature=temperature,
+ stream=True,
+ **kwargs
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ response_text += chunk.choices[0].delta.content
+ print("\033[K", end="\r")
+ print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
+ else:
+ # Non-verbose streaming call, still no tools
+ async for chunk in await litellm.acompletion(
+ model=self.model,
+ messages=messages,
+ temperature=temperature,
+ stream=True,
+ **kwargs
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ response_text += chunk.choices[0].delta.content

  response_text = response_text.strip()

@@ -634,35 +767,66 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  "content": "Function returned an empty output"
  })

- # Get response after tool calls with streaming
+ # Get response after tool calls
  response_text = ""
- if verbose:
- async for chunk in await litellm.acompletion(
+ if reasoning_steps:
+ # Non-streaming call to capture reasoning
+ resp = await litellm.acompletion(
  model=self.model,
  messages=messages,
  temperature=temperature,
- stream=True,
- tools=formatted_tools,
+ stream=False, # force non-streaming
+ tools=formatted_tools, # Include tools
  **kwargs
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
- response_text += content
- print("\033[K", end="\r")
- print(f"Reflecting... {time.time() - start_time:.1f}s", end="\r")
- else:
- response_text = ""
- for chunk in litellm.completion(
- model=self.model,
- messages=messages,
- temperature=temperature,
- stream=True,
- **kwargs
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- response_text += chunk.choices[0].delta.content
+ )
+ reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+ response_text = resp["choices"][0]["message"]["content"]
+
+ if verbose and reasoning_content:
+ display_interaction(
+ "Tool response reasoning:",
+ f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+ elif verbose:
+ display_interaction(
+ "Tool response:",
+ response_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+ else:
+ # Get response after tool calls with streaming
+ if verbose:
+ async for chunk in await litellm.acompletion(
+ model=self.model,
+ messages=messages,
+ temperature=temperature,
+ stream=True,
+ tools=formatted_tools,
+ **kwargs
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ content = chunk.choices[0].delta.content
+ response_text += content
+ print("\033[K", end="\r")
+ print(f"Reflecting... {time.time() - start_time:.1f}s", end="\r")
+ else:
+ response_text = ""
+ for chunk in litellm.completion(
+ model=self.model,
+ messages=messages,
+ temperature=temperature,
+ stream=True,
+ **kwargs
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ response_text += chunk.choices[0].delta.content

- response_text = response_text.strip()
+ response_text = response_text.strip()

  # Handle output formatting
  if output_json or output_pydantic:
@@ -677,6 +841,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  if verbose:
  display_interaction(original_prompt, response_text, markdown=markdown,
  generation_time=time.time() - start_time, console=console)
+ # Return reasoning content if reasoning_steps is True
+ if reasoning_steps and reasoning_content:
+ return reasoning_content
  return response_text

  # Handle self-reflection
@@ -692,33 +859,66 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  {"role": "user", "content": reflection_prompt}
  ]

- # Get reflection response
- reflection_text = ""
- if verbose:
- async for chunk in await litellm.acompletion(
+ # If reasoning_steps is True, do a single non-streaming call to capture reasoning
+ if reasoning_steps:
+ reflection_resp = litellm.completion(
  model=self.model,
  messages=reflection_messages,
  temperature=temperature,
- stream=True,
+ stream=False, # Force non-streaming
  response_format={"type": "json_object"},
  **kwargs
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
- reflection_text += content
- print("\033[K", end="\r")
- print(f"Reflecting... {time.time() - start_time:.1f}s", end="\r")
+ )
+ # Grab reflection text and optional reasoning
+ reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+ reflection_text = reflection_resp["choices"][0]["message"]["content"]
+
+ # Optionally display reasoning if present
+ if verbose and reasoning_content:
+ display_interaction(
+ "Reflection reasoning:",
+ f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+ elif verbose:
+ display_interaction(
+ "Self-reflection (non-streaming):",
+ reflection_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
  else:
- async for chunk in await litellm.acompletion(
- model=self.model,
- messages=reflection_messages,
- temperature=temperature,
- stream=True,
- response_format={"type": "json_object"},
- **kwargs
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- reflection_text += chunk.choices[0].delta.content
+ # Existing streaming approach
+ if verbose:
+ with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+ reflection_text = ""
+ for chunk in litellm.completion(
+ model=self.model,
+ messages=reflection_messages,
+ temperature=temperature,
+ stream=True,
+ response_format={"type": "json_object"},
+ **kwargs
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ content = chunk.choices[0].delta.content
+ reflection_text += content
+ live.update(display_generating(reflection_text, start_time))
+ else:
+ reflection_text = ""
+ for chunk in litellm.completion(
+ model=self.model,
+ messages=reflection_messages,
+ temperature=temperature,
+ stream=True,
+ response_format={"type": "json_object"},
+ **kwargs
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ reflection_text += chunk.choices[0].delta.content

  while True: # Add loop for reflection handling
  try:
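
All of the reasoning_steps branches added to llm.py follow the same shape: force a non-streaming LiteLLM call, then read reasoning_content out of provider_specific_fields on the returned message. A standalone sketch of that shape, assuming a model that emits reasoning_content and mirroring the field access used in the diff (the model name and prompt are placeholders):

# Sketch of the non-streaming reasoning_steps path; field access mirrors the diff above.
import litellm

resp = litellm.completion(
    model="deepseek/deepseek-reasoner",  # placeholder reasoning-capable model
    messages=[{"role": "user", "content": "What is 17 * 24?"}],
    stream=False,  # non-streaming so the full message (and any reasoning) is available at once
)

message = resp["choices"][0]["message"]
reasoning = message.get("provider_specific_fields", {}).get("reasoning_content")
answer = message["content"]

# With reasoning_steps=True the library returns the reasoning when present,
# otherwise callers fall back to the ordinary answer text.
print(reasoning if reasoning else answer)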

File: praisonaiagents/task/task.py
@@ -302,4 +302,21 @@ Expected Output: {self.expected_output}.
  Context:

  {' '.join(unique_contexts)}
- """
+ """
+
+ def execute_callback_sync(self, task_output: TaskOutput) -> None:
+ """
+ Synchronous wrapper to ensure that execute_callback is awaited,
+ preventing 'Task was destroyed but pending!' warnings if called
+ from non-async code.
+ """
+ import asyncio
+ try:
+ loop = asyncio.get_running_loop()
+ if loop.is_running():
+ loop.create_task(self.execute_callback(task_output))
+ else:
+ loop.run_until_complete(self.execute_callback(task_output))
+ except RuntimeError:
+ # If no loop is running in this context
+ asyncio.run(self.execute_callback(task_output))
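
execute_callback_sync is the standard "schedule if a loop is already running, otherwise run the coroutine to completion" pattern. A self-contained sketch of the same idea with a toy coroutine (all names here are illustrative, not part of the package):

# Generic schedule-or-run pattern, as used by execute_callback_sync above.
import asyncio

async def callback(result: str) -> None:
    # Stand-in for Task.execute_callback (illustrative only).
    await asyncio.sleep(0)
    print(f"callback ran with: {result}")

def run_callback_sync(result: str) -> None:
    try:
        # Inside async code: schedule on the live loop instead of blocking it.
        asyncio.get_running_loop().create_task(callback(result))
    except RuntimeError:
        # No running loop in this thread: run the coroutine to completion.
        asyncio.run(callback(result))

run_callback_sync("task output")  # from sync code this prints: callback ran with: task output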

File: praisonaiagents.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: praisonaiagents
- Version: 0.0.51
+ Version: 0.0.53
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Dist: pydantic

File: pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "praisonaiagents"
- version = "0.0.51"
+ version = "0.0.53"
  description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
  authors = [
  { name="Mervin Praison" }