praisonaiagents 0.0.51__py3-none-any.whl → 0.0.52__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
praisonaiagents/agent/agent.py

@@ -317,7 +317,8 @@ class Agent:
         max_reflect: int = 3,
         min_reflect: int = 1,
         reflect_llm: Optional[str] = None,
-        user_id: Optional[str] = None
+        user_id: Optional[str] = None,
+        show_reasoning: bool = False
     ):
         # Add check at start if memory is requested
         if memory is not None:
@@ -425,6 +426,7 @@ Your Goal: {self.goal}
 
         # Store user_id
         self.user_id = user_id or "praison"
+        self.show_reasoning = show_reasoning
 
         # Check if knowledge parameter has any values
        if not knowledge:
@@ -643,6 +645,7 @@ Your Goal: {self.goal}
         return None
 
     def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, show_reasoning=False):
+        show_reasoning = show_reasoning or self.show_reasoning
         # Search for existing knowledge if any knowledge is provided
         if self.knowledge:
             search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
@@ -678,7 +681,8 @@ Your Goal: {self.goal}
                 agent_name=self.name,
                 agent_role=self.role,
                 agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
-                execute_tool_fn=self.execute_tool # Pass tool execution function
+                execute_tool_fn=self.execute_tool, # Pass tool execution function
+                show_reasoning=show_reasoning
             )
 
         self.chat_history.append({"role": "user", "content": prompt})
@@ -883,6 +887,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
     async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, show_reasoning=False):
         """Async version of chat method. TODO: Requires Syncing with chat method."""
+        show_reasoning = show_reasoning or self.show_reasoning
         try:
             # Search for existing knowledge if any knowledge is provided
             if self.knowledge:
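The agent-side change is a single opt-in flag: the constructor stores it, and chat()/achat() OR the per-call argument with that stored default before handing it to the LLM wrapper. A minimal usage sketch follows; show_reasoning comes from the hunks above, while the other constructor arguments (name, role, goal, llm) are assumptions based on attributes referenced elsewhere in this diff, and the model string is illustrative.

    from praisonaiagents import Agent

    agent = Agent(
        name="Researcher",                     # assumed parameter (self.name is referenced above)
        role="Analyst",                        # assumed parameter (self.role is referenced above)
        goal="Explain concepts step by step",  # assumed parameter (self.goal is referenced above)
        llm="deepseek/deepseek-reasoner",      # illustrative reasoning-capable model
        show_reasoning=True                    # new agent-wide default in 0.0.52
    )

    # Per-call flag still exists; chat() computes show_reasoning OR self.show_reasoning,
    # so either flag being True enables the non-streaming reasoning path.
    answer = agent.chat("Why is the sky blue?", show_reasoning=True)
    print(answer)

Because chat() ORs the two flags, passing show_reasoning=False per call does not disable reasoning once the agent was constructed with show_reasoning=True.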
praisonaiagents/llm/llm.py

@@ -113,6 +113,7 @@ class LLM:
             litellm.callbacks = []
             # Additional logging suppression
             litellm.suppress_debug_messages = True
+            litellm._logging._disable_debugging()
             logging.getLogger("litellm.utils").setLevel(logging.WARNING)
             logging.getLogger("litellm.main").setLevel(logging.WARNING)
         except ImportError:
@@ -147,6 +148,7 @@ class LLM:
         self.self_reflect = extra_settings.get('self_reflect', False)
         self.max_reflect = extra_settings.get('max_reflect', 3)
         self.min_reflect = extra_settings.get('min_reflect', 1)
+        self.show_reasoning = extra_settings.get('show_reasoning', False)
 
         # Enable error dropping for cleaner output
         litellm.drop_params = True
@@ -176,7 +178,7 @@ class LLM:
         """Enhanced get_response with all OpenAI-like features"""
         try:
             import litellm
-
+            show_reasoning = kwargs.get('show_reasoning', self.show_reasoning)
             # Disable litellm debug messages
             litellm.set_verbose = False
 
@@ -230,8 +232,55 @@ class LLM:
 
             # Get response from LiteLLM
             start_time = time.time()
-            if verbose:
-                with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+
+            # If show_reasoning is True, do a single non-streaming call
+            if show_reasoning:
+                resp = litellm.completion(
+                    model=self.model,
+                    messages=messages,
+                    temperature=temperature,
+                    stream=False, # force non-streaming
+                    **kwargs
+                )
+                reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+                response_text = resp["choices"][0]["message"]["content"]
+
+                # Optionally display reasoning if present
+                if verbose and reasoning_content:
+                    display_interaction(
+                        original_prompt,
+                        f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
+                else:
+                    display_interaction(
+                        original_prompt,
+                        response_text,
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
+
+            # Otherwise do the existing streaming approach
+            else:
+                if verbose:
+                    with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+                        response_text = ""
+                        for chunk in litellm.completion(
+                            model=self.model,
+                            messages=messages,
+                            temperature=temperature,
+                            stream=True,
+                            **kwargs
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                content = chunk.choices[0].delta.content
+                                response_text += content
+                                live.update(display_generating(response_text, start_time))
+                else:
+                    # Non-verbose mode, just collect the response
                     response_text = ""
                     for chunk in litellm.completion(
                         model=self.model,
@@ -241,23 +290,9 @@ class LLM:
                         **kwargs
                     ):
                         if chunk and chunk.choices and chunk.choices[0].delta.content:
-                            content = chunk.choices[0].delta.content
-                            response_text += content
-                            live.update(display_generating(response_text, start_time))
-            else:
-                # Non-verbose mode, just collect the response
-                response_text = ""
-                for chunk in litellm.completion(
-                    model=self.model,
-                    messages=messages,
-                    temperature=temperature,
-                    stream=True,
-                    **kwargs
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                        response_text += chunk.choices[0].delta.content
+                            response_text += chunk.choices[0].delta.content
 
-            response_text = response_text.strip()
+            response_text = response_text.strip()
 
             # Get final completion to check for tool calls
             final_response = litellm.completion(
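The new branch above captures the model's reasoning by forcing a non-streaming LiteLLM call and reading provider_specific_fields from the returned message. A standalone sketch of that extraction pattern, mirroring the hunk; whether reasoning_content is actually populated depends on the provider and model behind LiteLLM, and the model name here is illustrative.

    import litellm

    resp = litellm.completion(
        model="deepseek/deepseek-reasoner",  # illustrative reasoning-capable model
        messages=[{"role": "user", "content": "Why is the sky blue?"}],
        stream=False,  # the new code path forces non-streaming when show_reasoning is set
    )
    message = resp["choices"][0]["message"]
    reasoning = message.get("provider_specific_fields", {}).get("reasoning_content")
    answer = message["content"]

    if reasoning:
        print(f"Reasoning:\n{reasoning}\n")
    print(f"Answer:\n{answer}")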
@@ -302,9 +337,53 @@ class LLM:
                             "content": "Function returned an empty output"
                         })
 
-                # Get response after tool calls with streaming
-                if verbose:
-                    with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+                # If show_reasoning is True, do a single non-streaming call
+                if show_reasoning:
+                    resp = litellm.completion(
+                        model=self.model,
+                        messages=messages,
+                        temperature=temperature,
+                        stream=False, # force non-streaming
+                        **kwargs
+                    )
+                    reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+                    response_text = resp["choices"][0]["message"]["content"]
+
+                    # Optionally display reasoning if present
+                    if verbose and reasoning_content:
+                        display_interaction(
+                            original_prompt,
+                            f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
+                            markdown=markdown,
+                            generation_time=time.time() - start_time,
+                            console=console
+                        )
+                    else:
+                        display_interaction(
+                            original_prompt,
+                            response_text,
+                            markdown=markdown,
+                            generation_time=time.time() - start_time,
+                            console=console
+                        )
+
+                # Otherwise do the existing streaming approach
+                else:
+                    # Get response after tool calls with streaming
+                    if verbose:
+                        with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+                            response_text = ""
+                            for chunk in litellm.completion(
+                                model=self.model,
+                                messages=messages,
+                                temperature=temperature,
+                                stream=True
+                            ):
+                                if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                    content = chunk.choices[0].delta.content
+                                    response_text += content
+                                    live.update(display_generating(response_text, start_time))
+                    else:
                         response_text = ""
                         for chunk in litellm.completion(
                             model=self.model,
@@ -313,21 +392,9 @@ class LLM:
                             stream=True
                         ):
                             if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                content = chunk.choices[0].delta.content
-                                response_text += content
-                                live.update(display_generating(response_text, start_time))
-                else:
-                    response_text = ""
-                    for chunk in litellm.completion(
-                        model=self.model,
-                        messages=messages,
-                        temperature=temperature,
-                        stream=True
-                    ):
-                        if chunk and chunk.choices and chunk.choices[0].delta.content:
-                            response_text += chunk.choices[0].delta.content
+                                response_text += chunk.choices[0].delta.content
 
-                response_text = response_text.strip()
+                response_text = response_text.strip()
 
             # Handle output formatting
             if output_json or output_pydantic:
@@ -357,32 +424,66 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     {"role": "user", "content": reflection_prompt}
                 ]
 
-                # Get reflection response with streaming
-                if verbose:
-                    with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+                # If show_reasoning is True, do a single non-streaming call to capture reasoning
+                if show_reasoning:
+                    reflection_resp = litellm.completion(
+                        model=self.model,
+                        messages=reflection_messages,
+                        temperature=temperature,
+                        stream=False, # Force non-streaming
+                        response_format={"type": "json_object"},
+                        **kwargs
+                    )
+                    # Grab reflection text and optional reasoning
+                    reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+                    reflection_text = reflection_resp["choices"][0]["message"]["content"]
+
+                    # Optionally display reasoning if present
+                    if verbose and reasoning_content:
+                        display_interaction(
+                            "Reflection reasoning:",
+                            f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
+                            markdown=markdown,
+                            generation_time=time.time() - start_time,
+                            console=console
+                        )
+                    elif verbose:
+                        display_interaction(
+                            "Self-reflection (non-streaming):",
+                            reflection_text,
+                            markdown=markdown,
+                            generation_time=time.time() - start_time,
+                            console=console
+                        )
+                else:
+                    # Existing streaming approach
+                    if verbose:
+                        with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+                            reflection_text = ""
+                            for chunk in litellm.completion(
+                                model=self.model,
+                                messages=reflection_messages,
+                                temperature=temperature,
+                                stream=True,
+                                response_format={"type": "json_object"},
+                                **kwargs
+                            ):
+                                if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                    content = chunk.choices[0].delta.content
+                                    reflection_text += content
+                                    live.update(display_generating(reflection_text, start_time))
+                    else:
                         reflection_text = ""
                         for chunk in litellm.completion(
                             model=self.model,
                             messages=reflection_messages,
                             temperature=temperature,
                             stream=True,
-                            response_format={"type": "json_object"}
+                            response_format={"type": "json_object"},
+                            **kwargs
                         ):
                             if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                content = chunk.choices[0].delta.content
-                                reflection_text += content
-                                live.update(display_generating(reflection_text, start_time))
-                else:
-                    reflection_text = ""
-                    for chunk in litellm.completion(
-                        model=self.model,
-                        messages=reflection_messages,
-                        temperature=temperature,
-                        stream=True,
-                        response_format={"type": "json_object"}
-                    ):
-                        if chunk and chunk.choices and chunk.choices[0].delta.content:
-                            reflection_text += chunk.choices[0].delta.content
+                                reflection_text += chunk.choices[0].delta.content
 
                 try:
                     reflection_data = json.loads(reflection_text)
@@ -453,6 +554,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         """Async version of get_response with identical functionality."""
         try:
             import litellm
+            show_reasoning = kwargs.get('show_reasoning', self.show_reasoning)
             litellm.set_verbose = False
 
             # Build messages list
@@ -490,10 +592,10 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             # Format tools for LiteLLM
             formatted_tools = None
             if tools:
-                logging.info(f"Starting tool formatting for {len(tools)} tools")
+                logging.debug(f"Starting tool formatting for {len(tools)} tools")
                 formatted_tools = []
                 for tool in tools:
-                    logging.info(f"Processing tool: {tool.__name__ if hasattr(tool, '__name__') else str(tool)}")
+                    logging.debug(f"Processing tool: {tool.__name__ if hasattr(tool, '__name__') else str(tool)}")
                     if hasattr(tool, '__name__'):
                         tool_name = tool.__name__
                         tool_doc = tool.__doc__ or "No description available"
@@ -539,10 +641,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                             }
                         }
                        # Ensure tool definition is JSON serializable
-                        print(f"Generated tool definition: {tool_def}")
                        try:
                            json.dumps(tool_def) # Test serialization
-                            logging.info(f"Generated tool definition: {tool_def}")
+                            logging.debug(f"Generated tool definition: {tool_def}")
                            formatted_tools.append(tool_def)
                        except TypeError as e:
                            logging.error(f"Tool definition not JSON serializable: {e}")
@@ -552,38 +653,67 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             if formatted_tools:
                 try:
                     json.dumps(formatted_tools) # Final serialization check
-                    logging.info(f"Final formatted tools: {json.dumps(formatted_tools, indent=2)}")
+                    logging.debug(f"Final formatted tools: {json.dumps(formatted_tools, indent=2)}")
                 except TypeError as e:
                     logging.error(f"Final tools list not JSON serializable: {e}")
                     formatted_tools = None
 
             response_text = ""
-            if verbose:
-                # ----------------------------------------------------
-                # 1) Make the streaming call WITHOUT tools
-                # ----------------------------------------------------
-                async for chunk in await litellm.acompletion(
+            if show_reasoning:
+                # Non-streaming call to capture reasoning
+                resp = await litellm.acompletion(
                     model=self.model,
                     messages=messages,
                     temperature=temperature,
-                    stream=True,
+                    stream=False, # force non-streaming
                     **kwargs
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                        response_text += chunk.choices[0].delta.content
-                        print("\033[K", end="\r")
-                        print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
+                )
+                reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+                response_text = resp["choices"][0]["message"]["content"]
+
+                if verbose and reasoning_content:
+                    display_interaction(
+                        "Initial reasoning:",
+                        f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
+                elif verbose:
+                    display_interaction(
+                        "Initial response:",
+                        response_text,
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
             else:
-                # Non-verbose streaming call, still no tools
-                async for chunk in await litellm.acompletion(
-                    model=self.model,
-                    messages=messages,
-                    temperature=temperature,
-                    stream=True,
-                    **kwargs
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                        response_text += chunk.choices[0].delta.content
+                if verbose:
+                    # ----------------------------------------------------
+                    # 1) Make the streaming call WITHOUT tools
+                    # ----------------------------------------------------
+                    async for chunk in await litellm.acompletion(
+                        model=self.model,
+                        messages=messages,
+                        temperature=temperature,
+                        stream=True,
+                        **kwargs
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            response_text += chunk.choices[0].delta.content
+                            print("\033[K", end="\r")
+                            print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
+                else:
+                    # Non-verbose streaming call, still no tools
+                    async for chunk in await litellm.acompletion(
+                        model=self.model,
+                        messages=messages,
+                        temperature=temperature,
+                        stream=True,
+                        **kwargs
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            response_text += chunk.choices[0].delta.content
 
             response_text = response_text.strip()
 
@@ -634,35 +764,66 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         "content": "Function returned an empty output"
                     })
 
-            # Get response after tool calls with streaming
+            # Get response after tool calls
             response_text = ""
-            if verbose:
-                async for chunk in await litellm.acompletion(
+            if show_reasoning:
+                # Non-streaming call to capture reasoning
+                resp = await litellm.acompletion(
                     model=self.model,
                     messages=messages,
                     temperature=temperature,
-                    stream=True,
-                    tools=formatted_tools,
+                    stream=False, # force non-streaming
+                    tools=formatted_tools, # Include tools
                     **kwargs
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                        content = chunk.choices[0].delta.content
-                        response_text += content
-                        print("\033[K", end="\r")
-                        print(f"Reflecting... {time.time() - start_time:.1f}s", end="\r")
-            else:
-                response_text = ""
-                for chunk in litellm.completion(
-                    model=self.model,
-                    messages=messages,
-                    temperature=temperature,
-                    stream=True,
-                    **kwargs
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                        response_text += chunk.choices[0].delta.content
+                )
+                reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+                response_text = resp["choices"][0]["message"]["content"]
+
+                if verbose and reasoning_content:
+                    display_interaction(
+                        "Tool response reasoning:",
+                        f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
+                elif verbose:
+                    display_interaction(
+                        "Tool response:",
+                        response_text,
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
+            else:
+                # Get response after tool calls with streaming
+                if verbose:
+                    async for chunk in await litellm.acompletion(
+                        model=self.model,
+                        messages=messages,
+                        temperature=temperature,
+                        stream=True,
+                        tools=formatted_tools,
+                        **kwargs
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            content = chunk.choices[0].delta.content
+                            response_text += content
+                            print("\033[K", end="\r")
+                            print(f"Reflecting... {time.time() - start_time:.1f}s", end="\r")
+                else:
+                    response_text = ""
+                    for chunk in litellm.completion(
+                        model=self.model,
+                        messages=messages,
+                        temperature=temperature,
+                        stream=True,
+                        **kwargs
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            response_text += chunk.choices[0].delta.content
 
-            response_text = response_text.strip()
+            response_text = response_text.strip()
 
             # Handle output formatting
             if output_json or output_pydantic:
@@ -692,33 +853,66 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 {"role": "user", "content": reflection_prompt}
             ]
 
-            # Get reflection response
-            reflection_text = ""
-            if verbose:
-                async for chunk in await litellm.acompletion(
+            # If show_reasoning is True, do a single non-streaming call to capture reasoning
+            if show_reasoning:
+                reflection_resp = litellm.completion(
                     model=self.model,
                     messages=reflection_messages,
                     temperature=temperature,
-                    stream=True,
+                    stream=False, # Force non-streaming
                     response_format={"type": "json_object"},
                     **kwargs
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                        content = chunk.choices[0].delta.content
-                        reflection_text += content
-                        print("\033[K", end="\r")
-                        print(f"Reflecting... {time.time() - start_time:.1f}s", end="\r")
+                )
+                # Grab reflection text and optional reasoning
+                reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+                reflection_text = reflection_resp["choices"][0]["message"]["content"]
+
+                # Optionally display reasoning if present
+                if verbose and reasoning_content:
+                    display_interaction(
+                        "Reflection reasoning:",
+                        f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
+                elif verbose:
+                    display_interaction(
+                        "Self-reflection (non-streaming):",
+                        reflection_text,
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
             else:
-                async for chunk in await litellm.acompletion(
-                    model=self.model,
-                    messages=reflection_messages,
-                    temperature=temperature,
-                    stream=True,
-                    response_format={"type": "json_object"},
-                    **kwargs
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                        reflection_text += chunk.choices[0].delta.content
+                # Existing streaming approach
+                if verbose:
+                    with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+                        reflection_text = ""
+                        for chunk in litellm.completion(
+                            model=self.model,
+                            messages=reflection_messages,
+                            temperature=temperature,
+                            stream=True,
+                            response_format={"type": "json_object"},
+                            **kwargs
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                content = chunk.choices[0].delta.content
+                                reflection_text += content
+                                live.update(display_generating(reflection_text, start_time))
+                else:
+                    reflection_text = ""
+                    for chunk in litellm.completion(
+                        model=self.model,
+                        messages=reflection_messages,
+                        temperature=temperature,
+                        stream=True,
+                        response_format={"type": "json_object"},
+                        **kwargs
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            reflection_text += chunk.choices[0].delta.content
 
             while True: # Add loop for reflection handling
                 try:
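Taken together, the llm.py changes wire the flag through two layers: LLM.__init__ stores a default pulled from extra_settings, and get_response/aget_response read a per-call kwarg that falls back to that default via kwargs.get(). Unlike the Agent-level OR, an explicit show_reasoning=False per call does win here. A toy sketch of that precedence pattern with hypothetical names (not the real class):

    class ReasoningToggle:
        """Toy stand-in for the default-plus-override pattern used in llm.py."""

        def __init__(self, **extra_settings):
            # Constructor-level default, as in LLM.__init__
            self.show_reasoning = extra_settings.get('show_reasoning', False)

        def get_response(self, prompt: str, **kwargs) -> str:
            # Per-call override with fallback, as in LLM.get_response / aget_response
            show_reasoning = kwargs.get('show_reasoning', self.show_reasoning)
            mode = "non-streaming, reasoning captured" if show_reasoning else "streaming"
            return f"{prompt!r} handled via {mode}"

    toggle = ReasoningToggle(show_reasoning=True)
    print(toggle.get_response("Why is the sky blue?"))                # default applies: reasoning on
    print(toggle.get_response("Quick answer", show_reasoning=False))  # explicit False overrides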
praisonaiagents-0.0.51.dist-info/METADATA → praisonaiagents-0.0.52.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: praisonaiagents
-Version: 0.0.51
+Version: 0.0.52
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic

praisonaiagents-0.0.51.dist-info/RECORD → praisonaiagents-0.0.52.dist-info/RECORD

@@ -1,7 +1,7 @@
 praisonaiagents/__init__.py,sha256=JtPibbmeFv3meIb3vkKjckB0p7m-Vqt2RYPwOH8P41k,1228
 praisonaiagents/main.py,sha256=0kB9gn9meXtr4EIrdgA2lAioKIHCRJ61audsGDwuTm4,14428
 praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
-praisonaiagents/agent/agent.py,sha256=5_IEyIrlQN75a9zJeZdWRMTvuMNNIofwttzo2KF9lyM,53584
+praisonaiagents/agent/agent.py,sha256=9r9eN9sTI3A_3IZdA4GYpsKXE5Q4m8yQ_QXGyFirQok,53844
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=PRqBEUqRadVLBoDd-tgne5fVB87bR6P9qOgvDdjS-dY,37028
 praisonaiagents/agents/autoagents.py,sha256=bjC2O5oZmoJItJXIMPTWc2lsp_AJC9tMiTQOal2hwPA,13532
@@ -9,7 +9,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
 praisonaiagents/knowledge/knowledge.py,sha256=fQNREDiwdoisfIxJBLVkteXgq_8Gbypfc3UaZbxf5QY,13210
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=h1n8Iyo0WTCLwTPEWpiTjoK28LNk3pbAjvcTtJLjwzY,38077
+praisonaiagents/llm/llm.py,sha256=WEfqWEOb2Sa2V5MRVa2XbFCBcrE5WBMeEhSFq3HCZvM,49145
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=_1Nk37kOYakPaUWAJff86rP0ENyykXqMnhTp8E0efuE,30802
@@ -35,7 +35,7 @@ praisonaiagents/tools/wikipedia_tools.py,sha256=pGko-f33wqXgxJTv8db7TbizY5XnzBQR
 praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxNMMs1A,17122
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
-praisonaiagents-0.0.51.dist-info/METADATA,sha256=W6SXNFt4RaQFloUFqjzpEFAPMmucqWM_OEtoU4hBFAE,830
-praisonaiagents-0.0.51.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-praisonaiagents-0.0.51.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.51.dist-info/RECORD,,
+praisonaiagents-0.0.52.dist-info/METADATA,sha256=NIkU1FNQXms13nIStb4r2yYjItL1aA0XSL7sCy1HxMM,830
+praisonaiagents-0.0.52.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+praisonaiagents-0.0.52.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.52.dist-info/RECORD,,