praisonaiagents 0.0.52__py3-none-any.whl → 0.0.54__py3-none-any.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
@@ -318,7 +318,7 @@ class Agent:
  min_reflect: int = 1,
  reflect_llm: Optional[str] = None,
  user_id: Optional[str] = None,
- show_reasoning: bool = False
+ reasoning_steps: bool = False
  ):
  # Add check at start if memory is requested
  if memory is not None:
@@ -426,7 +426,7 @@ Your Goal: {self.goal}

  # Store user_id
  self.user_id = user_id or "praison"
- self.show_reasoning = show_reasoning
+ self.reasoning_steps = reasoning_steps

  # Check if knowledge parameter has any values
  if not knowledge:
@@ -528,7 +528,7 @@ Your Goal: {self.goal}
  def __str__(self):
  return f"Agent(name='{self.name}', role='{self.role}', goal='{self.goal}')"

- def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, show_reasoning=False):
+ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
  start_time = time.time()
  logging.debug(f"{self.name} sending messages to LLM: {messages}")

@@ -617,7 +617,7 @@ Your Goal: {self.goal}
  live.update(display_generating(full_response_text, start_time))

  # Update live display with reasoning content if enabled
- if show_reasoning and hasattr(chunk.choices[0].delta, "reasoning_content"):
+ if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
  rc = chunk.choices[0].delta.reasoning_content
  if rc:
  reasoning_content += rc
@@ -644,8 +644,8 @@ Your Goal: {self.goal}
  display_error(f"Error in chat completion: {e}")
  return None

- def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, show_reasoning=False):
- show_reasoning = show_reasoning or self.show_reasoning
+ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
+ reasoning_steps = reasoning_steps or self.reasoning_steps
  # Search for existing knowledge if any knowledge is provided
  if self.knowledge:
  search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
@@ -682,7 +682,7 @@ Your Goal: {self.goal}
  agent_role=self.role,
  agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
  execute_tool_fn=self.execute_tool, # Pass tool execution function
- show_reasoning=show_reasoning
+ reasoning_steps=reasoning_steps
  )

  self.chat_history.append({"role": "user", "content": prompt})
@@ -752,7 +752,7 @@ Your Goal: {self.goal}
  agent_tools=agent_tools
  )

- response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, show_reasoning=show_reasoning)
+ response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps)
  if not response:
  return None

@@ -811,6 +811,9 @@ Your Goal: {self.goal}
  if self.verbose:
  logging.debug(f"Agent {self.name} final response: {response_text}")
  display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ # Return only reasoning content if reasoning_steps is True
+ if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
+ return response.choices[0].message.reasoning_content
  return response_text

  reflection_prompt = f"""
@@ -885,9 +888,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  cleaned = cleaned[:-3].strip()
  return cleaned

- async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, show_reasoning=False):
+ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
  """Async version of chat method. TODO: Requires Syncing with chat method."""
- show_reasoning = show_reasoning or self.show_reasoning
+ reasoning_steps = reasoning_steps or self.reasoning_steps
  try:
  # Search for existing knowledge if any knowledge is provided
  if self.knowledge:
@@ -919,7 +922,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  agent_role=self.role,
  agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
  execute_tool_fn=self.execute_tool_async,
- show_reasoning=show_reasoning
+ reasoning_steps=reasoning_steps
  )

  self.chat_history.append({"role": "user", "content": prompt})
@@ -985,48 +988,48 @@ Your Goal: {self.goal}
  )

  # Format tools if provided
- formatted_tools = []
- if tools:
- for tool in tools:
- if isinstance(tool, str):
- tool_def = self._generate_tool_definition(tool)
- if tool_def:
- formatted_tools.append(tool_def)
- elif isinstance(tool, dict):
- formatted_tools.append(tool)
- elif hasattr(tool, "to_openai_tool"):
- formatted_tools.append(tool.to_openai_tool())
- elif callable(tool):
- formatted_tools.append(self._generate_tool_definition(tool.__name__))
-
- # Create async OpenAI client
- async_client = AsyncOpenAI()
-
- # Make the API call based on the type of request
- if tools:
- response = await async_client.chat.completions.create(
- model=self.llm,
- messages=messages,
- temperature=temperature,
- tools=formatted_tools
- )
- return await self._achat_completion(response, tools)
- elif output_json or output_pydantic:
- response = await async_client.chat.completions.create(
- model=self.llm,
- messages=messages,
- temperature=temperature,
- response_format={"type": "json_object"}
- )
- # Return the raw response
- return response.choices[0].message.content
- else:
- response = await async_client.chat.completions.create(
- model=self.llm,
- messages=messages,
- temperature=temperature
- )
- return response.choices[0].message.content
+ formatted_tools = []
+ if tools:
+ for tool in tools:
+ if isinstance(tool, str):
+ tool_def = self._generate_tool_definition(tool)
+ if tool_def:
+ formatted_tools.append(tool_def)
+ elif isinstance(tool, dict):
+ formatted_tools.append(tool)
+ elif hasattr(tool, "to_openai_tool"):
+ formatted_tools.append(tool.to_openai_tool())
+ elif callable(tool):
+ formatted_tools.append(self._generate_tool_definition(tool.__name__))
+
+ # Create async OpenAI client
+ async_client = AsyncOpenAI()
+
+ # Make the API call based on the type of request
+ if tools:
+ response = await async_client.chat.completions.create(
+ model=self.llm,
+ messages=messages,
+ temperature=temperature,
+ tools=formatted_tools
+ )
+ return await self._achat_completion(response, tools)
+ elif output_json or output_pydantic:
+ response = await async_client.chat.completions.create(
+ model=self.llm,
+ messages=messages,
+ temperature=temperature,
+ response_format={"type": "json_object"}
+ )
+ # Return the raw response
+ return response.choices[0].message.content
+ else:
+ response = await async_client.chat.completions.create(
+ model=self.llm,
+ messages=messages,
+ temperature=temperature
+ )
+ return response.choices[0].message.content
  except Exception as e:
  display_error(f"Error in chat completion: {e}")
  return None
@@ -1034,7 +1037,7 @@ Your Goal: {self.goal}
  display_error(f"Error in achat: {e}")
  return None

- async def _achat_completion(self, response, tools, show_reasoning=False):
+ async def _achat_completion(self, response, tools, reasoning_steps=False):
  """Async version of _chat_completion method"""
  try:
  message = response.choices[0].message
@@ -1102,7 +1105,7 @@ Your Goal: {self.goal}
  full_response_text += chunk.choices[0].delta.content
  live.update(display_generating(full_response_text, start_time))

- if show_reasoning and hasattr(chunk.choices[0].delta, "reasoning_content"):
+ if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
  rc = chunk.choices[0].delta.reasoning_content
  if rc:
  reasoning_content += rc
@@ -1111,6 +1114,9 @@ Your Goal: {self.goal}
  self.console.print()

  final_response = process_stream_chunks(chunks)
+ # Return only reasoning content if reasoning_steps is True
+ if reasoning_steps and hasattr(final_response.choices[0].message, 'reasoning_content'):
+ return final_response.choices[0].message.reasoning_content
  return final_response.choices[0].message.content if final_response else full_response_text

  except Exception as e:
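
Note: across agent.py the `show_reasoning` keyword is renamed to `reasoning_steps` (constructor, `chat`, `achat`, `_chat_completion`, `_achat_completion`), and when the flag is set the chat methods now return the provider's `reasoning_content` instead of the final answer whenever that field is present on the response. A minimal usage sketch follows; the model name and the role/goal values are illustrative only and are not taken from this diff.

from praisonaiagents import Agent

# Hedged example: any provider/model that exposes reasoning_content works here.
agent = Agent(
    role="Researcher",                 # illustrative
    goal="Explain a concept briefly",  # illustrative
    llm="deepseek/deepseek-reasoner",  # illustrative model name
    reasoning_steps=True,              # was show_reasoning in 0.0.52
)

# With reasoning_steps=True, chat() returns message.reasoning_content
# (when the provider supplies it) rather than the final answer text.
print(agent.chat("Why is the sky blue?"))
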
@@ -390,15 +390,8 @@ Context:
  task.status = "completed"
  # Run execute_callback for memory operations
  try:
- try:
- # If a loop is already running, just create the task
- loop = asyncio.get_running_loop()
- loop.create_task(task.execute_callback(task_output))
- except RuntimeError:
- # Otherwise, create and set a new loop, and run the callback
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- loop.create_task(task.execute_callback(task_output))
+ # Use the new sync wrapper to avoid pending coroutine issues
+ task.execute_callback_sync(task_output)
  except Exception as e:
  logger.error(f"Error executing memory callback for task {task_id}: {e}")
  logger.exception(e)
@@ -720,15 +713,8 @@ Context:
  task.status = "completed"
  # Run execute_callback for memory operations
  try:
- try:
- # If a loop is already running, just create the task
- loop = asyncio.get_running_loop()
- loop.create_task(task.execute_callback(task_output))
- except RuntimeError:
- # Otherwise, create and set a new loop, and run the callback
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- loop.create_task(task.execute_callback(task_output))
+ # Use the new sync wrapper to avoid pending coroutine issues
+ task.execute_callback_sync(task_output)
  except Exception as e:
  logger.error(f"Error executing memory callback for task {task_id}: {e}")
  logger.exception(e)
@@ -148,7 +148,7 @@ class LLM:
  self.self_reflect = extra_settings.get('self_reflect', False)
  self.max_reflect = extra_settings.get('max_reflect', 3)
  self.min_reflect = extra_settings.get('min_reflect', 1)
- self.show_reasoning = extra_settings.get('show_reasoning', False)
+ self.reasoning_steps = extra_settings.get('reasoning_steps', False)

  # Enable error dropping for cleaner output
  litellm.drop_params = True
@@ -178,7 +178,8 @@ class LLM:
  """Enhanced get_response with all OpenAI-like features"""
  try:
  import litellm
- show_reasoning = kwargs.get('show_reasoning', self.show_reasoning)
+ # This below **kwargs** is passed to .completion() directly. so reasoning_steps has to be popped. OR find alternate best way of handling this.
+ reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
  # Disable litellm debug messages
  litellm.set_verbose = False

@@ -233,14 +234,14 @@ class LLM:
  # Get response from LiteLLM
  start_time = time.time()

- # If show_reasoning is True, do a single non-streaming call
- if show_reasoning:
+ # If reasoning_steps is True, do a single non-streaming call
+ if reasoning_steps:
  resp = litellm.completion(
  model=self.model,
  messages=messages,
  temperature=temperature,
  stream=False, # force non-streaming
- **kwargs
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  )
  reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
  response_text = resp["choices"][0]["message"]["content"]
@@ -337,14 +338,14 @@ class LLM:
  "content": "Function returned an empty output"
  })

- # If show_reasoning is True, do a single non-streaming call
- if show_reasoning:
+ # If reasoning_steps is True, do a single non-streaming call
+ if reasoning_steps:
  resp = litellm.completion(
  model=self.model,
  messages=messages,
  temperature=temperature,
  stream=False, # force non-streaming
- **kwargs
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  )
  reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
  response_text = resp["choices"][0]["message"]["content"]
@@ -409,6 +410,9 @@ class LLM:
  if verbose:
  display_interaction(original_prompt, response_text, markdown=markdown,
  generation_time=time.time() - start_time, console=console)
+ # Return reasoning content if reasoning_steps is True
+ if reasoning_steps and reasoning_content:
+ return reasoning_content
  return response_text

  # Handle self-reflection
@@ -424,15 +428,15 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  {"role": "user", "content": reflection_prompt}
  ]

- # If show_reasoning is True, do a single non-streaming call to capture reasoning
- if show_reasoning:
+ # If reasoning_steps is True, do a single non-streaming call to capture reasoning
+ if reasoning_steps:
  reflection_resp = litellm.completion(
  model=self.model,
  messages=reflection_messages,
  temperature=temperature,
  stream=False, # Force non-streaming
  response_format={"type": "json_object"},
- **kwargs
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  )
  # Grab reflection text and optional reasoning
  reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
@@ -466,7 +470,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  temperature=temperature,
  stream=True,
  response_format={"type": "json_object"},
- **kwargs
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  ):
  if chunk and chunk.choices and chunk.choices[0].delta.content:
  content = chunk.choices[0].delta.content
@@ -480,7 +484,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  temperature=temperature,
  stream=True,
  response_format={"type": "json_object"},
- **kwargs
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  ):
  if chunk and chunk.choices and chunk.choices[0].delta.content:
  reflection_text += chunk.choices[0].delta.content
@@ -554,7 +558,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  """Async version of get_response with identical functionality."""
  try:
  import litellm
- show_reasoning = kwargs.get('show_reasoning', self.show_reasoning)
+ reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
  litellm.set_verbose = False

  # Build messages list
@@ -659,14 +663,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  formatted_tools = None

  response_text = ""
- if show_reasoning:
+ if reasoning_steps:
  # Non-streaming call to capture reasoning
  resp = await litellm.acompletion(
  model=self.model,
  messages=messages,
  temperature=temperature,
  stream=False, # force non-streaming
- **kwargs
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  )
  reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
  response_text = resp["choices"][0]["message"]["content"]
@@ -728,7 +732,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  temperature=temperature,
  stream=False,
  tools=formatted_tools, # We safely pass tools here
- **kwargs
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  )
  # handle tool_calls from tool_response as usual...
  tool_calls = tool_response.choices[0].message.get("tool_calls")
@@ -766,7 +770,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  # Get response after tool calls
  response_text = ""
- if show_reasoning:
+ if reasoning_steps:
  # Non-streaming call to capture reasoning
  resp = await litellm.acompletion(
  model=self.model,
@@ -774,7 +778,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  temperature=temperature,
  stream=False, # force non-streaming
  tools=formatted_tools, # Include tools
- **kwargs
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  )
  reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
  response_text = resp["choices"][0]["message"]["content"]
@@ -804,7 +808,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  temperature=temperature,
  stream=True,
  tools=formatted_tools,
- **kwargs
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  ):
  if chunk and chunk.choices and chunk.choices[0].delta.content:
  content = chunk.choices[0].delta.content
@@ -818,7 +822,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  messages=messages,
  temperature=temperature,
  stream=True,
- **kwargs
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  ):
  if chunk and chunk.choices and chunk.choices[0].delta.content:
  response_text += chunk.choices[0].delta.content
@@ -838,6 +842,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  if verbose:
  display_interaction(original_prompt, response_text, markdown=markdown,
  generation_time=time.time() - start_time, console=console)
+ # Return reasoning content if reasoning_steps is True
+ if reasoning_steps and reasoning_content:
+ return reasoning_content
  return response_text

  # Handle self-reflection
@@ -853,15 +860,15 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  {"role": "user", "content": reflection_prompt}
  ]

- # If show_reasoning is True, do a single non-streaming call to capture reasoning
- if show_reasoning:
+ # If reasoning_steps is True, do a single non-streaming call to capture reasoning
+ if reasoning_steps:
  reflection_resp = litellm.completion(
  model=self.model,
  messages=reflection_messages,
  temperature=temperature,
  stream=False, # Force non-streaming
  response_format={"type": "json_object"},
- **kwargs
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  )
  # Grab reflection text and optional reasoning
  reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
@@ -895,7 +902,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  temperature=temperature,
  stream=True,
  response_format={"type": "json_object"},
- **kwargs
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  ):
  if chunk and chunk.choices and chunk.choices[0].delta.content:
  content = chunk.choices[0].delta.content
@@ -909,7 +916,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  temperature=temperature,
  stream=True,
  response_format={"type": "json_object"},
- **kwargs
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
  ):
  if chunk and chunk.choices and chunk.choices[0].delta.content:
  reflection_text += chunk.choices[0].delta.content
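
Note: in llm.py the flag is now read with `kwargs.pop('reasoning_steps', ...)` rather than `kwargs.get('show_reasoning', ...)`, and every remaining `**kwargs` forwarded to `litellm.completion()` / `litellm.acompletion()` is filtered with `{k: v for k, v in kwargs.items() if k != 'reasoning_steps'}`, so the control flag never reaches LiteLLM as an unexpected completion parameter. The sketch below shows that idiom in isolation; `call_provider` and `get_response` here are stand-ins, not part of the package.

def call_provider(**provider_kwargs):
    # Stand-in for litellm.completion(); just shows what the provider would receive.
    print("provider kwargs:", provider_kwargs)

def get_response(prompt, **kwargs):
    # Pop the control flag so it is consumed locally ...
    reasoning_steps = kwargs.pop("reasoning_steps", False)
    # ... and strip it again wherever kwargs is re-forwarded, mirroring the diff.
    call_provider(
        messages=[{"role": "user", "content": prompt}],
        stream=not reasoning_steps,  # the reasoning path forces non-streaming
        **{k: v for k, v in kwargs.items() if k != "reasoning_steps"},
    )

get_response("hello", temperature=0.2, reasoning_steps=True)
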
@@ -302,4 +302,21 @@ Expected Output: {self.expected_output}.
  Context:

  {' '.join(unique_contexts)}
- """
+ """
+
+ def execute_callback_sync(self, task_output: TaskOutput) -> None:
+ """
+ Synchronous wrapper to ensure that execute_callback is awaited,
+ preventing 'Task was destroyed but pending!' warnings if called
+ from non-async code.
+ """
+ import asyncio
+ try:
+ loop = asyncio.get_running_loop()
+ if loop.is_running():
+ loop.create_task(self.execute_callback(task_output))
+ else:
+ loop.run_until_complete(self.execute_callback(task_output))
+ except RuntimeError:
+ # If no loop is running in this context
+ asyncio.run(self.execute_callback(task_output))
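
Note: the new `Task.execute_callback_sync()` lets synchronous callers (the two call sites in agents.py above) run the async `execute_callback()` without leaving an un-awaited coroutine behind. The standalone sketch below shows the general pattern under simplified assumptions; the coroutine and wrapper names are illustrative, and only the event-loop handling mirrors the diff.

import asyncio

async def on_task_done(result: str) -> None:
    # Illustrative async callback; stands in for Task.execute_callback().
    print("callback ran with:", result)

def on_task_done_sync(result: str) -> None:
    try:
        # Inside an already-running loop: schedule the coroutine as a task.
        loop = asyncio.get_running_loop()
        loop.create_task(on_task_done(result))
    except RuntimeError:
        # No loop running in this thread: run the coroutine to completion.
        asyncio.run(on_task_done(result))

on_task_done_sync("task output")  # safe to call from plain synchronous code

One consequence of this pattern: when a loop is already running, the callback is only scheduled, not awaited, so its side effects may complete after the caller returns.
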
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: praisonaiagents
- Version: 0.0.52
+ Version: 0.0.54
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Dist: pydantic
@@ -1,20 +1,20 @@
  praisonaiagents/__init__.py,sha256=JtPibbmeFv3meIb3vkKjckB0p7m-Vqt2RYPwOH8P41k,1228
  praisonaiagents/main.py,sha256=0kB9gn9meXtr4EIrdgA2lAioKIHCRJ61audsGDwuTm4,14428
  praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
- praisonaiagents/agent/agent.py,sha256=9r9eN9sTI3A_3IZdA4GYpsKXE5Q4m8yQ_QXGyFirQok,53844
+ praisonaiagents/agent/agent.py,sha256=Rd2ZCToraAoe57UDT1JfrB03ffRKtZ-Tct9avFcZyT4,54257
  praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
- praisonaiagents/agents/agents.py,sha256=PRqBEUqRadVLBoDd-tgne5fVB87bR6P9qOgvDdjS-dY,37028
+ praisonaiagents/agents/agents.py,sha256=94YPQl-hl-EPY6-Xk2Rj9wlIs9YtiLQbsutSOXWX8QI,36156
  praisonaiagents/agents/autoagents.py,sha256=bjC2O5oZmoJItJXIMPTWc2lsp_AJC9tMiTQOal2hwPA,13532
  praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
  praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
  praisonaiagents/knowledge/knowledge.py,sha256=fQNREDiwdoisfIxJBLVkteXgq_8Gbypfc3UaZbxf5QY,13210
  praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
- praisonaiagents/llm/llm.py,sha256=WEfqWEOb2Sa2V5MRVa2XbFCBcrE5WBMeEhSFq3HCZvM,49145
+ praisonaiagents/llm/llm.py,sha256=M6xh9cuO0KZjzpAkHZrnktxw4eCmXLymoZqMoXeq-0U,50352
  praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
  praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
  praisonaiagents/process/process.py,sha256=_1Nk37kOYakPaUWAJff86rP0ENyykXqMnhTp8E0efuE,30802
  praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
- praisonaiagents/task/task.py,sha256=YKoUOyIBQflOSCO4nuSi6sL-s2-Awujjs44qNqiUETI,13582
+ praisonaiagents/task/task.py,sha256=ikFjzNm4WPYONSLtWA3uDGNIUx_TvXTeU5SukWoC66E,14271
  praisonaiagents/tools/__init__.py,sha256=-0lV5n5cG54vYW6REjXIfuJnCLKnfQIDlXsySCaPB9s,7347
  praisonaiagents/tools/arxiv_tools.py,sha256=1stb31zTjLTon4jCnpZG5de9rKc9QWgC0leLegvPXWo,10528
  praisonaiagents/tools/calculator_tools.py,sha256=S1xPT74Geurvjm52QMMIG29zDXVEWJmM6nmyY7yF298,9571
@@ -35,7 +35,7 @@ praisonaiagents/tools/wikipedia_tools.py,sha256=pGko-f33wqXgxJTv8db7TbizY5XnzBQR
  praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxNMMs1A,17122
  praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
  praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
- praisonaiagents-0.0.52.dist-info/METADATA,sha256=NIkU1FNQXms13nIStb4r2yYjItL1aA0XSL7sCy1HxMM,830
- praisonaiagents-0.0.52.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- praisonaiagents-0.0.52.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
- praisonaiagents-0.0.52.dist-info/RECORD,,
+ praisonaiagents-0.0.54.dist-info/METADATA,sha256=Zfzek1Y53OzW35U-lLAX5mTpdS0xxV57mCDdHhcSfYo,830
+ praisonaiagents-0.0.54.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ praisonaiagents-0.0.54.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+ praisonaiagents-0.0.54.dist-info/RECORD,,