praisonaiagents 0.0.53__py3-none-any.whl → 0.0.54__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
praisonaiagents/agent/agent.py

@@ -988,48 +988,48 @@ Your Goal: {self.goal}
             )

             # Format tools if provided
-            formatted_tools = []
-            if tools:
-                for tool in tools:
-                    if isinstance(tool, str):
-                        tool_def = self._generate_tool_definition(tool)
-                        if tool_def:
-                            formatted_tools.append(tool_def)
-                    elif isinstance(tool, dict):
-                        formatted_tools.append(tool)
-                    elif hasattr(tool, "to_openai_tool"):
-                        formatted_tools.append(tool.to_openai_tool())
-                    elif callable(tool):
-                        formatted_tools.append(self._generate_tool_definition(tool.__name__))
-
-            # Create async OpenAI client
-            async_client = AsyncOpenAI()
-
-            # Make the API call based on the type of request
-            if tools:
-                response = await async_client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature,
-                    tools=formatted_tools
-                )
-                return await self._achat_completion(response, tools)
-            elif output_json or output_pydantic:
-                response = await async_client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature,
-                    response_format={"type": "json_object"}
-                )
-                # Return the raw response
-                return response.choices[0].message.content
-            else:
-                response = await async_client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature
-                )
-                return response.choices[0].message.content
+            formatted_tools = []
+            if tools:
+                for tool in tools:
+                    if isinstance(tool, str):
+                        tool_def = self._generate_tool_definition(tool)
+                        if tool_def:
+                            formatted_tools.append(tool_def)
+                    elif isinstance(tool, dict):
+                        formatted_tools.append(tool)
+                    elif hasattr(tool, "to_openai_tool"):
+                        formatted_tools.append(tool.to_openai_tool())
+                    elif callable(tool):
+                        formatted_tools.append(self._generate_tool_definition(tool.__name__))
+
+            # Create async OpenAI client
+            async_client = AsyncOpenAI()
+
+            # Make the API call based on the type of request
+            if tools:
+                response = await async_client.chat.completions.create(
+                    model=self.llm,
+                    messages=messages,
+                    temperature=temperature,
+                    tools=formatted_tools
+                )
+                return await self._achat_completion(response, tools)
+            elif output_json or output_pydantic:
+                response = await async_client.chat.completions.create(
+                    model=self.llm,
+                    messages=messages,
+                    temperature=temperature,
+                    response_format={"type": "json_object"}
+                )
+                # Return the raw response
+                return response.choices[0].message.content
+            else:
+                response = await async_client.chat.completions.create(
+                    model=self.llm,
+                    messages=messages,
+                    temperature=temperature
+                )
+                return response.choices[0].message.content
         except Exception as e:
             display_error(f"Error in chat completion: {e}")
             return None
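
The changed block normalizes heterogeneous tool specifications (names as strings, ready-made dicts, objects exposing to_openai_tool, and bare callables) into OpenAI-style tool definitions before dispatching the chat call. A minimal standalone sketch of that dispatch pattern, where normalize_tools and generate_def are hypothetical names standing in for the agent method and its _generate_tool_definition helper:

from typing import Any, Callable, Dict, List, Optional

def normalize_tools(tools: List[Any],
                    generate_def: Callable[[str], Optional[Dict]]) -> List[Dict]:
    # Sketch of the dispatch above; generate_def stands in for a
    # _generate_tool_definition-style helper (hypothetical free-function form).
    formatted: List[Dict] = []
    for tool in tools:
        if isinstance(tool, str):
            tool_def = generate_def(tool)  # tool referenced by name
            if tool_def:
                formatted.append(tool_def)
        elif isinstance(tool, dict):
            formatted.append(tool)  # already an OpenAI-style definition
        elif hasattr(tool, "to_openai_tool"):
            formatted.append(tool.to_openai_tool())  # self-serializing tool object
        elif callable(tool):
            formatted.append(generate_def(tool.__name__))  # plain function
    return formatted

Strings and bare callables both resolve through the definition generator; dicts and self-serializing tool objects pass through unchanged.
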
praisonaiagents/llm/llm.py

@@ -178,7 +178,8 @@ class LLM:
         """Enhanced get_response with all OpenAI-like features"""
         try:
             import litellm
-            reasoning_steps = kwargs.get('reasoning_steps', self.reasoning_steps)
+            # This below **kwargs** is passed to .completion() directly. so reasoning_steps has to be popped. OR find alternate best way of handling this.
+            reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
             # Disable litellm debug messages
             litellm.set_verbose = False

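The switch from kwargs.get to kwargs.pop is the substantive fix here: the same kwargs mapping is later forwarded to litellm.completion(**kwargs), which rejects unknown keyword arguments, so the internal-only reasoning_steps flag must be removed once it has been read. A minimal repro of the failure mode, with a stub completion standing in for litellm.completion and an illustrative model name:

def completion(model, messages, temperature=0.7):
    # Stub standing in for litellm.completion; accepts no 'reasoning_steps'.
    return f"{model}: {len(messages)} message(s), temperature={temperature}"

def get_response_buggy(messages, **kwargs):
    reasoning_steps = kwargs.get('reasoning_steps', False)  # key stays in kwargs
    return completion(model="gpt-4o-mini", messages=messages, **kwargs)

def get_response_fixed(messages, **kwargs):
    reasoning_steps = kwargs.pop('reasoning_steps', False)  # key removed from kwargs
    return completion(model="gpt-4o-mini", messages=messages, **kwargs)

msgs = [{"role": "user", "content": "hi"}]
print(get_response_fixed(msgs, reasoning_steps=True))  # works
# get_response_buggy(msgs, reasoning_steps=True)
#   -> TypeError: completion() got an unexpected keyword argument 'reasoning_steps'
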
@@ -240,7 +241,7 @@ class LLM:
                 messages=messages,
                 temperature=temperature,
                 stream=False, # force non-streaming
-                **kwargs
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             )
             reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
             response_text = resp["choices"][0]["message"]["content"]
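
Belt and braces: besides the pop at the top of get_response, every forwarding site is also rewritten to filter the key out with a dict comprehension, so reasoning_steps cannot reach the provider call even if it re-enters kwargs by another path. The filter in isolation:

kwargs = {"top_p": 0.9, "reasoning_steps": True}
safe_kwargs = {k: v for k, v in kwargs.items() if k != 'reasoning_steps'}
print(safe_kwargs)  # {'top_p': 0.9}

If more internal-only keys accumulate, a set-based variant ({k: v for k, v in kwargs.items() if k not in INTERNAL_KEYS}) keeps the call sites from growing a chain of inequality checks.
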
@@ -344,7 +345,7 @@ class LLM:
                 messages=messages,
                 temperature=temperature,
                 stream=False, # force non-streaming
-                **kwargs
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             )
             reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
             response_text = resp["choices"][0]["message"]["content"]
@@ -435,7 +436,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 temperature=temperature,
                 stream=False, # Force non-streaming
                 response_format={"type": "json_object"},
-                **kwargs
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             )
             # Grab reflection text and optional reasoning
             reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
@@ -469,7 +470,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 temperature=temperature,
                 stream=True,
                 response_format={"type": "json_object"},
-                **kwargs
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                     content = chunk.choices[0].delta.content
@@ -483,7 +484,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 temperature=temperature,
                 stream=True,
                 response_format={"type": "json_object"},
-                **kwargs
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                     reflection_text += chunk.choices[0].delta.content
@@ -557,7 +558,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         """Async version of get_response with identical functionality."""
         try:
             import litellm
-            reasoning_steps = kwargs.get('reasoning_steps', self.reasoning_steps)
+            reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
             litellm.set_verbose = False

             # Build messages list
@@ -669,7 +670,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 messages=messages,
                 temperature=temperature,
                 stream=False, # force non-streaming
-                **kwargs
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             )
             reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
             response_text = resp["choices"][0]["message"]["content"]
@@ -731,7 +732,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 temperature=temperature,
                 stream=False,
                 tools=formatted_tools, # We safely pass tools here
-                **kwargs
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             )
             # handle tool_calls from tool_response as usual...
             tool_calls = tool_response.choices[0].message.get("tool_calls")
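
The context line above only gestures at the tool-call handling ("as usual"). For orientation, a hypothetical sketch of that step, assuming an execute_tool(name, args) dispatcher that is not part of the code shown in this diff:

import json
from typing import Any, Callable, Dict, List

def run_tool_calls(tool_calls: List[Dict],
                   execute_tool: Callable[[str, Dict], Any],
                   messages: List[Dict]) -> List[Dict]:
    # Hypothetical: execute each requested tool and append the results as
    # 'tool' role messages for a follow-up completion call.
    for tc in tool_calls or []:
        name = tc["function"]["name"]
        args = json.loads(tc["function"]["arguments"] or "{}")
        result = execute_tool(name, args)
        messages.append({
            "role": "tool",
            "tool_call_id": tc["id"],
            "content": json.dumps(result),
        })
    return messages
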
@@ -777,7 +778,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 temperature=temperature,
                 stream=False, # force non-streaming
                 tools=formatted_tools, # Include tools
-                **kwargs
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             )
             reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
             response_text = resp["choices"][0]["message"]["content"]
@@ -807,7 +808,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 temperature=temperature,
                 stream=True,
                 tools=formatted_tools,
-                **kwargs
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                     content = chunk.choices[0].delta.content
@@ -821,7 +822,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 messages=messages,
                 temperature=temperature,
                 stream=True,
-                **kwargs
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                     response_text += chunk.choices[0].delta.content
@@ -867,7 +868,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 temperature=temperature,
                 stream=False, # Force non-streaming
                 response_format={"type": "json_object"},
-                **kwargs
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             )
             # Grab reflection text and optional reasoning
             reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
@@ -901,7 +902,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 temperature=temperature,
                 stream=True,
                 response_format={"type": "json_object"},
-                **kwargs
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                     content = chunk.choices[0].delta.content
@@ -915,7 +916,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 temperature=temperature,
                 stream=True,
                 response_format={"type": "json_object"},
-                **kwargs
+                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
             ):
                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                     reflection_text += chunk.choices[0].delta.content
praisonaiagents-0.0.53.dist-info/METADATA → praisonaiagents-0.0.54.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: praisonaiagents
-Version: 0.0.53
+Version: 0.0.54
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
praisonaiagents-0.0.53.dist-info/RECORD → praisonaiagents-0.0.54.dist-info/RECORD

@@ -1,7 +1,7 @@
 praisonaiagents/__init__.py,sha256=JtPibbmeFv3meIb3vkKjckB0p7m-Vqt2RYPwOH8P41k,1228
 praisonaiagents/main.py,sha256=0kB9gn9meXtr4EIrdgA2lAioKIHCRJ61audsGDwuTm4,14428
 praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
-praisonaiagents/agent/agent.py,sha256=du4wbXOWr4TKeTdlHxfqtq3O7NoQNh5P94cBVF6Nj_M,54417
+praisonaiagents/agent/agent.py,sha256=Rd2ZCToraAoe57UDT1JfrB03ffRKtZ-Tct9avFcZyT4,54257
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=94YPQl-hl-EPY6-Xk2Rj9wlIs9YtiLQbsutSOXWX8QI,36156
 praisonaiagents/agents/autoagents.py,sha256=bjC2O5oZmoJItJXIMPTWc2lsp_AJC9tMiTQOal2hwPA,13532
@@ -9,7 +9,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
 praisonaiagents/knowledge/knowledge.py,sha256=fQNREDiwdoisfIxJBLVkteXgq_8Gbypfc3UaZbxf5QY,13210
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=S-9GBQcOoXQ7mzT119N7k6QzqwPIHSrXqbzluYzGntQ,49533
+praisonaiagents/llm/llm.py,sha256=M6xh9cuO0KZjzpAkHZrnktxw4eCmXLymoZqMoXeq-0U,50352
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=_1Nk37kOYakPaUWAJff86rP0ENyykXqMnhTp8E0efuE,30802
@@ -35,7 +35,7 @@ praisonaiagents/tools/wikipedia_tools.py,sha256=pGko-f33wqXgxJTv8db7TbizY5XnzBQR
 praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxNMMs1A,17122
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
-praisonaiagents-0.0.53.dist-info/METADATA,sha256=DAmi3nqq0kER9xJhFPZ8e3JC_TCeSdH2zKzrrZyZCZ4,830
-praisonaiagents-0.0.53.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-praisonaiagents-0.0.53.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.53.dist-info/RECORD,,
+praisonaiagents-0.0.54.dist-info/METADATA,sha256=Zfzek1Y53OzW35U-lLAX5mTpdS0xxV57mCDdHhcSfYo,830
+praisonaiagents-0.0.54.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+praisonaiagents-0.0.54.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.54.dist-info/RECORD,,