praisonaiagents 0.0.126__py3-none-any.whl → 0.0.128__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -480,24 +480,22 @@ Context:
         )
 
         if self.process == "workflow":
-            # Collect all tasks that should run in parallel
-            parallel_tasks = []
+            tasks_to_run = []
             async for task_id in process.aworkflow():
-                if self.tasks[task_id].async_execution and self.tasks[task_id].is_start:
-                    parallel_tasks.append(task_id)
-                elif parallel_tasks:
-                    # Execute collected parallel tasks
-                    await asyncio.gather(*[self.arun_task(t) for t in parallel_tasks])
-                    parallel_tasks = []
-                # Run the current non-parallel task
-                if self.tasks[task_id].async_execution:
-                    await self.arun_task(task_id)
-                else:
-                    self.run_task(task_id)
-
-            # Execute any remaining parallel tasks
-            if parallel_tasks:
-                await asyncio.gather(*[self.arun_task(t) for t in parallel_tasks])
+                if self.tasks[task_id].async_execution:
+                    tasks_to_run.append(self.arun_task(task_id))
+                else:
+                    # If we encounter a sync task, we must wait for the previous async tasks to finish.
+                    if tasks_to_run:
+                        await asyncio.gather(*tasks_to_run)
+                        tasks_to_run = []
+
+                    # Run sync task in an executor to avoid blocking the event loop
+                    loop = asyncio.get_event_loop()
+                    await loop.run_in_executor(None, self.run_task, task_id)
+
+            if tasks_to_run:
+                await asyncio.gather(*tasks_to_run)
 
         elif self.process == "sequential":
             async for task_id in process.asequential():
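The new scheme batches every run of consecutive async tasks as coroutines and flushes them with asyncio.gather as soon as a sync task appears; the sync task itself is pushed onto a thread executor so it cannot block the event loop. A minimal self-contained sketch of the same pattern (task names and sleep durations are illustrative, not taken from the package):

    import asyncio
    import time

    async def async_task(name: str) -> str:
        await asyncio.sleep(0.1)          # stand-in for awaitable agent work
        return f"{name} done (async)"

    def sync_task(name: str) -> str:
        time.sleep(0.1)                   # stand-in for blocking agent work
        return f"{name} done (sync)"

    async def run_workflow(plan):
        tasks_to_run = []                 # consecutive async tasks, batched
        for name, is_async in plan:
            if is_async:
                tasks_to_run.append(async_task(name))
            else:
                if tasks_to_run:          # flush the async batch first
                    await asyncio.gather(*tasks_to_run)
                    tasks_to_run = []
                loop = asyncio.get_event_loop()
                # blocking work moved off the event loop
                await loop.run_in_executor(None, sync_task, name)
        if tasks_to_run:                  # flush any trailing async batch
            await asyncio.gather(*tasks_to_run)

    asyncio.run(run_workflow([("a", True), ("b", True), ("c", False), ("d", True)]))

Note the trailing gather after the loop, which mirrors the final flush in the diff: without it, async tasks at the end of the workflow would never be awaited.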
@@ -136,7 +136,7 @@ class AutoAgents(PraisonAIAgents):
             completion_checker=completion_checker,
             max_retries=max_retries,
             process=process,
-            manager_llm=manager_llm
+            manager_llm=manager_llm or self.llm
         )
 
     def _display_agents_and_tasks(self, agents: List[Agent], tasks: List[Task]):
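The `or` fallback keeps an explicitly supplied manager model while a missing one now inherits the instance-level LLM instead of staying unset. A toy illustration of the semantics (model names are placeholders, not package defaults):

    def resolve_manager_llm(manager_llm, instance_llm):
        # an explicit manager model wins; None/"" falls back to the agent's model
        return manager_llm or instance_llm

    assert resolve_manager_llm("gpt-4o", "gpt-4o-mini") == "gpt-4o"
    assert resolve_manager_llm(None, "gpt-4o-mini") == "gpt-4o-mini"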
@@ -772,7 +772,7 @@ class LLM:
                     if formatted_tools and self._supports_streaming_tools():
                         tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)
 
-            response_text = response_text.strip()
+            response_text = response_text.strip() if response_text else ""
 
             # Create a mock final_response with the captured data
             final_response = {
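This guarded form, repeated at several call sites below, avoids an AttributeError when a streaming call yields no content deltas and the accumulated text is still None or empty. A quick demonstration of the pattern in isolation:

    def safe_strip(response_text):
        # never call .strip() on None; falsy input maps to ""
        return response_text.strip() if response_text else ""

    assert safe_strip("  hello  ") == "hello"
    assert safe_strip(None) == ""
    assert safe_strip("") == ""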
@@ -858,102 +858,6 @@ class LLM:
                 iteration_count += 1
                 continue
 
-            # Special handling for Ollama models that don't automatically process tool results
-            ollama_handled = False
-            ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
-
-            if ollama_params:
-                # Get response based on streaming mode
-                if stream:
-                    # Streaming approach
-                    if verbose:
-                        with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
-                            response_text = ""
-                            for chunk in litellm.completion(
-                                **self._build_completion_params(
-                                    messages=ollama_params["follow_up_messages"],
-                                    temperature=temperature,
-                                    stream=True
-                                )
-                            ):
-                                if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                    content = chunk.choices[0].delta.content
-                                    response_text += content
-                                    live.update(display_generating(response_text, start_time))
-                    else:
-                        response_text = ""
-                        for chunk in litellm.completion(
-                            **self._build_completion_params(
-                                messages=ollama_params["follow_up_messages"],
-                                temperature=temperature,
-                                stream=True
-                            )
-                        ):
-                            if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                response_text += chunk.choices[0].delta.content
-                else:
-                    # Non-streaming approach
-                    resp = litellm.completion(
-                        **self._build_completion_params(
-                            messages=ollama_params["follow_up_messages"],
-                            temperature=temperature,
-                            stream=False
-                        )
-                    )
-                    response_text = resp.get("choices", [{}])[0].get("message", {}).get("content", "") or ""
-
-                # Set flag to indicate Ollama was handled
-                ollama_handled = True
-                final_response_text = response_text.strip()
-                logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
-                # Display the response if we got one
-                if final_response_text and verbose:
-                    display_interaction(
-                        ollama_params["original_prompt"],
-                        final_response_text,
-                        markdown=markdown,
-                        generation_time=time.time() - start_time,
-                        console=console
-                    )
-
-                # Update messages and continue the loop instead of returning
-                if final_response_text:
-                    # Update messages with the response to maintain conversation context
-                    messages.append({
-                        "role": "assistant",
-                        "content": final_response_text
-                    })
-                    # Continue the loop to check if more tools are needed
-                    iteration_count += 1
-                    continue
-                else:
-                    logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
-
-            # Handle reasoning_steps after tool execution if not already handled by Ollama
-            if reasoning_steps and not ollama_handled:
-                # Make a non-streaming call to capture reasoning content
-                reasoning_resp = litellm.completion(
-                    **self._build_completion_params(
-                        messages=messages,
-                        temperature=temperature,
-                        stream=False, # force non-streaming
-                        **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
-                    )
-                )
-                reasoning_content = reasoning_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
-                response_text = reasoning_resp["choices"][0]["message"]["content"]
-
-                # Store reasoning content for later use
-                if reasoning_content:
-                    stored_reasoning_content = reasoning_content
-
-                # Update messages with the response
-                messages.append({
-                    "role": "assistant",
-                    "content": response_text
-                })
-
             # After tool execution, continue the loop to check if more tools are needed
             # instead of immediately trying to get a final response
             iteration_count += 1
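With the Ollama-specific follow-up removed, every provider now takes the same path: append tool results to the conversation, bump the iteration counter, and let the next loop pass decide whether more tool calls are needed. A schematic of that unified loop, with hypothetical llm_call and execute_tool helpers standing in for the litellm plumbing:

    def run_tool_loop(llm_call, execute_tool, messages, max_iterations=10):
        """Generic agent loop: query the model, run requested tools, feed
        results back, and repeat until the model stops calling tools."""
        for _ in range(max_iterations):
            reply = llm_call(messages)  # -> {"content": str, "tool_calls": list}
            messages.append({"role": "assistant", "content": reply["content"]})
            if not reply["tool_calls"]:
                return reply["content"]  # final answer, no more tools requested
            for call in reply["tool_calls"]:
                result = execute_tool(call)
                messages.append({"role": "tool", "content": str(result)})
        return None  # iteration cap reached without a final answer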
@@ -962,7 +866,7 @@ class LLM:
                 # No tool calls, we're done with this iteration
                 # If we've executed tools in previous iterations, this response contains the final answer
                 if iteration_count > 0:
-                    final_response_text = response_text.strip()
+                    final_response_text = response_text.strip() if response_text else ""
                 break
 
             except Exception as e:
@@ -993,7 +897,7 @@ class LLM:
                         console=console
                     )
 
-            response_text = response_text.strip()
+            response_text = response_text.strip() if response_text else ""
 
             # Return reasoning content if reasoning_steps is True and we have it
             if reasoning_steps and stored_reasoning_content:
@@ -1155,7 +1059,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                             if chunk and chunk.choices and chunk.choices[0].delta.content:
                                 response_text += chunk.choices[0].delta.content
 
-                response_text = response_text.strip()
+                response_text = response_text.strip() if response_text else ""
                 continue
 
             except json.JSONDecodeError:
@@ -1367,7 +1271,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     if formatted_tools and self._supports_streaming_tools():
                         tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)
 
-            response_text = response_text.strip()
+            response_text = response_text.strip() if response_text else ""
 
             # We already have tool_calls from streaming if supported
             # No need for a second API call!
@@ -1430,68 +1334,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     # Get response after tool calls
                     response_text = ""
 
-                    # Special handling for Ollama models that don't automatically process tool results
-                    ollama_handled = False
-                    ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
-
-                    if ollama_params:
-                        # Get response with streaming
-                        if verbose:
-                            response_text = ""
-                            async for chunk in await litellm.acompletion(
-                                **self._build_completion_params(
-                                    messages=ollama_params["follow_up_messages"],
-                                    temperature=temperature,
-                                    stream=stream
-                                )
-                            ):
-                                if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                    content = chunk.choices[0].delta.content
-                                    response_text += content
-                                    print("\033[K", end="\r")
-                                    print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
-                        else:
-                            response_text = ""
-                            async for chunk in await litellm.acompletion(
-                                **self._build_completion_params(
-                                    messages=ollama_params["follow_up_messages"],
-                                    temperature=temperature,
-                                    stream=stream
-                                )
-                            ):
-                                if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                    response_text += chunk.choices[0].delta.content
-
-                        # Set flag to indicate Ollama was handled
-                        ollama_handled = True
-                        final_response_text = response_text.strip()
-                        logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
-                        # Display the response if we got one
-                        if final_response_text and verbose:
-                            display_interaction(
-                                ollama_params["original_prompt"],
-                                final_response_text,
-                                markdown=markdown,
-                                generation_time=time.time() - start_time,
-                                console=console
-                            )
-
-                        # Store the response for potential final return
-                        if final_response_text:
-                            # Update messages with the response to maintain conversation context
-                            messages.append({
-                                "role": "assistant",
-                                "content": final_response_text
-                            })
-                            # Continue the loop to check if more tools are needed
-                            iteration_count += 1
-                            continue
-                        else:
-                            logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
-
-                    # If no special handling was needed or if it's not an Ollama model
-                    if reasoning_steps and not ollama_handled:
+                    # If no special handling was needed
+                    if reasoning_steps:
                         # Non-streaming call to capture reasoning
                         resp = await litellm.acompletion(
                             **self._build_completion_params(
@@ -1521,7 +1365,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                             generation_time=time.time() - start_time,
                             console=console
                         )
-                    elif not ollama_handled:
+                    else:
                         # Get response after tool calls with streaming if not already handled
                         if verbose:
                             async for chunk in await litellm.acompletion(
@@ -1551,7 +1395,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                                     response_text += chunk.choices[0].delta.content
 
-                    response_text = response_text.strip()
+                    response_text = response_text.strip() if response_text else ""
 
                     # After tool execution, update messages and continue the loop
                     if response_text:
@@ -1790,67 +1634,6 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
         litellm.callbacks = events
 
-    def _handle_ollama_model(self, response_text: str, tool_results: List[Any], messages: List[Dict], original_prompt: Union[str, List[Dict]]) -> Optional[Dict[str, Any]]:
-        """
-        Handle special Ollama model requirements when processing tool results.
-
-        Args:
-            response_text: The initial response text from the model
-            tool_results: List of tool execution results
-            messages: The conversation messages list
-            original_prompt: The original user prompt
-
-        Returns:
-            Dict with follow-up parameters if Ollama needs special handling, None otherwise
-        """
-        if not self._is_ollama_provider() or not tool_results:
-            return None
-
-        # Check if the response is just a JSON tool call
-        try:
-            json_response = json.loads(response_text.strip())
-            if not (('name' in json_response or 'function' in json_response) and
-                    not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found'])):
-                return None
-
-            logging.debug("Detected Ollama returning only tool call JSON, preparing follow-up call to process results")
-
-            # Extract the original user query from messages
-            original_query = ""
-            for msg in reversed(messages): # Look from the end to find the most recent user message
-                if msg.get("role") == "user":
-                    content = msg.get("content", "")
-                    # Handle list content (multimodal)
-                    if isinstance(content, list):
-                        for item in content:
-                            if isinstance(item, dict) and item.get("type") == "text":
-                                original_query = item.get("text", "")
-                                break
-                    else:
-                        original_query = content
-                    if original_query:
-                        break
-
-            # Create a shorter follow-up prompt with all tool results
-            # If there's only one result, use it directly; otherwise combine them
-            if len(tool_results) == 1:
-                results_text = json.dumps(tool_results[0], indent=2)
-            else:
-                results_text = json.dumps(tool_results, indent=2)
-
-            follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
-            logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
-            logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")
-
-            # Return parameters for follow-up call
-            return {
-                "follow_up_messages": [{"role": "user", "content": follow_up_prompt}],
-                "original_prompt": original_prompt
-            }
-
-        except (json.JSONDecodeError, KeyError):
-            # Not a JSON response or not a tool call format
-            return None
 
     def _build_completion_params(self, **override_params) -> Dict[str, Any]:
         """Build parameters for litellm completion calls with all necessary config"""
@@ -2066,7 +1849,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         response_text += content
             else:
                 response = litellm.completion(**completion_params)
-                response_text = response.choices[0].message.content.strip()
+                response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
 
             if verbose:
                 display_interaction(
@@ -2077,7 +1860,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     console=console or self.console
                 )
 
-            return response_text.strip()
+            return response_text.strip() if response_text else ""
 
         except Exception as error:
             display_error(f"Error in response: {str(error)}")
@@ -2154,7 +1937,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         response_text += content
             else:
                 response = await litellm.acompletion(**completion_params)
-                response_text = response.choices[0].message.content.strip()
+                response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
 
             if verbose:
                 display_interaction(
@@ -2165,7 +1948,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     console=console or self.console
                 )
 
-            return response_text.strip()
+            return response_text.strip() if response_text else ""
 
         except Exception as error:
             display_error(f"Error in response_async: {str(error)}")
@@ -890,45 +890,8 @@ class OpenAIClient:
                         "content": results_str
                     })
 
-                # Check if we should continue (for tools like sequential thinking)
-                should_continue = False
-                for tool_call in tool_calls:
-                    # Handle both ToolCall dataclass and OpenAI object
-                    if isinstance(tool_call, ToolCall):
-                        function_name = tool_call.function["name"]
-                        arguments = json.loads(tool_call.function["arguments"])
-                    else:
-                        function_name = tool_call.function.name
-                        arguments = json.loads(tool_call.function.arguments)
-
-                    # For sequential thinking tool, check if nextThoughtNeeded is True
-                    if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
-                        should_continue = True
-                        break
-
-                if not should_continue:
-                    # Get final response after tool calls
-                    if stream:
-                        final_response = self.process_stream_response(
-                            messages=messages,
-                            model=model,
-                            temperature=temperature,
-                            tools=formatted_tools,
-                            start_time=start_time,
-                            console=console,
-                            display_fn=display_fn,
-                            reasoning_steps=reasoning_steps,
-                            **kwargs
-                        )
-                    else:
-                        final_response = self.create_completion(
-                            messages=messages,
-                            model=model,
-                            temperature=temperature,
-                            stream=False,
-                            **kwargs
-                        )
-                    break
+                # Continue the loop to allow more tool calls
+                # The model will see tool results and can make additional tool calls
 
                 iteration_count += 1
             else:
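Previously the client broke out of the loop after one round of tool results unless the sequentialthinking tool explicitly asked to continue; now it always loops back, so the model itself decides when to stop by replying without tool calls. The stop conditions reduce to something like this sketch (names are illustrative, not package API):

    def should_stop(reply, iteration_count, max_iterations=10):
        # the loop now ends only when the model returns no tool calls
        # or the iteration cap is reached; no tool-specific exceptions
        return not reply.get("tool_calls") or iteration_count >= max_iterations

The same simplification is applied to the async client in the next hunk.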
@@ -1067,45 +1030,8 @@ class OpenAIClient:
                         "content": results_str
                     })
 
-                # Check if we should continue (for tools like sequential thinking)
-                should_continue = False
-                for tool_call in tool_calls:
-                    # Handle both ToolCall dataclass and OpenAI object
-                    if isinstance(tool_call, ToolCall):
-                        function_name = tool_call.function["name"]
-                        arguments = json.loads(tool_call.function["arguments"])
-                    else:
-                        function_name = tool_call.function.name
-                        arguments = json.loads(tool_call.function.arguments)
-
-                    # For sequential thinking tool, check if nextThoughtNeeded is True
-                    if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
-                        should_continue = True
-                        break
-
-                if not should_continue:
-                    # Get final response after tool calls
-                    if stream:
-                        final_response = await self.process_stream_response_async(
-                            messages=messages,
-                            model=model,
-                            temperature=temperature,
-                            tools=formatted_tools,
-                            start_time=start_time,
-                            console=console,
-                            display_fn=display_fn,
-                            reasoning_steps=reasoning_steps,
-                            **kwargs
-                        )
-                    else:
-                        final_response = await self.acreate_completion(
-                            messages=messages,
-                            model=model,
-                            temperature=temperature,
-                            stream=False,
-                            **kwargs
-                        )
-                    break
+                # Continue the loop to allow more tool calls
+                # The model will see tool results and can make additional tool calls
 
                 iteration_count += 1
             else:
@@ -469,16 +469,18 @@ Subtask: {st.name}
                 logging.debug(f"Task type: {task_to_check.task_type}")
                 logging.debug(f"Task status before reset check: {task_to_check.status}")
                 logging.debug(f"Task rerun: {getattr(task_to_check, 'rerun', True)}") # default to True if not set
+                logging.debug(f"Task async_execution: {task_to_check.async_execution}")
 
                 if (getattr(task_to_check, 'rerun', True) and # Corrected condition - reset only if rerun is True (or default True)
                     task_to_check.task_type != "loop" and # Removed "decision" from exclusion
                     not any(t.task_type == "loop" and subtask_name.startswith(t.name + "_")
-                            for t in self.tasks.values())):
-                    logging.debug(f"=== Resetting non-loop, non-decision task {subtask_name} to 'not started' ===")
+                            for t in self.tasks.values()) and
+                    not task_to_check.async_execution): # Don't reset async parallel tasks
+                    logging.debug(f"=== Resetting non-loop, non-decision, non-parallel task {subtask_name} to 'not started' ===")
                     self.tasks[task_id].status = "not started"
                     logging.debug(f"Task status after reset: {self.tasks[task_id].status}")
                 else:
-                    logging.debug(f"=== Skipping reset for loop/decision/subtask or rerun=False: {subtask_name} ===")
+                    logging.debug(f"=== Skipping reset for loop/decision/subtask/parallel or rerun=False: {subtask_name} ===")
                     logging.debug(f"Keeping status as: {self.tasks[task_id].status}")
 
                 # Handle loop progression
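The reset predicate now has four conjuncts, the new one keeping completed async parallel tasks from being re-queued. Restated as a standalone function for readability (a paraphrase of the condition, not package API):

    def should_reset(task, subtask_name, all_tasks):
        """True only when every reset condition holds."""
        return (
            getattr(task, "rerun", True)              # rerun allowed (defaults True)
            and task.task_type != "loop"              # never reset loop tasks...
            and not any(                              # ...or subtasks of a loop task
                t.task_type == "loop" and subtask_name.startswith(t.name + "_")
                for t in all_tasks
            )
            and not task.async_execution              # new: keep async parallel tasks
        )

The identical change is duplicated in the async code path below.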
@@ -1099,16 +1101,18 @@ Subtask: {st.name}
                 logging.debug(f"Task type: {task_to_check.task_type}")
                 logging.debug(f"Task status before reset check: {task_to_check.status}")
                 logging.debug(f"Task rerun: {getattr(task_to_check, 'rerun', True)}") # default to True if not set
+                logging.debug(f"Task async_execution: {task_to_check.async_execution}")
 
                 if (getattr(task_to_check, 'rerun', True) and # Corrected condition - reset only if rerun is True (or default True)
                     task_to_check.task_type != "loop" and # Removed "decision" from exclusion
                     not any(t.task_type == "loop" and subtask_name.startswith(t.name + "_")
-                            for t in self.tasks.values())):
-                    logging.debug(f"=== Resetting non-loop, non-decision task {subtask_name} to 'not started' ===")
+                            for t in self.tasks.values()) and
+                    not task_to_check.async_execution): # Don't reset async parallel tasks
+                    logging.debug(f"=== Resetting non-loop, non-decision, non-parallel task {subtask_name} to 'not started' ===")
                     self.tasks[task_id].status = "not started"
                     logging.debug(f"Task status after reset: {self.tasks[task_id].status}")
                 else:
-                    logging.debug(f"=== Skipping reset for loop/decision/subtask or rerun=False: {subtask_name} ===")
+                    logging.debug(f"=== Skipping reset for loop/decision/subtask/parallel or rerun=False: {subtask_name} ===")
                     logging.debug(f"Keeping status as: {self.tasks[task_id].status}")
 
 
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.126
+Version: 0.0.128
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
@@ -7,8 +7,8 @@ praisonaiagents/agent/agent.py,sha256=zuwZ3U-wwEu3x_BK4SYPJPKC7cZ_e8iak5ILn_H9yQ
 praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
-praisonaiagents/agents/agents.py,sha256=WnptTEMSDMAM30Ka6rOAu6rBD-ZLev3qphb1a3BbP1g,63301
-praisonaiagents/agents/autoagents.py,sha256=BIbsl1m7SzZOh5BO2dfV_xi8sivGSWU9Xj4lhAkFru8,20765
+praisonaiagents/agents/agents.py,sha256=21JwDl6-YBbZfEfWXgSJ-iqJ48kpAuG3OuzzwCHddEs,63161
+praisonaiagents/agents/autoagents.py,sha256=v5pJfTgHnFzG5K2gHwfRA0nZ7Ikptir6hUNvOZ--E44,20777
 praisonaiagents/guardrails/__init__.py,sha256=HA8zhp-KRHTxo0194MUwXOUJjPyjOu7E3d7xUIKYVVY,310
 praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXCFIyZXZVY4f0hnQyWc,1352
 praisonaiagents/guardrails/llm_guardrail.py,sha256=MTTqmYDdZX-18QN9T17T5P_6H2qnV8GVgymJufW1WuM,3277
@@ -16,9 +16,9 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=-di_h9HxXQfAhTMMerhK16tfw8DtUndp44TGkBOzkZs,15539
 praisonaiagents/llm/__init__.py,sha256=DtFSBjsVQj7AOTM0x5Q0bZnrbxb-t2ljom5Aid5xJEs,1547
-praisonaiagents/llm/llm.py,sha256=rewZhxoaYvVN7Hwj2lAYC3PS9RNIJkyhlyvj9W2Yh9Y,110721
+praisonaiagents/llm/llm.py,sha256=CsWZPIQfY6VFYV2mmPyxqHmsDlKEeuXtbuybeqwWPxU,98341
 praisonaiagents/llm/model_capabilities.py,sha256=poxOxATUOi9XPTx3v6BPnXvSfikWSA9NciWQVuPU7Zg,2586
-praisonaiagents/llm/openai_client.py,sha256=0JvjCDHoH8I8kIt5vvObARkGdVaPWdTIv_FoEQ5EQPA,48973
+praisonaiagents/llm/openai_client.py,sha256=6KANw9SNiglvfJvTcpDPZjuTKG6cThD1t-ZqgKvmZiw,45356
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
 praisonaiagents/mcp/mcp.py,sha256=T0G0rQotHxk9qTnG1tjQLr4c0BUSLnEqz9sIMx4F954,21598
 praisonaiagents/mcp/mcp_http_stream.py,sha256=Yh-69eIlLQS_M0bd__y7NzSjOqqX6R8Ed4eJQw6xXgg,18314
@@ -26,7 +26,7 @@ praisonaiagents/mcp/mcp_sse.py,sha256=KO10tAgZ5vSKeRhkJIZcdJ0ZmhRybS39i1KybWt4D7
 praisonaiagents/memory/__init__.py,sha256=aEFdhgtTqDdMhc_JCWM-f4XI9cZIj7Wz5g_MUa-0amg,397
 praisonaiagents/memory/memory.py,sha256=D5BmQTktv6VOJ49yW2m1MjjCJ5UDSX1Qo46_443ymKo,44276
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
-praisonaiagents/process/process.py,sha256=qar8gljiU1DrpIKvKI1jZqQ8g2g_BGCG01l-FtYwj-U,73449
+praisonaiagents/process/process.py,sha256=vjITSEzKtcYDcEgPu2aItsyrmYnlG9ygiR2AstMIO8o,73859
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
 praisonaiagents/task/task.py,sha256=vehRhEpTTBWTv-qxHMhJwcshdyR821TYQAaODDpaOL4,20882
 praisonaiagents/telemetry/__init__.py,sha256=5iAOrj_N_cKMmh2ltWGYs3PfOYt_jcwUoElW8fTAIsc,3062
@@ -55,7 +55,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.126.dist-info/METADATA,sha256=uLef5SSGpu_Zs4lOWdSMXrfMy78p0-Shacqup0JNpK8,1699
-praisonaiagents-0.0.126.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-praisonaiagents-0.0.126.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.126.dist-info/RECORD,,
+praisonaiagents-0.0.128.dist-info/METADATA,sha256=LCnTc_OJsDCkPPT0MzT7SieRUXtn8G6bMazLVZTDKPQ,1699
+praisonaiagents-0.0.128.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.128.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.128.dist-info/RECORD,,