praisonaiagents 0.0.127__py3-none-any.whl → 0.0.128__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that public registry.
- praisonaiagents/agents/agents.py +15 -17
- praisonaiagents/agents/autoagents.py +1 -1
- praisonaiagents/llm/llm.py +3 -220
- praisonaiagents/process/process.py +10 -6
- {praisonaiagents-0.0.127.dist-info → praisonaiagents-0.0.128.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.127.dist-info → praisonaiagents-0.0.128.dist-info}/RECORD +8 -8
- {praisonaiagents-0.0.127.dist-info → praisonaiagents-0.0.128.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.127.dist-info → praisonaiagents-0.0.128.dist-info}/top_level.txt +0 -0
praisonaiagents/agents/agents.py
CHANGED
@@ -480,24 +480,22 @@ Context:
         )

         if self.process == "workflow":
-
-            parallel_tasks = []
+            tasks_to_run = []
             async for task_id in process.aworkflow():
-                if self.tasks[task_id].async_execution
-
-
-                #
-
-
-
-
-
-
-
-
-
-
-                await asyncio.gather(*[self.arun_task(t) for t in parallel_tasks])
+                if self.tasks[task_id].async_execution:
+                    tasks_to_run.append(self.arun_task(task_id))
+                else:
+                    # If we encounter a sync task, we must wait for the previous async tasks to finish.
+                    if tasks_to_run:
+                        await asyncio.gather(*tasks_to_run)
+                        tasks_to_run = []
+
+                    # Run sync task in an executor to avoid blocking the event loop
+                    loop = asyncio.get_event_loop()
+                    await loop.run_in_executor(None, self.run_task, task_id)
+
+            if tasks_to_run:
+                await asyncio.gather(*tasks_to_run)

         elif self.process == "sequential":
             async for task_id in process.asequential():
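The new workflow path batches consecutive async tasks and flushes the batch whenever a synchronous task appears; the sync task then runs in an executor so the event loop stays responsive. A minimal, runnable sketch of that pattern, using an illustrative Task dataclass and run helpers rather than the real PraisonAI classes:

# Sketch only: Task, arun_task and run_task are stand-ins, not praisonaiagents APIs.
import asyncio
import time
from dataclasses import dataclass

@dataclass
class Task:
    name: str
    async_execution: bool

async def arun_task(task: Task) -> str:
    await asyncio.sleep(0.1)          # pretend async work
    return f"{task.name} (async) done"

def run_task(task: Task) -> str:
    time.sleep(0.1)                   # pretend blocking work
    return f"{task.name} (sync) done"

async def run_workflow(tasks: list[Task]) -> None:
    tasks_to_run = []                 # pending async tasks, gathered in batches
    for task in tasks:
        if task.async_execution:
            tasks_to_run.append(arun_task(task))
        else:
            # A sync task acts as a barrier: flush the pending async batch first...
            if tasks_to_run:
                print(await asyncio.gather(*tasks_to_run))
                tasks_to_run = []
            # ...then run the blocking task in an executor, mirroring run_in_executor above.
            loop = asyncio.get_event_loop()
            print(await loop.run_in_executor(None, run_task, task))
    if tasks_to_run:                  # flush any trailing async batch
        print(await asyncio.gather(*tasks_to_run))

if __name__ == "__main__":
    asyncio.run(run_workflow([Task("a", True), Task("b", True), Task("c", False), Task("d", True)]))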
praisonaiagents/agents/autoagents.py
CHANGED
@@ -136,7 +136,7 @@ class AutoAgents(PraisonAIAgents):
             completion_checker=completion_checker,
             max_retries=max_retries,
             process=process,
-            manager_llm=manager_llm
+            manager_llm=manager_llm or self.llm
         )

     def _display_agents_and_tasks(self, agents: List[Agent], tasks: List[Task]):
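With this change, AutoAgents no longer needs an explicit manager LLM: any falsy manager_llm falls back to the LLM already configured on the instance. A tiny sketch of the `or` fallback, with illustrative values rather than the AutoAgents signature:

# Illustrative only: pick_manager_llm is not a praisonaiagents function.
def pick_manager_llm(manager_llm, agent_llm):
    # Any falsy value (None, "") falls back to the agent-level model.
    return manager_llm or agent_llm

print(pick_manager_llm(None, "gpt-4o-mini"))      # gpt-4o-mini
print(pick_manager_llm("gpt-4o", "gpt-4o-mini"))  # gpt-4o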
praisonaiagents/llm/llm.py
CHANGED
@@ -858,102 +858,6 @@ class LLM:
                     iteration_count += 1
                     continue

-                # Special handling for Ollama models that don't automatically process tool results
-                ollama_handled = False
-                ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
-
-                if ollama_params:
-                    # Get response based on streaming mode
-                    if stream:
-                        # Streaming approach
-                        if verbose:
-                            with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
-                                response_text = ""
-                                for chunk in litellm.completion(
-                                    **self._build_completion_params(
-                                        messages=ollama_params["follow_up_messages"],
-                                        temperature=temperature,
-                                        stream=True
-                                    )
-                                ):
-                                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                        content = chunk.choices[0].delta.content
-                                        response_text += content
-                                        live.update(display_generating(response_text, start_time))
-                        else:
-                            response_text = ""
-                            for chunk in litellm.completion(
-                                **self._build_completion_params(
-                                    messages=ollama_params["follow_up_messages"],
-                                    temperature=temperature,
-                                    stream=True
-                                )
-                            ):
-                                if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                    response_text += chunk.choices[0].delta.content
-                    else:
-                        # Non-streaming approach
-                        resp = litellm.completion(
-                            **self._build_completion_params(
-                                messages=ollama_params["follow_up_messages"],
-                                temperature=temperature,
-                                stream=False
-                            )
-                        )
-                        response_text = resp.get("choices", [{}])[0].get("message", {}).get("content", "") or ""
-
-                    # Set flag to indicate Ollama was handled
-                    ollama_handled = True
-                    final_response_text = response_text.strip() if response_text else ""
-                    logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
-                    # Display the response if we got one
-                    if final_response_text and verbose:
-                        display_interaction(
-                            ollama_params["original_prompt"],
-                            final_response_text,
-                            markdown=markdown,
-                            generation_time=time.time() - start_time,
-                            console=console
-                        )
-
-                    # Update messages and continue the loop instead of returning
-                    if final_response_text:
-                        # Update messages with the response to maintain conversation context
-                        messages.append({
-                            "role": "assistant",
-                            "content": final_response_text
-                        })
-                        # Continue the loop to check if more tools are needed
-                        iteration_count += 1
-                        continue
-                    else:
-                        logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
-
-                # Handle reasoning_steps after tool execution if not already handled by Ollama
-                if reasoning_steps and not ollama_handled:
-                    # Make a non-streaming call to capture reasoning content
-                    reasoning_resp = litellm.completion(
-                        **self._build_completion_params(
-                            messages=messages,
-                            temperature=temperature,
-                            stream=False, # force non-streaming
-                            **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
-                        )
-                    )
-                    reasoning_content = reasoning_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
-                    response_text = reasoning_resp["choices"][0]["message"]["content"]
-
-                    # Store reasoning content for later use
-                    if reasoning_content:
-                        stored_reasoning_content = reasoning_content
-
-                    # Update messages with the response
-                    messages.append({
-                        "role": "assistant",
-                        "content": response_text
-                    })
-
                 # After tool execution, continue the loop to check if more tools are needed
                 # instead of immediately trying to get a final response
                 iteration_count += 1
@@ -1430,68 +1334,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 # Get response after tool calls
                 response_text = ""

-                #
-
-                ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
-
-                if ollama_params:
-                    # Get response with streaming
-                    if verbose:
-                        response_text = ""
-                        async for chunk in await litellm.acompletion(
-                            **self._build_completion_params(
-                                messages=ollama_params["follow_up_messages"],
-                                temperature=temperature,
-                                stream=stream
-                            )
-                        ):
-                            if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                content = chunk.choices[0].delta.content
-                                response_text += content
-                                print("\033[K", end="\r")
-                                print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
-                    else:
-                        response_text = ""
-                        async for chunk in await litellm.acompletion(
-                            **self._build_completion_params(
-                                messages=ollama_params["follow_up_messages"],
-                                temperature=temperature,
-                                stream=stream
-                            )
-                        ):
-                            if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                response_text += chunk.choices[0].delta.content
-
-                    # Set flag to indicate Ollama was handled
-                    ollama_handled = True
-                    final_response_text = response_text.strip()
-                    logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
-                    # Display the response if we got one
-                    if final_response_text and verbose:
-                        display_interaction(
-                            ollama_params["original_prompt"],
-                            final_response_text,
-                            markdown=markdown,
-                            generation_time=time.time() - start_time,
-                            console=console
-                        )
-
-                    # Store the response for potential final return
-                    if final_response_text:
-                        # Update messages with the response to maintain conversation context
-                        messages.append({
-                            "role": "assistant",
-                            "content": final_response_text
-                        })
-                        # Continue the loop to check if more tools are needed
-                        iteration_count += 1
-                        continue
-                    else:
-                        logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
-
-                # If no special handling was needed or if it's not an Ollama model
-                if reasoning_steps and not ollama_handled:
+                # If no special handling was needed
+                if reasoning_steps:
                     # Non-streaming call to capture reasoning
                     resp = await litellm.acompletion(
                         **self._build_completion_params(
@@ -1521,7 +1365,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                             generation_time=time.time() - start_time,
                             console=console
                         )
-
+                else:
                     # Get response after tool calls with streaming if not already handled
                     if verbose:
                         async for chunk in await litellm.acompletion(
@@ -1790,67 +1634,6 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         litellm.callbacks = events

-    def _handle_ollama_model(self, response_text: str, tool_results: List[Any], messages: List[Dict], original_prompt: Union[str, List[Dict]]) -> Optional[Dict[str, Any]]:
-        """
-        Handle special Ollama model requirements when processing tool results.
-
-        Args:
-            response_text: The initial response text from the model
-            tool_results: List of tool execution results
-            messages: The conversation messages list
-            original_prompt: The original user prompt
-
-        Returns:
-            Dict with follow-up parameters if Ollama needs special handling, None otherwise
-        """
-        if not self._is_ollama_provider() or not tool_results:
-            return None
-
-        # Check if the response is just a JSON tool call
-        try:
-            json_response = json.loads(response_text.strip() if response_text else "{}")
-            if not (('name' in json_response or 'function' in json_response) and
-                    not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found'])):
-                return None
-
-            logging.debug("Detected Ollama returning only tool call JSON, preparing follow-up call to process results")
-
-            # Extract the original user query from messages
-            original_query = ""
-            for msg in reversed(messages): # Look from the end to find the most recent user message
-                if msg.get("role") == "user":
-                    content = msg.get("content", "")
-                    # Handle list content (multimodal)
-                    if isinstance(content, list):
-                        for item in content:
-                            if isinstance(item, dict) and item.get("type") == "text":
-                                original_query = item.get("text", "")
-                                break
-                    else:
-                        original_query = content
-                    if original_query:
-                        break
-
-            # Create a shorter follow-up prompt with all tool results
-            # If there's only one result, use it directly; otherwise combine them
-            if len(tool_results) == 1:
-                results_text = json.dumps(tool_results[0], indent=2)
-            else:
-                results_text = json.dumps(tool_results, indent=2)
-
-            follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
-            logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
-            logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")
-
-            # Return parameters for follow-up call
-            return {
-                "follow_up_messages": [{"role": "user", "content": follow_up_prompt}],
-                "original_prompt": original_prompt
-            }
-
-        except (json.JSONDecodeError, KeyError):
-            # Not a JSON response or not a tool call format
-            return None

     def _build_completion_params(self, **override_params) -> Dict[str, Any]:
         """Build parameters for litellm completion calls with all necessary config"""
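With the Ollama-specific follow-up logic and _handle_ollama_model removed, the async post-tool-call path collapses to a single branch on reasoning_steps, with the streaming fallback now in an explicit else. A runnable stub of that shape, where call_llm is an assumed stand-in for the litellm.acompletion calls shown in the hunks above:

# Sketch only: call_llm is a stub, not the real llm.py code path.
import asyncio

async def call_llm(stream: bool) -> str:
    await asyncio.sleep(0)            # placeholder for the actual model call
    return f"final answer (stream={stream})"

async def respond_after_tools(reasoning_steps: bool) -> str:
    # Only two paths remain after tool execution once the Ollama branch is gone:
    if reasoning_steps:
        # non-streaming call so reasoning content can be captured from the response
        return await call_llm(stream=False)
    else:
        # otherwise stream the final answer, as before
        return await call_llm(stream=True)

print(asyncio.run(respond_after_tools(reasoning_steps=False)))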
praisonaiagents/process/process.py
CHANGED
@@ -469,16 +469,18 @@ Subtask: {st.name}
                 logging.debug(f"Task type: {task_to_check.task_type}")
                 logging.debug(f"Task status before reset check: {task_to_check.status}")
                 logging.debug(f"Task rerun: {getattr(task_to_check, 'rerun', True)}") # default to True if not set
+                logging.debug(f"Task async_execution: {task_to_check.async_execution}")

                 if (getattr(task_to_check, 'rerun', True) and # Corrected condition - reset only if rerun is True (or default True)
                     task_to_check.task_type != "loop" and # Removed "decision" from exclusion
                     not any(t.task_type == "loop" and subtask_name.startswith(t.name + "_")
-                            for t in self.tasks.values())
-
+                            for t in self.tasks.values()) and
+                    not task_to_check.async_execution): # Don't reset async parallel tasks
+                    logging.debug(f"=== Resetting non-loop, non-decision, non-parallel task {subtask_name} to 'not started' ===")
                     self.tasks[task_id].status = "not started"
                     logging.debug(f"Task status after reset: {self.tasks[task_id].status}")
                 else:
-                    logging.debug(f"=== Skipping reset for loop/decision/subtask or rerun=False: {subtask_name} ===")
+                    logging.debug(f"=== Skipping reset for loop/decision/subtask/parallel or rerun=False: {subtask_name} ===")
                     logging.debug(f"Keeping status as: {self.tasks[task_id].status}")

                 # Handle loop progression
@@ -1099,16 +1101,18 @@ Subtask: {st.name}
                 logging.debug(f"Task type: {task_to_check.task_type}")
                 logging.debug(f"Task status before reset check: {task_to_check.status}")
                 logging.debug(f"Task rerun: {getattr(task_to_check, 'rerun', True)}") # default to True if not set
+                logging.debug(f"Task async_execution: {task_to_check.async_execution}")

                 if (getattr(task_to_check, 'rerun', True) and # Corrected condition - reset only if rerun is True (or default True)
                     task_to_check.task_type != "loop" and # Removed "decision" from exclusion
                     not any(t.task_type == "loop" and subtask_name.startswith(t.name + "_")
-                            for t in self.tasks.values())
-
+                            for t in self.tasks.values()) and
+                    not task_to_check.async_execution): # Don't reset async parallel tasks
+                    logging.debug(f"=== Resetting non-loop, non-decision, non-parallel task {subtask_name} to 'not started' ===")
                     self.tasks[task_id].status = "not started"
                     logging.debug(f"Task status after reset: {self.tasks[task_id].status}")
                 else:
-                    logging.debug(f"=== Skipping reset for loop/decision/subtask or rerun=False: {subtask_name} ===")
+                    logging.debug(f"=== Skipping reset for loop/decision/subtask/parallel or rerun=False: {subtask_name} ===")
                     logging.debug(f"Keeping status as: {self.tasks[task_id].status}")


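The reset condition now also skips tasks flagged async_execution, so parallel tasks keep their completed status instead of being re-queued. A sketch of the combined predicate, using an illustrative Task dataclass rather than the real praisonaiagents Task:

# Sketch only: Task and should_reset are stand-ins, not process.py itself.
from dataclasses import dataclass

@dataclass
class Task:
    name: str
    task_type: str = "task"
    rerun: bool = True
    async_execution: bool = False

def should_reset(task: Task, subtask_name: str, all_tasks: dict) -> bool:
    return (
        getattr(task, "rerun", True)                      # only tasks that may rerun
        and task.task_type != "loop"                      # never reset loop tasks
        and not any(t.task_type == "loop" and subtask_name.startswith(t.name + "_")
                    for t in all_tasks.values())          # nor subtasks spawned by a loop
        and not task.async_execution                      # new: leave async parallel tasks alone
    )

tasks = {1: Task("collect", async_execution=True), 2: Task("report")}
print(should_reset(tasks[1], "collect", tasks))  # False: async parallel task keeps its status
print(should_reset(tasks[2], "report", tasks))   # True: ordinary sync task is reset to "not started"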
{praisonaiagents-0.0.127.dist-info → praisonaiagents-0.0.128.dist-info}/RECORD
CHANGED
@@ -7,8 +7,8 @@ praisonaiagents/agent/agent.py,sha256=zuwZ3U-wwEu3x_BK4SYPJPKC7cZ_e8iak5ILn_H9yQ
 praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
-praisonaiagents/agents/agents.py,sha256=
-praisonaiagents/agents/autoagents.py,sha256=
+praisonaiagents/agents/agents.py,sha256=21JwDl6-YBbZfEfWXgSJ-iqJ48kpAuG3OuzzwCHddEs,63161
+praisonaiagents/agents/autoagents.py,sha256=v5pJfTgHnFzG5K2gHwfRA0nZ7Ikptir6hUNvOZ--E44,20777
 praisonaiagents/guardrails/__init__.py,sha256=HA8zhp-KRHTxo0194MUwXOUJjPyjOu7E3d7xUIKYVVY,310
 praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXCFIyZXZVY4f0hnQyWc,1352
 praisonaiagents/guardrails/llm_guardrail.py,sha256=MTTqmYDdZX-18QN9T17T5P_6H2qnV8GVgymJufW1WuM,3277
@@ -16,7 +16,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=-di_h9HxXQfAhTMMerhK16tfw8DtUndp44TGkBOzkZs,15539
 praisonaiagents/llm/__init__.py,sha256=DtFSBjsVQj7AOTM0x5Q0bZnrbxb-t2ljom5Aid5xJEs,1547
-praisonaiagents/llm/llm.py,sha256=
+praisonaiagents/llm/llm.py,sha256=CsWZPIQfY6VFYV2mmPyxqHmsDlKEeuXtbuybeqwWPxU,98341
 praisonaiagents/llm/model_capabilities.py,sha256=poxOxATUOi9XPTx3v6BPnXvSfikWSA9NciWQVuPU7Zg,2586
 praisonaiagents/llm/openai_client.py,sha256=6KANw9SNiglvfJvTcpDPZjuTKG6cThD1t-ZqgKvmZiw,45356
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
@@ -26,7 +26,7 @@ praisonaiagents/mcp/mcp_sse.py,sha256=KO10tAgZ5vSKeRhkJIZcdJ0ZmhRybS39i1KybWt4D7
 praisonaiagents/memory/__init__.py,sha256=aEFdhgtTqDdMhc_JCWM-f4XI9cZIj7Wz5g_MUa-0amg,397
 praisonaiagents/memory/memory.py,sha256=D5BmQTktv6VOJ49yW2m1MjjCJ5UDSX1Qo46_443ymKo,44276
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
-praisonaiagents/process/process.py,sha256=
+praisonaiagents/process/process.py,sha256=vjITSEzKtcYDcEgPu2aItsyrmYnlG9ygiR2AstMIO8o,73859
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
 praisonaiagents/task/task.py,sha256=vehRhEpTTBWTv-qxHMhJwcshdyR821TYQAaODDpaOL4,20882
 praisonaiagents/telemetry/__init__.py,sha256=5iAOrj_N_cKMmh2ltWGYs3PfOYt_jcwUoElW8fTAIsc,3062
@@ -55,7 +55,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
+praisonaiagents-0.0.128.dist-info/METADATA,sha256=LCnTc_OJsDCkPPT0MzT7SieRUXtn8G6bMazLVZTDKPQ,1699
+praisonaiagents-0.0.128.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.128.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.128.dist-info/RECORD,,
{praisonaiagents-0.0.127.dist-info → praisonaiagents-0.0.128.dist-info}/WHEEL
File without changes
{praisonaiagents-0.0.127.dist-info → praisonaiagents-0.0.128.dist-info}/top_level.txt
File without changes