praisonaiagents-0.0.125-py3-none-any.whl → praisonaiagents-0.0.126-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
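Two source files change in this release, alongside the routine METADATA and RECORD version bumps: praisonaiagents/agent/agent.py now forwards the caller's stream flag from the chat method into the LLM layer, and praisonaiagents/llm/llm.py restructures both get_response and get_response_async so that tool execution runs inside a bounded loop. Instead of making one final summary call after the first round of tool results, the new code appends the assistant's response to messages and re-enters the loop until the model stops requesting tools (or max_iterations is reached), while any reasoning_content captured along the way is stored for the final return. A minimal sketch of that control flow, with hypothetical call_llm and run_tool helpers standing in for the package's litellm calls and tool dispatch:

    # Sketch of the sequential tool-calling loop introduced in 0.0.126.
    # call_llm() and run_tool() are illustrative stand-ins, not package APIs.
    def tool_call_loop(call_llm, run_tool, messages, max_iterations=10):
        iteration_count = 0
        final_response_text = ""
        while iteration_count < max_iterations:
            response = call_llm(messages)               # may request tool calls
            if response.tool_calls:
                for tool_call in response.tool_calls:
                    result = run_tool(tool_call)        # execute and record each tool
                    messages.append({"role": "tool",
                                     "tool_call_id": tool_call.id,
                                     "content": str(result)})
                messages.append({"role": "assistant", "content": response.text})
                iteration_count += 1
                continue                                # the model may need more tools
            # No tool calls: if tools ran earlier, this is the final answer
            if iteration_count > 0:
                final_response_text = response.text.strip()
            break
        return final_response_text or response.text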
--- praisonaiagents/agent/agent.py (0.0.125)
+++ praisonaiagents/agent/agent.py (0.0.126)
@@ -1235,7 +1235,8 @@ Your Goal: {self.goal}"""
                 agent_role=self.role,
                 agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
                 execute_tool_fn=self.execute_tool, # Pass tool execution function
-                reasoning_steps=reasoning_steps
+                reasoning_steps=reasoning_steps,
+                stream=stream # Pass the stream parameter from chat method
             )
 
             self.chat_history.append({"role": "assistant", "content": response_text})
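The agent.py hunk above closes a gap where a per-call stream argument to the chat method was dropped before reaching the LLM layer; as of 0.0.126 it is forwarded alongside reasoning_steps. A hedged usage sketch (the Agent constructor arguments are abbreviated, and the exact chat() signature should be checked against this release):

    # Hypothetical usage: the stream flag passed to chat() now reaches the
    # underlying litellm call instead of being ignored.
    from praisonaiagents import Agent

    agent = Agent(instructions="You are a helpful assistant")
    reply = agent.chat("Summarise the latest report", stream=False)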
--- praisonaiagents/llm/llm.py (0.0.125)
+++ praisonaiagents/llm/llm.py (0.0.126)
@@ -680,6 +680,7 @@ class LLM:
         max_iterations = 10 # Prevent infinite loops
         iteration_count = 0
         final_response_text = ""
+        stored_reasoning_content = None # Store reasoning content from tool execution
 
         while iteration_count < max_iterations:
             try:
@@ -857,8 +858,6 @@ class LLM:
                         iteration_count += 1
                         continue
 
-                    # If we reach here, no more tool calls needed - get final response
-                    # Make one more call to get the final summary response
                     # Special handling for Ollama models that don't automatically process tool results
                     ollama_handled = False
                     ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
@@ -918,15 +917,23 @@ class LLM:
                                 console=console
                             )
 
-                            # Return the final response after processing Ollama's follow-up
+                            # Update messages and continue the loop instead of returning
                             if final_response_text:
-                                return final_response_text
+                                # Update messages with the response to maintain conversation context
+                                messages.append({
+                                    "role": "assistant",
+                                    "content": final_response_text
+                                })
+                                # Continue the loop to check if more tools are needed
+                                iteration_count += 1
+                                continue
                             else:
                                 logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
 
-                    # If reasoning_steps is True and we haven't handled Ollama already, do a single non-streaming call
+                    # Handle reasoning_steps after tool execution if not already handled by Ollama
                     if reasoning_steps and not ollama_handled:
-                        resp = litellm.completion(
+                        # Make a non-streaming call to capture reasoning content
+                        reasoning_resp = litellm.completion(
                             **self._build_completion_params(
                                 messages=messages,
                                 temperature=temperature,
@@ -934,89 +941,28 @@ class LLM:
                                 **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                             )
                         )
-                        reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
-                        response_text = resp["choices"][0]["message"]["content"]
+                        reasoning_content = reasoning_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+                        response_text = reasoning_resp["choices"][0]["message"]["content"]
 
-                        # Optionally display reasoning if present
-                        if verbose and reasoning_content:
-                            display_interaction(
-                                original_prompt,
-                                f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
-                                markdown=markdown,
-                                generation_time=time.time() - start_time,
-                                console=console
-                            )
-                        else:
-                            display_interaction(
-                                original_prompt,
-                                response_text,
-                                markdown=markdown,
-                                generation_time=time.time() - start_time,
-                                console=console
-                            )
-
-                    # Otherwise do the existing streaming approach if not already handled
-                    elif not ollama_handled:
-                        # Get response after tool calls
-                        if stream:
-                            # Streaming approach
-                            if verbose:
-                                with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
-                                    final_response_text = ""
-                                    for chunk in litellm.completion(
-                                        **self._build_completion_params(
-                                            messages=messages,
-                                            tools=formatted_tools,
-                                            temperature=temperature,
-                                            stream=True,
-                                            **kwargs
-                                        )
-                                    ):
-                                        if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                            content = chunk.choices[0].delta.content
-                                            final_response_text += content
-                                            live.update(display_generating(final_response_text, current_time))
-                            else:
-                                final_response_text = ""
-                                for chunk in litellm.completion(
-                                    **self._build_completion_params(
-                                        messages=messages,
-                                        tools=formatted_tools,
-                                        temperature=temperature,
-                                        stream=True,
-                                        **kwargs
-                                    )
-                                ):
-                                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                        final_response_text += chunk.choices[0].delta.content
-                        else:
-                            # Non-streaming approach
-                            resp = litellm.completion(
-                                **self._build_completion_params(
-                                    messages=messages,
-                                    tools=formatted_tools,
-                                    temperature=temperature,
-                                    stream=False,
-                                    **kwargs
-                                )
-                            )
-                            final_response_text = resp.get("choices", [{}])[0].get("message", {}).get("content", "") or ""
+                        # Store reasoning content for later use
+                        if reasoning_content:
+                            stored_reasoning_content = reasoning_content
 
-                        final_response_text = final_response_text.strip()
-
-                        # Display final response
-                        if verbose:
-                            display_interaction(
-                                original_prompt,
-                                final_response_text,
-                                markdown=markdown,
-                                generation_time=time.time() - start_time,
-                                console=console
-                            )
+                        # Update messages with the response
+                        messages.append({
+                            "role": "assistant",
+                            "content": response_text
+                        })
 
-                        return final_response_text
+                    # After tool execution, continue the loop to check if more tools are needed
+                    # instead of immediately trying to get a final response
+                    iteration_count += 1
+                    continue
                 else:
                     # No tool calls, we're done with this iteration
+                    # If we've executed tools in previous iterations, this response contains the final answer
+                    if iteration_count > 0:
+                        final_response_text = response_text.strip()
                     break
 
             except Exception as e:
@@ -1029,16 +975,30 @@ class LLM:
 
         # No tool calls were made in this iteration, return the response
         if verbose:
-            display_interaction(
-                original_prompt,
-                response_text,
-                markdown=markdown,
-                generation_time=time.time() - start_time,
-                console=console
-            )
+            # If we have stored reasoning content from tool execution, display it
+            if stored_reasoning_content:
+                display_interaction(
+                    original_prompt,
+                    f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{response_text}",
+                    markdown=markdown,
+                    generation_time=time.time() - start_time,
+                    console=console
+                )
+            else:
+                display_interaction(
+                    original_prompt,
+                    response_text,
+                    markdown=markdown,
+                    generation_time=time.time() - start_time,
+                    console=console
+                )
 
         response_text = response_text.strip()
 
+        # Return reasoning content if reasoning_steps is True and we have it
+        if reasoning_steps and stored_reasoning_content:
+            return stored_reasoning_content
+
         # Handle output formatting
         if output_json or output_pydantic:
             self.chat_history.append({"role": "user", "content": original_prompt})
@@ -1053,8 +1013,8 @@ class LLM:
                 display_interaction(original_prompt, response_text, markdown=markdown,
                                     generation_time=time.time() - start_time, console=console)
             # Return reasoning content if reasoning_steps is True
-            if reasoning_steps and reasoning_content:
-                return reasoning_content
+            if reasoning_steps and stored_reasoning_content:
+                return stored_reasoning_content
             return response_text
 
         # Handle self-reflection loop
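In the synchronous get_response hunks above, the reasoning_steps branch no longer displays a result and returns immediately after tool execution. Condensed from the diff (some parameters, display calls, and the surrounding while loop omitted), the capture-and-continue shape is roughly:

    # Condensed from the 0.0.126 sync path: capture reasoning content after
    # tool execution, remember it, and let the while loop decide what's next.
    if reasoning_steps and not ollama_handled:
        reasoning_resp = litellm.completion(
            **self._build_completion_params(
                messages=messages,
                temperature=temperature,
                **{k: v for k, v in kwargs.items() if k != 'reasoning_steps'}
            )
        )
        message = reasoning_resp["choices"][0]["message"]  # local alias for brevity
        reasoning_content = message.get("provider_specific_fields", {}).get("reasoning_content")
        response_text = message["content"]
        if reasoning_content:
            stored_reasoning_content = reasoning_content   # reused at final return
        messages.append({"role": "assistant", "content": response_text})

    iteration_count += 1
    continue  # back to the top of the tool-calling loop

The asynchronous get_response_async method receives the same treatment in the hunks that follow.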
@@ -1317,118 +1277,126 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         # Format tools for LiteLLM using the shared helper
         formatted_tools = self._format_tools_for_litellm(tools)
 
-        response_text = ""
-        if reasoning_steps:
-            # Non-streaming call to capture reasoning
-            resp = await litellm.acompletion(
-                **self._build_completion_params(
-                    messages=messages,
+        # Initialize variables for iteration loop
+        max_iterations = 10 # Prevent infinite loops
+        iteration_count = 0
+        final_response_text = ""
+        stored_reasoning_content = None # Store reasoning content from tool execution
+
+        while iteration_count < max_iterations:
+            response_text = ""
+            reasoning_content = None
+            tool_calls = []
+
+            if reasoning_steps and iteration_count == 0:
+                # Non-streaming call to capture reasoning
+                resp = await litellm.acompletion(
+                    **self._build_completion_params(
+                        messages=messages,
                         temperature=temperature,
                         stream=False, # force non-streaming
                         **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
-            )
-            reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
-            response_text = resp["choices"][0]["message"]["content"]
-
-            if verbose and reasoning_content:
-                display_interaction(
-                    "Initial reasoning:",
-                    f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
-                    markdown=markdown,
-                    generation_time=time.time() - start_time,
-                    console=console
-                )
-            elif verbose:
-                display_interaction(
-                    "Initial response:",
-                    response_text,
-                    markdown=markdown,
-                    generation_time=time.time() - start_time,
-                    console=console
                 )
-        else:
-            # Determine if we should use streaming based on tool support
-            use_streaming = stream
-            if formatted_tools and not self._supports_streaming_tools():
-                # Provider doesn't support streaming with tools, use non-streaming
-                use_streaming = False
-
-            if use_streaming:
-                # Streaming approach (with or without tools)
-                tool_calls = []
+                reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
+                response_text = resp["choices"][0]["message"]["content"]
 
-            if verbose:
-                async for chunk in await litellm.acompletion(
-                    **self._build_completion_params(
-                        messages=messages,
-                        temperature=temperature,
-                        stream=True,
-                        tools=formatted_tools,
-                        **kwargs
-                    )
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta:
-                        delta = chunk.choices[0].delta
-                        response_text, tool_calls = self._process_stream_delta(
-                            delta, response_text, tool_calls, formatted_tools
+                if verbose and reasoning_content:
+                    display_interaction(
+                        "Initial reasoning:",
+                        f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
+                elif verbose:
+                    display_interaction(
+                        "Initial response:",
+                        response_text,
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
+            else:
+                # Determine if we should use streaming based on tool support
+                use_streaming = stream
+                if formatted_tools and not self._supports_streaming_tools():
+                    # Provider doesn't support streaming with tools, use non-streaming
+                    use_streaming = False
+
+                if use_streaming:
+                    # Streaming approach (with or without tools)
+                    tool_calls = []
+
+                    if verbose:
+                        async for chunk in await litellm.acompletion(
+                            **self._build_completion_params(
+                                messages=messages,
+                                temperature=temperature,
+                                stream=True,
+                                tools=formatted_tools,
+                                **kwargs
                             )
-                        if delta.content:
-                            print("\033[K", end="\r")
-                            print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta:
+                                delta = chunk.choices[0].delta
+                                response_text, tool_calls = self._process_stream_delta(
+                                    delta, response_text, tool_calls, formatted_tools
+                                )
+                                if delta.content:
+                                    print("\033[K", end="\r")
+                                    print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
 
+                    else:
+                        # Non-verbose streaming
+                        async for chunk in await litellm.acompletion(
+                            **self._build_completion_params(
+                                messages=messages,
+                                temperature=temperature,
+                                stream=True,
+                                tools=formatted_tools,
+                                **kwargs
+                            )
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta:
+                                delta = chunk.choices[0].delta
+                                if delta.content:
+                                    response_text += delta.content
+
+                                # Capture tool calls from streaming chunks if provider supports it
+                                if formatted_tools and self._supports_streaming_tools():
+                                    tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)
+
+                    response_text = response_text.strip()
+
+                    # We already have tool_calls from streaming if supported
+                    # No need for a second API call!
                 else:
-                    # Non-verbose streaming
-                    async for chunk in await litellm.acompletion(
+                    # Non-streaming approach (when tools require it or streaming is disabled)
+                    tool_response = await litellm.acompletion(
                         **self._build_completion_params(
                             messages=messages,
                             temperature=temperature,
-                            stream=True,
+                            stream=False,
                             tools=formatted_tools,
-                            **kwargs
+                            **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                         )
-                    ):
-                        if chunk and chunk.choices and chunk.choices[0].delta:
-                            delta = chunk.choices[0].delta
-                            if delta.content:
-                                response_text += delta.content
-
-                            # Capture tool calls from streaming chunks if provider supports it
-                            if formatted_tools and self._supports_streaming_tools():
-                                tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)
-
-            response_text = response_text.strip()
-
-            # We already have tool_calls from streaming if supported
-            # No need for a second API call!
-        else:
-            # Non-streaming approach (when tools require it or streaming is disabled)
-            tool_response = await litellm.acompletion(
-                **self._build_completion_params(
-                    messages=messages,
-                    temperature=temperature,
-                    stream=False,
-                    tools=formatted_tools,
-                    **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
-                )
-            )
-            response_text = tool_response.choices[0].message.get("content", "")
-            tool_calls = tool_response.choices[0].message.get("tool_calls", [])
-
-            if verbose:
-                # Display the complete response at once
-                display_interaction(
-                    original_prompt,
-                    response_text,
-                    markdown=markdown,
-                    generation_time=time.time() - start_time,
-                    console=console
-                )
+                    )
+                    response_text = tool_response.choices[0].message.get("content", "")
+                    tool_calls = tool_response.choices[0].message.get("tool_calls", [])
+
+                    if verbose:
+                        # Display the complete response at once
+                        display_interaction(
+                            original_prompt,
+                            response_text,
+                            markdown=markdown,
+                            generation_time=time.time() - start_time,
+                            console=console
+                        )
 
-        # Now handle tools if we have them (either from streaming or non-streaming)
-        if tools and execute_tool_fn and tool_calls:
-
-            if tool_calls:
+            # Now handle tools if we have them (either from streaming or non-streaming)
+            if tools and execute_tool_fn and tool_calls:
                 # Convert tool_calls to a serializable format for all providers
                 serializable_tool_calls = self._serialize_tool_calls(tool_calls)
                 messages.append({
@@ -1509,9 +1477,16 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                                 console=console
                             )
 
-                            # Return the final response after processing Ollama's follow-up
+                            # Store the response for potential final return
                             if final_response_text:
-                                return final_response_text
+                                # Update messages with the response to maintain conversation context
+                                messages.append({
+                                    "role": "assistant",
+                                    "content": final_response_text
+                                })
+                                # Continue the loop to check if more tools are needed
+                                iteration_count += 1
+                                continue
                             else:
                                 logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
 
@@ -1577,6 +1552,27 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                                     response_text += chunk.choices[0].delta.content
 
                 response_text = response_text.strip()
+
+                # After tool execution, update messages and continue the loop
+                if response_text:
+                    messages.append({
+                        "role": "assistant",
+                        "content": response_text
+                    })
+
+                # Store reasoning content if captured
+                if reasoning_steps and reasoning_content:
+                    stored_reasoning_content = reasoning_content
+
+                # Continue the loop to check if more tools are needed
+                iteration_count += 1
+                continue
+            else:
+                # No tool calls, we're done with this iteration
+                # If we've executed tools in previous iterations, this response contains the final answer
+                if iteration_count > 0:
+                    final_response_text = response_text.strip()
+                break
 
         # Handle output formatting
         if output_json or output_pydantic:
@@ -1588,13 +1584,27 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             return response_text
 
         if not self_reflect:
+            # Use final_response_text if we went through tool iterations
+            display_text = final_response_text if final_response_text else response_text
+
+            # Display with stored reasoning content if available
             if verbose:
-                display_interaction(original_prompt, response_text, markdown=markdown,
-                                    generation_time=time.time() - start_time, console=console)
-            # Return reasoning content if reasoning_steps is True
-            if reasoning_steps and reasoning_content:
-                return reasoning_content
-            return response_text
+                if stored_reasoning_content:
+                    display_interaction(
+                        original_prompt,
+                        f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{display_text}",
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
+                else:
+                    display_interaction(original_prompt, display_text, markdown=markdown,
+                                        generation_time=time.time() - start_time, console=console)
+
+            # Return reasoning content if reasoning_steps is True and we have it
+            if reasoning_steps and stored_reasoning_content:
+                return stored_reasoning_content
+            return display_text
 
         # Handle self-reflection
         reflection_prompt = f"""
--- praisonaiagents-0.0.125.dist-info/METADATA
+++ praisonaiagents-0.0.126.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.125
+Version: 0.0.126
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
--- praisonaiagents-0.0.125.dist-info/RECORD
+++ praisonaiagents-0.0.126.dist-info/RECORD
@@ -3,7 +3,7 @@ praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9
 praisonaiagents/main.py,sha256=7HEFVNmxMzRjVpbe52aYvQMZA013mA3YosrixM_Ua8Q,14975
 praisonaiagents/session.py,sha256=d-CZPYikOHb0q-H9f_IWKJsypnQfz1YKeLLkyxs6oDo,15532
 praisonaiagents/agent/__init__.py,sha256=IhIDtAkfJ99cxbttwou52coih_AejS2-jpazsX6LbDY,350
-praisonaiagents/agent/agent.py,sha256=9JB46swjvrPSm7pQnsawGd2mV50qA-tx3Vv05_OoJeU,119579
+praisonaiagents/agent/agent.py,sha256=zuwZ3U-wwEu3x_BK4SYPJPKC7cZ_e8iak5ILn_H9yQE,119660
 praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
@@ -16,7 +16,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=-di_h9HxXQfAhTMMerhK16tfw8DtUndp44TGkBOzkZs,15539
 praisonaiagents/llm/__init__.py,sha256=DtFSBjsVQj7AOTM0x5Q0bZnrbxb-t2ljom5Aid5xJEs,1547
-praisonaiagents/llm/llm.py,sha256=_zOKvdKkwG6Eojf0tyBlS1txSaj3H0nCAeUtm4ohHoI,110234
+praisonaiagents/llm/llm.py,sha256=rewZhxoaYvVN7Hwj2lAYC3PS9RNIJkyhlyvj9W2Yh9Y,110721
 praisonaiagents/llm/model_capabilities.py,sha256=poxOxATUOi9XPTx3v6BPnXvSfikWSA9NciWQVuPU7Zg,2586
 praisonaiagents/llm/openai_client.py,sha256=0JvjCDHoH8I8kIt5vvObARkGdVaPWdTIv_FoEQ5EQPA,48973
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
@@ -55,7 +55,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.125.dist-info/METADATA,sha256=h5XqjtcCkP0-ls4vCnQyOMTxwmAhCJKTw3dvKztChHs,1699
-praisonaiagents-0.0.125.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-praisonaiagents-0.0.125.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.125.dist-info/RECORD,,
+praisonaiagents-0.0.126.dist-info/METADATA,sha256=uLef5SSGpu_Zs4lOWdSMXrfMy78p0-Shacqup0JNpK8,1699
+praisonaiagents-0.0.126.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.126.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.126.dist-info/RECORD,,