praisonaiagents 0.0.68__py3-none-any.whl → 0.0.70__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only.
@@ -436,15 +436,56 @@ class LLM:
 
             # Handle tool calls
             if tool_calls and execute_tool_fn:
+                # Convert tool_calls to a serializable format for all providers
+                serializable_tool_calls = []
+                for tc in tool_calls:
+                    if isinstance(tc, dict):
+                        serializable_tool_calls.append(tc)  # Already a dict
+                    else:
+                        # Convert object to dict
+                        serializable_tool_calls.append({
+                            "id": tc.id,
+                            "type": getattr(tc, 'type', "function"),
+                            "function": {
+                                "name": tc.function.name,
+                                "arguments": tc.function.arguments
+                            }
+                        })
                 messages.append({
                     "role": "assistant",
                     "content": response_text,
-                    "tool_calls": tool_calls
+                    "tool_calls": serializable_tool_calls
                 })
 
                 for tool_call in tool_calls:
-                    function_name = tool_call["function"]["name"]
-                    arguments = json.loads(tool_call["function"]["arguments"])
+                    # Handle both object and dict access patterns
+                    if isinstance(tool_call, dict):
+                        # Special handling for Ollama provider which may have a different structure
+                        if self.model and self.model.startswith("ollama/"):
+                            try:
+                                # Try standard format first
+                                if "function" in tool_call and isinstance(tool_call["function"], dict):
+                                    function_name = tool_call["function"]["name"]
+                                    arguments = json.loads(tool_call["function"]["arguments"])
+                                else:
+                                    # Try alternative format that Ollama might return
+                                    function_name = tool_call.get("name", "unknown_function")
+                                    arguments = json.loads(tool_call.get("arguments", "{}"))
+                                tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
+                            except Exception as e:
+                                logging.error(f"Error processing Ollama tool call: {e}")
+                                function_name = "unknown_function"
+                                arguments = {}
+                                tool_call_id = f"tool_{id(tool_call)}"
+                        else:
+                            # Standard format for other providers
+                            function_name = tool_call["function"]["name"]
+                            arguments = json.loads(tool_call["function"]["arguments"])
+                            tool_call_id = tool_call["id"]
+                    else:
+                        function_name = tool_call.function.name
+                        arguments = json.loads(tool_call.function.arguments)
+                        tool_call_id = tool_call.id
 
                     logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
                     tool_result = execute_tool_fn(function_name, arguments)
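The normalization at the top of this hunk exists because litellm can return tool calls either as plain dicts or as provider objects (e.g. OpenAI-style `ChatCompletionMessageToolCall`), and objects are not JSON-serializable when the assistant turn is re-appended to `messages`. A minimal standalone sketch of the same conversion; the helper name `normalize_tool_call` is hypothetical, not part of the package:

```python
def normalize_tool_call(tc) -> dict:
    """Coerce a tool call (provider object or plain dict) into the
    OpenAI-style dict shape so it survives json.dumps()."""
    if isinstance(tc, dict):
        return tc  # already serializable
    # Attribute-style access, as on litellm/OpenAI tool-call objects
    return {
        "id": tc.id,
        "type": getattr(tc, "type", "function"),
        "function": {
            "name": tc.function.name,
            "arguments": tc.function.arguments,  # kept as a JSON string
        },
    }

# Usage, mirroring the hunk:
# messages.append({"role": "assistant", "content": response_text,
#                  "tool_calls": [normalize_tool_call(tc) for tc in tool_calls]})
```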
@@ -462,21 +503,61 @@ class LLM:
                         logging.debug(f"[TOOL_EXEC_DEBUG] About to display tool call with message: {display_message}")
                         display_tool_call(display_message, console=console)
 
-                        messages.append({
-                            "role": "tool",
-                            "tool_call_id": tool_call["id"],
-                            "content": json.dumps(tool_result)
-                        })
-                    else:
-                        logging.debug("[TOOL_EXEC_DEBUG] Verbose mode off, not displaying tool call")
-                        messages.append({
-                            "role": "tool",
-                            "tool_call_id": tool_call["id"],
-                            "content": "Function returned an empty output"
-                        })
+                    messages.append({
+                        "role": "tool",
+                        "tool_call_id": tool_call_id,
+                        "content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
+                    })
 
+                # Special handling for Ollama models that don't automatically process tool results
+                if self.model and self.model.startswith("ollama/") and tool_result:
+                    # For Ollama models, we need to explicitly ask the model to process the tool results
+                    # First, check if the response is just a JSON tool call
+                    try:
+                        # If the response_text is a valid JSON that looks like a tool call,
+                        # we need to make a follow-up call to process the results
+                        json_response = json.loads(response_text.strip())
+                        if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
+                            logging.debug("Detected Ollama returning only tool call JSON, making follow-up call to process results")
+
+                            # Create a prompt that asks the model to process the tool results
+                            follow_up_prompt = f"I've searched for apartments and found these results. Please analyze them and provide a summary of the best options:\n\n{json.dumps(tool_result, indent=2)}\n\nPlease format your response as a nice summary with the top options."
+
+                            # Make a follow-up call to process the results
+                            follow_up_messages = [
+                                {"role": "user", "content": follow_up_prompt}
+                            ]
+
+                            # Get response with streaming
+                            if verbose:
+                                with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+                                    response_text = ""
+                                    for chunk in litellm.completion(
+                                        model=self.model,
+                                        messages=follow_up_messages,
+                                        temperature=temperature,
+                                        stream=True
+                                    ):
+                                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                            content = chunk.choices[0].delta.content
+                                            response_text += content
+                                            live.update(display_generating(response_text, start_time))
+                            else:
+                                response_text = ""
+                                for chunk in litellm.completion(
+                                    model=self.model,
+                                    messages=follow_up_messages,
+                                    temperature=temperature,
+                                    stream=True
+                                ):
+                                    if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                        response_text += chunk.choices[0].delta.content
+                    except (json.JSONDecodeError, KeyError):
+                        # Not a JSON response or not a tool call format, continue normally
+                        pass
+
                 # If reasoning_steps is True, do a single non-streaming call
-                if reasoning_steps:
+                elif reasoning_steps:
                     resp = litellm.completion(
                         model=self.model,
                         messages=messages,
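The new branch above fires when an Ollama model answers with nothing but the tool-call JSON instead of prose, in which case a follow-up completion is made over the serialized `tool_result`. A sketch of just the detection heuristic, with a hypothetical `is_bare_tool_call` helper name:

```python
import json

def is_bare_tool_call(response_text: str) -> bool:
    """Mirror the hunk's heuristic: the reply parses as JSON that names a
    function/tool, and contains none of the words a real answer would."""
    try:
        payload = json.loads(response_text.strip())
    except json.JSONDecodeError:
        return False
    looks_like_call = isinstance(payload, dict) and ("name" in payload or "function" in payload)
    answer_words = ("summary", "option", "result", "found")
    return looks_like_call and not any(w in response_text.lower() for w in answer_words)
```

Note that the shipped `follow_up_prompt` and the `['summary', 'option', 'result', 'found']` word list are hardcoded to an apartment-search scenario, so the heuristic is tuned to that one use case.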
@@ -930,15 +1011,56 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             tool_calls = tool_response.choices[0].message.get("tool_calls")
 
             if tool_calls:
+                # Convert tool_calls to a serializable format for all providers
+                serializable_tool_calls = []
+                for tc in tool_calls:
+                    if isinstance(tc, dict):
+                        serializable_tool_calls.append(tc)  # Already a dict
+                    else:
+                        # Convert object to dict
+                        serializable_tool_calls.append({
+                            "id": tc.id,
+                            "type": getattr(tc, 'type', "function"),
+                            "function": {
+                                "name": tc.function.name,
+                                "arguments": tc.function.arguments
+                            }
+                        })
                 messages.append({
                     "role": "assistant",
                     "content": response_text,
-                    "tool_calls": tool_calls
+                    "tool_calls": serializable_tool_calls
                 })
 
                 for tool_call in tool_calls:
-                    function_name = tool_call.function.name
-                    arguments = json.loads(tool_call.function.arguments)
+                    # Handle both object and dict access patterns
+                    if isinstance(tool_call, dict):
+                        # Special handling for Ollama provider which may have a different structure
+                        if self.model and self.model.startswith("ollama/"):
+                            try:
+                                # Try standard format first
+                                if "function" in tool_call and isinstance(tool_call["function"], dict):
+                                    function_name = tool_call["function"]["name"]
+                                    arguments = json.loads(tool_call["function"]["arguments"])
+                                else:
+                                    # Try alternative format that Ollama might return
+                                    function_name = tool_call.get("name", "unknown_function")
+                                    arguments = json.loads(tool_call.get("arguments", "{}"))
+                                tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
+                            except Exception as e:
+                                logging.error(f"Error processing Ollama tool call: {e}")
+                                function_name = "unknown_function"
+                                arguments = {}
+                                tool_call_id = f"tool_{id(tool_call)}"
+                        else:
+                            # Standard format for other providers
+                            function_name = tool_call["function"]["name"]
+                            arguments = json.loads(tool_call["function"]["arguments"])
+                            tool_call_id = tool_call["id"]
+                    else:
+                        function_name = tool_call.function.name
+                        arguments = json.loads(tool_call.function.arguments)
+                        tool_call_id = tool_call.id
 
                     tool_result = await execute_tool_fn(function_name, arguments)
 
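Once either access pattern has produced `function_name` and a decoded `arguments` dict, execution is uniform. A hypothetical sketch of the dispatch step (this is not the package's actual `execute_tool_fn`, just the shape of the call it receives):

```python
import json

# Hypothetical tool registry standing in for whatever execute_tool_fn wraps.
TOOLS = {"get_weather": lambda city: {"city": city, "temp_c": 21}}

def execute_tool(function_name: str, arguments: dict):
    """Dispatch a parsed tool call; `arguments` has already been decoded
    with json.loads() from the provider's JSON string, as in the hunk above."""
    fn = TOOLS.get(function_name)
    if fn is None:
        return {"error": f"unknown tool {function_name!r}"}
    return fn(**arguments)

print(execute_tool("get_weather", json.loads('{"city": "Paris"}')))
```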
@@ -949,21 +1071,64 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         else:
                             display_message += "Function returned no output"
                         display_tool_call(display_message, console=console)
-                        messages.append({
-                            "role": "tool",
-                            "tool_call_id": tool_call.id,
-                            "content": json.dumps(tool_result)
-                        })
-                    else:
-                        messages.append({
-                            "role": "tool",
-                            "tool_call_id": tool_call.id,
-                            "content": "Function returned an empty output"
-                        })
+                    messages.append({
+                        "role": "tool",
+                        "tool_call_id": tool_call_id,
+                        "content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
+                    })
 
                 # Get response after tool calls
                 response_text = ""
-                if reasoning_steps:
+
+                # Special handling for Ollama models that don't automatically process tool results
+                if self.model and self.model.startswith("ollama/") and tool_result:
+                    # For Ollama models, we need to explicitly ask the model to process the tool results
+                    # First, check if the response is just a JSON tool call
+                    try:
+                        # If the response_text is a valid JSON that looks like a tool call,
+                        # we need to make a follow-up call to process the results
+                        json_response = json.loads(response_text.strip())
+                        if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
+                            logging.debug("Detected Ollama returning only tool call JSON in async mode, making follow-up call to process results")
+
+                            # Create a prompt that asks the model to process the tool results
+                            follow_up_prompt = f"I've searched for apartments and found these results. Please analyze them and provide a summary of the best options:\n\n{json.dumps(tool_result, indent=2)}\n\nPlease format your response as a nice summary with the top options."
+
+                            # Make a follow-up call to process the results
+                            follow_up_messages = [
+                                {"role": "user", "content": follow_up_prompt}
+                            ]
+
+                            # Get response with streaming
+                            if verbose:
+                                response_text = ""
+                                async for chunk in await litellm.acompletion(
+                                    model=self.model,
+                                    messages=follow_up_messages,
+                                    temperature=temperature,
+                                    stream=True
+                                ):
+                                    if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                        content = chunk.choices[0].delta.content
+                                        response_text += content
+                                        print("\033[K", end="\r")
+                                        print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
+                            else:
+                                response_text = ""
+                                async for chunk in await litellm.acompletion(
+                                    model=self.model,
+                                    messages=follow_up_messages,
+                                    temperature=temperature,
+                                    stream=True
+                                ):
+                                    if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                        response_text += chunk.choices[0].delta.content
+                    except (json.JSONDecodeError, KeyError):
+                        # Not a JSON response or not a tool call format, continue normally
+                        pass
+
+                # If no special handling was needed or if it's not an Ollama model
+                elif reasoning_steps:
                     # Non-streaming call to capture reasoning
                     resp = await litellm.acompletion(
                         model=self.model,
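Both the sync and async hunks consume the follow-up stream the same way: request with `stream=True`, then accumulate each chunk's `choices[0].delta.content`. A compact, self-contained sketch of the async variant (the function name and model string are illustrative assumptions):

```python
import asyncio
import litellm

async def stream_completion(model: str, prompt: str, temperature: float = 0.7) -> str:
    """Accumulate a streamed completion, mirroring the follow-up calls above.
    litellm.acompletion(..., stream=True) awaits to an async iterator whose
    chunks carry incremental text in choices[0].delta.content."""
    text = ""
    response = await litellm.acompletion(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        temperature=temperature,
        stream=True,
    )
    async for chunk in response:
        if chunk and chunk.choices and chunk.choices[0].delta.content:
            text += chunk.choices[0].delta.content
    return text

# Usage (hypothetical model name):
# print(asyncio.run(stream_completion("ollama/llama3", "Summarize these results.")))
```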
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.68
+Version: 0.0.70
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
@@ -10,7 +10,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
 praisonaiagents/knowledge/knowledge.py,sha256=fQNREDiwdoisfIxJBLVkteXgq_8Gbypfc3UaZbxf5QY,13210
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=l7Z2QjD9eFy0Zq5bwTVK7VOUHxeTyx866YWt3fS3vz8,74606
+praisonaiagents/llm/llm.py,sha256=tOdTbssYSBe-o0mA03Ocq_nJPisDZyD1K71qtzCoBRA,87065
 praisonaiagents/mcp/__init__.py,sha256=IkYdrAK1bDQDm_0t3Wjt63Zwv3_IJgqz84Wqz9GH2iQ,111
 praisonaiagents/mcp/mcp.py,sha256=BPPf5AIPXx28PaJJqOg6T3NRyymQH9YAD-Km7Ma9-KA,13681
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
@@ -39,7 +39,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.68.dist-info/METADATA,sha256=wMy9WDu6aGcQWrQIFT1UMOfB-wcvrGqpeDwXqbQXIvM,856
-praisonaiagents-0.0.68.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-praisonaiagents-0.0.68.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.68.dist-info/RECORD,,
+praisonaiagents-0.0.70.dist-info/METADATA,sha256=tHQAMvxoSDYguBJ3YDzjkHhHU1vUV6EzdPZ9btlk_Lo,856
+praisonaiagents-0.0.70.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+praisonaiagents-0.0.70.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.70.dist-info/RECORD,,