praisonaiagents 0.0.130__py3-none-any.whl → 0.0.131__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
praisonaiagents/agent/agent.py CHANGED
@@ -27,7 +27,8 @@ from ..main import (
     display_self_reflection,
     ReflectionOutput,
     adisplay_instruction,
-    approval_callback
+    approval_callback,
+    execute_sync_callback
 )
 import inspect
 import uuid
@@ -424,7 +425,13 @@ class Agent:
         # Otherwise, fall back to OpenAI environment/name
         else:
             self.llm = llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
-        self.tools = tools if tools else [] # Store original tools
+        # Handle tools parameter - ensure it's always a list
+        if callable(tools):
+            # If a single function/callable is passed, wrap it in a list
+            self.tools = [tools]
+        else:
+            # Handle all falsy values (None, False, 0, "", etc.) by defaulting to empty list
+            self.tools = tools or []
         self.function_calling_llm = function_calling_llm
         self.max_iter = max_iter
         self.max_rpm = max_rpm
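
The normalization above lets callers hand a bare function to `Agent(tools=...)`. A minimal sketch of the new behavior, assuming `Agent` is importable from the package top level and accepts an `instructions` keyword as in the project's examples:

```python
from praisonaiagents import Agent

def get_weather(city: str) -> str:
    """Toy tool: return a canned weather report for a city."""
    return f"Sunny in {city}"

# A single callable is now wrapped into a one-element list automatically.
agent = Agent(instructions="You are a helpful assistant", tools=get_weather)
assert agent.tools == [get_weather]

# Any falsy value (None, [], 0, "") still normalizes to an empty list.
agent_without_tools = Agent(instructions="No tools here", tools=None)
assert agent_without_tools.tools == []
```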
@@ -1093,6 +1100,10 @@ Your Goal: {self.goal}"""
                 execute_tool_fn=self.execute_tool,
                 agent_name=self.name,
                 agent_role=self.role,
+                agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
+                task_name=task_name,
+                task_description=task_description,
+                task_id=task_id,
                 reasoning_steps=reasoning_steps
             )
         else:
@@ -1109,6 +1120,10 @@ Your Goal: {self.goal}"""
                 execute_tool_fn=self.execute_tool,
                 agent_name=self.name,
                 agent_role=self.role,
+                agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
+                task_name=task_name,
+                task_description=task_description,
+                task_id=task_id,
                 reasoning_steps=reasoning_steps
             )
         else:
@@ -1142,8 +1157,39 @@ Your Goal: {self.goal}"""
         except Exception as e:
             display_error(f"Error in chat completion: {e}")
             return None
-
-    def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True):
+
+    def _execute_callback_and_display(self, prompt: str, response: str, generation_time: float):
+        """Helper method to execute callbacks and display interaction.
+
+        This centralizes the logic for callback execution and display to avoid duplication.
+        """
+        # Always execute callbacks regardless of verbose setting (only when not using custom LLM)
+        if not self._using_custom_llm:
+            execute_sync_callback(
+                'interaction',
+                message=prompt,
+                response=response,
+                markdown=self.markdown,
+                generation_time=generation_time,
+                agent_name=self.name,
+                agent_role=self.role,
+                agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
+                task_name=None,  # Not available in this context
+                task_description=None,  # Not available in this context
+                task_id=None  # Not available in this context
+            )
+        # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+        if self.verbose and not self._using_custom_llm:
+            display_interaction(prompt, response, markdown=self.markdown,
+                                generation_time=generation_time, console=self.console,
+                                agent_name=self.name,
+                                agent_role=self.role,
+                                agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
+                                task_name=None,  # Not available in this context
+                                task_description=None,  # Not available in this context
+                                task_id=None)  # Not available in this context
+
+    def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True, task_name=None, task_description=None, task_id=None):
         # Log all parameter values when in debug mode
         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
             param_info = {
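
With the widened signature, task metadata threads through `chat()` to the display and callback layers. A hedged sketch of a direct caller; the metadata values are illustrative, and in normal use PraisonAIAgents supplies them when it executes a Task (see the agents.py hunks below):

```python
# The three task_* parameters default to None, so existing callers are unaffected.
result = agent.chat(
    "Summarize the quarterly report",
    task_name="summarize-report",           # illustrative value
    task_description="Summarize Q3 data",   # illustrative value
    task_id="task-42",                      # illustrative value
)
```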
@@ -1234,6 +1280,9 @@ Your Goal: {self.goal}"""
                     agent_name=self.name,
                     agent_role=self.role,
                     agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
+                    task_name=task_name,
+                    task_description=task_description,
+                    task_id=task_id,
                     execute_tool_fn=self.execute_tool, # Pass tool execution function
                     reasoning_steps=reasoning_steps,
                     stream=stream # Pass the stream parameter from chat method
@@ -1321,25 +1370,30 @@ Your Goal: {self.goal}"""
                     # Add to chat history and return raw response
                     # User message already added before LLM call via _build_messages
                     self.chat_history.append({"role": "assistant", "content": response_text})
-                    # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
-                    if self.verbose and not self._using_custom_llm:
-                        display_interaction(original_prompt, response_text, markdown=self.markdown,
-                                        generation_time=time.time() - start_time, console=self.console)
-                    return response_text
+                    # Apply guardrail validation even for JSON output
+                    try:
+                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                        # Execute callback after validation
+                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                        return validated_response
+                    except Exception as e:
+                        logging.error(f"Agent {self.name}: Guardrail validation failed for JSON output: {e}")
+                        # Rollback chat history on guardrail failure
+                        self.chat_history = self.chat_history[:chat_history_length]
+                        return None

                 if not self.self_reflect:
                     # User message already added before LLM call via _build_messages
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     if self.verbose:
                         logging.debug(f"Agent {self.name} final response: {response_text}")
-                    # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
-                    if self.verbose and not self._using_custom_llm:
-                        display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                     # Return only reasoning content if reasoning_steps is True
                     if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
                         # Apply guardrail to reasoning content
                         try:
                             validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools)
+                            # Execute callback after validation
+                            self._execute_callback_and_display(original_prompt, validated_reasoning, time.time() - start_time)
                             return validated_reasoning
                         except Exception as e:
                             logging.error(f"Agent {self.name}: Guardrail validation failed for reasoning content: {e}")
@@ -1349,6 +1403,8 @@ Your Goal: {self.goal}"""
                     # Apply guardrail to regular response
                     try:
                         validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                        # Execute callback after validation
+                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
                         return validated_response
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
@@ -1412,12 +1468,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
                     # User message already added before LLM call via _build_messages
                     self.chat_history.append({"role": "assistant", "content": response_text})
-                    # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
-                    if self.verbose and not self._using_custom_llm:
-                        display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                     # Apply guardrail validation after satisfactory reflection
                     try:
                         validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                        # Execute callback after validation
+                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
                         return validated_response
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
@@ -1431,12 +1486,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
                     # User message already added before LLM call via _build_messages
                     self.chat_history.append({"role": "assistant", "content": response_text})
-                    # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
-                    if self.verbose and not self._using_custom_llm:
-                        display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                     # Apply guardrail validation after max reflections
                     try:
                         validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                        # Execute callback after validation
+                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
                         return validated_response
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed after max reflections: {e}")
@@ -1551,6 +1605,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     agent_name=self.name,
                     agent_role=self.role,
                     agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
+                    task_name=task_name,
+                    task_description=task_description,
+                    task_id=task_id,
                     execute_tool_fn=self.execute_tool_async,
                     reasoning_steps=reasoning_steps
                 )
praisonaiagents/agent/image_agent.py CHANGED
@@ -144,8 +144,17 @@ class ImageAgent(Agent):
         config = self.image_config.dict(exclude_none=True)
         config.update(kwargs)

-        # Use llm parameter as the model
-        config['model'] = self.llm
+        # Get the model name robustly from the parent Agent's property
+        model_info = self.llm_model
+        model_name = model_info.model if hasattr(model_info, 'model') else str(model_info)
+
+        # Use the model name in config
+        config['model'] = model_name
+
+        # Check if we're using a Gemini model and remove unsupported parameters
+        if 'gemini' in model_name.lower():
+            # Gemini models don't support response_format parameter
+            config.pop('response_format', None)

         with Progress(
             SpinnerColumn(),
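
A sketch of what the Gemini branch enables; the model id is illustrative, and the generation entry point is assumed to follow the package's usual agent interface:

```python
from praisonaiagents.agent.image_agent import ImageAgent

# With a Gemini model name, response_format is now dropped from the litellm
# config automatically, so the request no longer carries an unsupported parameter.
agent = ImageAgent(llm="gemini/imagen-3")  # hypothetical model id
result = agent.chat("A watercolor lighthouse at dusk")  # assumed entry point
```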
@@ -154,7 +163,7 @@ class ImageAgent(Agent):
         ) as progress:
             try:
                 # Add a task for image generation
-                task = progress.add_task(f"[cyan]Generating image with {self.llm}...", total=None)
+                task = progress.add_task(f"[cyan]Generating image with {model_name}...", total=None)

                 # Use litellm's image generation
                 response = self.litellm(
praisonaiagents/agents/agents.py CHANGED
@@ -491,18 +491,35 @@ Context:
                     tasks_to_run = []

                     # Run sync task in an executor to avoid blocking the event loop
-                    loop = asyncio.get_event_loop()
+                    loop = asyncio.get_running_loop()
                     await loop.run_in_executor(None, self.run_task, task_id)

             if tasks_to_run:
                 await asyncio.gather(*tasks_to_run)

         elif self.process == "sequential":
+            async_tasks_to_run = []
+
+            async def flush_async_tasks():
+                """Execute all pending async tasks"""
+                nonlocal async_tasks_to_run
+                if async_tasks_to_run:
+                    await asyncio.gather(*async_tasks_to_run)
+                    async_tasks_to_run = []
+
             async for task_id in process.asequential():
                 if self.tasks[task_id].async_execution:
-                    await self.arun_task(task_id)
+                    # Collect async tasks to run in parallel
+                    async_tasks_to_run.append(self.arun_task(task_id))
                 else:
-                    self.run_task(task_id)
+                    # Before running a sync task, execute all pending async tasks
+                    await flush_async_tasks()
+                    # Run sync task in an executor to avoid blocking the event loop
+                    loop = asyncio.get_running_loop()
+                    await loop.run_in_executor(None, self.run_task, task_id)
+
+            # Execute any remaining async tasks at the end
+            await flush_async_tasks()
         elif self.process == "hierarchical":
             async for task_id in process.ahierarchical():
                 if isinstance(task_id, Task):
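
The sequential branch now batches consecutive async tasks and flushes them with `asyncio.gather` before any blocking step, so independent async tasks overlap instead of running one at a time. The pattern in isolation, with placeholder task functions:

```python
import asyncio
import time

async def async_task(n: int) -> None:
    await asyncio.sleep(0.1)   # stand-in for an async agent task
    print(f"async task {n} done")

def sync_task(n: int) -> None:
    time.sleep(0.1)            # stand-in for a blocking agent task
    print(f"sync task {n} done")

async def run_sequence() -> None:
    pending = []
    loop = asyncio.get_running_loop()
    for n, is_async in enumerate([True, True, False, True]):
        if is_async:
            pending.append(async_task(n))                   # collect async tasks
        else:
            if pending:                                     # flush the batch in parallel
                await asyncio.gather(*pending)
                pending = []
            await loop.run_in_executor(None, sync_task, n)  # keep the loop unblocked
    if pending:                                             # flush any remainder
        await asyncio.gather(*pending)

asyncio.run(run_sequence())
```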
@@ -510,7 +527,9 @@ Context:
                 if self.tasks[task_id].async_execution:
                     await self.arun_task(task_id)
                 else:
-                    self.run_task(task_id)
+                    # Run sync task in an executor to avoid blocking the event loop
+                    loop = asyncio.get_running_loop()
+                    await loop.run_in_executor(None, self.run_task, task_id)

     async def astart(self, content=None, return_dict=False, **kwargs):
         """Async version of start method
@@ -670,7 +689,10 @@ Context:
                         _get_multimodal_message(task_prompt, task.images),
                         tools=task.tools,
                         output_json=task.output_json,
-                        output_pydantic=task.output_pydantic
+                        output_pydantic=task.output_pydantic,
+                        task_name=task.name,
+                        task_description=task.description,
+                        task_id=task_id
                     )
                 else:
                     agent_output = executor_agent.chat(
@@ -679,6 +701,9 @@
                         output_json=task.output_json,
                         output_pydantic=task.output_pydantic,
                         stream=self.stream,
+                        task_name=task.name,
+                        task_description=task.description,
+                        task_id=task_id
                     )

                 if agent_output:
@@ -1116,7 +1141,7 @@ Context:
                     response = await agent_instance.achat(current_input)
                 else:
                     # Run sync function in a thread to avoid blocking
-                    loop = asyncio.get_event_loop()
+                    loop = asyncio.get_running_loop()
                     # Correctly pass current_input to the lambda for closure
                     response = await loop.run_in_executor(None, lambda ci=current_input: agent_instance.chat(ci))

@@ -1271,7 +1296,7 @@ Context:
                 if hasattr(agent_instance, 'achat') and asyncio.iscoroutinefunction(agent_instance.achat):
                     response = await agent_instance.achat(current_input, tools=agent_instance.tools)
                 elif hasattr(agent_instance, 'chat'): # Fallback to sync chat if achat not suitable
-                    loop = asyncio.get_event_loop()
+                    loop = asyncio.get_running_loop()
                     response = await loop.run_in_executor(None, lambda ci=current_input: agent_instance.chat(ci, tools=agent_instance.tools))
                 else:
                     logging.warning(f"Agent {agent_instance.name} has no suitable chat or achat method.")
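
The repeated get_event_loop → get_running_loop swaps follow asyncio's guidance: inside a coroutine, `asyncio.get_running_loop()` always returns the active loop, whereas `asyncio.get_event_loop()` is deprecated for this use and can create a stray loop when no loop is running. A tiny sketch of the pattern these call sites rely on:

```python
import asyncio

def blocking_work() -> int:
    # Stand-in for a synchronous agent.chat() call.
    return sum(range(1_000_000))

async def main() -> int:
    # Correct inside a coroutine: the running loop is guaranteed to exist.
    loop = asyncio.get_running_loop()
    # Offload the blocking call to the default thread pool executor.
    return await loop.run_in_executor(None, blocking_work)

print(asyncio.run(main()))
```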
praisonaiagents/llm/llm.py CHANGED
@@ -14,6 +14,7 @@ from ..main import (
     display_generating,
     display_self_reflection,
     ReflectionOutput,
+    execute_sync_callback,
 )
 from rich.console import Console
 from rich.live import Live
@@ -130,8 +131,10 @@ class LLM:
         if 'tools' in safe_config:
             tools = safe_config['tools']
             # Check if tools is iterable before processing
-            if tools and hasattr(tools, '__iter__') and not isinstance(tools, str):
+            if tools and isinstance(tools, (list, tuple)):
                 safe_config['tools'] = [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools]
+            elif tools and callable(tools):
+                safe_config['tools'] = tools.__name__ if hasattr(tools, "__name__") else str(tools)
             else:
                 safe_config['tools'] = None
         if 'output_json' in safe_config:
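
This mirrors the Agent-side change: a bare callable passed as `tools` now shows up by name in the sanitized debug config instead of falling through to None. The normalization rule in isolation:

```python
def summarize_tools(tools):
    # Same rule the LLM debug logger now applies to safe_config['tools'].
    if tools and isinstance(tools, (list, tuple)):
        return [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools]
    if tools and callable(tools):
        return tools.__name__ if hasattr(tools, "__name__") else str(tools)
    return None

def search(query: str) -> str:
    return f"results for {query}"

print(summarize_tools([search, "builtin"]))  # ['search', 'builtin']
print(summarize_tools(search))               # 'search'
print(summarize_tools(None))                 # None
```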
@@ -617,6 +620,9 @@ class LLM:
         agent_name: Optional[str] = None,
         agent_role: Optional[str] = None,
         agent_tools: Optional[List[str]] = None,
+        task_name: Optional[str] = None,
+        task_description: Optional[str] = None,
+        task_id: Optional[str] = None,
         execute_tool_fn: Optional[Callable] = None,
         stream: bool = True,
         **kwargs
@@ -692,6 +698,7 @@ class LLM:

         start_time = time.time()
         reflection_count = 0
+        callback_executed = False  # Track if callback has been executed for this interaction
         interaction_displayed = False  # Track if interaction has been displayed

         # Display initial instruction once
@@ -737,6 +744,19 @@ class LLM:
                         response_text = resp["choices"][0]["message"]["content"]
                         final_response = resp

+                        # Always execute callbacks regardless of verbose setting
+                        generation_time_val = time.time() - current_time
+                        interaction_displayed = False
+
+                        response_content = f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}" if reasoning_content else response_text
+                        execute_sync_callback(
+                            'interaction',
+                            message=original_prompt,
+                            response=response_content,
+                            markdown=markdown,
+                            generation_time=generation_time_val
+                        )
+
                         # Optionally display reasoning if present
                         if verbose and reasoning_content and not interaction_displayed:
                             display_interaction(
@@ -744,7 +764,13 @@ class LLM:
                                 f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
                                 markdown=markdown,
                                 generation_time=time.time() - current_time,
-                                console=console
+                                console=console,
+                                agent_name=agent_name,
+                                agent_role=agent_role,
+                                agent_tools=agent_tools,
+                                task_name=task_name,
+                                task_description=task_description,
+                                task_id=task_id
                             )
                             interaction_displayed = True
                         elif verbose and not interaction_displayed:
@@ -753,7 +779,13 @@ class LLM:
                                 response_text,
                                 markdown=markdown,
                                 generation_time=time.time() - current_time,
-                                console=console
+                                console=console,
+                                agent_name=agent_name,
+                                agent_role=agent_role,
+                                agent_tools=agent_tools,
+                                task_name=task_name,
+                                task_description=task_description,
+                                task_id=task_id
                             )
                             interaction_displayed = True

@@ -815,6 +847,16 @@ class LLM:

                         response_text = response_text.strip() if response_text else ""

+                        # Always execute callbacks after streaming completes
+                        execute_sync_callback(
+                            'interaction',
+                            message=original_prompt,
+                            response=response_text,
+                            markdown=markdown,
+                            generation_time=time.time() - current_time
+                        )
+
+
                         # Create a mock final_response with the captured data
                         final_response = {
                             "choices": [{
@@ -839,6 +881,16 @@ class LLM:
                         )
                         response_text = final_response["choices"][0]["message"]["content"]

+                        # Always execute callbacks regardless of verbose setting
+                        execute_sync_callback(
+                            'interaction',
+                            message=original_prompt,
+                            response=response_text,
+                            markdown=markdown,
+                            generation_time=time.time() - current_time
+                        )
+
+
                     if verbose and not interaction_displayed:
                         # Display the complete response at once
                         display_interaction(
@@ -846,7 +898,13 @@ class LLM:
                             response_text,
                             markdown=markdown,
                             generation_time=time.time() - current_time,
-                            console=console
+                            console=console,
+                            agent_name=agent_name,
+                            agent_role=agent_role,
+                            agent_tools=agent_tools,
+                            task_name=task_name,
+                            task_description=task_description,
+                            task_id=task_id
                         )
                         interaction_displayed = True

@@ -926,16 +984,14 @@ class LLM:
                         iteration_count += 1
                         continue

-                    # For Ollama, add explicit prompt if we need a final answer
-                    if self._is_ollama_provider() and iteration_count > 0:
-                        # Add an explicit prompt for Ollama to generate the final answer
-                        messages.append({
-                            "role": "user",
-                            "content": self.OLLAMA_FINAL_ANSWER_PROMPT
-                        })
+                    # Check if the LLM provided a final answer alongside the tool calls
+                    # If response_text contains substantive content, treat it as the final answer
+                    if response_text and response_text.strip() and len(response_text.strip()) > 10:
+                        # LLM provided a final answer after tool execution, don't continue
+                        final_response_text = response_text.strip()
+                        break

-                    # After tool execution, continue the loop to check if more tools are needed
-                    # instead of immediately trying to get a final response
+                    # Otherwise, continue the loop to check if more tools are needed
                     iteration_count += 1
                     continue
                 else:
@@ -954,6 +1010,17 @@ class LLM:
                     return final_response_text

                 # No tool calls were made in this iteration, return the response
+                # Always execute callbacks regardless of verbose setting
+                generation_time_val = time.time() - start_time
+                response_content = f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{response_text}" if stored_reasoning_content else response_text
+                execute_sync_callback(
+                    'interaction',
+                    message=original_prompt,
+                    response=response_content,
+                    markdown=markdown,
+                    generation_time=generation_time_val
+                )
+
                 if verbose and not interaction_displayed:
                     # If we have stored reasoning content from tool execution, display it
                     if stored_reasoning_content:
@@ -962,7 +1029,13 @@ class LLM:
                             f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{response_text}",
                             markdown=markdown,
                             generation_time=time.time() - start_time,
-                            console=console
+                            console=console,
+                            agent_name=agent_name,
+                            agent_role=agent_role,
+                            agent_tools=agent_tools,
+                            task_name=task_name,
+                            task_description=task_description,
+                            task_id=task_id
                         )
                     else:
                         display_interaction(
@@ -970,7 +1043,13 @@ class LLM:
                             response_text,
                             markdown=markdown,
                             generation_time=time.time() - start_time,
-                            console=console
+                            console=console,
+                            agent_name=agent_name,
+                            agent_role=agent_role,
+                            agent_tools=agent_tools,
+                            task_name=task_name,
+                            task_description=task_description,
+                            task_id=task_id
                         )
                     interaction_displayed = True

@@ -984,16 +1063,39 @@ class LLM:
                 if output_json or output_pydantic:
                     self.chat_history.append({"role": "user", "content": original_prompt})
                     self.chat_history.append({"role": "assistant", "content": response_text})
+                    # Always execute callbacks regardless of verbose setting
+                    if not interaction_displayed:
+                        execute_sync_callback(
+                            'interaction',
+                            message=original_prompt,
+                            response=response_text,
+                            markdown=markdown,
+                            generation_time=time.time() - start_time
+                        )
                     if verbose and not interaction_displayed:
                         display_interaction(original_prompt, response_text, markdown=markdown,
-                                        generation_time=time.time() - start_time, console=console)
+                                        generation_time=time.time() - start_time, console=console,
+                                        agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                        task_name=task_name, task_description=task_description, task_id=task_id)
                         interaction_displayed = True
                     return response_text

                 if not self_reflect:
+                    # Always execute callbacks regardless of verbose setting
+                    if not interaction_displayed:
+                        execute_sync_callback(
+                            'interaction',
+                            message=original_prompt,
+                            response=response_text,
+                            markdown=markdown,
+                            generation_time=time.time() - start_time
+                        )
+
                     if verbose and not interaction_displayed:
                         display_interaction(original_prompt, response_text, markdown=markdown,
-                                        generation_time=time.time() - start_time, console=console)
+                                        generation_time=time.time() - start_time, console=console,
+                                        agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                        task_name=task_name, task_description=task_description, task_id=task_id)
                         interaction_displayed = True
                     # Return reasoning content if reasoning_steps is True
                     if reasoning_steps and stored_reasoning_content:
@@ -1039,7 +1141,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                             f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
                             markdown=markdown,
                             generation_time=time.time() - start_time,
-                            console=console
+                            console=console,
+                            agent_name=agent_name,
+                            agent_role=agent_role,
+                            agent_tools=agent_tools,
+                            task_name=task_name,
+                            task_description=task_description,
+                            task_id=task_id
                         )
                     elif verbose:
                         display_interaction(
@@ -1047,7 +1155,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                             reflection_text,
                             markdown=markdown,
                             generation_time=time.time() - start_time,
-                            console=console
+                            console=console,
+                            agent_name=agent_name,
+                            agent_role=agent_role,
+                            agent_tools=agent_tools,
+                            task_name=task_name,
+                            task_description=task_description,
+                            task_id=task_id
                         )
                 else:
                     # Existing streaming approach
@@ -1098,14 +1212,18 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 if satisfactory and reflection_count >= min_reflect - 1:
                     if verbose and not interaction_displayed:
                         display_interaction(prompt, response_text, markdown=markdown,
-                                        generation_time=time.time() - start_time, console=console)
+                                        generation_time=time.time() - start_time, console=console,
+                                        agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                        task_name=task_name, task_description=task_description, task_id=task_id)
                         interaction_displayed = True
                     return response_text

                 if reflection_count >= max_reflect - 1:
                     if verbose and not interaction_displayed:
                         display_interaction(prompt, response_text, markdown=markdown,
-                                        generation_time=time.time() - start_time, console=console)
+                                        generation_time=time.time() - start_time, console=console,
+                                        agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                        task_name=task_name, task_description=task_description, task_id=task_id)
                         interaction_displayed = True
                     return response_text

@@ -1126,6 +1244,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     messages=messages,
                     temperature=temperature,
                     stream=True,
+                    tools=formatted_tools,
                     output_json=output_json,
                     output_pydantic=output_pydantic,
                     **kwargs
@@ -1142,6 +1261,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     messages=messages,
                     temperature=temperature,
                     stream=True,
+                    tools=formatted_tools,
                     output_json=output_json,
                     output_pydantic=output_pydantic,
                     **kwargs
@@ -1158,7 +1278,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 if reflection_count >= max_reflect:
                     if verbose and not interaction_displayed:
                         display_interaction(prompt, response_text, markdown=markdown,
-                                        generation_time=time.time() - start_time, console=console)
+                                        generation_time=time.time() - start_time, console=console,
+                                        agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                        task_name=task_name, task_description=task_description, task_id=task_id)
                         interaction_displayed = True
                     return response_text
                 continue
@@ -1206,6 +1328,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         agent_name: Optional[str] = None,
         agent_role: Optional[str] = None,
         agent_tools: Optional[List[str]] = None,
+        task_name: Optional[str] = None,
+        task_description: Optional[str] = None,
+        task_id: Optional[str] = None,
         execute_tool_fn: Optional[Callable] = None,
         stream: bool = True,
         **kwargs
@@ -1313,7 +1438,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
                     markdown=markdown,
                     generation_time=time.time() - start_time,
-                    console=console
+                    console=console,
+                    agent_name=agent_name,
+                    agent_role=agent_role,
+                    agent_tools=agent_tools,
+                    task_name=task_name,
+                    task_description=task_description,
+                    task_id=task_id
                 )
                 interaction_displayed = True
             elif verbose and not interaction_displayed:
@@ -1322,7 +1453,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     response_text,
                     markdown=markdown,
                     generation_time=time.time() - start_time,
-                    console=console
+                    console=console,
+                    agent_name=agent_name,
+                    agent_role=agent_role,
+                    agent_tools=agent_tools,
+                    task_name=task_name,
+                    task_description=task_description,
+                    task_id=task_id
                 )
                 interaction_displayed = True
         else:
@@ -1406,7 +1543,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     response_text,
                     markdown=markdown,
                     generation_time=time.time() - start_time,
-                    console=console
+                    console=console,
+                    agent_name=agent_name,
+                    agent_role=agent_role,
+                    agent_tools=agent_tools,
+                    task_name=task_name,
+                    task_description=task_description,
+                    task_id=task_id
                 )
                 interaction_displayed = True

@@ -1500,7 +1643,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                             f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
                             markdown=markdown,
                             generation_time=time.time() - start_time,
-                            console=console
+                            console=console,
+                            agent_name=agent_name,
+                            agent_role=agent_role,
+                            agent_tools=agent_tools,
+                            task_name=task_name,
+                            task_description=task_description,
+                            task_id=task_id
                         )
                         interaction_displayed = True
                     elif verbose and not interaction_displayed:
@@ -1509,7 +1658,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                             response_text,
                             markdown=markdown,
                             generation_time=time.time() - start_time,
-                            console=console
+                            console=console,
+                            agent_name=agent_name,
+                            agent_role=agent_role,
+                            agent_tools=agent_tools,
+                            task_name=task_name,
+                            task_description=task_description,
+                            task_id=task_id
                         )
                         interaction_displayed = True
                     else:
@@ -1559,6 +1714,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     if reasoning_steps and reasoning_content:
                         stored_reasoning_content = reasoning_content

+                    # Check if the LLM provided a final answer alongside the tool calls
+                    # If response_text contains substantive content, treat it as the final answer
+                    if response_text and response_text.strip() and len(response_text.strip()) > 10:
+                        # LLM provided a final answer after tool execution, don't continue
+                        final_response_text = response_text.strip()
+                        break
+
                     # Continue the loop to check if more tools are needed
                     iteration_count += 1
                     continue
@@ -1575,7 +1737,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     if verbose and not interaction_displayed:
                         display_interaction(original_prompt, response_text, markdown=markdown,
-                                        generation_time=time.time() - start_time, console=console)
+                                        generation_time=time.time() - start_time, console=console,
+                                        agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                        task_name=task_name, task_description=task_description, task_id=task_id)
                         interaction_displayed = True
                     return response_text

@@ -1591,11 +1755,19 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                             f"Reasoning:\n{stored_reasoning_content}\n\nAnswer:\n{display_text}",
                             markdown=markdown,
                             generation_time=time.time() - start_time,
-                            console=console
+                            console=console,
+                            agent_name=agent_name,
+                            agent_role=agent_role,
+                            agent_tools=agent_tools,
+                            task_name=task_name,
+                            task_description=task_description,
+                            task_id=task_id
                         )
                     else:
                         display_interaction(original_prompt, display_text, markdown=markdown,
-                                        generation_time=time.time() - start_time, console=console)
+                                        generation_time=time.time() - start_time, console=console,
+                                        agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                        task_name=task_name, task_description=task_description, task_id=task_id)
                     interaction_displayed = True

                 # Return reasoning content if reasoning_steps is True and we have it
@@ -1640,7 +1812,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         f"{reasoning_content}\n\nReflection result:\n{reflection_text}",
                         markdown=markdown,
                         generation_time=time.time() - start_time,
-                        console=console
+                        console=console,
+                        agent_name=agent_name,
+                        agent_role=agent_role,
+                        agent_tools=agent_tools,
+                        task_name=task_name,
+                        task_description=task_description,
+                        task_id=task_id
                     )
                 elif verbose:
                     display_interaction(
@@ -1648,7 +1826,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         reflection_text,
                         markdown=markdown,
                         generation_time=time.time() - start_time,
-                        console=console
+                        console=console,
+                        agent_name=agent_name,
+                        agent_role=agent_role,
+                        agent_tools=agent_tools,
+                        task_name=task_name,
+                        task_description=task_description,
+                        task_id=task_id
                     )
                 else:
                     # Existing streaming approach
@@ -1700,14 +1884,18 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 if satisfactory and reflection_count >= min_reflect - 1:
                     if verbose and not interaction_displayed:
                         display_interaction(prompt, response_text, markdown=markdown,
-                                        generation_time=time.time() - start_time, console=console)
+                                        generation_time=time.time() - start_time, console=console,
+                                        agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                        task_name=task_name, task_description=task_description, task_id=task_id)
                         interaction_displayed = True
                     return response_text

                 if reflection_count >= max_reflect - 1:
                     if verbose and not interaction_displayed:
                         display_interaction(prompt, response_text, markdown=markdown,
-                                        generation_time=time.time() - start_time, console=console)
+                                        generation_time=time.time() - start_time, console=console,
+                                        agent_name=agent_name, agent_role=agent_role, agent_tools=agent_tools,
+                                        task_name=task_name, task_description=task_description, task_id=task_id)
                         interaction_displayed = True
                     return response_text

@@ -1972,6 +2160,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         verbose: bool = True,
         markdown: bool = True,
         console: Optional[Console] = None,
+        agent_name: Optional[str] = None,
+        agent_role: Optional[str] = None,
+        agent_tools: Optional[List[str]] = None,
+        task_name: Optional[str] = None,
+        task_description: Optional[str] = None,
+        task_id: Optional[str] = None,
         **kwargs
     ) -> str:
         """Simple function to get model response without tool calls or complex features"""
@@ -2040,7 +2234,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     response_text,
                     markdown=markdown,
                     generation_time=time.time() - start_time,
-                    console=console or self.console
+                    console=console or self.console,
+                    agent_name=agent_name,
+                    agent_role=agent_role,
+                    agent_tools=agent_tools,
+                    task_name=task_name,
+                    task_description=task_description,
+                    task_id=task_id
                 )

             return response_text.strip() if response_text else ""
@@ -2059,6 +2259,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         verbose: bool = True,
         markdown: bool = True,
         console: Optional[Console] = None,
+        agent_name: Optional[str] = None,
+        agent_role: Optional[str] = None,
+        agent_tools: Optional[List[str]] = None,
+        task_name: Optional[str] = None,
+        task_description: Optional[str] = None,
+        task_id: Optional[str] = None,
         **kwargs
     ) -> str:
         """Async version of response function"""
@@ -2128,7 +2334,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     response_text,
                     markdown=markdown,
                     generation_time=time.time() - start_time,
-                    console=console or self.console
+                    console=console or self.console,
+                    agent_name=agent_name,
+                    agent_role=agent_role,
+                    agent_tools=agent_tools,
+                    task_name=task_name,
+                    task_description=task_description,
+                    task_id=task_id
                 )

             return response_text.strip() if response_text else ""
praisonaiagents/main.py CHANGED
@@ -62,6 +62,30 @@ def register_approval_callback(callback_fn):
     global approval_callback
     approval_callback = callback_fn

+def execute_sync_callback(display_type: str, **kwargs):
+    """Execute synchronous callback for a given display type without displaying anything.
+
+    This function is used to trigger callbacks even when verbose=False.
+
+    Args:
+        display_type (str): Type of display event
+        **kwargs: Arguments to pass to the callback function
+    """
+    if display_type in sync_display_callbacks:
+        callback = sync_display_callbacks[display_type]
+        import inspect
+        sig = inspect.signature(callback)
+
+        # Filter kwargs to what the callback accepts to maintain backward compatibility
+        if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()):
+            # Callback accepts **kwargs, so pass all arguments
+            supported_kwargs = kwargs
+        else:
+            # Only pass arguments that the callback signature supports
+            supported_kwargs = {k: v for k, v in kwargs.items() if k in sig.parameters}
+
+        callback(**supported_kwargs)
+
 async def execute_callback(display_type: str, **kwargs):
     """Execute both sync and async callbacks for a given display type.

@@ -69,16 +93,38 @@ async def execute_callback(display_type: str, **kwargs):
         display_type (str): Type of display event
         **kwargs: Arguments to pass to the callback functions
     """
+    import inspect
+
     # Execute synchronous callback if registered
     if display_type in sync_display_callbacks:
         callback = sync_display_callbacks[display_type]
+        sig = inspect.signature(callback)
+
+        # Filter kwargs to what the callback accepts to maintain backward compatibility
+        if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()):
+            # Callback accepts **kwargs, so pass all arguments
+            supported_kwargs = kwargs
+        else:
+            # Only pass arguments that the callback signature supports
+            supported_kwargs = {k: v for k, v in kwargs.items() if k in sig.parameters}
+
         loop = asyncio.get_event_loop()
-        await loop.run_in_executor(None, lambda: callback(**kwargs))
+        await loop.run_in_executor(None, lambda: callback(**supported_kwargs))

     # Execute asynchronous callback if registered
     if display_type in async_display_callbacks:
         callback = async_display_callbacks[display_type]
-        await callback(**kwargs)
+        sig = inspect.signature(callback)
+
+        # Filter kwargs to what the callback accepts to maintain backward compatibility
+        if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()):
+            # Callback accepts **kwargs, so pass all arguments
+            supported_kwargs = kwargs
+        else:
+            # Only pass arguments that the callback signature supports
+            supported_kwargs = {k: v for k, v in kwargs.items() if k in sig.parameters}
+
+        await callback(**supported_kwargs)

 def _clean_display_content(content: str, max_length: int = 20000) -> str:
     """Helper function to clean and truncate content for display."""
@@ -101,7 +147,7 @@ def _clean_display_content(content: str, max_length: int = 20000) -> str:

     return content.strip()

-def display_interaction(message, response, markdown=True, generation_time=None, console=None):
+def display_interaction(message, response, markdown=True, generation_time=None, console=None, agent_name=None, agent_role=None, agent_tools=None, task_name=None, task_description=None, task_id=None):
     """Synchronous version of display_interaction."""
     if console is None:
         console = Console()
@@ -113,15 +159,35 @@ def display_interaction(message, response, markdown=True, generation_time=None,
     message = _clean_display_content(str(message))
     response = _clean_display_content(str(response))

+
     # Execute synchronous callback if registered
     if 'interaction' in sync_display_callbacks:
-        sync_display_callbacks['interaction'](
-            message=message,
-            response=response,
-            markdown=markdown,
-            generation_time=generation_time
-        )
-
+        callback = sync_display_callbacks['interaction']
+        import inspect
+        sig = inspect.signature(callback)
+
+        all_kwargs = {
+            'message': message,
+            'response': response,
+            'markdown': markdown,
+            'generation_time': generation_time,
+            'agent_name': agent_name,
+            'agent_role': agent_role,
+            'agent_tools': agent_tools,
+            'task_name': task_name,
+            'task_description': task_description,
+            'task_id': task_id
+        }
+
+        # Filter kwargs to what the callback accepts to maintain backward compatibility
+        if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()):
+            # Callback accepts **kwargs, so pass all arguments
+            supported_kwargs = all_kwargs
+        else:
+            # Only pass arguments that the callback signature supports
+            supported_kwargs = {k: v for k, v in all_kwargs.items() if k in sig.parameters}
+
+        callback(**supported_kwargs)
     # Rest of the display logic...
     if generation_time:
         console.print(Text(f"Response generated in {generation_time:.1f}s", style="dim"))
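
The signature filtering is what keeps pre-0.0.131 callbacks working: a callback that declares only the old parameters never sees the new task/agent kwargs. The rule in isolation, with a hypothetical legacy callback:

```python
import inspect

def legacy_callback(message, response, markdown=True, generation_time=None):
    # Declares only the old parameter set; extra kwargs must not reach it.
    print("legacy:", response)

kwargs = {
    "message": "hi", "response": "hello", "markdown": True,
    "generation_time": 0.5, "agent_name": "A", "task_id": "42",
}
sig = inspect.signature(legacy_callback)

# Same filtering applied by display_interaction and execute_callback:
if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()):
    supported = kwargs                      # callback accepts **kwargs
else:
    supported = {k: v for k, v in kwargs.items() if k in sig.parameters}

legacy_callback(**supported)  # called without agent_name/task_id
```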
@@ -140,10 +206,6 @@ def display_self_reflection(message: str, console=None):
         console = Console()
     message = _clean_display_content(str(message))

-    # Execute callback if registered
-    if 'self_reflection' in sync_display_callbacks:
-        sync_display_callbacks['self_reflection'](message=message)
-
     console.print(Panel.fit(Text(message, style="bold yellow"), title="Self Reflection", border_style="magenta"))

 def display_instruction(message: str, console=None, agent_name: str = None, agent_role: str = None, agent_tools: List[str] = None):
@@ -153,10 +215,6 @@ def display_instruction(message: str, console=None, agent_name: str = None, agent_role: str = None, agent_tools: List[str] = None):
         console = Console()
     message = _clean_display_content(str(message))

-    # Execute callback if registered
-    if 'instruction' in sync_display_callbacks:
-        sync_display_callbacks['instruction'](message=message)
-
     # Display agent info if available
     if agent_name:
         agent_info = f"[bold #FF9B9B]👤 Agent:[/] [#FFE5E5]{agent_name}[/]"
@@ -181,10 +239,6 @@ def display_tool_call(message: str, console=None):
     message = _clean_display_content(str(message))
     logging.debug(f"Cleaned message in display_tool_call: {repr(message)}")

-    # Execute callback if registered
-    if 'tool_call' in sync_display_callbacks:
-        sync_display_callbacks['tool_call'](message=message)
-
     console.print(Panel.fit(Text(message, style="bold cyan"), title="Tool Call", border_style="green"))

 def display_error(message: str, console=None):
@@ -194,10 +248,6 @@ def display_error(message: str, console=None):
         console = Console()
     message = _clean_display_content(str(message))

-    # Execute callback if registered
-    if 'error' in sync_display_callbacks:
-        sync_display_callbacks['error'](message=message)
-
     console.print(Panel.fit(Text(message, style="bold red"), title="Error", border_style="red"))
     error_logs.append(message)

@@ -213,17 +263,10 @@ def display_generating(content: str = "", start_time: Optional[float] = None):

     content = _clean_display_content(str(content))

-    # Execute callback if registered
-    if 'generating' in sync_display_callbacks:
-        sync_display_callbacks['generating'](
-            content=content,
-            elapsed_time=elapsed_str.strip() if elapsed_str else None
-        )
-
     return Panel(Markdown(content), title=f"Generating...{elapsed_str}", border_style="green")

 # Async versions with 'a' prefix
-async def adisplay_interaction(message, response, markdown=True, generation_time=None, console=None):
+async def adisplay_interaction(message, response, markdown=True, generation_time=None, console=None, agent_name=None, agent_role=None, agent_tools=None, task_name=None, task_description=None, task_id=None):
     """Async version of display_interaction."""
     if console is None:
         console = Console()
@@ -241,7 +284,13 @@ async def adisplay_interaction(message, response, markdown=True, generation_time
             message=message,
             response=response,
             markdown=markdown,
-            generation_time=generation_time
+            generation_time=generation_time,
+            agent_name=agent_name,
+            agent_role=agent_role,
+            agent_tools=agent_tools,
+            task_name=task_name,
+            task_description=task_description,
+            task_id=task_id
         )

     # Rest of the display logic...
praisonaiagents/memory/memory.py CHANGED
@@ -116,6 +116,16 @@ class Memory:
         self.use_mem0 = (self.provider.lower() == "mem0") and MEM0_AVAILABLE
         self.use_rag = (self.provider.lower() == "rag") and CHROMADB_AVAILABLE and self.cfg.get("use_embedding", False)
         self.graph_enabled = False  # Initialize graph support flag
+
+        # Extract embedding model from config
+        self.embedder_config = self.cfg.get("embedder", {})
+        if isinstance(self.embedder_config, dict):
+            embedder_model_config = self.embedder_config.get("config", {})
+            self.embedding_model = embedder_model_config.get("model", "text-embedding-3-small")
+        else:
+            self.embedding_model = "text-embedding-3-small"
+
+        self._log_verbose(f"Using embedding model: {self.embedding_model}")

         # Create .praison directory if it doesn't exist
         os.makedirs(".praison", exist_ok=True)
@@ -355,7 +365,7 @@ class Memory:
                     import litellm

                     response = litellm.embedding(
-                        model="text-embedding-3-small",
+                        model=self.embedding_model,
                         input=query
                     )
                     query_embedding = response.data[0]["embedding"]
@@ -366,7 +376,7 @@ class Memory:

                     response = client.embeddings.create(
                         input=query,
-                        model="text-embedding-3-small"
+                        model=self.embedding_model
                     )
                     query_embedding = response.data[0].embedding
                 else:
@@ -496,7 +506,7 @@ class Memory:
             logger.debug(f"Embedding input text: {text}")

             response = litellm.embedding(
-                model="text-embedding-3-small",
+                model=self.embedding_model,
                 input=text
             )
             embedding = response.data[0]["embedding"]
@@ -513,7 +523,7 @@ class Memory:

             response = client.embeddings.create(
                 input=text,
-                model="text-embedding-3-small"
+                model=self.embedding_model
             )
             embedding = response.data[0].embedding
             logger.info("Successfully got embeddings from OpenAI")
@@ -576,7 +586,7 @@ class Memory:
                     import litellm

                     response = litellm.embedding(
-                        model="text-embedding-3-small",
+                        model=self.embedding_model,
                         input=query
                     )
                     query_embedding = response.data[0]["embedding"]
@@ -587,7 +597,7 @@ class Memory:

                     response = client.embeddings.create(
                         input=query,
-                        model="text-embedding-3-small"
+                        model=self.embedding_model
                     )
                     query_embedding = response.data[0].embedding
                 else:
praisonaiagents-0.0.131.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.130
+Version: 0.0.131
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
praisonaiagents-0.0.131.dist-info/RECORD CHANGED
@@ -1,14 +1,14 @@
 praisonaiagents/__init__.py,sha256=7DZJjhHa1_OeP0aiR5_iXINXYiug_gtbOuh_ylad6Uc,3847
 praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9813
-praisonaiagents/main.py,sha256=7HEFVNmxMzRjVpbe52aYvQMZA013mA3YosrixM_Ua8Q,14975
+praisonaiagents/main.py,sha256=8iUDHu-BHM4DCEQlVfGQWhXbNB99DZxjV32sYlptbf8,17498
 praisonaiagents/session.py,sha256=d-CZPYikOHb0q-H9f_IWKJsypnQfz1YKeLLkyxs6oDo,15532
 praisonaiagents/agent/__init__.py,sha256=FkjW6f3EU8heQ9tvctfLbOWV9_dOXmS1PcFNgcStns8,403
-praisonaiagents/agent/agent.py,sha256=zuwZ3U-wwEu3x_BK4SYPJPKC7cZ_e8iak5ILn_H9yQE,119660
+praisonaiagents/agent/agent.py,sha256=uNpJ8ejF28-FJsFWLbk4SHFCjAlWT-gKav7zZz-gQ6k,122647
 praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
-praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
+praisonaiagents/agent/image_agent.py,sha256=Bbwg_h3qhjhG7gMH8sdcQXhcOFgE_wSvcdhtqH5f2UM,9145
 praisonaiagents/agent/router_agent.py,sha256=a_b6w5Ti05gvK80uKGMIcT14fiCTKv8rCQPCWAUfIiE,12713
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
-praisonaiagents/agents/agents.py,sha256=21JwDl6-YBbZfEfWXgSJ-iqJ48kpAuG3OuzzwCHddEs,63161
+praisonaiagents/agents/agents.py,sha256=WfzlnwiqiEdU6z-6j_Xp0LyhIApKNj0G6L0Hlr418yE,64420
 praisonaiagents/agents/autoagents.py,sha256=v5pJfTgHnFzG5K2gHwfRA0nZ7Ikptir6hUNvOZ--E44,20777
 praisonaiagents/guardrails/__init__.py,sha256=HA8zhp-KRHTxo0194MUwXOUJjPyjOu7E3d7xUIKYVVY,310
 praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXCFIyZXZVY4f0hnQyWc,1352
@@ -17,7 +17,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=-di_h9HxXQfAhTMMerhK16tfw8DtUndp44TGkBOzkZs,15539
 praisonaiagents/llm/__init__.py,sha256=tHvWq5mv4K4MhWr0s6rqox8UnJ5RK0kXhYuD40WkZQA,1747
-praisonaiagents/llm/llm.py,sha256=pseLC7JKUz56QOuBtsZttbMAq0UrgCLHgxoeoNXoqrg,109070
+praisonaiagents/llm/llm.py,sha256=PFFrIUPKJ1fcb1N_vJkjLTj4IAILB1S6USmorxSr4cU,121087
 praisonaiagents/llm/model_capabilities.py,sha256=MrEDLpYk9U_xzvXXIKYD4-bnNQg_W4bnaoTPyBUHfcs,3231
 praisonaiagents/llm/model_router.py,sha256=Jy2pShlkLxqXF3quz-MRB3-6L9vaUSgUrf2YJs_Tsg0,13995
 praisonaiagents/llm/openai_client.py,sha256=6KANw9SNiglvfJvTcpDPZjuTKG6cThD1t-ZqgKvmZiw,45356
@@ -26,7 +26,7 @@ praisonaiagents/mcp/mcp.py,sha256=T0G0rQotHxk9qTnG1tjQLr4c0BUSLnEqz9sIMx4F954,21
 praisonaiagents/mcp/mcp_http_stream.py,sha256=Yh-69eIlLQS_M0bd__y7NzSjOqqX6R8Ed4eJQw6xXgg,18314
 praisonaiagents/mcp/mcp_sse.py,sha256=KO10tAgZ5vSKeRhkJIZcdJ0ZmhRybS39i1KybWt4D7M,9128
 praisonaiagents/memory/__init__.py,sha256=aEFdhgtTqDdMhc_JCWM-f4XI9cZIj7Wz5g_MUa-0amg,397
-praisonaiagents/memory/memory.py,sha256=D5BmQTktv6VOJ49yW2m1MjjCJ5UDSX1Qo46_443ymKo,44276
+praisonaiagents/memory/memory.py,sha256=hbqPQj6gvTq3pIO9Ww75xWSZJbVwjTPGvLZX0vZBIrI,44748
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=uYvxL1TD_2Ku04dyBN_GV3DvoCuyv3Y2Iy7QODNin18,73906
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
@@ -57,7 +57,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.130.dist-info/METADATA,sha256=yId1o9u0MH_G-DszUmZ31Hvn-PNznFt5l7bXCHeYh9U,1699
-praisonaiagents-0.0.130.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-praisonaiagents-0.0.130.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.130.dist-info/RECORD,,
+praisonaiagents-0.0.131.dist-info/METADATA,sha256=VXwZ3uzd7-OwCm2A1_ZKoU1OG-37s40N6qFW8RCmyqM,1699
+praisonaiagents-0.0.131.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.131.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.131.dist-info/RECORD,,