praisonaiagents-0.0.84-py3-none-any.whl → praisonaiagents-0.0.86-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in a supported public registry. It is provided for informational purposes only and reflects the changes between those published versions.
@@ -526,6 +526,44 @@ Your Goal: {self.goal}
             tools=self.tools
         )
 
+    def _cast_arguments(self, func, arguments):
+        """Cast arguments to their expected types based on function signature."""
+        if not callable(func) or not arguments:
+            return arguments
+
+        try:
+            sig = inspect.signature(func)
+            casted_args = {}
+
+            for param_name, arg_value in arguments.items():
+                if param_name in sig.parameters:
+                    param = sig.parameters[param_name]
+                    if param.annotation != inspect.Parameter.empty:
+                        # Handle common type conversions
+                        if param.annotation == int and isinstance(arg_value, (str, float)):
+                            try:
+                                casted_args[param_name] = int(float(arg_value))
+                            except (ValueError, TypeError):
+                                casted_args[param_name] = arg_value
+                        elif param.annotation == float and isinstance(arg_value, (str, int)):
+                            try:
+                                casted_args[param_name] = float(arg_value)
+                            except (ValueError, TypeError):
+                                casted_args[param_name] = arg_value
+                        elif param.annotation == bool and isinstance(arg_value, str):
+                            casted_args[param_name] = arg_value.lower() in ('true', '1', 'yes', 'on')
+                        else:
+                            casted_args[param_name] = arg_value
+                    else:
+                        casted_args[param_name] = arg_value
+                else:
+                    casted_args[param_name] = arg_value
+
+            return casted_args
+        except Exception as e:
+            logging.debug(f"Type casting failed for {getattr(func, '__name__', 'unknown function')}: {e}")
+            return arguments
+
     def execute_tool(self, function_name, arguments):
         """
         Execute a tool dynamically based on the function name and arguments.
@@ -576,7 +614,8 @@ Your Goal: {self.goal}
                 run_params = {k: v for k, v in arguments.items()
                               if k in inspect.signature(instance.run).parameters
                               and k != 'self'}
-                return instance.run(**run_params)
+                casted_params = self._cast_arguments(instance.run, run_params)
+                return instance.run(**casted_params)
 
             # CrewAI: If it's a class with an _run method, instantiate and call _run
             elif inspect.isclass(func) and hasattr(func, '_run'):
@@ -584,11 +623,13 @@ Your Goal: {self.goal}
                 run_params = {k: v for k, v in arguments.items()
                               if k in inspect.signature(instance._run).parameters
                               and k != 'self'}
-                return instance._run(**run_params)
+                casted_params = self._cast_arguments(instance._run, run_params)
+                return instance._run(**casted_params)
 
             # Otherwise treat as regular function
             elif callable(func):
-                return func(**arguments)
+                casted_arguments = self._cast_arguments(func, arguments)
+                return func(**casted_arguments)
         except Exception as e:
             error_msg = str(e)
             logging.error(f"Error executing tool {function_name}: {error_msg}")
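The net effect of the three agent.py hunks above: tool arguments coming back from the LLM as strings are coerced to the annotated parameter types before the tool is dispatched. A minimal standalone sketch of the same idea, under the assumption that tool-call JSON often carries numbers as strings; the get_weather tool and the cast_arguments helper here are hypothetical illustrations, not the package's API:

    import inspect

    def cast_arguments(func, arguments):
        """Best-effort coercion of LLM-provided arguments to annotated types."""
        sig = inspect.signature(func)
        casted = {}
        for name, value in arguments.items():
            ann = sig.parameters[name].annotation if name in sig.parameters else inspect.Parameter.empty
            if ann is int and isinstance(value, (str, float)):
                value = int(float(value))
            elif ann is float and isinstance(value, (str, int)):
                value = float(value)
            elif ann is bool and isinstance(value, str):
                value = value.lower() in ('true', '1', 'yes', 'on')
            casted[name] = value
        return casted

    def get_weather(city: str, days: int) -> str:  # hypothetical tool
        return f"{city} forecast for {days} day(s)"

    # LLMs frequently emit numbers as strings in tool-call JSON
    args = {"city": "Paris", "days": "3"}
    print(get_weather(**cast_arguments(get_weather, args)))  # -> Paris forecast for 3 day(s)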
@@ -13,6 +13,16 @@ from ..task.task import Task
 from ..process.process import Process, LoopItems
 import asyncio
 import uuid
+from enum import Enum
+
+# Task status constants
+class TaskStatus(Enum):
+    """Enumeration for task status values to ensure consistency"""
+    COMPLETED = "completed"
+    IN_PROGRESS = "in progress"
+    NOT_STARTED = "not started"
+    FAILED = "failed"
+    UNKNOWN = "unknown"
 
 # Set up logger
 logger = logging.getLogger(__name__)
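The enum centralizes the status string literals, but comparisons go through .value because task objects keep a plain string status. A small self-contained sketch of the intended comparison; the FakeTask stand-in below is illustrative, not the package's Task class:

    from enum import Enum

    class TaskStatus(Enum):
        COMPLETED = "completed"
        IN_PROGRESS = "in progress"
        NOT_STARTED = "not started"
        FAILED = "failed"
        UNKNOWN = "unknown"

    class FakeTask:  # stand-in with a plain-string status field
        def __init__(self, status):
            self.status = status

    task = FakeTask("completed")
    if task.status == TaskStatus.COMPLETED.value:
        print("safe to reuse this task's result as context")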
@@ -49,6 +59,55 @@ def process_video(video_path: str, seconds_per_frame=2):
     video.release()
     return base64_frames
 
+def process_task_context(context_item, verbose=0, user_id=None):
+    """
+    Process a single context item for task execution.
+    This helper function avoids code duplication between async and sync execution methods.
+
+    Args:
+        context_item: The context item to process (can be string, list, task object, or dict)
+        verbose: Verbosity level for logging
+        user_id: User ID for database queries
+
+    Returns:
+        str: Formatted context string for this item
+    """
+    if isinstance(context_item, str):
+        return f"Input Content:\n{context_item}"
+    elif isinstance(context_item, list):
+        return f"Input Content: {' '.join(str(x) for x in context_item)}"
+    elif hasattr(context_item, 'result'): # Task object
+        # Ensure the previous task is completed before including its result
+        task_status = getattr(context_item, 'status', None)
+        task_name = context_item.name if context_item.name else context_item.description
+
+        if context_item.result and task_status == TaskStatus.COMPLETED.value:
+            return f"Result of previous task {task_name}:\n{context_item.result.raw}"
+        elif task_status == TaskStatus.COMPLETED.value and not context_item.result:
+            return f"Previous task {task_name} completed but produced no result."
+        else:
+            return f"Previous task {task_name} is not yet completed (status: {task_status or TaskStatus.UNKNOWN.value})."
+    elif isinstance(context_item, dict) and "vector_store" in context_item:
+        from ..knowledge.knowledge import Knowledge
+        try:
+            # Handle both string and dict configs
+            cfg = context_item["vector_store"]
+            if isinstance(cfg, str):
+                cfg = json.loads(cfg)
+
+            knowledge = Knowledge(config={"vector_store": cfg}, verbose=verbose)
+
+            # Only use user_id as filter
+            db_results = knowledge.search(
+                context_item.get("query", ""), # Use query from context if available
+                user_id=user_id if user_id else None
+            )
+            return f"[DB Context]: {str(db_results)}"
+        except Exception as e:
+            return f"[Vector DB Error]: {e}"
+    else:
+        return str(context_item) # Fallback for unknown types
+
 class PraisonAIAgents:
     def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None, user_id=None, max_iter=10, stream=True, name: Optional[str] = None):
         # Add check at the start if memory is requested
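A hedged usage sketch of the new helper. The string and list branches are self-contained; the vector_store branch additionally needs a configured Knowledge backend, so only the first two calls below would run without one. The import path assumes the package layout shown in this diff (praisonaiagents/agents/agents.py) and praisonaiagents >= 0.0.86 installed:

    # process_task_context is a module-level helper in praisonaiagents/agents/agents.py
    from praisonaiagents.agents.agents import process_task_context

    print(process_task_context("raw text passed as context"))
    # -> "Input Content:\nraw text passed as context"

    print(process_task_context(["point 1", "point 2"]))
    # -> "Input Content: point 1 point 2"

    # A vector-store context item would also need a reachable embedding backend:
    # process_task_context({"vector_store": {...}, "query": "previous findings"},
    #                      verbose=1, user_id="user-123")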
@@ -250,44 +309,20 @@ Expected Output: {task.expected_output}.
         if task.context:
             context_results = [] # Use list to avoid duplicates
             for context_item in task.context:
-                if isinstance(context_item, str):
-                    context_results.append(f"Input Content:\n{context_item}")
-                elif isinstance(context_item, list):
-                    context_results.append(f"Input Content: {' '.join(str(x) for x in context_item)}")
-                elif hasattr(context_item, 'result'): # Task object
-                    if context_item.result:
-                        context_results.append(
-                            f"Result of previous task {context_item.name if context_item.name else context_item.description}:\n{context_item.result.raw}"
-                        )
-                    else:
-                        context_results.append(
-                            f"Previous task {context_item.name if context_item.name else context_item.description} has no result yet."
-                        )
-                elif isinstance(context_item, dict) and "vector_store" in context_item:
-                    from ..knowledge.knowledge import Knowledge
-                    try:
-                        # Handle both string and dict configs
-                        cfg = context_item["vector_store"]
-                        if isinstance(cfg, str):
-                            cfg = json.loads(cfg)
-
-                        knowledge = Knowledge(config={"vector_store": cfg}, verbose=self.verbose)
-
-                        # Only use user_id as filter
-                        db_results = knowledge.search(
-                            task.description,
-                            user_id=self.user_id if self.user_id else None
-                        )
-                        context_results.append(f"[DB Context]: {str(db_results)}")
-                    except Exception as e:
-                        context_results.append(f"[Vector DB Error]: {e}")
+                # Use the centralized helper function
+                context_str = process_task_context(context_item, self.verbose, self.user_id)
+                context_results.append(context_str)
 
-            # Join unique context results
+            # Join unique context results with proper formatting
             unique_contexts = list(dict.fromkeys(context_results)) # Remove duplicates
+            if self.verbose >= 3:
+                logger.info(f"Task {task_id} context items: {len(unique_contexts)}")
+                for i, ctx in enumerate(unique_contexts):
+                    logger.info(f"Context {i+1}: {ctx[:100]}...")
             task_prompt += f"""
 Context:
 
-{' '.join(unique_contexts)}
+{'\n\n'.join(unique_contexts)}
 """
         task_prompt += "Please provide only the final result of your work. Do not add any conversation or extra explanation."
 
@@ -573,44 +608,20 @@ Expected Output: {task.expected_output}.
         if task.context:
             context_results = [] # Use list to avoid duplicates
             for context_item in task.context:
-                if isinstance(context_item, str):
-                    context_results.append(f"Input Content:\n{context_item}")
-                elif isinstance(context_item, list):
-                    context_results.append(f"Input Content: {' '.join(str(x) for x in context_item)}")
-                elif hasattr(context_item, 'result'): # Task object
-                    if context_item.result:
-                        context_results.append(
-                            f"Result of previous task {context_item.name if context_item.name else context_item.description}:\n{context_item.result.raw}"
-                        )
-                    else:
-                        context_results.append(
-                            f"Previous task {context_item.name if context_item.name else context_item.description} has no result yet."
-                        )
-                elif isinstance(context_item, dict) and "vector_store" in context_item:
-                    from ..knowledge.knowledge import Knowledge
-                    try:
-                        # Handle both string and dict configs
-                        cfg = context_item["vector_store"]
-                        if isinstance(cfg, str):
-                            cfg = json.loads(cfg)
-
-                        knowledge = Knowledge(config={"vector_store": cfg}, verbose=self.verbose)
-
-                        # Only use user_id as filter
-                        db_results = knowledge.search(
-                            task.description,
-                            user_id=self.user_id if self.user_id else None
-                        )
-                        context_results.append(f"[DB Context]: {str(db_results)}")
-                    except Exception as e:
-                        context_results.append(f"[Vector DB Error]: {e}")
+                # Use the centralized helper function
+                context_str = process_task_context(context_item, self.verbose, self.user_id)
+                context_results.append(context_str)
 
-            # Join unique context results
+            # Join unique context results with proper formatting
            unique_contexts = list(dict.fromkeys(context_results)) # Remove duplicates
+            if self.verbose >= 3:
+                logger.info(f"Task {task_id} context items: {len(unique_contexts)}")
+                for i, ctx in enumerate(unique_contexts):
+                    logger.info(f"Context {i+1}: {ctx[:100]}...")
             task_prompt += f"""
 Context:
 
-{' '.join(unique_contexts)}
+{'\n\n'.join(unique_contexts)}
 """
 
         # Add memory context if available
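Both execution paths now build the context block the same way: per-item strings from the helper, order-preserving de-duplication via dict.fromkeys, and a blank line between entries. The joining step in isolation, with placeholder context strings:

    context_results = [
        "Input Content:\nraw text",
        "Result of previous task research:\nkey findings",
        "Input Content:\nraw text",  # duplicate from another context entry
    ]

    unique_contexts = list(dict.fromkeys(context_results))  # order-preserving dedup
    print("\n\n".join(unique_contexts))

Note that the hunks above embed the '\n\n' join directly inside an f-string expression; that only parses on Python 3.12+, where backslashes are allowed inside f-string replacement fields, while earlier interpreters raise a SyntaxError.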
@@ -367,12 +367,13 @@ class LLM:
367
367
  # If reasoning_steps is True, do a single non-streaming call
368
368
  if reasoning_steps:
369
369
  resp = litellm.completion(
370
- model=self.model,
371
- messages=messages,
372
- temperature=temperature,
373
- stream=False, # force non-streaming
374
- tools=formatted_tools,
375
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
370
+ **self._build_completion_params(
371
+ messages=messages,
372
+ temperature=temperature,
373
+ stream=False, # force non-streaming
374
+ tools=formatted_tools,
375
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
376
+ )
376
377
  )
377
378
  reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
378
379
  response_text = resp["choices"][0]["message"]["content"]
@@ -401,12 +402,13 @@ class LLM:
401
402
  with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
402
403
  response_text = ""
403
404
  for chunk in litellm.completion(
404
- model=self.model,
405
- messages=messages,
406
- tools=formatted_tools,
407
- temperature=temperature,
408
- stream=True,
409
- **kwargs
405
+ **self._build_completion_params(
406
+ messages=messages,
407
+ tools=formatted_tools,
408
+ temperature=temperature,
409
+ stream=True,
410
+ **kwargs
411
+ )
410
412
  ):
411
413
  if chunk and chunk.choices and chunk.choices[0].delta.content:
412
414
  content = chunk.choices[0].delta.content
@@ -416,12 +418,13 @@ class LLM:
416
418
  # Non-verbose mode, just collect the response
417
419
  response_text = ""
418
420
  for chunk in litellm.completion(
419
- model=self.model,
420
- messages=messages,
421
- tools=formatted_tools,
422
- temperature=temperature,
423
- stream=True,
424
- **kwargs
421
+ **self._build_completion_params(
422
+ messages=messages,
423
+ tools=formatted_tools,
424
+ temperature=temperature,
425
+ stream=True,
426
+ **kwargs
427
+ )
425
428
  ):
426
429
  if chunk and chunk.choices and chunk.choices[0].delta.content:
427
430
  response_text += chunk.choices[0].delta.content
@@ -430,12 +433,13 @@ class LLM:
430
433
 
431
434
  # Get final completion to check for tool calls
432
435
  final_response = litellm.completion(
433
- model=self.model,
434
- messages=messages,
435
- tools=formatted_tools,
436
- temperature=temperature,
437
- stream=False, # No streaming for tool call check
438
- **kwargs
436
+ **self._build_completion_params(
437
+ messages=messages,
438
+ tools=formatted_tools,
439
+ temperature=temperature,
440
+ stream=False, # No streaming for tool call check
441
+ **kwargs
442
+ )
439
443
  )
440
444
 
441
445
  tool_calls = final_response["choices"][0]["message"].get("tool_calls")
@@ -547,10 +551,11 @@ class LLM:
547
551
  with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
548
552
  response_text = ""
549
553
  for chunk in litellm.completion(
550
- model=self.model,
551
- messages=follow_up_messages,
552
- temperature=temperature,
553
- stream=True
554
+ **self._build_completion_params(
555
+ messages=follow_up_messages,
556
+ temperature=temperature,
557
+ stream=True
558
+ )
554
559
  ):
555
560
  if chunk and chunk.choices and chunk.choices[0].delta.content:
556
561
  content = chunk.choices[0].delta.content
@@ -559,10 +564,11 @@ class LLM:
559
564
  else:
560
565
  response_text = ""
561
566
  for chunk in litellm.completion(
562
- model=self.model,
563
- messages=follow_up_messages,
564
- temperature=temperature,
565
- stream=True
567
+ **self._build_completion_params(
568
+ messages=follow_up_messages,
569
+ temperature=temperature,
570
+ stream=True
571
+ )
566
572
  ):
567
573
  if chunk and chunk.choices and chunk.choices[0].delta.content:
568
574
  response_text += chunk.choices[0].delta.content
@@ -573,11 +579,12 @@ class LLM:
573
579
  # If reasoning_steps is True, do a single non-streaming call
574
580
  elif reasoning_steps:
575
581
  resp = litellm.completion(
576
- model=self.model,
577
- messages=messages,
578
- temperature=temperature,
579
- stream=False, # force non-streaming
580
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
582
+ **self._build_completion_params(
583
+ messages=messages,
584
+ temperature=temperature,
585
+ stream=False, # force non-streaming
586
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
587
+ )
581
588
  )
582
589
  reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
583
590
  response_text = resp["choices"][0]["message"]["content"]
@@ -607,10 +614,11 @@ class LLM:
607
614
  with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
608
615
  response_text = ""
609
616
  for chunk in litellm.completion(
610
- model=self.model,
611
- messages=messages,
612
- temperature=temperature,
613
- stream=True
617
+ **self._build_completion_params(
618
+ messages=messages,
619
+ temperature=temperature,
620
+ stream=True
621
+ )
614
622
  ):
615
623
  if chunk and chunk.choices and chunk.choices[0].delta.content:
616
624
  content = chunk.choices[0].delta.content
@@ -619,10 +627,11 @@ class LLM:
619
627
  else:
620
628
  response_text = ""
621
629
  for chunk in litellm.completion(
622
- model=self.model,
623
- messages=messages,
624
- temperature=temperature,
625
- stream=True
630
+ **self._build_completion_params(
631
+ messages=messages,
632
+ temperature=temperature,
633
+ stream=True
634
+ )
626
635
  ):
627
636
  if chunk and chunk.choices and chunk.choices[0].delta.content:
628
637
  response_text += chunk.choices[0].delta.content
@@ -663,12 +672,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
663
672
  # If reasoning_steps is True, do a single non-streaming call to capture reasoning
664
673
  if reasoning_steps:
665
674
  reflection_resp = litellm.completion(
666
- model=self.model,
667
- messages=reflection_messages,
668
- temperature=temperature,
669
- stream=False, # Force non-streaming
670
- response_format={"type": "json_object"},
671
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
675
+ **self._build_completion_params(
676
+ messages=reflection_messages,
677
+ temperature=temperature,
678
+ stream=False, # Force non-streaming
679
+ response_format={"type": "json_object"},
680
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
681
+ )
672
682
  )
673
683
  # Grab reflection text and optional reasoning
674
684
  reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
@@ -697,12 +707,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
697
707
  with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
698
708
  reflection_text = ""
699
709
  for chunk in litellm.completion(
700
- model=self.model,
701
- messages=reflection_messages,
702
- temperature=temperature,
703
- stream=True,
704
- response_format={"type": "json_object"},
705
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
710
+ **self._build_completion_params(
711
+ messages=reflection_messages,
712
+ temperature=temperature,
713
+ stream=True,
714
+ response_format={"type": "json_object"},
715
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
716
+ )
706
717
  ):
707
718
  if chunk and chunk.choices and chunk.choices[0].delta.content:
708
719
  content = chunk.choices[0].delta.content
@@ -711,12 +722,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
711
722
  else:
712
723
  reflection_text = ""
713
724
  for chunk in litellm.completion(
714
- model=self.model,
715
- messages=reflection_messages,
716
- temperature=temperature,
717
- stream=True,
718
- response_format={"type": "json_object"},
719
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
725
+ **self._build_completion_params(
726
+ messages=reflection_messages,
727
+ temperature=temperature,
728
+ stream=True,
729
+ response_format={"type": "json_object"},
730
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
731
+ )
720
732
  ):
721
733
  if chunk and chunk.choices and chunk.choices[0].delta.content:
722
734
  reflection_text += chunk.choices[0].delta.content
@@ -953,11 +965,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
953
965
  if reasoning_steps:
954
966
  # Non-streaming call to capture reasoning
955
967
  resp = await litellm.acompletion(
956
- model=self.model,
957
- messages=messages,
958
- temperature=temperature,
959
- stream=False, # force non-streaming
960
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
968
+ **self._build_completion_params(
969
+ messages=messages,
970
+ temperature=temperature,
971
+ stream=False, # force non-streaming
972
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
973
+ )
961
974
  )
962
975
  reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
963
976
  response_text = resp["choices"][0]["message"]["content"]
@@ -984,11 +997,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
984
997
  # 1) Make the streaming call WITHOUT tools
985
998
  # ----------------------------------------------------
986
999
  async for chunk in await litellm.acompletion(
987
- model=self.model,
988
- messages=messages,
989
- temperature=temperature,
990
- stream=True,
991
- **kwargs
1000
+ **self._build_completion_params(
1001
+ messages=messages,
1002
+ temperature=temperature,
1003
+ stream=True,
1004
+ **kwargs
1005
+ )
992
1006
  ):
993
1007
  if chunk and chunk.choices and chunk.choices[0].delta.content:
994
1008
  response_text += chunk.choices[0].delta.content
@@ -997,11 +1011,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
997
1011
  else:
998
1012
  # Non-verbose streaming call, still no tools
999
1013
  async for chunk in await litellm.acompletion(
1000
- model=self.model,
1001
- messages=messages,
1002
- temperature=temperature,
1003
- stream=True,
1004
- **kwargs
1014
+ **self._build_completion_params(
1015
+ messages=messages,
1016
+ temperature=temperature,
1017
+ stream=True,
1018
+ **kwargs
1019
+ )
1005
1020
  ):
1006
1021
  if chunk and chunk.choices and chunk.choices[0].delta.content:
1007
1022
  response_text += chunk.choices[0].delta.content
@@ -1014,12 +1029,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1014
1029
  if tools and execute_tool_fn:
1015
1030
  # Next call with tools if needed
1016
1031
  tool_response = await litellm.acompletion(
1017
- model=self.model,
1018
- messages=messages,
1019
- temperature=temperature,
1020
- stream=False,
1021
- tools=formatted_tools, # We safely pass tools here
1022
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
1032
+ **self._build_completion_params(
1033
+ messages=messages,
1034
+ temperature=temperature,
1035
+ stream=False,
1036
+ tools=formatted_tools, # We safely pass tools here
1037
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
1038
+ )
1023
1039
  )
1024
1040
  # handle tool_calls from tool_response as usual...
1025
1041
  tool_calls = tool_response.choices[0].message.get("tool_calls")
@@ -1125,10 +1141,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1125
1141
  if verbose:
1126
1142
  response_text = ""
1127
1143
  async for chunk in await litellm.acompletion(
1128
- model=self.model,
1129
- messages=follow_up_messages,
1130
- temperature=temperature,
1131
- stream=True
1144
+ **self._build_completion_params(
1145
+ messages=follow_up_messages,
1146
+ temperature=temperature,
1147
+ stream=True
1148
+ )
1132
1149
  ):
1133
1150
  if chunk and chunk.choices and chunk.choices[0].delta.content:
1134
1151
  content = chunk.choices[0].delta.content
@@ -1138,10 +1155,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1138
1155
  else:
1139
1156
  response_text = ""
1140
1157
  async for chunk in await litellm.acompletion(
1141
- model=self.model,
1142
- messages=follow_up_messages,
1143
- temperature=temperature,
1144
- stream=True
1158
+ **self._build_completion_params(
1159
+ messages=follow_up_messages,
1160
+ temperature=temperature,
1161
+ stream=True
1162
+ )
1145
1163
  ):
1146
1164
  if chunk and chunk.choices and chunk.choices[0].delta.content:
1147
1165
  response_text += chunk.choices[0].delta.content
@@ -1153,12 +1171,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1153
1171
  elif reasoning_steps:
1154
1172
  # Non-streaming call to capture reasoning
1155
1173
  resp = await litellm.acompletion(
1156
- model=self.model,
1157
- messages=messages,
1158
- temperature=temperature,
1159
- stream=False, # force non-streaming
1160
- tools=formatted_tools, # Include tools
1161
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
1174
+ **self._build_completion_params(
1175
+ messages=messages,
1176
+ temperature=temperature,
1177
+ stream=False, # force non-streaming
1178
+ tools=formatted_tools, # Include tools
1179
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
1180
+ )
1162
1181
  )
1163
1182
  reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
1164
1183
  response_text = resp["choices"][0]["message"]["content"]
@@ -1183,12 +1202,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1183
1202
  # Get response after tool calls with streaming
1184
1203
  if verbose:
1185
1204
  async for chunk in await litellm.acompletion(
1186
- model=self.model,
1187
- messages=messages,
1188
- temperature=temperature,
1189
- stream=True,
1190
- tools=formatted_tools,
1191
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
1205
+ **self._build_completion_params(
1206
+ messages=messages,
1207
+ temperature=temperature,
1208
+ stream=True,
1209
+ tools=formatted_tools,
1210
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
1211
+ )
1192
1212
  ):
1193
1213
  if chunk and chunk.choices and chunk.choices[0].delta.content:
1194
1214
  content = chunk.choices[0].delta.content
@@ -1197,12 +1217,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1197
1217
  print(f"Reflecting... {time.time() - start_time:.1f}s", end="\r")
1198
1218
  else:
1199
1219
  response_text = ""
1200
- for chunk in litellm.completion(
1201
- model=self.model,
1202
- messages=messages,
1203
- temperature=temperature,
1204
- stream=True,
1205
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
1220
+ async for chunk in await litellm.acompletion(
1221
+ **self._build_completion_params(
1222
+ messages=messages,
1223
+ temperature=temperature,
1224
+ stream=True,
1225
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
1226
+ )
1206
1227
  ):
1207
1228
  if chunk and chunk.choices and chunk.choices[0].delta.content:
1208
1229
  response_text += chunk.choices[0].delta.content
@@ -1242,13 +1263,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1242
1263
 
1243
1264
  # If reasoning_steps is True, do a single non-streaming call to capture reasoning
1244
1265
  if reasoning_steps:
1245
- reflection_resp = litellm.completion(
1246
- model=self.model,
1247
- messages=reflection_messages,
1248
- temperature=temperature,
1249
- stream=False, # Force non-streaming
1250
- response_format={"type": "json_object"},
1251
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
1266
+ reflection_resp = await litellm.acompletion(
1267
+ **self._build_completion_params(
1268
+ messages=reflection_messages,
1269
+ temperature=temperature,
1270
+ stream=False, # Force non-streaming
1271
+ response_format={"type": "json_object"},
1272
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
1273
+ )
1252
1274
  )
1253
1275
  # Grab reflection text and optional reasoning
1254
1276
  reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
@@ -1276,13 +1298,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1276
1298
  if verbose:
1277
1299
  with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
1278
1300
  reflection_text = ""
1279
- for chunk in litellm.completion(
1280
- model=self.model,
1281
- messages=reflection_messages,
1282
- temperature=temperature,
1283
- stream=True,
1284
- response_format={"type": "json_object"},
1285
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
1301
+ async for chunk in await litellm.acompletion(
1302
+ **self._build_completion_params(
1303
+ messages=reflection_messages,
1304
+ temperature=temperature,
1305
+ stream=True,
1306
+ response_format={"type": "json_object"},
1307
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
1308
+ )
1286
1309
  ):
1287
1310
  if chunk and chunk.choices and chunk.choices[0].delta.content:
1288
1311
  content = chunk.choices[0].delta.content
@@ -1290,13 +1313,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1290
1313
  live.update(display_generating(reflection_text, start_time))
1291
1314
  else:
1292
1315
  reflection_text = ""
1293
- for chunk in litellm.completion(
1294
- model=self.model,
1295
- messages=reflection_messages,
1296
- temperature=temperature,
1297
- stream=True,
1298
- response_format={"type": "json_object"},
1299
- **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
1316
+ async for chunk in await litellm.acompletion(
1317
+ **self._build_completion_params(
1318
+ messages=reflection_messages,
1319
+ temperature=temperature,
1320
+ stream=True,
1321
+ response_format={"type": "json_object"},
1322
+ **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
1323
+ )
1300
1324
  ):
1301
1325
  if chunk and chunk.choices and chunk.choices[0].delta.content:
1302
1326
  reflection_text += chunk.choices[0].delta.content
@@ -1408,6 +1432,47 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
         litellm.callbacks = events
 
+    def _build_completion_params(self, **override_params) -> Dict[str, Any]:
+        """Build parameters for litellm completion calls with all necessary config"""
+        params = {
+            "model": self.model,
+        }
+
+        # Add optional parameters if they exist
+        if self.base_url:
+            params["base_url"] = self.base_url
+        if self.api_key:
+            params["api_key"] = self.api_key
+        if self.api_version:
+            params["api_version"] = self.api_version
+        if self.timeout:
+            params["timeout"] = self.timeout
+        if self.max_tokens:
+            params["max_tokens"] = self.max_tokens
+        if self.top_p:
+            params["top_p"] = self.top_p
+        if self.presence_penalty:
+            params["presence_penalty"] = self.presence_penalty
+        if self.frequency_penalty:
+            params["frequency_penalty"] = self.frequency_penalty
+        if self.logit_bias:
+            params["logit_bias"] = self.logit_bias
+        if self.response_format:
+            params["response_format"] = self.response_format
+        if self.seed:
+            params["seed"] = self.seed
+        if self.logprobs:
+            params["logprobs"] = self.logprobs
+        if self.top_logprobs:
+            params["top_logprobs"] = self.top_logprobs
+        if self.stop_phrases:
+            params["stop"] = self.stop_phrases
+
+        # Override with any provided parameters
+        params.update(override_params)
+
+        return params
+
     # Response without tool calls
     def response(
         self,
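The many litellm call-site hunks in this file all apply the same mechanical change: provider-level settings (model, base_url, api_key, max_tokens, ...) come from one builder, and each call site only supplies what varies per call. A free-standing sketch of that pattern, with hypothetical names and without the LLM class or litellm, just to show how per-call overrides layer on top of shared settings:

    from typing import Any, Dict

    def build_completion_params(model: str, base_url: str = "", max_tokens: int = 0,
                                **override_params) -> Dict[str, Any]:
        """Collect provider-level settings once, then let each call site override."""
        params: Dict[str, Any] = {"model": model}
        if base_url:
            params["base_url"] = base_url
        if max_tokens:
            params["max_tokens"] = max_tokens
        params.update(override_params)  # per-call values win
        return params

    # Two call sites, one source of truth for provider settings:
    streaming = build_completion_params("gpt-4o-mini", max_tokens=512,
                                        messages=[{"role": "user", "content": "hi"}],
                                        stream=True)
    blocking = build_completion_params("gpt-4o-mini", max_tokens=512,
                                       messages=[{"role": "user", "content": "hi"}],
                                       stream=False)
    print(streaming["stream"], blocking["stream"])  # True False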
@@ -1466,11 +1531,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1466
1531
  if verbose:
1467
1532
  with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
1468
1533
  for chunk in litellm.completion(
1469
- model=self.model,
1470
- messages=messages,
1471
- temperature=temperature,
1472
- stream=True,
1473
- **kwargs
1534
+ **self._build_completion_params(
1535
+ messages=messages,
1536
+ temperature=temperature,
1537
+ stream=True,
1538
+ **kwargs
1539
+ )
1474
1540
  ):
1475
1541
  if chunk and chunk.choices and chunk.choices[0].delta.content:
1476
1542
  content = chunk.choices[0].delta.content
@@ -1478,21 +1544,23 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1478
1544
  live.update(display_generating(response_text, start_time))
1479
1545
  else:
1480
1546
  for chunk in litellm.completion(
1481
- model=self.model,
1482
- messages=messages,
1483
- temperature=temperature,
1484
- stream=True,
1485
- **kwargs
1547
+ **self._build_completion_params(
1548
+ messages=messages,
1549
+ temperature=temperature,
1550
+ stream=True,
1551
+ **kwargs
1552
+ )
1486
1553
  ):
1487
1554
  if chunk and chunk.choices and chunk.choices[0].delta.content:
1488
1555
  response_text += chunk.choices[0].delta.content
1489
1556
  else:
1490
1557
  response = litellm.completion(
1491
- model=self.model,
1492
- messages=messages,
1493
- temperature=temperature,
1494
- stream=False,
1495
- **kwargs
1558
+ **self._build_completion_params(
1559
+ messages=messages,
1560
+ temperature=temperature,
1561
+ stream=False,
1562
+ **kwargs
1563
+ )
1496
1564
  )
1497
1565
  response_text = response.choices[0].message.content.strip()
1498
1566
 
@@ -1569,11 +1637,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1569
1637
  if verbose:
1570
1638
  with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
1571
1639
  async for chunk in await litellm.acompletion(
1572
- model=self.model,
1573
- messages=messages,
1574
- temperature=temperature,
1575
- stream=True,
1576
- **kwargs
1640
+ **self._build_completion_params(
1641
+ messages=messages,
1642
+ temperature=temperature,
1643
+ stream=True,
1644
+ **kwargs
1645
+ )
1577
1646
  ):
1578
1647
  if chunk and chunk.choices and chunk.choices[0].delta.content:
1579
1648
  content = chunk.choices[0].delta.content
@@ -1581,21 +1650,23 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1581
1650
  live.update(display_generating(response_text, start_time))
1582
1651
  else:
1583
1652
  async for chunk in await litellm.acompletion(
1584
- model=self.model,
1585
- messages=messages,
1586
- temperature=temperature,
1587
- stream=True,
1588
- **kwargs
1653
+ **self._build_completion_params(
1654
+ messages=messages,
1655
+ temperature=temperature,
1656
+ stream=True,
1657
+ **kwargs
1658
+ )
1589
1659
  ):
1590
1660
  if chunk and chunk.choices and chunk.choices[0].delta.content:
1591
1661
  response_text += chunk.choices[0].delta.content
1592
1662
  else:
1593
1663
  response = await litellm.acompletion(
1594
- model=self.model,
1595
- messages=messages,
1596
- temperature=temperature,
1597
- stream=False,
1598
- **kwargs
1664
+ **self._build_completion_params(
1665
+ messages=messages,
1666
+ temperature=temperature,
1667
+ stream=False,
1668
+ **kwargs
1669
+ )
1599
1670
  )
1600
1671
  response_text = response.choices[0].message.content.strip()
1601
1672
 
@@ -212,6 +212,18 @@ class MCP:
 
         # Set up stdio client
         self.is_sse = False
+
+        # Ensure UTF-8 encoding in environment for Docker compatibility
+        env = kwargs.get('env', {})
+        if not env:
+            env = os.environ.copy()
+        env.update({
+            'PYTHONIOENCODING': 'utf-8',
+            'LC_ALL': 'C.UTF-8',
+            'LANG': 'C.UTF-8'
+        })
+        kwargs['env'] = env
+
         self.server_params = StdioServerParameters(
             command=cmd,
             args=arguments,
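This MCP change makes the stdio-launched server child process inherit a UTF-8 locale, which matters in slim Docker images that default to a C/POSIX locale. The same environment preparation in isolation, handed to a plain subprocess (standard library only; the child command is just a placeholder):

    import os
    import subprocess
    import sys

    env = os.environ.copy()
    env.update({
        'PYTHONIOENCODING': 'utf-8',
        'LC_ALL': 'C.UTF-8',
        'LANG': 'C.UTF-8',
    })

    # The child Python process now encodes its stdio as UTF-8 even in a C/POSIX locale.
    subprocess.run([sys.executable, '-c', "print('héllo, UTF-8 ok')"], env=env, check=True)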
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.84
+Version: 0.0.86
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
@@ -1,18 +1,18 @@
 praisonaiagents/__init__.py,sha256=Z2_rSA6mYozz0r3ioUgKzl3QV8uWRDS_QaqPg2oGjqg,1324
 praisonaiagents/main.py,sha256=l29nGEbV2ReBi4szURbnH0Fk0w2F_QZTmECysyZjYcA,15066
 praisonaiagents/agent/__init__.py,sha256=j0T19TVNbfZcClvpbZDDinQxZ0oORgsMrMqx16jZ-bA,128
-praisonaiagents/agent/agent.py,sha256=9vexAh3Jch5Fqst2aT-jf6xU1wAr4t0NePJZMmtVK_g,84183
+praisonaiagents/agent/agent.py,sha256=rnUCrrEB_7kLrSsHwZydiRg7ygIokGIrKZ0P6WYuj4M,86363
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
-praisonaiagents/agents/agents.py,sha256=HQXAuvV_cORa-NUNVknve4d0B1OIqS6W5jYkw_bYEyA,59519
+praisonaiagents/agents/agents.py,sha256=5u-49r8vAWDA7D8mxuodhljYEE2D5x8csBTmho1cyqU,59329
 praisonaiagents/agents/autoagents.py,sha256=olYDn--rlJp-SckxILqmREkkgNlzCgEEcAUzfMj-54E,13518
 praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9bge0Ujuto,246
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=Po0JZsgjYJrXdNSggmUGOWidZEF0f8xo4nhsZZfh8tY,13217
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=5SII0qUgaVbDTHdNfq4foV_vAjSwilz9Mw6p_S5LZfk,88393
+praisonaiagents/llm/llm.py,sha256=Y8z7mfzL_OMhoPSIr7k7Demk8HvHmJZv80EXFY6SUEU,91863
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
-praisonaiagents/mcp/mcp.py,sha256=foarT5IoCZ6V8P9AbnqnWQHKmshJoD24gf3OP4sD_IM,16419
+praisonaiagents/mcp/mcp.py,sha256=-U6md6zHoJZCWF8XFq921Yy5CcSNaGqvjg3aRT737LM,16765
 praisonaiagents/mcp/mcp_sse.py,sha256=DLh3F_aoVRM1X-7hgIOWOw4FQ1nGmn9YNbQTesykzn4,6792
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
@@ -40,7 +40,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.84.dist-info/METADATA,sha256=4JjpgU3oHjBZpNViGQJbjv1J0vKseHp96wo13sT-jwk,1244
-praisonaiagents-0.0.84.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
-praisonaiagents-0.0.84.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.84.dist-info/RECORD,,
+praisonaiagents-0.0.86.dist-info/METADATA,sha256=dYMYlpU7x7Brp7KqcWA1RZcNFBl3waWW-X8Dc3JZ8TA,1244
+praisonaiagents-0.0.86.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+praisonaiagents-0.0.86.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.86.dist-info/RECORD,,