praisonaiagents 0.0.53__py3-none-any.whl → 0.0.56__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -97,36 +97,79 @@ def process_stream_chunks(chunks):
 
     content_list = []
     reasoning_list = []
+    tool_calls = []
+    current_tool_call = None
 
+    # First pass: Get initial tool call data
     for chunk in chunks:
         if not hasattr(chunk, "choices") or not chunk.choices:
             continue
 
-        # Track usage from each chunk
-        if hasattr(chunk, "usage"):
-            completion_tokens += getattr(chunk.usage, "completion_tokens", 0)
-            prompt_tokens += getattr(chunk.usage, "prompt_tokens", 0)
-
         delta = getattr(chunk.choices[0], "delta", None)
         if not delta:
             continue
-
+
+        # Handle content and reasoning
         if hasattr(delta, "content") and delta.content:
             content_list.append(delta.content)
         if hasattr(delta, "reasoning_content") and delta.reasoning_content:
             reasoning_list.append(delta.reasoning_content)
+
+        # Handle tool calls
+        if hasattr(delta, "tool_calls") and delta.tool_calls:
+            for tool_call_delta in delta.tool_calls:
+                if tool_call_delta.index is not None and tool_call_delta.id:
+                    # Found the initial tool call
+                    current_tool_call = {
+                        "id": tool_call_delta.id,
+                        "type": "function",
+                        "function": {
+                            "name": tool_call_delta.function.name,
+                            "arguments": ""
+                        }
+                    }
+                    while len(tool_calls) <= tool_call_delta.index:
+                        tool_calls.append(None)
+                    tool_calls[tool_call_delta.index] = current_tool_call
+                    current_tool_call = tool_calls[tool_call_delta.index]
+                elif current_tool_call is not None and hasattr(tool_call_delta.function, "arguments"):
+                    if tool_call_delta.function.arguments:
+                        current_tool_call["function"]["arguments"] += tool_call_delta.function.arguments
+
+    # Remove any None values and empty tool calls
+    tool_calls = [tc for tc in tool_calls if tc and tc["id"] and tc["function"]["name"]]
 
     combined_content = "".join(content_list) if content_list else ""
     combined_reasoning = "".join(reasoning_list) if reasoning_list else None
     finish_reason = getattr(last_chunk.choices[0], "finish_reason", None) if hasattr(last_chunk, "choices") and last_chunk.choices else None
 
+    # Create ToolCall objects
+    processed_tool_calls = []
+    if tool_calls:
+        try:
+            from openai.types.chat import ChatCompletionMessageToolCall
+            for tc in tool_calls:
+                tool_call = ChatCompletionMessageToolCall(
+                    id=tc["id"],
+                    type=tc["type"],
+                    function={
+                        "name": tc["function"]["name"],
+                        "arguments": tc["function"]["arguments"]
+                    }
+                )
+                processed_tool_calls.append(tool_call)
+        except Exception as e:
+            print(f"Error processing tool call: {e}")
+
     message = ChatCompletionMessage(
         content=combined_content,
-        reasoning_content=combined_reasoning
+        role="assistant",
+        reasoning_content=combined_reasoning,
+        tool_calls=processed_tool_calls if processed_tool_calls else None
     )
 
     choice = Choice(
-        finish_reason=finish_reason,
+        finish_reason=finish_reason or "tool_calls" if processed_tool_calls else None,
         index=0,
         message=message
     )
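
The added branch accumulates streamed tool-call fragments: the first delta for an index carries the id and function name, while later deltas only append argument text. The following standalone sketch (not from the package; SimpleNamespace objects are hypothetical stand-ins for OpenAI stream deltas) shows the same accumulation pattern in isolation.

# Illustrative only: minimal sketch of the delta-accumulation pattern above.
from types import SimpleNamespace as NS

deltas = [
    NS(index=0, id="call_1", function=NS(name="get_weather", arguments="")),
    NS(index=0, id=None, function=NS(name=None, arguments='{"city": ')),
    NS(index=0, id=None, function=NS(name=None, arguments='"Paris"}')),
]

tool_calls, current = [], None
for d in deltas:
    if d.index is not None and d.id:
        # First fragment carries the id and function name
        current = {"id": d.id, "type": "function",
                   "function": {"name": d.function.name, "arguments": ""}}
        while len(tool_calls) <= d.index:
            tool_calls.append(None)
        tool_calls[d.index] = current
    elif current is not None and d.function.arguments:
        # Later fragments only carry argument text; concatenate them
        current["function"]["arguments"] += d.function.arguments

print(tool_calls)  # one complete call with arguments '{"city": "Paris"}'
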
@@ -528,6 +571,53 @@ Your Goal: {self.goal}
     def __str__(self):
         return f"Agent(name='{self.name}', role='{self.role}', goal='{self.goal}')"
 
+    def _process_stream_response(self, messages, temperature, start_time, formatted_tools=None, reasoning_steps=False):
+        """Process streaming response and return final response"""
+        try:
+            # Create the response stream
+            response_stream = client.chat.completions.create(
+                model=self.llm,
+                messages=messages,
+                temperature=temperature,
+                tools=formatted_tools if formatted_tools else None,
+                stream=True
+            )
+
+            full_response_text = ""
+            reasoning_content = ""
+            chunks = []
+
+            # Create Live display with proper configuration
+            with Live(
+                display_generating("", start_time),
+                console=self.console,
+                refresh_per_second=4,
+                transient=True,
+                vertical_overflow="ellipsis",
+                auto_refresh=True
+            ) as live:
+                for chunk in response_stream:
+                    chunks.append(chunk)
+                    if chunk.choices[0].delta.content:
+                        full_response_text += chunk.choices[0].delta.content
+                        live.update(display_generating(full_response_text, start_time))
+
+                    # Update live display with reasoning content if enabled
+                    if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
+                        rc = chunk.choices[0].delta.reasoning_content
+                        if rc:
+                            reasoning_content += rc
+                            live.update(display_generating(f"{full_response_text}\n[Reasoning: {reasoning_content}]", start_time))
+
+            # Clear the last generating display with a blank line
+            self.console.print()
+            final_response = process_stream_chunks(chunks)
+            return final_response
+
+        except Exception as e:
+            display_error(f"Error in stream processing: {e}")
+            return None
+
     def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
         start_time = time.time()
         logging.debug(f"{self.name} sending messages to LLM: {messages}")
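
The new _process_stream_response wraps the chunk loop in a Rich Live display so partial output is redrawn in place while the stream is consumed, then hands the collected chunks to process_stream_chunks. A minimal, self-contained sketch of that Live pattern, using a fake token list instead of an OpenAI stream, looks like this:

# Illustrative only: the Rich Live redraw pattern, with hypothetical fake tokens.
import time
from rich.console import Console
from rich.live import Live
from rich.panel import Panel

console = Console()
fake_stream = ["Stream", "ing ", "tokens ", "one ", "at ", "a ", "time."]

text = ""
with Live(Panel(""), console=console, refresh_per_second=4,
          transient=True, vertical_overflow="ellipsis") as live:
    for token in fake_stream:
        text += token
        live.update(Panel(text))  # redraw in place as chunks arrive
        time.sleep(0.2)

console.print(text)  # transient=True clears the live view, so print the final text
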
@@ -554,20 +644,31 @@ Your Goal: {self.goal}
                 logging.warning(f"Tool {tool} not recognized")
 
         try:
-            initial_response = client.chat.completions.create(
-                model=self.llm,
-                messages=messages,
-                temperature=temperature,
-                tools=formatted_tools if formatted_tools else None,
-                stream=False
-            )
+            if stream:
+                # Process as streaming response with formatted tools
+                final_response = self._process_stream_response(
+                    messages,
+                    temperature,
+                    start_time,
+                    formatted_tools=formatted_tools if formatted_tools else None,
+                    reasoning_steps=reasoning_steps
+                )
+            else:
+                # Process as regular non-streaming response
+                final_response = client.chat.completions.create(
+                    model=self.llm,
+                    messages=messages,
+                    temperature=temperature,
+                    tools=formatted_tools if formatted_tools else None,
+                    stream=False
+                )
 
-            tool_calls = getattr(initial_response.choices[0].message, 'tool_calls', None)
+            tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
 
             if tool_calls:
                 messages.append({
-                    "role": "assistant",
-                    "content": initial_response.choices[0].message.content,
+                    "role": "assistant",
+                    "content": final_response.choices[0].message.content,
                     "tool_calls": tool_calls
                 })
 
@@ -590,55 +691,24 @@ Your Goal: {self.goal}
                         "content": results_str
                     })
 
-                if stream:
-                    response_stream = client.chat.completions.create(
-                        model=self.llm,
-                        messages=messages,
-                        temperature=temperature,
-                        stream=True
-                    )
-                    full_response_text = ""
-                    reasoning_content = ""
-                    chunks = []
-
-                    # Create Live display with proper configuration
-                    with Live(
-                        display_generating("", start_time),
-                        console=self.console,
-                        refresh_per_second=4,
-                        transient=True,
-                        vertical_overflow="ellipsis",
-                        auto_refresh=True
-                    ) as live:
-                        for chunk in response_stream:
-                            chunks.append(chunk)
-                            if chunk.choices[0].delta.content:
-                                full_response_text += chunk.choices[0].delta.content
-                                live.update(display_generating(full_response_text, start_time))
-
-                            # Update live display with reasoning content if enabled
-                            if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
-                                rc = chunk.choices[0].delta.reasoning_content
-                                if rc:
-                                    reasoning_content += rc
-                                    live.update(display_generating(f"{full_response_text}\n[Reasoning: {reasoning_content}]", start_time))
-
-                    # Clear the last generating display with a blank line
-                    self.console.print()
-
-                    final_response = process_stream_chunks(chunks)
-                    return final_response
-                else:
-                    if tool_calls:
+                # Get final response after tool calls
+                if stream:
+                    final_response = self._process_stream_response(
+                        messages,
+                        temperature,
+                        start_time,
+                        formatted_tools=formatted_tools if formatted_tools else None,
+                        reasoning_steps=reasoning_steps
+                    )
+                else:
                     final_response = client.chat.completions.create(
                         model=self.llm,
                         messages=messages,
                         temperature=temperature,
                         stream=False
                     )
-                    return final_response
-                else:
-                    return initial_response
+
+            return final_response
 
         except Exception as e:
             display_error(f"Error in chat completion: {e}")
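
Together, the two hunks above converge _chat_completion on a single two-pass flow: one completion that may return tool_calls, local execution of those tools with their results appended as "tool" messages, then a second completion (streamed or not) for the final answer. A reduced sketch of that flow against an OpenAI-compatible client follows; the model name and the get_time tool are hypothetical stand-ins, not package code.

# Illustrative only: two-pass tool-call flow with a hypothetical tool and model.
import json
from openai import OpenAI

client = OpenAI()

def get_time(city: str) -> str:
    return f"12:00 in {city}"  # stand-in tool implementation

tools = [{"type": "function", "function": {
    "name": "get_time",
    "description": "Get the current time for a city",
    "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
}}]
messages = [{"role": "user", "content": "What time is it in Paris?"}]

first = client.chat.completions.create(model="gpt-4o-mini", messages=messages, tools=tools)
tool_calls = first.choices[0].message.tool_calls

if tool_calls:
    messages.append(first.choices[0].message)  # echo the assistant turn with its tool_calls
    for tc in tool_calls:
        result = get_time(**json.loads(tc.function.arguments))
        messages.append({"role": "tool", "tool_call_id": tc.id, "content": result})
    # Second pass: let the model phrase the final answer from the tool results
    final = client.chat.completions.create(model="gpt-4o-mini", messages=messages)
    print(final.choices[0].message.content)
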
@@ -758,8 +828,7 @@ Your Goal: {self.goal}
 
             tool_calls = getattr(response.choices[0].message, 'tool_calls', None)
             response_text = response.choices[0].message.content.strip()
-
-            if tool_calls:
+            if tool_calls: ## TODO: Most likely this tool call is already called in _chat_completion, so maybe we can remove this.
                 messages.append({
                     "role": "assistant",
                     "content": response_text,
@@ -988,48 +1057,48 @@ Your Goal: {self.goal}
             )
 
             # Format tools if provided
-            formatted_tools = []
-            if tools:
-                for tool in tools:
-                    if isinstance(tool, str):
-                        tool_def = self._generate_tool_definition(tool)
-                        if tool_def:
-                            formatted_tools.append(tool_def)
-                    elif isinstance(tool, dict):
-                        formatted_tools.append(tool)
-                    elif hasattr(tool, "to_openai_tool"):
-                        formatted_tools.append(tool.to_openai_tool())
-                    elif callable(tool):
-                        formatted_tools.append(self._generate_tool_definition(tool.__name__))
-
-            # Create async OpenAI client
-            async_client = AsyncOpenAI()
-
-            # Make the API call based on the type of request
-            if tools:
-                response = await async_client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature,
-                    tools=formatted_tools
-                )
-                return await self._achat_completion(response, tools)
-            elif output_json or output_pydantic:
-                response = await async_client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature,
-                    response_format={"type": "json_object"}
-                )
-                # Return the raw response
-                return response.choices[0].message.content
-            else:
-                response = await async_client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature
-                )
-                return response.choices[0].message.content
+            formatted_tools = []
+            if tools:
+                for tool in tools:
+                    if isinstance(tool, str):
+                        tool_def = self._generate_tool_definition(tool)
+                        if tool_def:
+                            formatted_tools.append(tool_def)
+                    elif isinstance(tool, dict):
+                        formatted_tools.append(tool)
+                    elif hasattr(tool, "to_openai_tool"):
+                        formatted_tools.append(tool.to_openai_tool())
+                    elif callable(tool):
+                        formatted_tools.append(self._generate_tool_definition(tool.__name__))
+
+            # Create async OpenAI client
+            async_client = AsyncOpenAI()
+
+            # Make the API call based on the type of request
+            if tools:
+                response = await async_client.chat.completions.create(
+                    model=self.llm,
+                    messages=messages,
+                    temperature=temperature,
+                    tools=formatted_tools
+                )
+                return await self._achat_completion(response, tools)
+            elif output_json or output_pydantic:
+                response = await async_client.chat.completions.create(
+                    model=self.llm,
+                    messages=messages,
+                    temperature=temperature,
+                    response_format={"type": "json_object"}
+                )
+                # Return the raw response
+                return response.choices[0].message.content
+            else:
+                response = await async_client.chat.completions.create(
+                    model=self.llm,
+                    messages=messages,
+                    temperature=temperature
+                )
+                return response.choices[0].message.content
         except Exception as e:
             display_error(f"Error in chat completion: {e}")
             return None
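
The achat block shown above branches between a tools call, a JSON-mode call for output_json/output_pydantic, and a plain call, all on an AsyncOpenAI client. A standalone sketch of those call shapes, with a hypothetical model name and prompts, would be:

# Illustrative only: AsyncOpenAI call shapes, not the package's achat implementation.
import asyncio
from openai import AsyncOpenAI

async def main():
    client = AsyncOpenAI()

    # Plain completion
    resp = await client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Say hello."}],
        temperature=0.2,
    )
    print(resp.choices[0].message.content)

    # JSON-mode completion, as used for output_json / output_pydantic
    json_resp = await client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Return a JSON object with a 'greeting' key."}],
        response_format={"type": "json_object"},
    )
    print(json_resp.choices[0].message.content)

asyncio.run(main())
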
@@ -17,6 +17,8 @@ from ..main import (
 from rich.console import Console
 from rich.live import Live
 
+# TODO: Include in-build tool calling in LLM class
+# TODO: Restructure so that duplicate calls are not made (Sync with agent.py)
 class LLMContextLengthExceededException(Exception):
     """Raised when LLM context length is exceeded"""
     def __init__(self, message: str):
@@ -178,7 +180,8 @@ class LLM:
         """Enhanced get_response with all OpenAI-like features"""
         try:
             import litellm
-            reasoning_steps = kwargs.get('reasoning_steps', self.reasoning_steps)
+            # This below **kwargs** is passed to .completion() directly. so reasoning_steps has to be popped. OR find alternate best way of handling this.
+            reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
             # Disable litellm debug messages
             litellm.set_verbose = False
 
@@ -240,7 +243,7 @@ class LLM:
                         messages=messages,
                         temperature=temperature,
                         stream=False, # force non-streaming
-                        **kwargs
+                        **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
                     reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
                     response_text = resp["choices"][0]["message"]["content"]
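
The pop in get_response and the filtered dict comprehensions in the completion calls exist for the same reason: reasoning_steps is a praisonaiagents-level flag, and forwarding it inside **kwargs would hand an unknown argument to the underlying completion call. A small self-contained sketch (backend_completion is a hypothetical stand-in, not litellm) shows why consuming the flag before forwarding matters:

# Illustrative only: consume a local flag before forwarding **kwargs to a backend call.
def backend_completion(model, messages, temperature=0.2, stream=False):
    # Stand-in for the real completion function: it has no reasoning_steps parameter,
    # so passing that key through **kwargs would raise a TypeError here.
    return {"model": model, "stream": stream}

def get_response(prompt, **kwargs):
    reasoning_steps = kwargs.pop("reasoning_steps", False)  # consume the local flag
    safe_kwargs = {k: v for k, v in kwargs.items() if k != "reasoning_steps"}  # defensive filter
    resp = backend_completion(
        model="example-model",
        messages=[{"role": "user", "content": prompt}],
        **safe_kwargs,
    )
    return resp, reasoning_steps

print(get_response("hi", reasoning_steps=True, stream=False))
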
@@ -344,7 +347,7 @@ class LLM:
                         messages=messages,
                         temperature=temperature,
                         stream=False, # force non-streaming
-                        **kwargs
+                        **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
                     reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
                     response_text = resp["choices"][0]["message"]["content"]
@@ -435,7 +438,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         temperature=temperature,
                         stream=False, # Force non-streaming
                         response_format={"type": "json_object"},
-                        **kwargs
+                        **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
                     # Grab reflection text and optional reasoning
                     reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
@@ -469,7 +472,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         temperature=temperature,
                         stream=True,
                         response_format={"type": "json_object"},
-                        **kwargs
+                        **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     ):
                         if chunk and chunk.choices and chunk.choices[0].delta.content:
                             content = chunk.choices[0].delta.content
@@ -483,7 +486,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         temperature=temperature,
                         stream=True,
                         response_format={"type": "json_object"},
-                        **kwargs
+                        **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     ):
                         if chunk and chunk.choices and chunk.choices[0].delta.content:
                             reflection_text += chunk.choices[0].delta.content
@@ -557,7 +560,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         """Async version of get_response with identical functionality."""
         try:
             import litellm
-            reasoning_steps = kwargs.get('reasoning_steps', self.reasoning_steps)
+            reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
             litellm.set_verbose = False
 
             # Build messages list
@@ -669,7 +672,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         messages=messages,
                         temperature=temperature,
                         stream=False, # force non-streaming
-                        **kwargs
+                        **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
                     reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
                     response_text = resp["choices"][0]["message"]["content"]
@@ -731,7 +734,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     temperature=temperature,
                     stream=False,
                     tools=formatted_tools, # We safely pass tools here
-                    **kwargs
+                    **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                 )
                 # handle tool_calls from tool_response as usual...
                 tool_calls = tool_response.choices[0].message.get("tool_calls")
@@ -777,7 +780,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     temperature=temperature,
                     stream=False, # force non-streaming
                     tools=formatted_tools, # Include tools
-                    **kwargs
+                    **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                 )
                 reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
                 response_text = resp["choices"][0]["message"]["content"]
@@ -807,7 +810,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     temperature=temperature,
                     stream=True,
                     tools=formatted_tools,
-                    **kwargs
+                    **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                 ):
                     if chunk and chunk.choices and chunk.choices[0].delta.content:
                         content = chunk.choices[0].delta.content
@@ -821,7 +824,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     messages=messages,
                     temperature=temperature,
                     stream=True,
-                    **kwargs
+                    **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                 ):
                     if chunk and chunk.choices and chunk.choices[0].delta.content:
                         response_text += chunk.choices[0].delta.content
@@ -867,7 +870,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         temperature=temperature,
                         stream=False, # Force non-streaming
                         response_format={"type": "json_object"},
-                        **kwargs
+                        **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
                     # Grab reflection text and optional reasoning
                     reasoning_content = reflection_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
@@ -901,7 +904,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         temperature=temperature,
                         stream=True,
                         response_format={"type": "json_object"},
-                        **kwargs
+                        **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     ):
                         if chunk and chunk.choices and chunk.choices[0].delta.content:
                             content = chunk.choices[0].delta.content
@@ -915,7 +918,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         temperature=temperature,
                         stream=True,
                         response_format={"type": "json_object"},
-                        **kwargs
+                        **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     ):
                         if chunk and chunk.choices and chunk.choices[0].delta.content:
                             reflection_text += chunk.choices[0].delta.content
@@ -44,6 +44,8 @@ class Process:
         current_task = start_task
         visited_tasks = set()
         loop_data = {} # Store loop-specific data
+
+        # TODO: start task with loop feature is not available in aworkflow method
 
         while current_task:
             current_iter += 1
@@ -350,9 +352,10 @@ Provide a JSON with the structure:
                     if row: # Skip empty rows
                         task_desc = row[0] # Take first column
                         row_task = Task(
-                            description=task_desc, # Keep full row as description
+                            description=f"{start_task.description}\n{task_desc}" if start_task.description else task_desc,
                             agent=start_task.agent,
-                            name=task_desc, # Use first column as name
+                            name=f"{start_task.name}_{i+1}" if start_task.name else task_desc,
+                            expected_output=getattr(start_task, 'expected_output', None),
                             is_start=(i == 0),
                             task_type="task",
                             condition={
@@ -374,9 +377,10 @@ Provide a JSON with the structure:
                 previous_task = None
                 for i, line in enumerate(lines):
                     row_task = Task(
-                        description=line.strip(),
+                        description=f"{start_task.description}\n{line.strip()}" if start_task.description else line.strip(),
                         agent=start_task.agent,
-                        name=line.strip(),
+                        name=f"{start_task.name}_{i+1}" if start_task.name else line.strip(),
+                        expected_output=getattr(start_task, 'expected_output', None),
                         is_start=(i == 0),
                         task_type="task",
                         condition={
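
Both loop-splitting hunks now derive each generated subtask's description, name, and expected_output from the start task instead of reusing the raw row or line text. A simplified stand-in (this Task dataclass is not the package class) reproduces the naming scheme:

# Illustrative only: subtask naming/description derivation with a stand-in Task.
from dataclasses import dataclass
from typing import Optional

@dataclass
class Task:
    description: str
    name: str
    expected_output: Optional[str] = None
    is_start: bool = False

start_task = Task(description="Summarise each item", name="summarise",
                  expected_output="One-paragraph summary")
lines = ["item one", "item two", "item three"]

subtasks = [
    Task(
        description=f"{start_task.description}\n{line.strip()}" if start_task.description else line.strip(),
        name=f"{start_task.name}_{i+1}" if start_task.name else line.strip(),
        expected_output=getattr(start_task, "expected_output", None),
        is_start=(i == 0),
    )
    for i, line in enumerate(lines)
]
print([t.name for t in subtasks])  # ['summarise_1', 'summarise_2', 'summarise_3']
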
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: praisonaiagents
-Version: 0.0.53
+Version: 0.0.56
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
@@ -1,7 +1,7 @@
 praisonaiagents/__init__.py,sha256=JtPibbmeFv3meIb3vkKjckB0p7m-Vqt2RYPwOH8P41k,1228
 praisonaiagents/main.py,sha256=0kB9gn9meXtr4EIrdgA2lAioKIHCRJ61audsGDwuTm4,14428
 praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
-praisonaiagents/agent/agent.py,sha256=du4wbXOWr4TKeTdlHxfqtq3O7NoQNh5P94cBVF6Nj_M,54417
+praisonaiagents/agent/agent.py,sha256=VZPci7g_LAvTz7_rWvWAZ1JgCGKQc1vSfI6x0fqi2os,57598
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=94YPQl-hl-EPY6-Xk2Rj9wlIs9YtiLQbsutSOXWX8QI,36156
 praisonaiagents/agents/autoagents.py,sha256=bjC2O5oZmoJItJXIMPTWc2lsp_AJC9tMiTQOal2hwPA,13532
@@ -9,10 +9,10 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
 praisonaiagents/knowledge/knowledge.py,sha256=fQNREDiwdoisfIxJBLVkteXgq_8Gbypfc3UaZbxf5QY,13210
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=S-9GBQcOoXQ7mzT119N7k6QzqwPIHSrXqbzluYzGntQ,49533
+praisonaiagents/llm/llm.py,sha256=4QBFSi2c13qJmYqhwZS7XgiC8YL5K8lbxbUoqR9Y6xM,50481
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
-praisonaiagents/process/process.py,sha256=_1Nk37kOYakPaUWAJff86rP0ENyykXqMnhTp8E0efuE,30802
+praisonaiagents/process/process.py,sha256=gP3QQxxFO4oUw_HYLf8MoyWyaj_104LIL_AbwLiBxaU,31261
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
 praisonaiagents/task/task.py,sha256=ikFjzNm4WPYONSLtWA3uDGNIUx_TvXTeU5SukWoC66E,14271
 praisonaiagents/tools/__init__.py,sha256=-0lV5n5cG54vYW6REjXIfuJnCLKnfQIDlXsySCaPB9s,7347
@@ -35,7 +35,7 @@ praisonaiagents/tools/wikipedia_tools.py,sha256=pGko-f33wqXgxJTv8db7TbizY5XnzBQR
 praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxNMMs1A,17122
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
-praisonaiagents-0.0.53.dist-info/METADATA,sha256=DAmi3nqq0kER9xJhFPZ8e3JC_TCeSdH2zKzrrZyZCZ4,830
-praisonaiagents-0.0.53.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-praisonaiagents-0.0.53.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.53.dist-info/RECORD,,
+praisonaiagents-0.0.56.dist-info/METADATA,sha256=oh3zNT8tijRBVIJGlTGOlnMXtlKgTIWvILTKj1f5W2Y,830
+praisonaiagents-0.0.56.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+praisonaiagents-0.0.56.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.56.dist-info/RECORD,,