praisonaiagents 0.0.54__py3-none-any.whl → 0.0.56__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their respective registries. It is provided for informational purposes only.
praisonaiagents/agent/agent.py

@@ -97,36 +97,79 @@ def process_stream_chunks(chunks):
 
     content_list = []
     reasoning_list = []
+    tool_calls = []
+    current_tool_call = None
 
+    # First pass: Get initial tool call data
     for chunk in chunks:
         if not hasattr(chunk, "choices") or not chunk.choices:
             continue
 
-        # Track usage from each chunk
-        if hasattr(chunk, "usage"):
-            completion_tokens += getattr(chunk.usage, "completion_tokens", 0)
-            prompt_tokens += getattr(chunk.usage, "prompt_tokens", 0)
-
         delta = getattr(chunk.choices[0], "delta", None)
         if not delta:
             continue
-
+
+        # Handle content and reasoning
         if hasattr(delta, "content") and delta.content:
             content_list.append(delta.content)
         if hasattr(delta, "reasoning_content") and delta.reasoning_content:
             reasoning_list.append(delta.reasoning_content)
+
+        # Handle tool calls
+        if hasattr(delta, "tool_calls") and delta.tool_calls:
+            for tool_call_delta in delta.tool_calls:
+                if tool_call_delta.index is not None and tool_call_delta.id:
+                    # Found the initial tool call
+                    current_tool_call = {
+                        "id": tool_call_delta.id,
+                        "type": "function",
+                        "function": {
+                            "name": tool_call_delta.function.name,
+                            "arguments": ""
+                        }
+                    }
+                    while len(tool_calls) <= tool_call_delta.index:
+                        tool_calls.append(None)
+                    tool_calls[tool_call_delta.index] = current_tool_call
+                    current_tool_call = tool_calls[tool_call_delta.index]
+                elif current_tool_call is not None and hasattr(tool_call_delta.function, "arguments"):
+                    if tool_call_delta.function.arguments:
+                        current_tool_call["function"]["arguments"] += tool_call_delta.function.arguments
+
+    # Remove any None values and empty tool calls
+    tool_calls = [tc for tc in tool_calls if tc and tc["id"] and tc["function"]["name"]]
 
     combined_content = "".join(content_list) if content_list else ""
     combined_reasoning = "".join(reasoning_list) if reasoning_list else None
     finish_reason = getattr(last_chunk.choices[0], "finish_reason", None) if hasattr(last_chunk, "choices") and last_chunk.choices else None
 
+    # Create ToolCall objects
+    processed_tool_calls = []
+    if tool_calls:
+        try:
+            from openai.types.chat import ChatCompletionMessageToolCall
+            for tc in tool_calls:
+                tool_call = ChatCompletionMessageToolCall(
+                    id=tc["id"],
+                    type=tc["type"],
+                    function={
+                        "name": tc["function"]["name"],
+                        "arguments": tc["function"]["arguments"]
+                    }
+                )
+                processed_tool_calls.append(tool_call)
+        except Exception as e:
+            print(f"Error processing tool call: {e}")
+
     message = ChatCompletionMessage(
         content=combined_content,
-        reasoning_content=combined_reasoning
+        role="assistant",
+        reasoning_content=combined_reasoning,
+        tool_calls=processed_tool_calls if processed_tool_calls else None
     )
 
     choice = Choice(
-        finish_reason=finish_reason,
+        finish_reason=finish_reason or "tool_calls" if processed_tool_calls else None,
         index=0,
         message=message
     )
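As an aside, the accumulation pattern added above can be exercised in isolation: the first delta for a given index carries the call id and function name, and every later delta for that index appends a fragment of the JSON arguments. A minimal, self-contained sketch of that pattern follows — the FunctionDelta/ToolCallDelta dataclasses are hypothetical stand-ins for OpenAI's streaming delta objects, not types from this package:

# Sketch of stitching streamed tool-call deltas back together.
# FunctionDelta/ToolCallDelta are hypothetical stand-ins for the
# OpenAI streaming chunk shapes, not praisonaiagents or openai types.
from dataclasses import dataclass
from typing import Optional

@dataclass
class FunctionDelta:
    name: Optional[str] = None
    arguments: Optional[str] = None

@dataclass
class ToolCallDelta:
    index: int
    id: Optional[str] = None
    function: Optional[FunctionDelta] = None

def accumulate(deltas):
    tool_calls = []
    for d in deltas:
        if d.id:  # first fragment: carries id and function name
            while len(tool_calls) <= d.index:
                tool_calls.append(None)
            tool_calls[d.index] = {"id": d.id, "type": "function",
                                   "function": {"name": d.function.name, "arguments": ""}}
        elif d.function and d.function.arguments:  # later fragments: argument text only
            tool_calls[d.index]["function"]["arguments"] += d.function.arguments
    return [tc for tc in tool_calls if tc]

deltas = [
    ToolCallDelta(0, "call_1", FunctionDelta(name="get_weather")),
    ToolCallDelta(0, None, FunctionDelta(arguments='{"city": ')),
    ToolCallDelta(0, None, FunctionDelta(arguments='"Paris"}')),
]
print(accumulate(deltas))
# [{'id': 'call_1', 'type': 'function', 'function': {'name': 'get_weather', 'arguments': '{"city": "Paris"}'}}]

Buffering by index matters because a model may interleave fragments of several parallel tool calls in one stream.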
@@ -528,6 +571,53 @@ Your Goal: {self.goal}
     def __str__(self):
         return f"Agent(name='{self.name}', role='{self.role}', goal='{self.goal}')"
 
+    def _process_stream_response(self, messages, temperature, start_time, formatted_tools=None, reasoning_steps=False):
+        """Process streaming response and return final response"""
+        try:
+            # Create the response stream
+            response_stream = client.chat.completions.create(
+                model=self.llm,
+                messages=messages,
+                temperature=temperature,
+                tools=formatted_tools if formatted_tools else None,
+                stream=True
+            )
+
+            full_response_text = ""
+            reasoning_content = ""
+            chunks = []
+
+            # Create Live display with proper configuration
+            with Live(
+                display_generating("", start_time),
+                console=self.console,
+                refresh_per_second=4,
+                transient=True,
+                vertical_overflow="ellipsis",
+                auto_refresh=True
+            ) as live:
+                for chunk in response_stream:
+                    chunks.append(chunk)
+                    if chunk.choices[0].delta.content:
+                        full_response_text += chunk.choices[0].delta.content
+                        live.update(display_generating(full_response_text, start_time))
+
+                    # Update live display with reasoning content if enabled
+                    if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
+                        rc = chunk.choices[0].delta.reasoning_content
+                        if rc:
+                            reasoning_content += rc
+                            live.update(display_generating(f"{full_response_text}\n[Reasoning: {reasoning_content}]", start_time))
+
+            # Clear the last generating display with a blank line
+            self.console.print()
+            final_response = process_stream_chunks(chunks)
+            return final_response
+
+        except Exception as e:
+            display_error(f"Error in stream processing: {e}")
+            return None
+
     def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
         start_time = time.time()
         logging.debug(f"{self.name} sending messages to LLM: {messages}")
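The new _process_stream_response helper centralizes the Rich Live display loop that previously lived inline in _chat_completion. A minimal sketch of the same render-as-you-stream idea, with a hypothetical fake_stream() generator standing in for the OpenAI chunk iterator and a plain string in place of display_generating(...):

# Sketch of the Rich Live streaming-display loop used by
# _process_stream_response; fake_stream() is a hypothetical stand-in.
import time
from rich.console import Console
from rich.live import Live

def fake_stream():
    for word in ["Streaming ", "one ", "chunk ", "at ", "a ", "time."]:
        time.sleep(0.2)
        yield word

console = Console()
text = ""
with Live("", console=console, refresh_per_second=4, transient=True) as live:
    for chunk in fake_stream():
        text += chunk
        live.update(text)  # redraw the accumulated text in place
console.print(text)  # transient=True erased the live view; print the final result

transient=True erases the live region on exit, which is why the helper prints once more after the with block.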
@@ -554,20 +644,31 @@ Your Goal: {self.goal}
                     logging.warning(f"Tool {tool} not recognized")
 
         try:
-            initial_response = client.chat.completions.create(
-                model=self.llm,
-                messages=messages,
-                temperature=temperature,
-                tools=formatted_tools if formatted_tools else None,
-                stream=False
-            )
+            if stream:
+                # Process as streaming response with formatted tools
+                final_response = self._process_stream_response(
+                    messages,
+                    temperature,
+                    start_time,
+                    formatted_tools=formatted_tools if formatted_tools else None,
+                    reasoning_steps=reasoning_steps
+                )
+            else:
+                # Process as regular non-streaming response
+                final_response = client.chat.completions.create(
+                    model=self.llm,
+                    messages=messages,
+                    temperature=temperature,
+                    tools=formatted_tools if formatted_tools else None,
+                    stream=False
+                )
 
-            tool_calls = getattr(initial_response.choices[0].message, 'tool_calls', None)
+            tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
 
             if tool_calls:
                 messages.append({
-                    "role": "assistant",
-                    "content": initial_response.choices[0].message.content,
+                    "role": "assistant",
+                    "content": final_response.choices[0].message.content,
                     "tool_calls": tool_calls
                 })
 
@@ -590,55 +691,24 @@ Your Goal: {self.goal}
                         "content": results_str
                     })
 
-                if stream:
-                    response_stream = client.chat.completions.create(
-                        model=self.llm,
-                        messages=messages,
-                        temperature=temperature,
-                        stream=True
-                    )
-                    full_response_text = ""
-                    reasoning_content = ""
-                    chunks = []
-
-                    # Create Live display with proper configuration
-                    with Live(
-                        display_generating("", start_time),
-                        console=self.console,
-                        refresh_per_second=4,
-                        transient=True,
-                        vertical_overflow="ellipsis",
-                        auto_refresh=True
-                    ) as live:
-                        for chunk in response_stream:
-                            chunks.append(chunk)
-                            if chunk.choices[0].delta.content:
-                                full_response_text += chunk.choices[0].delta.content
-                                live.update(display_generating(full_response_text, start_time))
-
-                            # Update live display with reasoning content if enabled
-                            if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
-                                rc = chunk.choices[0].delta.reasoning_content
-                                if rc:
-                                    reasoning_content += rc
-                                    live.update(display_generating(f"{full_response_text}\n[Reasoning: {reasoning_content}]", start_time))
-
-                    # Clear the last generating display with a blank line
-                    self.console.print()
-
-                    final_response = process_stream_chunks(chunks)
-                    return final_response
-                else:
-                    if tool_calls:
+                # Get final response after tool calls
+                if stream:
+                    final_response = self._process_stream_response(
+                        messages,
+                        temperature,
+                        start_time,
+                        formatted_tools=formatted_tools if formatted_tools else None,
+                        reasoning_steps=reasoning_steps
+                    )
+                else:
                     final_response = client.chat.completions.create(
                         model=self.llm,
                         messages=messages,
                         temperature=temperature,
                         stream=False
                     )
-                        return final_response
-                    else:
-                        return initial_response
+
+                return final_response
 
         except Exception as e:
             display_error(f"Error in chat completion: {e}")
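For reference, the control flow this refactor preserves is the standard two-step tool-call round trip: the first completion may return tool_calls, the tool results are appended as role "tool" messages, and a second completion produces the final answer. A hedged sketch with the plain OpenAI client — the model name and get_weather tool are illustrative only, and an OPENAI_API_KEY is assumed:

# Sketch of the two-step tool-call round trip that _chat_completion
# implements; the weather tool and model name are illustrative.
import json
from openai import OpenAI

client = OpenAI()
messages = [{"role": "user", "content": "What's the weather in Paris?"}]
tools = [{"type": "function", "function": {
    "name": "get_weather",
    "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
}}]

first = client.chat.completions.create(model="gpt-4o-mini", messages=messages, tools=tools)
tool_calls = first.choices[0].message.tool_calls
if tool_calls:
    # Echo the assistant turn (with its tool_calls), then one "tool" message per call
    messages.append({"role": "assistant", "content": first.choices[0].message.content,
                     "tool_calls": tool_calls})
    for tc in tool_calls:
        args = json.loads(tc.function.arguments)
        result = f"Sunny in {args['city']}"  # stand-in for a real tool implementation
        messages.append({"role": "tool", "tool_call_id": tc.id, "content": result})
    final = client.chat.completions.create(model="gpt-4o-mini", messages=messages)
    print(final.choices[0].message.content)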
@@ -758,8 +828,7 @@ Your Goal: {self.goal}
 
                 tool_calls = getattr(response.choices[0].message, 'tool_calls', None)
                 response_text = response.choices[0].message.content.strip()
-
-                if tool_calls:
+                if tool_calls: ## TODO: Most likely this tool call is already called in _chat_completion, so maybe we can remove this.
                     messages.append({
                         "role": "assistant",
                         "content": response_text,
praisonaiagents/llm/llm.py

@@ -17,6 +17,8 @@ from ..main import (
 from rich.console import Console
 from rich.live import Live
 
+# TODO: Include in-build tool calling in LLM class
+# TODO: Restructure so that duplicate calls are not made (Sync with agent.py)
 class LLMContextLengthExceededException(Exception):
     """Raised when LLM context length is exceeded"""
     def __init__(self, message: str):
praisonaiagents/process/process.py

@@ -44,6 +44,8 @@ class Process:
         current_task = start_task
         visited_tasks = set()
         loop_data = {} # Store loop-specific data
+
+        # TODO: start task with loop feature is not available in aworkflow method
 
         while current_task:
             current_iter += 1
@@ -350,9 +352,10 @@ Provide a JSON with the structure:
                     if row: # Skip empty rows
                         task_desc = row[0] # Take first column
                         row_task = Task(
-                            description=task_desc, # Keep full row as description
+                            description=f"{start_task.description}\n{task_desc}" if start_task.description else task_desc,
                             agent=start_task.agent,
-                            name=task_desc, # Use first column as name
+                            name=f"{start_task.name}_{i+1}" if start_task.name else task_desc,
+                            expected_output=getattr(start_task, 'expected_output', None),
                             is_start=(i == 0),
                             task_type="task",
                             condition={
@@ -374,9 +377,10 @@ Provide a JSON with the structure:
                 previous_task = None
                 for i, line in enumerate(lines):
                     row_task = Task(
-                        description=line.strip(),
+                        description=f"{start_task.description}\n{line.strip()}" if start_task.description else line.strip(),
                         agent=start_task.agent,
-                        name=line.strip(),
+                        name=f"{start_task.name}_{i+1}" if start_task.name else line.strip(),
+                        expected_output=getattr(start_task, 'expected_output', None),
                         is_start=(i == 0),
                         task_type="task",
                         condition={
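The net effect of the two Task(...) changes above: each generated row task now inherits the start task's description (prefixed), gets a stable numbered name instead of reusing the raw row text, and carries the start task's expected_output forward. A small sketch of the derived fields — SimpleNamespace stands in for the real Task/start task objects:

# Sketch of how the loop now derives per-row task fields from start_task;
# SimpleNamespace is a stand-in for the real Task objects.
from types import SimpleNamespace

start_task = SimpleNamespace(name="review", description="Review each item",
                             expected_output="One-line verdict")
rows = [["item A"], ["item B"]]

for i, row in enumerate(rows):
    task_desc = row[0]
    derived = {
        "description": f"{start_task.description}\n{task_desc}" if start_task.description else task_desc,
        "name": f"{start_task.name}_{i+1}" if start_task.name else task_desc,
        "expected_output": getattr(start_task, "expected_output", None),
        "is_start": (i == 0),
    }
    print(derived)
# first row: {'description': 'Review each item\nitem A', 'name': 'review_1', 'expected_output': 'One-line verdict', 'is_start': True}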
praisonaiagents-0.0.56.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: praisonaiagents
-Version: 0.0.54
+Version: 0.0.56
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
praisonaiagents-0.0.56.dist-info/RECORD

@@ -1,7 +1,7 @@
 praisonaiagents/__init__.py,sha256=JtPibbmeFv3meIb3vkKjckB0p7m-Vqt2RYPwOH8P41k,1228
 praisonaiagents/main.py,sha256=0kB9gn9meXtr4EIrdgA2lAioKIHCRJ61audsGDwuTm4,14428
 praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
-praisonaiagents/agent/agent.py,sha256=Rd2ZCToraAoe57UDT1JfrB03ffRKtZ-Tct9avFcZyT4,54257
+praisonaiagents/agent/agent.py,sha256=VZPci7g_LAvTz7_rWvWAZ1JgCGKQc1vSfI6x0fqi2os,57598
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=94YPQl-hl-EPY6-Xk2Rj9wlIs9YtiLQbsutSOXWX8QI,36156
 praisonaiagents/agents/autoagents.py,sha256=bjC2O5oZmoJItJXIMPTWc2lsp_AJC9tMiTQOal2hwPA,13532
@@ -9,10 +9,10 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
 praisonaiagents/knowledge/knowledge.py,sha256=fQNREDiwdoisfIxJBLVkteXgq_8Gbypfc3UaZbxf5QY,13210
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=M6xh9cuO0KZjzpAkHZrnktxw4eCmXLymoZqMoXeq-0U,50352
+praisonaiagents/llm/llm.py,sha256=4QBFSi2c13qJmYqhwZS7XgiC8YL5K8lbxbUoqR9Y6xM,50481
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
-praisonaiagents/process/process.py,sha256=_1Nk37kOYakPaUWAJff86rP0ENyykXqMnhTp8E0efuE,30802
+praisonaiagents/process/process.py,sha256=gP3QQxxFO4oUw_HYLf8MoyWyaj_104LIL_AbwLiBxaU,31261
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
 praisonaiagents/task/task.py,sha256=ikFjzNm4WPYONSLtWA3uDGNIUx_TvXTeU5SukWoC66E,14271
 praisonaiagents/tools/__init__.py,sha256=-0lV5n5cG54vYW6REjXIfuJnCLKnfQIDlXsySCaPB9s,7347
@@ -35,7 +35,7 @@ praisonaiagents/tools/wikipedia_tools.py,sha256=pGko-f33wqXgxJTv8db7TbizY5XnzBQR
 praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxNMMs1A,17122
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
-praisonaiagents-0.0.54.dist-info/METADATA,sha256=Zfzek1Y53OzW35U-lLAX5mTpdS0xxV57mCDdHhcSfYo,830
-praisonaiagents-0.0.54.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-praisonaiagents-0.0.54.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.54.dist-info/RECORD,,
+praisonaiagents-0.0.56.dist-info/METADATA,sha256=oh3zNT8tijRBVIJGlTGOlnMXtlKgTIWvILTKj1f5W2Y,830
+praisonaiagents-0.0.56.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+praisonaiagents-0.0.56.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.56.dist-info/RECORD,,