praisonaiagents 0.0.49__py3-none-any.whl → 0.0.51__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only.
praisonaiagents/agent/agent.py CHANGED
@@ -21,10 +21,137 @@ from ..main import (
 )
 import inspect
 import uuid
+from dataclasses import dataclass
 
 if TYPE_CHECKING:
     from ..task.task import Task
 
+@dataclass
+class ChatCompletionMessage:
+    content: str
+    role: str = "assistant"
+    refusal: Optional[str] = None
+    audio: Optional[str] = None
+    function_call: Optional[dict] = None
+    tool_calls: Optional[List] = None
+    reasoning_content: Optional[str] = None
+
+@dataclass
+class Choice:
+    finish_reason: Optional[str]
+    index: int
+    message: ChatCompletionMessage
+    logprobs: Optional[dict] = None
+
+@dataclass
+class CompletionTokensDetails:
+    accepted_prediction_tokens: Optional[int] = None
+    audio_tokens: Optional[int] = None
+    reasoning_tokens: Optional[int] = None
+    rejected_prediction_tokens: Optional[int] = None
+
+@dataclass
+class PromptTokensDetails:
+    audio_tokens: Optional[int] = None
+    cached_tokens: int = 0
+
+@dataclass
+class CompletionUsage:
+    completion_tokens: int = 0
+    prompt_tokens: int = 0
+    total_tokens: int = 0
+    completion_tokens_details: Optional[CompletionTokensDetails] = None
+    prompt_tokens_details: Optional[PromptTokensDetails] = None
+    prompt_cache_hit_tokens: int = 0
+    prompt_cache_miss_tokens: int = 0
+
+@dataclass
+class ChatCompletion:
+    id: str
+    choices: List[Choice]
+    created: int
+    model: str
+    object: str = "chat.completion"
+    system_fingerprint: Optional[str] = None
+    service_tier: Optional[str] = None
+    usage: Optional[CompletionUsage] = None
+
+def process_stream_chunks(chunks):
+    """Process streaming chunks into combined response"""
+    if not chunks:
+        return None
+
+    try:
+        first_chunk = chunks[0]
+        last_chunk = chunks[-1]
+
+        # Basic metadata
+        id = getattr(first_chunk, "id", None)
+        created = getattr(first_chunk, "created", None)
+        model = getattr(first_chunk, "model", None)
+        system_fingerprint = getattr(first_chunk, "system_fingerprint", None)
+
+        # Track usage
+        completion_tokens = 0
+        prompt_tokens = 0
+
+        content_list = []
+        reasoning_list = []
+
+        for chunk in chunks:
+            if not hasattr(chunk, "choices") or not chunk.choices:
+                continue
+
+            # Track usage from each chunk
+            if hasattr(chunk, "usage"):
+                completion_tokens += getattr(chunk.usage, "completion_tokens", 0)
+                prompt_tokens += getattr(chunk.usage, "prompt_tokens", 0)
+
+            delta = getattr(chunk.choices[0], "delta", None)
+            if not delta:
+                continue
+
+            if hasattr(delta, "content") and delta.content:
+                content_list.append(delta.content)
+            if hasattr(delta, "reasoning_content") and delta.reasoning_content:
+                reasoning_list.append(delta.reasoning_content)
+
+        combined_content = "".join(content_list) if content_list else ""
+        combined_reasoning = "".join(reasoning_list) if reasoning_list else None
+        finish_reason = getattr(last_chunk.choices[0], "finish_reason", None) if hasattr(last_chunk, "choices") and last_chunk.choices else None
+
+        message = ChatCompletionMessage(
+            content=combined_content,
+            reasoning_content=combined_reasoning
+        )
+
+        choice = Choice(
+            finish_reason=finish_reason,
+            index=0,
+            message=message
+        )
+
+        usage = CompletionUsage(
+            completion_tokens=completion_tokens,
+            prompt_tokens=prompt_tokens,
+            total_tokens=completion_tokens + prompt_tokens,
+            completion_tokens_details=CompletionTokensDetails(),
+            prompt_tokens_details=PromptTokensDetails()
+        )
+
+        return ChatCompletion(
+            id=id,
+            choices=[choice],
+            created=created,
+            model=model,
+            system_fingerprint=system_fingerprint,
+            usage=usage
+        )
+
+    except Exception as e:
+        print(f"Error processing chunks: {e}")
+        return None
+
 class Agent:
     def _generate_tool_definition(self, function_name):
         """
@@ -297,7 +424,7 @@ Your Goal: {self.goal}
         self.agent_id = str(uuid.uuid4())
 
         # Store user_id
-        self.user_id = user_id
+        self.user_id = user_id or "praison"
 
         # Check if knowledge parameter has any values
         if not knowledge:
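
The user_id or "praison" fallback gives every agent a default user id. Note that or substitutes the default for any falsy value, not only None:

    assert (None or "praison") == "praison"
    assert ("" or "praison") == "praison"     # empty string also falls back
    assert ("alice" or "praison") == "alice"
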
@@ -399,7 +526,7 @@ Your Goal: {self.goal}
     def __str__(self):
         return f"Agent(name='{self.name}', role='{self.role}', goal='{self.goal}')"
 
-    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True):
+    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, show_reasoning=False):
         start_time = time.time()
         logging.debug(f"{self.name} sending messages to LLM: {messages}")
 
@@ -469,30 +596,35 @@ Your Goal: {self.goal}
                     stream=True
                 )
                 full_response_text = ""
+                reasoning_content = ""
+                chunks = []
 
                 # Create Live display with proper configuration
                 with Live(
                     display_generating("", start_time),
                     console=self.console,
                     refresh_per_second=4,
-                    transient=True, # Changed to False to preserve output
+                    transient=True,
                     vertical_overflow="ellipsis",
                     auto_refresh=True
                 ) as live:
                     for chunk in response_stream:
+                        chunks.append(chunk)
                         if chunk.choices[0].delta.content:
                             full_response_text += chunk.choices[0].delta.content
                             live.update(display_generating(full_response_text, start_time))
+
+                        # Update live display with reasoning content if enabled
+                        if show_reasoning and hasattr(chunk.choices[0].delta, "reasoning_content"):
+                            rc = chunk.choices[0].delta.reasoning_content
+                            if rc:
+                                reasoning_content += rc
+                                live.update(display_generating(f"{full_response_text}\n[Reasoning: {reasoning_content}]", start_time))
 
                 # Clear the last generating display with a blank line
                 self.console.print()
 
-                final_response = client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature,
-                    stream=False
-                )
+                final_response = process_stream_chunks(chunks)
                 return final_response
             else:
                 if tool_calls:
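
Two things change in the streaming branch: every chunk is now collected, and the final response is assembled locally by process_stream_chunks(chunks) instead of issuing a second, non-streaming API call, which removes a duplicate round trip per streamed reply. The Rich Live pattern used here can be sketched in isolation (plain strings stand in for display_generating, which presumably returns a renderable panel):

    import time
    from rich.console import Console
    from rich.live import Live

    console = Console()
    text = ""
    with Live("", console=console, refresh_per_second=4, transient=True) as live:
        for token in ["streamed ", "tokens ", "render ", "in place"]:
            text += token
            live.update(token and text)  # repaint the partial response
            time.sleep(0.25)
    console.print(text)  # transient=True wiped the live region on exit
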
@@ -510,7 +642,7 @@ Your Goal: {self.goal}
             display_error(f"Error in chat completion: {e}")
             return None
 
-    def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None):
+    def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, show_reasoning=False):
         # Search for existing knowledge if any knowledge is provided
         if self.knowledge:
             search_results = self.knowledge.search(prompt, agent_id=self.agent_id)
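
With the flag threaded through chat(), callers can opt in to seeing reasoning tokens as they stream. A hypothetical call (constructor arguments abbreviated; "deepseek-reasoner" stands in for any model that emits reasoning_content deltas):

    from praisonaiagents import Agent

    agent = Agent(
        name="Analyst",
        role="Researcher",
        goal="Answer questions with visible reasoning",
        llm="deepseek-reasoner",  # assumed: a reasoning-capable model id
    )
    reply = agent.chat("Why is the sky blue?", show_reasoning=True)
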
@@ -616,7 +748,7 @@ Your Goal: {self.goal}
                 agent_tools=agent_tools
             )
 
-        response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None)
+        response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, show_reasoning=show_reasoning)
         if not response:
             return None
 
@@ -749,8 +881,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             cleaned = cleaned[:-3].strip()
         return cleaned
 
-    async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None):
-        """Async version of chat method"""
+    async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, show_reasoning=False):
+        """Async version of chat method. TODO: Requires Syncing with chat method."""
         try:
             # Search for existing knowledge if any knowledge is provided
             if self.knowledge:
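
achat accepts the same flag; a minimal async sketch, reusing the hypothetical agent from the earlier example:

    import asyncio

    async def main():
        reply = await agent.achat("Summarise today's findings", show_reasoning=True)
        print(reply)

    asyncio.run(main())
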
@@ -781,7 +913,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 agent_name=self.name,
                 agent_role=self.role,
                 agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
-                execute_tool_fn=self.execute_tool_async
+                execute_tool_fn=self.execute_tool_async,
+                show_reasoning=show_reasoning
             )
 
         self.chat_history.append({"role": "user", "content": prompt})
@@ -896,7 +1029,7 @@ Your Goal: {self.goal}
             display_error(f"Error in achat: {e}")
             return None
 
-    async def _achat_completion(self, response, tools):
+    async def _achat_completion(self, response, tools, show_reasoning=False):
         """Async version of _chat_completion method"""
         try:
             message = response.choices[0].message
@@ -942,9 +1075,39 @@ Your Goal: {self.goal}
             final_response = await async_client.chat.completions.create(
                 model=self.llm,
                 messages=messages,
-                temperature=0.2
+                temperature=0.2,
+                stream=True
             )
-            return final_response.choices[0].message.content
+            full_response_text = ""
+            reasoning_content = ""
+            chunks = []
+            start_time = time.time()
+
+            with Live(
+                display_generating("", start_time),
+                console=self.console,
+                refresh_per_second=4,
+                transient=True,
+                vertical_overflow="ellipsis",
+                auto_refresh=True
+            ) as live:
+                async for chunk in final_response:
+                    chunks.append(chunk)
+                    if chunk.choices[0].delta.content:
+                        full_response_text += chunk.choices[0].delta.content
+                        live.update(display_generating(full_response_text, start_time))
+
+                    if show_reasoning and hasattr(chunk.choices[0].delta, "reasoning_content"):
+                        rc = chunk.choices[0].delta.reasoning_content
+                        if rc:
+                            reasoning_content += rc
+                            live.update(display_generating(f"{full_response_text}\n[Reasoning: {reasoning_content}]", start_time))
+
+            self.console.print()
+
+            final_response = process_stream_chunks(chunks)
+            return final_response.choices[0].message.content if final_response else full_response_text
+
         except Exception as e:
             display_error(f"Error in final chat completion: {e}")
             return formatted_results
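
_achat_completion now mirrors the sync path: stream, collect chunks, rebuild the response locally. Because process_stream_chunks returns None on failure, the raw accumulated text serves as a fallback; the guard amounts to this hypothetical helper:

    def best_effort_content(chunks, accumulated_text):
        # Hypothetical helper: prefer the structured response, else raw text.
        response = process_stream_chunks(chunks)
        return response.choices[0].message.content if response else accumulated_text
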
@@ -952,7 +1115,7 @@ Your Goal: {self.goal}
             return None
         except Exception as e:
             display_error(f"Error in _achat_completion: {e}")
-            return None
+            return None
 
     async def astart(self, prompt: str, **kwargs):
         """Async version of start method"""
praisonaiagents/main.py CHANGED
@@ -353,7 +353,7 @@ class ReflectionOutput(BaseModel):
     reflection: str
     satisfactory: Literal["yes", "no"]
 
-client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
+client = OpenAI(api_key=(os.environ["OPENAI_API_KEY"] if os.environ.get("OPENAI_API_KEY") else "xxxx"))
 
 class TaskOutput(BaseModel):
     description: str
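
The openai v1 client raises at construction time when api_key resolves to None, so the old line made main.py unimportable without OPENAI_API_KEY set; the placeholder key presumably defers the failure to the first actual request (useful when the OpenAI client is never exercised, e.g. with a local provider). The conditional is equivalent to the shorter or form, including for an empty-string value:

    import os

    api_key = os.environ.get("OPENAI_API_KEY") or "xxxx"
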
praisonaiagents-0.0.49.dist-info/METADATA → praisonaiagents-0.0.51.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: praisonaiagents
-Version: 0.0.49
+Version: 0.0.51
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
praisonaiagents-0.0.49.dist-info/RECORD → praisonaiagents-0.0.51.dist-info/RECORD RENAMED
@@ -1,7 +1,7 @@
 praisonaiagents/__init__.py,sha256=JtPibbmeFv3meIb3vkKjckB0p7m-Vqt2RYPwOH8P41k,1228
-praisonaiagents/main.py,sha256=wcu0gVfGfY43ROBD2iSNPdVrRDh2wF4grq7zor_9_14,14382
+praisonaiagents/main.py,sha256=0kB9gn9meXtr4EIrdgA2lAioKIHCRJ61audsGDwuTm4,14428
 praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
-praisonaiagents/agent/agent.py,sha256=IuWrshHW66K5kQLeJGGSvN9zSbvsTA_j0iEbeWk2gB0,47260
+praisonaiagents/agent/agent.py,sha256=5_IEyIrlQN75a9zJeZdWRMTvuMNNIofwttzo2KF9lyM,53584
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=PRqBEUqRadVLBoDd-tgne5fVB87bR6P9qOgvDdjS-dY,37028
 praisonaiagents/agents/autoagents.py,sha256=bjC2O5oZmoJItJXIMPTWc2lsp_AJC9tMiTQOal2hwPA,13532
@@ -35,7 +35,7 @@ praisonaiagents/tools/wikipedia_tools.py,sha256=pGko-f33wqXgxJTv8db7TbizY5XnzBQR
 praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxNMMs1A,17122
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
-praisonaiagents-0.0.49.dist-info/METADATA,sha256=GZR-GRcVn8qYBMTDmz19wuM-AdZ6Uj3WQbWMxil4tO0,830
-praisonaiagents-0.0.49.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-praisonaiagents-0.0.49.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.49.dist-info/RECORD,,
+praisonaiagents-0.0.51.dist-info/METADATA,sha256=W6SXNFt4RaQFloUFqjzpEFAPMmucqWM_OEtoU4hBFAE,830
+praisonaiagents-0.0.51.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+praisonaiagents-0.0.51.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.51.dist-info/RECORD,,