praisonaiagents-0.0.7-py3-none-any.whl → praisonaiagents-0.0.8-py3-none-any.whl

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
praisonaiagents/agent/agent.py

@@ -102,7 +102,7 @@ class Agent:
         role: str,
         goal: str,
         backstory: str,
-        llm: Optional[Union[str, Any]] = "gpt-4o-mini",
+        llm: Optional[Union[str, Any]] = "gpt-4o",
         tools: Optional[List[Any]] = None,
         function_calling_llm: Optional[Any] = None,
         max_iter: int = 20,
@@ -125,7 +125,9 @@ class Agent:
         use_system_prompt: Optional[bool] = True,
         markdown: bool = True,
         self_reflect: bool = True,
-        max_reflection_iter: int = 3
+        max_reflect: int = 3,
+        min_reflect: int = 1,
+        reflect_llm: Optional[str] = None
     ):
         self.name = name
         self.role = role
@@ -155,8 +157,9 @@ class Agent:
         self.chat_history = []
         self.markdown = markdown
         self.self_reflect = self_reflect
-        self.max_reflection_iter = max_reflection_iter
-
+        self.max_reflect = max_reflect
+        self.min_reflect = min_reflect
+        self.reflect_llm = reflect_llm
     def execute_tool(self, function_name, arguments):
         """
         Execute a tool dynamically based on the function name and arguments.
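Taken together, the constructor changes rename max_reflection_iter to max_reflect, add a min_reflect floor, and introduce an optional reflect_llm for the self-reflection step. A hedged usage sketch of the 0.0.8 signature follows; the argument values are illustrative, and the import path assumes Agent is re-exported from the package root (as praisonaiagents/__init__.py suggests).

# Illustrative only: argument values are made up; parameter names come from the diff above.
from praisonaiagents import Agent

agent = Agent(
    name="Researcher",
    role="Research Analyst",
    goal="Summarise recent papers on self-reflection in LLM agents",
    backstory="A careful analyst who double-checks its own answers",
    llm="gpt-4o",               # new default model in 0.0.8 (was gpt-4o-mini)
    self_reflect=True,
    min_reflect=1,              # reflect at least once before accepting a response
    max_reflect=3,              # stop reflecting after three rounds
    reflect_llm="gpt-4o-mini",  # optional: run the reflection step on a different model
)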
@@ -371,7 +374,7 @@ class Agent:
 
                 try:
                     reflection_response = client.beta.chat.completions.parse(
-                        model=self.llm,
+                        model=self.reflect_llm if self.reflect_llm else self.llm,
                         messages=messages,
                         temperature=temperature,
                         response_format=ReflectionOutput
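The reflection call now prefers reflect_llm when it is set and falls back to the agent's main llm. Below is a minimal sketch of that structured call in isolation, assuming an OpenAI client and a Pydantic ReflectionOutput model; the field types are assumptions inferred from how the diff reads them (reflection, plus a satisfactory value compared against the string "yes").

# Sketch only, not the library's exact code.
from openai import OpenAI
from pydantic import BaseModel

class ReflectionOutput(BaseModel):
    reflection: str
    satisfactory: str  # the diff compares this against the literal "yes"

client = OpenAI()

def run_reflection(messages, llm="gpt-4o", reflect_llm=None, temperature=0.2):
    # Use the dedicated reflection model when configured, else the main model.
    response = client.beta.chat.completions.parse(
        model=reflect_llm if reflect_llm else llm,
        messages=messages,
        temperature=temperature,
        response_format=ReflectionOutput,
    )
    return response.choices[0].message.parsed  # a ReflectionOutput instance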
@@ -380,35 +383,42 @@ class Agent:
                     reflection_output = reflection_response.choices[0].message.parsed
 
                     if self.verbose:
-                        display_self_reflection(f"Agent {self.name} self reflection: reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'")
+                        display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'")
 
                     messages.append({"role": "assistant", "content": f"Self Reflection: {reflection_output.reflection} Satisfactory?: {reflection_output.satisfactory}"})
 
-                    if reflection_output.satisfactory == "yes":
+                    # Only consider satisfactory after minimum reflections
+                    if reflection_output.satisfactory == "yes" and reflection_count >= self.min_reflect - 1:
                         if self.verbose:
-                            display_self_reflection("Agent marked the response as satisfactory")
+                            display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections")
+                        self.chat_history.append({"role": "user", "content": prompt})
                         self.chat_history.append({"role": "assistant", "content": response_text})
                         display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
                         return response_text
 
-                    logging.debug(f"{self.name} reflection not satisfactory, requesting regeneration.")
+                    # Check if we've hit max reflections
+                    if reflection_count >= self.max_reflect - 1:
+                        if self.verbose:
+                            display_self_reflection("Maximum reflection count reached, returning current response")
+                        self.chat_history.append({"role": "user", "content": prompt})
+                        self.chat_history.append({"role": "assistant", "content": response_text})
+                        display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
+                        return response_text
+
+                    logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
                     messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
                     response = self._chat_completion(messages, temperature=temperature, tools=None, stream=True)
                     response_text = response.choices[0].message.content.strip()
+                    reflection_count += 1
+                    continue  # Continue the loop for more reflections
+
                 except Exception as e:
                     display_error(f"Error in parsing self-reflection json {e}. Retrying")
                     logging.error("Reflection parsing failed.", exc_info=True)
                     messages.append({"role": "assistant", "content": f"Self Reflection failed."})
+                    reflection_count += 1
+                    continue  # Continue even after error to try again
 
-                reflection_count += 1
-
-            self.chat_history.append({"role": "user", "content": prompt})
-            self.chat_history.append({"role": "assistant", "content": response_text})
-
-            if self.verbose:
-                logging.info(f"Agent {self.name} final response: {response_text}")
-            display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
-            return response_text
         except Exception as e:
             display_error(f"Error in chat: {e}")
             return None
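The net effect of this hunk is a reflection loop with two exits: a "yes" verdict is accepted only once min_reflect rounds have run, and the loop stops unconditionally at max_reflect. A simplified, self-contained sketch of that control flow follows; generate and reflect are hypothetical stand-ins for the agent's chat-completion and reflection calls, not library functions.

# Simplified control-flow sketch, not the library's actual chat() method.
def reflect_until_satisfied(generate, reflect, min_reflect=1, max_reflect=3):
    response_text = generate("initial prompt")
    reflection_count = 0
    while True:
        verdict = reflect(response_text)  # object with .satisfactory ("yes"/"no")
        if verdict.satisfactory == "yes" and reflection_count >= min_reflect - 1:
            return response_text          # accepted after the minimum number of rounds
        if reflection_count >= max_reflect - 1:
            return response_text          # hard stop: maximum rounds reached
        response_text = generate("regenerate using the reflection")
        reflection_count += 1

Note also that in 0.0.8 the error path increments reflection_count and continues, so a failed reflection parse no longer drops out of the loop.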
praisonaiagents-0.0.7.dist-info/METADATA → praisonaiagents-0.0.8.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: praisonaiagents
-Version: 0.0.7
+Version: 0.0.8
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
praisonaiagents-0.0.7.dist-info/RECORD → praisonaiagents-0.0.8.dist-info/RECORD

@@ -1,7 +1,7 @@
 praisonaiagents/__init__.py,sha256=gI8vEabBTRPsE_E8GA5sBMi4sTtJI-YokPrH2Nor-k0,741
 praisonaiagents/main.py,sha256=zDhN5KKtKbfruolDNxlyJkcFlkSt4KQkQTDRfQVAhxc,3960
 praisonaiagents/agent/__init__.py,sha256=sKO8wGEXvtCrvV1e834r1Okv0XAqAxqZCqz6hKLiTvA,79
-praisonaiagents/agent/agent.py,sha256=D51Ia4jGxG-eLWaztQI03vyx_KrcK46p3eEkj8L0GNU,17381
+praisonaiagents/agent/agent.py,sha256=ov7WrSpgsbjqLt2yJVEmqVyVMPJqBgwkUTk1tim0dGg,18241
 praisonaiagents/agents/__init__.py,sha256=7RDeQNSqZg5uBjD4M_0p_F6YgfWuDuxPFydPU50kDYc,120
 praisonaiagents/agents/agents.py,sha256=NkosnTo41bB9H0lYt_YQIHwaRyW2Bcp_4KKpYWeaFk0,13696
 praisonaiagents/build/lib/praisonaiagents/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
@@ -14,7 +14,7 @@ praisonaiagents/build/lib/praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb3
 praisonaiagents/build/lib/praisonaiagents/task/task.py,sha256=4Y1qX8OeEFcid2yhAiPYylvHpuDmWORsyNL16_BiVvI,1831
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
 praisonaiagents/task/task.py,sha256=4Y1qX8OeEFcid2yhAiPYylvHpuDmWORsyNL16_BiVvI,1831
-praisonaiagents-0.0.7.dist-info/METADATA,sha256=2b0TkGxlgNiEyFHjeqJhKe9Uph4unxb1QqVlJhFY6js,232
-praisonaiagents-0.0.7.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-praisonaiagents-0.0.7.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.7.dist-info/RECORD,,
+praisonaiagents-0.0.8.dist-info/METADATA,sha256=wIiXOiCtBxNdqhBruAc7ECloBsy_SIW-K33C1acNfOI,232
+praisonaiagents-0.0.8.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+praisonaiagents-0.0.8.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.8.dist-info/RECORD,,