praisonaiagents 0.0.21.tar.gz → 0.0.23.tar.gz
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/PKG-INFO +1 -1
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/agent/agent.py +27 -36
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/agents/agents.py +23 -7
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents.egg-info/PKG-INFO +1 -1
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/pyproject.toml +1 -1
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/__init__.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/__init__.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/agent/agent.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/agents/agents.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/task/task.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/process/__init__.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/process/process.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/task/task.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/tools/__init__.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents/tools/tools.py +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents.egg-info/SOURCES.txt +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents.egg-info/requires.txt +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.21 → praisonaiagents-0.0.23}/setup.cfg +0 -0
praisonaiagents/agent/agent.py
@@ -140,7 +140,7 @@ class Agent:
         goal: Optional[str] = None,
         backstory: Optional[str] = None,
         instructions: Optional[str] = None,
-        llm: Optional[Union[str, Any]] =
+        llm: Optional[Union[str, Any]] = None,
         tools: Optional[List[Any]] = None,
         function_calling_llm: Optional[Any] = None,
         max_iter: int = 20,
@@ -189,7 +189,8 @@ class Agent:
         self.self_reflect = True if self_reflect is None else self_reflect

         self.instructions = instructions
-
+        # Check for model name in environment variable if not provided
+        self.llm = llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
         self.tools = tools if tools else [] # Store original tools
         self.function_calling_llm = function_calling_llm
         self.max_iter = max_iter
@@ -214,7 +215,8 @@ class Agent:
         self.markdown = markdown
         self.max_reflect = max_reflect
         self.min_reflect = min_reflect
-
+        # Use the same model selection logic for reflect_llm
+        self.reflect_llm = reflect_llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
         self.console = Console() # Create a single console instance for the agent

         # Initialize system prompt
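The three hunks above change how an Agent resolves its model: `llm` now defaults to `None`, and both `self.llm` and `self.reflect_llm` fall back to the `OPENAI_MODEL_NAME` environment variable and finally to `gpt-4o`. A minimal sketch of the new behaviour, assuming the `Agent` constructor accepts the `name`/`role`/`goal`/`backstory` fields referenced elsewhere in this diff (the model name used here is only an illustration):

```python
import os
from praisonaiagents import Agent

# The model can now come from the environment instead of the constructor.
os.environ["OPENAI_MODEL_NAME"] = "gpt-4o-mini"  # illustrative value, not from the diff

researcher = Agent(
    name="Researcher",
    role="Research Analyst",
    goal="Summarise a topic",
    backstory="You dig up and condense information.",
    # llm is omitted, so it resolves to os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
)

print(researcher.llm)          # -> gpt-4o-mini
print(researcher.reflect_llm)  # the reflection model uses the same fallback
```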
@@ -392,7 +394,7 @@ Your Goal: {self.goal}
             display_error(f"Error in chat completion: {e}")
             return None

-    def chat(self, prompt, temperature=0.2, tools=None, output_json=None):
+    def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None):
         if self.use_system_prompt:
             system_prompt = f"""{self.backstory}\n
 Your Role: {self.role}\n
@@ -400,6 +402,8 @@ Your Goal: {self.goal}
 """
             if output_json:
                 system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {output_json.schema_json()}"
+            elif output_pydantic:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {output_pydantic.schema_json()}"
         else:
             system_prompt = None

@@ -408,9 +412,9 @@ Your Goal: {self.goal}
             messages.append({"role": "system", "content": system_prompt})
         messages.extend(self.chat_history)

-        # Modify prompt if output_json is specified
+        # Modify prompt if output_json or output_pydantic is specified
         original_prompt = prompt
-        if output_json:
+        if output_json or output_pydantic:
             if isinstance(prompt, str):
                 prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
             elif isinstance(prompt, list):
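The `chat()` hunks above add an `output_pydantic` parameter alongside `output_json`; either one causes the Pydantic model's JSON schema to be embedded in the system prompt and a "Return ONLY a valid JSON object" instruction to be appended to the user prompt. A small sketch of the schema text being embedded, using Pydantic's v1-style `schema_json()` exactly as the diff does (deprecated but still available on Pydantic v2):

```python
from pydantic import BaseModel

class Movie(BaseModel):
    title: str
    year: int

# Roughly the suffix chat() now appends to the system prompt when either
# output_json=Movie or output_pydantic=Movie is passed.
suffix = (
    "\nReturn ONLY a JSON object that matches this Pydantic model: "
    + Movie.schema_json()
)
print(suffix)
```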
@@ -485,23 +489,15 @@ Your Goal: {self.goal}
                 return None
             response_text = response.choices[0].message.content.strip()

-            # Handle output_json if specified
-            if output_json:
-
-
-
-
-
-
-
-                self.chat_history.append({"role": "assistant", "content": response_text})
-                if self.verbose:
-                    display_interaction(original_prompt, response_text, markdown=self.markdown,
-                                        generation_time=time.time() - start_time, console=self.console)
-                return parsed_model
-            except Exception as e:
-                display_error(f"Failed to parse response as {output_json.__name__}: {e}")
-                return None
+            # Handle output_json or output_pydantic if specified
+            if output_json or output_pydantic:
+                # Add to chat history and return raw response
+                self.chat_history.append({"role": "user", "content": original_prompt})
+                self.chat_history.append({"role": "assistant", "content": response_text})
+                if self.verbose:
+                    display_interaction(original_prompt, response_text, markdown=self.markdown,
+                                        generation_time=time.time() - start_time, console=self.console)
+                return response_text

             if not self.self_reflect:
                 self.chat_history.append({"role": "user", "content": original_prompt})
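Note the behavioural change in the hunk above: when `output_json` or `output_pydantic` is set, `chat()` no longer parses the model response itself. It records the exchange in chat history and returns the raw JSON string (or `None` on error), so validation moves to the caller. A hedged sketch of caller-side parsing, reusing the `researcher` agent and `Movie` model from the earlier sketches (`Model(**json.loads(raw))` works on both Pydantic v1 and v2):

```python
import json

# `researcher` and `Movie` are defined in the sketches above.
raw = researcher.chat(
    "Recommend one movie.",
    output_pydantic=Movie,  # new in 0.0.23; output_json=Movie behaves the same way
)

if raw is not None:
    movie = Movie(**json.loads(raw))  # caller-side parsing replaces the old in-chat parsing
    print(movie.title, movie.year)
```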
@@ -583,19 +579,21 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 cleaned = cleaned[:-3].strip()
         return cleaned

-    async def achat(self, prompt, temperature=0.2, tools=None, output_json=None):
+    async def achat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None):
         """Async version of chat method"""
         try:
             # Build system prompt
             system_prompt = self.system_prompt
             if output_json:
                 system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {output_json.schema_json()}"
+            elif output_pydantic:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {output_pydantic.schema_json()}"

             # Build messages
             if isinstance(prompt, str):
                 messages = [
                     {"role": "system", "content": system_prompt},
-                    {"role": "user", "content": prompt + ("\nReturn ONLY a valid JSON object. No other text or explanation." if output_json else "")}
+                    {"role": "user", "content": prompt + ("\nReturn ONLY a valid JSON object. No other text or explanation." if (output_json or output_pydantic) else "")}
                 ]
             else:
                 # For multimodal prompts
@@ -603,7 +601,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     {"role": "system", "content": system_prompt},
                     {"role": "user", "content": prompt}
                 ]
-                if output_json:
+                if output_json or output_pydantic:
                     # Add JSON instruction to text content
                     for item in messages[-1]["content"]:
                         if item["type"] == "text":
@@ -637,22 +635,15 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     tools=formatted_tools
                 )
                 return await self._achat_completion(response, tools)
-            elif output_json:
+            elif output_json or output_pydantic:
                 response = await async_client.chat.completions.create(
                     model=self.llm,
                     messages=messages,
                     temperature=temperature,
                     response_format={"type": "json_object"}
                 )
-
-
-                cleaned_json = self.clean_json_output(result)
-                try:
-                    parsed = json.loads(cleaned_json)
-                    return output_json(**parsed)
-                except Exception as e:
-                    display_error(f"Error parsing JSON response: {e}")
-                    return None
+                # Return the raw response
+                return response.choices[0].message.content
             else:
                 response = await async_client.chat.completions.create(
                     model=self.llm,
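`achat()` mirrors the change: for structured output it still requests `response_format={"type": "json_object"}`, but it now returns `response.choices[0].message.content` unparsed instead of constructing the model. An async sketch under the same assumptions as the earlier examples:

```python
import asyncio
import json

async def main():
    # `researcher` and `Movie` come from the earlier sketches.
    raw = await researcher.achat(
        "Recommend one movie.",
        output_pydantic=Movie,
    )
    if raw is not None:
        print(Movie(**json.loads(raw)))

asyncio.run(main())
```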
praisonaiagents/agents/agents.py
@@ -54,9 +54,11 @@ class PraisonAIAgents:
         self.verbose = verbose
         self.max_retries = max_retries
         self.process = process
-
-
-        self.manager_llm = manager_llm
+
+        # Check for manager_llm in environment variable if not provided
+        self.manager_llm = manager_llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
+        if self.verbose:
+            logging.info(f"Using model {self.manager_llm} for manager")

         # If no tasks provided, generate them from agents
         if tasks is None:
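PraisonAIAgents now resolves `manager_llm` with the same `OPENAI_MODEL_NAME` fallback and, when verbose, logs which model the manager will use. A hypothetical wiring sketch; the exact `Task` fields used here are assumptions beyond what this diff shows:

```python
import logging
import os

from praisonaiagents import Agent, PraisonAIAgents, Task

logging.basicConfig(level=logging.INFO)
os.environ["OPENAI_MODEL_NAME"] = "gpt-4o-mini"  # picked up for the manager as well

writer = Agent(name="Writer", role="Writer", goal="Write a haiku", backstory="A poet.")
haiku_task = Task(description="Write a haiku about version bumps.", agent=writer)

crew = PraisonAIAgents(agents=[writer], tasks=[haiku_task], verbose=True)
# With verbose set, construction now logs: "Using model gpt-4o-mini for manager"
```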
@@ -193,10 +195,17 @@ Here are the results of previous tasks that might be useful:\n

                 agent_output = await executor_agent.achat(
                     _get_multimodal_message(task_prompt, task.images),
-                    tools=task.tools
+                    tools=task.tools,
+                    output_json=task.output_json,
+                    output_pydantic=task.output_pydantic
                 )
             else:
-                agent_output = await executor_agent.achat(
+                agent_output = await executor_agent.achat(
+                    task_prompt,
+                    tools=task.tools,
+                    output_json=task.output_json,
+                    output_pydantic=task.output_pydantic
+                )

             if agent_output:
                 task_output = TaskOutput(
@@ -403,10 +412,17 @@ Here are the results of previous tasks that might be useful:\n

                 agent_output = executor_agent.chat(
                     _get_multimodal_message(task_prompt, task.images),
-                    tools=task.tools
+                    tools=task.tools,
+                    output_json=task.output_json,
+                    output_pydantic=task.output_pydantic
                 )
             else:
-                agent_output = executor_agent.chat(
+                agent_output = executor_agent.chat(
+                    task_prompt,
+                    tools=task.tools,
+                    output_json=task.output_json,
+                    output_pydantic=task.output_pydantic
+                )

             if agent_output:
                 task_output = TaskOutput(
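Both the async and sync execution paths now forward `task.output_json` and `task.output_pydantic` into the agent call, so structured output can be requested per task rather than per chat call. A hedged end-to-end sketch, assuming `Task` accepts `output_pydantic` as a field (the code reads `task.output_pydantic`) and that `PraisonAIAgents.start()` is the usual entry point:

```python
from pydantic import BaseModel
from praisonaiagents import Agent, PraisonAIAgents, Task

class Haiku(BaseModel):
    lines: list[str]

poet = Agent(name="Poet", role="Poet", goal="Write haikus", backstory="A minimalist.")
haiku = Task(
    description="Write a haiku about version bumps.",
    agent=poet,
    output_pydantic=Haiku,  # forwarded to chat()/achat() by the hunks above
)

result = PraisonAIAgents(agents=[poet], tasks=[haiku]).start()
# The agent returns a raw JSON string; validate it with Haiku(**json.loads(...)) if needed.
```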