praisonaiagents 0.0.22__tar.gz → 0.0.23__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/PKG-INFO +1 -1
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/agent/agent.py +22 -33
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/agents/agents.py +18 -4
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents.egg-info/PKG-INFO +1 -1
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/pyproject.toml +1 -1
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/__init__.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/__init__.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/agent/agent.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/agents/agents.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/build/lib/praisonaiagents/task/task.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/process/__init__.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/process/process.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/task/task.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/tools/__init__.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/tools/tools.py +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents.egg-info/SOURCES.txt +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents.egg-info/requires.txt +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/setup.cfg +0 -0
{praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/agent/agent.py

@@ -394,7 +394,7 @@ Your Goal: {self.goal}
             display_error(f"Error in chat completion: {e}")
             return None
 
-    def chat(self, prompt, temperature=0.2, tools=None, output_json=None):
+    def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None):
         if self.use_system_prompt:
             system_prompt = f"""{self.backstory}\n
 Your Role: {self.role}\n
@@ -402,6 +402,8 @@ Your Goal: {self.goal}
 """
             if output_json:
                 system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {output_json.schema_json()}"
+            elif output_pydantic:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {output_pydantic.schema_json()}"
         else:
             system_prompt = None
 
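For context, the instruction appended to the system prompt embeds the model's JSON schema via Pydantic's schema_json(). A minimal sketch of what that injected string looks like, using a hypothetical model (the model name and fields are illustrative, not from the package; on Pydantic v2 schema_json() still works but is deprecated):

```python
from pydantic import BaseModel

# Hypothetical response model, purely illustrative.
class CityFact(BaseModel):
    city: str
    population: int

# Same f-string shape as in the diff above: the JSON schema of the model
# is embedded in the system prompt so the LLM knows the expected keys.
instruction = (
    "\nReturn ONLY a JSON object that matches this Pydantic model: "
    f"{CityFact.schema_json()}"
)
print(instruction)
```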
@@ -410,9 +412,9 @@ Your Goal: {self.goal}
             messages.append({"role": "system", "content": system_prompt})
         messages.extend(self.chat_history)
 
-        # Modify prompt if output_json is specified
+        # Modify prompt if output_json or output_pydantic is specified
         original_prompt = prompt
-        if output_json:
+        if output_json or output_pydantic:
             if isinstance(prompt, str):
                 prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
             elif isinstance(prompt, list):
@@ -487,23 +489,15 @@ Your Goal: {self.goal}
                 return None
             response_text = response.choices[0].message.content.strip()
 
-            # Handle output_json if specified
-            if output_json:
-
-
-
-
-
-
-
-                self.chat_history.append({"role": "assistant", "content": response_text})
-                if self.verbose:
-                    display_interaction(original_prompt, response_text, markdown=self.markdown,
-                                        generation_time=time.time() - start_time, console=self.console)
-                return parsed_model
-            except Exception as e:
-                display_error(f"Failed to parse response as {output_json.__name__}: {e}")
-                return None
+            # Handle output_json or output_pydantic if specified
+            if output_json or output_pydantic:
+                # Add to chat history and return raw response
+                self.chat_history.append({"role": "user", "content": original_prompt})
+                self.chat_history.append({"role": "assistant", "content": response_text})
+                if self.verbose:
+                    display_interaction(original_prompt, response_text, markdown=self.markdown,
+                                        generation_time=time.time() - start_time, console=self.console)
+                return response_text
 
             if not self.self_reflect:
                 self.chat_history.append({"role": "user", "content": original_prompt})
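Note the behavioral change above: when output_json or output_pydantic is set, chat() no longer parses the response into a model instance; it records the exchange in chat_history and returns the raw response text, leaving parsing to the caller. A minimal caller-side sketch under that assumption (Agent constructor arguments and the model are illustrative; a real run needs an LLM API key configured):

```python
import json
from pydantic import BaseModel
from praisonaiagents import Agent

class CityFact(BaseModel):  # illustrative model
    city: str
    population: int

agent = Agent(role="Researcher", goal="Answer factual questions")  # illustrative args

# As of 0.0.23, raw is the model's JSON text, not a CityFact instance.
raw = agent.chat("What is the largest city in France?", output_pydantic=CityFact)

# Parse on the caller side.
fact = CityFact(**json.loads(raw))
print(fact.city, fact.population)
```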
@@ -585,19 +579,21 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             cleaned = cleaned[:-3].strip()
         return cleaned
 
-    async def achat(self, prompt, temperature=0.2, tools=None, output_json=None):
+    async def achat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None):
         """Async version of chat method"""
         try:
             # Build system prompt
             system_prompt = self.system_prompt
             if output_json:
                 system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {output_json.schema_json()}"
+            elif output_pydantic:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {output_pydantic.schema_json()}"
 
             # Build messages
             if isinstance(prompt, str):
                 messages = [
                     {"role": "system", "content": system_prompt},
-                    {"role": "user", "content": prompt + ("\nReturn ONLY a valid JSON object. No other text or explanation." if output_json else "")}
+                    {"role": "user", "content": prompt + ("\nReturn ONLY a valid JSON object. No other text or explanation." if (output_json or output_pydantic) else "")}
                 ]
             else:
                 # For multimodal prompts
@@ -605,7 +601,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     {"role": "system", "content": system_prompt},
                     {"role": "user", "content": prompt}
                 ]
-                if output_json:
+                if output_json or output_pydantic:
                     # Add JSON instruction to text content
                     for item in messages[-1]["content"]:
                         if item["type"] == "text":
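The multimodal branch above expects prompt to be a list of content parts and appends the JSON instruction only to the text parts. A small sketch of the prompt shape that branch assumes; the image URL is a placeholder, and appending to item["text"] is an assumption since the body of that loop is not shown in this hunk:

```python
# Chat-completions style multimodal content: one text part plus one image part.
prompt = [
    {"type": "text", "text": "Describe this chart."},
    {"type": "image_url", "image_url": {"url": "https://example.com/chart.png"}},
]

# What the loop in the diff presumably does when output_json/output_pydantic is set.
for item in prompt:
    if item["type"] == "text":
        item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
```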
@@ -639,22 +635,15 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     tools=formatted_tools
                 )
                 return await self._achat_completion(response, tools)
-            elif output_json:
+            elif output_json or output_pydantic:
                 response = await async_client.chat.completions.create(
                     model=self.llm,
                     messages=messages,
                     temperature=temperature,
                     response_format={"type": "json_object"}
                 )
-
-
-                cleaned_json = self.clean_json_output(result)
-                try:
-                    parsed = json.loads(cleaned_json)
-                    return output_json(**parsed)
-                except Exception as e:
-                    display_error(f"Error parsing JSON response: {e}")
-                    return None
+                # Return the raw response
+                return response.choices[0].message.content
             else:
                 response = await async_client.chat.completions.create(
                     model=self.llm,
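achat() mirrors the sync path: with output_json or output_pydantic it requests response_format={"type": "json_object"} and now returns the raw message content instead of a parsed model. A hedged async usage sketch (constructor arguments and the model are illustrative; a real run needs an LLM API key configured):

```python
import asyncio
import json
from typing import List
from pydantic import BaseModel
from praisonaiagents import Agent

class Summary(BaseModel):  # illustrative model
    title: str
    bullet_points: List[str]

async def main():
    agent = Agent(role="Summarizer", goal="Summarize text")  # illustrative args
    raw = await agent.achat(
        "Summarize: praisonaiagents 0.0.23 adds output_pydantic support.",
        output_pydantic=Summary,
    )
    # Raw JSON text as of 0.0.23; parse on the caller side.
    summary = Summary(**json.loads(raw))
    print(summary.title)

asyncio.run(main())
```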
{praisonaiagents-0.0.22 → praisonaiagents-0.0.23}/praisonaiagents/agents/agents.py

@@ -195,10 +195,17 @@ Here are the results of previous tasks that might be useful:\n
 
                     agent_output = await executor_agent.achat(
                         _get_multimodal_message(task_prompt, task.images),
-                        tools=task.tools
+                        tools=task.tools,
+                        output_json=task.output_json,
+                        output_pydantic=task.output_pydantic
                     )
                 else:
-                    agent_output = await executor_agent.achat(
+                    agent_output = await executor_agent.achat(
+                        task_prompt,
+                        tools=task.tools,
+                        output_json=task.output_json,
+                        output_pydantic=task.output_pydantic
+                    )
 
                 if agent_output:
                     task_output = TaskOutput(
@@ -405,10 +412,17 @@ Here are the results of previous tasks that might be useful:\n
 
                     agent_output = executor_agent.chat(
                         _get_multimodal_message(task_prompt, task.images),
-                        tools=task.tools
+                        tools=task.tools,
+                        output_json=task.output_json,
+                        output_pydantic=task.output_pydantic
                     )
                 else:
-                    agent_output = executor_agent.chat(
+                    agent_output = executor_agent.chat(
+                        task_prompt,
+                        tools=task.tools,
+                        output_json=task.output_json,
+                        output_pydantic=task.output_pydantic
+                    )
 
                 if agent_output:
                     task_output = TaskOutput(
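In agents.py, both the async and sync execution paths now forward the task's output_json and output_pydantic attributes to the executor agent, so structured-output settings declared on a Task reach chat()/achat(). A minimal end-to-end sketch, assuming Task accepts description, agent, and output_pydantic keyword arguments mirroring the attributes read in this diff, and that PraisonAIAgents orchestrates them as in the project's usage examples (all other names are illustrative):

```python
from pydantic import BaseModel
from praisonaiagents import Agent, Task, PraisonAIAgents

class Answer(BaseModel):  # illustrative model
    question: str
    answer: str

agent = Agent(role="Assistant", goal="Answer questions")  # illustrative args

# output_pydantic here is what task.output_pydantic exposes, which the
# execution loops above now pass through to chat()/achat().
task = Task(
    description="Answer: what does version 0.0.23 change?",
    agent=agent,
    output_pydantic=Answer,
)

agents = PraisonAIAgents(agents=[agent], tasks=[task])
result = agents.start()
print(result)
```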