praisonaiagents 0.0.116__py3-none-any.whl → 0.0.117__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/agent/agent.py +109 -10
- praisonaiagents/agents/agents.py +1 -1
- praisonaiagents/agents/autoagents.py +13 -1
- praisonaiagents/main.py +0 -19
- praisonaiagents/process/process.py +4 -2
- {praisonaiagents-0.0.116.dist-info → praisonaiagents-0.0.117.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.116.dist-info → praisonaiagents-0.0.117.dist-info}/RECORD +9 -9
- {praisonaiagents-0.0.116.dist-info → praisonaiagents-0.0.117.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.116.dist-info → praisonaiagents-0.0.117.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED
@@ -354,8 +354,10 @@ class Agent:
         # Check for model name in environment variable if not provided
         self._using_custom_llm = False
 
-        #
-        self.
+        # Store OpenAI client parameters for lazy initialization
+        self._openai_api_key = api_key
+        self._openai_base_url = base_url
+        self.__openai_client = None
 
         # If base_url is provided, always create a custom LLM instance
         if base_url:
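
Note: because __init__ now only records api_key and base_url, constructing an agent against a non-OpenAI provider no longer requires OpenAI credentials up front. A minimal sketch of the intent — the constructor arguments are assumed from the parameters visible in this hunk, and the model string is hypothetical:

    import os
    from praisonaiagents import Agent

    # Assumption for illustration: no OpenAI credentials are configured.
    os.environ.pop("OPENAI_API_KEY", None)

    # In 0.0.117 this only records api_key/base_url without building a
    # client, so construction cannot fail early on a missing key.
    agent = Agent(
        role="Assistant",
        goal="Answer questions",
        llm="ollama/llama3",  # hypothetical custom-provider model string
    )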
@@ -488,6 +490,42 @@ Your Goal: {self.goal}
         for source in knowledge:
             self._process_knowledge(source)
 
+    @property
+    def _openai_client(self):
+        """Lazily initialize OpenAI client only when needed."""
+        if self.__openai_client is None:
+            try:
+                self.__openai_client = get_openai_client(
+                    api_key=self._openai_api_key,
+                    base_url=self._openai_base_url
+                )
+            except ValueError as e:
+                # If we're using a custom LLM, we might not need the OpenAI client
+                # Return None and let the calling code handle it
+                if self._using_custom_llm:
+                    return None
+                else:
+                    raise e
+        return self.__openai_client
+
+    @property
+    def llm_model(self):
+        """Unified property to get the LLM model regardless of configuration type.
+
+        Returns:
+            The LLM model/instance being used by this agent.
+            - For standard models: returns the model string (e.g., "gpt-4o")
+            - For custom LLM instances: returns the LLM instance object
+            - For provider models: returns the LLM instance object
+        """
+        if hasattr(self, 'llm_instance') and self.llm_instance:
+            return self.llm_instance
+        elif hasattr(self, 'llm') and self.llm:
+            return self.llm
+        else:
+            # Default fallback
+            return "gpt-4o"
+
     def _process_knowledge(self, knowledge_item):
         """Process and store knowledge from a file path, URL, or string."""
        try:
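
The new _openai_client property pairs a name-mangled cache attribute (__openai_client, stored as _Agent__openai_client) with a lazy accessor, so the client is built at most once and only on first use. A standalone sketch of the same pattern, independent of this package:

    class LazyClientHolder:
        """Minimal reproduction of the lazy-initialization pattern above."""

        def __init__(self):
            # Name-mangled to _LazyClientHolder__client, so outside code
            # cannot collide with the cache attribute by accident.
            self.__client = None

        @property
        def client(self):
            if self.__client is None:
                print("building client...")   # runs only on first access
                self.__client = object()      # stand-in for get_openai_client()
            return self.__client

    holder = LazyClientHolder()
    assert holder.client is holder.client     # second access hits the cache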
@@ -694,14 +732,39 @@ Your Role: {self.role}\n
 Your Goal: {self.goal}
 """
 
-        # Use openai_client's build_messages method
-        messages, original_prompt = self._openai_client.build_messages(
-            prompt=prompt,
-            system_prompt=system_prompt,
-            chat_history=self.chat_history,
-            output_json=output_json,
-            output_pydantic=output_pydantic
-        )
+        # Use openai_client's build_messages method if available
+        if self._openai_client is not None:
+            messages, original_prompt = self._openai_client.build_messages(
+                prompt=prompt,
+                system_prompt=system_prompt,
+                chat_history=self.chat_history,
+                output_json=output_json,
+                output_pydantic=output_pydantic
+            )
+        else:
+            # Fallback implementation for when OpenAI client is not available
+            messages = []
+
+            # Add system message if provided
+            if system_prompt:
+                messages.append({"role": "system", "content": system_prompt})
+
+            # Add chat history
+            messages.extend(self.chat_history)
+
+            # Add user prompt
+            if isinstance(prompt, list):
+                messages.extend(prompt)
+                original_prompt = prompt
+            else:
+                messages.append({"role": "user", "content": str(prompt)})
+                original_prompt = str(prompt)
+
+            # Add JSON format instruction if needed
+            if output_json or output_pydantic:
+                model = output_pydantic or output_json
+                json_instruction = f"\nPlease respond with valid JSON matching this schema: {model.model_json_schema()}"
+                messages[-1]["content"] += json_instruction
 
         return messages, original_prompt
 
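
For a plain string prompt, the fallback branch assembles the same message shape build_messages would return. A worked sketch of its output, with all inputs assumed for illustration:

    # Assumed inputs.
    system_prompt = "You are a helpful assistant."
    chat_history = [{"role": "user", "content": "hi"},
                    {"role": "assistant", "content": "hello"}]
    prompt = "What is 2 + 2?"

    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.extend(chat_history)
    messages.append({"role": "user", "content": str(prompt)})

    # messages now holds, in order: the system message, the two history
    # entries, and {'role': 'user', 'content': 'What is 2 + 2?'}.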
@@ -943,6 +1006,9 @@ Your Goal: {self.goal}
 
     def _process_stream_response(self, messages, temperature, start_time, formatted_tools=None, reasoning_steps=False):
         """Process streaming response and return final response"""
+        if self._openai_client is None:
+            raise ValueError("OpenAI client is not initialized. Please provide OPENAI_API_KEY or use a custom LLM provider.")
+
         return self._openai_client.process_stream_response(
             messages=messages,
             model=self.llm,
@@ -1009,6 +1075,9 @@ Your Goal: {self.goal}
 
         # Note: openai_client expects tools in various formats and will format them internally
         # But since we already have formatted_tools, we can pass them directly
+        if self._openai_client is None:
+            raise ValueError("OpenAI client is not initialized. Please provide OPENAI_API_KEY or use a custom LLM provider.")
+
         final_response = self._openai_client.chat_completion_with_tools(
             messages=messages,
             model=self.llm,
@@ -1202,6 +1271,17 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             messages.append({"role": "user", "content": reflection_prompt})
 
             try:
+                # Check if OpenAI client is available
+                if self._openai_client is None:
+                    # For custom LLMs, self-reflection with structured output is not supported
+                    if self.verbose:
+                        display_self_reflection(f"Agent {self.name}: Self-reflection with structured output is not supported for custom LLM providers. Skipping reflection.", console=self.console)
+                    # Return the original response without reflection
+                    self.chat_history.append({"role": "user", "content": prompt})
+                    self.chat_history.append({"role": "assistant", "content": response_text})
+                    display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+                    return response_text
+
                 reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
                     model=self.reflect_llm if self.reflect_llm else self.llm,
                     messages=messages,
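
This guard changes behavior rather than raising: an agent on the custom-LLM path that was configured to self-reflect now records the exchange and returns the unreflected answer. A hedged sketch of the observable effect (constructor arguments assumed; the model string is hypothetical):

    from praisonaiagents import Agent

    # Assumption: a provider routed through the custom LLM path, so the
    # lazy OpenAI client resolves to None inside chat().
    agent = Agent(
        role="Researcher",
        goal="Summarise findings",
        llm="ollama/llama3",  # hypothetical custom-provider model string
        self_reflect=True,
        verbose=True,
    )

    # In 0.0.117 this returns the first answer and prints a notice that
    # structured self-reflection is skipped; previously this code path
    # reached self._openai_client.sync_client and failed.
    answer = agent.chat("List three findings about lazy initialization.")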
@@ -1388,6 +1468,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
         # Use the new _format_tools_for_completion helper method
         formatted_tools = self._format_tools_for_completion(tools)
+
+        # Check if OpenAI client is available
+        if self._openai_client is None:
+            error_msg = "OpenAI client is not initialized. Please provide OPENAI_API_KEY or use a custom LLM provider."
+            display_error(error_msg)
+            return None
 
         # Make the API call based on the type of request
         if tools:
@@ -1442,6 +1528,19 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             ]
 
             try:
+                # Check if OpenAI client is available for self-reflection
+                if self._openai_client is None:
+                    # For custom LLMs, self-reflection with structured output is not supported
+                    if self.verbose:
+                        display_self_reflection(f"Agent {self.name}: Self-reflection with structured output is not supported for custom LLM providers. Skipping reflection.", console=self.console)
+                    # Return the original response without reflection
+                    self.chat_history.append({"role": "user", "content": original_prompt})
+                    self.chat_history.append({"role": "assistant", "content": response_text})
+                    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                        total_time = time.time() - start_time
+                        logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
+                    return response_text
+
                 reflection_response = await self._openai_client.async_client.beta.chat.completions.parse(
                     model=self.reflect_llm if self.reflect_llm else self.llm,
                     messages=reflection_messages,
praisonaiagents/agents/agents.py
CHANGED
@@ -7,7 +7,7 @@ from pydantic import BaseModel
 from rich.text import Text
 from rich.panel import Panel
 from rich.console import Console
-from ..main import display_error, TaskOutput, error_logs, client
+from ..main import display_error, TaskOutput, error_logs
 from ..agent.agent import Agent
 from ..task.task import Task
 from ..process.process import Process, LoopItems
praisonaiagents/agents/autoagents.py
CHANGED
@@ -12,7 +12,8 @@ from typing import List, Any, Optional, Dict, Tuple
 import logging
 import os
 from pydantic import BaseModel, ConfigDict
-from ..main import display_instruction, display_tool_call, display_interaction, client
+from ..main import display_instruction, display_tool_call, display_interaction
+from ..llm import get_openai_client
 
 # Define Pydantic models for structured output
 class TaskConfig(BaseModel):
@@ -237,6 +238,17 @@ Return the configuration in a structured JSON format matching the AutoAgentsConf
         """
 
         try:
+            # Get OpenAI client
+            try:
+                client = get_openai_client()
+            except ValueError as e:
+                # AutoAgents requires OpenAI for structured output generation
+                raise ValueError(
+                    "AutoAgents requires OpenAI API for automatic agent generation. "
+                    "Please set OPENAI_API_KEY environment variable or use PraisonAIAgents class directly "
+                    "with manually configured agents for non-OpenAI providers."
+                ) from e
+
             response = client.beta.chat.completions.parse(
                 model=self.llm,
                 response_format=AutoAgentsConfig,
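
AutoAgents keeps its hard OpenAI dependency for the structured-output planning call, but the failure mode is now an explicit, actionable ValueError rather than an error from an unconfigured client. A sketch, with the import path and constructor arguments assumed:

    import os
    from praisonaiagents.agents.autoagents import AutoAgents  # path assumed

    os.environ.pop("OPENAI_API_KEY", None)  # simulate a non-OpenAI setup

    try:
        agents = AutoAgents(instructions="Research recent AI news")
    except ValueError as err:
        # 0.0.117 fails here with guidance: set OPENAI_API_KEY, or build
        # PraisonAIAgents manually for non-OpenAI providers.
        print(err)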
praisonaiagents/main.py
CHANGED
@@ -3,7 +3,6 @@ import time
 import json
 import logging
 from typing import List, Optional, Dict, Any, Union, Literal, Type
-from openai import OpenAI
 from pydantic import BaseModel, ConfigDict
 from rich import print
 from rich.console import Console
@@ -377,24 +376,6 @@ class ReflectionOutput(BaseModel):
     reflection: str
     satisfactory: Literal["yes", "no"]
 
-# Constants
-LOCAL_SERVER_API_KEY_PLACEHOLDER = "not-needed"
-
-# Initialize OpenAI client with proper API key handling
-api_key = os.environ.get("OPENAI_API_KEY")
-base_url = os.environ.get("OPENAI_API_BASE") or os.environ.get("OPENAI_BASE_URL")
-
-# For local servers like LM Studio, allow minimal API key
-if base_url and not api_key:
-    api_key = LOCAL_SERVER_API_KEY_PLACEHOLDER
-elif not api_key:
-    raise ValueError(
-        "OPENAI_API_KEY environment variable is required for the default OpenAI service. "
-        "If you are targeting a local server (e.g., LM Studio), ensure OPENAI_API_BASE is set "
-        f"(e.g., 'http://localhost:1234/v1') and you can use a placeholder API key by setting OPENAI_API_KEY='{LOCAL_SERVER_API_KEY_PLACEHOLDER}'"
-    )
-
-client = OpenAI(api_key=api_key, base_url=base_url)
 
 class TaskOutput(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
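
Deleting the module-level client also deletes an import-time failure mode: main.py no longer raises when no API key is configured, since key and base-URL handling now lives in the lazily invoked get_openai_client. A sketch of the difference, assuming a clean environment:

    import os

    os.environ.pop("OPENAI_API_KEY", None)
    os.environ.pop("OPENAI_API_BASE", None)

    # 0.0.116 ran `client = OpenAI(...)` at module load and raised
    # ValueError here without a key; in 0.0.117 the import succeeds and
    # any missing-key error is deferred until a client is first needed.
    import praisonaiagents  # noqa: F401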
praisonaiagents/process/process.py
CHANGED
@@ -5,10 +5,10 @@ from typing import Dict, Optional, List, Any, AsyncGenerator
 from pydantic import BaseModel, ConfigDict
 from ..agent.agent import Agent
 from ..task.task import Task
-from ..main import display_error, client
+from ..main import display_error
 import csv
 import os
-from openai import AsyncOpenAI
+from openai import AsyncOpenAI, OpenAI
 
 class LoopItems(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -140,6 +140,8 @@ class Process:
 
     def _get_manager_instructions_with_fallback(self, manager_task, manager_prompt, ManagerInstructions):
         """Sync version of getting manager instructions with fallback"""
+        # Create OpenAI client
+        client = OpenAI()
         try:
             # First try structured output (OpenAI compatible)
             logging.info("Attempting structured output...")
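
Process now builds its own client with OpenAI() instead of importing the shared one from main. The openai constructor reads OPENAI_API_KEY from the environment at call time, so a missing key now surfaces when hierarchical mode requests manager instructions rather than at package import. A small sketch of that timing:

    import os
    from openai import OpenAI, OpenAIError

    os.environ.pop("OPENAI_API_KEY", None)

    try:
        client = OpenAI()  # reads the environment at construction time
    except OpenAIError as err:
        # Raised only when this code path actually runs, not at import.
        print(err)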
{praisonaiagents-0.0.116.dist-info → praisonaiagents-0.0.117.dist-info}/RECORD
CHANGED
@@ -1,14 +1,14 @@
 praisonaiagents/__init__.py,sha256=TezvgadS1p5FGnIRAUVOB_6Jzb3Of7ZtzjtyeCqRsmM,3017
 praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9813
-praisonaiagents/main.py,sha256=
+praisonaiagents/main.py,sha256=bamnEu5PaekloGi52VqAFclm-HzjEVeKtWF0Zpdmfzs,15479
 praisonaiagents/session.py,sha256=d-CZPYikOHb0q-H9f_IWKJsypnQfz1YKeLLkyxs6oDo,15532
 praisonaiagents/agent/__init__.py,sha256=IhIDtAkfJ99cxbttwou52coih_AejS2-jpazsX6LbDY,350
-praisonaiagents/agent/agent.py,sha256=
+praisonaiagents/agent/agent.py,sha256=oZaMJJXoWOWJVOFSLmnoBEpF9rb54pnvSqZHgiOhzAw,108660
 praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
-praisonaiagents/agents/agents.py,sha256=
-praisonaiagents/agents/autoagents.py,sha256=
+praisonaiagents/agents/agents.py,sha256=WnptTEMSDMAM30Ka6rOAu6rBD-ZLev3qphb1a3BbP1g,63301
+praisonaiagents/agents/autoagents.py,sha256=njkcv7wgDjrUd5auLL3rMc7qv20Kfo40zdn49UxWR9k,14235
 praisonaiagents/guardrails/__init__.py,sha256=HA8zhp-KRHTxo0194MUwXOUJjPyjOu7E3d7xUIKYVVY,310
 praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXCFIyZXZVY4f0hnQyWc,1352
 praisonaiagents/guardrails/llm_guardrail.py,sha256=MTTqmYDdZX-18QN9T17T5P_6H2qnV8GVgymJufW1WuM,3277
@@ -24,7 +24,7 @@ praisonaiagents/mcp/mcp_sse.py,sha256=z8TMFhW9xuLQ7QnpOa3n1-nSHt0-Bf27qso0u4qxYS
 praisonaiagents/memory/__init__.py,sha256=aEFdhgtTqDdMhc_JCWM-f4XI9cZIj7Wz5g_MUa-0amg,397
 praisonaiagents/memory/memory.py,sha256=D5BmQTktv6VOJ49yW2m1MjjCJ5UDSX1Qo46_443ymKo,44276
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
-praisonaiagents/process/process.py,sha256=
+praisonaiagents/process/process.py,sha256=NTc9rbelIdEA-S1Nwd79OVeKA7mmoZWXhD5r8S91LNs,66624
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
 praisonaiagents/task/task.py,sha256=imqJ8wzZzVyUSym2EyF2tC-vAsV1UdfI_P3YM5mqAiw,20786
 praisonaiagents/telemetry/__init__.py,sha256=5iAOrj_N_cKMmh2ltWGYs3PfOYt_jcwUoElW8fTAIsc,3062
@@ -53,7 +53,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.116.dist-info/METADATA,sha256=
-praisonaiagents-0.0.116.dist-info/WHEEL,sha256=
-praisonaiagents-0.0.116.dist-info/top_level.txt,sha256=
-praisonaiagents-0.0.116.dist-info/RECORD,,
+praisonaiagents-0.0.117.dist-info/METADATA,sha256=mgOfLNqTgXkHQ-hP91BvEx1g-ZaDLGJCswHO2zM2N9U,1669
+praisonaiagents-0.0.117.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.117.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.117.dist-info/RECORD,,
{praisonaiagents-0.0.116.dist-info → praisonaiagents-0.0.117.dist-info}/WHEEL
File without changes

{praisonaiagents-0.0.116.dist-info → praisonaiagents-0.0.117.dist-info}/top_level.txt
File without changes