praisonaiagents 0.0.146__tar.gz → 0.0.148__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/PKG-INFO +1 -1
- praisonaiagents-0.0.148/praisonaiagents/__init__.py +138 -0
- praisonaiagents-0.0.148/praisonaiagents/_logging.py +134 -0
- praisonaiagents-0.0.148/praisonaiagents/_warning_patch.py +73 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/agent/agent.py +17 -10
- praisonaiagents-0.0.148/praisonaiagents/llm/__init__.py +50 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/llm/llm.py +260 -108
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/main.py +1 -5
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents.egg-info/PKG-INFO +1 -1
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents.egg-info/SOURCES.txt +2 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/pyproject.toml +1 -1
- praisonaiagents-0.0.146/praisonaiagents/__init__.py +0 -213
- praisonaiagents-0.0.146/praisonaiagents/llm/__init__.py +0 -95
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/README.md +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/agent/context_agent.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/agent/handoff.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/agent/image_agent.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/agent/router_agent.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/agents/agents.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/agents/autoagents.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/approval.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/guardrails/__init__.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/guardrails/guardrail_result.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/knowledge/__init__.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/knowledge/chunking.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/knowledge/knowledge.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/llm/model_capabilities.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/llm/model_router.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/llm/openai_client.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/mcp/__init__.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/mcp/mcp.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/mcp/mcp_sse.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/memory/__init__.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/memory/memory.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/process/__init__.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/process/process.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/session.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/task/task.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/telemetry/__init__.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/telemetry/integration.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/telemetry/performance_cli.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/telemetry/performance_monitor.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/telemetry/performance_utils.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/telemetry/telemetry.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/README.md +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/__init__.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/arxiv_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/calculator_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/csv_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/duckdb_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/excel_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/file_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/json_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/mongodb_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/newspaper_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/pandas_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/python_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/searxng_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/shell_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/spider_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/test.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/train/data/generatecot.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/wikipedia_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/xml_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/yaml_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents/tools/yfinance_tools.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents.egg-info/requires.txt +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/setup.cfg +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test-graph-memory.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_context_agent.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_embedding_logging.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_fix_comprehensive.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_gemini_streaming_fix.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_handoff_compatibility.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_http_stream_basic.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_llm_self_reflection_direct.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_ollama_async_fix.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_ollama_fix.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_ollama_sequential_fix.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_posthog_fixed.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_self_reflection_comprehensive.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_self_reflection_fix_simple.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_self_reflection_fix_verification.py +0 -0
- {praisonaiagents-0.0.146 → praisonaiagents-0.0.148}/tests/test_validation_feedback.py +0 -0
--- /dev/null
+++ praisonaiagents-0.0.148/praisonaiagents/__init__.py
@@ -0,0 +1,138 @@
+"""
+Praison AI Agents - A package for hierarchical AI agent task execution
+"""
+
+# Apply warning patch BEFORE any imports to intercept warnings at the source
+from . import _warning_patch
+
+# Import centralized logging configuration FIRST
+from . import _logging
+
+# Configure root logger after logging is initialized
+_logging.configure_root_logger()
+
+# Now import everything else
+from .agent.agent import Agent
+from .agent.image_agent import ImageAgent
+from .agent.context_agent import ContextAgent, create_context_agent
+from .agents.agents import PraisonAIAgents
+from .task.task import Task
+from .tools.tools import Tools
+from .agents.autoagents import AutoAgents
+from .knowledge.knowledge import Knowledge
+from .knowledge.chunking import Chunking
+# MCP support (optional)
+try:
+    from .mcp.mcp import MCP
+    _mcp_available = True
+except ImportError:
+    _mcp_available = False
+    MCP = None
+from .session import Session
+from .memory.memory import Memory
+from .guardrails import GuardrailResult, LLMGuardrail
+from .agent.handoff import Handoff, handoff, handoff_filters, RECOMMENDED_PROMPT_PREFIX, prompt_with_handoff_instructions
+from .main import (
+    TaskOutput,
+    ReflectionOutput,
+    display_interaction,
+    display_self_reflection,
+    display_instruction,
+    display_tool_call,
+    display_error,
+    display_generating,
+    clean_triple_backticks,
+    error_logs,
+    register_display_callback,
+    sync_display_callbacks,
+    async_display_callbacks,
+)
+
+# Telemetry support (lazy loaded)
+try:
+    from .telemetry import (
+        get_telemetry,
+        enable_telemetry,
+        disable_telemetry,
+        MinimalTelemetry,
+        TelemetryCollector
+    )
+    _telemetry_available = True
+except ImportError:
+    # Telemetry not available - provide stub functions
+    _telemetry_available = False
+    def get_telemetry():
+        return None
+
+    def enable_telemetry(*args, **kwargs):
+        import logging
+        logging.warning(
+            "Telemetry not available. Install with: pip install praisonaiagents[telemetry]"
+        )
+        return None
+
+    def disable_telemetry():
+        pass
+
+    MinimalTelemetry = None
+    TelemetryCollector = None
+
+# Add Agents as an alias for PraisonAIAgents
+Agents = PraisonAIAgents
+
+# Apply telemetry auto-instrumentation after all imports
+if _telemetry_available:
+    try:
+        # Only instrument if telemetry is enabled
+        _telemetry = get_telemetry()
+        if _telemetry and _telemetry.enabled:
+            from .telemetry.integration import auto_instrument_all
+            auto_instrument_all(_telemetry)
+    except Exception:
+        # Silently fail if there are any issues
+        pass
+
+__all__ = [
+    'Agent',
+    'ImageAgent',
+    'ContextAgent',
+    'create_context_agent',
+    'PraisonAIAgents',
+    'Agents',
+    'Tools',
+    'Task',
+    'TaskOutput',
+    'ReflectionOutput',
+    'AutoAgents',
+    'Session',
+    'Memory',
+    'display_interaction',
+    'display_self_reflection',
+    'display_instruction',
+    'display_tool_call',
+    'display_error',
+    'display_generating',
+    'clean_triple_backticks',
+    'error_logs',
+    'register_display_callback',
+    'sync_display_callbacks',
+    'async_display_callbacks',
+    'Knowledge',
+    'Chunking',
+    'GuardrailResult',
+    'LLMGuardrail',
+    'Handoff',
+    'handoff',
+    'handoff_filters',
+    'RECOMMENDED_PROMPT_PREFIX',
+    'prompt_with_handoff_instructions',
+    'get_telemetry',
+    'enable_telemetry',
+    'disable_telemetry',
+    'MinimalTelemetry',
+    'TelemetryCollector'
+]
+
+# Add MCP to __all__ if available
+if _mcp_available:
+    __all__.append('MCP')
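For orientation, here is a minimal sketch of how the re-exported surface of the new `praisonaiagents/__init__.py` might be exercised. It only touches names the hunk above defines, and the comments describe expected behavior rather than guarantees.

```python
# Illustrative sketch of the package's top-level exports after this change.
import praisonaiagents
from praisonaiagents import Agents, PraisonAIAgents

# Agents is defined in __init__.py as a plain alias for PraisonAIAgents.
assert Agents is PraisonAIAgents

# MCP is optional: it is only appended to __all__ when `from .mcp.mcp import MCP` succeeds.
if "MCP" in praisonaiagents.__all__:
    print("MCP support is available")
else:
    print("MCP extras not installed; praisonaiagents.MCP is None")
```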
--- /dev/null
+++ praisonaiagents-0.0.148/praisonaiagents/_logging.py
@@ -0,0 +1,134 @@
+"""
+Centralized logging configuration for PraisonAI Agents.
+This module consolidates all logging configuration in one place to avoid duplication.
+"""
+
+import os
+import logging
+from typing import List
+
+# ========================================================================
+# ENVIRONMENT CONFIGURATION
+# ========================================================================
+def _configure_environment():
+    """Set environment variables to suppress debug messages at the source."""
+    env_vars = {
+        # LiteLLM configuration
+        "LITELLM_TELEMETRY": "False",
+        "LITELLM_DROP_PARAMS": "True",
+        "LITELLM_LOG": "ERROR",
+        "LITELLM_DEBUG": "False",
+        "LITELLM_SUPPRESS_DEBUG_INFO": "True",
+        "LITELLM_VERBOSE": "False",
+        "LITELLM_SET_VERBOSE": "False",
+        # HTTPX configuration
+        "HTTPX_DISABLE_WARNINGS": "True",
+        "HTTPX_LOG_LEVEL": "ERROR",
+        # Pydantic configuration
+        "PYDANTIC_WARNINGS_ENABLED": "False",
+    }
+
+    for key, value in env_vars.items():
+        os.environ[key] = value
+
+
+# ========================================================================
+# LOGGER CONFIGURATION
+# ========================================================================
+def _get_all_noisy_loggers() -> List[str]:
+    """Get list of all loggers that should be suppressed."""
+    return [
+        # LiteLLM and variants
+        "litellm", "LiteLLM", "LiteLLM Router", "LiteLLM Proxy",
+        # HTTP libraries
+        "httpx", "httpx._trace", "httpx._client",
+        "httpcore", "httpcore._trace",
+        # OpenAI
+        "openai._base_client", "openai._client",
+        # Markdown
+        "markdown_it", "rich.markdown",
+        # System
+        "asyncio", "selector_events", "pydantic",
+        "praisonaiagents.telemetry.telemetry",
+    ]
+
+
+def _configure_loggers():
+    """Configure all loggers based on LOGLEVEL environment variable."""
+    loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
+
+    # When DEBUG is set, allow some HTTP logging for API endpoints
+    if loglevel == 'DEBUG':
+        allowed_debug_loggers = {"httpx", "httpx._client", "openai._client"}
+
+        for logger_name in _get_all_noisy_loggers():
+            if logger_name not in allowed_debug_loggers:
+                logger = logging.getLogger(logger_name)
+                logger.setLevel(logging.CRITICAL)
+                logger.handlers = []
+                logger.propagate = False
+
+        # Ensure allowed loggers are at INFO level to show API calls
+        for logger_name in allowed_debug_loggers:
+            logger = logging.getLogger(logger_name)
+            logger.setLevel(logging.INFO)
+    else:
+        # Suppress all noisy loggers when not in DEBUG mode
+        for logger_name in _get_all_noisy_loggers():
+            logger = logging.getLogger(logger_name)
+            logger.setLevel(logging.CRITICAL)
+            logger.handlers = []
+            logger.propagate = False
+
+
+# ========================================================================
+# LITELLM CONFIGURATION
+# ========================================================================
+def _configure_litellm():
+    """Configure litellm after it's imported."""
+    try:
+        import litellm
+        litellm.telemetry = False
+        litellm.drop_params = True
+        litellm.suppress_debug_info = True
+
+        if hasattr(litellm, '_logging_obj'):
+            litellm._logging_obj.setLevel(logging.CRITICAL)
+
+        if hasattr(litellm, 'set_verbose'):
+            litellm.set_verbose = False
+
+    except (ImportError, AttributeError):
+        pass
+
+
+# ========================================================================
+# ROOT LOGGER CONFIGURATION
+# ========================================================================
+def configure_root_logger():
+    """Configure the root logger with RichHandler."""
+    from rich.logging import RichHandler
+
+    loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
+
+    logging.basicConfig(
+        level=getattr(logging, loglevel, logging.INFO),
+        format="%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s",
+        datefmt="[%X]",
+        handlers=[RichHandler(rich_tracebacks=True)],
+        force=True
+    )
+
+
+# ========================================================================
+# INITIALIZATION
+# ========================================================================
+def initialize_logging():
+    """Initialize all logging configuration."""
+    _configure_environment()
+    _configure_loggers()
+    _configure_litellm()
+
+
+# Auto-initialize on import
+initialize_logging()
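A short sketch of how this centralized configuration is expected to behave at import time with the default `LOGLEVEL`. The numeric levels are the standard `logging` constants, and the inline comments are expectations drawn from the hunk above, not guarantees.

```python
import logging
import os

# _logging runs initialize_logging() on import, and __init__.py then calls
# configure_root_logger(), so importing the package is enough to apply all of this.
os.environ.setdefault("LOGLEVEL", "INFO")
import praisonaiagents  # noqa: F401

# Outside DEBUG mode, the "noisy" loggers are raised to CRITICAL and detached.
print(logging.getLogger("litellm").level)   # expected: 50 (CRITICAL)
print(logging.getLogger("httpx").level)     # expected: 50 (CRITICAL)

# The root logger is reconfigured with a RichHandler (force=True replaces prior handlers).
print(logging.getLogger().handlers)         # expected: a single RichHandler
```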
--- /dev/null
+++ praisonaiagents-0.0.148/praisonaiagents/_warning_patch.py
@@ -0,0 +1,73 @@
+"""
+Minimal warning patch to suppress specific third-party warnings.
+This module patches the warnings module to intercept specific messages.
+"""
+
+import warnings
+import functools
+import sys
+
+# Apply aggressive warning filters first
+warnings.filterwarnings("ignore", message=".*Pydantic serializer warnings.*")
+warnings.filterwarnings("ignore", message=".*PydanticSerializationUnexpectedValue.*")
+warnings.filterwarnings("ignore", message=".*Expected 9 fields but got.*")
+warnings.filterwarnings("ignore", message=".*Expected `StreamingChoices` but got.*")
+warnings.filterwarnings("ignore", message=".*serialized value may not be as expected.*")
+warnings.filterwarnings("ignore", message=".*Use 'content=<...>' to upload raw bytes/text content.*")
+warnings.filterwarnings("ignore", message=".*The `dict` method is deprecated.*")
+warnings.filterwarnings("ignore", category=UserWarning, module="pydantic.*")
+
+# Store the original warn function
+_original_warn = warnings.warn
+_original_warn_explicit = warnings.warn_explicit
+
+# Messages to suppress (partial matches)
+SUPPRESSED_PATTERNS = [
+    "Use 'content=<...>' to upload raw bytes/text content",
+    "The `dict` method is deprecated; use `model_dump` instead",
+    "Pydantic serializer warnings",
+    "PydanticSerializationUnexpectedValue",
+    "Expected 9 fields but got 5 for type `Message`",
+    "Expected `StreamingChoices` but got `Choices`",
+    "serialized value may not be as expected"
+]
+
+@functools.wraps(_original_warn)
+def _patched_warn(message, category=None, stacklevel=1, source=None):
+    """Patched warn function that suppresses specific messages."""
+    msg_str = str(message)
+
+    for pattern in SUPPRESSED_PATTERNS:
+        if pattern in msg_str:
+            return
+
+    if category == UserWarning and "pydantic" in msg_str.lower():
+        return
+
+    _original_warn(message, category, stacklevel, source)
+
+@functools.wraps(_original_warn_explicit)
+def _patched_warn_explicit(message, category, filename, lineno, module=None, registry=None, module_globals=None, source=None):
+    """Patched warn_explicit function that suppresses specific messages."""
+    msg_str = str(message)
+
+    for pattern in SUPPRESSED_PATTERNS:
+        if pattern in msg_str:
+            return
+
+    if category == UserWarning and "pydantic" in msg_str.lower():
+        return
+
+    if module and "pydantic" in str(module):
+        return
+
+    _original_warn_explicit(message, category, filename, lineno, module, registry, module_globals, source)
+
+# Apply the patches
+warnings.warn = _patched_warn
+warnings.warn_explicit = _patched_warn_explicit
+
+# Also patch sys.modules warnings if it exists
+if 'warnings' in sys.modules:
+    sys.modules['warnings'].warn = _patched_warn
+    sys.modules['warnings'].warn_explicit = _patched_warn_explicit
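To illustrate the effect of the patch, a small sketch: once the package is imported, `warnings.warn` has been replaced, so messages matching `SUPPRESSED_PATTERNS` (or pydantic-related `UserWarning`s) should be dropped while unrelated warnings still pass through.

```python
import warnings

import praisonaiagents  # noqa: F401  # importing the package applies _warning_patch

# Matches a SUPPRESSED_PATTERNS entry, so it should be silently dropped.
warnings.warn("The `dict` method is deprecated; use `model_dump` instead")

# Pydantic-flavoured UserWarnings are also dropped by the patched warn().
warnings.warn("Pydantic serializer warnings: unexpected field", UserWarning)

# Anything else is forwarded to the original warnings.warn and shown as usual.
warnings.warn("this unrelated warning is still emitted")
```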
--- praisonaiagents-0.0.146/praisonaiagents/agent/agent.py
+++ praisonaiagents-0.0.148/praisonaiagents/agent/agent.py
@@ -331,8 +331,15 @@ class Agent:
 
         # Configure logging to suppress unwanted outputs
         logging.getLogger("litellm").setLevel(logging.WARNING)
-
-logging
+
+        # Allow httpx logging when LOGLEVEL=debug, otherwise suppress it
+        loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
+        if loglevel == 'DEBUG':
+            logging.getLogger("httpx").setLevel(logging.INFO)
+            logging.getLogger("httpcore").setLevel(logging.INFO)
+        else:
+            logging.getLogger("httpx").setLevel(logging.WARNING)
+            logging.getLogger("httpcore").setLevel(logging.WARNING)
 
         # If instructions are provided, use them to set role, goal, and backstory
         if instructions:
@@ -1229,14 +1236,14 @@ Your Goal: {self.goal}"""
                 border_style="green",
                 expand=False
             )
-        else:
-            # No content yet: show generating message
-            return Panel(
-                f"[bold cyan]Generating response...[/bold cyan]",
-                title=f"[bold]{self.name}[/bold] - {elapsed:.1f}s",
-                border_style="cyan",
-                expand=False
-            )
+        # else:
+        #     # No content yet: show generating message
+        #     return Panel(
+        #         f"[bold cyan]Generating response...[/bold cyan]",
+        #         title=f"[bold]{self.name}[/bold] - {elapsed:.1f}s",
+        #         border_style="cyan",
+        #         expand=False
+        #     )
 
     def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=None, task_name=None, task_description=None, task_id=None):
         # Reset the final display flag for each new conversation
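The first `agent.py` hunk above makes `LOGLEVEL` govern httpx/httpcore verbosity during `Agent` initialization. A minimal sketch of that behavior, assuming the variable is set before the agent is constructed and that `Agent` can be built from just an `instructions` string (as the surrounding `__init__` code suggests); the instructions text is a placeholder, and actually calling a model would still need provider credentials.

```python
import logging
import os

os.environ["LOGLEVEL"] = "DEBUG"  # must be set before the Agent reads it in __init__

from praisonaiagents import Agent

# Placeholder instructions; constructing the agent runs the logging setup shown above.
agent = Agent(instructions="You are a helpful assistant")

# With LOGLEVEL=DEBUG, httpx and httpcore are left at INFO so API requests stay visible;
# with any other level they are pushed down to WARNING.
print(logging.getLogger("httpx").level)     # expected: 20 (INFO)
print(logging.getLogger("httpcore").level)  # expected: 20 (INFO)
```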
--- /dev/null
+++ praisonaiagents-0.0.148/praisonaiagents/llm/__init__.py
@@ -0,0 +1,50 @@
+import os
+
+# Ensure litellm telemetry is disabled before imports
+os.environ["LITELLM_TELEMETRY"] = "False"
+
+# Import modules
+from .llm import LLM, LLMContextLengthExceededException
+from .openai_client import (
+    OpenAIClient,
+    get_openai_client,
+    ChatCompletionMessage,
+    Choice,
+    CompletionTokensDetails,
+    PromptTokensDetails,
+    CompletionUsage,
+    ChatCompletion,
+    ToolCall,
+    process_stream_chunks
+)
+from .model_capabilities import (
+    supports_structured_outputs,
+    supports_streaming_with_tools
+)
+from .model_router import (
+    ModelRouter,
+    ModelProfile,
+    TaskComplexity,
+    create_routing_agent
+)
+
+__all__ = [
+    "LLM",
+    "LLMContextLengthExceededException",
+    "OpenAIClient",
+    "get_openai_client",
+    "ChatCompletionMessage",
+    "Choice",
+    "CompletionTokensDetails",
+    "PromptTokensDetails",
+    "CompletionUsage",
+    "ChatCompletion",
+    "ToolCall",
+    "process_stream_chunks",
+    "supports_structured_outputs",
+    "supports_streaming_with_tools",
+    "ModelRouter",
+    "ModelProfile",
+    "TaskComplexity",
+    "create_routing_agent"
+]
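Finally, a sketch of what the flattened `praisonaiagents.llm` namespace looks like to callers. The model id below is a placeholder, and the capability helpers are assumed to take a model-name string; neither assumption comes from this hunk itself.

```python
# Illustrative only: these names are re-exported by praisonaiagents/llm/__init__.py.
from praisonaiagents.llm import (
    LLM,
    LLMContextLengthExceededException,  # raised when a prompt exceeds the context window
    supports_streaming_with_tools,
    supports_structured_outputs,
)

# Assumed signatures: the capability helpers are queried with a model-name string
# (the model id here is a placeholder, not something this diff specifies).
print(supports_structured_outputs("gpt-4o-mini"))
print(supports_streaming_with_tools("gpt-4o-mini"))

# Constructing the LiteLLM-backed wrapper; the keyword and model id are assumptions,
# and generating text would require provider credentials at runtime.
llm = LLM(model="gpt-4o-mini")
```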