praisonaiagents 0.0.124__tar.gz → 0.0.125__tar.gz
This diff compares the contents of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the registry.
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/PKG-INFO +2 -1
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/__init__.py +24 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/knowledge/knowledge.py +0 -3
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/llm/__init__.py +6 -9
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/llm/llm.py +51 -25
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/main.py +1 -18
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/mcp/mcp.py +46 -8
- praisonaiagents-0.0.125/praisonaiagents/mcp/mcp_http_stream.py +466 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/mcp/mcp_sse.py +19 -2
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/process/process.py +88 -3
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/task/task.py +1 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents.egg-info/PKG-INFO +2 -1
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents.egg-info/SOURCES.txt +4 -1
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents.egg-info/requires.txt +1 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/pyproject.toml +3 -2
- praisonaiagents-0.0.125/tests/test_http_stream_basic.py +58 -0
- praisonaiagents-0.0.125/tests/test_validation_feedback.py +252 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/README.md +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/agent/agent.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/agent/handoff.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/agent/image_agent.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/agents/agents.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/agents/autoagents.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/approval.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/guardrails/__init__.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/guardrails/guardrail_result.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/knowledge/__init__.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/knowledge/chunking.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/llm/model_capabilities.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/llm/openai_client.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/mcp/__init__.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/memory/__init__.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/memory/memory.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/process/__init__.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/session.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/telemetry/__init__.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/telemetry/integration.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/telemetry/telemetry.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/README.md +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/__init__.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/arxiv_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/calculator_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/csv_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/duckdb_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/excel_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/file_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/json_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/newspaper_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/pandas_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/python_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/searxng_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/shell_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/spider_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/test.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/train/data/generatecot.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/wikipedia_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/xml_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/yaml_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/tools/yfinance_tools.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/setup.cfg +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/tests/test-graph-memory.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/tests/test.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/tests/test_handoff_compatibility.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/tests/test_ollama_async_fix.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/tests/test_ollama_fix.py +0 -0
- {praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/tests/test_posthog_fixed.py +0 -0
{praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.124
+Version: 0.0.125
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
@@ -9,6 +9,7 @@ Requires-Dist: rich
 Requires-Dist: openai
 Requires-Dist: mcp>=1.6.0
 Requires-Dist: posthog>=3.0.0
+Requires-Dist: aiohttp>=3.8.0
 Provides-Extra: mcp
 Requires-Dist: mcp>=1.6.0; extra == "mcp"
 Requires-Dist: fastapi>=0.115.0; extra == "mcp"
{praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/__init__.py

@@ -2,6 +2,30 @@
 Praison AI Agents - A package for hierarchical AI agent task execution
 """
 
+# Configure logging before any other imports
+import os
+import logging
+from rich.logging import RichHandler
+
+# Get log level from environment variable
+LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
+
+# Configure root logger
+logging.basicConfig(
+    level=getattr(logging, LOGLEVEL, logging.INFO),
+    format="%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s",
+    datefmt="[%X]",
+    handlers=[RichHandler(rich_tracebacks=True)]
+)
+
+# Suppress specific noisy loggers
+logging.getLogger("litellm").setLevel(logging.WARNING)
+logging.getLogger("litellm.utils").setLevel(logging.WARNING)
+logging.getLogger("markdown_it").setLevel(logging.WARNING)
+logging.getLogger("rich.markdown").setLevel(logging.WARNING)
+logging.getLogger("httpx").setLevel(logging.WARNING)
+logging.getLogger("httpcore").setLevel(logging.WARNING)
+
 from .agent.agent import Agent
 from .agent.image_agent import ImageAgent
 from .agents.agents import PraisonAIAgents
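Since logging is now configured when the package is first imported, the effective level can be controlled through the `LOGLEVEL` environment variable, provided it is set before that first import. A minimal sketch of that usage (the variable assignment is the only assumption beyond what the diff shows):

```python
import os

# Must happen before the first praisonaiagents import, because the
# package __init__.py reads LOGLEVEL when it calls logging.basicConfig.
os.environ["LOGLEVEL"] = "DEBUG"

import praisonaiagents  # root logger now uses RichHandler at DEBUG level
```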
{praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/knowledge/knowledge.py

@@ -67,9 +67,6 @@ class Knowledge:
 
         # Disable OpenAI API request logging
         logging.getLogger('openai').setLevel(logging.WARNING)
-
-        # Set root logger to warning to catch any uncategorized logs
-        logging.getLogger().setLevel(logging.WARNING)
 
     @cached_property
     def _deps(self):
{praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/llm/__init__.py

@@ -5,19 +5,16 @@ import os
 # Disable litellm telemetry before any imports
 os.environ["LITELLM_TELEMETRY"] = "False"
 
-# Suppress all relevant logs at module level
-logging.getLogger("litellm").setLevel(logging.
-logging.getLogger("openai").setLevel(logging.
-logging.getLogger("httpx").setLevel(logging.
-logging.getLogger("httpcore").setLevel(logging.
-logging.getLogger("pydantic").setLevel(logging.
+# Suppress all relevant logs at module level - consistent with main __init__.py
+logging.getLogger("litellm").setLevel(logging.WARNING)
+logging.getLogger("openai").setLevel(logging.WARNING)
+logging.getLogger("httpx").setLevel(logging.WARNING)
+logging.getLogger("httpcore").setLevel(logging.WARNING)
+logging.getLogger("pydantic").setLevel(logging.WARNING)
 
 # Suppress pydantic warnings
 warnings.filterwarnings("ignore", category=UserWarning, module="pydantic")
 
-# Configure logging to suppress all INFO messages
-logging.basicConfig(level=logging.WARNING)
-
 # Import after suppressing warnings
 from .llm import LLM, LLMContextLengthExceededException
 from .openai_client import (
{praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/llm/llm.py

@@ -864,32 +864,44 @@ class LLM:
             ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
 
             if ollama_params:
-                # Get response
-                if
-
+                # Get response based on streaming mode
+                if stream:
+                    # Streaming approach
+                    if verbose:
+                        with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+                            response_text = ""
+                            for chunk in litellm.completion(
+                                **self._build_completion_params(
+                                    messages=ollama_params["follow_up_messages"],
+                                    temperature=temperature,
+                                    stream=True
+                                )
+                            ):
+                                if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                    content = chunk.choices[0].delta.content
+                                    response_text += content
+                                    live.update(display_generating(response_text, start_time))
+                    else:
                         response_text = ""
                         for chunk in litellm.completion(
                             **self._build_completion_params(
                                 messages=ollama_params["follow_up_messages"],
                                 temperature=temperature,
-                                stream=
+                                stream=True
                             )
                         ):
                             if chunk and chunk.choices and chunk.choices[0].delta.content:
-
-                                response_text += content
-                                live.update(display_generating(response_text, start_time))
+                                response_text += chunk.choices[0].delta.content
                 else:
-
-
+                    # Non-streaming approach
+                    resp = litellm.completion(
                         **self._build_completion_params(
                             messages=ollama_params["follow_up_messages"],
                             temperature=temperature,
-                            stream=
+                            stream=False
                         )
-                )
-
-                response_text += chunk.choices[0].delta.content
+                    )
+                    response_text = resp.get("choices", [{}])[0].get("message", {}).get("content", "") or ""
 
             # Set flag to indicate Ollama was handled
             ollama_handled = True
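The rewritten block makes the Ollama follow-up call respect the caller's `stream` flag instead of always streaming. A standalone sketch of the same pattern, using only the litellm calls visible in this diff (the wrapper function and model name are illustrative):

```python
import litellm

def get_text(messages, model="ollama/llama3", stream=True, temperature=0.7):
    """Collect the full response text, streaming or in a single call."""
    if stream:
        text = ""
        for chunk in litellm.completion(model=model, messages=messages,
                                        temperature=temperature, stream=True):
            # Each streamed chunk carries an incremental content delta
            if chunk and chunk.choices and chunk.choices[0].delta.content:
                text += chunk.choices[0].delta.content
        return text
    # Non-streaming: one blocking call, then read the message content,
    # defaulting to "" when the response is empty (as the diff does)
    resp = litellm.completion(model=model, messages=messages,
                              temperature=temperature, stream=False)
    return resp.get("choices", [{}])[0].get("message", {}).get("content", "") or ""
```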
@@ -945,9 +957,26 @@ class LLM:
 
             # Otherwise do the existing streaming approach if not already handled
             elif not ollama_handled:
-                # Get response after tool calls
-                if
-
+                # Get response after tool calls
+                if stream:
+                    # Streaming approach
+                    if verbose:
+                        with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
+                            final_response_text = ""
+                            for chunk in litellm.completion(
+                                **self._build_completion_params(
+                                    messages=messages,
+                                    tools=formatted_tools,
+                                    temperature=temperature,
+                                    stream=True,
+                                    **kwargs
+                                )
+                            ):
+                                if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                    content = chunk.choices[0].delta.content
+                                    final_response_text += content
+                                    live.update(display_generating(final_response_text, current_time))
+                    else:
                         final_response_text = ""
                         for chunk in litellm.completion(
                             **self._build_completion_params(
@@ -959,22 +988,19 @@ class LLM:
                             )
                         ):
                             if chunk and chunk.choices and chunk.choices[0].delta.content:
-
-                                final_response_text += content
-                                live.update(display_generating(final_response_text, current_time))
+                                final_response_text += chunk.choices[0].delta.content
                 else:
-
-
+                    # Non-streaming approach
+                    resp = litellm.completion(
                         **self._build_completion_params(
                             messages=messages,
                             tools=formatted_tools,
                             temperature=temperature,
-                            stream=
+                            stream=False,
                             **kwargs
                         )
-                )
-
-                final_response_text += chunk.choices[0].delta.content
+                    )
+                    final_response_text = resp.get("choices", [{}])[0].get("message", {}).get("content", "") or ""
 
             final_response_text = final_response_text.strip()
 
{praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/main.py

@@ -9,32 +9,15 @@ from rich.console import Console
 from rich.panel import Panel
 from rich.text import Text
 from rich.markdown import Markdown
-from rich.logging import RichHandler
 from rich.live import Live
 import asyncio
 
-#
-# logging.basicConfig(level=logging.WARNING)
-
-# Suppress litellm logs
+# Logging is already configured in __init__.py, just clean up handlers for litellm
 logging.getLogger("litellm").handlers = []
 logging.getLogger("litellm.utils").handlers = []
 logging.getLogger("litellm").propagate = False
 logging.getLogger("litellm.utils").propagate = False
 
-LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
-
-logging.basicConfig(
-    level=getattr(logging, LOGLEVEL, logging.INFO),
-    format="%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s",
-    datefmt="[%X]",
-    handlers=[RichHandler(rich_tracebacks=True)]
-)
-
-# Add these lines to suppress markdown parser debug logs
-logging.getLogger('markdown_it').setLevel(logging.WARNING)
-logging.getLogger('rich.markdown').setLevel(logging.WARNING)
-
 # Global list to store error logs
 error_logs = []
 
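Note that the surviving lines silence litellm not by reconfiguring the root logger but by detaching the library's own handlers and stopping propagation, which leaves the package-wide configuration from `__init__.py` intact. The idiom in isolation:

```python
import logging

noisy = logging.getLogger("litellm")
noisy.handlers = []      # drop any handlers the library attached to itself
noisy.propagate = False  # keep its records from reaching the root handler
```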
{praisonaiagents-0.0.124 → praisonaiagents-0.0.125}/praisonaiagents/mcp/mcp.py

@@ -187,15 +187,45 @@ class MCP:
         self.timeout = timeout
         self.debug = debug
 
-        # Check if this is an
+        # Check if this is an HTTP URL
         if isinstance(command_or_string, str) and re.match(r'^https?://', command_or_string):
-            #
-
-
-
-
-
-
+            # Determine transport type based on URL or kwargs
+            if command_or_string.endswith('/sse') and 'transport_type' not in kwargs:
+                # Legacy SSE URL - use SSE transport for backward compatibility
+                from .mcp_sse import SSEMCPClient
+                self.sse_client = SSEMCPClient(command_or_string, debug=debug, timeout=timeout)
+                self._tools = list(self.sse_client.tools)
+                self.is_sse = True
+                self.is_http_stream = False
+                self.is_npx = False
+                return
+            else:
+                # Use HTTP Stream transport for all other HTTP URLs
+                from .mcp_http_stream import HTTPStreamMCPClient
+                # Extract transport options from kwargs
+                transport_options = {}
+                if 'responseMode' in kwargs:
+                    transport_options['responseMode'] = kwargs.pop('responseMode')
+                if 'headers' in kwargs:
+                    transport_options['headers'] = kwargs.pop('headers')
+                if 'cors' in kwargs:
+                    transport_options['cors'] = kwargs.pop('cors')
+                if 'session' in kwargs:
+                    transport_options['session'] = kwargs.pop('session')
+                if 'resumability' in kwargs:
+                    transport_options['resumability'] = kwargs.pop('resumability')
+
+                self.http_stream_client = HTTPStreamMCPClient(
+                    command_or_string,
+                    debug=debug,
+                    timeout=timeout,
+                    options=transport_options
+                )
+                self._tools = list(self.http_stream_client.tools)
+                self.is_sse = False
+                self.is_http_stream = True
+                self.is_npx = False
+                return
 
         # Handle the single string format for stdio client
         if isinstance(command_or_string, str) and args is None:
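With this detection logic, an HTTP URL ending in `/sse` keeps the legacy SSE transport, while any other HTTP(S) URL is routed to the new HTTP Stream transport backed by the new `mcp_http_stream.py` module (presumably the reason for the new `aiohttp>=3.8.0` dependency). A hedged usage sketch; the URLs and option values are placeholders, and only keyword names handled in this diff are used:

```python
from praisonaiagents.mcp import MCP  # import path assumed from the package layout

# URL ends in /sse: legacy SSE transport is selected for backward compatibility.
sse_tools = MCP("http://localhost:8080/sse")

# Any other HTTP(S) URL: HTTP Stream transport. responseMode, headers, cors,
# session and resumability kwargs are forwarded as transport options.
http_tools = MCP(
    "http://localhost:8080/mcp",
    responseMode="stream",  # illustrative value
    headers={"Authorization": "Bearer <token>"},
)
```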
@@ -219,6 +249,7 @@ class MCP:
 
         # Set up stdio client
         self.is_sse = False
+        self.is_http_stream = False
 
         # Ensure UTF-8 encoding in environment for Docker compatibility
         env = kwargs.get('env', {})
@@ -275,6 +306,9 @@ class MCP:
         """
         if self.is_sse:
             return list(self.sse_client.tools)
+
+        if self.is_http_stream:
+            return list(self.http_stream_client.tools)
 
         tool_functions = []
 
@@ -448,6 +482,10 @@ class MCP:
         if self.is_sse and hasattr(self, 'sse_client') and self.sse_client.tools:
             # Return all tools from SSE client
             return self.sse_client.to_openai_tools()
+
+        if self.is_http_stream and hasattr(self, 'http_stream_client') and self.http_stream_client.tools:
+            # Return all tools from HTTP Stream client
+            return self.http_stream_client.to_openai_tools()
 
         # For simplicity, we'll convert the first tool only if multiple exist
         # More complex implementations could handle multiple tools
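For context, `to_openai_tools()` returns tool definitions in the standard OpenAI function-calling format, so an HTTP Stream tool list can be handed straight to a chat-completion call. The expected shape of one entry (the tool itself is made up for illustration):

```python
# One entry in the list returned by to_openai_tools()
# (standard OpenAI function-calling schema; values are illustrative).
example_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}
```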