media-agent-mcp 2.6.11__py3-none-any.whl → 2.6.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- media_agent_mcp/__init__.py +2 -3
- media_agent_mcp/ai_models/seed16.py +3 -3
- media_agent_mcp/async_server.py +315 -2
- {media_agent_mcp-2.6.11.dist-info → media_agent_mcp-2.6.12.dist-info}/METADATA +1 -1
- {media_agent_mcp-2.6.11.dist-info → media_agent_mcp-2.6.12.dist-info}/RECORD +8 -9
- media_agent_mcp/server.py +0 -600
- {media_agent_mcp-2.6.11.dist-info → media_agent_mcp-2.6.12.dist-info}/WHEEL +0 -0
- {media_agent_mcp-2.6.11.dist-info → media_agent_mcp-2.6.12.dist-info}/entry_points.txt +0 -0
- {media_agent_mcp-2.6.11.dist-info → media_agent_mcp-2.6.12.dist-info}/top_level.txt +0 -0
media_agent_mcp/__init__.py
CHANGED
@@ -1,9 +1,8 @@
|
|
1
1
|
"""Media Agent MCP Server - A Model Context Protocol server for media processing."""
|
2
2
|
|
3
3
|
from . import ai_models, media_selectors, storage, video
|
4
|
-
from .
|
5
|
-
from .async_server import main as async_main
|
4
|
+
from .async_server import main
|
6
5
|
from . import async_wrapper
|
7
6
|
|
8
7
|
__version__ = "0.1.0"
|
9
|
-
__all__ = ['ai_models', 'media_selectors', 'storage', 'video', 'main', '
|
8
|
+
__all__ = ['ai_models', 'media_selectors', 'storage', 'video', 'main', 'async_wrapper']
|
@@ -49,9 +49,9 @@ def process_vlm_task(messages: List[Dict[str, Any]], max_tokens: int = 4096,
|
|
49
49
|
'temperature': temperature,
|
50
50
|
'top_p': top_p,
|
51
51
|
'stream': False, # Blocking request
|
52
|
-
'thinking':{
|
53
|
-
|
54
|
-
}
|
52
|
+
# 'thinking':{
|
53
|
+
# 'type': os.getenv('VLM_THINKING_TYPE', 'disabled')
|
54
|
+
# }
|
55
55
|
}
|
56
56
|
|
57
57
|
if is_json:
|
media_agent_mcp/async_server.py
CHANGED
@@ -26,6 +26,12 @@ from dotenv import load_dotenv
|
|
26
26
|
import uvicorn
|
27
27
|
import anyio
|
28
28
|
from functools import wraps
|
29
|
+
import uuid
|
30
|
+
import weakref
|
31
|
+
from starlette.applications import Starlette
|
32
|
+
from starlette.middleware.base import BaseHTTPMiddleware
|
33
|
+
from starlette.requests import Request
|
34
|
+
from starlette.responses import Response
|
29
35
|
|
30
36
|
def async_retry(max_retries=3, delay=2):
|
31
37
|
def decorator(func):
|
@@ -44,6 +50,20 @@ def async_retry(max_retries=3, delay=2):
|
|
44
50
|
await asyncio.sleep(delay)
|
45
51
|
continue
|
46
52
|
return result
|
53
|
+
except anyio.ClosedResourceError as e:
|
54
|
+
logger.warning(f"ClosedResourceError in {func.__name__} (attempt {attempt + 1}): {e}")
|
55
|
+
# For ClosedResourceError, we should handle it gracefully
|
56
|
+
if attempt < max_retries - 1:
|
57
|
+
logger.info(f"Retrying {func.__name__} after ClosedResourceError...")
|
58
|
+
await asyncio.sleep(delay)
|
59
|
+
continue
|
60
|
+
else:
|
61
|
+
# On final attempt, return a structured error response
|
62
|
+
return {
|
63
|
+
"status": "error",
|
64
|
+
"data": None,
|
65
|
+
"message": f"Session expired during {func.__name__} execution. Please retry with a new session."
|
66
|
+
}
|
47
67
|
except Exception as e:
|
48
68
|
last_exception = str(e)
|
49
69
|
logger.error(f"Attempt {attempt + 1} of {max_retries} failed for {func.__name__} with exception: {e}. Retrying in {delay}s...")
|
@@ -57,6 +77,46 @@ def async_retry(max_retries=3, delay=2):
|
|
57
77
|
return wrapper
|
58
78
|
return decorator
|
59
79
|
|
80
|
+
def session_aware_retry(max_retries=3, delay=2):
|
81
|
+
"""Enhanced retry decorator that handles session expiration specifically."""
|
82
|
+
def decorator(func):
|
83
|
+
@wraps(func)
|
84
|
+
async def wrapper(*args, **kwargs):
|
85
|
+
for attempt in range(max_retries):
|
86
|
+
try:
|
87
|
+
result = await func(*args, **kwargs)
|
88
|
+
return result
|
89
|
+
except anyio.ClosedResourceError as e:
|
90
|
+
logger.warning(f"Session expired during {func.__name__} execution (attempt {attempt + 1}): {e}")
|
91
|
+
|
92
|
+
if attempt < max_retries - 1:
|
93
|
+
# Generate new session for retry
|
94
|
+
new_session_id = session_manager.generate_session_id()
|
95
|
+
logger.info(f"Generated new session {new_session_id} for retry of {func.__name__}")
|
96
|
+
await asyncio.sleep(delay)
|
97
|
+
continue
|
98
|
+
else:
|
99
|
+
return {
|
100
|
+
"status": "error",
|
101
|
+
"data": None,
|
102
|
+
"message": f"Session expired during {func.__name__} execution. A new session has been generated. Please retry your request."
|
103
|
+
}
|
104
|
+
except Exception as e:
|
105
|
+
logger.error(f"Unexpected error in {func.__name__} (attempt {attempt + 1}): {e}")
|
106
|
+
if attempt < max_retries - 1:
|
107
|
+
await asyncio.sleep(delay)
|
108
|
+
continue
|
109
|
+
else:
|
110
|
+
return {
|
111
|
+
"status": "error",
|
112
|
+
"data": None,
|
113
|
+
"message": f"Function {func.__name__} failed: {str(e)}"
|
114
|
+
}
|
115
|
+
|
116
|
+
return {"status": "error", "data": None, "message": f"Function {func.__name__} failed after {max_retries} retries"}
|
117
|
+
return wrapper
|
118
|
+
return decorator
|
119
|
+
|
60
120
|
from mcp.server.fastmcp import FastMCP
|
61
121
|
|
62
122
|
# Import async wrappers
|
@@ -81,6 +141,169 @@ from media_agent_mcp.async_wrapper import (
|
|
81
141
|
logging.basicConfig(level=logging.INFO)
|
82
142
|
logger = logging.getLogger(__name__)
|
83
143
|
|
144
|
+
# Session management for handling expired sessions
|
145
|
+
class SessionManager:
|
146
|
+
def __init__(self):
|
147
|
+
self._sessions = {} # Changed from WeakValueDictionary to regular dict
|
148
|
+
self._session_routes = {}
|
149
|
+
self._session_timestamps = {} # Track session creation times
|
150
|
+
|
151
|
+
def generate_session_id(self) -> str:
|
152
|
+
"""Generate a new unique session ID."""
|
153
|
+
return str(uuid.uuid4()).replace('-', '')
|
154
|
+
|
155
|
+
def register_session(self, session_id: str, session_obj):
|
156
|
+
"""Register a session object."""
|
157
|
+
import time
|
158
|
+
self._sessions[session_id] = session_obj
|
159
|
+
self._session_timestamps[session_id] = time.time()
|
160
|
+
logger.info(f"Registered session: {session_id}")
|
161
|
+
|
162
|
+
def get_session(self, session_id: str):
|
163
|
+
"""Get session object by ID."""
|
164
|
+
return self._sessions.get(session_id)
|
165
|
+
|
166
|
+
def remove_session(self, session_id: str):
|
167
|
+
"""Remove a session."""
|
168
|
+
if session_id in self._sessions:
|
169
|
+
del self._sessions[session_id]
|
170
|
+
if session_id in self._session_routes:
|
171
|
+
del self._session_routes[session_id]
|
172
|
+
if session_id in self._session_timestamps:
|
173
|
+
del self._session_timestamps[session_id]
|
174
|
+
logger.info(f"Removed session: {session_id}")
|
175
|
+
|
176
|
+
def cleanup_expired_sessions(self, max_age_seconds: int = 3600):
|
177
|
+
"""Clean up sessions older than max_age_seconds."""
|
178
|
+
import time
|
179
|
+
current_time = time.time()
|
180
|
+
expired_sessions = []
|
181
|
+
|
182
|
+
for session_id, timestamp in self._session_timestamps.items():
|
183
|
+
if current_time - timestamp > max_age_seconds:
|
184
|
+
expired_sessions.append(session_id)
|
185
|
+
|
186
|
+
for session_id in expired_sessions:
|
187
|
+
self.remove_session(session_id)
|
188
|
+
logger.info(f"Cleaned up expired session: {session_id}")
|
189
|
+
|
190
|
+
return len(expired_sessions)
|
191
|
+
|
192
|
+
def get_session_count(self) -> int:
|
193
|
+
"""Get the number of active sessions."""
|
194
|
+
return len(self._sessions)
|
195
|
+
|
196
|
+
def get_route_count(self) -> int:
|
197
|
+
"""Get the number of route mappings."""
|
198
|
+
return len(self._session_routes)
|
199
|
+
|
200
|
+
def add_route_mapping(self, old_session_id: str, new_session_id: str):
|
201
|
+
"""Add route mapping for session forwarding."""
|
202
|
+
self._session_routes[old_session_id] = new_session_id
|
203
|
+
logger.info(f"Added route mapping: {old_session_id} -> {new_session_id}")
|
204
|
+
|
205
|
+
def get_route_mapping(self, session_id: str) -> Optional[str]:
|
206
|
+
"""Get route mapping for a session."""
|
207
|
+
return self._session_routes.get(session_id)
|
208
|
+
|
209
|
+
# Global session manager
|
210
|
+
session_manager = SessionManager()
|
211
|
+
|
212
|
+
class SessionErrorHandlingMiddleware(BaseHTTPMiddleware):
|
213
|
+
"""Middleware to handle ClosedResourceError and auto-regenerate sessions."""
|
214
|
+
|
215
|
+
async def dispatch(self, request: Request, call_next):
|
216
|
+
try:
|
217
|
+
response = await call_next(request)
|
218
|
+
return response
|
219
|
+
except anyio.ClosedResourceError as e:
|
220
|
+
logger.warning(f"ClosedResourceError detected: {e}")
|
221
|
+
|
222
|
+
# Extract session_id from request
|
223
|
+
session_id = self._extract_session_id(request)
|
224
|
+
if session_id:
|
225
|
+
# Generate new session ID
|
226
|
+
new_session_id = session_manager.generate_session_id()
|
227
|
+
|
228
|
+
# Add route mapping
|
229
|
+
session_manager.add_route_mapping(session_id, new_session_id)
|
230
|
+
|
231
|
+
# Remove old session
|
232
|
+
session_manager.remove_session(session_id)
|
233
|
+
|
234
|
+
logger.info(f"Auto-generated new session {new_session_id} to replace expired session {session_id}")
|
235
|
+
|
236
|
+
# Create a redirect response with new session ID
|
237
|
+
new_url = str(request.url).replace(f"session_id={session_id}", f"session_id={new_session_id}")
|
238
|
+
|
239
|
+
from starlette.responses import RedirectResponse
|
240
|
+
return RedirectResponse(url=new_url, status_code=307) # Temporary redirect preserving method
|
241
|
+
|
242
|
+
# If no session_id found, re-raise the error
|
243
|
+
raise e
|
244
|
+
except Exception as e:
|
245
|
+
logger.error(f"Unexpected error in middleware: {e}")
|
246
|
+
raise e
|
247
|
+
|
248
|
+
def _extract_session_id(self, request: Request) -> Optional[str]:
|
249
|
+
"""Extract session_id from request URL or headers."""
|
250
|
+
# Try to get from query parameters
|
251
|
+
session_id = request.query_params.get('session_id')
|
252
|
+
if session_id:
|
253
|
+
return session_id
|
254
|
+
|
255
|
+
# Try to get from path parameters
|
256
|
+
if hasattr(request, 'path_params') and 'session_id' in request.path_params:
|
257
|
+
return request.path_params['session_id']
|
258
|
+
|
259
|
+
# Try to extract from URL path
|
260
|
+
import re
|
261
|
+
path = str(request.url.path)
|
262
|
+
match = re.search(r'session_id=([a-f0-9]+)', str(request.url))
|
263
|
+
if match:
|
264
|
+
return match.group(1)
|
265
|
+
|
266
|
+
return None
|
267
|
+
|
268
|
+
# Global exception handler for ClosedResourceError
|
269
|
+
async def handle_closed_resource_error(request, exc):
|
270
|
+
"""Global handler for ClosedResourceError exceptions."""
|
271
|
+
logger.error(f"Global ClosedResourceError handler triggered: {exc}")
|
272
|
+
|
273
|
+
# Extract session_id from request if possible
|
274
|
+
session_id = None
|
275
|
+
if hasattr(request, 'query_params'):
|
276
|
+
session_id = request.query_params.get('session_id')
|
277
|
+
|
278
|
+
if session_id:
|
279
|
+
# Generate new session ID
|
280
|
+
new_session_id = session_manager.generate_session_id()
|
281
|
+
session_manager.add_route_mapping(session_id, new_session_id)
|
282
|
+
session_manager.remove_session(session_id)
|
283
|
+
|
284
|
+
logger.info(f"Global handler: Generated new session {new_session_id} to replace {session_id}")
|
285
|
+
|
286
|
+
from starlette.responses import JSONResponse
|
287
|
+
return JSONResponse(
|
288
|
+
status_code=410, # Gone - indicates the resource is no longer available
|
289
|
+
content={
|
290
|
+
"error": "session_expired",
|
291
|
+
"message": "Session has expired. A new session has been generated.",
|
292
|
+
"old_session_id": session_id,
|
293
|
+
"new_session_id": new_session_id,
|
294
|
+
"action": "retry_with_new_session"
|
295
|
+
}
|
296
|
+
)
|
297
|
+
|
298
|
+
from starlette.responses import JSONResponse
|
299
|
+
return JSONResponse(
|
300
|
+
status_code=500,
|
301
|
+
content={
|
302
|
+
"error": "internal_server_error",
|
303
|
+
"message": "An internal server error occurred. Please try again."
|
304
|
+
}
|
305
|
+
)
|
306
|
+
|
84
307
|
# Initialize FastMCP server (will be configured in main function)
|
85
308
|
load_dotenv()
|
86
309
|
mcp = FastMCP("Media-Agent-MCP-Async")
|
@@ -436,6 +659,76 @@ async def tts_tool(text: str, speaker_id: str) -> dict:
|
|
436
659
|
return result
|
437
660
|
|
438
661
|
|
662
|
+
@mcp.tool()
|
663
|
+
async def get_session_status() -> dict:
|
664
|
+
"""
|
665
|
+
Get current session management status and statistics.
|
666
|
+
|
667
|
+
Returns:
|
668
|
+
Dictionary with session statistics and status information
|
669
|
+
"""
|
670
|
+
try:
|
671
|
+
active_sessions = session_manager.get_session_count()
|
672
|
+
route_mappings = session_manager.get_route_count()
|
673
|
+
|
674
|
+
# Clean up expired sessions (older than 1 hour)
|
675
|
+
cleaned_sessions = session_manager.cleanup_expired_sessions(3600)
|
676
|
+
|
677
|
+
return {
|
678
|
+
"status": "success",
|
679
|
+
"data": {
|
680
|
+
"active_sessions": active_sessions,
|
681
|
+
"route_mappings": route_mappings,
|
682
|
+
"cleaned_sessions": cleaned_sessions,
|
683
|
+
"session_manager_enabled": True,
|
684
|
+
"features": [
|
685
|
+
"automatic_session_recovery",
|
686
|
+
"closed_resource_error_handling",
|
687
|
+
"request_forwarding",
|
688
|
+
"session_route_mapping",
|
689
|
+
"automatic_session_cleanup"
|
690
|
+
]
|
691
|
+
},
|
692
|
+
"message": f"Session management is active. Cleaned {cleaned_sessions} expired sessions."
|
693
|
+
}
|
694
|
+
except Exception as e:
|
695
|
+
logger.error(f"Error getting session status: {e}")
|
696
|
+
return {
|
697
|
+
"status": "error",
|
698
|
+
"data": None,
|
699
|
+
"message": f"Failed to get session status: {str(e)}"
|
700
|
+
}
|
701
|
+
|
702
|
+
|
703
|
+
@mcp.tool()
|
704
|
+
async def generate_new_session() -> dict:
|
705
|
+
"""
|
706
|
+
Manually generate a new session ID for testing or recovery purposes.
|
707
|
+
|
708
|
+
Returns:
|
709
|
+
Dictionary with new session ID
|
710
|
+
"""
|
711
|
+
try:
|
712
|
+
new_session_id = session_manager.generate_session_id()
|
713
|
+
logger.info(f"Manually generated new session: {new_session_id}")
|
714
|
+
|
715
|
+
return {
|
716
|
+
"status": "success",
|
717
|
+
"data": {
|
718
|
+
"session_id": new_session_id,
|
719
|
+
"timestamp": asyncio.get_event_loop().time()
|
720
|
+
},
|
721
|
+
"message": f"New session generated: {new_session_id}"
|
722
|
+
}
|
723
|
+
except Exception as e:
|
724
|
+
logger.error(f"Error generating new session: {e}")
|
725
|
+
return {
|
726
|
+
"status": "error",
|
727
|
+
"data": None,
|
728
|
+
"message": f"Failed to generate new session: {str(e)}"
|
729
|
+
}
|
730
|
+
|
731
|
+
|
439
732
|
def main():
|
440
733
|
"""Main entry point for the Async MCP server."""
|
441
734
|
import os
|
@@ -497,6 +790,11 @@ def main():
|
|
497
790
|
logger.info(f"Transport: {args.transport}")
|
498
791
|
if args.transport == 'sse':
|
499
792
|
logger.info(f"SSE Server will run on {args.host}:{args.port}")
|
793
|
+
logger.info("Session management features enabled:")
|
794
|
+
logger.info(" - Automatic session expiration detection")
|
795
|
+
logger.info(" - Auto-generation of new session IDs")
|
796
|
+
logger.info(" - Request forwarding with route mapping")
|
797
|
+
logger.info(" - ClosedResourceError handling")
|
500
798
|
|
501
799
|
logger.info("Available async tools:")
|
502
800
|
logger.info(" 1. video_last_frame_tool_async - Extract last frame from video and upload to TOS")
|
@@ -514,8 +812,11 @@ def main():
|
|
514
812
|
logger.info(" 13. install_tools_plugin_async - Install development tools (ffmpeg and ffprobe)")
|
515
813
|
logger.info(" 14. omni_human_tool_async - Generate a video using Omni Human AI model")
|
516
814
|
logger.info(" 15. google_edit_tool_async - Edit images with Google Gemini (async)")
|
815
|
+
logger.info(" 16. get_session_status - Get current session management status and statistics")
|
816
|
+
logger.info(" 17. generate_new_session - Manually generate a new session ID")
|
517
817
|
logger.info("")
|
518
818
|
logger.info("All tools support concurrent execution using asyncio.gather() or run_multiple_tools_concurrently()")
|
819
|
+
logger.info("Session management tools (16-17) help monitor and manage connection sessions")
|
519
820
|
|
520
821
|
try:
|
521
822
|
# Start the server with specified transport
|
@@ -523,9 +824,21 @@ def main():
|
|
523
824
|
logger.info(f"Starting async SSE server on {args.host}:{args.port}")
|
524
825
|
mcp.settings.host = args.host
|
525
826
|
mcp.settings.port = args.port
|
827
|
+
|
828
|
+
# Get the SSE app and add session error handling middleware
|
829
|
+
sse_app = mcp.sse_app()
|
830
|
+
|
831
|
+
# Add session error handling middleware
|
832
|
+
sse_app.add_middleware(SessionErrorHandlingMiddleware)
|
833
|
+
|
834
|
+
# Add global exception handler for ClosedResourceError
|
835
|
+
sse_app.add_exception_handler(anyio.ClosedResourceError, handle_closed_resource_error)
|
836
|
+
|
837
|
+
logger.info("Added SessionErrorHandlingMiddleware and global ClosedResourceError handler for automatic session recovery")
|
838
|
+
|
526
839
|
# Use uvicorn to run SSE app with extended keep-alive timeout (5 minutes)
|
527
840
|
uvicorn.run(
|
528
|
-
|
841
|
+
sse_app,
|
529
842
|
host=args.host,
|
530
843
|
port=args.port,
|
531
844
|
timeout_keep_alive=300
|
@@ -540,4 +853,4 @@ def main():
|
|
540
853
|
|
541
854
|
|
542
855
|
if __name__ == "__main__":
|
543
|
-
main()
|
856
|
+
main()
|
@@ -1,11 +1,10 @@
|
|
1
|
-
media_agent_mcp/__init__.py,sha256=
|
2
|
-
media_agent_mcp/async_server.py,sha256=
|
1
|
+
media_agent_mcp/__init__.py,sha256=4GfH0flV1CGHefl-eNXt-Gidk7aDXA6Jlh2uVDOXGSo,312
|
2
|
+
media_agent_mcp/async_server.py,sha256=MbPUoWZFiUSC9xRI4pi2eyiIIGsZRzMCV-58c8ybaL8,32944
|
3
3
|
media_agent_mcp/async_wrapper.py,sha256=hiiBhhz9WeVDfSBWVh6ovhf5jeP5ZbsieBbz9P-KPn0,15351
|
4
|
-
media_agent_mcp/server.py,sha256=hZ_m7HodF_ct_q3FXuCrjE6_tw6rXOim38q2sSdTI9I,19976
|
5
4
|
media_agent_mcp/ai_models/__init__.py,sha256=2kHzTYwjQw89U4QGDq0e2WqJScqDkDNlDaWHGak5JeY,553
|
6
5
|
media_agent_mcp/ai_models/omni_human.py,sha256=s_Ja4gEmHG_bgii1x4SoeV73lz0Zg_3iBRvu36goxVA,4643
|
7
6
|
media_agent_mcp/ai_models/openaiedit.py,sha256=uu4d2BgXSrjWRdNPs_SryI9muxO93pItVtEze9nDhjc,9776
|
8
|
-
media_agent_mcp/ai_models/seed16.py,sha256=
|
7
|
+
media_agent_mcp/ai_models/seed16.py,sha256=cX0ZONj2Jpu_dzSIq8oXSJfnsfGWVcaEmWyRxg6jMfQ,5110
|
9
8
|
media_agent_mcp/ai_models/seedance.py,sha256=ni7LtXn4jTn5wX2NtcWDMj5Eea8LoP1QLYgwSx_GvBs,9014
|
10
9
|
media_agent_mcp/ai_models/seededit.py,sha256=2f0GiQPqH5IJKsZyW0h5WNA99DX--9oOKiD6vqg8Urk,2091
|
11
10
|
media_agent_mcp/ai_models/seedream.py,sha256=2i-GYQfX1OXh8qFyKedx9UN6VU6uiJOSYNZcYKwi-Xk,4454
|
@@ -44,8 +43,8 @@ media_agent_mcp/video/__init__.py,sha256=tfz22XEeFSeuKa3AggYCE0vCDt4IwXRCKW6avof
|
|
44
43
|
media_agent_mcp/video/processor.py,sha256=twfqmN5DbVryjDawZUcqTUcnglcBJYpUbAnApqHgD0c,12787
|
45
44
|
media_agent_mcp/video/stack.py,sha256=pyoJiJ9NhU1tjy2l3kARI9sWFoC00Fj97psxYOBi2NU,1736
|
46
45
|
media_agent_mcp/video/subtitle.py,sha256=TlrWVhWJqYTUJpnVz7eccwMAn8ixfrRzRxS6ETMY-DM,16323
|
47
|
-
media_agent_mcp-2.6.
|
48
|
-
media_agent_mcp-2.6.
|
49
|
-
media_agent_mcp-2.6.
|
50
|
-
media_agent_mcp-2.6.
|
51
|
-
media_agent_mcp-2.6.
|
46
|
+
media_agent_mcp-2.6.12.dist-info/METADATA,sha256=SNnWhZr4DY5sc2LDzWjZVq23862p9jqIjTDMMyl5H-I,11306
|
47
|
+
media_agent_mcp-2.6.12.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
48
|
+
media_agent_mcp-2.6.12.dist-info/entry_points.txt,sha256=qhOUwR-ORVf9GO7emhhl7Lgd6MISgqbZr8bEuSH_VdA,70
|
49
|
+
media_agent_mcp-2.6.12.dist-info/top_level.txt,sha256=WEa0YfchpTxZgiKn8gdxYgs-dir5HepJaTOrxAGx9nY,16
|
50
|
+
media_agent_mcp-2.6.12.dist-info/RECORD,,
|
media_agent_mcp/server.py
DELETED
@@ -1,600 +0,0 @@
|
|
1
|
-
#!/usr/bin/env python3
|
2
|
-
"""Media Agent MCP Server - A Model Context Protocol server for media processing.
|
3
|
-
|
4
|
-
This server provides 10 tools for media processing:
|
5
|
-
1. TOS - Save content as URL
|
6
|
-
2. Video Concat - Concatenate two videos
|
7
|
-
3. Video Last Frame - Get the last frame from a video
|
8
|
-
4. Combine Audio Video - Combine video and audio with optional delay
|
9
|
-
5. Seedream - Creating images (AI model)
|
10
|
-
6. Seedance (lite & pro) - Creating videos (AI model)
|
11
|
-
7. Seededit - Maintain the main character (AI model)
|
12
|
-
8. Seed1.6 (VLM) - Do vision tasks in workflow (AI model)
|
13
|
-
9. Image Selector - Choose the best one from images
|
14
|
-
10. Video Selector - Choose the best video from videos
|
15
|
-
"""
|
16
|
-
|
17
|
-
import argparse
|
18
|
-
import logging
|
19
|
-
from typing import Optional, Dict, Any
|
20
|
-
import json
|
21
|
-
from dotenv import load_dotenv
|
22
|
-
import uvicorn
|
23
|
-
import anyio
|
24
|
-
|
25
|
-
from mcp.server.fastmcp import FastMCP
|
26
|
-
|
27
|
-
# Import modules
|
28
|
-
from media_agent_mcp.storage import upload_to_tos
|
29
|
-
from media_agent_mcp.video import concat_videos, extract_last_frame, stack_videos
|
30
|
-
from media_agent_mcp.audio.combiner import combine_audio_video_from_urls
|
31
|
-
from media_agent_mcp.ai_models.seedream import generate_image
|
32
|
-
from media_agent_mcp.ai_models.seedance import generate_video
|
33
|
-
from media_agent_mcp.ai_models.seededit import seededit
|
34
|
-
from media_agent_mcp.ai_models.omni_human import generate_video_from_omni_human
|
35
|
-
from media_agent_mcp.ai_models.tts import tts
|
36
|
-
from media_agent_mcp.media_selectors.image_selector import select_best_image
|
37
|
-
from media_agent_mcp.media_selectors.video_selector import select_best_video
|
38
|
-
|
39
|
-
# Configure logging
|
40
|
-
logging.basicConfig(level=logging.INFO)
|
41
|
-
logger = logging.getLogger(__name__)
|
42
|
-
|
43
|
-
# Swallow ClosedResourceError from AnyIO (e.g., SSE client disconnected)
|
44
|
-
class IgnoreClosedResourceErrorMiddleware:
|
45
|
-
def __init__(self, app):
|
46
|
-
self.app = app
|
47
|
-
|
48
|
-
async def __call__(self, scope, receive, send):
|
49
|
-
try:
|
50
|
-
await self.app(scope, receive, send)
|
51
|
-
except anyio.ClosedResourceError:
|
52
|
-
logger.warning("SSE client disconnected (ClosedResourceError). Ignoring.")
|
53
|
-
return
|
54
|
-
|
55
|
-
# Initialize FastMCP server (will be configured in main function)
|
56
|
-
load_dotenv()
|
57
|
-
mcp = FastMCP("Media-Agent-MCP")
|
58
|
-
|
59
|
-
|
60
|
-
@mcp.tool()
|
61
|
-
def video_concat_tool(video_urls: list[str]) -> str:
|
62
|
-
"""
|
63
|
-
Concatenate multiple videos from URLs and upload to TOS.
|
64
|
-
|
65
|
-
Args:
|
66
|
-
video_urls: List of video URLs to concatenate in order
|
67
|
-
|
68
|
-
Returns:
|
69
|
-
JSON string with status, data, and message
|
70
|
-
"""
|
71
|
-
try:
|
72
|
-
result = concat_videos(video_urls)
|
73
|
-
|
74
|
-
if isinstance(result, dict):
|
75
|
-
return json.dumps(result)
|
76
|
-
else:
|
77
|
-
# Handle legacy string returns
|
78
|
-
if result.startswith("Error:"):
|
79
|
-
return json.dumps({
|
80
|
-
"status": "error",
|
81
|
-
"data": None,
|
82
|
-
"message": result
|
83
|
-
})
|
84
|
-
else:
|
85
|
-
success_result = {
|
86
|
-
"status": "success",
|
87
|
-
"data": {"url": result},
|
88
|
-
"message": "Videos concatenated successfully"
|
89
|
-
}
|
90
|
-
return json.dumps(success_result)
|
91
|
-
except Exception as e:
|
92
|
-
logger.error(f"Error in video_concat_tool: {str(e)}")
|
93
|
-
return json.dumps({
|
94
|
-
"status": "error",
|
95
|
-
"data": None,
|
96
|
-
"message": f"Error: {str(e)}"
|
97
|
-
})
|
98
|
-
|
99
|
-
|
100
|
-
@mcp.tool()
|
101
|
-
def video_last_frame_tool(video_url: str) -> str:
|
102
|
-
"""
|
103
|
-
Extract the last frame from a video file and upload to TOS.
|
104
|
-
|
105
|
-
Args:
|
106
|
-
video_url: URL or path to the video file
|
107
|
-
|
108
|
-
Returns:
|
109
|
-
JSON string with status, data, and message
|
110
|
-
"""
|
111
|
-
try:
|
112
|
-
# Extract last frame and upload to TOS
|
113
|
-
result = extract_last_frame(video_url)
|
114
|
-
|
115
|
-
if isinstance(result, dict):
|
116
|
-
return json.dumps(result)
|
117
|
-
else:
|
118
|
-
# Handle legacy string returns
|
119
|
-
if result.startswith("Error:"):
|
120
|
-
return json.dumps({
|
121
|
-
"status": "error",
|
122
|
-
"data": None,
|
123
|
-
"message": result
|
124
|
-
})
|
125
|
-
else:
|
126
|
-
success_result = {
|
127
|
-
"status": "success",
|
128
|
-
"data": {"url": result},
|
129
|
-
"message": "Last frame extracted successfully"
|
130
|
-
}
|
131
|
-
return json.dumps(success_result)
|
132
|
-
|
133
|
-
except Exception as e:
|
134
|
-
logger.error(f"Error in video_last_frame_tool: {str(e)}")
|
135
|
-
return json.dumps({
|
136
|
-
"status": "error",
|
137
|
-
"data": None,
|
138
|
-
"message": f"Error: {str(e)}"
|
139
|
-
})
|
140
|
-
|
141
|
-
|
142
|
-
@mcp.tool()
|
143
|
-
def combine_audio_video_tool(video_url: str, audio_url: str, delay_ms: float = 0.0) -> str:
|
144
|
-
"""
|
145
|
-
Combine video and audio from URLs with optional delay.
|
146
|
-
|
147
|
-
Args:
|
148
|
-
video_url: URL of the video file
|
149
|
-
audio_url: URL of the audio file
|
150
|
-
delay_ms: Delay in milliseconds for the audio to start
|
151
|
-
|
152
|
-
Returns:
|
153
|
-
JSON string with status, data, and message
|
154
|
-
"""
|
155
|
-
try:
|
156
|
-
result = combine_audio_video_from_urls(video_url, audio_url, delay_ms)
|
157
|
-
return json.dumps(result)
|
158
|
-
except Exception as e:
|
159
|
-
logger.error(f"Error in combine_audio_video_tool: {str(e)}")
|
160
|
-
return json.dumps({
|
161
|
-
"status": "error",
|
162
|
-
"data": None,
|
163
|
-
"message": f"Error: {str(e)}"
|
164
|
-
})
|
165
|
-
|
166
|
-
|
167
|
-
@mcp.tool()
|
168
|
-
def seedream_generate_image_tool(prompt: str, size: str = "1024x1024") -> str:
|
169
|
-
"""
|
170
|
-
Generate an image using Seedream AI model.
|
171
|
-
|
172
|
-
Args:
|
173
|
-
prompt: Text description of the image to generate
|
174
|
-
size: Size of the image (e.g., "1024x1024")
|
175
|
-
|
176
|
-
Returns:
|
177
|
-
JSON string with status, data, and message
|
178
|
-
"""
|
179
|
-
try:
|
180
|
-
result = generate_image(prompt, size=size)
|
181
|
-
|
182
|
-
if isinstance(result, dict):
|
183
|
-
return json.dumps(result)
|
184
|
-
else:
|
185
|
-
# Handle legacy string returns
|
186
|
-
if result.startswith("Error:"):
|
187
|
-
return json.dumps({
|
188
|
-
"status": "error",
|
189
|
-
"data": None,
|
190
|
-
"message": result
|
191
|
-
})
|
192
|
-
else:
|
193
|
-
success_result = {
|
194
|
-
"status": "success",
|
195
|
-
"data": {"image_url": result},
|
196
|
-
"message": "Image generated successfully"
|
197
|
-
}
|
198
|
-
return json.dumps(success_result)
|
199
|
-
except Exception as e:
|
200
|
-
logger.error(f"Error in seedream_generate_image_tool: {str(e)}")
|
201
|
-
return json.dumps({
|
202
|
-
"status": "error",
|
203
|
-
"data": None,
|
204
|
-
"message": f"Error: {str(e)}"
|
205
|
-
})
|
206
|
-
|
207
|
-
|
208
|
-
@mcp.tool()
|
209
|
-
def seedance_generate_video_tool(prompt: str, first_frame_image: str,
|
210
|
-
last_frame_image: str = None, duration: int = 5,
|
211
|
-
resolution: str = "720p") -> str:
|
212
|
-
"""
|
213
|
-
Generate a video using Seedance AI model with first/last frame images.
|
214
|
-
|
215
|
-
Args:
|
216
|
-
prompt: Text description of the video to generate (optional for image-to-video)
|
217
|
-
first_frame_image: URL or base64 of the first frame image
|
218
|
-
last_frame_image: URL or base64 of the last frame image (optional)
|
219
|
-
duration: Duration of the video in seconds (5 or 10)
|
220
|
-
resolution: Video resolution (480p, 720p)
|
221
|
-
|
222
|
-
Returns:
|
223
|
-
JSON string with status, data, and message
|
224
|
-
"""
|
225
|
-
try:
|
226
|
-
# Validate duration
|
227
|
-
if duration not in [5, 10]:
|
228
|
-
return json.dumps({
|
229
|
-
"status": "error",
|
230
|
-
"data": None,
|
231
|
-
"message": "Duration must be 5 or 10 seconds"
|
232
|
-
})
|
233
|
-
|
234
|
-
# Validate resolution
|
235
|
-
if resolution not in ["480p", "720p"]:
|
236
|
-
return json.dumps({
|
237
|
-
"status": "error",
|
238
|
-
"data": None,
|
239
|
-
"message": "Resolution must be 480p or 720p"
|
240
|
-
})
|
241
|
-
|
242
|
-
result = generate_video(prompt, first_frame_image, last_frame_image, duration, resolution)
|
243
|
-
|
244
|
-
if isinstance(result, dict):
|
245
|
-
return json.dumps(result)
|
246
|
-
else:
|
247
|
-
# Handle legacy string returns
|
248
|
-
if result.startswith("Error:"):
|
249
|
-
return json.dumps({
|
250
|
-
"status": "error",
|
251
|
-
"data": None,
|
252
|
-
"message": result
|
253
|
-
})
|
254
|
-
else:
|
255
|
-
success_result = {
|
256
|
-
"status": "success",
|
257
|
-
"data": {"video_url": result},
|
258
|
-
"message": "Video generated successfully"
|
259
|
-
}
|
260
|
-
return json.dumps(success_result)
|
261
|
-
except Exception as e:
|
262
|
-
logger.error(f"Error in seedance_generate_video_tool: {str(e)}")
|
263
|
-
return json.dumps({
|
264
|
-
"status": "error",
|
265
|
-
"data": None,
|
266
|
-
"message": f"Error: {str(e)}"
|
267
|
-
})
|
268
|
-
|
269
|
-
|
270
|
-
@mcp.tool()
|
271
|
-
def seededit_tool(image_url: str, prompt: str, seed: int = -1, scale: float = 0.5, charactor_keep: bool = False) -> str:
|
272
|
-
"""
|
273
|
-
Edit an image using Seededit AI model while maintaining character consistency.
|
274
|
-
|
275
|
-
Args:
|
276
|
-
image_url: Input image URL for editing
|
277
|
-
prompt: Text prompt for image editing
|
278
|
-
seed: Random seed for reproducibility (-1 for random)
|
279
|
-
scale: Guidance scale for editing (0.1 to 1.0)
|
280
|
-
charactor_keep: Whether to keep character consistency
|
281
|
-
|
282
|
-
Returns:
|
283
|
-
JSON string with status, data, and message
|
284
|
-
"""
|
285
|
-
try:
|
286
|
-
result = seededit(image_url, prompt, scale=scale, seed=seed)
|
287
|
-
|
288
|
-
if isinstance(result, dict):
|
289
|
-
return json.dumps(result)
|
290
|
-
else:
|
291
|
-
# Handle legacy string returns
|
292
|
-
if result.startswith("Error:"):
|
293
|
-
return json.dumps({
|
294
|
-
"status": "error",
|
295
|
-
"data": None,
|
296
|
-
"message": result
|
297
|
-
})
|
298
|
-
else:
|
299
|
-
success_result = {
|
300
|
-
"status": "success",
|
301
|
-
"data": {"image_url": result},
|
302
|
-
"message": "Image edited successfully"
|
303
|
-
}
|
304
|
-
return json.dumps(success_result)
|
305
|
-
except Exception as e:
|
306
|
-
logger.error(f"Error in seededit_tool: {str(e)}")
|
307
|
-
return json.dumps({
|
308
|
-
"status": "error",
|
309
|
-
"data": None,
|
310
|
-
"message": f"Error: {str(e)}"
|
311
|
-
})
|
312
|
-
|
313
|
-
|
314
|
-
@mcp.tool()
|
315
|
-
def vlm_vision_task_tool(messages: list) -> str:
|
316
|
-
"""
|
317
|
-
Perform vision-language tasks using VLM model.
|
318
|
-
|
319
|
-
Args:
|
320
|
-
messages: OpenAI-compatible messages format
|
321
|
-
|
322
|
-
Returns:
|
323
|
-
JSON string with status, data, and message
|
324
|
-
"""
|
325
|
-
try:
|
326
|
-
# Import VLM module
|
327
|
-
from media_agent_mcp.ai_models.vlm import vlm_vision_task
|
328
|
-
|
329
|
-
result = vlm_vision_task(messages)
|
330
|
-
|
331
|
-
if isinstance(result, dict):
|
332
|
-
return json.dumps(result)
|
333
|
-
else:
|
334
|
-
# Handle legacy string returns
|
335
|
-
if result.startswith("Error:"):
|
336
|
-
return json.dumps({
|
337
|
-
"status": "error",
|
338
|
-
"data": None,
|
339
|
-
"message": result
|
340
|
-
})
|
341
|
-
else:
|
342
|
-
success_result = {
|
343
|
-
"status": "success",
|
344
|
-
"data": {"response": result},
|
345
|
-
"message": "VLM task completed successfully"
|
346
|
-
}
|
347
|
-
return json.dumps(success_result)
|
348
|
-
except Exception as e:
|
349
|
-
logger.error(f"Error in vlm_vision_task_tool: {str(e)}")
|
350
|
-
return json.dumps({
|
351
|
-
"status": "error",
|
352
|
-
"data": None,
|
353
|
-
"message": f"Error: {str(e)}"
|
354
|
-
})
|
355
|
-
|
356
|
-
|
357
|
-
@mcp.tool()
def omni_human_tool(image_url: str, audio_url: str) -> str:
    """
    Generate a video using Omni Human AI model.

    Args:
        image_url: URL of the input image.
        audio_url: URL of the input audio.

    Returns:
        JSON string with status, data, and message.
    """
    try:
        result = generate_video_from_omni_human(image_url, audio_url)

        # Structured results are already in the response envelope format.
        if isinstance(result, dict):
            return json.dumps(result)

        # Legacy string result: error message or the generated video URL.
        if result.startswith("Error:"):
            payload = {"status": "error", "data": None, "message": result}
        else:
            payload = {
                "status": "success",
                "data": {"video_url": result},
                "message": "Video generated successfully",
            }
        return json.dumps(payload)
    except Exception as e:
        logger.error(f"Error in omni_human_tool: {str(e)}")
        return json.dumps({
            "status": "error",
            "data": None,
            "message": f"Error: {str(e)}",
        })
|
394
|
-
|
395
|
-
|
396
|
-
@mcp.tool()
def tts_tool(text: str, speaker_id: str) -> str:
    """
    Synthesize speech using TTS AI model.

    Args:
        text: Text to synthesize.
        speaker_id: Speaker ID for the voice.

    Returns:
        JSON string with status, data, and message.
    """
    try:
        result = tts(text, speaker_id)

        # Dict results already follow the response envelope; forward as-is.
        if isinstance(result, dict):
            return json.dumps(result)

        # Legacy string result: error message or the synthesized audio URL.
        if result.startswith("Error:"):
            payload = {"status": "error", "data": None, "message": result}
        else:
            payload = {
                "status": "success",
                "data": {"audio_url": result},
                "message": "Speech synthesized successfully",
            }
        return json.dumps(payload)
    except Exception as e:
        logger.error(f"Error in tts_tool: {str(e)}")
        return json.dumps({
            "status": "error",
            "data": None,
            "message": f"Error: {str(e)}",
        })
|
433
|
-
|
434
|
-
|
435
|
-
@mcp.tool()
def image_selector_tool(image_paths: list[str], prompt: str) -> str:
    """
    Select the best image from multiple options using VLM model.

    Args:
        image_paths: List of paths to image files
        prompt: Selection criteria prompt

    Returns:
        JSON string with status, data, and message
    """
    try:
        result = select_best_image(image_paths, prompt)

        # Only a dict explicitly marked successful counts as success;
        # anything else is stringified into the error message.
        if isinstance(result, dict) and result.get("status") == "success":
            return json.dumps({
                "status": "success",
                "data": result.get("data"),
                "message": "Image selected successfully",
            })
        return json.dumps({
            "status": "error",
            "data": None,
            "message": f"Error: {str(result)}",
        })
    except Exception as e:
        logger.error(f"Error in image_selector_tool: {str(e)}")
        return json.dumps({
            "status": "error",
            "data": None,
            "message": f"Error: {str(e)}",
        })
|
470
|
-
|
471
|
-
|
472
|
-
@mcp.tool()
def video_selector_tool(video_paths: list[str], prompt: str) -> str:
    """
    Select the best video from multiple options using VLM model.

    Args:
        video_paths: List of paths to videos to choose from
        prompt: Selection criteria prompt

    Returns:
        JSON string with status, data, and message
    """
    try:
        result = select_best_video(video_paths, prompt)

        # Only a dict explicitly marked successful counts as success;
        # anything else is stringified into the error message.
        if isinstance(result, dict) and result.get("status") == "success":
            return json.dumps({
                "status": "success",
                "data": result.get("data"),
                "message": "Video selected successfully",
            })
        return json.dumps({
            "status": "error",
            "data": None,
            "message": f"Error: {str(result)}",
        })
    except Exception as e:
        logger.error(f"Error in video_selector_tool: {str(e)}")
        return json.dumps({
            "status": "error",
            "data": None,
            "message": f"Error: {str(e)}",
        })
|
507
|
-
|
508
|
-
|
509
|
-
@mcp.tool()
def tos_save_content_tool(content: str, file_extension: str = "txt",
                          object_key: Optional[str] = None) -> str:
    """
    Save content to TOS and return URL.

    Args:
        content: Content to save
        file_extension: File extension for the content (default: txt)
        object_key: Optional key to use for the object in TOS

    Returns:
        JSON string with status, data, and message
    """
    try:
        result = upload_to_tos(content, file_extension, object_key)

        # Dict results already follow the response envelope; forward as-is.
        if isinstance(result, dict):
            return json.dumps(result)

        # Legacy string result: error message or the uploaded object's URL.
        if result.startswith("Error:"):
            payload = {"status": "error", "data": None, "message": result}
        else:
            payload = {
                "status": "success",
                "data": {"url": result},
                "message": "Content saved successfully",
            }
        return json.dumps(payload)
    except Exception as e:
        logger.error(f"Error in tos_save_content_tool: {str(e)}")
        return json.dumps({
            "status": "error",
            "data": None,
            "message": f"Error: {str(e)}",
        })
|
550
|
-
|
551
|
-
|
552
|
-
def main():
    """Main entry point for the MCP server."""
    # Command-line configuration: transport selection plus SSE bind address.
    parser = argparse.ArgumentParser(description='Media Agent MCP Server')
    parser.add_argument('--transport', type=str, choices=['sse', 'stdio'], default='stdio',
                        help='Transport method: sse or stdio (default: stdio)')
    parser.add_argument('--host', type=str, default='127.0.0.1',
                        help='Host for SSE transport (default: 127.0.0.1)')
    parser.add_argument('--port', type=int, default=8000,
                        help='Port for SSE transport (default: 8000)')
    parser.add_argument('--version', action='store_true',
                        help='Show version information')
    args = parser.parse_args()

    if args.version:
        print("Media Agent MCP Server v0.1.0")
        return

    logger.info("Starting Media Agent MCP Server...")
    logger.info(f"Transport: {args.transport}")
    if args.transport == 'sse':
        logger.info(f"SSE Server will run on {args.host}:{args.port}")

    # Startup banner listing every registered tool.
    logger.info("Available tools:")
    for banner_line in (
        "  1. video_last_frame_tool - Extract last frame from video and upload to TOS",
        "  2. video_concat_tool - Concatenate two videos",
        "  3. seedream_generate_image_tool - Generate images with AI",
        "  4. seedance_generate_video_tool - Generate videos with AI",
        "  5. seededit_tool - Edit images while maintaining character",
        "  6. vlm_vision_task_tool - Perform vision tasks with OpenAI-compatible messages",
        "  7. image_selector_tool - Select best image using VLM model",
        "  8. video_selector_tool - Select best video using VLM model",
        "  9. tos_save_content_tool - Save content to TOS and get URL",
        "  10. omni_human_tool - Generate a video using Omni Human AI model",
        "  11. tts_tool - Synthesize speech using TTS AI model",
    ):
        logger.info(banner_line)
    logger.info("")

    # Run under the chosen transport.
    if args.transport == 'sse':
        # SSE transport: wrap the ASGI app to swallow ClosedResourceError noise.
        uvicorn.run(IgnoreClosedResourceErrorMiddleware(mcp.create_sse_app()), host=args.host, port=args.port)
    else:
        # STDIO transport (default)
        mcp.run()


if __name__ == "__main__":
    main()
|
File without changes
|
File without changes
|
File without changes
|