media-agent-mcp 0.3.2__tar.gz → 0.3.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/PKG-INFO +6 -6
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/pyproject.toml +2 -9
- media_agent_mcp-0.3.4/setup.cfg +4 -0
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/src/media_agent_mcp/__init__.py +3 -1
- media_agent_mcp-0.3.4/src/media_agent_mcp/async_server.py +277 -0
- media_agent_mcp-0.3.4/src/media_agent_mcp/async_wrapper.py +273 -0
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/src/media_agent_mcp/server.py +5 -6
- media_agent_mcp-0.3.4/src/media_agent_mcp.egg-info/PKG-INFO +495 -0
- media_agent_mcp-0.3.4/src/media_agent_mcp.egg-info/SOURCES.txt +24 -0
- media_agent_mcp-0.3.4/src/media_agent_mcp.egg-info/dependency_links.txt +1 -0
- media_agent_mcp-0.3.4/src/media_agent_mcp.egg-info/entry_points.txt +2 -0
- media_agent_mcp-0.3.4/src/media_agent_mcp.egg-info/requires.txt +11 -0
- media_agent_mcp-0.3.4/src/media_agent_mcp.egg-info/top_level.txt +1 -0
- media_agent_mcp-0.3.2/.env.template +0 -29
- media_agent_mcp-0.3.2/.gitignore +0 -10
- media_agent_mcp-0.3.2/.idea/.gitignore +0 -8
- media_agent_mcp-0.3.2/.idea/inspectionProfiles/Project_Default.xml +0 -7
- media_agent_mcp-0.3.2/.idea/inspectionProfiles/profiles_settings.xml +0 -6
- media_agent_mcp-0.3.2/.idea/media-agent-mcp.iml +0 -14
- media_agent_mcp-0.3.2/.idea/misc.xml +0 -7
- media_agent_mcp-0.3.2/.idea/modules.xml +0 -8
- media_agent_mcp-0.3.2/.idea/vcs.xml +0 -6
- media_agent_mcp-0.3.2/.idea/workspace.xml +0 -141
- media_agent_mcp-0.3.2/.python-version +0 -1
- media_agent_mcp-0.3.2/uv.lock +0 -957
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/README.md +0 -0
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/src/media_agent_mcp/ai_models/__init__.py +0 -0
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/src/media_agent_mcp/ai_models/seed16.py +0 -0
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/src/media_agent_mcp/ai_models/seedance.py +0 -0
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/src/media_agent_mcp/ai_models/seededit.py +0 -0
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/src/media_agent_mcp/ai_models/seedream.py +0 -0
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/src/media_agent_mcp/media_selectors/__init__.py +0 -0
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/src/media_agent_mcp/media_selectors/image_selector.py +0 -0
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/src/media_agent_mcp/media_selectors/video_selector.py +0 -0
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/src/media_agent_mcp/storage/__init__.py +0 -0
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/src/media_agent_mcp/storage/tos_client.py +0 -0
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/src/media_agent_mcp/video/__init__.py +0 -0
- {media_agent_mcp-0.3.2 → media_agent_mcp-0.3.4}/src/media_agent_mcp/video/processor.py +0 -0
--- media_agent_mcp-0.3.2/PKG-INFO
+++ media_agent_mcp-0.3.4/PKG-INFO
@@ -1,27 +1,27 @@
 Metadata-Version: 2.4
 Name: media-agent-mcp
-Version: 0.3.2
+Version: 0.3.4
 Summary: A Model Context Protocol server for media processing with AI tools
 Author-email: Media Agent Team <team@mediaagent.com>
-Keywords: ai,
+Keywords: mcp,ai,media,video,image,processing
 Classifier: Development Status :: 3 - Alpha
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.12
 Requires-Python: >=3.12
+Description-Content-Type: text/markdown
 Requires-Dist: httpx>=0.28.1
 Requires-Dist: mcp[cli]>=1.12.2
-Requires-Dist: numpy>=1.24.0
 Requires-Dist: opencv-python>=4.12.0.88
 Requires-Dist: pillow>=11.3.0
-Requires-Dist: python-dotenv>=1.0.0
 Requires-Dist: requests>=2.32.4
 Requires-Dist: tos>=2.7.1
 Requires-Dist: uvicorn>=0.32.1
+Requires-Dist: numpy>=1.24.0
+Requires-Dist: python-dotenv>=1.0.0
 Requires-Dist: volcengine-python-sdk>=1.0.0
 Requires-Dist: volcengine>=1.0.194
-Description-Content-Type: text/markdown
 
 # Media Agent MCP
 

@@ -492,4 +492,4 @@ MIT License
 **版本**: 0.1.0
 **状态**: Alpha
 **Python要求**: >= 3.12
-**维护者**: Media Agent Team
+**维护者**: Media Agent Team

--- media_agent_mcp-0.3.2/pyproject.toml
+++ media_agent_mcp-0.3.4/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "media-agent-mcp"
-version = "0.3.2"
+version = "0.3.4"
 description = "A Model Context Protocol server for media processing with AI tools"
 readme = "README.md"
 requires-python = ">=3.12"

@@ -30,11 +30,4 @@ dependencies = [
 ]
 
 [project.scripts]
-media-agent-mcp = "media_agent_mcp.
-
-[build-system]
-requires = ["hatchling"]
-build-backend = "hatchling.build"
-
-[tool.hatch.build.targets.wheel]
-packages = ["src/media_agent_mcp"]
+media-agent-mcp = "media_agent_mcp.async_server:main"

--- media_agent_mcp-0.3.2/src/media_agent_mcp/__init__.py
+++ media_agent_mcp-0.3.4/src/media_agent_mcp/__init__.py
@@ -2,6 +2,8 @@
 
 from . import ai_models, media_selectors, storage, video
 from .server import main
+from .async_server import main as async_main
+from . import async_wrapper
 
 __version__ = "0.1.0"
-__all__ = ['ai_models', '
+__all__ = ['ai_models', 'media_selectors', 'storage', 'video', 'main', 'async_main', 'async_wrapper']

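The widened `__all__` means both the synchronous and the new async entry points are importable straight from the package. A minimal consumption sketch, assuming the 0.3.4 layout shown in this diff (the prompt and URL values are illustrative only, not from the package):

    import asyncio
    from media_agent_mcp import async_wrapper, async_main  # new exports in 0.3.4

    async def demo():
        # Each async tool resolves to a JSON string with status/data/message.
        image_json, frame_json = await asyncio.gather(
            async_wrapper.async_seedream_generate_image_tool("a red bicycle", "1024x1024"),
            async_wrapper.async_video_last_frame_tool("https://example.com/clip.mp4"),
        )
        return image_json, frame_json

    # asyncio.run(demo())   # call the tools directly, or
    # async_main()          # start the async MCP server from async_server.py
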
--- /dev/null
+++ media_agent_mcp-0.3.4/src/media_agent_mcp/async_server.py
@@ -0,0 +1,277 @@
+#!/usr/bin/env python3
+"""Async Media Agent MCP Server - A Model Context Protocol server for media processing with async support.
+
+This server provides 9 async tools for media processing using threading:
+1. TOS - Save content as URL
+2. Video Concat - Concatenate two videos
+3. Video Last Frame - Get the last frame from a video
+4. Seedream - Creating images (AI model)
+5. Seedance (lite & pro) - Creating videos (AI model)
+6. Seededit - Maintain the main character (AI model)
+7. Seed1.6 (VLM) - Do vision tasks in workflow (AI model)
+8. Image Selector - Choose the best one from images
+9. Video Selector - Choose the best video from videos
+
+All tools are wrapped with threading to provide async functionality without modifying original functions.
+"""
+
+import argparse
+import asyncio
+import logging
+from typing import List, Optional
+import json
+from dotenv import load_dotenv
+import uvicorn
+
+from mcp.server.fastmcp import FastMCP
+
+# Import async wrappers
+from media_agent_mcp.async_wrapper import (
+    async_video_concat_tool,
+    async_video_last_frame_tool,
+    async_seedream_generate_image_tool,
+    async_seedance_generate_video_tool,
+    async_seededit_tool,
+    async_vlm_vision_task_tool,
+    async_image_selector_tool,
+    async_video_selector_tool,
+    async_tos_save_content_tool,
+    cleanup_executor
+)
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Initialize FastMCP server (will be configured in main function)
+load_dotenv()
+mcp = FastMCP("Media-Agent-MCP-Async")
+
+
+@mcp.tool()
+async def video_concat_tool_async(video_urls: List[str]) -> str:
+    """
+    Asynchronously concatenate multiple videos from URLs and upload to TOS.
+
+    Args:
+        video_urls: List of video URLs to concatenate in order
+
+    Returns:
+        JSON string with status, data, and message
+    """
+    return await async_video_concat_tool(video_urls)
+
+
+@mcp.tool()
+async def video_last_frame_tool_async(video_url: str) -> str:
+    """
+    Asynchronously extract the last frame from a video file and upload to TOS.
+
+    Args:
+        video_url: URL or path to the video file
+
+    Returns:
+        JSON string with status, data, and message
+    """
+    return await async_video_last_frame_tool(video_url)
+
+
+@mcp.tool()
+async def seedream_generate_image_tool_async(prompt: str, size: str = "1024x1024") -> str:
+    """
+    Asynchronously generate an image using Seedream AI model.
+
+    Args:
+        prompt: Text description of the image to generate
+        size: Size of the image (e.g., "1024x1024")
+
+    Returns:
+        JSON string with status, data, and message
+    """
+    return await async_seedream_generate_image_tool(prompt, size)
+
+
+@mcp.tool()
+async def seedance_generate_video_tool_async(prompt: str, first_frame_image: str,
+                                             last_frame_image: str = None, duration: int = 5,
+                                             resolution: str = "720p") -> str:
+    """
+    Asynchronously generate a video using Seedance AI model with first/last frame images.
+
+    Args:
+        prompt: Text description of the video to generate (optional for image-to-video)
+        first_frame_image: URL or base64 of the first frame image
+        last_frame_image: URL or base64 of the last frame image (optional)
+        duration: Duration of the video in seconds (5 or 10)
+        resolution: Video resolution (480p, 720p)
+
+    Returns:
+        JSON string with status, data, and message
+    """
+    return await async_seedance_generate_video_tool(prompt, first_frame_image, last_frame_image, duration, resolution)
+
+
+@mcp.tool()
+async def seededit_tool_async(image_url: str, prompt: str, seed: int = -1,
+                              scale: float = 0.5, charactor_keep: bool = False) -> str:
+    """
+    Asynchronously edit an image using Seededit model.
+
+    Args:
+        image_url: Input image URL for editing
+        prompt: Text prompt for image editing
+        seed: Random seed for reproducibility (-1 for random)
+        scale: Influence degree of text description (0-1)
+        charactor_keep: whether to keep the main character in this image, if you wanna change the main character, please keep False
+
+    Returns:
+        JSON string with status, data, and message
+    """
+    return await async_seededit_tool(image_url, prompt, seed, scale, charactor_keep)
+
+
+@mcp.tool()
+async def vlm_vision_task_tool_async(messages: List) -> str:
+    """
+    Asynchronously perform vision-language tasks using VLM model.
+
+    Args:
+        messages: OpenAI-compatible messages format
+
+    Returns:
+        JSON string with status, data, and message
+    """
+    return await async_vlm_vision_task_tool(messages)
+
+
+@mcp.tool()
+async def image_selector_tool_async(image_paths: List[str], prompt: str) -> str:
+    """
+    Asynchronously select the best image from multiple options using VLM model.
+
+    Args:
+        image_paths: List of paths to image files
+        prompt: Selection criteria prompt
+
+    Returns:
+        JSON string with status, data, and message
+    """
+    return await async_image_selector_tool(image_paths, prompt)
+
+
+@mcp.tool()
+async def video_selector_tool_async(video_paths: List[str], prompt: str) -> str:
+    """
+    Asynchronously select the best video from multiple options using VLM model.
+
+    Args:
+        video_paths: List of paths to videos to choose from
+        prompt: Selection criteria prompt
+
+    Returns:
+        JSON string with status, data, and message
+    """
+    return await async_video_selector_tool(video_paths, prompt)
+
+
+@mcp.tool()
+async def tos_save_content_tool_async(content: str, file_extension: str = "txt",
+                                      object_key: Optional[str] = None) -> str:
+    """
+    Asynchronously save content to TOS and return URL.
+
+    Args:
+        content: Content to save
+        file_extension: File extension for the content (default: txt)
+        object_key: Optional key to use for the object in TOS
+
+    Returns:
+        JSON string with status, data, and message
+    """
+    return await async_tos_save_content_tool(content, file_extension, object_key)
+
+
+# Utility function for concurrent execution
+async def run_multiple_tools_concurrently(*coroutines):
+    """
+    Run multiple async tools concurrently.
+
+    Args:
+        *coroutines: Variable number of coroutines to run concurrently
+
+    Returns:
+        List of results from all coroutines
+    """
+    return await asyncio.gather(*coroutines, return_exceptions=True)
+
+
+# Example usage function
+async def example_concurrent_usage():
+    """
+    Example of how to use multiple tools concurrently.
+    """
+    # Example: Generate image and process video concurrently
+    image_task = seedream_generate_image_tool_async("A beautiful sunset", "1024x1024")
+    video_task = video_last_frame_tool_async("https://example.com/video.mp4")
+
+    # Run both tasks concurrently
+    results = await run_multiple_tools_concurrently(image_task, video_task)
+
+    return results
+
+
+def main():
+    """Main entry point for the Async MCP server."""
+    # Parse command line arguments
+    parser = argparse.ArgumentParser(description='Async Media Agent MCP Server')
+    parser.add_argument('--transport', type=str, choices=['sse', 'stdio'], default='sse',
+                        help='Transport method: sse or stdio (default: sse)')
+    parser.add_argument('--host', type=str, default='127.0.0.1',
+                        help='Host for SSE transport (default: 127.0.0.1)')
+    parser.add_argument('--port', type=int, default=8000,
+                        help='Port for SSE transport (default: 8001)')
+    parser.add_argument('--version', action='store_true',
+                        help='Show version information')
+
+    args = parser.parse_args()
+
+    if args.version:
+        print("Async Media Agent MCP Server v0.1.0")
+        return
+
+    logger.info("Starting Async Media Agent MCP Server...")
+    logger.info(f"Transport: {args.transport}")
+    if args.transport == 'sse':
+        logger.info(f"SSE Server will run on {args.host}:{args.port}")
+
+    logger.info("Available async tools:")
+    logger.info(" 1. video_last_frame_tool_async - Extract last frame from video and upload to TOS")
+    logger.info(" 2. video_concat_tool_async - Concatenate two videos")
+    logger.info(" 3. seedream_generate_image_tool_async - Generate images with AI (async)")
+    logger.info(" 4. seedance_generate_video_tool_async - Generate videos with AI (async)")
+    logger.info(" 5. seededit_tool_async - Edit images while maintaining character (async)")
+    logger.info(" 6. vlm_vision_task_tool_async - Perform vision tasks with OpenAI-compatible messages (async)")
+    logger.info(" 7. image_selector_tool_async - Select best image using VLM model (async)")
+    logger.info(" 8. video_selector_tool_async - Select best video using VLM model (async)")
+    logger.info(" 9. tos_save_content_tool_async - Save content to TOS and return URL (async)")
+    logger.info("")
+    logger.info("All tools support concurrent execution using asyncio.gather() or run_multiple_tools_concurrently()")
+
+    try:
+        # Start the server with specified transport
+        if args.transport == 'sse':
+            logger.info(f"Starting async SSE server on {args.host}:{args.port}")
+            mcp.settings.host = args.host
+            mcp.settings.port = args.port
+            mcp.run(transport="sse")
+        else:
+            # Default stdio transport
+            mcp.run(transport="stdio")
+    finally:
+        # Clean up thread pool executor on shutdown
+        logger.info("Cleaning up thread pool executor...")
+        cleanup_executor()
+
+
+if __name__ == "__main__":
+    main()

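A hedged sketch of driving the tools registered above concurrently, mirroring the `example_concurrent_usage()` helper from the new module (module path and tool names are as in the diff; the prompt and URL are placeholders):

    import asyncio
    from media_agent_mcp import async_server

    async def batch():
        # run_multiple_tools_concurrently wraps asyncio.gather(return_exceptions=True),
        # so a failing tool comes back as an Exception instead of cancelling the rest.
        results = await async_server.run_multiple_tools_concurrently(
            async_server.seedream_generate_image_tool_async("city skyline at dusk"),
            async_server.video_last_frame_tool_async("https://example.com/clip.mp4"),
        )
        for result in results:
            print(result)  # each success is a JSON string with status/data/message

    asyncio.run(batch())
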
--- /dev/null
+++ media_agent_mcp-0.3.4/src/media_agent_mcp/async_wrapper.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python3
+"""Async wrapper module for MCP tools using threading.
+
+This module provides async wrappers for all MCP tools without modifying the original functions.
+It uses threading to make synchronous functions asynchronous.
+"""
+
+import asyncio
+import concurrent.futures
+import functools
+import json
+import logging
+from typing import Any, Callable, Dict, List, Optional
+
+# Import original functions
+from media_agent_mcp.storage import upload_to_tos
+from media_agent_mcp.video import concat_videos, extract_last_frame
+from media_agent_mcp.ai_models.seedream import generate_image
+from media_agent_mcp.ai_models.seedance import generate_video
+from media_agent_mcp.ai_models.seededit import seededit
+from media_agent_mcp.media_selectors.image_selector import select_best_image
+from media_agent_mcp.media_selectors.video_selector import select_best_video
+
+logger = logging.getLogger(__name__)
+
+# Thread pool executor for running sync functions
+_executor = concurrent.futures.ThreadPoolExecutor(max_workers=10)
+
+
+def async_wrapper(sync_func: Callable) -> Callable:
+    """Decorator to wrap synchronous functions as async using threading.
+
+    Args:
+        sync_func: The synchronous function to wrap
+
+    Returns:
+        Async version of the function
+    """
+    @functools.wraps(sync_func)
+    async def wrapper(*args, **kwargs):
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(_executor, sync_func, *args, **kwargs)
+    return wrapper
+
+
+def json_response_wrapper(func: Callable) -> Callable:
+    """Wrapper to ensure consistent JSON response format.
+
+    Args:
+        func: Function to wrap
+
+    Returns:
+        Function that returns JSON string
+    """
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        try:
+            result = func(*args, **kwargs)
+            if isinstance(result, dict):
+                return json.dumps(result)
+            else:
+                # Handle legacy string returns
+                if result.startswith("Error:"):
+                    return json.dumps({
+                        "status": "error",
+                        "data": None,
+                        "message": result
+                    })
+                else:
+                    return json.dumps({
+                        "status": "success",
+                        "data": {"url": result},
+                        "message": "Operation completed successfully"
+                    })
+        except Exception as e:
+            logger.error(f"Error in {func.__name__}: {str(e)}")
+            return json.dumps({
+                "status": "error",
+                "data": None,
+                "message": f"Error: {str(e)}"
+            })
+    return wrapper
+
+
+# Async wrapped functions
+@async_wrapper
+@json_response_wrapper
+def _sync_video_concat(video_urls: List[str]) -> str:
+    """Synchronous video concatenation wrapper."""
+    return concat_videos(video_urls)
+
+
+@async_wrapper
+@json_response_wrapper
+def _sync_video_last_frame(video_url: str) -> str:
+    """Synchronous video last frame extraction wrapper."""
+    return extract_last_frame(video_url)
+
+
+@async_wrapper
+@json_response_wrapper
+def _sync_seedream_generate_image(prompt: str, size: str = "1024x1024") -> str:
+    """Synchronous image generation wrapper."""
+    return generate_image(prompt, size=size)
+
+
+@async_wrapper
+@json_response_wrapper
+def _sync_seedance_generate_video(prompt: str, first_frame_image: str,
+                                  last_frame_image: str = None, duration: int = 5,
+                                  resolution: str = "720p") -> str:
+    """Synchronous video generation wrapper."""
+    if not prompt and not first_frame_image:
+        return json.dumps({
+            "status": "error",
+            "data": None,
+            "message": "Error: Either prompt or first_frame_image must be provided"
+        })
+
+    return generate_video(
+        prompt=prompt,
+        first_frame_image=first_frame_image,
+        last_frame_image=last_frame_image,
+        duration=duration,
+        resolution=resolution
+    )
+
+
+@async_wrapper
+@json_response_wrapper
+def _sync_seededit(image_url: str, prompt: str, seed: int = -1,
+                   scale: float = 0.5, charactor_keep: bool = False) -> str:
+    """Synchronous image editing wrapper."""
+    return seededit(
+        image_url=image_url,
+        prompt=prompt,
+        charactor_keep=charactor_keep,
+        return_url=True,
+        scale=scale,
+        seed=seed
+    )
+
+
+@async_wrapper
+@json_response_wrapper
+def _sync_vlm_vision_task(messages: List) -> str:
+    """Synchronous VLM vision task wrapper."""
+    from media_agent_mcp.ai_models.seed16 import process_vlm_task
+    return process_vlm_task(messages)
+
+
+@async_wrapper
+def _sync_image_selector(image_paths: List[str], prompt: str) -> str:
+    """Synchronous image selector wrapper."""
+    try:
+        result = select_best_image(image_paths, prompt)
+        return json.dumps({
+            "status": "success",
+            "data": result,
+            "message": "Image selection completed successfully"
+        })
+    except Exception as e:
+        return json.dumps({
+            "status": "error",
+            "data": None,
+            "message": f"Image selection failed: {str(e)}"
+        })
+
+
+@async_wrapper
+def _sync_video_selector(video_paths: List[str], prompt: str) -> str:
+    """Synchronous video selector wrapper."""
+    try:
+        result = select_best_video(video_paths, prompt)
+        return json.dumps({
+            "status": "success",
+            "data": result,
+            "message": "Video selection completed successfully"
+        })
+    except Exception as e:
+        return json.dumps({
+            "status": "error",
+            "data": None,
+            "message": f"Video selection failed: {str(e)}"
+        })
+
+
+@async_wrapper
+def _sync_tos_save_content(content: str, file_extension: str = "txt",
+                           object_key: Optional[str] = None) -> str:
+    """Synchronous TOS content save wrapper."""
+    import tempfile
+    import os
+
+    try:
+        # Create temporary file with content
+        with tempfile.NamedTemporaryFile(mode='w', suffix=f'.{file_extension}', delete=False) as temp_file:
+            temp_file.write(content)
+            temp_file_path = temp_file.name
+
+        try:
+            # Upload to TOS
+            result = upload_to_tos(temp_file_path, object_key)
+            return json.dumps(result)
+        finally:
+            # Clean up temporary file
+            if os.path.exists(temp_file_path):
+                os.unlink(temp_file_path)
+
+    except Exception as e:
+        return json.dumps({
+            "status": "error",
+            "data": None,
+            "message": f"TOS save failed: {str(e)}"
+        })
+
+
+# Public async API
+async def async_video_concat_tool(video_urls: List[str]) -> str:
+    """Async video concatenation tool."""
+    return await _sync_video_concat(video_urls)
+
+
+async def async_video_last_frame_tool(video_url: str) -> str:
+    """Async video last frame extraction tool."""
+    return await _sync_video_last_frame(video_url)
+
+
+async def async_seedream_generate_image_tool(prompt: str, size: str = "1024x1024") -> str:
+    """Async image generation tool."""
+    return await _sync_seedream_generate_image(prompt, size)
+
+
+async def async_seedance_generate_video_tool(prompt: str, first_frame_image: str,
+                                             last_frame_image: str = None, duration: int = 5,
+                                             resolution: str = "720p") -> str:
+    """Async video generation tool."""
+    return await _sync_seedance_generate_video(prompt, first_frame_image, last_frame_image, duration, resolution)
+
+
+async def async_seededit_tool(image_url: str, prompt: str, seed: int = -1,
+                              scale: float = 0.5, charactor_keep: bool = False) -> str:
+    """Async image editing tool."""
+    return await _sync_seededit(image_url, prompt, seed, scale, charactor_keep)
+
+
+async def async_vlm_vision_task_tool(messages: List) -> str:
+    """Async VLM vision task tool."""
+    return await _sync_vlm_vision_task(messages)
+
+
+async def async_image_selector_tool(image_paths: List[str], prompt: str) -> str:
+    """Async image selector tool."""
+    return await _sync_image_selector(image_paths, prompt)
+
+
+async def async_video_selector_tool(video_paths: List[str], prompt: str) -> str:
+    """Async video selector tool."""
+    return await _sync_video_selector(video_paths, prompt)
+
+
+async def async_tos_save_content_tool(content: str, file_extension: str = "txt",
+                                      object_key: Optional[str] = None) -> str:
+    """Async TOS content save tool."""
+    return await _sync_tos_save_content(content, file_extension, object_key)
+
+
+def cleanup_executor():
+    """Clean up the thread pool executor."""
+    global _executor
+    if _executor:
+        _executor.shutdown(wait=True)
+        _executor = None

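The core technique in async_wrapper.py is offloading unchanged synchronous functions onto a shared thread pool with `loop.run_in_executor`. A self-contained sketch of that pattern, with placeholder names rather than the package's own (note the `functools.partial`: `run_in_executor` only forwards positional arguments, so keyword arguments have to be bound first):

    import asyncio
    import concurrent.futures
    import functools
    import time

    _executor = concurrent.futures.ThreadPoolExecutor(max_workers=4)

    def to_async(sync_func):
        """Return an awaitable version of sync_func that runs on the thread pool."""
        @functools.wraps(sync_func)
        async def wrapper(*args, **kwargs):
            loop = asyncio.get_running_loop()
            # Bind all arguments up front; run_in_executor does not accept kwargs.
            return await loop.run_in_executor(_executor, functools.partial(sync_func, *args, **kwargs))
        return wrapper

    @to_async
    def slow_task(name: str, seconds: float = 1.0) -> str:
        time.sleep(seconds)  # stands in for a blocking network or ffmpeg call
        return f"{name} done"

    async def main():
        # Both blocking calls run on worker threads at the same time.
        print(await asyncio.gather(slow_task("a"), slow_task("b", seconds=0.5)))

    asyncio.run(main())
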
--- media_agent_mcp-0.3.2/src/media_agent_mcp/server.py
+++ media_agent_mcp-0.3.4/src/media_agent_mcp/server.py
@@ -39,9 +39,6 @@ logger = logging.getLogger(__name__)
 load_dotenv()
 mcp = FastMCP("Media-Agent-MCP")
 
-def create_sse_app():
-    return mcp.sse_app()
-
 
 @mcp.tool()
 def video_concat_tool(video_urls: list[str]) -> str:

@@ -373,8 +370,6 @@ def main():
                         help='Host for SSE transport (default: 127.0.0.1)')
     parser.add_argument('--port', type=int, default=8000,
                         help='Port for SSE transport (default: 8000)')
-    parser.add_argument('--workers', type=int, default=4,
-                        help='Number of worker processes for SSE (default: 4)')
 
     parser.add_argument('--version', action='store_true',
                         help='Show version information')

@@ -404,7 +399,11 @@ def main():
         # Start the server with specified transport
         if args.transport == 'sse':
             logger.info(f"Starting SSE server on {args.host}:{args.port} with {args.workers} workers")
-
+            mcp.settings.host = args.host
+            mcp.settings.port = args.port
+
+            mcp.run(transport="sse")
+
         else:
             # Default stdio transport
             mcp.run(transport="stdio")

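Taken together, the server.py hunks drop the `create_sse_app()`/uvicorn path and the `--workers` flag in favour of FastMCP's built-in runner configured through `mcp.settings`, the same pattern used in the new async_server.py. A minimal standalone sketch of that pattern (server name, tool, and host/port are placeholders, not from the package):

    from mcp.server.fastmcp import FastMCP

    mcp = FastMCP("demo-sse-server")

    @mcp.tool()
    def ping(message: str) -> str:
        """Echo a message back to the caller."""
        return f"pong: {message}"

    if __name__ == "__main__":
        # Host and port are read from settings when the SSE transport starts.
        mcp.settings.host = "127.0.0.1"
        mcp.settings.port = 8000
        mcp.run(transport="sse")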