massgen 0.0.3__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of massgen might be problematic. Click here for more details.
- massgen/__init__.py +142 -8
- massgen/adapters/__init__.py +29 -0
- massgen/adapters/ag2_adapter.py +483 -0
- massgen/adapters/base.py +183 -0
- massgen/adapters/tests/__init__.py +0 -0
- massgen/adapters/tests/test_ag2_adapter.py +439 -0
- massgen/adapters/tests/test_agent_adapter.py +128 -0
- massgen/adapters/utils/__init__.py +2 -0
- massgen/adapters/utils/ag2_utils.py +236 -0
- massgen/adapters/utils/tests/__init__.py +0 -0
- massgen/adapters/utils/tests/test_ag2_utils.py +138 -0
- massgen/agent_config.py +329 -55
- massgen/api_params_handler/__init__.py +10 -0
- massgen/api_params_handler/_api_params_handler_base.py +99 -0
- massgen/api_params_handler/_chat_completions_api_params_handler.py +176 -0
- massgen/api_params_handler/_claude_api_params_handler.py +113 -0
- massgen/api_params_handler/_response_api_params_handler.py +130 -0
- massgen/backend/__init__.py +39 -4
- massgen/backend/azure_openai.py +385 -0
- massgen/backend/base.py +341 -69
- massgen/backend/base_with_mcp.py +1102 -0
- massgen/backend/capabilities.py +386 -0
- massgen/backend/chat_completions.py +577 -130
- massgen/backend/claude.py +1033 -537
- massgen/backend/claude_code.py +1203 -0
- massgen/backend/cli_base.py +209 -0
- massgen/backend/docs/BACKEND_ARCHITECTURE.md +126 -0
- massgen/backend/{CLAUDE_API_RESEARCH.md → docs/CLAUDE_API_RESEARCH.md} +18 -18
- massgen/backend/{GEMINI_API_DOCUMENTATION.md → docs/GEMINI_API_DOCUMENTATION.md} +9 -9
- massgen/backend/docs/Gemini MCP Integration Analysis.md +1050 -0
- massgen/backend/docs/MCP_IMPLEMENTATION_CLAUDE_BACKEND.md +177 -0
- massgen/backend/docs/MCP_INTEGRATION_RESPONSE_BACKEND.md +352 -0
- massgen/backend/docs/OPENAI_GPT5_MODELS.md +211 -0
- massgen/backend/{OPENAI_RESPONSES_API_FORMAT.md → docs/OPENAI_RESPONSE_API_TOOL_CALLS.md} +3 -3
- massgen/backend/docs/OPENAI_response_streaming.md +20654 -0
- massgen/backend/docs/inference_backend.md +257 -0
- massgen/backend/docs/permissions_and_context_files.md +1085 -0
- massgen/backend/external.py +126 -0
- massgen/backend/gemini.py +1850 -241
- massgen/backend/grok.py +40 -156
- massgen/backend/inference.py +156 -0
- massgen/backend/lmstudio.py +171 -0
- massgen/backend/response.py +1095 -322
- massgen/chat_agent.py +131 -113
- massgen/cli.py +1560 -275
- massgen/config_builder.py +2396 -0
- massgen/configs/BACKEND_CONFIGURATION.md +458 -0
- massgen/configs/README.md +559 -216
- massgen/configs/ag2/ag2_case_study.yaml +27 -0
- massgen/configs/ag2/ag2_coder.yaml +34 -0
- massgen/configs/ag2/ag2_coder_case_study.yaml +36 -0
- massgen/configs/ag2/ag2_gemini.yaml +27 -0
- massgen/configs/ag2/ag2_groupchat.yaml +108 -0
- massgen/configs/ag2/ag2_groupchat_gpt.yaml +118 -0
- massgen/configs/ag2/ag2_single_agent.yaml +21 -0
- massgen/configs/basic/multi/fast_timeout_example.yaml +37 -0
- massgen/configs/basic/multi/gemini_4o_claude.yaml +31 -0
- massgen/configs/basic/multi/gemini_gpt5nano_claude.yaml +36 -0
- massgen/configs/{gemini_4o_claude.yaml → basic/multi/geminicode_4o_claude.yaml} +3 -3
- massgen/configs/basic/multi/geminicode_gpt5nano_claude.yaml +36 -0
- massgen/configs/basic/multi/glm_gemini_claude.yaml +25 -0
- massgen/configs/basic/multi/gpt4o_audio_generation.yaml +30 -0
- massgen/configs/basic/multi/gpt4o_image_generation.yaml +31 -0
- massgen/configs/basic/multi/gpt5nano_glm_qwen.yaml +26 -0
- massgen/configs/basic/multi/gpt5nano_image_understanding.yaml +26 -0
- massgen/configs/{three_agents_default.yaml → basic/multi/three_agents_default.yaml} +8 -4
- massgen/configs/basic/multi/three_agents_opensource.yaml +27 -0
- massgen/configs/basic/multi/three_agents_vllm.yaml +20 -0
- massgen/configs/basic/multi/two_agents_gemini.yaml +19 -0
- massgen/configs/{two_agents.yaml → basic/multi/two_agents_gpt5.yaml} +14 -6
- massgen/configs/basic/multi/two_agents_opensource_lmstudio.yaml +31 -0
- massgen/configs/basic/multi/two_qwen_vllm_sglang.yaml +28 -0
- massgen/configs/{single_agent.yaml → basic/single/single_agent.yaml} +1 -1
- massgen/configs/{single_flash2.5.yaml → basic/single/single_flash2.5.yaml} +1 -2
- massgen/configs/basic/single/single_gemini2.5pro.yaml +16 -0
- massgen/configs/basic/single/single_gpt4o_audio_generation.yaml +22 -0
- massgen/configs/basic/single/single_gpt4o_image_generation.yaml +22 -0
- massgen/configs/basic/single/single_gpt4o_video_generation.yaml +24 -0
- massgen/configs/basic/single/single_gpt5nano.yaml +20 -0
- massgen/configs/basic/single/single_gpt5nano_file_search.yaml +18 -0
- massgen/configs/basic/single/single_gpt5nano_image_understanding.yaml +17 -0
- massgen/configs/basic/single/single_gptoss120b.yaml +15 -0
- massgen/configs/basic/single/single_openrouter_audio_understanding.yaml +15 -0
- massgen/configs/basic/single/single_qwen_video_understanding.yaml +15 -0
- massgen/configs/debug/code_execution/command_filtering_blacklist.yaml +29 -0
- massgen/configs/debug/code_execution/command_filtering_whitelist.yaml +28 -0
- massgen/configs/debug/code_execution/docker_verification.yaml +29 -0
- massgen/configs/debug/skip_coordination_test.yaml +27 -0
- massgen/configs/debug/test_sdk_migration.yaml +17 -0
- massgen/configs/docs/DISCORD_MCP_SETUP.md +208 -0
- massgen/configs/docs/TWITTER_MCP_ENESCINAR_SETUP.md +82 -0
- massgen/configs/providers/azure/azure_openai_multi.yaml +21 -0
- massgen/configs/providers/azure/azure_openai_single.yaml +19 -0
- massgen/configs/providers/claude/claude.yaml +14 -0
- massgen/configs/providers/gemini/gemini_gpt5nano.yaml +28 -0
- massgen/configs/providers/local/lmstudio.yaml +11 -0
- massgen/configs/providers/openai/gpt5.yaml +46 -0
- massgen/configs/providers/openai/gpt5_nano.yaml +46 -0
- massgen/configs/providers/others/grok_single_agent.yaml +19 -0
- massgen/configs/providers/others/zai_coding_team.yaml +108 -0
- massgen/configs/providers/others/zai_glm45.yaml +12 -0
- massgen/configs/{creative_team.yaml → teams/creative/creative_team.yaml} +16 -6
- massgen/configs/{travel_planning.yaml → teams/creative/travel_planning.yaml} +16 -6
- massgen/configs/{news_analysis.yaml → teams/research/news_analysis.yaml} +16 -6
- massgen/configs/{research_team.yaml → teams/research/research_team.yaml} +15 -7
- massgen/configs/{technical_analysis.yaml → teams/research/technical_analysis.yaml} +16 -6
- massgen/configs/tools/code-execution/basic_command_execution.yaml +25 -0
- massgen/configs/tools/code-execution/code_execution_use_case_simple.yaml +41 -0
- massgen/configs/tools/code-execution/docker_claude_code.yaml +32 -0
- massgen/configs/tools/code-execution/docker_multi_agent.yaml +32 -0
- massgen/configs/tools/code-execution/docker_simple.yaml +29 -0
- massgen/configs/tools/code-execution/docker_with_resource_limits.yaml +32 -0
- massgen/configs/tools/code-execution/multi_agent_playwright_automation.yaml +57 -0
- massgen/configs/tools/filesystem/cc_gpt5_gemini_filesystem.yaml +34 -0
- massgen/configs/tools/filesystem/claude_code_context_sharing.yaml +68 -0
- massgen/configs/tools/filesystem/claude_code_flash2.5.yaml +43 -0
- massgen/configs/tools/filesystem/claude_code_flash2.5_gptoss.yaml +49 -0
- massgen/configs/tools/filesystem/claude_code_gpt5nano.yaml +31 -0
- massgen/configs/tools/filesystem/claude_code_single.yaml +40 -0
- massgen/configs/tools/filesystem/fs_permissions_test.yaml +87 -0
- massgen/configs/tools/filesystem/gemini_gemini_workspace_cleanup.yaml +54 -0
- massgen/configs/tools/filesystem/gemini_gpt5_filesystem_casestudy.yaml +30 -0
- massgen/configs/tools/filesystem/gemini_gpt5nano_file_context_path.yaml +43 -0
- massgen/configs/tools/filesystem/gemini_gpt5nano_protected_paths.yaml +45 -0
- massgen/configs/tools/filesystem/gpt5mini_cc_fs_context_path.yaml +31 -0
- massgen/configs/tools/filesystem/grok4_gpt5_gemini_filesystem.yaml +32 -0
- massgen/configs/tools/filesystem/multiturn/grok4_gpt5_claude_code_filesystem_multiturn.yaml +58 -0
- massgen/configs/tools/filesystem/multiturn/grok4_gpt5_gemini_filesystem_multiturn.yaml +58 -0
- massgen/configs/tools/filesystem/multiturn/two_claude_code_filesystem_multiturn.yaml +47 -0
- massgen/configs/tools/filesystem/multiturn/two_gemini_flash_filesystem_multiturn.yaml +48 -0
- massgen/configs/tools/mcp/claude_code_discord_mcp_example.yaml +27 -0
- massgen/configs/tools/mcp/claude_code_simple_mcp.yaml +35 -0
- massgen/configs/tools/mcp/claude_code_twitter_mcp_example.yaml +32 -0
- massgen/configs/tools/mcp/claude_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/claude_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/five_agents_travel_mcp_test.yaml +157 -0
- massgen/configs/tools/mcp/five_agents_weather_mcp_test.yaml +103 -0
- massgen/configs/tools/mcp/gemini_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test.yaml +23 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_sharing.yaml +23 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_single_agent.yaml +17 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_with_claude_code.yaml +24 -0
- massgen/configs/tools/mcp/gemini_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/gemini_notion_mcp.yaml +52 -0
- massgen/configs/tools/mcp/gpt5_nano_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/gpt5_nano_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/gpt5mini_claude_code_discord_mcp_example.yaml +38 -0
- massgen/configs/tools/mcp/gpt_oss_mcp_example.yaml +25 -0
- massgen/configs/tools/mcp/gpt_oss_mcp_test.yaml +28 -0
- massgen/configs/tools/mcp/grok3_mini_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/grok3_mini_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/multimcp_gemini.yaml +111 -0
- massgen/configs/tools/mcp/qwen_api_mcp_example.yaml +25 -0
- massgen/configs/tools/mcp/qwen_api_mcp_test.yaml +28 -0
- massgen/configs/tools/mcp/qwen_local_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/qwen_local_mcp_test.yaml +27 -0
- massgen/configs/tools/planning/five_agents_discord_mcp_planning_mode.yaml +140 -0
- massgen/configs/tools/planning/five_agents_filesystem_mcp_planning_mode.yaml +151 -0
- massgen/configs/tools/planning/five_agents_notion_mcp_planning_mode.yaml +151 -0
- massgen/configs/tools/planning/five_agents_twitter_mcp_planning_mode.yaml +155 -0
- massgen/configs/tools/planning/gpt5_mini_case_study_mcp_planning_mode.yaml +73 -0
- massgen/configs/tools/web-search/claude_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gemini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gpt5_mini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gpt_oss_streamable_http_test.yaml +44 -0
- massgen/configs/tools/web-search/grok3_mini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/qwen_api_streamable_http_test.yaml +44 -0
- massgen/configs/tools/web-search/qwen_local_streamable_http_test.yaml +43 -0
- massgen/coordination_tracker.py +708 -0
- massgen/docker/README.md +462 -0
- massgen/filesystem_manager/__init__.py +21 -0
- massgen/filesystem_manager/_base.py +9 -0
- massgen/filesystem_manager/_code_execution_server.py +545 -0
- massgen/filesystem_manager/_docker_manager.py +477 -0
- massgen/filesystem_manager/_file_operation_tracker.py +248 -0
- massgen/filesystem_manager/_filesystem_manager.py +813 -0
- massgen/filesystem_manager/_path_permission_manager.py +1261 -0
- massgen/filesystem_manager/_workspace_tools_server.py +1815 -0
- massgen/formatter/__init__.py +10 -0
- massgen/formatter/_chat_completions_formatter.py +284 -0
- massgen/formatter/_claude_formatter.py +235 -0
- massgen/formatter/_formatter_base.py +156 -0
- massgen/formatter/_response_formatter.py +263 -0
- massgen/frontend/__init__.py +1 -2
- massgen/frontend/coordination_ui.py +471 -286
- massgen/frontend/displays/base_display.py +56 -11
- massgen/frontend/displays/create_coordination_table.py +1956 -0
- massgen/frontend/displays/rich_terminal_display.py +1259 -619
- massgen/frontend/displays/simple_display.py +9 -4
- massgen/frontend/displays/terminal_display.py +27 -68
- massgen/logger_config.py +681 -0
- massgen/mcp_tools/README.md +232 -0
- massgen/mcp_tools/__init__.py +105 -0
- massgen/mcp_tools/backend_utils.py +1035 -0
- massgen/mcp_tools/circuit_breaker.py +195 -0
- massgen/mcp_tools/client.py +894 -0
- massgen/mcp_tools/config_validator.py +138 -0
- massgen/mcp_tools/docs/circuit_breaker.md +646 -0
- massgen/mcp_tools/docs/client.md +950 -0
- massgen/mcp_tools/docs/config_validator.md +478 -0
- massgen/mcp_tools/docs/exceptions.md +1165 -0
- massgen/mcp_tools/docs/security.md +854 -0
- massgen/mcp_tools/exceptions.py +338 -0
- massgen/mcp_tools/hooks.py +212 -0
- massgen/mcp_tools/security.py +780 -0
- massgen/message_templates.py +342 -64
- massgen/orchestrator.py +1515 -241
- massgen/stream_chunk/__init__.py +35 -0
- massgen/stream_chunk/base.py +92 -0
- massgen/stream_chunk/multimodal.py +237 -0
- massgen/stream_chunk/text.py +162 -0
- massgen/tests/mcp_test_server.py +150 -0
- massgen/tests/multi_turn_conversation_design.md +0 -8
- massgen/tests/test_azure_openai_backend.py +156 -0
- massgen/tests/test_backend_capabilities.py +262 -0
- massgen/tests/test_backend_event_loop_all.py +179 -0
- massgen/tests/test_chat_completions_refactor.py +142 -0
- massgen/tests/test_claude_backend.py +15 -28
- massgen/tests/test_claude_code.py +268 -0
- massgen/tests/test_claude_code_context_sharing.py +233 -0
- massgen/tests/test_claude_code_orchestrator.py +175 -0
- massgen/tests/test_cli_backends.py +180 -0
- massgen/tests/test_code_execution.py +679 -0
- massgen/tests/test_external_agent_backend.py +134 -0
- massgen/tests/test_final_presentation_fallback.py +237 -0
- massgen/tests/test_gemini_planning_mode.py +351 -0
- massgen/tests/test_grok_backend.py +7 -10
- massgen/tests/test_http_mcp_server.py +42 -0
- massgen/tests/test_integration_simple.py +198 -0
- massgen/tests/test_mcp_blocking.py +125 -0
- massgen/tests/test_message_context_building.py +29 -47
- massgen/tests/test_orchestrator_final_presentation.py +48 -0
- massgen/tests/test_path_permission_manager.py +2087 -0
- massgen/tests/test_rich_terminal_display.py +14 -13
- massgen/tests/test_timeout.py +133 -0
- massgen/tests/test_v3_3agents.py +11 -12
- massgen/tests/test_v3_simple.py +8 -13
- massgen/tests/test_v3_three_agents.py +11 -18
- massgen/tests/test_v3_two_agents.py +8 -13
- massgen/token_manager/__init__.py +7 -0
- massgen/token_manager/token_manager.py +400 -0
- massgen/utils.py +52 -16
- massgen/v1/agent.py +45 -91
- massgen/v1/agents.py +18 -53
- massgen/v1/backends/gemini.py +50 -153
- massgen/v1/backends/grok.py +21 -54
- massgen/v1/backends/oai.py +39 -111
- massgen/v1/cli.py +36 -93
- massgen/v1/config.py +8 -12
- massgen/v1/logging.py +43 -127
- massgen/v1/main.py +18 -32
- massgen/v1/orchestrator.py +68 -209
- massgen/v1/streaming_display.py +62 -163
- massgen/v1/tools.py +8 -12
- massgen/v1/types.py +9 -23
- massgen/v1/utils.py +5 -23
- massgen-0.1.0.dist-info/METADATA +1245 -0
- massgen-0.1.0.dist-info/RECORD +273 -0
- massgen-0.1.0.dist-info/entry_points.txt +2 -0
- massgen/frontend/logging/__init__.py +0 -9
- massgen/frontend/logging/realtime_logger.py +0 -197
- massgen-0.0.3.dist-info/METADATA +0 -568
- massgen-0.0.3.dist-info/RECORD +0 -76
- massgen-0.0.3.dist-info/entry_points.txt +0 -2
- /massgen/backend/{Function calling openai responses.md → docs/Function calling openai responses.md} +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/WHEEL +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,1815 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# -*- coding: utf-8 -*-
|
|
3
|
+
"""
|
|
4
|
+
Workspace Tools MCP Server for MassGen
|
|
5
|
+
|
|
6
|
+
This MCP server provides workspace management tools for agents including file operations,
|
|
7
|
+
deletion, and comparison capabilities. It implements copy-on-write behavior for multi-agent
|
|
8
|
+
collaboration and provides safe file manipulation within allowed paths.
|
|
9
|
+
|
|
10
|
+
Tools provided:
|
|
11
|
+
- copy_file: Copy a single file or directory from any accessible path to workspace
|
|
12
|
+
- copy_files_batch: Copy multiple files with pattern matching and exclusions
|
|
13
|
+
- delete_file: Delete a single file or directory from workspace
|
|
14
|
+
- delete_files_batch: Delete multiple files with pattern matching
|
|
15
|
+
- compare_directories: Compare two directories and show differences
|
|
16
|
+
- compare_files: Compare two text files and show unified diff
|
|
17
|
+
- generate_and_store_image_with_input_images: Create variations of existing images using gpt-4.1
|
|
18
|
+
- generate_and_store_image_no_input_images: Generate new images from text prompts using gpt-4.1
|
|
19
|
+
- generate_and_store_audio_no_input_audios: Generate audio from text using OpenAI's gpt-4o-audio-preview model
|
|
20
|
+
- generate_text_with_input_audio: Transcribe audio files to text using OpenAI's Transcription API
|
|
21
|
+
"""
|
|
22
|
+
|
|
23
|
+
import argparse
|
|
24
|
+
import base64
|
|
25
|
+
import difflib
|
|
26
|
+
import filecmp
|
|
27
|
+
import fnmatch
|
|
28
|
+
import os
|
|
29
|
+
import shutil
|
|
30
|
+
from pathlib import Path
|
|
31
|
+
from typing import Any, Dict, List, Optional, Tuple
|
|
32
|
+
|
|
33
|
+
import fastmcp
|
|
34
|
+
from dotenv import load_dotenv
|
|
35
|
+
from openai import OpenAI
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def get_copy_file_pairs(
    allowed_paths: List[Path],
    source_base_path: str,
    destination_base_path: str = "",
    include_patterns: Optional[List[str]] = None,
    exclude_patterns: Optional[List[str]] = None,
) -> List[Tuple[Path, Path]]:
    """
    Compute every (source, destination) file pair that copy_files_batch would copy.

    Importable by the filesystem manager so it can validate permissions before
    any copying actually happens.

    Args:
        allowed_paths: Allowed base paths used for access validation.
        source_base_path: Base path to copy from.
        destination_base_path: Base path in the workspace to copy into (required).
        include_patterns: Glob patterns selecting files to copy (default: ["*"]).
        exclude_patterns: Glob patterns for files to skip (default: none).

    Returns:
        List of (source_path, destination_path) tuples.

    Raises:
        ValueError: If paths are invalid, disallowed, or no destination is given.
    """
    includes = ["*"] if include_patterns is None else include_patterns
    excludes = [] if exclude_patterns is None else exclude_patterns

    # Validate the source base first.
    source_base = Path(source_base_path).resolve()
    if not source_base.exists():
        raise ValueError(f"Source base path does not exist: {source_base}")
    _validate_path_access(source_base, allowed_paths)

    # Resolve the destination base; relative paths are anchored at the
    # workspace (the server's current working directory).
    if not destination_base_path:
        # Batch operations must always state where files land.
        raise ValueError("destination_base_path is required for copy_files_batch")
    dest_candidate = Path(destination_base_path)
    if dest_candidate.is_absolute():
        dest_base = dest_candidate.resolve()
    else:
        dest_base = (Path.cwd() / dest_candidate).resolve()

    _validate_path_access(dest_base, allowed_paths)

    pairs: List[Tuple[Path, Path]] = []

    for src_file in source_base.rglob("*"):
        if not src_file.is_file():
            continue

        relative = src_file.relative_to(source_base)
        rel_str = str(relative)

        # A file is copied when it matches at least one include pattern
        # and no exclude pattern.
        if not any(fnmatch.fnmatch(rel_str, pat) for pat in includes):
            continue
        if any(fnmatch.fnmatch(rel_str, pat) for pat in excludes):
            continue

        dest_file = (dest_base / relative).resolve()

        # The computed destination must also stay inside the allowed paths.
        _validate_path_access(dest_file, allowed_paths)

        pairs.append((src_file, dest_file))

    return pairs
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def _validate_path_access(path: Path, allowed_paths: List[Path]) -> None:
|
|
121
|
+
"""
|
|
122
|
+
Validate that a path is within allowed directories.
|
|
123
|
+
|
|
124
|
+
Args:
|
|
125
|
+
path: Path to validate
|
|
126
|
+
allowed_paths: List of allowed base paths
|
|
127
|
+
|
|
128
|
+
Raises:
|
|
129
|
+
ValueError: If path is not within allowed directories
|
|
130
|
+
"""
|
|
131
|
+
if not allowed_paths:
|
|
132
|
+
return # No restrictions
|
|
133
|
+
|
|
134
|
+
for allowed_path in allowed_paths:
|
|
135
|
+
try:
|
|
136
|
+
path.relative_to(allowed_path)
|
|
137
|
+
return # Path is within this allowed directory
|
|
138
|
+
except ValueError:
|
|
139
|
+
continue
|
|
140
|
+
|
|
141
|
+
raise ValueError(f"Path not in allowed directories: {path}")
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def _is_critical_path(path: Path, allowed_paths: List[Path] = None) -> bool:
|
|
145
|
+
"""
|
|
146
|
+
Check if a path is a critical system file that should not be deleted.
|
|
147
|
+
|
|
148
|
+
Critical paths include:
|
|
149
|
+
- .git directories (version control)
|
|
150
|
+
- .env files (environment variables)
|
|
151
|
+
- .massgen directories (MassGen metadata) - UNLESS within an allowed workspace
|
|
152
|
+
- node_modules (package dependencies)
|
|
153
|
+
- venv/.venv (Python virtual environments)
|
|
154
|
+
- __pycache__ (Python cache)
|
|
155
|
+
- massgen_logs (logging)
|
|
156
|
+
|
|
157
|
+
Args:
|
|
158
|
+
path: Path to check
|
|
159
|
+
allowed_paths: List of allowed base paths (workspaces). If provided and path
|
|
160
|
+
is within an allowed path, only check for critical patterns
|
|
161
|
+
within that workspace (not in parent paths).
|
|
162
|
+
|
|
163
|
+
Returns:
|
|
164
|
+
True if path is critical and should not be deleted
|
|
165
|
+
|
|
166
|
+
Examples:
|
|
167
|
+
# Outside workspace - blocks any .massgen in path
|
|
168
|
+
_is_critical_path(Path("/home/.massgen/config")) → True (blocked)
|
|
169
|
+
|
|
170
|
+
# Inside workspace - allows user files even if parent has .massgen
|
|
171
|
+
workspace = Path("/home/.massgen/workspaces/workspace1")
|
|
172
|
+
_is_critical_path(Path("/home/.massgen/workspaces/workspace1/user_dir"), [workspace]) → False (allowed)
|
|
173
|
+
_is_critical_path(Path("/home/.massgen/workspaces/workspace1/.git"), [workspace]) → True (blocked)
|
|
174
|
+
"""
|
|
175
|
+
CRITICAL_PATTERNS = [
|
|
176
|
+
".git",
|
|
177
|
+
".env",
|
|
178
|
+
".massgen",
|
|
179
|
+
"node_modules",
|
|
180
|
+
"__pycache__",
|
|
181
|
+
".venv",
|
|
182
|
+
"venv",
|
|
183
|
+
".pytest_cache",
|
|
184
|
+
".mypy_cache",
|
|
185
|
+
".ruff_cache",
|
|
186
|
+
"massgen_logs",
|
|
187
|
+
]
|
|
188
|
+
|
|
189
|
+
resolved_path = path.resolve()
|
|
190
|
+
|
|
191
|
+
# If path is within an allowed workspace, only check for critical patterns
|
|
192
|
+
# within the workspace itself (not in parent directories)
|
|
193
|
+
if allowed_paths:
|
|
194
|
+
for allowed_path in allowed_paths:
|
|
195
|
+
try:
|
|
196
|
+
# Get the relative path from workspace
|
|
197
|
+
rel_path = resolved_path.relative_to(allowed_path.resolve())
|
|
198
|
+
# Only check parts within the workspace
|
|
199
|
+
for part in rel_path.parts:
|
|
200
|
+
if part in CRITICAL_PATTERNS:
|
|
201
|
+
return True
|
|
202
|
+
# Check if the file name itself is critical
|
|
203
|
+
if resolved_path.name in CRITICAL_PATTERNS:
|
|
204
|
+
return True
|
|
205
|
+
# Path is within workspace and not critical
|
|
206
|
+
return False
|
|
207
|
+
except ValueError:
|
|
208
|
+
# Not within this allowed path, continue checking
|
|
209
|
+
continue
|
|
210
|
+
|
|
211
|
+
# Path is not within any allowed workspace, check entire path
|
|
212
|
+
parts = resolved_path.parts
|
|
213
|
+
for part in parts:
|
|
214
|
+
if part in CRITICAL_PATTERNS:
|
|
215
|
+
return True
|
|
216
|
+
|
|
217
|
+
# Check if the file name itself is critical
|
|
218
|
+
if resolved_path.name in CRITICAL_PATTERNS:
|
|
219
|
+
return True
|
|
220
|
+
|
|
221
|
+
return False
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
def _is_text_file(path: Path) -> bool:
|
|
225
|
+
"""
|
|
226
|
+
Check if a file is likely a text file (not binary).
|
|
227
|
+
|
|
228
|
+
Uses simple heuristic: try to read as text and check for null bytes.
|
|
229
|
+
|
|
230
|
+
TODO: Handle multi-modal files once implemented.
|
|
231
|
+
|
|
232
|
+
Args:
|
|
233
|
+
path: Path to check
|
|
234
|
+
|
|
235
|
+
Returns:
|
|
236
|
+
True if file appears to be text
|
|
237
|
+
"""
|
|
238
|
+
try:
|
|
239
|
+
with open(path, "r", encoding="utf-8") as f:
|
|
240
|
+
# Read first 8KB to check
|
|
241
|
+
chunk = f.read(8192)
|
|
242
|
+
# If it contains null bytes, it's probably binary
|
|
243
|
+
if "\0" in chunk:
|
|
244
|
+
return False
|
|
245
|
+
return True
|
|
246
|
+
except (UnicodeDecodeError, OSError):
|
|
247
|
+
return False
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
def _is_permission_path_root(path: Path, allowed_paths: List[Path]) -> bool:
|
|
251
|
+
"""
|
|
252
|
+
Check if a path is exactly one of the permission path roots.
|
|
253
|
+
|
|
254
|
+
This prevents deletion of workspace directories, context path roots, etc.,
|
|
255
|
+
while still allowing deletion of files and subdirectories within them.
|
|
256
|
+
|
|
257
|
+
Args:
|
|
258
|
+
path: Path to check
|
|
259
|
+
allowed_paths: List of allowed base paths (permission path roots)
|
|
260
|
+
|
|
261
|
+
Returns:
|
|
262
|
+
True if path is exactly a permission path root
|
|
263
|
+
|
|
264
|
+
Examples (Unix/macOS):
|
|
265
|
+
allowed_paths = [Path("/workspace1"), Path("/context")]
|
|
266
|
+
_is_permission_path_root(Path("/workspace1")) → True (blocked)
|
|
267
|
+
_is_permission_path_root(Path("/workspace1/file.txt")) → False (allowed)
|
|
268
|
+
_is_permission_path_root(Path("/workspace1/subdir")) → False (allowed)
|
|
269
|
+
_is_permission_path_root(Path("/context")) → True (blocked)
|
|
270
|
+
_is_permission_path_root(Path("/context/config.yaml")) → False (allowed)
|
|
271
|
+
|
|
272
|
+
Examples (Windows):
|
|
273
|
+
allowed_paths = [Path("C:\\workspace1"), Path("D:\\context")]
|
|
274
|
+
_is_permission_path_root(Path("C:\\workspace1")) → True (blocked)
|
|
275
|
+
_is_permission_path_root(Path("C:\\workspace1\\file.txt")) → False (allowed)
|
|
276
|
+
_is_permission_path_root(Path("D:\\context")) → True (blocked)
|
|
277
|
+
_is_permission_path_root(Path("D:\\context\\data.json")) → False (allowed)
|
|
278
|
+
"""
|
|
279
|
+
resolved_path = path.resolve()
|
|
280
|
+
for allowed_path in allowed_paths:
|
|
281
|
+
if resolved_path == allowed_path.resolve():
|
|
282
|
+
return True
|
|
283
|
+
return False
|
|
284
|
+
|
|
285
|
+
|
|
286
|
+
def _validate_and_resolve_paths(allowed_paths: List[Path], source_path: str, destination_path: str) -> tuple[Path, Path]:
    """
    Validate and resolve source and destination paths for copy operations.

    Args:
        allowed_paths: List of allowed base paths for validation.
        source_path: Source file/directory path (must exist).
        destination_path: Destination path in workspace; relative paths are
            resolved against the workspace (current working directory).

    Returns:
        Tuple of (resolved_source, resolved_destination).

    Raises:
        ValueError: If the source does not exist or either path is outside
            the allowed directories.
    """
    try:
        # Validate and resolve source.
        source = Path(source_path).resolve()
        if not source.exists():
            raise ValueError(f"Source path does not exist: {source}")

        _validate_path_access(source, allowed_paths)

        # Handle destination path - resolve relative paths relative to workspace.
        if Path(destination_path).is_absolute():
            destination = Path(destination_path).resolve()
        else:
            # Relative path is resolved relative to the workspace (cwd).
            destination = (Path.cwd() / destination_path).resolve()

        _validate_path_access(destination, allowed_paths)

        return source, destination

    except Exception as e:
        # Chain the underlying failure so callers can inspect the root cause.
        raise ValueError(f"Path validation failed: {e}") from e
|
|
322
|
+
|
|
323
|
+
|
|
324
|
+
def _perform_copy(source: Path, destination: Path, overwrite: bool = False) -> Dict[str, Any]:
|
|
325
|
+
"""
|
|
326
|
+
Perform the actual copy operation.
|
|
327
|
+
|
|
328
|
+
Args:
|
|
329
|
+
source: Source path
|
|
330
|
+
destination: Destination path
|
|
331
|
+
overwrite: Whether to overwrite existing files
|
|
332
|
+
|
|
333
|
+
Returns:
|
|
334
|
+
Dict with operation results
|
|
335
|
+
"""
|
|
336
|
+
try:
|
|
337
|
+
# Check if destination exists
|
|
338
|
+
if destination.exists() and not overwrite:
|
|
339
|
+
raise ValueError(f"Destination already exists (use overwrite=true): {destination}")
|
|
340
|
+
|
|
341
|
+
# Create parent directories
|
|
342
|
+
destination.parent.mkdir(parents=True, exist_ok=True)
|
|
343
|
+
|
|
344
|
+
if source.is_file():
|
|
345
|
+
shutil.copy2(source, destination)
|
|
346
|
+
return {"type": "file", "source": str(source), "destination": str(destination), "size": destination.stat().st_size}
|
|
347
|
+
elif source.is_dir():
|
|
348
|
+
if destination.exists():
|
|
349
|
+
shutil.rmtree(destination)
|
|
350
|
+
shutil.copytree(source, destination)
|
|
351
|
+
|
|
352
|
+
file_count = len([f for f in destination.rglob("*") if f.is_file()])
|
|
353
|
+
return {"type": "directory", "source": str(source), "destination": str(destination), "file_count": file_count}
|
|
354
|
+
else:
|
|
355
|
+
raise ValueError(f"Source is neither file nor directory: {source}")
|
|
356
|
+
|
|
357
|
+
except Exception as e:
|
|
358
|
+
raise ValueError(f"Copy operation failed: {e}")
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
async def create_server() -> fastmcp.FastMCP:
|
|
362
|
+
"""Factory function to create and configure the workspace copy server."""
|
|
363
|
+
|
|
364
|
+
parser = argparse.ArgumentParser(description="Workspace Copy MCP Server")
|
|
365
|
+
parser.add_argument(
|
|
366
|
+
"--allowed-paths",
|
|
367
|
+
type=str,
|
|
368
|
+
nargs="*",
|
|
369
|
+
default=[],
|
|
370
|
+
help="List of allowed base paths for file operations (default: no restrictions)",
|
|
371
|
+
)
|
|
372
|
+
args = parser.parse_args()
|
|
373
|
+
|
|
374
|
+
# Create the FastMCP server
|
|
375
|
+
mcp = fastmcp.FastMCP("Workspace Copy")
|
|
376
|
+
|
|
377
|
+
# Add allowed paths from arguments
|
|
378
|
+
mcp.allowed_paths = [Path(p).resolve() for p in args.allowed_paths]
|
|
379
|
+
|
|
380
|
+
# Below is for debugging - can be uncommented if needed
|
|
381
|
+
# @mcp.tool()
|
|
382
|
+
# def get_cwd() -> Dict[str, Any]:
|
|
383
|
+
# """
|
|
384
|
+
# Get the current working directory of the workspace copy server.
|
|
385
|
+
|
|
386
|
+
# Useful for testing and verifying that relative paths resolve correctly.
|
|
387
|
+
|
|
388
|
+
# Returns:
|
|
389
|
+
# Dictionary with current working directory information
|
|
390
|
+
# """
|
|
391
|
+
# cwd = Path.cwd()
|
|
392
|
+
# return {
|
|
393
|
+
# "success": True,
|
|
394
|
+
# "operation": "get_cwd",
|
|
395
|
+
# "cwd": str(cwd),
|
|
396
|
+
# "absolute_path": str(cwd.resolve()),
|
|
397
|
+
# "allowed_paths": [str(p) for p in mcp.allowed_paths],
|
|
398
|
+
# "allowed_paths_count": len(mcp.allowed_paths),
|
|
399
|
+
# }
|
|
400
|
+
|
|
401
|
+
@mcp.tool()
def copy_file(source_path: str, destination_path: str, overwrite: bool = False) -> Dict[str, Any]:
    """
    Copy a file or directory from any accessible path to the agent's workspace.

    This is the primary tool for copying files from temp workspaces, context paths,
    or any other accessible location to the current agent's workspace.

    Args:
        source_path: Path to source file/directory (must be absolute path)
        destination_path: Destination path - can be:
            - Relative path: Resolved relative to your workspace (e.g., "output/file.txt")
            - Absolute path: Must be within allowed directories for security
        overwrite: Whether to overwrite existing files/directories (default: False)

    Returns:
        Dictionary with copy operation results; on failure, a structured
        {"success": False, "operation": "copy_file", "error": ...} dict.
    """
    try:
        # Validate both endpoints against the allowed-path policy before touching disk.
        source, destination = _validate_and_resolve_paths(mcp.allowed_paths, source_path, destination_path)
        result = _perform_copy(source, destination, overwrite)
        return {"success": True, "operation": "copy_file", "details": result}
    except Exception as e:
        # Consistency fix: every other tool on this server (copy_files_batch,
        # delete_file, compare_*) reports failures as a structured error dict
        # instead of letting the exception propagate. Do the same here.
        return {"success": False, "operation": "copy_file", "error": str(e)}
|
|
423
|
+
|
|
424
|
+
@mcp.tool()
def copy_files_batch(
    source_base_path: str,
    destination_base_path: str = "",
    include_patterns: Optional[List[str]] = None,
    exclude_patterns: Optional[List[str]] = None,
    overwrite: bool = False,
) -> Dict[str, Any]:
    """
    Copy multiple files with pattern matching and exclusions.

    This advanced tool allows copying multiple files at once with glob-style patterns
    for inclusion and exclusion, useful for copying entire directory structures
    while filtering out unwanted files.

    Args:
        source_base_path: Base path to copy from (must be absolute path)
        destination_base_path: Base destination path - can be:
            - Relative path: Resolved relative to your workspace (e.g., "project/output")
            - Absolute path: Must be within allowed directories for security
            - Empty string: Copy to workspace root
        include_patterns: List of glob patterns for files to include (default: ["*"])
        exclude_patterns: List of glob patterns for files to exclude (default: [])
        overwrite: Whether to overwrite existing files (default: False)

    Returns:
        Dictionary with batch copy operation results (per-file copied/skipped/error lists
        plus a summary of counts).
    """
    # Mutable-default-safe handling of the pattern lists.
    if include_patterns is None:
        include_patterns = ["*"]
    if exclude_patterns is None:
        exclude_patterns = []

    try:
        copied_files = []
        skipped_files = []
        errors = []

        # Get all (source, destination) file pairs to copy; pattern filtering and
        # path validation happen inside this helper.
        file_pairs = get_copy_file_pairs(mcp.allowed_paths, source_base_path, destination_base_path, include_patterns, exclude_patterns)

        # Hoisted loop invariant: previously Path(source_base_path).resolve() was
        # recomputed for every file pair.
        source_base = Path(source_base_path).resolve()

        # Process each file pair; individual failures are recorded, not fatal.
        for source_file, dest_file in file_pairs:
            rel_path_str = str(source_file.relative_to(source_base))

            try:
                # Respect existing destinations unless overwrite was requested.
                if dest_file.exists() and not overwrite:
                    skipped_files.append({"path": rel_path_str, "reason": "destination exists (overwrite=false)"})
                    continue

                # Create parent directories as needed, then copy with metadata.
                dest_file.parent.mkdir(parents=True, exist_ok=True)
                shutil.copy2(source_file, dest_file)

                copied_files.append({"source": str(source_file), "destination": str(dest_file), "relative_path": rel_path_str, "size": dest_file.stat().st_size})

            except Exception as e:
                errors.append({"path": rel_path_str, "error": str(e)})

        return {
            "success": True,
            "operation": "copy_files_batch",
            "summary": {"copied": len(copied_files), "skipped": len(skipped_files), "errors": len(errors)},
            "details": {"copied_files": copied_files, "skipped_files": skipped_files, "errors": errors},
        }

    except Exception as e:
        # Failure before/while building the file-pair list (e.g. path validation).
        return {"success": False, "operation": "copy_files_batch", "error": str(e)}
|
|
495
|
+
|
|
496
|
+
@mcp.tool()
def delete_file(path: str, recursive: bool = False) -> Dict[str, Any]:
    """
    Delete a file or directory from the workspace.

    Lets agents remove outdated files or directories so the workspace stays
    clean instead of accumulating stale versions.

    Args:
        path: Path to file/directory to delete - can be:
            - Relative path: Resolved relative to your workspace (e.g., "old_file.txt")
            - Absolute path: Must be within allowed directories for security
        recursive: Whether to delete directories and their contents (default: False)
            Required for non-empty directories

    Returns:
        Dictionary with deletion operation results

    Security:
        - Requires WRITE permission on path (validated by PathPermissionManager hook)
        - Must be within allowed directories
        - System files (.git, .env, etc.) cannot be deleted
        - Permission path roots themselves cannot be deleted
        - Protected paths specified in config are immune from deletion
    """
    try:
        # Absolute paths are taken as-is; relative paths anchor at the workspace cwd.
        raw = Path(path)
        target = raw.resolve() if raw.is_absolute() else (Path.cwd() / path).resolve()

        # Enforce the allowed-path policy before any filesystem inspection.
        _validate_path_access(target, mcp.allowed_paths)

        # Guard clauses: missing path, protected system path, permission root.
        if not target.exists():
            return {"success": False, "operation": "delete_file", "error": f"Path does not exist: {target}"}

        if _is_critical_path(target, mcp.allowed_paths):
            return {"success": False, "operation": "delete_file", "error": f"Cannot delete critical system path: {target}"}

        if _is_permission_path_root(target, mcp.allowed_paths):
            return {
                "success": False,
                "operation": "delete_file",
                "error": f"Cannot delete permission path root: {target}. You can delete files/directories within it, but not the root itself.",
            }

        # Plain file: record its size, then unlink it.
        if target.is_file():
            byte_count = target.stat().st_size
            target.unlink()
            return {"success": True, "operation": "delete_file", "details": {"type": "file", "path": str(target), "size": byte_count}}

        # Directory: recursive removal reports how many files were deleted;
        # non-recursive removal only succeeds on an empty directory.
        if target.is_dir():
            if recursive:
                removed = len([entry for entry in target.rglob("*") if entry.is_file()])
                shutil.rmtree(target)
                return {"success": True, "operation": "delete_file", "details": {"type": "directory", "path": str(target), "file_count": removed}}

            if any(target.iterdir()):
                return {"success": False, "operation": "delete_file", "error": f"Directory not empty (use recursive=true): {target}"}
            target.rmdir()
            return {"success": True, "operation": "delete_file", "details": {"type": "directory", "path": str(target)}}

        # Neither a regular file nor a directory (e.g. special filesystem object).
        return {"success": False, "operation": "delete_file", "error": f"Path is neither file nor directory: {target}"}

    except Exception as e:
        return {"success": False, "operation": "delete_file", "error": str(e)}
|
|
574
|
+
|
|
575
|
+
@mcp.tool()
def delete_files_batch(
    base_path: str,
    include_patterns: Optional[List[str]] = None,
    exclude_patterns: Optional[List[str]] = None,
) -> Dict[str, Any]:
    """
    Delete multiple files matching patterns.

    This advanced tool allows deleting multiple files at once with glob-style patterns
    for inclusion and exclusion, useful for cleaning up entire directory structures
    while preserving specific files.

    Args:
        base_path: Base directory to search in - can be:
            - Relative path: Resolved relative to your workspace (e.g., "build")
            - Absolute path: Must be within allowed directories for security
        include_patterns: List of glob patterns for files to include (default: ["*"])
        exclude_patterns: List of glob patterns for files to exclude (default: [])

    Returns:
        Dictionary with batch deletion results including:
        - deleted: List of deleted files
        - skipped: List of skipped files (read-only or system files)
        - errors: List of errors encountered

    Security:
        - Requires WRITE permission on each file
        - Must be within allowed directories
        - System files (.git, .env, etc.) cannot be deleted
    """
    # Avoid mutable default arguments: normalize None to the documented defaults.
    if include_patterns is None:
        include_patterns = ["*"]
    if exclude_patterns is None:
        exclude_patterns = []

    try:
        deleted_files = []
        skipped_files = []
        errors = []

        # Resolve base path: absolute paths taken as-is, relative paths anchored
        # at the workspace current working directory.
        if Path(base_path).is_absolute():
            base = Path(base_path).resolve()
        else:
            base = (Path.cwd() / base_path).resolve()

        # Validate base path before walking it.
        if not base.exists():
            return {"success": False, "operation": "delete_files_batch", "error": f"Base path does not exist: {base}"}

        _validate_path_access(base, mcp.allowed_paths)

        # Collect files to delete. Only regular files are considered; directories
        # themselves are never removed by this tool.
        for item in base.rglob("*"):
            if not item.is_file():
                continue

            # Get relative path from base; patterns match against this string.
            rel_path = item.relative_to(base)
            rel_path_str = str(rel_path)

            # Check include patterns. NOTE: fnmatch treats the whole relative
            # path as one flat string, so "*" also matches files inside
            # subdirectories (fnmatch does not give "/" special meaning).
            included = any(fnmatch.fnmatch(rel_path_str, pattern) for pattern in include_patterns)
            if not included:
                continue

            # Check exclude patterns (exclusion wins over inclusion).
            excluded = any(fnmatch.fnmatch(rel_path_str, pattern) for pattern in exclude_patterns)
            if excluded:
                continue

            try:
                # Check if this is a critical system file (e.g. .git, .env).
                if _is_critical_path(item, mcp.allowed_paths):
                    skipped_files.append({"path": rel_path_str, "reason": "system file (protected)"})
                    continue

                # Check if this is a permission path root (roots are protected).
                if _is_permission_path_root(item, mcp.allowed_paths):
                    skipped_files.append({"path": rel_path_str, "reason": "permission path root (protected)"})
                    continue

                # Validate path access for each individual file; a failure here
                # raises and is recorded in `errors` below.
                _validate_path_access(item, mcp.allowed_paths)

                # Delete file, capturing its size beforehand for the report.
                size = item.stat().st_size
                item.unlink()

                deleted_files.append({"path": str(item), "relative_path": rel_path_str, "size": size})

            except Exception as e:
                # Per-file failures are collected, not fatal to the batch.
                errors.append({"path": rel_path_str, "error": str(e)})

        return {
            "success": True,
            "operation": "delete_files_batch",
            "summary": {"deleted": len(deleted_files), "skipped": len(skipped_files), "errors": len(errors)},
            "details": {"deleted_files": deleted_files, "skipped_files": skipped_files, "errors": errors},
        }

    except Exception as e:
        # Setup failures (bad base path, policy violation) abort the whole batch.
        return {"success": False, "operation": "delete_files_batch", "error": str(e)}
|
|
679
|
+
|
|
680
|
+
@mcp.tool()
def compare_directories(dir1: str, dir2: str, show_content_diff: bool = False) -> Dict[str, Any]:
    """
    Compare two directories and show differences.

    This tool helps understand what changed between two workspaces or directory states,
    making it easier to review changes before deployment or understand agent modifications.

    Note: the listing reflects filecmp.dircmp's top-level comparison of the two
    directories (common subdirectories are not recursed into here).

    Args:
        dir1: First directory path (absolute or relative to workspace)
        dir2: Second directory path (absolute or relative to workspace)
        show_content_diff: Whether to include unified diffs of different files (default: False)

    Returns:
        Dictionary with comparison results:
        - only_in_dir1: Files only in first directory
        - only_in_dir2: Files only in second directory
        - different: Files that exist in both but have different content
        - identical: Files that are identical
        - content_diffs: Optional unified diffs (if show_content_diff=True)

    Security:
        - Read-only operation, never modifies files
        - Both paths must be within allowed directories
    """
    try:
        # Resolve paths (relative paths are anchored at the workspace cwd).
        path1 = Path(dir1).resolve() if Path(dir1).is_absolute() else (Path.cwd() / dir1).resolve()
        path2 = Path(dir2).resolve() if Path(dir2).is_absolute() else (Path.cwd() / dir2).resolve()

        # Validate both paths against the allowed-path policy.
        _validate_path_access(path1, mcp.allowed_paths)
        _validate_path_access(path2, mcp.allowed_paths)

        if not path1.exists() or not path1.is_dir():
            return {"success": False, "operation": "compare_directories", "error": f"First path is not a directory: {path1}"}

        if not path2.exists() or not path2.is_dir():
            return {"success": False, "operation": "compare_directories", "error": f"Second path is not a directory: {path2}"}

        # Use filecmp for the structural comparison.
        dcmp = filecmp.dircmp(str(path1), str(path2))

        result = {
            "success": True,
            "operation": "compare_directories",
            "details": {
                "only_in_dir1": list(dcmp.left_only),
                "only_in_dir2": list(dcmp.right_only),
                "different": list(dcmp.diff_files),
                "identical": list(dcmp.same_files),
            },
        }

        # Add per-file unified diffs if requested.
        if show_content_diff and dcmp.diff_files:
            content_diffs = {}
            for filename in dcmp.diff_files:
                file1 = path1 / filename
                file2 = path2 / filename
                try:
                    # Only diff text files; binary files are silently skipped.
                    if _is_text_file(file1) and _is_text_file(file2):
                        with open(file1) as f1, open(file2) as f2:
                            lines1 = f1.readlines()
                            lines2 = f2.readlines()
                        # Bug fix: the diff headers previously did not interpolate
                        # the filename, so every file's diff carried identical,
                        # useless "dir1/..."/"dir2/..." labels.
                        diff = list(difflib.unified_diff(lines1, lines2, fromfile=f"dir1/{filename}", tofile=f"dir2/{filename}", lineterm=""))
                        content_diffs[filename] = "\n".join(diff[:100])  # Limit to 100 lines
                except Exception as e:
                    content_diffs[filename] = f"Error generating diff: {e}"

            result["details"]["content_diffs"] = content_diffs

        return result

    except Exception as e:
        return {"success": False, "operation": "compare_directories", "error": str(e)}
|
|
757
|
+
|
|
758
|
+
@mcp.tool()
def compare_files(file1: str, file2: str, context_lines: int = 3) -> Dict[str, Any]:
    """
    Compare two text files and show unified diff.

    Gives a line-by-line view of how two file versions differ, so it is easy
    to see exactly what changed between them.

    Args:
        file1: First file path (absolute or relative to workspace)
        file2: Second file path (absolute or relative to workspace)
        context_lines: Number of context lines around changes (default: 3)

    Returns:
        Dictionary with comparison results:
        - identical: Boolean indicating if files are identical
        - diff: Unified diff output
        - stats: Statistics (lines added/removed/changed)

    Security:
        - Read-only operation, never modifies files
        - Both paths must be within allowed directories
        - Works best with text files
    """
    try:
        # Resolve an input path: absolute as-is, relative against the workspace cwd.
        def _resolve(p: str) -> Path:
            candidate = Path(p)
            return candidate.resolve() if candidate.is_absolute() else (Path.cwd() / p).resolve()

        path1 = _resolve(file1)
        path2 = _resolve(file2)

        # Both endpoints must satisfy the allowed-path policy.
        _validate_path_access(path1, mcp.allowed_paths)
        _validate_path_access(path2, mcp.allowed_paths)

        if not (path1.exists() and path1.is_file()):
            return {"success": False, "operation": "compare_files", "error": f"First path is not a file: {path1}"}

        if not (path2.exists() and path2.is_file()):
            return {"success": False, "operation": "compare_files", "error": f"Second path is not a file: {path2}"}

        # Read both files as text; undecodable content means binary input.
        try:
            with open(path1) as handle:
                left_lines = handle.readlines()
            with open(path2) as handle:
                right_lines = handle.readlines()
        except UnicodeDecodeError:
            return {"success": False, "operation": "compare_files", "error": "Files appear to be binary, not text"}

        # Produce the unified diff with the requested amount of context.
        diff = list(difflib.unified_diff(left_lines, right_lines, fromfile=str(path1), tofile=str(path2), lineterm="", n=context_lines))

        # Single pass over the diff to tally additions and removals
        # (header lines "+++"/"---" are not counted).
        added = 0
        removed = 0
        for line in diff:
            if line.startswith("+") and not line.startswith("+++"):
                added += 1
            elif line.startswith("-") and not line.startswith("---"):
                removed += 1

        return {
            "success": True,
            "operation": "compare_files",
            "details": {"identical": len(diff) == 0, "diff": "\n".join(diff[:500]), "stats": {"added": added, "removed": removed, "changed": min(added, removed)}},
        }

    except Exception as e:
        return {"success": False, "operation": "compare_files", "error": str(e)}
|
|
821
|
+
|
|
822
|
+
@mcp.tool()
def generate_and_store_image_with_input_images(
    base_image_paths: List[str],
    prompt: str = "Create a variation of the provided images",
    model: str = "gpt-4.1",
    n: int = 1,
    storage_path: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Create variations based on multiple input images using OpenAI's gpt-4.1 API.

    This tool generates image variations based on multiple base images using OpenAI's gpt-4.1 API
    and saves them to the workspace with automatic organization.

    Args:
        base_image_paths: List of paths to base images (PNG/JPEG files, less than 4MB)
            - Relative path: Resolved relative to workspace
            - Absolute path: Must be within allowed directories
        prompt: Text description for the variation (default: "Create a variation of the provided images")
        model: Model to use (default: "gpt-4.1")
        n: Number of variations to generate (default: 1)
            NOTE(review): `n` is currently accepted but never used in the body;
            the number of images returned is whatever the API produces — confirm
            whether it should be forwarded to the request.
        storage_path: Directory path where to save variations (optional)
            - Relative path: Resolved relative to workspace
            - Absolute path: Must be within allowed directories
            - None/empty: Saves to workspace root

    Returns:
        Dictionary containing:
        - success: Whether operation succeeded
        - operation: "generate_and_store_image_with_input_images"
        - note: Note about usage
        - images: List of generated images with file paths and metadata
        - model: Model used for generation
        - prompt: The prompt used
        - total_images: Total number of images generated

    Examples:
        generate_and_store_image_with_input_images(["cat.png", "dog.png"], "Combine these animals")
        → Generates a variation combining both images

        generate_and_store_image_with_input_images(["art/logo.png", "art/icon.png"], "Create a unified design")
        → Generates variations based on both images

    Security:
        - Requires valid OpenAI API key
        - Input images must be valid image files less than 4MB
        - Files are saved to specified path within workspace
    """
    from datetime import datetime

    try:
        # Load environment variables: prefer a .env three levels above this
        # file (presumably the project root — confirm layout), else fall back
        # to python-dotenv's default search.
        script_dir = Path(__file__).parent.parent.parent
        env_path = script_dir / ".env"
        if env_path.exists():
            load_dotenv(env_path)
        else:
            load_dotenv()

        openai_api_key = os.getenv("OPENAI_API_KEY")

        # Fail fast with a structured error if no API key is configured.
        if not openai_api_key:
            return {
                "success": False,
                "operation": "generate_and_store_image_with_input_images",
                "error": "OpenAI API key not found. Please set OPENAI_API_KEY in .env file or environment variable.",
            }

        # Initialize OpenAI client
        client = OpenAI(api_key=openai_api_key)

        # Prepare content list with prompt and images (Responses API input format).
        content = [{"type": "input_text", "text": prompt}]

        # Process and validate all input images; any invalid image aborts the
        # whole call with a structured error before the API is contacted.
        validated_paths = []
        for image_path_str in base_image_paths:
            # Resolve image path (relative paths anchored at the workspace cwd).
            if Path(image_path_str).is_absolute():
                image_path = Path(image_path_str).resolve()
            else:
                image_path = (Path.cwd() / image_path_str).resolve()

            # Validate image path against the allowed-path policy.
            _validate_path_access(image_path, mcp.allowed_paths)

            if not image_path.exists():
                return {
                    "success": False,
                    "operation": "generate_and_store_image_with_input_images",
                    "error": f"Image file does not exist: {image_path}",
                }

            # Allow both PNG and JPEG formats (by extension only).
            if image_path.suffix.lower() not in [".png", ".jpg", ".jpeg"]:
                return {
                    "success": False,
                    "operation": "generate_and_store_image_with_input_images",
                    "error": f"Image must be PNG or JPEG format: {image_path}",
                }

            # Check file size (must be less than 4MB).
            file_size = image_path.stat().st_size
            if file_size > 4 * 1024 * 1024:
                return {
                    "success": False,
                    "operation": "generate_and_store_image_with_input_images",
                    "error": f"Image file too large (must be < 4MB): {image_path} is {file_size / (1024*1024):.2f}MB",
                }

            validated_paths.append(image_path)

            # Read and encode image to base64 for inline transmission.
            with open(image_path, "rb") as f:
                image_data = f.read()
            image_base64 = base64.b64encode(image_data).decode("utf-8")

            # Determine MIME type from the extension.
            mime_type = "image/jpeg" if image_path.suffix.lower() in [".jpg", ".jpeg"] else "image/png"

            # Add image to content as a data URL.
            content.append(
                {
                    "type": "input_image",
                    "image_url": f"data:{mime_type};base64,{image_base64}",
                },
            )

        # Determine storage directory (workspace root when not specified).
        if storage_path:
            if Path(storage_path).is_absolute():
                storage_dir = Path(storage_path).resolve()
            else:
                storage_dir = (Path.cwd() / storage_path).resolve()
        else:
            storage_dir = Path.cwd()

        # Validate storage directory and make sure it exists.
        _validate_path_access(storage_dir, mcp.allowed_paths)
        storage_dir.mkdir(parents=True, exist_ok=True)

        try:
            # print("Content for OpenAI API:", str(content))
            # Generate variations using gpt-4.1 API with all images at once
            # append content to a file
            response = client.responses.create(
                model=model,
                input=[
                    {
                        "role": "user",
                        "content": content,
                    },
                ],
                tools=[{"type": "image_generation"}],
            )

            # Extract image generation calls from response output items.
            image_generation_calls = [output for output in response.output if output.type == "image_generation_call"]

            all_variations = []
            # Single timestamp shared by all files from this invocation.
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

            # Process generated images: decode base64 payloads and save to disk.
            for idx, output in enumerate(image_generation_calls):
                if hasattr(output, "result"):
                    image_base64 = output.result
                    image_bytes = base64.b64decode(image_base64)

                    # Generate filename (index only included when there are
                    # multiple results, to keep the single-result name short).
                    if len(image_generation_calls) > 1:
                        filename = f"variation_{idx+1}_{timestamp}.png"
                    else:
                        filename = f"variation_{timestamp}.png"

                    # Full file path
                    file_path = storage_dir / filename

                    # Save image
                    file_path.write_bytes(image_bytes)

                    all_variations.append(
                        {
                            "source_images": [str(p) for p in validated_paths],
                            "file_path": str(file_path),
                            "filename": filename,
                            "size": len(image_bytes),
                            "index": idx,
                        },
                    )

            # If no images were generated, surface any text the model returned
            # instead, as the error message.
            if not all_variations:
                text_outputs = [output.content for output in response.output if hasattr(output, "content")]
                if text_outputs:
                    return {
                        "success": False,
                        "operation": "generate_and_store_image_with_input_images",
                        "error": f"No images generated. Response: {' '.join(text_outputs)}",
                    }

        except Exception as api_error:
            # API/transport failures are reported separately from local errors.
            return {
                "success": False,
                "operation": "generate_and_store_image_with_input_images",
                "error": f"OpenAI API error: {str(api_error)}",
            }

        return {
            "success": True,
            "operation": "generate_and_store_image_with_input_images",
            "note": "If no input images were provided, you must use generate_and_store_image_no_input_images tool.",
            "images": all_variations,
            "model": model,
            "prompt": prompt,
            "total_images": len(all_variations),
        }

    except Exception as e:
        return {
            "success": False,
            "operation": "generate_and_store_image_with_input_images",
            "error": f"Failed to generate variations: {str(e)}",
        }
|
|
1045
|
+
|
|
1046
|
+
@mcp.tool()
|
|
1047
|
+
def generate_and_store_audio_no_input_audios(
|
|
1048
|
+
prompt: str,
|
|
1049
|
+
model: str = "gpt-4o-audio-preview",
|
|
1050
|
+
voice: str = "alloy",
|
|
1051
|
+
audio_format: str = "wav",
|
|
1052
|
+
storage_path: Optional[str] = None,
|
|
1053
|
+
) -> Dict[str, Any]:
|
|
1054
|
+
"""
|
|
1055
|
+
Generate audio from text using OpenAI's gpt-4o-audio-preview model and store it in the workspace.
|
|
1056
|
+
|
|
1057
|
+
This tool generates audio speech from text prompts using OpenAI's audio generation API
|
|
1058
|
+
and saves the audio files to the workspace with automatic organization.
|
|
1059
|
+
|
|
1060
|
+
Args:
|
|
1061
|
+
prompt: Text content to convert to audio speech
|
|
1062
|
+
model: Model to use for generation (default: "gpt-4o-audio-preview")
|
|
1063
|
+
voice: Voice to use for audio generation (default: "alloy")
|
|
1064
|
+
Options: "alloy", "echo", "fable", "onyx", "nova", "shimmer"
|
|
1065
|
+
audio_format: Audio format for output (default: "wav")
|
|
1066
|
+
Options: "wav", "mp3", "opus", "aac", "flac"
|
|
1067
|
+
storage_path: Directory path where to save the audio (optional)
|
|
1068
|
+
- Relative path: Resolved relative to workspace (e.g., "audio/generated")
|
|
1069
|
+
- Absolute path: Must be within allowed directories
|
|
1070
|
+
- None/empty: Saves to workspace root
|
|
1071
|
+
|
|
1072
|
+
Returns:
|
|
1073
|
+
Dictionary containing:
|
|
1074
|
+
- success: Whether operation succeeded
|
|
1075
|
+
- operation: "generate_and_store_audio_no_input_audios"
|
|
1076
|
+
- audio_file: Generated audio file with path and metadata
|
|
1077
|
+
- model: Model used for generation
|
|
1078
|
+
- prompt: The prompt used for generation
|
|
1079
|
+
- voice: Voice used for generation
|
|
1080
|
+
- format: Audio format used
|
|
1081
|
+
|
|
1082
|
+
Examples:
|
|
1083
|
+
generate_and_store_audio_no_input_audios("Is a golden retriever a good family dog?")
|
|
1084
|
+
→ Generates and saves to: 20240115_143022_audio.wav
|
|
1085
|
+
|
|
1086
|
+
generate_and_store_audio_no_input_audios("Hello world", voice="nova", audio_format="mp3")
|
|
1087
|
+
→ Generates with nova voice and saves as: 20240115_143022_audio.mp3
|
|
1088
|
+
|
|
1089
|
+
Security:
|
|
1090
|
+
- Requires valid OpenAI API key (automatically detected from .env or environment)
|
|
1091
|
+
- Files are saved to specified path within workspace
|
|
1092
|
+
- Path must be within allowed directories
|
|
1093
|
+
"""
|
|
1094
|
+
from datetime import datetime
|
|
1095
|
+
|
|
1096
|
+
try:
|
|
1097
|
+
# Load environment variables
|
|
1098
|
+
script_dir = Path(__file__).parent.parent.parent
|
|
1099
|
+
env_path = script_dir / ".env"
|
|
1100
|
+
if env_path.exists():
|
|
1101
|
+
load_dotenv(env_path)
|
|
1102
|
+
else:
|
|
1103
|
+
load_dotenv()
|
|
1104
|
+
|
|
1105
|
+
openai_api_key = os.getenv("OPENAI_API_KEY")
|
|
1106
|
+
|
|
1107
|
+
if not openai_api_key:
|
|
1108
|
+
return {
|
|
1109
|
+
"success": False,
|
|
1110
|
+
"operation": "generate_and_store_audio_no_input_audios",
|
|
1111
|
+
"error": "OpenAI API key not found. Please set OPENAI_API_KEY in .env file or environment variable.",
|
|
1112
|
+
}
|
|
1113
|
+
|
|
1114
|
+
# Initialize OpenAI client
|
|
1115
|
+
client = OpenAI(api_key=openai_api_key)
|
|
1116
|
+
|
|
1117
|
+
# Determine storage directory
|
|
1118
|
+
if storage_path:
|
|
1119
|
+
if Path(storage_path).is_absolute():
|
|
1120
|
+
storage_dir = Path(storage_path).resolve()
|
|
1121
|
+
else:
|
|
1122
|
+
storage_dir = (Path.cwd() / storage_path).resolve()
|
|
1123
|
+
else:
|
|
1124
|
+
storage_dir = Path.cwd()
|
|
1125
|
+
|
|
1126
|
+
# Validate storage directory is within allowed paths
|
|
1127
|
+
_validate_path_access(storage_dir, mcp.allowed_paths)
|
|
1128
|
+
|
|
1129
|
+
# Create directory if it doesn't exist
|
|
1130
|
+
storage_dir.mkdir(parents=True, exist_ok=True)
|
|
1131
|
+
|
|
1132
|
+
try:
|
|
1133
|
+
# Generate audio using OpenAI API
|
|
1134
|
+
completion = client.chat.completions.create(
|
|
1135
|
+
model=model,
|
|
1136
|
+
modalities=["text", "audio"],
|
|
1137
|
+
audio={"voice": voice, "format": audio_format},
|
|
1138
|
+
messages=[
|
|
1139
|
+
{
|
|
1140
|
+
"role": "user",
|
|
1141
|
+
"content": prompt,
|
|
1142
|
+
},
|
|
1143
|
+
],
|
|
1144
|
+
)
|
|
1145
|
+
|
|
1146
|
+
# Check if audio data is available
|
|
1147
|
+
if not completion.choices[0].message.audio or not completion.choices[0].message.audio.data:
|
|
1148
|
+
return {
|
|
1149
|
+
"success": False,
|
|
1150
|
+
"operation": "generate_and_store_audio_no_input_audios",
|
|
1151
|
+
"error": "No audio data received from API",
|
|
1152
|
+
}
|
|
1153
|
+
|
|
1154
|
+
# Decode audio data from base64
|
|
1155
|
+
audio_bytes = base64.b64decode(completion.choices[0].message.audio.data)
|
|
1156
|
+
|
|
1157
|
+
# Generate filename with timestamp
|
|
1158
|
+
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
|
1159
|
+
|
|
1160
|
+
# Clean prompt for filename (first 30 chars)
|
|
1161
|
+
clean_prompt = "".join(c for c in prompt[:30] if c.isalnum() or c in (" ", "-", "_")).strip()
|
|
1162
|
+
clean_prompt = clean_prompt.replace(" ", "_")
|
|
1163
|
+
|
|
1164
|
+
filename = f"{timestamp}_{clean_prompt}.{audio_format}"
|
|
1165
|
+
|
|
1166
|
+
# Full file path
|
|
1167
|
+
file_path = storage_dir / filename
|
|
1168
|
+
|
|
1169
|
+
# Write audio to file
|
|
1170
|
+
file_path.write_bytes(audio_bytes)
|
|
1171
|
+
file_size = len(audio_bytes)
|
|
1172
|
+
|
|
1173
|
+
# Get text response if available
|
|
1174
|
+
text_response = completion.choices[0].message.content if completion.choices[0].message.content else None
|
|
1175
|
+
|
|
1176
|
+
return {
|
|
1177
|
+
"success": True,
|
|
1178
|
+
"operation": "generate_and_store_audio_no_input_audios",
|
|
1179
|
+
"audio_file": {
|
|
1180
|
+
"file_path": str(file_path),
|
|
1181
|
+
"filename": filename,
|
|
1182
|
+
"size": file_size,
|
|
1183
|
+
"format": audio_format,
|
|
1184
|
+
},
|
|
1185
|
+
"model": model,
|
|
1186
|
+
"prompt": prompt,
|
|
1187
|
+
"voice": voice,
|
|
1188
|
+
"format": audio_format,
|
|
1189
|
+
"text_response": text_response,
|
|
1190
|
+
}
|
|
1191
|
+
|
|
1192
|
+
except Exception as api_error:
|
|
1193
|
+
return {
|
|
1194
|
+
"success": False,
|
|
1195
|
+
"operation": "generate_and_store_audio_no_input_audios",
|
|
1196
|
+
"error": f"OpenAI API error: {str(api_error)}",
|
|
1197
|
+
}
|
|
1198
|
+
|
|
1199
|
+
except Exception as e:
|
|
1200
|
+
return {
|
|
1201
|
+
"success": False,
|
|
1202
|
+
"operation": "generate_and_store_audio_no_input_audios",
|
|
1203
|
+
"error": f"Failed to generate or save audio: {str(e)}",
|
|
1204
|
+
}
|
|
1205
|
+
|
|
1206
|
+
@mcp.tool()
def generate_and_store_image_no_input_images(
    prompt: str,
    model: str = "gpt-4.1",
    storage_path: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Generate image(s) from a text prompt using OpenAI's Responses API
    **WITHOUT ANY INPUT IMAGES** and store them in the workspace.

    The prompt is sent to the Responses API with the built-in
    "image_generation" tool enabled; every image_generation_call result in
    the response is decoded from base64 and written to disk as a PNG.

    Args:
        prompt: Text description of the image to generate
        model: Model to use for generation (default: "gpt-4.1")
            Options: "gpt-4.1"
        storage_path: Directory path where to save the image (optional)
            - Relative path: Resolved relative to workspace (e.g., "images/generated")
            - Absolute path: Must be within allowed directories
            - None/empty: Saves to workspace root

    Returns:
        Dictionary containing:
        - success: Whether operation succeeded
        - operation: "generate_and_store_image_no_input_images"
        - note: Note about operation
        - images: List of generated images with file paths and metadata
        - model: Model used for generation
        - prompt: The prompt used for generation
        - total_images: Total number of images generated and saved

    Examples:
        generate_and_store_image_no_input_images("a cat in space")
        → Generates and saves to: 20240115_143022_a_cat_in_space.png

        generate_and_store_image_no_input_images("sunset over mountains", storage_path="art/landscapes")
        → Generates and saves to: art/landscapes/20240115_143022_sunset_over_mountains.png

    Security:
        - Requires valid OpenAI API key (automatically detected from .env or environment)
        - Files are saved to specified path within workspace
        - Path must be within allowed directories

    Note:
        API key is automatically detected in this order:
        1. First checks .env file in current directory or parent directories
        2. Then checks environment variables
    """
    from datetime import datetime

    try:
        # Try to find and load .env file from multiple locations:
        # 1. project root relative to this script, 2. CWD and parents.
        script_dir = Path(__file__).parent.parent.parent  # Go up to project root
        env_path = script_dir / ".env"
        if env_path.exists():
            load_dotenv(env_path)
        else:
            load_dotenv()

        # Get API key from environment (load_dotenv will have loaded .env file)
        openai_api_key = os.getenv("OPENAI_API_KEY")

        if not openai_api_key:
            return {
                "success": False,
                # Operation name matches the tool name so callers can
                # correlate errors with the tool that produced them.
                "operation": "generate_and_store_image_no_input_images",
                "error": "OpenAI API key not found. Please set OPENAI_API_KEY in .env file or environment variable.",
            }

        # Initialize OpenAI client
        client = OpenAI(api_key=openai_api_key)

        # Determine storage directory
        if storage_path:
            if Path(storage_path).is_absolute():
                storage_dir = Path(storage_path).resolve()
            else:
                storage_dir = (Path.cwd() / storage_path).resolve()
        else:
            storage_dir = Path.cwd()

        # Validate storage directory is within allowed paths
        _validate_path_access(storage_dir, mcp.allowed_paths)

        # Create directory if it doesn't exist
        storage_dir.mkdir(parents=True, exist_ok=True)

        try:
            # Generate image using OpenAI Responses API (non-streaming)
            response = client.responses.create(
                model=model,
                input=prompt,
                tools=[{"type": "image_generation"}],
            )

            # Extract base64 image payloads from the response
            image_data = [output.result for output in response.output if output.type == "image_generation_call"]

            saved_images = []

            if image_data:
                # Generate filename with timestamp
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

                # Clean prompt for filename (first 30 chars, filesystem-safe)
                clean_prompt = "".join(c for c in prompt[:30] if c.isalnum() or c in (" ", "-", "_")).strip()
                clean_prompt = clean_prompt.replace(" ", "_")

                for idx, image_base64 in enumerate(image_data):
                    # Decode base64 image data
                    image_bytes = base64.b64decode(image_base64)

                    # Add index if generating multiple images
                    if len(image_data) > 1:
                        filename = f"{timestamp}_{clean_prompt}_{idx+1}.png"
                    else:
                        filename = f"{timestamp}_{clean_prompt}.png"

                    # Full file path
                    file_path = storage_dir / filename

                    # Write image to file
                    file_path.write_bytes(image_bytes)
                    file_size = len(image_bytes)

                    saved_images.append(
                        {
                            "file_path": str(file_path),
                            "filename": filename,
                            "size": file_size,
                            "index": idx,
                        },
                    )

            return {
                "success": True,
                "operation": "generate_and_store_image_no_input_images",
                "note": "New images are generated and saved to the specified path.",
                "images": saved_images,
                "model": model,
                "prompt": prompt,
                "total_images": len(saved_images),
            }

        except Exception as api_error:
            # Do NOT print here: stdout is reserved for the MCP JSON
            # protocol and stray output would corrupt the stream.
            return {
                "success": False,
                "operation": "generate_and_store_image_no_input_images",
                "error": f"OpenAI API error: {str(api_error)}",
            }

    except Exception as e:
        return {
            "success": False,
            "operation": "generate_and_store_image_no_input_images",
            "error": f"Failed to generate or save image: {str(e)}",
        }
|
|
1370
|
+
|
|
1371
|
+
@mcp.tool()
def generate_text_with_input_audio(
    audio_paths: List[str],
    model: str = "gpt-4o-transcribe",
) -> Dict[str, Any]:
    """
    Transcribe audio file(s) to text using OpenAI's Transcription API.

    Every entry in *audio_paths* is validated first (must resolve inside an
    allowed directory, exist on disk, and carry a recognised audio suffix);
    only then is each file transcribed independently.

    Args:
        audio_paths: List of paths to input audio files (WAV, MP3, M4A, etc.)
            - Relative path: Resolved relative to workspace
            - Absolute path: Must be within allowed directories
        model: Model to use (default: "gpt-4o-transcribe")

    Returns:
        Dictionary containing:
        - success: Whether operation succeeded
        - operation: "generate_text_with_input_audio"
        - transcriptions: List of transcription results for each file
        - audio_files: List of paths to the input audio files
        - model: Model used

    Examples:
        generate_text_with_input_audio(["recording.wav"])
        → Returns transcription for recording.wav

        generate_text_with_input_audio(["interview1.mp3", "interview2.mp3"])
        → Returns separate transcriptions for each file

    Security:
        - Requires valid OpenAI API key
        - All input audio files must exist and be readable
    """
    try:
        # Prefer the .env sitting at the project root; otherwise let
        # python-dotenv search the current directory and its parents.
        project_root = Path(__file__).parent.parent.parent
        dotenv_file = project_root / ".env"
        if dotenv_file.exists():
            load_dotenv(dotenv_file)
        else:
            load_dotenv()

        api_key = os.getenv("OPENAI_API_KEY")

        if not api_key:
            return {
                "success": False,
                "operation": "generate_text_with_input_audio",
                "error": "OpenAI API key not found. Please set OPENAI_API_KEY in .env file or environment variable.",
            }

        client = OpenAI(api_key=api_key)

        # Suffixes we accept as audio input (compared lower-cased).
        recognised_suffixes = (".wav", ".mp3", ".m4a", ".mp4", ".ogg", ".flac", ".aac", ".wma", ".opus")

        # Validate every input path before touching the API.
        resolved_paths = []
        for raw_path in audio_paths:
            candidate = Path(raw_path)
            if candidate.is_absolute():
                resolved = candidate.resolve()
            else:
                resolved = (Path.cwd() / raw_path).resolve()

            # Reject anything outside the allowed directory set.
            _validate_path_access(resolved, mcp.allowed_paths)

            if not resolved.exists():
                return {
                    "success": False,
                    "operation": "generate_text_with_input_audio",
                    "error": f"Audio file does not exist: {resolved}",
                }

            if resolved.suffix.lower() not in recognised_suffixes:
                return {
                    "success": False,
                    "operation": "generate_text_with_input_audio",
                    "error": f"File does not appear to be an audio file: {resolved}",
                }

            resolved_paths.append(resolved)

        # Transcribe each file separately; abort on the first API failure.
        results = []
        for resolved in resolved_paths:
            try:
                with open(resolved, "rb") as audio_handle:
                    # Plain text transcription, no extra prompt.
                    text = client.audio.transcriptions.create(
                        model=model,
                        file=audio_handle,
                        response_format="text",
                    )
                results.append(
                    {
                        "file": str(resolved),
                        "transcription": text,
                    },
                )
            except Exception as api_error:
                return {
                    "success": False,
                    "operation": "generate_text_with_input_audio",
                    "error": f"Transcription API error for file {resolved}: {str(api_error)}",
                }

        return {
            "success": True,
            "operation": "generate_text_with_input_audio",
            "transcriptions": results,
            "audio_files": [str(p) for p in resolved_paths],
            "model": model,
        }

    except Exception as e:
        return {
            "success": False,
            "operation": "generate_text_with_input_audio",
            "error": f"Failed to transcribe audio: {str(e)}",
        }
|
|
1502
|
+
|
|
1503
|
+
@mcp.tool()
def convert_text_to_speech(
    input_text: str,
    model: str = "gpt-4o-mini-tts",
    voice: str = "alloy",
    instructions: Optional[str] = None,
    storage_path: Optional[str] = None,
    audio_format: str = "mp3",
) -> Dict[str, Any]:
    """
    Convert text (transcription) directly to speech using OpenAI's TTS API with streaming response.

    The text is synthesised via OpenAI's Text-to-Speech endpoint and the
    audio is streamed straight into a timestamped file inside the workspace,
    avoiding an in-memory copy of the full payload.

    Args:
        input_text: The text content to convert to speech (e.g., transcription text)
        model: TTS model to use (default: "gpt-4o-mini-tts")
            Options: "gpt-4o-mini-tts", "tts-1", "tts-1-hd"
        voice: Voice to use for speech synthesis (default: "alloy")
            Options: "alloy", "echo", "fable", "onyx", "nova", "shimmer", "coral", "sage"
        instructions: Optional speaking instructions for tone and style (e.g., "Speak in a cheerful tone")
        storage_path: Directory path where to save the audio file (optional)
            - Relative path: Resolved relative to workspace
            - Absolute path: Must be within allowed directories
            - None/empty: Saves to workspace root
        audio_format: Output audio format (default: "mp3")
            Options: "mp3", "opus", "aac", "flac", "wav", "pcm"

    Returns:
        Dictionary containing:
        - success: Whether operation succeeded
        - operation: "convert_text_to_speech"
        - audio_file: Generated audio file with path and metadata
        - model: TTS model used
        - voice: Voice used
        - format: Audio format used
        - text_length: Length of input text
        - instructions: Speaking instructions if provided

    Examples:
        convert_text_to_speech("Hello world, this is a test.")
        → Converts text to speech and saves as MP3

        convert_text_to_speech(
            "Today is a wonderful day to build something people love!",
            voice="coral",
            instructions="Speak in a cheerful and positive tone."
        )
        → Converts with specific voice and speaking instructions

    Security:
        - Requires valid OpenAI API key
        - Files are saved to specified path within workspace
        - Path must be within allowed directories
    """
    from datetime import datetime

    try:
        # Locate credentials: project-root .env first, default search second.
        project_root = Path(__file__).parent.parent.parent
        dotenv_file = project_root / ".env"
        if dotenv_file.exists():
            load_dotenv(dotenv_file)
        else:
            load_dotenv()

        api_key = os.getenv("OPENAI_API_KEY")

        if not api_key:
            return {
                "success": False,
                "operation": "convert_text_to_speech",
                "error": "OpenAI API key not found. Please set OPENAI_API_KEY in .env file or environment variable.",
            }

        client = OpenAI(api_key=api_key)

        # Work out the output directory, then validate and create it.
        if storage_path:
            target = Path(storage_path)
            if target.is_absolute():
                out_dir = target.resolve()
            else:
                out_dir = (Path.cwd() / storage_path).resolve()
        else:
            out_dir = Path.cwd()

        _validate_path_access(out_dir, mcp.allowed_paths)
        out_dir.mkdir(parents=True, exist_ok=True)

        # Build a timestamped, filesystem-safe filename from the text
        # (first 30 characters, punctuation stripped, spaces → underscores).
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        safe_text = "".join(c for c in input_text[:30] if c.isalnum() or c in (" ", "-", "_")).strip().replace(" ", "_")
        filename = f"speech_{stamp}_{safe_text}.{audio_format}"
        file_path = out_dir / filename

        try:
            request_params = {
                "model": model,
                "voice": voice,
                "input": input_text,
            }

            # Speaking instructions are only honoured by gpt-4o-mini-tts.
            if instructions and model == "gpt-4o-mini-tts":
                request_params["instructions"] = instructions

            # Stream the synthesised audio straight to disk.
            with client.audio.speech.with_streaming_response.create(**request_params) as response:
                response.stream_to_file(file_path)

            return {
                "success": True,
                "operation": "convert_text_to_speech",
                "audio_file": {
                    "file_path": str(file_path),
                    "filename": filename,
                    "size": file_path.stat().st_size,
                    "format": audio_format,
                },
                "model": model,
                "voice": voice,
                "format": audio_format,
                "text_length": len(input_text),
                "instructions": instructions or None,
            }

        except Exception as api_error:
            return {
                "success": False,
                "operation": "convert_text_to_speech",
                "error": f"OpenAI TTS API error: {str(api_error)}",
            }

    except Exception as e:
        return {
            "success": False,
            "operation": "convert_text_to_speech",
            "error": f"Failed to convert text to speech: {str(e)}",
        }
|
|
1657
|
+
|
|
1658
|
+
@mcp.tool()
def generate_and_store_video_no_input_images(
    prompt: str,
    model: str = "sora-2",
    seconds: int = 4,
    storage_path: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Generate a video from a text prompt using OpenAI's Sora-2 API.

    Starts an asynchronous video generation job, polls it until it leaves
    the "queued"/"in_progress" states, then downloads the finished video
    and saves it to the workspace with automatic organization.

    Args:
        prompt: Text description for the video to generate
        model: Model to use (default: "sora-2")
        seconds: Requested clip length in seconds (default: 4); sent to the
            API as a string, as the endpoint expects
        storage_path: Directory path where to save the video (optional)
            - Relative path: Resolved relative to workspace
            - Absolute path: Must be within allowed directories
            - None/empty: Saves to workspace root

    Returns:
        Dictionary containing:
        - success: Whether operation succeeded
        - operation: "generate_and_store_video_no_input_images"
        - video_path: Path to the saved video file
        - filename: Name of the saved file
        - size: File size in bytes
        - model: Model used for generation
        - prompt: The prompt used
        - duration: Time taken for generation in seconds

    Examples:
        generate_and_store_video_no_input_images("A cool cat on a motorcycle in the night")
        → Generates a video and saves to workspace root

        generate_and_store_video_no_input_images("Dancing robot", storage_path="videos/")
        → Generates a video and saves to videos/ directory

    Security:
        - Requires valid OpenAI API key with Sora-2 access
        - Files are saved to specified path within workspace
    """
    import time
    from datetime import datetime

    try:
        # Load environment variables (project-root .env first, then defaults)
        script_dir = Path(__file__).parent.parent.parent
        env_path = script_dir / ".env"
        if env_path.exists():
            load_dotenv(env_path)
        else:
            load_dotenv()

        openai_api_key = os.getenv("OPENAI_API_KEY")

        if not openai_api_key:
            return {
                "success": False,
                "operation": "generate_and_store_video_no_input_images",
                "error": "OpenAI API key not found. Please set OPENAI_API_KEY in .env file or environment variable.",
            }

        # Initialize OpenAI client
        client = OpenAI(api_key=openai_api_key)

        # Determine storage directory
        if storage_path:
            if Path(storage_path).is_absolute():
                storage_dir = Path(storage_path).resolve()
            else:
                storage_dir = (Path.cwd() / storage_path).resolve()
        else:
            storage_dir = Path.cwd()

        # Validate storage directory is within allowed paths
        _validate_path_access(storage_dir, mcp.allowed_paths)

        # Create directory if it doesn't exist
        storage_dir.mkdir(parents=True, exist_ok=True)

        try:
            start_time = time.time()

            # Start video generation (no print statements to avoid MCP JSON parsing issues)
            video = client.videos.create(
                model=model,
                prompt=prompt,
                seconds=str(seconds),
            )

            # Poll silently until the job leaves the queued/in-progress
            # states; sleeping before the retrieve avoids an extra 2 s wait
            # once the job has already reached a terminal state.
            while video.status in ("in_progress", "queued"):
                time.sleep(2)
                video = client.videos.retrieve(video.id)

            if video.status == "failed":
                # The API may omit the error object entirely; fall back to a
                # generic message rather than raising AttributeError.
                message = getattr(
                    getattr(video, "error", None),
                    "message",
                    "Video generation failed",
                )
                return {
                    "success": False,
                    "operation": "generate_and_store_video_no_input_images",
                    "error": message,
                }

            # Download video content
            content = client.videos.download_content(video.id, variant="video")

            # Generate filename with timestamp (first 30 prompt chars,
            # punctuation stripped, spaces → underscores)
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            clean_prompt = "".join(c for c in prompt[:30] if c.isalnum() or c in (" ", "-", "_")).strip()
            clean_prompt = clean_prompt.replace(" ", "_")
            filename = f"{timestamp}_{clean_prompt}.mp4"

            # Full file path
            file_path = storage_dir / filename

            # Write video to file
            content.write_to_file(str(file_path))

            # Wall-clock time for the whole generate + download cycle
            duration = time.time() - start_time

            # Get file size
            file_size = file_path.stat().st_size

            return {
                "success": True,
                "operation": "generate_and_store_video_no_input_images",
                "video_path": str(file_path),
                "filename": filename,
                "size": file_size,
                "model": model,
                "prompt": prompt,
                "duration": duration,
            }

        except Exception as api_error:
            return {
                "success": False,
                "operation": "generate_and_store_video_no_input_images",
                "error": f"OpenAI API error: {str(api_error)}",
            }

    except Exception as e:
        return {
            "success": False,
            "operation": "generate_and_store_video_no_input_images",
            "error": f"Failed to generate or save video: {str(e)}",
        }
|
|
1814
|
+
|
|
1815
|
+
return mcp
|