ultimate-gemini-mcp 3.0.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- src/__init__.py +16 -0
- src/config/__init__.py +44 -0
- src/config/constants.py +68 -0
- src/config/settings.py +143 -0
- src/core/__init__.py +47 -0
- src/core/exceptions.py +62 -0
- src/core/validation.py +117 -0
- src/server.py +168 -0
- src/services/__init__.py +13 -0
- src/services/gemini_client.py +304 -0
- src/services/image_service.py +174 -0
- src/services/prompt_enhancer.py +137 -0
- src/tools/__init__.py +11 -0
- src/tools/batch_generate.py +181 -0
- src/tools/generate_image.py +240 -0
- ultimate_gemini_mcp-3.0.7.dist-info/METADATA +462 -0
- ultimate_gemini_mcp-3.0.7.dist-info/RECORD +20 -0
- ultimate_gemini_mcp-3.0.7.dist-info/WHEEL +4 -0
- ultimate_gemini_mcp-3.0.7.dist-info/entry_points.txt +2 -0
- ultimate_gemini_mcp-3.0.7.dist-info/licenses/LICENSE +31 -0
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Batch image generation tool for processing multiple prompts efficiently.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import json
|
|
7
|
+
import logging
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from ..config import MAX_BATCH_SIZE, get_settings
|
|
11
|
+
from ..core import validate_batch_size, validate_prompts_list
|
|
12
|
+
from .generate_image import generate_image_tool
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
async def batch_generate_images(
    prompts: list[str],
    model: str | None = None,
    enhance_prompt: bool = True,
    aspect_ratio: str = "1:1",
    output_format: str = "png",
    batch_size: int | None = None,
    **shared_params: Any,
) -> dict[str, Any]:
    """
    Generate multiple images from a list of prompts.

    Prompts are processed in parallel chunks of ``batch_size`` via
    ``asyncio.gather``; a failure on one prompt is recorded and does not
    abort the remaining prompts.

    Args:
        prompts: List of text prompts
        model: Model to use for all images
        enhance_prompt: Enhance all prompts
        aspect_ratio: Aspect ratio for all images
        output_format: Output format for all images
        batch_size: Number of images to process in parallel (default: from config)
        **shared_params: Additional parameters shared across all generations
            (forwarded verbatim to ``generate_image_tool``)

    Returns:
        Dict with batch results: counters ("total_prompts", "completed",
        "failed") plus one entry per prompt under "results"
    """
    # Validate inputs
    validate_prompts_list(prompts)

    settings = get_settings()
    if batch_size is None:
        batch_size = settings.api.max_batch_size

    validate_batch_size(batch_size, MAX_BATCH_SIZE)

    # Prepare results
    results: dict[str, Any] = {
        "success": True,
        "total_prompts": len(prompts),
        "batch_size": batch_size,
        "completed": 0,
        "failed": 0,
        "results": [],
    }

    def _record_failure(index: int, prompt: str, error: str) -> None:
        # Shared bookkeeping for both exception results and unexpected types
        # (previously duplicated verbatim in two branches).
        results["failed"] += 1
        results["results"].append(
            {
                "prompt_index": index,
                "prompt": prompt,
                "success": False,
                "error": error,
            }
        )

    # Process prompts in batches
    for i in range(0, len(prompts), batch_size):
        batch = prompts[i : i + batch_size]
        logger.info(f"Processing batch {i // batch_size + 1}: {len(batch)} prompts")

        # Create tasks for parallel processing
        tasks = [
            generate_image_tool(
                prompt=prompt,
                model=model,
                enhance_prompt=enhance_prompt,
                aspect_ratio=aspect_ratio,
                output_format=output_format,
                **shared_params,
            )
            for prompt in batch
        ]

        # return_exceptions=True keeps one failed prompt from cancelling the rest.
        batch_results = await asyncio.gather(*tasks, return_exceptions=True)

        # Process results
        for j, result in enumerate(batch_results):
            prompt_index = i + j

            if isinstance(result, Exception):
                logger.error(f"Failed to generate image for prompt {prompt_index}: {result}")
                _record_failure(prompt_index, batch[j], str(result))
            elif not isinstance(result, dict):
                # Defensive: gather should only yield dicts or exceptions here.
                logger.error(f"Unexpected result type: {type(result)}")
                _record_failure(prompt_index, batch[j], "Unexpected result type")
            else:
                results["completed"] += 1
                results["results"].append(
                    {"prompt_index": prompt_index, "prompt": batch[j], **result}
                )

    return results
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def register_batch_generate_tool(mcp_server: Any) -> None:
|
|
119
|
+
"""Register batch_generate tool with MCP server."""
|
|
120
|
+
|
|
121
|
+
@mcp_server.tool()
|
|
122
|
+
async def batch_generate(
|
|
123
|
+
prompts: list[str],
|
|
124
|
+
model: str | None = None,
|
|
125
|
+
enhance_prompt: bool = True,
|
|
126
|
+
aspect_ratio: str = "1:1",
|
|
127
|
+
output_format: str = "png",
|
|
128
|
+
batch_size: int | None = None,
|
|
129
|
+
negative_prompt: str | None = None,
|
|
130
|
+
) -> str:
|
|
131
|
+
"""
|
|
132
|
+
Generate multiple images from a list of prompts efficiently.
|
|
133
|
+
|
|
134
|
+
Processes prompts in parallel batches for optimal performance.
|
|
135
|
+
All images share the same generation settings.
|
|
136
|
+
|
|
137
|
+
Args:
|
|
138
|
+
prompts: List of text descriptions for image generation
|
|
139
|
+
model: Model to use for all images (default: gemini-3-pro-image-preview)
|
|
140
|
+
enhance_prompt: Enhance all prompts automatically (default: True)
|
|
141
|
+
aspect_ratio: Aspect ratio for all images (default: 1:1)
|
|
142
|
+
output_format: Image format for all images (default: png)
|
|
143
|
+
batch_size: Parallel batch size (default: from config)
|
|
144
|
+
negative_prompt: Negative prompt for Imagen models (optional)
|
|
145
|
+
|
|
146
|
+
Returns:
|
|
147
|
+
JSON string with batch results including individual image paths
|
|
148
|
+
|
|
149
|
+
IMPORTANT - AI Assistant Instructions:
|
|
150
|
+
After batch generation completes, you MUST:
|
|
151
|
+
1. Parse the JSON response to extract file paths from result["results"][i]["images"][0]["path"]
|
|
152
|
+
2. Show the user a summary of all generated images with their file paths
|
|
153
|
+
3. Optionally display one or more images using the Read tool
|
|
154
|
+
4. Let the user know the total count of successful vs failed generations
|
|
155
|
+
|
|
156
|
+
Example response to user:
|
|
157
|
+
"Successfully generated 3 images:
|
|
158
|
+
1. /path/to/image1.png - [description]
|
|
159
|
+
2. /path/to/image2.png - [description]
|
|
160
|
+
3. /path/to/image3.png - [description]"
|
|
161
|
+
|
|
162
|
+
DO NOT just say "batch generation completed" without listing the file paths!
|
|
163
|
+
"""
|
|
164
|
+
try:
|
|
165
|
+
result = await batch_generate_images(
|
|
166
|
+
prompts=prompts,
|
|
167
|
+
model=model,
|
|
168
|
+
enhance_prompt=enhance_prompt,
|
|
169
|
+
aspect_ratio=aspect_ratio,
|
|
170
|
+
output_format=output_format,
|
|
171
|
+
batch_size=batch_size,
|
|
172
|
+
negative_prompt=negative_prompt,
|
|
173
|
+
)
|
|
174
|
+
|
|
175
|
+
return json.dumps(result, indent=2)
|
|
176
|
+
|
|
177
|
+
except Exception as e:
|
|
178
|
+
logger.error(f"Batch generation error: {e}")
|
|
179
|
+
return json.dumps(
|
|
180
|
+
{"success": False, "error": str(e), "error_type": type(e).__name__}, indent=2
|
|
181
|
+
)
|
|
@@ -0,0 +1,240 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Image generation tool supporting both Gemini and Imagen models.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import base64
|
|
6
|
+
import json
|
|
7
|
+
import logging
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
from ..config import get_settings
|
|
12
|
+
from ..core import (
|
|
13
|
+
validate_aspect_ratio,
|
|
14
|
+
validate_image_format,
|
|
15
|
+
validate_model,
|
|
16
|
+
validate_prompt,
|
|
17
|
+
)
|
|
18
|
+
from ..services import ImageService
|
|
19
|
+
|
|
20
|
+
logger = logging.getLogger(__name__)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
async def generate_image_tool(
    prompt: str,
    model: str | None = None,
    enhance_prompt: bool = True,
    aspect_ratio: str = "1:1",
    image_size: str = "2K",
    output_format: str = "png",
    # Reference images (up to 14)
    reference_image_paths: list[str] | None = None,
    # Google Search grounding
    enable_google_search: bool = False,
    # Response modalities
    response_modalities: list[str] | None = None,
    # Output options
    save_to_disk: bool = True,
    **kwargs: Any,
) -> dict[str, Any]:
    """
    Generate images using Gemini 3 Pro Image.

    Args:
        prompt: Text description for image generation
        model: Model to use (falls back to the configured default when None)
        enhance_prompt: Automatically enhance prompt for better results
        aspect_ratio: Image aspect ratio (1:1, 16:9, 9:16, etc.)
        image_size: Image resolution: 1K, 2K, or 4K (default: 2K)
        output_format: Image format (png, jpeg, webp)
        reference_image_paths: Paths to reference images (at most 14 are used)
        enable_google_search: Use Google Search for real-time data grounding
        response_modalities: Response types (TEXT, IMAGE - default: both)
        save_to_disk: Save images to output directory
        **kwargs: Accepted for forward compatibility.
            NOTE(review): currently ignored - e.g. ``negative_prompt`` passed
            through by the batch tool never reaches the service; confirm intended.

    Returns:
        Dict with generated images and metadata
    """
    # Input validation (each validator raises on invalid values).
    validate_prompt(prompt)
    if model:
        validate_model(model)
    validate_aspect_ratio(aspect_ratio)
    # NOTE(review): output_format is validated here but never forwarded to the
    # service call below - confirm where format selection actually happens.
    validate_image_format(output_format)

    settings = get_settings()

    # Fall back to the configured default model.
    if model is None:
        model = settings.api.default_model

    service = ImageService(
        api_key=settings.api.gemini_api_key,
        enable_enhancement=settings.api.enable_prompt_enhancement,
        timeout=settings.api.request_timeout,
    )

    try:
        # Generation parameters for Gemini 3 Pro Image.
        gen_params: dict[str, Any] = {
            "aspect_ratio": aspect_ratio,
            "image_size": image_size,
        }

        # Base64-encode any existing reference images (hard cap of 14).
        if reference_image_paths:
            encoded_refs: list[str] = []
            for img_path in reference_image_paths[:14]:
                candidate = Path(img_path)
                if not candidate.exists():
                    logger.warning(f"Reference image not found: {img_path}")
                    continue
                encoded_refs.append(base64.b64encode(candidate.read_bytes()).decode())
            if encoded_refs:
                gen_params["reference_images"] = encoded_refs

        # Optional flags are only included when set.
        if enable_google_search:
            gen_params["enable_google_search"] = True
        if response_modalities:
            gen_params["response_modalities"] = response_modalities

        results = await service.generate(
            prompt=prompt,
            model=model,
            # Enhancement runs only when both the caller and the config allow it.
            enhance_prompt=enhance_prompt and settings.api.enable_prompt_enhancement,
            **gen_params,
        )

        # Assemble the MCP-facing response.
        response: dict[str, Any] = {
            "success": True,
            "model": model,
            "prompt": prompt,
            "images_generated": len(results),
            "images": [],
            "metadata": {
                "enhance_prompt": enhance_prompt,
                "aspect_ratio": aspect_ratio,
            },
        }

        for generated in results:
            entry: dict[str, Any] = {
                "index": generated.index,
                "size": generated.get_size(),
                "timestamp": generated.timestamp.isoformat(),
            }

            if save_to_disk:
                # Persist to the configured output directory.
                saved_path = generated.save(settings.output_dir)
                entry["path"] = str(saved_path)
                entry["filename"] = saved_path.name

            # Surface the enhanced prompt when the service recorded one.
            if "enhanced_prompt" in generated.metadata:
                entry["enhanced_prompt"] = generated.metadata["enhanced_prompt"]

            response["images"].append(entry)

        return response

    finally:
        # Always release the service's underlying client/session.
        await service.close()
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def register_generate_image_tool(mcp_server: Any) -> None:
|
|
156
|
+
"""Register generate_image tool with MCP server."""
|
|
157
|
+
|
|
158
|
+
@mcp_server.tool()
|
|
159
|
+
async def generate_image(
|
|
160
|
+
prompt: str,
|
|
161
|
+
model: str | None = None,
|
|
162
|
+
enhance_prompt: bool = True,
|
|
163
|
+
aspect_ratio: str = "1:1",
|
|
164
|
+
image_size: str = "2K",
|
|
165
|
+
output_format: str = "png",
|
|
166
|
+
reference_image_paths: list[str] | None = None,
|
|
167
|
+
enable_google_search: bool = False,
|
|
168
|
+
response_modalities: list[str] | None = None,
|
|
169
|
+
) -> str:
|
|
170
|
+
"""
|
|
171
|
+
Generate images using Gemini 3 Pro Image - a state-of-the-art image generation model
|
|
172
|
+
optimized for professional asset production with advanced reasoning capabilities.
|
|
173
|
+
|
|
174
|
+
Features:
|
|
175
|
+
- High-resolution output: 1K, 2K, and 4K visuals
|
|
176
|
+
- Advanced text rendering for infographics, menus, diagrams
|
|
177
|
+
- Up to 14 reference images for consistent style/characters
|
|
178
|
+
- Google Search grounding for real-time data (weather, stocks, events)
|
|
179
|
+
- Thinking mode: Uses reasoning to refine composition
|
|
180
|
+
|
|
181
|
+
Args:
|
|
182
|
+
prompt: Text description of the image to generate
|
|
183
|
+
model: Model to use (default: gemini-3-pro-image-preview)
|
|
184
|
+
enhance_prompt: Automatically enhance prompt using AI (default: True)
|
|
185
|
+
aspect_ratio: Image aspect ratio like 1:1, 16:9, 9:16, 3:2, 4:5, etc. (default: 1:1)
|
|
186
|
+
image_size: Image resolution: 1K, 2K, or 4K (default: 2K)
|
|
187
|
+
output_format: Image format: png, jpeg, webp (default: png)
|
|
188
|
+
reference_image_paths: Paths to reference images (up to 14 total, max 6 objects, max 5 humans)
|
|
189
|
+
enable_google_search: Enable Google Search grounding for real-time data
|
|
190
|
+
response_modalities: Response types like ["TEXT", "IMAGE"] (default: both)
|
|
191
|
+
|
|
192
|
+
Available models:
|
|
193
|
+
- gemini-3-pro-image-preview (default and only model)
|
|
194
|
+
|
|
195
|
+
Returns:
|
|
196
|
+
JSON string with generation results, file paths, thoughts, and grounding metadata
|
|
197
|
+
|
|
198
|
+
IMPORTANT - AI Assistant Instructions:
|
|
199
|
+
After generating an image, you MUST:
|
|
200
|
+
1. Parse the JSON response to extract the file path from result["images"][0]["path"]
|
|
201
|
+
2. Inform the user of the EXACT file path where the image was saved
|
|
202
|
+
3. Use the Read tool to load and display the image to the user
|
|
203
|
+
4. If thoughts were generated, show the thinking process to the user
|
|
204
|
+
5. Optionally: Use bash to open the image in the default viewer:
|
|
205
|
+
- macOS: `open /path/to/image.png`
|
|
206
|
+
- Linux: `xdg-open /path/to/image.png`
|
|
207
|
+
- Windows: `start /path/to/image.png`
|
|
208
|
+
|
|
209
|
+
Example workflow:
|
|
210
|
+
```
|
|
211
|
+
1. Call generate_image tool
|
|
212
|
+
2. Parse response JSON to get file_path and thoughts
|
|
213
|
+
3. Tell user: "Image saved to: /Users/name/gemini_images/image.png"
|
|
214
|
+
4. Show thinking process if present
|
|
215
|
+
5. Call Read tool with the file_path to display the image
|
|
216
|
+
6. Optionally call Bash with `open /path/to/image.png` to open in Preview
|
|
217
|
+
```
|
|
218
|
+
|
|
219
|
+
DO NOT just say "image generated successfully" without showing the path and image!
|
|
220
|
+
"""
|
|
221
|
+
try:
|
|
222
|
+
result = await generate_image_tool(
|
|
223
|
+
prompt=prompt,
|
|
224
|
+
model=model,
|
|
225
|
+
enhance_prompt=enhance_prompt,
|
|
226
|
+
aspect_ratio=aspect_ratio,
|
|
227
|
+
image_size=image_size,
|
|
228
|
+
output_format=output_format,
|
|
229
|
+
reference_image_paths=reference_image_paths,
|
|
230
|
+
enable_google_search=enable_google_search,
|
|
231
|
+
response_modalities=response_modalities,
|
|
232
|
+
)
|
|
233
|
+
|
|
234
|
+
return json.dumps(result, indent=2)
|
|
235
|
+
|
|
236
|
+
except Exception as e:
|
|
237
|
+
logger.error(f"Error generating image: {e}")
|
|
238
|
+
return json.dumps(
|
|
239
|
+
{"success": False, "error": str(e), "error_type": type(e).__name__}, indent=2
|
|
240
|
+
)
|