ultimate-gemini-mcp 1.0.3__tar.gz → 1.0.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ultimate-gemini-mcp might be problematic.

Files changed (31)
  1. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/PKG-INFO +1 -1
  2. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/pyproject.toml +1 -1
  3. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/__init__.py +1 -1
  4. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/config/constants.py +7 -7
  5. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/config/settings.py +3 -8
  6. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/core/__init__.py +0 -2
  7. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/core/exceptions.py +3 -1
  8. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/core/validation.py +6 -23
  9. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/server.py +10 -11
  10. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/services/gemini_client.py +18 -33
  11. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/services/image_service.py +11 -49
  12. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/services/imagen_client.py +17 -22
  13. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/services/prompt_enhancer.py +2 -6
  14. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/tools/batch_generate.py +17 -22
  15. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/tools/generate_image.py +7 -29
  16. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/.env.example +0 -0
  17. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/.github/workflows/README.md +0 -0
  18. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/.github/workflows/claude-code-review.yml +0 -0
  19. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/.github/workflows/claude.yml +0 -0
  20. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/.github/workflows/publish.yml +0 -0
  21. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/.github/workflows/test.yml +0 -0
  22. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/.gitignore +0 -0
  23. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/CLAUDE.md +0 -0
  24. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/LICENSE +0 -0
  25. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/MANIFEST.in +0 -0
  26. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/PUBLISHING.md +0 -0
  27. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/README.md +0 -0
  28. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/TEST_RESULTS.md +0 -0
  29. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/config/__init__.py +0 -0
  30. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/services/__init__.py +1 -1
  31. {ultimate_gemini_mcp-1.0.3 → ultimate_gemini_mcp-1.0.5}/src/tools/__init__.py +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ultimate-gemini-mcp
- Version: 1.0.3
+ Version: 1.0.5
  Summary: Ultimate image generation MCP server unifying Gemini 2.5 Flash Image and Imagen 4/Fast/Ultra with advanced features
  Project-URL: Homepage, https://github.com/anand-92/ultimate-image-gen-mcp
  Project-URL: Repository, https://github.com/anand-92/ultimate-image-gen-mcp

pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "ultimate-gemini-mcp"
- version = "1.0.3"
+ version = "1.0.5"
  description = "Ultimate image generation MCP server unifying Gemini 2.5 Flash Image and Imagen 4/Fast/Ultra with advanced features"
  readme = "README.md"
  requires-python = ">=3.11"

src/__init__.py
@@ -7,7 +7,7 @@ A unified MCP server that combines the best features from:
  - Advanced features: batch processing, editing, templates, and more
  """

- __version__ = "1.0.3"
+ __version__ = "1.0.5"
  __author__ = "Ultimate Gemini MCP"

  from .config import get_settings

src/config/constants.py
@@ -31,13 +31,13 @@ DEFAULT_ENHANCEMENT_MODEL = "gemini-flash-latest"

  # Aspect ratios
  ASPECT_RATIOS = [
- "1:1", # Square
- "2:3", # Portrait
- "3:2", # Landscape
- "3:4", # Portrait
- "4:3", # Standard landscape
- "4:5", # Portrait
- "5:4", # Landscape
+ "1:1", # Square
+ "2:3", # Portrait
+ "3:2", # Landscape
+ "3:4", # Portrait
+ "4:3", # Standard landscape
+ "4:5", # Portrait
+ "5:4", # Landscape
  "9:16", # Vertical mobile
  "16:9", # Widescreen
  "21:9", # Ultrawide

src/config/settings.py
@@ -4,7 +4,6 @@ Configuration settings for the Ultimate Gemini MCP server.

  import os
  from pathlib import Path
- from typing import Optional

  from pydantic import Field
  from pydantic_settings import BaseSettings, SettingsConfigDict
@@ -81,9 +80,7 @@ class APIConfig(BaseSettings):
  enable_prompt_enhancement: bool = Field(
  default=True, description="Enable automatic prompt enhancement"
  )
- enable_batch_processing: bool = Field(
- default=True, description="Enable batch processing"
- )
+ enable_batch_processing: bool = Field(default=True, description="Enable batch processing")

  # Request settings
  request_timeout: int = Field(default=DEFAULT_TIMEOUT, description="API request timeout")
@@ -104,9 +101,7 @@ class APIConfig(BaseSettings):
  self.gemini_api_key = os.getenv("GOOGLE_API_KEY", "")

  if not self.gemini_api_key:
- raise ValueError(
- "GEMINI_API_KEY or GOOGLE_API_KEY environment variable is required"
- )
+ raise ValueError("GEMINI_API_KEY or GOOGLE_API_KEY environment variable is required")

  @classmethod
  def from_env(cls) -> "APIConfig":
@@ -132,7 +127,7 @@ class Settings:


  # Global settings instance (lazy initialization)
- _settings: Optional[Settings] = None
+ _settings: Settings | None = None


  def get_settings() -> Settings:

src/core/__init__.py
@@ -21,7 +21,6 @@ from .validation import (
  validate_model,
  validate_negative_prompt,
  validate_number_of_images,
- validate_person_generation,
  validate_prompt,
  validate_prompts_list,
  validate_seed,
@@ -45,7 +44,6 @@ __all__ = [
  "validate_aspect_ratio",
  "validate_number_of_images",
  "validate_image_format",
- "validate_person_generation",
  "validate_seed",
  "validate_file_path",
  "validate_base64_image",

src/core/exceptions.py
@@ -24,7 +24,9 @@ class ValidationError(UltimateGeminiError):
  class APIError(UltimateGeminiError):
  """Raised when an API request fails."""

- def __init__(self, message: str, status_code: int | None = None, response_data: dict | None = None):
+ def __init__(
+ self, message: str, status_code: int | None = None, response_data: dict | None = None
+ ):
  super().__init__(message)
  self.status_code = status_code
  self.response_data = response_data

src/core/validation.py
@@ -5,7 +5,6 @@ Input validation utilities.
  import base64
  import re
  from pathlib import Path
- from typing import Any

  from ..config.constants import (
  ALL_MODELS,
@@ -14,7 +13,6 @@ from ..config.constants import (
  MAX_IMAGES_PER_REQUEST,
  MAX_NEGATIVE_PROMPT_LENGTH,
  MAX_PROMPT_LENGTH,
- PERSON_GENERATION_OPTIONS,
  )
  from .exceptions import ValidationError

@@ -50,9 +48,7 @@ def validate_aspect_ratio(aspect_ratio: str) -> None:
  """Validate aspect ratio."""
  if aspect_ratio not in ASPECT_RATIOS:
  available = ", ".join(ASPECT_RATIOS)
- raise ValidationError(
- f"Invalid aspect ratio '{aspect_ratio}'. Available: {available}"
- )
+ raise ValidationError(f"Invalid aspect ratio '{aspect_ratio}'. Available: {available}")


  def validate_number_of_images(num: int) -> None:
@@ -61,27 +57,14 @@ def validate_number_of_images(num: int) -> None:
  raise ValidationError(f"Number of images must be at least 1, got {num}")

  if num > MAX_IMAGES_PER_REQUEST:
- raise ValidationError(
- f"Number of images exceeds maximum: {num} > {MAX_IMAGES_PER_REQUEST}"
- )
+ raise ValidationError(f"Number of images exceeds maximum: {num} > {MAX_IMAGES_PER_REQUEST}")


  def validate_image_format(format_str: str) -> None:
  """Validate image format."""
  if format_str.lower() not in IMAGE_FORMATS:
  available = ", ".join(IMAGE_FORMATS.keys())
- raise ValidationError(
- f"Invalid image format '{format_str}'. Available: {available}"
- )
-
-
- def validate_person_generation(option: str) -> None:
- """Validate person generation option."""
- if option not in PERSON_GENERATION_OPTIONS:
- available = ", ".join(PERSON_GENERATION_OPTIONS)
- raise ValidationError(
- f"Invalid person generation option '{option}'. Available: {available}"
- )
+ raise ValidationError(f"Invalid image format '{format_str}'. Available: {available}")


  def validate_seed(seed: int | None) -> None:
@@ -98,7 +81,7 @@ def validate_file_path(path: str) -> Path:
  try:
  file_path = Path(path).resolve()
  except Exception as e:
- raise ValidationError(f"Invalid file path '{path}': {e}")
+ raise ValidationError(f"Invalid file path '{path}': {e}") from e

  if not file_path.exists():
  raise ValidationError(f"File does not exist: {file_path}")
@@ -120,7 +103,7 @@ def validate_base64_image(data: str) -> None:
  if len(decoded) == 0:
  raise ValidationError("Decoded image data is empty")
  except Exception as e:
- raise ValidationError(f"Invalid base64 image data: {e}")
+ raise ValidationError(f"Invalid base64 image data: {e}") from e


  def validate_prompts_list(prompts: list[str]) -> None:
@@ -137,7 +120,7 @@ def validate_prompts_list(prompts: list[str]) -> None:
  try:
  validate_prompt(prompt)
  except ValidationError as e:
- raise ValidationError(f"Invalid prompt at index {i}: {e}")
+ raise ValidationError(f"Invalid prompt at index {i}: {e}") from e


  def sanitize_filename(filename: str) -> str:

src/server.py
@@ -10,7 +10,6 @@ Unified MCP server supporting:

  import logging
  import sys
- from pathlib import Path

  from fastmcp import FastMCP

@@ -69,9 +68,9 @@ def create_app() -> FastMCP:
  "Image editing",
  "Character consistency",
  "Multi-image blending",
- "World knowledge integration"
+ "World knowledge integration",
  ],
- "default": True
+ "default": True,
  }
  },
  "imagen": {
@@ -84,8 +83,8 @@ def create_app() -> FastMCP:
  "Negative prompts",
  "Seed-based reproducibility",
  "Person generation controls",
- "Advanced controls"
- ]
+ "Advanced controls",
+ ],
  },
  "imagen-4-fast": {
  "name": "Imagen 4 Fast",
@@ -96,8 +95,8 @@ def create_app() -> FastMCP:
  "Negative prompts",
  "Seed-based reproducibility",
  "Person generation controls",
- "Cost-effective"
- ]
+ "Cost-effective",
+ ],
  },
  "imagen-4-ultra": {
  "name": "Imagen 4 Ultra",
@@ -107,10 +106,10 @@ def create_app() -> FastMCP:
  "Best prompt adherence",
  "Professional results",
  "Enhanced text rendering",
- "Advanced controls"
- ]
- }
- }
+ "Advanced controls",
+ ],
+ },
+ },
  }

  return json.dumps(models_info, indent=2)

src/services/gemini_client.py
@@ -3,7 +3,6 @@ Gemini API client for Gemini 2.5 Flash Image generation.
  Uses the generateContent API endpoint per Google's documentation.
  """

- import base64
  import logging
  from typing import Any

@@ -69,12 +68,7 @@ class GeminiClient:

  # Add input image if provided (for editing)
  if input_image:
- parts.append({
- "inline_data": {
- "mime_type": "image/png",
- "data": input_image
- }
- })
+ parts.append({"inline_data": {"mime_type": "image/png", "data": input_image}})

  # Add text prompt (include aspect ratio hint if specified)
  prompt_text = prompt
@@ -84,20 +78,13 @@
  parts.append({"text": prompt_text})

  # Build generation config for image generation
- generation_config = {
- "responseModalities": ["Image"]
- }
+ generation_config = {"responseModalities": ["Image"]}

  # Add aspect ratio to image config if specified
  if aspect_ratio:
- generation_config["imageConfig"] = {
- "aspectRatio": aspect_ratio
- }
+ generation_config["imageConfig"] = {"aspectRatio": aspect_ratio}

- request_body = {
- "contents": [{"parts": parts}],
- "generationConfig": generation_config
- }
+ request_body = {"contents": [{"parts": parts}], "generationConfig": generation_config}

  headers = {
  "x-goog-api-key": self.api_key,
@@ -118,7 +105,9 @@
  images = self._extract_images(data)

  if not images:
- logger.error(f"No images extracted from response. Response structure: {list(data.keys())}")
+ logger.error(
+ f"No images extracted from response. Response structure: {list(data.keys())}"
+ )
  if "candidates" in data:
  logger.error(f"Candidates: {data['candidates']}")
  raise APIError("No image data found in Gemini API response")
@@ -129,7 +118,7 @@
  self._handle_http_error(e)
  except Exception as e:
  logger.error(f"Gemini API request failed: {e}")
- raise APIError(f"Gemini API request failed: {e}")
+ raise APIError(f"Gemini API request failed: {e}") from e

  async def generate_text(
  self,
@@ -152,14 +141,10 @@
  model_id = GEMINI_MODELS.get(model, model)
  url = f"{self.base_url}/models/{model_id}:generateContent"

- request_body = {
- "contents": [{"parts": [{"text": prompt}]}]
- }
+ request_body = {"contents": [{"parts": [{"text": prompt}]}]}

  if system_instruction:
- request_body["system_instruction"] = {
- "parts": [{"text": system_instruction}]
- }
+ request_body["system_instruction"] = {"parts": [{"text": system_instruction}]}

  headers = {
  "x-goog-api-key": self.api_key,
@@ -179,7 +164,7 @@
  self._handle_http_error(e)
  except Exception as e:
  logger.error(f"Gemini text generation failed: {e}")
- raise APIError(f"Gemini text generation failed: {e}")
+ raise APIError(f"Gemini text generation failed: {e}") from e

  def _extract_images(self, response_data: dict[str, Any]) -> list[str]:
  """Extract base64 image data from Gemini API response."""
@@ -230,23 +215,23 @@

  if status_code == 401 or status_code == 403:
  raise AuthenticationError(
- "Authentication failed. Please check your Gemini API key.",
- status_code=status_code
+ "Authentication failed. Please check your Gemini API key.", status_code=status_code
  )
  elif status_code == 429:
  raise RateLimitError(
- "Rate limit exceeded. Please try again later.",
- status_code=status_code
+ "Rate limit exceeded. Please try again later.", status_code=status_code
  )
- elif status_code == 400 and ("SAFETY" in error_text.upper() or "BLOCKED" in error_text.upper()):
+ elif status_code == 400 and (
+ "SAFETY" in error_text.upper() or "BLOCKED" in error_text.upper()
+ ):
  raise ContentPolicyError(
  "Content was blocked by safety filters. Please modify your prompt.",
- status_code=status_code
+ status_code=status_code,
  )
  else:
  raise APIError(
  f"API request failed with status {status_code}: {error_text}",
- status_code=status_code
+ status_code=status_code,
  )

  async def close(self) -> None:

src/services/image_service.py
@@ -9,8 +9,6 @@ from datetime import datetime
  from pathlib import Path
  from typing import Any

- from PIL import Image
-
  from ..config.constants import GEMINI_MODELS, IMAGEN_MODELS
  from ..core import sanitize_filename
  from ..core.exceptions import ImageProcessingError
@@ -30,7 +28,7 @@
  prompt: str,
  model: str,
  index: int = 0,
- metadata: dict[str, Any] | None = None
+ metadata: dict[str, Any] | None = None,
  ):
  self.image_data = image_data # Base64-encoded
  self.prompt = prompt
@@ -53,7 +51,7 @@
  logger.info(f"Saved image to {output_path}")
  return output_path
  except Exception as e:
- raise ImageProcessingError(f"Failed to save image: {e}")
+ raise ImageProcessingError(f"Failed to save image: {e}") from e

  def _generate_filename(self) -> str:
  """Generate descriptive filename."""
@@ -71,13 +69,7 @@
  class ImageService:
  """Unified service for image generation using Gemini or Imagen."""

- def __init__(
- self,
- api_key: str,
- *,
- enable_enhancement: bool = True,
- timeout: int = 60
- ):
+ def __init__(self, api_key: str, *, enable_enhancement: bool = True, timeout: int = 60):
  """
  Initialize image service.

@@ -100,12 +92,7 @@
  self.prompt_enhancer = PromptEnhancer(self.gemini_client)

  async def generate(
- self,
- prompt: str,
- *,
- model: str | None = None,
- enhance_prompt: bool = True,
- **kwargs: Any
+ self, prompt: str, *, model: str | None = None, enhance_prompt: bool = True, **kwargs: Any
  ) -> list[ImageResult]:
  """
  Generate images using the appropriate API.
@@ -136,8 +123,7 @@
  if enhance_prompt and self.enable_enhancement and self.prompt_enhancer:
  try:
  result = await self.prompt_enhancer.enhance_prompt(
- prompt,
- context=enhancement_context
+ prompt, context=enhancement_context
  )
  prompt = result["enhanced_prompt"]
  logger.info(f"Prompt enhanced: {len(original_prompt)} -> {len(prompt)} chars")
@@ -151,18 +137,10 @@
  return await self._generate_with_imagen(prompt, model, original_prompt, kwargs)

  async def _generate_with_gemini(
- self,
- prompt: str,
- model: str,
- original_prompt: str,
- params: dict[str, Any]
+ self, prompt: str, model: str, original_prompt: str, params: dict[str, Any]
  ) -> list[ImageResult]:
  """Generate images using Gemini API."""
- response = await self.gemini_client.generate_image(
- prompt=prompt,
- model=model,
- **params
- )
+ response = await self.gemini_client.generate_image(prompt=prompt, model=model, **params)

  images = response["images"]
  results = []
@@ -173,29 +151,17 @@
  prompt=original_prompt,
  model=model,
  index=i,
- metadata={
- "enhanced_prompt": prompt,
- "api": "gemini",
- **params
- }
+ metadata={"enhanced_prompt": prompt, "api": "gemini", **params},
  )
  results.append(result)

  return results

  async def _generate_with_imagen(
- self,
- prompt: str,
- model: str,
- original_prompt: str,
- params: dict[str, Any]
+ self, prompt: str, model: str, original_prompt: str, params: dict[str, Any]
  ) -> list[ImageResult]:
  """Generate images using Imagen API."""
- response = await self.imagen_client.generate_image(
- prompt=prompt,
- model=model,
- **params
- )
+ response = await self.imagen_client.generate_image(prompt=prompt, model=model, **params)

  images = response["images"]
  results = []
@@ -206,11 +172,7 @@
  prompt=original_prompt,
  model=model,
  index=i,
- metadata={
- "enhanced_prompt": prompt,
- "api": "imagen",
- **params
- }
+ metadata={"enhanced_prompt": prompt, "api": "imagen", **params},
  )
  results.append(result)


src/services/imagen_client.py
@@ -73,19 +73,22 @@

  # Build request body according to Imagen API
  request_body: dict[str, Any] = {
- "instances": [
- {
- "prompt": prompt
- }
- ],
+ "instances": [{"prompt": prompt}],
  "parameters": {
  "outputMimeType": output_format,
  "sampleCount": number_of_images,
  "personGeneration": person_generation,
- "aspectRatio": aspect_ratio
- }
+ "aspectRatio": aspect_ratio,
+ },
  }

+ # Add imageSize for models that support it (Standard and Ultra, not Fast)
+ # Fast model only supports 1K, while Standard and Ultra support up to 2K
+ if model in ["imagen-4", "imagen-4-ultra"]:
+ request_body["parameters"]["imageSize"] = "2K"
+ elif model == "imagen-4-fast":
+ request_body["parameters"]["imageSize"] = "1K"
+
  # Add optional parameters
  if negative_prompt:
  request_body["instances"][0]["negativePrompt"] = negative_prompt
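
As a rough self-contained sketch of the payload this hunk now builds: key names are copied from the diff, while the helper name and defaults (including person_generation="allow_all", which 1.0.5 hard-codes on the tool side) are assumptions for illustration:

from typing import Any

def build_imagen_request(
    prompt: str,
    model: str,
    aspect_ratio: str = "1:1",
    number_of_images: int = 1,
    output_format: str = "image/png",
    person_generation: str = "allow_all",
    negative_prompt: str | None = None,
) -> dict[str, Any]:
    """Mirror the 1.0.5 request body: imageSize is keyed off the model tier."""
    body: dict[str, Any] = {
        "instances": [{"prompt": prompt}],
        "parameters": {
            "outputMimeType": output_format,
            "sampleCount": number_of_images,
            "personGeneration": person_generation,
            "aspectRatio": aspect_ratio,
        },
    }
    # Standard and Ultra accept 2K output; Fast is limited to 1K
    if model in ("imagen-4", "imagen-4-ultra"):
        body["parameters"]["imageSize"] = "2K"
    elif model == "imagen-4-fast":
        body["parameters"]["imageSize"] = "1K"
    if negative_prompt:
        body["instances"][0]["negativePrompt"] = negative_prompt
    return body

# Example: build_imagen_request("a red fox in snow", "imagen-4-fast")["parameters"]["imageSize"] == "1K"
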
@@ -106,9 +109,7 @@
  logger.debug(f"Sending request to {url}")
  # Add API key as query parameter
  response = await self.client.post(
- f"{url}?key={self.api_key}",
- json=request_body,
- headers=headers
+ f"{url}?key={self.api_key}", json=request_body, headers=headers
  )
  response.raise_for_status()
  data = response.json()
@@ -119,17 +120,13 @@
  if not images:
  raise APIError("No image data found in Imagen API response")

- return {
- "images": images,
- "model": model,
- "response": data
- }
+ return {"images": images, "model": model, "response": data}

  except httpx.HTTPStatusError as e:
  self._handle_http_error(e)
  except Exception as e:
  logger.error(f"Imagen API request failed: {e}")
- raise APIError(f"Imagen API request failed: {e}")
+ raise APIError(f"Imagen API request failed: {e}") from e

  def _extract_images(self, response_data: dict[str, Any]) -> list[str]:
  """Extract base64 image data from Imagen API response."""
@@ -156,23 +153,21 @@

  if status_code == 401 or status_code == 403:
  raise AuthenticationError(
- "Authentication failed. Please check your API key.",
- status_code=status_code
+ "Authentication failed. Please check your API key.", status_code=status_code
  )
  elif status_code == 429:
  raise RateLimitError(
- "Rate limit exceeded. Please try again later.",
- status_code=status_code
+ "Rate limit exceeded. Please try again later.", status_code=status_code
  )
  elif status_code == 400 and "SAFETY" in error_text.upper():
  raise ContentPolicyError(
  "Content was blocked by safety filters. Please modify your prompt.",
- status_code=status_code
+ status_code=status_code,
  )
  else:
  raise APIError(
  f"API request failed with status {status_code}: {error_text}",
- status_code=status_code
+ status_code=status_code,
  )

  async def close(self) -> None:

src/services/prompt_enhancer.py
@@ -64,7 +64,7 @@
  enhanced = await self.gemini_client.generate_text(
  prompt=instruction,
  system_instruction=PROMPT_ENHANCEMENT_SYSTEM_INSTRUCTION,
- model="gemini-flash-latest"
+ model="gemini-flash-latest",
  )

  # Clean up the enhanced prompt
@@ -84,11 +84,7 @@
  "enhanced_prompt": original_prompt,
  }

- def _build_enhancement_instruction(
- self,
- prompt: str,
- context: dict[str, Any] | None
- ) -> str:
+ def _build_enhancement_instruction(self, prompt: str, context: dict[str, Any] | None) -> str:
  """Build the instruction for prompt enhancement."""
  instruction_parts = [f"Enhance this image generation prompt:\n\n{prompt}"]


src/tools/batch_generate.py
@@ -54,12 +54,12 @@ async def batch_generate_images(
  "batch_size": batch_size,
  "completed": 0,
  "failed": 0,
- "results": []
+ "results": [],
  }

  # Process prompts in batches
  for i in range(0, len(prompts), batch_size):
- batch = prompts[i:i + batch_size]
+ batch = prompts[i : i + batch_size]
  logger.info(f"Processing batch {i // batch_size + 1}: {len(batch)} prompts")

  # Create tasks for parallel processing
@@ -71,7 +71,7 @@
  aspect_ratio=aspect_ratio,
  output_format=output_format,
  number_of_images=1,
- **shared_params
+ **shared_params,
  )
  for prompt in batch
  ]
@@ -86,19 +86,19 @@
  if isinstance(result, Exception):
  logger.error(f"Failed to generate image for prompt {prompt_index}: {result}")
  results["failed"] += 1
- results["results"].append({
- "prompt_index": prompt_index,
- "prompt": batch[j],
- "success": False,
- "error": str(result)
- })
+ results["results"].append(
+ {
+ "prompt_index": prompt_index,
+ "prompt": batch[j],
+ "success": False,
+ "error": str(result),
+ }
+ )
  else:
  results["completed"] += 1
- results["results"].append({
- "prompt_index": prompt_index,
- "prompt": batch[j],
- **result
- })
+ results["results"].append(
+ {"prompt_index": prompt_index, "prompt": batch[j], **result}
+ )

  return results
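
The same slice-and-gather pattern in isolation, as a rough self-contained sketch: fake_generate is a hypothetical stand-in for the real image generation call, and all names here are illustrative rather than taken from the package.

import asyncio
from typing import Any

async def fake_generate(prompt: str) -> dict[str, Any]:
    # Stand-in for the real image generation call, used only for illustration
    await asyncio.sleep(0)
    if not prompt:
        raise ValueError("empty prompt")
    return {"success": True, "prompt": prompt}

async def run_batches(prompts: list[str], batch_size: int = 2) -> dict[str, Any]:
    results: dict[str, Any] = {"total": len(prompts), "completed": 0, "failed": 0, "results": []}
    for i in range(0, len(prompts), batch_size):
        batch = prompts[i : i + batch_size]
        # return_exceptions=True keeps one failed prompt from aborting the whole batch
        outcomes = await asyncio.gather(
            *(fake_generate(p) for p in batch), return_exceptions=True
        )
        for j, outcome in enumerate(outcomes):
            prompt_index = i + j
            if isinstance(outcome, Exception):
                results["failed"] += 1
                results["results"].append(
                    {"prompt_index": prompt_index, "prompt": batch[j], "success": False, "error": str(outcome)}
                )
            else:
                results["completed"] += 1
                results["results"].append({"prompt_index": prompt_index, "prompt": batch[j], **outcome})
    return results

# Example: asyncio.run(run_batches(["a cat", "", "a dog"])) -> completed=2, failed=1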

@@ -114,7 +114,6 @@ def register_batch_generate_tool(mcp_server: Any) -> None:
  aspect_ratio: str = "1:1",
  output_format: str = "png",
  batch_size: int | None = None,
- person_generation: str = "allow_adult",
  negative_prompt: str | None = None,
  ) -> str:
  """
@@ -130,7 +129,6 @@ def register_batch_generate_tool(mcp_server: Any) -> None:
  aspect_ratio: Aspect ratio for all images (default: 1:1)
  output_format: Image format for all images (default: png)
  batch_size: Parallel batch size (default: from config)
- person_generation: Person policy for Imagen models (default: allow_adult)
  negative_prompt: Negative prompt for Imagen models (optional)

  Returns:
@@ -144,7 +142,6 @@ def register_batch_generate_tool(mcp_server: Any) -> None:
  aspect_ratio=aspect_ratio,
  output_format=output_format,
  batch_size=batch_size,
- person_generation=person_generation,
  negative_prompt=negative_prompt,
  )

@@ -152,8 +149,6 @@ def register_batch_generate_tool(mcp_server: Any) -> None:

  except Exception as e:
  logger.error(f"Batch generation error: {e}")
- return json.dumps({
- "success": False,
- "error": str(e),
- "error_type": type(e).__name__
- }, indent=2)
+ return json.dumps(
+ {"success": False, "error": str(e), "error_type": type(e).__name__}, indent=2
+ )

src/tools/generate_image.py
@@ -8,15 +8,12 @@ import logging
  from pathlib import Path
  from typing import Any

- from fastmcp import Image
-
- from ..config import ALL_MODELS, ASPECT_RATIOS, get_settings
+ from ..config import get_settings
  from ..core import (
  validate_aspect_ratio,
  validate_image_format,
  validate_model,
  validate_number_of_images,
- validate_person_generation,
  validate_prompt,
  validate_seed,
  )
@@ -38,7 +35,6 @@ async def generate_image_tool(
  blend_images: bool = False,
  use_world_knowledge: bool = False,
  # Imagen-specific options
- person_generation: str = "allow_adult",
  negative_prompt: str | None = None,
  seed: int | None = None,
  # Output options
@@ -59,7 +55,6 @@
  maintain_character_consistency: Maintain character features across generations (Gemini)
  blend_images: Enable multi-image blending (Gemini)
  use_world_knowledge: Use real-world knowledge for context (Gemini)
- person_generation: Person generation policy (Imagen: dont_allow, allow_adult, allow_all)
  negative_prompt: What to avoid in the image (Imagen)
  seed: Random seed for reproducibility (Imagen)
  save_to_disk: Save images to output directory
@@ -75,18 +70,6 @@
  validate_aspect_ratio(aspect_ratio)
  validate_image_format(output_format)

- if person_generation:
- validate_person_generation(person_generation)
-
- # Warn if prompt may conflict with person_generation policy
- if person_generation == "dont_allow":
- person_keywords = ["person", "people", "man", "woman", "child", "human", "face", "portrait", "crowd"]
- if any(keyword in prompt.lower() for keyword in person_keywords):
- logger.warning(
- f"Prompt contains person-related keywords but person_generation is set to 'dont_allow'. "
- f"This may result in the API blocking image generation."
- )
-
  if seed is not None:
  validate_seed(seed)
  logger.warning(
@@ -134,7 +117,7 @@
  if model.startswith("imagen"):
  params["number_of_images"] = number_of_images
  params["output_format"] = f"image/{output_format}"
- params["person_generation"] = person_generation
+ params["person_generation"] = "allow_all" # Hard-coded to allow all people
  if negative_prompt:
  params["negative_prompt"] = negative_prompt
  if seed is not None:
@@ -145,7 +128,7 @@
  prompt=prompt,
  model=model,
  enhance_prompt=enhance_prompt and settings.api.enable_prompt_enhancement,
- **params
+ **params,
  )

  # Prepare response
@@ -158,7 +141,7 @@
  "metadata": {
  "enhance_prompt": enhance_prompt,
  "aspect_ratio": aspect_ratio,
- }
+ },
  }

  # Save images and prepare for MCP response
@@ -202,7 +185,6 @@ def register_generate_image_tool(mcp_server: Any) -> None:
  maintain_character_consistency: bool = False,
  blend_images: bool = False,
  use_world_knowledge: bool = False,
- person_generation: str = "allow_adult",
  negative_prompt: str | None = None,
  seed: int | None = None,
  ) -> str:
@@ -224,7 +206,6 @@ def register_generate_image_tool(mcp_server: Any) -> None:
  maintain_character_consistency: Maintain character features (Gemini only)
  blend_images: Enable multi-image blending (Gemini only)
  use_world_knowledge: Use real-world knowledge (Gemini only)
- person_generation: Person policy: dont_allow, allow_adult, allow_all (Imagen only)
  negative_prompt: What to avoid in the image (Imagen only)
  seed: Random seed for reproducibility (NOT SUPPORTED - will be ignored)

@@ -249,7 +230,6 @@ def register_generate_image_tool(mcp_server: Any) -> None:
  maintain_character_consistency=maintain_character_consistency,
  blend_images=blend_images,
  use_world_knowledge=use_world_knowledge,
- person_generation=person_generation,
  negative_prompt=negative_prompt,
  seed=seed,
  )
@@ -258,8 +238,6 @@ def register_generate_image_tool(mcp_server: Any) -> None:

  except Exception as e:
  logger.error(f"Error generating image: {e}")
- return json.dumps({
- "success": False,
- "error": str(e),
- "error_type": type(e).__name__
- }, indent=2)
+ return json.dumps(
+ {"success": False, "error": str(e), "error_type": type(e).__name__}, indent=2
+ )

src/services/__init__.py
@@ -1,8 +1,8 @@
  """Services module for Ultimate Gemini MCP."""

  from .gemini_client import GeminiClient
- from .imagen_client import ImagenClient
  from .image_service import ImageResult, ImageService
+ from .imagen_client import ImagenClient
  from .prompt_enhancer import PromptEnhancer, create_prompt_enhancer

  __all__ = [