myaidev-method 0.2.22 → 0.2.24-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
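For reference, the same comparison can be regenerated locally with npm's built-in diff command (available in npm 7.5 and later); a minimal invocation against the public registry:

    npm diff --diff=myaidev-method@0.2.22 --diff=myaidev-method@0.2.24-1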
Files changed (59)
  1. package/USER_GUIDE.md +453 -48
  2. package/bin/cli.js +236 -38
  3. package/content-rules.example.md +80 -0
  4. package/dist/mcp/mcp-launcher.js +237 -0
  5. package/dist/server/.tsbuildinfo +1 -1
  6. package/dist/server/auth/layers.d.ts +1 -1
  7. package/dist/server/auth/services/AuthService.d.ts +1 -1
  8. package/dist/server/auth/services/TokenService.js.map +1 -1
  9. package/dist/server/auth/services/example.d.ts +5 -5
  10. package/package.json +22 -17
  11. package/src/config/workflows.js +28 -44
  12. package/src/index.js +21 -8
  13. package/src/lib/ascii-banner.js +214 -0
  14. package/src/lib/config-manager.js +470 -0
  15. package/src/lib/content-generator.js +427 -0
  16. package/src/lib/html-conversion-utils.js +843 -0
  17. package/src/lib/seo-optimizer.js +515 -0
  18. package/src/lib/update-manager.js +2 -1
  19. package/src/lib/visual-config-utils.js +321 -295
  20. package/src/lib/visual-generation-utils.js +1000 -811
  21. package/src/lib/wordpress-client.js +633 -0
  22. package/src/lib/workflow-installer.js +3 -3
  23. package/src/scripts/configure-wordpress-mcp.js +8 -3
  24. package/src/scripts/generate-visual-cli.js +365 -235
  25. package/src/scripts/html-conversion-cli.js +526 -0
  26. package/src/scripts/init/configure.js +436 -0
  27. package/src/scripts/init/install.js +460 -0
  28. package/src/scripts/ping.js +250 -0
  29. package/src/scripts/utils/file-utils.js +404 -0
  30. package/src/scripts/utils/logger.js +300 -0
  31. package/src/scripts/utils/write-content.js +293 -0
  32. package/src/scripts/wordpress/publish-to-wordpress.js +165 -0
  33. package/src/server/auth/services/TokenService.ts +1 -1
  34. package/src/templates/claude/agents/content-rules-setup.md +657 -0
  35. package/src/templates/claude/agents/content-writer.md +328 -1
  36. package/src/templates/claude/agents/visual-content-generator.md +311 -8
  37. package/src/templates/claude/commands/myai-configure.md +1 -1
  38. package/src/templates/claude/commands/myai-content-rules-setup.md +204 -0
  39. package/src/templates/claude/commands/myai-convert-html.md +186 -0
  40. package/src/templates/codex/commands/myai-content-rules-setup.md +85 -0
  41. package/src/templates/diagrams/architecture.d2 +52 -0
  42. package/src/templates/diagrams/flowchart.d2 +42 -0
  43. package/src/templates/diagrams/sequence.d2 +47 -0
  44. package/src/templates/docs/content-creation-guide.md +164 -0
  45. package/src/templates/docs/deployment-guide.md +336 -0
  46. package/src/templates/docs/visual-generation-guide.md +248 -0
  47. package/src/templates/docs/wordpress-publishing-guide.md +208 -0
  48. package/src/templates/gemini/commands/myai-content-rules-setup.toml +57 -0
  49. package/src/templates/infographics/comparison-table.html +347 -0
  50. package/src/templates/infographics/data-chart.html +268 -0
  51. package/src/templates/infographics/process-flow.html +365 -0
  52. package/.claude/mcp/sparc-orchestrator-server.js +0 -607
  53. package/.claude/mcp/wordpress-server.js +0 -1277
  54. package/src/agents/content-writer-prompt.md +0 -164
  55. package/src/agents/content-writer.json +0 -70
  56. package/src/templates/claude/mcp_config.json +0 -74
  57. package/src/templates/claude/slash_commands.json +0 -166
  58. package/src/templates/scripts/configure-wordpress-mcp.js +0 -181
  59. package/src/scripts/{wordpress-health-check.js → wordpress/wordpress-health-check.js} +0 -0
@@ -19,58 +19,59 @@
  * @module visual-generation-utils
  */

- import fetch from 'node-fetch';
- import fs from 'fs-extra';
- import path from 'path';
- import dotenv from 'dotenv';
+ import fetch from "node-fetch";
+ import fs from "fs-extra";
+ import path from "path";
+ import dotenv from "dotenv";

  dotenv.config();

  // API Configuration
- const GEMINI_API_BASE = 'https://generativelanguage.googleapis.com/v1beta';
- const OPENAI_API_BASE = 'https://api.openai.com/v1';
+ const GEMINI_API_BASE = "https://generativelanguage.googleapis.com/v1beta";
+ const OPENAI_API_BASE = "https://api.openai.com/v1";

  // Gemini Models for image generation
- const GEMINI_IMAGE_MODEL = 'gemini-3-pro-image-preview'; // Gemini 3.0 "Nano Banana" preview
- const GEMINI_IMAGEN_MODEL = 'imagen-3.0-generate-002'; // Imagen via Gemini API
+ const GEMINI_IMAGE_MODEL = "gemini-3-pro-image-preview"; // Gemini 3.0 "Nano Banana" preview
+ const GEMINI_IMAGEN_MODEL = "imagen-3.0-generate-002"; // Imagen via Gemini API

  // OpenAI GPT Image Models (SOTA)
  const OPENAI_IMAGE_MODELS = {
- 'gpt-image-1.5': 'gpt-image-1.5', // State-of-the-art (recommended)
- 'gpt-image-1': 'gpt-image-1', // Main model
- 'gpt-image-1-mini': 'gpt-image-1-mini' // Cost-effective option
+ "gpt-image-1.5": "gpt-image-1.5", // State-of-the-art (recommended)
+ "gpt-image-1": "gpt-image-1", // Main model
+ "gpt-image-1-mini": "gpt-image-1-mini", // Cost-effective option
  };

  // FLUX 2 Models (via Fal.ai or BFL API)
  const FLUX2_MODELS = {
- 'flux2-pro': 'fal-ai/flux-2/pro', // State-of-the-art quality, fastest, lowest cost
- 'flux2-flex': 'fal-ai/flux-2/flex', // Developer-controlled parameters
- 'flux2-dev': 'fal-ai/flux-2/dev', // 32B open-weight model
- // Legacy FLUX 1.x models (still available)
- 'flux-pro': 'fal-ai/flux-pro/v1.1-ultra',
- 'flux-dev': 'fal-ai/flux/dev'
+ "flux2-pro": "fal-ai/flux-2/pro", // State-of-the-art quality, fastest, lowest cost
+ "flux2-flex": "fal-ai/flux-2/flex", // Developer-controlled parameters
+ "flux2-dev": "fal-ai/flux-2/dev", // 32B open-weight model
+ // Legacy FLUX 1.x models (still available)
+ "flux-pro": "fal-ai/flux-pro/v1.1-ultra",
+ "flux-dev": "fal-ai/flux/dev",
  };

  // Pricing (USD per image/video)
  const PRICING = {
- // SOTA Models (Recommended)
- gemini: 0.02, // Gemini 3.0 Pro Image "Nano Banana" - fast, cheap
- 'gpt-image-1.5': 0.19, // OpenAI GPT Image 1.5 - SOTA quality (high quality)
- 'gpt-image-1.5-medium': 0.07, // GPT Image 1.5 medium quality
- 'gpt-image-1.5-low': 0.02, // GPT Image 1.5 low quality
- 'gpt-image-1': 0.19, // OpenAI GPT Image 1 (high quality)
- 'gpt-image-1-mini': 0.02, // OpenAI GPT Image 1 Mini - budget option
- // Additional Models
- imagen: 0.03, // Imagen 3 (Gemini API)
- // FLUX 2 pricing
- flux2_pro: 0.05, // FLUX 2 Pro
- flux2_flex: 0.04, // FLUX 2 Flex
- flux2_dev: 0.025, // FLUX 2 Dev
- // Legacy FLUX 1.x
- flux_pro: 0.06, // FLUX Pro v1.1 Ultra
- flux_dev: 0.025, // FLUX Dev (per megapixel)
- // Video
- veo3: 0.40 // Veo 3 (per second)
+ // SOTA Models (Recommended)
+ gemini: 0.02, // Gemini 3.0 Pro Image "Nano Banana" - fast, cheap
+ "gpt-image-1.5": 0.19, // OpenAI GPT Image 1.5 - SOTA quality (high quality)
+ "gpt-image-1.5-medium": 0.07, // GPT Image 1.5 medium quality
+ "gpt-image-1.5-low": 0.02, // GPT Image 1.5 low quality
+ "gpt-image-1": 0.19, // OpenAI GPT Image 1 (high quality)
+ "gpt-image-1-mini": 0.02, // OpenAI GPT Image 1 Mini - budget option
+ // Additional Models
+ imagen: 0.03, // Imagen 3 (Gemini API)
+ nano_banana_pro: 0.15, // Nano Banana Pro (Fal API)
+ // FLUX 2 pricing
+ flux2_pro: 0.05, // FLUX 2 Pro
+ flux2_flex: 0.04, // FLUX 2 Flex
+ flux2_dev: 0.025, // FLUX 2 Dev
+ // Legacy FLUX 1.x
+ flux_pro: 0.06, // FLUX Pro v1.1 Ultra
+ flux_dev: 0.025, // FLUX Dev (per megapixel)
+ // Video
+ veo3: 0.4, // Veo 3 (per second)
  };

  /**
@@ -79,37 +80,44 @@ const PRICING = {
79
80
  * @returns {Object} Validation results
80
81
  */
81
82
  export function validateAPIKeys() {
82
- // Support both GEMINI_API_KEY (preferred) and GOOGLE_API_KEY (legacy)
83
- const geminiKey = process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY;
84
- const openaiKey = process.env.OPENAI_API_KEY;
85
- const falKey = process.env.FAL_KEY;
86
- const bflKey = process.env.BFL_API_KEY; // Black Forest Labs direct API
87
-
88
- const hasGemini = !!(geminiKey && geminiKey.length > 20);
89
- const hasOpenAI = !!(openaiKey && openaiKey.length > 20);
90
- const hasFal = !!(falKey && falKey.length > 20);
91
- const hasBFL = !!(bflKey && bflKey.length > 20);
92
-
93
- const availableServices = [];
94
- if (hasGemini) {
95
- availableServices.push('gemini', 'imagen');
96
- }
97
- if (hasOpenAI) {
98
- availableServices.push('gpt-image-1.5', 'gpt-image-1', 'gpt-image-1-mini');
99
- }
100
- if (hasFal || hasBFL) {
101
- availableServices.push('flux2-pro', 'flux2-flex', 'flux2-dev', 'flux-pro', 'flux-dev', 'veo3');
102
- }
103
-
104
- return {
105
- hasGemini,
106
- hasGoogle: hasGemini, // Legacy compatibility
107
- hasOpenAI,
108
- hasFal,
109
- hasBFL,
110
- hasAny: hasGemini || hasOpenAI || hasFal || hasBFL,
111
- availableServices
112
- };
83
+ // Support both GEMINI_API_KEY (preferred) and GOOGLE_API_KEY (legacy)
84
+ const geminiKey = process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY;
85
+ const openaiKey = process.env.OPENAI_API_KEY;
86
+ const falKey = process.env.FAL_KEY;
87
+ const bflKey = process.env.BFL_API_KEY; // Black Forest Labs direct API
88
+
89
+ const hasGemini = !!(geminiKey && geminiKey.length > 20);
90
+ const hasOpenAI = !!(openaiKey && openaiKey.length > 20);
91
+ const hasFal = !!(falKey && falKey.length > 20);
92
+ const hasBFL = !!(bflKey && bflKey.length > 20);
93
+
94
+ const availableServices = [];
95
+ if (hasGemini) {
96
+ availableServices.push("gemini", "imagen");
97
+ }
98
+ if (hasOpenAI) {
99
+ availableServices.push("gpt-image-1.5", "gpt-image-1", "gpt-image-1-mini");
100
+ }
101
+ if (hasFal || hasBFL) {
102
+ availableServices.push(
103
+ "flux2-pro",
104
+ "flux2-flex",
105
+ "flux2-dev",
106
+ "flux-pro",
107
+ "flux-dev",
108
+ "veo3",
109
+ );
110
+ }
111
+
112
+ return {
113
+ hasGemini,
114
+ hasGoogle: hasGemini, // Legacy compatibility
115
+ hasOpenAI,
116
+ hasFal,
117
+ hasBFL,
118
+ hasAny: hasGemini || hasOpenAI || hasFal || hasBFL,
119
+ availableServices,
120
+ };
113
121
  }
114
122
 
115
123
  /**
@@ -120,50 +128,50 @@ export function validateAPIKeys() {
120
128
  * @returns {number} Estimated cost in USD
121
129
  */
122
130
  export function estimateCost(service, options = {}) {
123
- const { quality = 'high' } = options;
131
+ const { quality = "high" } = options;
124
132
 
125
- switch (service) {
126
- case 'gemini':
127
- return PRICING.gemini;
133
+ switch (service) {
134
+ case "gemini":
135
+ return PRICING.gemini;
128
136
 
129
- case 'imagen':
130
- return PRICING.imagen;
137
+ case "imagen":
138
+ return PRICING.imagen;
131
139
 
132
- // OpenAI GPT Image models - cost varies by quality
133
- case 'gpt-image-1.5':
134
- if (quality === 'low') return PRICING['gpt-image-1.5-low'];
135
- if (quality === 'medium') return PRICING['gpt-image-1.5-medium'];
136
- return PRICING['gpt-image-1.5']; // high quality default
140
+ // OpenAI GPT Image models - cost varies by quality
141
+ case "gpt-image-1.5":
142
+ if (quality === "low") return PRICING["gpt-image-1.5-low"];
143
+ if (quality === "medium") return PRICING["gpt-image-1.5-medium"];
144
+ return PRICING["gpt-image-1.5"]; // high quality default
137
145
 
138
- case 'gpt-image-1':
139
- return PRICING['gpt-image-1'];
146
+ case "gpt-image-1":
147
+ return PRICING["gpt-image-1"];
140
148
 
141
- case 'gpt-image-1-mini':
142
- return PRICING['gpt-image-1-mini'];
149
+ case "gpt-image-1-mini":
150
+ return PRICING["gpt-image-1-mini"];
143
151
 
144
- case 'flux2-pro':
145
- return PRICING.flux2_pro;
152
+ case "flux2-pro":
153
+ return PRICING.flux2_pro;
146
154
 
147
- case 'flux2-flex':
148
- return PRICING.flux2_flex;
155
+ case "flux2-flex":
156
+ return PRICING.flux2_flex;
149
157
 
150
- case 'flux2-dev':
151
- return PRICING.flux2_dev;
158
+ case "flux2-dev":
159
+ return PRICING.flux2_dev;
152
160
 
153
- case 'flux':
154
- case 'flux-pro':
155
- return PRICING.flux_pro;
161
+ case "flux":
162
+ case "flux-pro":
163
+ return PRICING.flux_pro;
156
164
 
157
- case 'flux-dev':
158
- return PRICING.flux_dev;
165
+ case "flux-dev":
166
+ return PRICING.flux_dev;
159
167
 
160
- case 'veo3':
161
- case 'veo3-fast':
162
- return PRICING.veo3; // per second, will multiply by duration
168
+ case "veo3":
169
+ case "veo3-fast":
170
+ return PRICING.veo3; // per second, will multiply by duration
163
171
 
164
- default:
165
- return 0;
166
- }
172
+ default:
173
+ return 0;
174
+ }
167
175
  }
168
176
 
169
177
  /**
@@ -173,20 +181,22 @@ export function estimateCost(service, options = {}) {
173
181
  * @returns {string} Selected service name
174
182
  * @throws {Error} If no API keys are configured
175
183
  */
176
- export function selectBestService(preferred = 'gemini') {
177
- const { availableServices, hasAny } = validateAPIKeys();
178
-
179
- if (!hasAny) {
180
- throw new Error('No API keys configured. Set GEMINI_API_KEY, OPENAI_API_KEY, or FAL_KEY in your environment.');
181
- }
182
-
183
- // Return preferred service if available
184
- if (availableServices.includes(preferred)) {
185
- return preferred;
186
- }
187
-
188
- // Fallback to first available service
189
- return availableServices[0];
184
+ export function selectBestService(preferred = "gemini") {
185
+ const { availableServices, hasAny } = validateAPIKeys();
186
+
187
+ if (!hasAny) {
188
+ throw new Error(
189
+ "No API keys configured. Set GEMINI_API_KEY, OPENAI_API_KEY, or FAL_KEY in your environment.",
190
+ );
191
+ }
192
+
193
+ // Return preferred service if available
194
+ if (availableServices.includes(preferred)) {
195
+ return preferred;
196
+ }
197
+
198
+ // Fallback to first available service
199
+ return availableServices[0];
190
200
  }
191
201
 
192
202
  /**
@@ -200,105 +210,105 @@ export function selectBestService(preferred = 'gemini') {
200
210
  * @returns {Promise<Object>} Generated image data
201
211
  */
202
212
  export async function generateImageGemini(prompt, options = {}) {
203
- const {
204
- imageSize = '1K',
205
- maxRetries = 3
206
- } = options;
207
-
208
- const apiKey = process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY;
209
- if (!apiKey) {
210
- throw new Error('GEMINI_API_KEY not configured. Set GEMINI_API_KEY in your environment.');
211
- }
212
-
213
- const endpoint = `${GEMINI_API_BASE}/models/${GEMINI_IMAGE_MODEL}:generateContent`;
214
-
215
- const requestBody = {
216
- contents: [
217
- {
218
- role: 'user',
219
- parts: [
220
- {
221
- text: prompt
222
- }
223
- ]
224
- }
225
- ],
226
- generationConfig: {
227
- responseModalities: ['IMAGE', 'TEXT'],
228
- imageConfig: {
229
- image_size: imageSize
230
- }
231
- }
232
- };
233
-
234
- let lastError;
235
- for (let attempt = 1; attempt <= maxRetries; attempt++) {
236
- try {
237
- const response = await fetch(`${endpoint}?key=${apiKey}`, {
238
- method: 'POST',
239
- headers: {
240
- 'Content-Type': 'application/json'
241
- },
242
- body: JSON.stringify(requestBody)
243
- });
244
-
245
- if (!response.ok) {
246
- const errorText = await response.text();
247
- throw new Error(`Gemini API error: ${response.status} - ${errorText}`);
248
- }
249
-
250
- const data = await response.json();
251
-
252
- // Handle streaming response format (array of candidates)
253
- const candidates = Array.isArray(data) ? data : (data.candidates || [data]);
254
-
255
- for (const candidate of candidates) {
256
- const content = candidate.content || candidate;
257
- const parts = content.parts || [];
258
-
259
- for (const part of parts) {
260
- // Check for inline image data
261
- if (part.inlineData && part.inlineData.data) {
262
- return {
263
- data: part.inlineData.data,
264
- mimeType: part.inlineData.mimeType || 'image/png',
265
- service: 'gemini',
266
- model: GEMINI_IMAGE_MODEL,
267
- cost: PRICING.gemini
268
- };
269
- }
270
-
271
- // Check for file data reference
272
- if (part.fileData && part.fileData.fileUri) {
273
- const imageResponse = await fetch(part.fileData.fileUri);
274
- const imageBuffer = await imageResponse.arrayBuffer();
275
- const base64Data = Buffer.from(imageBuffer).toString('base64');
276
-
277
- return {
278
- data: base64Data,
279
- mimeType: part.fileData.mimeType || 'image/png',
280
- service: 'gemini',
281
- model: GEMINI_IMAGE_MODEL,
282
- cost: PRICING.gemini
283
- };
284
- }
285
- }
286
- }
287
-
288
- throw new Error('No image data in Gemini response');
289
-
290
- } catch (error) {
291
- lastError = error;
292
-
293
- if (attempt < maxRetries) {
294
- const backoff = Math.pow(2, attempt) * 1000;
295
- console.log(`⚠️ Gemini attempt ${attempt} failed: ${error.message}. Retrying in ${backoff / 1000}s...`);
296
- await sleep(backoff);
297
- }
298
- }
299
- }
300
-
301
- throw lastError;
213
+ const { imageSize = "1K", maxRetries = 3 } = options;
214
+
215
+ const apiKey = process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY;
216
+ if (!apiKey) {
217
+ throw new Error(
218
+ "GEMINI_API_KEY not configured. Set GEMINI_API_KEY in your environment.",
219
+ );
220
+ }
221
+
222
+ const endpoint = `${GEMINI_API_BASE}/models/${GEMINI_IMAGE_MODEL}:generateContent`;
223
+
224
+ const requestBody = {
225
+ contents: [
226
+ {
227
+ role: "user",
228
+ parts: [
229
+ {
230
+ text: prompt,
231
+ },
232
+ ],
233
+ },
234
+ ],
235
+ generationConfig: {
236
+ responseModalities: ["IMAGE", "TEXT"],
237
+ imageConfig: {
238
+ image_size: imageSize,
239
+ },
240
+ },
241
+ };
242
+
243
+ let lastError;
244
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
245
+ try {
246
+ const response = await fetch(`${endpoint}?key=${apiKey}`, {
247
+ method: "POST",
248
+ headers: {
249
+ "Content-Type": "application/json",
250
+ },
251
+ body: JSON.stringify(requestBody),
252
+ });
253
+
254
+ if (!response.ok) {
255
+ const errorText = await response.text();
256
+ throw new Error(`Gemini API error: ${response.status} - ${errorText}`);
257
+ }
258
+
259
+ const data = await response.json();
260
+
261
+ // Handle streaming response format (array of candidates)
262
+ const candidates = Array.isArray(data) ? data : data.candidates || [data];
263
+
264
+ for (const candidate of candidates) {
265
+ const content = candidate.content || candidate;
266
+ const parts = content.parts || [];
267
+
268
+ for (const part of parts) {
269
+ // Check for inline image data
270
+ if (part.inlineData && part.inlineData.data) {
271
+ return {
272
+ data: part.inlineData.data,
273
+ mimeType: part.inlineData.mimeType || "image/png",
274
+ service: "gemini",
275
+ model: GEMINI_IMAGE_MODEL,
276
+ cost: PRICING.gemini,
277
+ };
278
+ }
279
+
280
+ // Check for file data reference
281
+ if (part.fileData && part.fileData.fileUri) {
282
+ const imageResponse = await fetch(part.fileData.fileUri);
283
+ const imageBuffer = await imageResponse.arrayBuffer();
284
+ const base64Data = Buffer.from(imageBuffer).toString("base64");
285
+
286
+ return {
287
+ data: base64Data,
288
+ mimeType: part.fileData.mimeType || "image/png",
289
+ service: "gemini",
290
+ model: GEMINI_IMAGE_MODEL,
291
+ cost: PRICING.gemini,
292
+ };
293
+ }
294
+ }
295
+ }
296
+
297
+ throw new Error("No image data in Gemini response");
298
+ } catch (error) {
299
+ lastError = error;
300
+
301
+ if (attempt < maxRetries) {
302
+ const backoff = Math.pow(2, attempt) * 1000;
303
+ console.log(
304
+ `⚠️ Gemini attempt ${attempt} failed: ${error.message}. Retrying in ${backoff / 1000}s...`,
305
+ );
306
+ await sleep(backoff);
307
+ }
308
+ }
309
+ }
310
+
311
+ throw lastError;
302
312
  }
303
313
 
304
314
  /**
@@ -313,90 +323,89 @@ export async function generateImageGemini(prompt, options = {}) {
313
323
  * @returns {Promise<Object>} Generated image data
314
324
  */
315
325
  export async function generateImageImagen(prompt, options = {}) {
316
- const {
317
- aspectRatio = '1:1',
318
- numberOfImages = 1,
319
- maxRetries = 3
320
- } = options;
321
-
322
- const apiKey = process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY;
323
- if (!apiKey) {
324
- throw new Error('GEMINI_API_KEY not configured. Set GEMINI_API_KEY in your environment.');
325
- }
326
-
327
- const endpoint = `${GEMINI_API_BASE}/models/${GEMINI_IMAGEN_MODEL}:generateImages`;
328
-
329
- const requestBody = {
330
- prompt: prompt,
331
- config: {
332
- numberOfImages: Math.min(numberOfImages, 4),
333
- aspectRatio: aspectRatio,
334
- safetyFilterLevel: 'BLOCK_MEDIUM_AND_ABOVE'
335
- }
336
- };
337
-
338
- let lastError;
339
- for (let attempt = 1; attempt <= maxRetries; attempt++) {
340
- try {
341
- const response = await fetch(`${endpoint}?key=${apiKey}`, {
342
- method: 'POST',
343
- headers: {
344
- 'Content-Type': 'application/json'
345
- },
346
- body: JSON.stringify(requestBody)
347
- });
348
-
349
- if (!response.ok) {
350
- const errorText = await response.text();
351
- throw new Error(`Imagen API error: ${response.status} - ${errorText}`);
352
- }
353
-
354
- const data = await response.json();
355
-
356
- // Imagen returns images array with base64 encoded data
357
- if (data.generatedImages && data.generatedImages[0]) {
358
- const image = data.generatedImages[0];
359
-
360
- if (image.image && image.image.imageBytes) {
361
- return {
362
- data: image.image.imageBytes,
363
- mimeType: 'image/png',
364
- service: 'imagen',
365
- model: GEMINI_IMAGEN_MODEL,
366
- cost: PRICING.imagen
367
- };
368
- }
369
- }
370
-
371
- // Alternative response format
372
- if (data.images && data.images[0]) {
373
- const image = data.images[0];
374
-
375
- if (image.bytesBase64Encoded || image.imageBytes) {
376
- return {
377
- data: image.bytesBase64Encoded || image.imageBytes,
378
- mimeType: 'image/png',
379
- service: 'imagen',
380
- model: GEMINI_IMAGEN_MODEL,
381
- cost: PRICING.imagen
382
- };
383
- }
384
- }
385
-
386
- throw new Error('No image data in Imagen response');
387
-
388
- } catch (error) {
389
- lastError = error;
390
-
391
- if (attempt < maxRetries) {
392
- const backoff = Math.pow(2, attempt) * 1000;
393
- console.log(`⚠️ Imagen attempt ${attempt} failed: ${error.message}. Retrying in ${backoff / 1000}s...`);
394
- await sleep(backoff);
395
- }
396
- }
397
- }
398
-
399
- throw lastError;
326
+ const { aspectRatio = "1:1", numberOfImages = 1, maxRetries = 3 } = options;
327
+
328
+ const apiKey = process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY;
329
+ if (!apiKey) {
330
+ throw new Error(
331
+ "GEMINI_API_KEY not configured. Set GEMINI_API_KEY in your environment.",
332
+ );
333
+ }
334
+
335
+ const endpoint = `${GEMINI_API_BASE}/models/${GEMINI_IMAGEN_MODEL}:generateImages`;
336
+
337
+ const requestBody = {
338
+ prompt: prompt,
339
+ config: {
340
+ numberOfImages: Math.min(numberOfImages, 4),
341
+ aspectRatio: aspectRatio,
342
+ safetyFilterLevel: "BLOCK_MEDIUM_AND_ABOVE",
343
+ },
344
+ };
345
+
346
+ let lastError;
347
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
348
+ try {
349
+ const response = await fetch(`${endpoint}?key=${apiKey}`, {
350
+ method: "POST",
351
+ headers: {
352
+ "Content-Type": "application/json",
353
+ },
354
+ body: JSON.stringify(requestBody),
355
+ });
356
+
357
+ if (!response.ok) {
358
+ const errorText = await response.text();
359
+ throw new Error(`Imagen API error: ${response.status} - ${errorText}`);
360
+ }
361
+
362
+ const data = await response.json();
363
+
364
+ // Imagen returns images array with base64 encoded data
365
+ if (data.generatedImages && data.generatedImages[0]) {
366
+ const image = data.generatedImages[0];
367
+
368
+ if (image.image && image.image.imageBytes) {
369
+ return {
370
+ data: image.image.imageBytes,
371
+ mimeType: "image/png",
372
+ service: "imagen",
373
+ model: GEMINI_IMAGEN_MODEL,
374
+ cost: PRICING.imagen,
375
+ };
376
+ }
377
+ }
378
+
379
+ // Alternative response format
380
+ if (data.images && data.images[0]) {
381
+ const image = data.images[0];
382
+
383
+ if (image.bytesBase64Encoded || image.imageBytes) {
384
+ return {
385
+ data: image.bytesBase64Encoded || image.imageBytes,
386
+ mimeType: "image/png",
387
+ service: "imagen",
388
+ model: GEMINI_IMAGEN_MODEL,
389
+ cost: PRICING.imagen,
390
+ };
391
+ }
392
+ }
393
+
394
+ throw new Error("No image data in Imagen response");
395
+ } catch (error) {
396
+ lastError = error;
397
+
398
+ if (attempt < maxRetries) {
399
+ const backoff = Math.pow(2, attempt) * 1000;
400
+ console.log(
401
+ `⚠️ Imagen attempt ${attempt} failed: ${error.message}. Retrying in ${backoff / 1000}s...`,
402
+ );
403
+ await sleep(backoff);
404
+ }
405
+ }
406
+ }
407
+
408
+ throw lastError;
400
409
  }
401
410
 
402
411
  /**
@@ -420,109 +429,122 @@ export async function generateImageImagen(prompt, options = {}) {
420
429
  * @returns {Promise<Object>} Generated image data
421
430
  */
422
431
  export async function generateImageOpenAI(prompt, options = {}) {
423
- const {
424
- model = 'gpt-image-1.5',
425
- size = '1024x1024',
426
- quality = 'high',
427
- outputFormat = 'png',
428
- background = 'auto',
429
- maxRetries = 3
430
- } = options;
431
-
432
- const apiKey = process.env.OPENAI_API_KEY;
433
- if (!apiKey) {
434
- throw new Error('OPENAI_API_KEY not configured. Get your key from https://platform.openai.com/api-keys');
435
- }
436
-
437
- const endpoint = `${OPENAI_API_BASE}/images/generations`;
438
-
439
- const requestBody = {
440
- model: OPENAI_IMAGE_MODELS[model] || model,
441
- prompt: prompt,
442
- n: 1,
443
- size: size,
444
- quality: quality,
445
- output_format: outputFormat
446
- };
447
-
448
- // Add background for PNG format (transparency support)
449
- if (outputFormat === 'png' && background !== 'auto') {
450
- requestBody.background = background;
451
- }
452
-
453
- let lastError;
454
- for (let attempt = 1; attempt <= maxRetries; attempt++) {
455
- try {
456
- const response = await fetch(endpoint, {
457
- method: 'POST',
458
- headers: {
459
- 'Content-Type': 'application/json',
460
- 'Authorization': `Bearer ${apiKey}`
461
- },
462
- body: JSON.stringify(requestBody)
463
- });
464
-
465
- if (!response.ok) {
466
- const errorText = await response.text();
467
- let errorMessage = `OpenAI API error: ${response.status}`;
468
- try {
469
- const errorData = JSON.parse(errorText);
470
- errorMessage = errorData.error?.message || errorMessage;
471
- } catch {
472
- errorMessage = `${errorMessage} - ${errorText}`;
473
- }
474
- throw new Error(errorMessage);
475
- }
476
-
477
- const data = await response.json();
478
-
479
- // OpenAI returns base64-encoded image data
480
- if (data.data && data.data[0]) {
481
- const imageData = data.data[0];
482
-
483
- // Handle base64 response (primary)
484
- if (imageData.b64_json) {
485
- return {
486
- data: imageData.b64_json,
487
- mimeType: outputFormat === 'jpeg' ? 'image/jpeg' : outputFormat === 'webp' ? 'image/webp' : 'image/png',
488
- service: 'openai',
489
- model: model,
490
- cost: estimateCost(model, { quality }),
491
- revisedPrompt: imageData.revised_prompt
492
- };
493
- }
494
-
495
- // Handle URL response (fallback)
496
- if (imageData.url) {
497
- const imageResponse = await fetch(imageData.url);
498
- const imageBuffer = await imageResponse.arrayBuffer();
499
- const base64Data = Buffer.from(imageBuffer).toString('base64');
500
-
501
- return {
502
- data: base64Data,
503
- mimeType: outputFormat === 'jpeg' ? 'image/jpeg' : outputFormat === 'webp' ? 'image/webp' : 'image/png',
504
- service: 'openai',
505
- model: model,
506
- cost: estimateCost(model, { quality }),
507
- revisedPrompt: imageData.revised_prompt
508
- };
509
- }
510
- }
511
-
512
- throw new Error('No image data in OpenAI response');
513
-
514
- } catch (error) {
515
- lastError = error;
516
-
517
- if (attempt < maxRetries) {
518
- const backoff = Math.pow(2, attempt) * 1000;
519
- console.log(`⚠️ OpenAI attempt ${attempt} failed: ${error.message}. Retrying in ${backoff / 1000}s...`);
520
- await sleep(backoff);
521
- }
522
- }
523
- }
524
-
525
- throw lastError;
432
+ const {
433
+ model = "gpt-image-1.5",
434
+ size = "1024x1024",
435
+ quality = "high",
436
+ outputFormat = "png",
437
+ background = "auto",
438
+ maxRetries = 3,
439
+ } = options;
440
+
441
+ const apiKey = process.env.OPENAI_API_KEY;
442
+ if (!apiKey) {
443
+ throw new Error(
444
+ "OPENAI_API_KEY not configured. Get your key from https://platform.openai.com/api-keys",
445
+ );
446
+ }
447
+
448
+ const endpoint = `${OPENAI_API_BASE}/images/generations`;
449
+
450
+ const requestBody = {
451
+ model: OPENAI_IMAGE_MODELS[model] || model,
452
+ prompt: prompt,
453
+ n: 1,
454
+ size: size,
455
+ quality: quality,
456
+ output_format: outputFormat,
457
+ };
458
+
459
+ // Add background for PNG format (transparency support)
460
+ if (outputFormat === "png" && background !== "auto") {
461
+ requestBody.background = background;
462
+ }
463
+
464
+ let lastError;
465
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
466
+ try {
467
+ const response = await fetch(endpoint, {
468
+ method: "POST",
469
+ headers: {
470
+ "Content-Type": "application/json",
471
+ Authorization: `Bearer ${apiKey}`,
472
+ },
473
+ body: JSON.stringify(requestBody),
474
+ });
475
+
476
+ if (!response.ok) {
477
+ const errorText = await response.text();
478
+ let errorMessage = `OpenAI API error: ${response.status}`;
479
+ try {
480
+ const errorData = JSON.parse(errorText);
481
+ errorMessage = errorData.error?.message || errorMessage;
482
+ } catch {
483
+ errorMessage = `${errorMessage} - ${errorText}`;
484
+ }
485
+ throw new Error(errorMessage);
486
+ }
487
+
488
+ const data = await response.json();
489
+
490
+ // OpenAI returns base64-encoded image data
491
+ if (data.data && data.data[0]) {
492
+ const imageData = data.data[0];
493
+
494
+ // Handle base64 response (primary)
495
+ if (imageData.b64_json) {
496
+ return {
497
+ data: imageData.b64_json,
498
+ mimeType:
499
+ outputFormat === "jpeg"
500
+ ? "image/jpeg"
501
+ : outputFormat === "webp"
502
+ ? "image/webp"
503
+ : "image/png",
504
+ service: "openai",
505
+ model: model,
506
+ cost: estimateCost(model, { quality }),
507
+ revisedPrompt: imageData.revised_prompt,
508
+ };
509
+ }
510
+
511
+ // Handle URL response (fallback)
512
+ if (imageData.url) {
513
+ const imageResponse = await fetch(imageData.url);
514
+ const imageBuffer = await imageResponse.arrayBuffer();
515
+ const base64Data = Buffer.from(imageBuffer).toString("base64");
516
+
517
+ return {
518
+ data: base64Data,
519
+ mimeType:
520
+ outputFormat === "jpeg"
521
+ ? "image/jpeg"
522
+ : outputFormat === "webp"
523
+ ? "image/webp"
524
+ : "image/png",
525
+ service: "openai",
526
+ model: model,
527
+ cost: estimateCost(model, { quality }),
528
+ revisedPrompt: imageData.revised_prompt,
529
+ };
530
+ }
531
+ }
532
+
533
+ throw new Error("No image data in OpenAI response");
534
+ } catch (error) {
535
+ lastError = error;
536
+
537
+ if (attempt < maxRetries) {
538
+ const backoff = Math.pow(2, attempt) * 1000;
539
+ console.log(
540
+ `⚠️ OpenAI attempt ${attempt} failed: ${error.message}. Retrying in ${backoff / 1000}s...`,
541
+ );
542
+ await sleep(backoff);
543
+ }
544
+ }
545
+ }
546
+
547
+ throw lastError;
526
548
  }
527
549
 
528
550
  /**
@@ -546,96 +568,99 @@ export async function generateImageOpenAI(prompt, options = {}) {
546
568
  * @returns {Promise<Object>} Generated image data
547
569
  */
548
570
  export async function generateImageFlux2(prompt, options = {}) {
549
- const {
550
- model = 'flux2-pro',
551
- size = 'square',
552
- steps = 28,
553
- guidance = 3.5,
554
- referenceImages = [],
555
- maxRetries = 3
556
- } = options;
557
-
558
- const apiKey = process.env.FAL_KEY || process.env.BFL_API_KEY;
559
- if (!apiKey) {
560
- throw new Error('FAL_KEY not configured. Get your key from https://fal.ai/dashboard/keys');
561
- }
562
-
563
- // Import fal.ai client
564
- const { fal } = await import('@fal-ai/client');
565
- fal.config({ credentials: apiKey });
566
-
567
- // Get endpoint for model
568
- const endpoint = FLUX2_MODELS[model] || FLUX2_MODELS['flux2-pro'];
569
-
570
- // Build input based on model capabilities
571
- const input = {
572
- prompt: prompt,
573
- image_size: size === '1024x1024' ? 'square' : size,
574
- num_images: 1
575
- };
576
-
577
- // FLUX 2 Flex supports custom parameters
578
- if (model === 'flux2-flex') {
579
- input.num_inference_steps = steps;
580
- input.guidance_scale = guidance;
581
- }
582
-
583
- // Add reference images if provided (FLUX 2 multi-reference feature)
584
- if (referenceImages.length > 0) {
585
- input.reference_images = referenceImages.slice(0, 10); // Max 10
586
- }
587
-
588
- let lastError;
589
- for (let attempt = 1; attempt <= maxRetries; attempt++) {
590
- try {
591
- const result = await fal.subscribe(endpoint, {
592
- input,
593
- logs: false
594
- });
595
-
596
- // Extract image from result
597
- let imageUrl;
598
- let contentType = 'image/png';
599
-
600
- if (result.data?.images?.[0]) {
601
- imageUrl = result.data.images[0].url;
602
- contentType = result.data.images[0].content_type || 'image/png';
603
- } else if (result.images?.[0]) {
604
- imageUrl = result.images[0].url;
605
- contentType = result.images[0].content_type || 'image/png';
606
- } else if (result.image?.url) {
607
- imageUrl = result.image.url;
608
- }
609
-
610
- if (imageUrl) {
611
- // Fetch and convert to base64
612
- const imageResponse = await fetch(imageUrl);
613
- const imageBuffer = await imageResponse.arrayBuffer();
614
- const base64Data = Buffer.from(imageBuffer).toString('base64');
615
-
616
- return {
617
- data: base64Data,
618
- mimeType: contentType,
619
- service: 'flux2',
620
- model: model,
621
- cost: PRICING[model.replace('-', '_')] || PRICING.flux2_pro
622
- };
623
- }
624
-
625
- throw new Error('No image data in FLUX 2 response');
626
-
627
- } catch (error) {
628
- lastError = error;
629
-
630
- if (attempt < maxRetries) {
631
- const backoff = Math.pow(2, attempt) * 1000;
632
- console.log(`⚠️ FLUX 2 attempt ${attempt} failed: ${error.message}. Retrying in ${backoff / 1000}s...`);
633
- await sleep(backoff);
634
- }
635
- }
636
- }
637
-
638
- throw lastError;
571
+ const {
572
+ model = "flux2-pro",
573
+ size = "square",
574
+ steps = 28,
575
+ guidance = 3.5,
576
+ referenceImages = [],
577
+ maxRetries = 3,
578
+ } = options;
579
+
580
+ const apiKey = process.env.FAL_KEY || process.env.BFL_API_KEY;
581
+ if (!apiKey) {
582
+ throw new Error(
583
+ "FAL_KEY not configured. Get your key from https://fal.ai/dashboard/keys",
584
+ );
585
+ }
586
+
587
+ // Import fal.ai client
588
+ const { fal } = await import("@fal-ai/client");
589
+ fal.config({ credentials: apiKey });
590
+
591
+ // Get endpoint for model
592
+ const endpoint = FLUX2_MODELS[model] || FLUX2_MODELS["flux2-pro"];
593
+
594
+ // Build input based on model capabilities
595
+ const input = {
596
+ prompt: prompt,
597
+ image_size: size === "1024x1024" ? "square" : size,
598
+ num_images: 1,
599
+ };
600
+
601
+ // FLUX 2 Flex supports custom parameters
602
+ if (model === "flux2-flex") {
603
+ input.num_inference_steps = steps;
604
+ input.guidance_scale = guidance;
605
+ }
606
+
607
+ // Add reference images if provided (FLUX 2 multi-reference feature)
608
+ if (referenceImages.length > 0) {
609
+ input.reference_images = referenceImages.slice(0, 10); // Max 10
610
+ }
611
+
612
+ let lastError;
613
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
614
+ try {
615
+ const result = await fal.subscribe(endpoint, {
616
+ input,
617
+ logs: false,
618
+ });
619
+
620
+ // Extract image from result
621
+ let imageUrl;
622
+ let contentType = "image/png";
623
+
624
+ if (result.data?.images?.[0]) {
625
+ imageUrl = result.data.images[0].url;
626
+ contentType = result.data.images[0].content_type || "image/png";
627
+ } else if (result.images?.[0]) {
628
+ imageUrl = result.images[0].url;
629
+ contentType = result.images[0].content_type || "image/png";
630
+ } else if (result.image?.url) {
631
+ imageUrl = result.image.url;
632
+ }
633
+
634
+ if (imageUrl) {
635
+ // Fetch and convert to base64
636
+ const imageResponse = await fetch(imageUrl);
637
+ const imageBuffer = await imageResponse.arrayBuffer();
638
+ const base64Data = Buffer.from(imageBuffer).toString("base64");
639
+
640
+ return {
641
+ data: base64Data,
642
+ mimeType: contentType,
643
+ service: "flux2",
644
+ model: model,
645
+ cost: PRICING[model.replace("-", "_")] || PRICING.flux2_pro,
646
+ };
647
+ }
648
+
649
+ throw new Error("No image data in FLUX 2 response");
650
+ } catch (error) {
651
+ lastError = error;
652
+
653
+ if (attempt < maxRetries) {
654
+ const backoff = Math.pow(2, attempt) * 1000;
655
+ console.log(
656
+ `⚠️ FLUX 2 attempt ${attempt} failed: ${error.message}. Retrying in ${backoff / 1000}s...`,
657
+ );
658
+ await sleep(backoff);
659
+ }
660
+ }
661
+ }
662
+
663
+ throw lastError;
639
664
  }
640
665
 
641
666
  /**
@@ -650,75 +675,74 @@ export async function generateImageFlux2(prompt, options = {}) {
650
675
  * @returns {Promise<Object>} Generated image data
651
676
  */
652
677
  export async function generateImageFal(prompt, options = {}) {
653
- const {
654
- model = 'flux-pro',
655
- size = '1024x1024',
656
- maxRetries = 3
657
- } = options;
658
-
659
- const apiKey = process.env.FAL_KEY;
660
- if (!apiKey) {
661
- throw new Error('FAL_KEY not configured. Get your key from https://fal.ai/dashboard/keys');
662
- }
663
-
664
- const { fal } = await import('@fal-ai/client');
665
- fal.config({ credentials: apiKey });
666
-
667
- const endpoint = FLUX2_MODELS[model] || FLUX2_MODELS['flux-pro'];
668
-
669
- let lastError;
670
- for (let attempt = 1; attempt <= maxRetries; attempt++) {
671
- try {
672
- const result = await fal.subscribe(endpoint, {
673
- input: {
674
- prompt: prompt,
675
- image_size: size === '1024x1024' ? 'square' : 'landscape',
676
- num_images: 1
677
- },
678
- logs: false
679
- });
680
-
681
- let imageUrl;
682
- let contentType = 'image/png';
683
-
684
- if (result.data?.images?.[0]) {
685
- imageUrl = result.data.images[0].url;
686
- contentType = result.data.images[0].content_type || 'image/png';
687
- } else if (result.images?.[0]) {
688
- imageUrl = result.images[0].url;
689
- contentType = result.images[0].content_type || 'image/png';
690
- } else if (result.image?.url) {
691
- imageUrl = result.image.url;
692
- }
693
-
694
- if (imageUrl) {
695
- const imageResponse = await fetch(imageUrl);
696
- const imageBuffer = await imageResponse.arrayBuffer();
697
- const base64Data = Buffer.from(imageBuffer).toString('base64');
698
-
699
- return {
700
- data: base64Data,
701
- mimeType: contentType,
702
- service: 'fal',
703
- model: model,
704
- cost: PRICING[model.replace('-', '_')] || PRICING.flux_pro
705
- };
706
- }
707
-
708
- throw new Error('No image data in Fal.ai response');
709
-
710
- } catch (error) {
711
- lastError = error;
712
-
713
- if (attempt < maxRetries) {
714
- const backoff = Math.pow(2, attempt) * 1000;
715
- console.log(`⚠️ Fal.ai attempt ${attempt} failed: ${error.message}. Retrying in ${backoff / 1000}s...`);
716
- await sleep(backoff);
717
- }
718
- }
719
- }
720
-
721
- throw lastError;
678
+ const { model = "flux-pro", size = "1024x1024", maxRetries = 3 } = options;
679
+
680
+ const apiKey = process.env.FAL_KEY;
681
+ if (!apiKey) {
682
+ throw new Error(
683
+ "FAL_KEY not configured. Get your key from https://fal.ai/dashboard/keys",
684
+ );
685
+ }
686
+
687
+ const { fal } = await import("@fal-ai/client");
688
+ fal.config({ credentials: apiKey });
689
+
690
+ const endpoint = FLUX2_MODELS[model] || FLUX2_MODELS["flux-pro"];
691
+
692
+ let lastError;
693
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
694
+ try {
695
+ const result = await fal.subscribe(endpoint, {
696
+ input: {
697
+ prompt: prompt,
698
+ image_size: size === "1024x1024" ? "square" : "landscape",
699
+ num_images: 1,
700
+ },
701
+ logs: false,
702
+ });
703
+
704
+ let imageUrl;
705
+ let contentType = "image/png";
706
+
707
+ if (result.data?.images?.[0]) {
708
+ imageUrl = result.data.images[0].url;
709
+ contentType = result.data.images[0].content_type || "image/png";
710
+ } else if (result.images?.[0]) {
711
+ imageUrl = result.images[0].url;
712
+ contentType = result.images[0].content_type || "image/png";
713
+ } else if (result.image?.url) {
714
+ imageUrl = result.image.url;
715
+ }
716
+
717
+ if (imageUrl) {
718
+ const imageResponse = await fetch(imageUrl);
719
+ const imageBuffer = await imageResponse.arrayBuffer();
720
+ const base64Data = Buffer.from(imageBuffer).toString("base64");
721
+
722
+ return {
723
+ data: base64Data,
724
+ mimeType: contentType,
725
+ service: "fal",
726
+ model: model,
727
+ cost: PRICING[model.replace("-", "_")] || PRICING.flux_pro,
728
+ };
729
+ }
730
+
731
+ throw new Error("No image data in Fal.ai response");
732
+ } catch (error) {
733
+ lastError = error;
734
+
735
+ if (attempt < maxRetries) {
736
+ const backoff = Math.pow(2, attempt) * 1000;
737
+ console.log(
738
+ `⚠️ Fal.ai attempt ${attempt} failed: ${error.message}. Retrying in ${backoff / 1000}s...`,
739
+ );
740
+ await sleep(backoff);
741
+ }
742
+ }
743
+ }
744
+
745
+ throw lastError;
722
746
  }
723
747
 
724
748
  /**
@@ -734,65 +758,68 @@ export async function generateImageFal(prompt, options = {}) {
734
758
  * @returns {Promise<Object>} Generated video data
735
759
  */
736
760
  export async function generateVideoVeo3(prompt, options = {}) {
737
- const {
738
- model = 'veo3',
739
- duration = 5,
740
- aspectRatio = '16:9',
741
- maxRetries = 3
742
- } = options;
743
-
744
- const apiKey = process.env.FAL_KEY;
745
- if (!apiKey) {
746
- throw new Error('FAL_KEY not configured. Get your key from https://fal.ai/dashboard/keys');
747
- }
748
-
749
- const { fal } = await import('@fal-ai/client');
750
- fal.config({ credentials: apiKey });
751
-
752
- const endpoint = model === 'veo3-fast' ? 'fal-ai/veo3-fast' : 'fal-ai/veo3';
753
-
754
- let lastError;
755
- for (let attempt = 1; attempt <= maxRetries; attempt++) {
756
- try {
757
- const result = await fal.subscribe(endpoint, {
758
- input: {
759
- prompt: prompt,
760
- duration: Math.min(duration, 10),
761
- aspect_ratio: aspectRatio
762
- },
763
- logs: false
764
- });
765
-
766
- if (result.video?.url) {
767
- const videoResponse = await fetch(result.video.url);
768
- const videoBuffer = await videoResponse.arrayBuffer();
769
- const base64Data = Buffer.from(videoBuffer).toString('base64');
770
-
771
- return {
772
- data: base64Data,
773
- url: result.video.url,
774
- mimeType: 'video/mp4',
775
- service: 'veo3',
776
- model: model,
777
- cost: PRICING.veo3 * duration,
778
- duration: duration
779
- };
780
- }
781
-
782
- throw new Error('No video data in Veo 3 response');
783
-
784
- } catch (error) {
785
- lastError = error;
786
-
787
- if (attempt < maxRetries) {
788
- const backoff = Math.pow(2, attempt) * 1000;
789
- console.log(`⚠️ Veo 3 attempt ${attempt} failed: ${error.message}. Retrying in ${backoff / 1000}s...`);
790
- await sleep(backoff);
791
- }
792
- }
793
- }
794
-
795
- throw lastError;
761
+ const {
762
+ model = "veo3",
763
+ duration = 5,
764
+ aspectRatio = "16:9",
765
+ maxRetries = 3,
766
+ } = options;
767
+
768
+ const apiKey = process.env.FAL_KEY;
769
+ if (!apiKey) {
770
+ throw new Error(
771
+ "FAL_KEY not configured. Get your key from https://fal.ai/dashboard/keys",
772
+ );
773
+ }
774
+
775
+ const { fal } = await import("@fal-ai/client");
776
+ fal.config({ credentials: apiKey });
777
+
778
+ const endpoint = model === "veo3-fast" ? "fal-ai/veo3-fast" : "fal-ai/veo3";
779
+
780
+ let lastError;
781
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
782
+ try {
783
+ const result = await fal.subscribe(endpoint, {
784
+ input: {
785
+ prompt: prompt,
786
+ duration: Math.min(duration, 10),
787
+ aspect_ratio: aspectRatio,
788
+ },
789
+ logs: false,
790
+ });
791
+
792
+ if (result.video?.url) {
793
+ const videoResponse = await fetch(result.video.url);
794
+ const videoBuffer = await videoResponse.arrayBuffer();
795
+ const base64Data = Buffer.from(videoBuffer).toString("base64");
796
+
797
+ return {
798
+ data: base64Data,
799
+ url: result.video.url,
800
+ mimeType: "video/mp4",
801
+ service: "veo3",
802
+ model: model,
803
+ cost: PRICING.veo3 * duration,
804
+ duration: duration,
805
+ };
806
+ }
807
+
808
+ throw new Error("No video data in Veo 3 response");
809
+ } catch (error) {
810
+ lastError = error;
811
+
812
+ if (attempt < maxRetries) {
813
+ const backoff = Math.pow(2, attempt) * 1000;
814
+ console.log(
815
+ `⚠️ Veo 3 attempt ${attempt} failed: ${error.message}. Retrying in ${backoff / 1000}s...`,
816
+ );
817
+ await sleep(backoff);
818
+ }
819
+ }
820
+ }
821
+
822
+ throw lastError;
796
823
  }
797
824
 
798
825
  /**
@@ -802,14 +829,14 @@ export async function generateVideoVeo3(prompt, options = {}) {
802
829
  * @returns {Promise<Buffer>} Image buffer
803
830
  */
804
831
  export async function downloadImage(url) {
805
- const response = await fetch(url);
832
+ const response = await fetch(url);
806
833
 
807
- if (!response.ok) {
808
- throw new Error(`Failed to download image: ${response.status}`);
809
- }
834
+ if (!response.ok) {
835
+ throw new Error(`Failed to download image: ${response.status}`);
836
+ }
810
837
 
811
- const arrayBuffer = await response.arrayBuffer();
812
- return Buffer.from(arrayBuffer);
838
+ const arrayBuffer = await response.arrayBuffer();
839
+ return Buffer.from(arrayBuffer);
813
840
  }
814
841
 
815
842
  /**
@@ -823,67 +850,78 @@ export async function downloadImage(url) {
823
850
  * @returns {Promise<Object>} Generated image data with buffer
824
851
  */
825
852
  export async function generateImage(prompt, options = {}) {
826
- const { preferredService, type = 'general', ...serviceOptions } = options;
827
-
828
- // Select service
829
- const defaultService = process.env.VISUAL_DEFAULT_SERVICE || 'gemini';
830
- const service = selectBestService(preferredService || defaultService);
831
-
832
- console.log(`🎨 Generating ${type} image using ${service}...`);
833
-
834
- // Enhance prompt based on image type
835
- const enhancedPrompt = enhancePrompt(prompt, type);
836
-
837
- // Generate based on service
838
- let result;
839
- switch (service) {
840
- case 'gemini':
841
- result = await generateImageGemini(enhancedPrompt, serviceOptions);
842
- break;
843
-
844
- case 'imagen':
845
- result = await generateImageImagen(enhancedPrompt, serviceOptions);
846
- break;
847
-
848
- // OpenAI GPT Image models (SOTA)
849
- case 'gpt-image-1.5':
850
- case 'gpt-image-1':
851
- case 'gpt-image-1-mini':
852
- result = await generateImageOpenAI(enhancedPrompt, { ...serviceOptions, model: service });
853
- break;
854
-
855
- case 'flux2-pro':
856
- case 'flux2-flex':
857
- case 'flux2-dev':
858
- result = await generateImageFlux2(enhancedPrompt, { ...serviceOptions, model: service });
859
- break;
860
-
861
- case 'flux':
862
- case 'flux-pro':
863
- case 'flux-dev':
864
- result = await generateImageFal(enhancedPrompt, { ...serviceOptions, model: service });
865
- break;
866
-
867
- default:
868
- throw new Error(`Unknown service: ${service}`);
869
- }
870
-
871
- // Convert to buffer
872
- let buffer;
873
- if (result.data) {
874
- buffer = Buffer.from(result.data, 'base64');
875
- } else if (result.url) {
876
- buffer = await downloadImage(result.url);
877
- } else {
878
- throw new Error('No image data or URL in response');
879
- }
880
-
881
- return {
882
- ...result,
883
- buffer,
884
- prompt: enhancedPrompt,
885
- originalPrompt: prompt
886
- };
853
+ const { preferredService, type = "general", ...serviceOptions } = options;
854
+
855
+ // Select service
856
+ const defaultService = process.env.VISUAL_DEFAULT_SERVICE || "gemini";
857
+ const service = selectBestService(preferredService || defaultService);
858
+
859
+ const modelInfo = serviceOptions.model ? ` (${serviceOptions.model})` : "";
860
+ console.log(`🎨 Generating ${type} image using ${modelInfo}...`);
861
+
862
+ // Enhance prompt based on image type
863
+ const enhancedPrompt = enhancePrompt(prompt, type);
864
+
865
+ // Generate based on service
866
+ let result;
867
+ switch (service) {
868
+ case "gemini":
869
+ result = await generateImageGemini(enhancedPrompt, serviceOptions);
870
+ break;
871
+
872
+ case "imagen":
873
+ result = await generateImageImagen(enhancedPrompt, serviceOptions);
874
+ break;
875
+
876
+ // OpenAI GPT Image models (SOTA)
877
+ case "gpt-image-1.5":
878
+ case "gpt-image-1":
879
+ case "gpt-image-1-mini":
880
+ result = await generateImageOpenAI(enhancedPrompt, {
881
+ ...serviceOptions,
882
+ model: service,
883
+ });
884
+ break;
885
+
886
+ case "flux2-pro":
887
+ case "flux2-flex":
888
+ case "flux2-dev":
889
+ result = await generateImageFlux2(enhancedPrompt, {
890
+ ...serviceOptions,
891
+ model: service,
892
+ });
893
+ break;
894
+
895
+ case "flux":
896
+ case "flux-pro":
897
+ case "flux-dev":
898
+ case "nano-banana-pro":
899
+ result = await generateImageFal(enhancedPrompt, {
900
+ ...serviceOptions,
901
+ model: serviceOptions.model || service,
902
+ });
903
+ break;
904
+
905
+ default:
906
+ throw new Error(`Unknown service: ${service}`);
907
+ }
908
+
909
+ // Convert to buffer
910
+ let buffer;
911
+ if (result.data) {
912
+ buffer = Buffer.from(result.data, "base64");
913
+ } else if (result.url) {
914
+ buffer = await downloadImage(result.url);
915
+ } else {
916
+ throw new Error("No image data or URL in response");
917
+ }
918
+
919
+ return {
920
+ ...result,
921
+ buffer,
922
+ prompt: enhancedPrompt,
923
+ originalPrompt: prompt,
924
+ };
887
925
  }
888
926
 
889
927
  /**
@@ -894,26 +932,40 @@ export async function generateImage(prompt, options = {}) {
894
932
  * @returns {Promise<Object>} Generated video data
895
933
  */
896
934
  export async function generateVideo(prompt, options = {}) {
897
- const { preferredService = 'veo3', ...serviceOptions } = options;
935
+ const { preferredService = "veo3", ...serviceOptions } = options;
898
936
 
899
- console.log(`🎬 Generating video using ${preferredService}...`);
937
+ console.log(`🎬 Generating video using ${preferredService}...`);
900
938
 
901
- return await generateVideoVeo3(prompt, serviceOptions);
939
+ return await generateVideoVeo3(prompt, serviceOptions);
902
940
  }
903
941
 
904
942
  /**
905
943
  * Enhance prompt based on image type
906
944
  *
907
945
  * @param {string} prompt - Original prompt
908
- * @param {string} type - Image type (hero, illustration, diagram, screenshot)
946
+ * @param {string} type - Image type (hero, illustration, diagram, infographic-*, etc.)
909
947
  * @returns {string} Enhanced prompt
910
948
  */
911
949
  function enhancePrompt(prompt, type) {
912
950
  const enhancements = {
951
+ // Standard types
913
952
  hero: 'Professional hero image, high quality, visually striking, suitable for article header:',
914
953
  illustration: 'Clean illustration, professional style, clear and informative:',
915
954
  diagram: 'Technical diagram, clear labels, professional design, easy to understand:',
916
955
  screenshot: 'Professional screenshot, clean interface, high resolution:',
956
+
957
+ // Infographic types (optimized for GPT Image 1.5 text rendering)
958
+ 'infographic-data': 'Clean data visualization infographic with clear large typography, color-coded sections, modern flat design. Include prominent title area, 3-5 data callouts with large numbers, clean minimal layout, professional business style:',
959
+ 'infographic-process': 'Step-by-step process infographic with clearly numbered steps, simple icons for each step, connecting arrows between steps, clean modern design, horizontal or vertical flow layout, each step clearly labeled with action text:',
960
+ 'infographic-comparison': 'Side-by-side comparison infographic with two distinct columns, clear header labels, aligned comparison points, checkmarks for advantages, X marks for disadvantages, professional business style, easy to scan layout:',
961
+ 'infographic-timeline': 'Horizontal timeline infographic with dated milestone markers, small icons at each event point, connecting timeline line, clean modern design, clear date labels, brief event descriptions:',
962
+
963
+ // Technical diagram types
964
+ 'architecture-diagram': 'Technical system architecture diagram with labeled component boxes, directional connection arrows, cloud/server/database icons where appropriate, clear legend area, isometric or clean flat technical illustration style:',
965
+ 'flowchart': 'Professional flowchart with standard shapes - diamonds for decisions, rectangles for processes, ovals for start/end points, clear yes/no branching paths, labeled arrows, clean professional style:',
966
+ 'sequence-diagram': 'Technical sequence diagram showing component interactions with participant boxes at top, vertical lifelines, horizontal arrows with action labels, activation boxes, clean UML-style presentation:',
967
+
968
+ // Default
917
969
  general: 'High quality image, professional style:'
918
970
  };
919
971
 
@@ -921,6 +973,112 @@ function enhancePrompt(prompt, type) {
921
973
  return `${prefix} ${prompt}`;
922
974
  }
923
975
 
976
+ /**
977
+ * Get recommended service for image type
978
+ * Uses user's configured default service if available, otherwise falls back
979
+ * to first available service. Both Gemini and GPT Image 1.5 are excellent
980
+ * for text rendering - let the user choose based on their preference.
981
+ *
982
+ * @param {string} type - Image type (used for logging/future enhancements)
983
+ * @param {Array} availableServices - Optional pre-computed available services
984
+ * @returns {string} Recommended service name
985
+ */
986
+ export function getRecommendedServiceForType(type, availableServices = null) {
987
+ // Get available services if not provided
988
+ if (!availableServices) {
989
+ const validation = validateAPIKeys();
990
+ availableServices = validation.availableServices;
991
+ }
992
+
993
+ // Get user's configured default service
994
+ const userDefault = process.env.VISUAL_DEFAULT_SERVICE;
995
+
996
+ // If user has a configured default and it's available, use it
997
+ if (userDefault && availableServices.includes(userDefault)) {
998
+ return userDefault;
999
+ }
1000
+
1001
+ // Otherwise return first available service
1002
+ if (availableServices.length > 0) {
1003
+ return availableServices[0];
1004
+ }
1005
+
1006
+ // Fallback (will likely fail without API keys, but maintains API compatibility)
1007
+ return 'gemini';
1008
+ }
1009
+
+/**
+ * Build structured infographic prompt from data
+ *
+ * @param {Object} config - Infographic configuration
+ * @param {string} config.type - Infographic type
+ * @param {string} config.title - Infographic title
+ * @param {Array} config.data - Data points or steps
+ * @param {string} config.style - Style preference
+ * @returns {string} Structured prompt
+ */
+export function buildInfographicPrompt(config) {
+  const { type, title, data = [], style = 'modern flat design' } = config;
+
+  let prompt = '';
+
+  switch (type) {
+    case 'infographic-data':
+      prompt = `Data visualization infographic titled "${title}". `;
+      prompt += `Display these metrics prominently: `;
+      prompt += data.map(d => `${d.label}: ${d.value}`).join(', ') + '. ';
+      prompt += `Style: ${style}, clear typography, color-coded sections.`;
+      break;
+
+    case 'infographic-process':
+      prompt = `Process flow infographic titled "${title}". `;
+      prompt += `Show these steps in sequence: `;
+      prompt += data.map((step, i) => `Step ${i + 1}: ${step}`).join('; ') + '. ';
+      prompt += `Style: numbered steps with icons, connecting arrows, ${style}.`;
+      break;
+
+    case 'infographic-comparison':
+      prompt = `Comparison infographic titled "${title}". `;
+      prompt += `Compare these aspects: `;
+      prompt += data.map(d => `${d.category} - Option A: ${d.optionA}, Option B: ${d.optionB}`).join('; ') + '. ';
+      prompt += `Style: two-column layout, checkmarks for strengths, ${style}.`;
+      break;
+
+    case 'infographic-timeline':
+      prompt = `Timeline infographic titled "${title}". `;
+      prompt += `Show these milestones: `;
+      prompt += data.map(d => `${d.date}: ${d.event}`).join('; ') + '. ';
+      prompt += `Style: horizontal timeline, dated markers, ${style}.`;
+      break;
+
+    default:
+      prompt = `Infographic: ${title}. ${data.join(', ')}. Style: ${style}.`;
+  }
+
+  return prompt;
+}
+
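A minimal usage sketch for the builder above, with made-up title, steps, and style; the expected data shape is inferred from the switch cases (an array of step strings for 'infographic-process', objects with label/value for 'infographic-data', and so on):

    // Sketch only: values are invented for illustration
    import { buildInfographicPrompt } from './src/lib/visual-generation-utils.js';

    const prompt = buildInfographicPrompt({
      type: 'infographic-process',
      title: 'Content Publishing Pipeline',
      data: ['Draft article', 'Generate visuals', 'Publish to WordPress'],
      style: 'modern flat design',
    });
    // "Process flow infographic titled "Content Publishing Pipeline". Show these steps
    //  in sequence: Step 1: Draft article; Step 2: Generate visuals; Step 3: Publish to
    //  WordPress. Style: numbered steps with icons, connecting arrows, modern flat design."
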
+/**
+ * Build structured architecture diagram prompt
+ *
+ * @param {Object} config - Diagram configuration
+ * @param {string} config.title - Diagram title
+ * @param {Array} config.components - System components
+ * @param {Array} config.connections - Component connections
+ * @param {string} config.style - Style preference
+ * @returns {string} Structured prompt
+ */
+export function buildArchitectureDiagramPrompt(config) {
+  const { title, components = [], connections = [], style = 'isometric technical' } = config;
+
+  let prompt = `Technical architecture diagram: "${title}". `;
+  prompt += `Components: ${components.join(', ')}. `;
+  prompt += `Connections: ${connections.join('; ')}. `;
+  prompt += `Style: ${style}, labeled boxes, directional arrows, clean professional design.`;

+  return prompt;
+}
+
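A comparable sketch for the architecture-diagram builder, again with invented component and connection values (not taken from the package):

    // Sketch only: components and connections are illustrative
    import { buildArchitectureDiagramPrompt } from './src/lib/visual-generation-utils.js';

    const diagramPrompt = buildArchitectureDiagramPrompt({
      title: 'CLI Publishing Flow',
      components: ['CLI', 'Content Generator', 'WordPress Client'],
      connections: ['CLI -> Content Generator', 'Content Generator -> WordPress Client'],
      style: 'isometric technical',
    });
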
 /**
  * Sleep utility for retry backoff
  *
@@ -928,7 +1086,7 @@ function enhancePrompt(prompt, type) {
  * @returns {Promise<void>}
  */
 function sleep(ms) {
-  return new Promise(resolve => setTimeout(resolve, ms));
+  return new Promise((resolve) => setTimeout(resolve, ms));
 }

 /**
@@ -938,127 +1096,158 @@ function sleep(ms) {
  * @returns {Object} Service information
  */
 export function getServiceInfo(service) {
-  const info = {
-    gemini: {
-      name: 'Gemini 3.0 Pro Image',
-      nickname: 'Nano Banana',
-      speed: 'Fast',
-      cost: '$0.02/image',
-      quality: 'Good',
-      bestFor: 'Quick hero images, high volume',
-      provider: 'Google Gemini API (API Key)',
-      model: GEMINI_IMAGE_MODEL
-    },
-    imagen: {
-      name: 'Imagen 3',
-      nickname: 'Premium Quality',
-      speed: 'Medium',
-      cost: '$0.03/image',
-      quality: 'Excellent',
-      bestFor: 'Premium hero images, photorealistic',
-      provider: 'Google Gemini API (API Key)',
-      model: GEMINI_IMAGEN_MODEL
-    },
-    // OpenAI GPT Image Models (SOTA)
-    'gpt-image-1.5': {
-      name: 'GPT Image 1.5',
-      nickname: 'State-of-the-Art',
-      speed: 'Medium',
-      cost: '$0.02-$0.19/image',
-      quality: 'Outstanding',
-      bestFor: 'Best text rendering, highest quality, transparent backgrounds',
-      provider: 'OpenAI',
-      model: 'gpt-image-1.5',
-      features: ['Best-in-class text rendering', 'Transparency support', 'Multiple quality tiers', 'WebP/JPEG/PNG output'],
-      qualityTiers: {
-        low: '$0.02/image (~272 tokens)',
-        medium: '$0.07/image (~1056 tokens)',
-        high: '$0.19/image (~4160 tokens)'
-      }
-    },
-    'gpt-image-1': {
-      name: 'GPT Image 1',
-      nickname: 'Premium Quality',
-      speed: 'Medium',
-      cost: '$0.19/image',
-      quality: 'Outstanding',
-      bestFor: 'High quality images, text rendering',
-      provider: 'OpenAI',
-      model: 'gpt-image-1',
-      features: ['Excellent text rendering', 'Multiple sizes', 'Transparency support']
-    },
-    'gpt-image-1-mini': {
-      name: 'GPT Image 1 Mini',
-      nickname: 'Cost-Effective',
-      speed: 'Fast',
-      cost: '$0.02/image',
-      quality: 'Good',
-      bestFor: 'Quick images, budget-conscious, high volume',
-      provider: 'OpenAI',
-      model: 'gpt-image-1-mini',
-      features: ['Fast generation', 'Low cost', 'Good quality']
-    },
-    'flux2-pro': {
-      name: 'FLUX 2 Pro',
-      nickname: 'State-of-the-Art',
-      speed: 'Fast',
-      cost: '$0.05/image',
-      quality: 'Outstanding',
-      bestFor: 'Best quality, fastest generation, lowest cost',
-      provider: 'Black Forest Labs (Fal.ai)',
-      model: 'flux-2/pro',
-      features: ['Multi-reference (up to 10 images)', 'Enhanced photorealism', 'Complex typography', 'UI mockups']
-    },
-    'flux2-flex': {
-      name: 'FLUX 2 Flex',
-      nickname: 'Developer Control',
-      speed: 'Medium',
-      cost: '$0.04/image',
-      quality: 'Outstanding',
-      bestFor: 'Custom parameters, fine-tuned control',
-      provider: 'Black Forest Labs (Fal.ai)',
-      model: 'flux-2/flex',
-      features: ['Custom inference steps', 'Guidance scale control', 'Developer-friendly']
-    },
-    'flux2-dev': {
-      name: 'FLUX 2 Dev',
-      nickname: 'Open-Weight',
-      speed: 'Fast',
-      cost: '$0.025/image',
-      quality: 'Excellent',
-      bestFor: 'Developer workflows, local deployment option',
-      provider: 'Black Forest Labs (Fal.ai)',
-      model: 'flux-2/dev',
-      features: ['32B parameters', 'Open-weight model', 'Local deployment available']
-    },
-    'flux-pro': {
-      name: 'FLUX Pro v1.1 Ultra',
-      nickname: 'Legacy Premium',
-      speed: 'Medium',
-      cost: '$0.06/image',
-      quality: 'Outstanding',
-      bestFor: 'Premium artistic images (legacy)',
-      provider: 'Fal.ai'
-    },
-    'flux-dev': {
-      name: 'FLUX Dev',
-      nickname: 'Legacy Developer',
-      speed: 'Fast',
-      cost: '$0.025/MP',
-      quality: 'Excellent',
-      bestFor: 'Developer workflows (legacy)',
-      provider: 'Fal.ai'
-    },
-    veo3: {
-      name: 'Veo 3',
-      nickname: 'Cutting Edge Video',
-      speed: 'Slow',
-      cost: '$0.40/second',
-      quality: 'Outstanding',
-      bestFor: 'Premium video content, latest features',
-      provider: 'Google (Fal.ai)'
-    }
-  };
-
-  return info[service] || null;
+  const info = {
+    gemini: {
+      name: "Gemini 3.0 Pro Image",
+      nickname: "Nano Banana",
+      speed: "Fast",
+      cost: "$0.02/image",
+      quality: "Good",
+      bestFor: "Quick hero images, high volume",
+      provider: "Google Gemini API (API Key)",
+      model: GEMINI_IMAGE_MODEL,
+    },
+    imagen: {
+      name: "Imagen 3",
+      nickname: "Premium Quality",
+      speed: "Medium",
+      cost: "$0.03/image",
+      quality: "Excellent",
+      bestFor: "Premium hero images, photorealistic",
+      provider: "Google Gemini API (API Key)",
+      model: GEMINI_IMAGEN_MODEL,
+    },
+    // OpenAI GPT Image Models (SOTA)
+    "gpt-image-1.5": {
+      name: "GPT Image 1.5",
+      nickname: "State-of-the-Art",
+      speed: "Medium",
+      cost: "$0.02-$0.19/image",
+      quality: "Outstanding",
+      bestFor: "Best text rendering, highest quality, transparent backgrounds",
+      provider: "OpenAI",
+      model: "gpt-image-1.5",
+      features: [
+        "Best-in-class text rendering",
+        "Transparency support",
+        "Multiple quality tiers",
+        "WebP/JPEG/PNG output",
+      ],
+      qualityTiers: {
+        low: "$0.02/image (~272 tokens)",
+        medium: "$0.07/image (~1056 tokens)",
+        high: "$0.19/image (~4160 tokens)",
+      },
+    },
+    "gpt-image-1": {
+      name: "GPT Image 1",
+      nickname: "Premium Quality",
+      speed: "Medium",
+      cost: "$0.19/image",
+      quality: "Outstanding",
+      bestFor: "High quality images, text rendering",
+      provider: "OpenAI",
+      model: "gpt-image-1",
+      features: [
+        "Excellent text rendering",
+        "Multiple sizes",
+        "Transparency support",
+      ],
+    },
+    "gpt-image-1-mini": {
+      name: "GPT Image 1 Mini",
+      nickname: "Cost-Effective",
+      speed: "Fast",
+      cost: "$0.02/image",
+      quality: "Good",
+      bestFor: "Quick images, budget-conscious, high volume",
+      provider: "OpenAI",
+      model: "gpt-image-1-mini",
+      features: ["Fast generation", "Low cost", "Good quality"],
+    },
+    "flux2-pro": {
+      name: "FLUX 2 Pro",
+      nickname: "State-of-the-Art",
+      speed: "Fast",
+      cost: "$0.05/image",
+      quality: "Outstanding",
+      bestFor: "Best quality, fastest generation, lowest cost",
+      provider: "Black Forest Labs (Fal.ai)",
+      model: "flux-2/pro",
+      features: [
+        "Multi-reference (up to 10 images)",
+        "Enhanced photorealism",
+        "Complex typography",
+        "UI mockups",
+      ],
+    },
+    "flux2-flex": {
+      name: "FLUX 2 Flex",
+      nickname: "Developer Control",
+      speed: "Medium",
+      cost: "$0.04/image",
+      quality: "Outstanding",
+      bestFor: "Custom parameters, fine-tuned control",
+      provider: "Black Forest Labs (Fal.ai)",
+      model: "flux-2/flex",
+      features: [
+        "Custom inference steps",
+        "Guidance scale control",
+        "Developer-friendly",
+      ],
+    },
+    "flux2-dev": {
+      name: "FLUX 2 Dev",
+      nickname: "Open-Weight",
+      speed: "Fast",
+      cost: "$0.025/image",
+      quality: "Excellent",
+      bestFor: "Developer workflows, local deployment option",
+      provider: "Black Forest Labs (Fal.ai)",
+      model: "flux-2/dev",
+      features: [
+        "32B parameters",
+        "Open-weight model",
+        "Local deployment available",
+      ],
+    },
+    "flux-pro": {
+      name: "FLUX Pro v1.1 Ultra",
+      nickname: "Legacy Premium",
+      speed: "Medium",
+      cost: "$0.06/image",
+      quality: "Outstanding",
+      bestFor: "Premium artistic images (legacy)",
+      provider: "Fal.ai",
+    },
+    "flux-dev": {
+      name: "FLUX Dev",
+      nickname: "Legacy Developer",
+      speed: "Fast",
+      cost: "$0.025/MP",
+      quality: "Excellent",
+      bestFor: "Developer workflows (legacy)",
+      provider: "Fal.ai",
+    },
+    "nano-banana-pro": {
+      name: "Nano Banana Pro",
+      nickname: "Premium Quality",
+      speed: "Fast",
+      cost: "$0.15/image ($0.30 for 4K)",
+      quality: "Excellent",
+      bestFor: "High-quality branded content, detailed illustrations",
+      provider: "Fal.ai",
+    },
+    veo3: {
+      name: "Veo 3",
+      nickname: "Cutting Edge Video",
+      speed: "Slow",
+      cost: "$0.40/second",
+      quality: "Outstanding",
+      bestFor: "Premium video content, latest features",
+      provider: "Google (Fal.ai)",
+    },
+  };
+
+  return info[service] || null;
 }
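The lookup table above is keyed by service name and returns null for unknown services; a quick sketch of how a caller might surface that metadata (the chosen key is just one of the services listed, and the import path is illustrative):

    // Sketch only: service key and import path are examples, not prescribed usage
    import { getServiceInfo } from './src/lib/visual-generation-utils.js';

    const info = getServiceInfo('nano-banana-pro');
    if (info) {
      console.log(`${info.name} (${info.provider}): ${info.cost} - ${info.bestFor}`);
    } else {
      console.log('Unknown service');
    }
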