iflow-mcp_orion4d-comfyui_mcp 1.0.0__py3-none-any.whl
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as published in their public registries.
- iflow_mcp_orion4d_comfyui_mcp/__init__.py +9 -0
- iflow_mcp_orion4d_comfyui_mcp/__main__.py +8 -0
- iflow_mcp_orion4d_comfyui_mcp/browser_controller.py +123 -0
- iflow_mcp_orion4d_comfyui_mcp/comfyui_client.py +301 -0
- iflow_mcp_orion4d_comfyui_mcp/generate_key.py +158 -0
- iflow_mcp_orion4d_comfyui_mcp/server.py +1027 -0
- iflow_mcp_orion4d_comfyui_mcp-1.0.0.dist-info/METADATA +239 -0
- iflow_mcp_orion4d_comfyui_mcp-1.0.0.dist-info/RECORD +11 -0
- iflow_mcp_orion4d_comfyui_mcp-1.0.0.dist-info/WHEEL +4 -0
- iflow_mcp_orion4d_comfyui_mcp-1.0.0.dist-info/entry_points.txt +2 -0
- iflow_mcp_orion4d_comfyui_mcp-1.0.0.dist-info/licenses/LICENSE +21 -0
iflow_mcp_orion4d_comfyui_mcp/browser_controller.py
@@ -0,0 +1,123 @@
+"""
+Controller for sending commands to the Chrome extension over WebSocket.
+Compatible with the MCP server's ConnectionManager.
+"""
+
+import logging
+from typing import Dict, Any
+
+logger = logging.getLogger(__name__)
+
+class BrowserController:
+    """
+    Controller for sending commands to the Chrome extension.
+    Uses the WebSocket ConnectionManager instead of Playwright.
+    """
+
+    def __init__(self, manager):
+        """
+        Initialize the controller with the WebSocket manager.
+
+        Args:
+            manager: ConnectionManager instance for the WebSocket
+        """
+        self.manager = manager
+        logger.info("BrowserController initialized with WebSocket manager")
+
+    async def click_element(self, selector: str) -> Dict[str, Any]:
+        """
+        Send a click command to the Chrome extension.
+
+        Args:
+            selector: CSS selector of the element
+
+        Returns:
+            dict: Confirmation that the command was sent
+        """
+        command = {
+            "action": "click",
+            "selector": selector
+        }
+
+        await self.manager.send_command(command)
+        logger.info(f"Click command sent: {selector}")
+
+        return {
+            "status": "sent",
+            "action": "click",
+            "selector": selector,
+            "message": f"Command sent to {len(self.manager.active_connections)} extension(s)"
+        }
+
+    async def fill_input(self, selector: str, text: str) -> Dict[str, Any]:
+        """
+        Send a command to fill a text field.
+
+        Args:
+            selector: CSS selector of the field
+            text: Text to insert
+
+        Returns:
+            dict: Confirmation that the command was sent
+        """
+        command = {
+            "action": "fill",
+            "selector": selector,
+            "text": text
+        }
+
+        await self.manager.send_command(command)
+        logger.info(f"Fill command sent: {selector} = '{text[:50]}'")
+
+        return {
+            "status": "sent",
+            "action": "fill",
+            "selector": selector,
+            "text": text,
+            "message": f"Command sent to {len(self.manager.active_connections)} extension(s)"
+        }
+
+    async def get_workflow(self) -> Dict[str, Any]:
+        """
+        Ask the extension to retrieve the current workflow.
+
+        Returns:
+            dict: Confirmation (the workflow is printed in the extension's console)
+        """
+        command = {
+            "action": "get_workflow"
+        }
+
+        await self.manager.send_command(command)
+        logger.info("get_workflow command sent")
+
+        return {
+            "status": "sent",
+            "action": "get_workflow",
+            "message": "The workflow will be shown in the Chrome extension console (F12)",
+            "connections": len(self.manager.active_connections)
+        }
+
+    async def execute_script(self, script: str) -> Dict[str, Any]:
+        """
+        Send arbitrary JavaScript to be executed.
+
+        Args:
+            script: JavaScript code to execute
+
+        Returns:
+            dict: Confirmation that the command was sent
+        """
+        command = {
+            "action": "execute_js",
+            "script": script
+        }
+
+        await self.manager.send_command(command)
+        logger.info(f"JS script sent: {script[:100]}")
+
+        return {
+            "status": "sent",
+            "action": "execute_js",
+            "message": f"Script sent to {len(self.manager.active_connections)} extension(s)"
+        }
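
For orientation, here is a minimal usage sketch of BrowserController. It assumes a manager object exposing an async send_command() coroutine and an active_connections list, as the class expects; DummyManager below is a hypothetical stand-in for the server's ConnectionManager and is not part of the package.

    import asyncio
    from iflow_mcp_orion4d_comfyui_mcp.browser_controller import BrowserController

    class DummyManager:
        """Hypothetical stand-in for the MCP server's ConnectionManager."""
        def __init__(self):
            self.active_connections = []        # would hold live WebSocket connections

        async def send_command(self, command):
            # The real manager broadcasts the command to connected extensions.
            print("would broadcast:", command)

    async def main():
        controller = BrowserController(DummyManager())
        result = await controller.click_element("#queue-button")
        print(result)  # {'status': 'sent', 'action': 'click', ...}

    asyncio.run(main())
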
iflow_mcp_orion4d_comfyui_mcp/comfyui_client.py
@@ -0,0 +1,301 @@
+import requests
+import json
+import time
+import logging
+from pathlib import Path
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("ComfyUIClient")
+
+DEFAULT_MAPPING = {
+    "prompt": ("6", "text"),
+    "width": ("5", "width"),
+    "height": ("5", "height"),
+    "model": ("4", "ckpt_name")
+}
+
+class ComfyUIClient:
+    def __init__(self, base_url="http://127.0.0.1:8188", workflows_dir="workflows"):
+        self.base_url = base_url
+        self.workflows_dir = Path(workflows_dir)
+        self.available_models = self._get_available_models()
+
+    def _get_available_models(self):
+        """Fetch list of available checkpoint models from ComfyUI"""
+        try:
+            response = requests.get(f"{self.base_url}/object_info/CheckpointLoaderSimple")
+            if response.status_code != 200:
+                logger.warning("Failed to fetch model list; using default handling")
+                return []
+            data = response.json()
+            models = data["CheckpointLoaderSimple"]["input"]["required"]["ckpt_name"][0]
+            logger.info(f"Available models: {len(models)} models found")
+            return models
+        except Exception as e:
+            logger.warning(f"Error fetching models: {e}")
+            return []
+
+    def list_workflows(self):
+        """List all available workflows (recursive, including subfolders)"""
+        if not self.workflows_dir.exists():
+            return []
+        workflows = []
+        for json_file in self.workflows_dir.rglob("*.json"):
+            relative_path = json_file.relative_to(self.workflows_dir)
+            workflow_id = str(relative_path.with_suffix('')).replace('\\', '/')
+            workflows.append(workflow_id)
+        return sorted(workflows)
+
+    def _is_ui_format(self, workflow: dict) -> bool:
+        """Detect whether the workflow is in UI or API format"""
+        return "nodes" in workflow and "links" in workflow
+
+    def _convert_ui_to_api(self, ui_workflow: dict) -> dict:
+        """
+        Convert a ComfyUI workflow from UI format to API format.
+        Improved version that handles widgets correctly.
+        """
+        logger.info("Converting workflow from UI format to API format")
+        api_workflow = {}
+        nodes = ui_workflow.get("nodes", [])
+        links = ui_workflow.get("links", [])
+
+        # Build a mapping link_id -> (source_node_id, source_slot)
+        link_map = {}
+        for link in links:
+            if len(link) >= 5:
+                link_id = link[0]
+                source_node_id = str(link[1])
+                source_slot = link[2]
+                link_map[link_id] = (source_node_id, source_slot)
+
+        # Convert each node
+        for node in nodes:
+            node_id = str(node["id"])
+            node_type = node.get("type")
+            if not node_type:
+                logger.warning(f"Node {node_id} has no type, skipping")
+                continue
+
+            inputs = {}
+
+            # Get the node's input definitions
+            node_inputs = node.get("inputs", [])
+
+            # 1. FIRST: handle the connections (links)
+            for input_def in node_inputs:
+                link_id = input_def.get("link")
+                if link_id is not None and link_id in link_map:
+                    source_node_id, source_slot = link_map[link_id]
+                    input_name = input_def.get("name")
+                    if input_name:
+                        inputs[input_name] = [source_node_id, source_slot]
+
+            # 2. THEN: handle the widgets_values
+            if "widgets_values" in node and node["widgets_values"]:
+                widgets = node["widgets_values"]
+                widget_inputs = []
+                for input_def in node_inputs:
+                    has_widget = input_def.get("widget") is not None
+                    has_link = input_def.get("link") is not None
+                    if has_widget and not has_link:
+                        widget_inputs.append(input_def["name"])
+
+                for i, value in enumerate(widgets):
+                    if i < len(widget_inputs):
+                        input_name = widget_inputs[i]
+                        inputs[input_name] = value
+                    else:
+                        logger.warning(f"Node {node_id}: Extra widget value at index {i}: {value}")
+
+            # Create the node in API format
+            api_workflow[node_id] = {
+                "inputs": inputs,
+                "class_type": node_type
+            }
+
+            logger.debug(f"Node {node_id} ({node_type}): {len(inputs)} inputs converted")
+
+        logger.info(f"Converted {len(api_workflow)} nodes from UI to API format")
+        return api_workflow
+
+    def load_workflow(self, workflow_id: str) -> dict:
+        """
+        Load a workflow and convert it automatically if needed.
+        Supports subfolders (e.g. "flux/upscale")
+        """
+        workflow_path = self.workflows_dir / f"{workflow_id}.json"
+        if not workflow_path.exists():
+            raise FileNotFoundError(f"Workflow '{workflow_id}' not found at {workflow_path}")
+
+        with open(workflow_path, 'r', encoding='utf-8') as f:
+            workflow = json.load(f)
+
+        # Automatic detection and conversion
+        if self._is_ui_format(workflow):
+            logger.info(f"Workflow '{workflow_id}' is in UI format, converting to API format")
+            return self._convert_ui_to_api(workflow)
+        else:
+            logger.info(f"Workflow '{workflow_id}' is already in API format")
+            return workflow
+
+    def generate_image(self, prompt, width=512, height=512, workflow_id="basic_api_test", model=None):
+        """
+        Generate an image using ComfyUI with a predefined workflow.
+        Automatically converts UI format workflows to API format.
+        """
+        # Load workflow (with automatic conversion)
+        workflow = self.load_workflow(workflow_id)
+
+        # Apply parameters using mapping
+        mapping = DEFAULT_MAPPING
+
+        # Update prompt
+        if "prompt" in mapping:
+            node_id, field = mapping["prompt"]
+            if node_id in workflow:
+                workflow[node_id]["inputs"][field] = prompt
+
+        # Update dimensions
+        if "width" in mapping:
+            node_id, field = mapping["width"]
+            if node_id in workflow:
+                workflow[node_id]["inputs"][field] = width
+
+        if "height" in mapping:
+            node_id, field = mapping["height"]
+            if node_id in workflow:
+                workflow[node_id]["inputs"][field] = height
+
+        # Update model if specified
+        if model and "model" in mapping:
+            node_id, field = mapping["model"]
+            if node_id in workflow:
+                if self.available_models and model not in self.available_models:
+                    logger.warning(f"Model '{model}' not found. Using workflow default.")
+                else:
+                    workflow[node_id]["inputs"][field] = model
+
+        # Submit workflow
+        payload = {"prompt": workflow}
+        response = requests.post(f"{self.base_url}/prompt", json=payload)
+        response.raise_for_status()
+        result = response.json()
+        prompt_id = result.get("prompt_id")
+
+        if not prompt_id:
+            raise ValueError("No prompt_id returned from ComfyUI")
+
+        logger.info(f"Workflow submitted. Prompt ID: {prompt_id}")
+
+        # Poll for completion
+        max_wait = 120
+        start_time = time.time()
+
+        while time.time() - start_time < max_wait:
+            history_response = requests.get(f"{self.base_url}/history/{prompt_id}")
+            history_response.raise_for_status()
+            history = history_response.json()
+
+            if prompt_id in history:
+                outputs = history[prompt_id].get("outputs", {})
+                for node_output in outputs.values():
+                    if "images" in node_output:
+                        images = node_output["images"]
+                        if images:
+                            img = images[0]
+                            filename = img["filename"]
+                            subfolder = img.get("subfolder", "")
+                            img_type = img.get("type", "output")
+
+                            url = f"{self.base_url}/view?filename={filename}"
+                            if subfolder:
+                                url += f"&subfolder={subfolder}"
+                            url += f"&type={img_type}"
+
+                            logger.info(f"Image generated: {url}")
+                            return url
+
+            time.sleep(1)
+
+        raise TimeoutError(f"Image generation timed out after {max_wait} seconds")
+
+    def get_queue_info(self) -> dict:
+        """
+        Get information about the ComfyUI queue
+
+        Returns:
+            dict: Queue state with running and pending entries
+        """
+        try:
+            response = requests.get(f"{self.base_url}/queue")
+            response.raise_for_status()
+            return response.json()
+        except Exception as e:
+            logger.error(f"Error fetching the queue: {e}")
+            return {"queue_running": [], "queue_pending": []}
+
+    def get_object_info(self, node_class: str = None) -> dict:
+        """
+        Get information about ComfyUI nodes
+
+        Args:
+            node_class: Specific class to fetch (optional)
+
+        Returns:
+            dict: Information about the available nodes
+        """
+        try:
+            if node_class:
+                response = requests.get(f"{self.base_url}/object_info/{node_class}")
+            else:
+                response = requests.get(f"{self.base_url}/object_info")
+            response.raise_for_status()
+            return response.json()
+        except Exception as e:
+            logger.error(f"Error fetching object_info: {e}")
+            return {}
+
+    def queue_prompt(self, workflow: dict) -> dict:
+        """Send a workflow to ComfyUI for execution"""
+        try:
+            payload = {"prompt": workflow}
+            response = requests.post(f"{self.base_url}/prompt", json=payload)
+            response.raise_for_status()
+            return response.json()
+        except Exception as e:
+            logger.error(f"Error submitting the workflow: {e}")
+            return {"status": "error", "message": str(e)}
+
+    def get_history(self, prompt_id: str) -> dict:
+        """Get the history of a prompt (outputs, status)"""
+        try:
+            resp = requests.get(f"{self.base_url}/history/{prompt_id}")
+            resp.raise_for_status()
+            return resp.json()
+        except Exception as e:
+            logger.error(f"get_history({prompt_id}) error: {e}")
+            return {"status": "error", "message": str(e)}
+
+    def interrupt(self) -> dict:
+        """Sends an interrupt request to ComfyUI"""
+        try:
+            response = requests.post(f"{self.base_url}/interrupt")
+            response.raise_for_status()
+            logger.info("Interrupt request sent to ComfyUI.")
+            return {"status": "success", "message": "Interrupt request sent"}
+        except Exception as e:
+            logger.error(f"Error sending interrupt request: {e}")
+            return {"status": "error", "message": str(e)}
+
+    async def get_system_stats(self) -> dict:
+        """Get CPU, RAM and GPU stats from the ComfyUI backend"""
+        import aiohttp
+        try:
+            async with aiohttp.ClientSession() as session:
+                async with session.get(f"{self.base_url}/system_stats") as response:
+                    response.raise_for_status()
+                    return await response.json()
+        except Exception as e:
+            logger.error(f"Error fetching system stats: {e}")
+            return {"status": "error", "message": str(e)}
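
A minimal usage sketch of ComfyUIClient, assuming a ComfyUI instance listening on http://127.0.0.1:8188 and a workflows/basic_api_test.json file whose node IDs match DEFAULT_MAPPING (nodes "6", "5", and "4"); the prompt text and dimensions are illustrative.

    from iflow_mcp_orion4d_comfyui_mcp.comfyui_client import ComfyUIClient

    # Assumes ComfyUI is running locally and a workflows/ directory exists.
    client = ComfyUIClient(base_url="http://127.0.0.1:8188", workflows_dir="workflows")

    print(client.list_workflows())  # e.g. ['basic_api_test', 'flux/upscale']

    # Blocks while polling /history; returns a /view URL or raises TimeoutError after 120 s.
    image_url = client.generate_image(
        prompt="a lighthouse at dusk, oil painting",
        width=768,
        height=512,
        workflow_id="basic_api_test",
    )
    print("Image available at:", image_url)
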
iflow_mcp_orion4d_comfyui_mcp/generate_key.py
@@ -0,0 +1,158 @@
+"""
+Secure key generator for the ComfyUI MCP server.
+Generates MCP_API_KEY (for ChatGPT) and WEBSOCKET_TOKEN (for the Chrome extension).
+"""
+
+import secrets
+from datetime import datetime
+from pathlib import Path
+
+def generate_api_key(length: int = 32) -> str:
+    """
+    Generate a cryptographically secure API key.
+
+    Args:
+        length: Number of random bytes used for the key (default 32)
+
+    Returns:
+        URL-safe API key
+    """
+    return secrets.token_urlsafe(length)
+
+if __name__ == "__main__":
+    print("\n" + "="*70)
+    print("🔑 Secure Key Generator - ComfyUI MCP Server")
+    print("="*70 + "\n")
+
+    # Generate the keys
+    mcp_api_key = generate_api_key(32)
+    websocket_token = generate_api_key(32)
+    date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+    print(f"📅 Generated on: {date}\n")
+
+    print("🔐 Your generated keys:")
+    print("-" * 70)
+    print(f"\n1️⃣ MCP_API_KEY (for ChatGPT via the HTTP API):")
+    print(f"   {mcp_api_key}\n")
+    print(f"2️⃣ WEBSOCKET_TOKEN (for the Chrome extension):")
+    print(f"   {websocket_token}\n")
+
+    print("="*70)
+    print("📋 .env CONFIGURATION")
+    print("="*70)
+    print("\nAdd these lines to your .env file:\n")
+    print(f"MCP_API_KEY={mcp_api_key}")
+    print(f"WEBSOCKET_TOKEN={websocket_token}\n")
+
+    print("="*70)
+    print("📋 CHATGPT CONFIGURATION")
+    print("="*70)
+    print("\nCustom Action / GPT Configuration:")
+    print("   • Authentication Type: API Key")
+    print("   • Custom Header Name: X-API-Key")
+    print(f"   • API Key: {mcp_api_key}\n")
+
+    print("="*70)
+    print("📋 CHROME EXTENSION CONFIGURATION")
+    print("="*70)
+    print("\nIn the extension popup:")
+    print("   • URL: ws://127.0.0.1:8000/ws")
+    print(f"   • Token: {websocket_token}\n")
+
+    print("="*70)
+    print("\n⚠️ SECURITY - IMPORTANT:")
+    print("="*70)
+    print("   • NEVER share these keys")
+    print("   • Keep them only in .env (git-ignored)")
+    print("   • Regenerate them if they are compromised")
+    print("   • Use different keys for each environment")
+    print("\n" + "="*70 + "\n")
+
+    # Offer to save the keys
+    save = input("💾 Do you want to create/update the .env file? (y/n): ")
+
+    if save.lower() in ['o', 'oui', 'y', 'yes']:
+        env_path = Path('.env')
+
+        # Check whether .env already exists
+        if env_path.exists():
+            backup = input("\n⚠️ The .env file already exists. Create a backup? (y/n): ")
+            if backup.lower() in ['o', 'oui', 'y', 'yes']:
+                backup_path = Path(f'.env.backup.{datetime.now().strftime("%Y%m%d_%H%M%S")}')
+                env_path.rename(backup_path)
+                print(f"✅ Backup created: {backup_path}")
+
+        env_content = f"""# ComfyUI MCP Server configuration
+# Generated on {date}
+
+# ComfyUI
+COMFYUI_BASE_URL=http://127.0.0.1:8188
+WORKFLOWS_DIR=workflows
+COMFYUI_PATH=D:\\ComfyUI_dev\\ComfyUI
+
+# Security - API key for ChatGPT (via HTTP/HTTPS)
+MCP_API_KEY={mcp_api_key}
+
+# Security - WebSocket token for the Chrome extension
+ENABLE_BROWSER_CONTROL=true
+WEBSOCKET_TOKEN={websocket_token}
+
+# Timeouts (in seconds)
+HTTP_TIMEOUT=60
+GENERATION_TIMEOUT=300
+
+# Browser options (not used with the Chrome extension)
+BROWSER_HEADLESS=false
+"""
+
+        with open('.env', 'w', encoding='utf-8') as f:
+            f.write(env_content)
+
+        print(f"\n✅ .env file created successfully!")
+        print(f"   Location: {env_path.absolute()}")
+
+        # Also create a .env.example (without the real keys)
+        env_example_content = f"""# ComfyUI MCP Server configuration
+# Example configuration - generate your own keys with generate_api_key.py
+
+# ComfyUI
+COMFYUI_BASE_URL=http://127.0.0.1:8188
+WORKFLOWS_DIR=workflows
+COMFYUI_PATH=D:\\ComfyUI_dev\\ComfyUI
+
+# Security - API key for ChatGPT (via HTTP/HTTPS)
+# Generate it with: python generate_api_key.py
+MCP_API_KEY=your_api_key_here
+
+# Security - WebSocket token for the Chrome extension
+ENABLE_BROWSER_CONTROL=true
+# Generate it with: python generate_api_key.py
+WEBSOCKET_TOKEN=your_websocket_token_here
+
+# Timeouts (in seconds)
+HTTP_TIMEOUT=60
+GENERATION_TIMEOUT=300
+
+# Browser options
+BROWSER_HEADLESS=false
+"""
+
+        with open('.env.example', 'w', encoding='utf-8') as f:
+            f.write(env_example_content)
+
+        print(f"✅ .env.example file created (template for Git)")
+
+        print("\n" + "="*70)
+        print("🎉 CONFIGURATION COMPLETE")
+        print("="*70)
+        print("\n✅ Next steps:")
+        print(" 1. Restart the MCP server: python server.py")
+        print(" 2. Configure the Chrome extension with the WEBSOCKET_TOKEN")
+        print(" 3. Configure ChatGPT with the MCP_API_KEY")
+        print("\n" + "="*70 + "\n")
+
+    else:
+        print("\n✅ Copy the keys into your .env manually")
+        print("   Don't forget both keys: MCP_API_KEY and WEBSOCKET_TOKEN\n")
+
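
The keys can also be produced programmatically by importing generate_api_key from the module above; a short sketch (the 32 passed here is the number of random bytes, which secrets.token_urlsafe expands to roughly 43 URL-safe characters):

    from iflow_mcp_orion4d_comfyui_mcp.generate_key import generate_api_key

    key = generate_api_key(32)  # 32 random bytes -> ~43 URL-safe characters
    print(f"MCP_API_KEY={key}")
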