lfx-nightly 0.2.0.dev0__py3-none-any.whl → 0.2.0.dev41__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. lfx/_assets/component_index.json +1 -1
  2. lfx/base/agents/agent.py +21 -4
  3. lfx/base/agents/altk_base_agent.py +393 -0
  4. lfx/base/agents/altk_tool_wrappers.py +565 -0
  5. lfx/base/agents/events.py +2 -1
  6. lfx/base/composio/composio_base.py +159 -224
  7. lfx/base/data/base_file.py +97 -20
  8. lfx/base/data/docling_utils.py +61 -10
  9. lfx/base/data/storage_utils.py +301 -0
  10. lfx/base/data/utils.py +178 -14
  11. lfx/base/mcp/util.py +2 -2
  12. lfx/base/models/anthropic_constants.py +21 -12
  13. lfx/base/models/groq_constants.py +74 -58
  14. lfx/base/models/groq_model_discovery.py +265 -0
  15. lfx/base/models/model.py +1 -1
  16. lfx/base/models/model_utils.py +100 -0
  17. lfx/base/models/openai_constants.py +7 -0
  18. lfx/base/models/watsonx_constants.py +32 -8
  19. lfx/base/tools/run_flow.py +601 -129
  20. lfx/cli/commands.py +9 -4
  21. lfx/cli/common.py +2 -2
  22. lfx/cli/run.py +1 -1
  23. lfx/cli/script_loader.py +53 -11
  24. lfx/components/Notion/create_page.py +1 -1
  25. lfx/components/Notion/list_database_properties.py +1 -1
  26. lfx/components/Notion/list_pages.py +1 -1
  27. lfx/components/Notion/list_users.py +1 -1
  28. lfx/components/Notion/page_content_viewer.py +1 -1
  29. lfx/components/Notion/search.py +1 -1
  30. lfx/components/Notion/update_page_property.py +1 -1
  31. lfx/components/__init__.py +19 -5
  32. lfx/components/{agents → altk}/__init__.py +5 -9
  33. lfx/components/altk/altk_agent.py +193 -0
  34. lfx/components/apify/apify_actor.py +1 -1
  35. lfx/components/composio/__init__.py +70 -18
  36. lfx/components/composio/apollo_composio.py +11 -0
  37. lfx/components/composio/bitbucket_composio.py +11 -0
  38. lfx/components/composio/canva_composio.py +11 -0
  39. lfx/components/composio/coda_composio.py +11 -0
  40. lfx/components/composio/composio_api.py +10 -0
  41. lfx/components/composio/discord_composio.py +1 -1
  42. lfx/components/composio/elevenlabs_composio.py +11 -0
  43. lfx/components/composio/exa_composio.py +11 -0
  44. lfx/components/composio/firecrawl_composio.py +11 -0
  45. lfx/components/composio/fireflies_composio.py +11 -0
  46. lfx/components/composio/gmail_composio.py +1 -1
  47. lfx/components/composio/googlebigquery_composio.py +11 -0
  48. lfx/components/composio/googlecalendar_composio.py +1 -1
  49. lfx/components/composio/googledocs_composio.py +1 -1
  50. lfx/components/composio/googlemeet_composio.py +1 -1
  51. lfx/components/composio/googlesheets_composio.py +1 -1
  52. lfx/components/composio/googletasks_composio.py +1 -1
  53. lfx/components/composio/heygen_composio.py +11 -0
  54. lfx/components/composio/mem0_composio.py +11 -0
  55. lfx/components/composio/peopledatalabs_composio.py +11 -0
  56. lfx/components/composio/perplexityai_composio.py +11 -0
  57. lfx/components/composio/serpapi_composio.py +11 -0
  58. lfx/components/composio/slack_composio.py +3 -574
  59. lfx/components/composio/slackbot_composio.py +1 -1
  60. lfx/components/composio/snowflake_composio.py +11 -0
  61. lfx/components/composio/tavily_composio.py +11 -0
  62. lfx/components/composio/youtube_composio.py +2 -2
  63. lfx/components/cuga/__init__.py +34 -0
  64. lfx/components/cuga/cuga_agent.py +730 -0
  65. lfx/components/data/__init__.py +78 -28
  66. lfx/components/data_source/__init__.py +58 -0
  67. lfx/components/{data → data_source}/api_request.py +26 -3
  68. lfx/components/{data → data_source}/csv_to_data.py +15 -10
  69. lfx/components/{data → data_source}/json_to_data.py +15 -8
  70. lfx/components/{data → data_source}/news_search.py +1 -1
  71. lfx/components/{data → data_source}/rss.py +1 -1
  72. lfx/components/{data → data_source}/sql_executor.py +1 -1
  73. lfx/components/{data → data_source}/url.py +1 -1
  74. lfx/components/{data → data_source}/web_search.py +1 -1
  75. lfx/components/datastax/astradb_cql.py +1 -1
  76. lfx/components/datastax/astradb_graph.py +1 -1
  77. lfx/components/datastax/astradb_tool.py +1 -1
  78. lfx/components/datastax/astradb_vectorstore.py +1 -1
  79. lfx/components/datastax/hcd.py +1 -1
  80. lfx/components/deactivated/json_document_builder.py +1 -1
  81. lfx/components/docling/__init__.py +0 -3
  82. lfx/components/docling/chunk_docling_document.py +3 -1
  83. lfx/components/docling/export_docling_document.py +3 -1
  84. lfx/components/elastic/elasticsearch.py +1 -1
  85. lfx/components/files_and_knowledge/__init__.py +47 -0
  86. lfx/components/{data → files_and_knowledge}/directory.py +1 -1
  87. lfx/components/{data → files_and_knowledge}/file.py +304 -24
  88. lfx/components/{knowledge_bases → files_and_knowledge}/retrieval.py +2 -2
  89. lfx/components/{data → files_and_knowledge}/save_file.py +218 -31
  90. lfx/components/flow_controls/__init__.py +58 -0
  91. lfx/components/{logic → flow_controls}/conditional_router.py +1 -1
  92. lfx/components/{logic → flow_controls}/loop.py +43 -9
  93. lfx/components/flow_controls/run_flow.py +108 -0
  94. lfx/components/glean/glean_search_api.py +1 -1
  95. lfx/components/groq/groq.py +35 -28
  96. lfx/components/helpers/__init__.py +102 -0
  97. lfx/components/ibm/watsonx.py +7 -1
  98. lfx/components/input_output/__init__.py +3 -1
  99. lfx/components/input_output/chat.py +4 -3
  100. lfx/components/input_output/chat_output.py +10 -4
  101. lfx/components/input_output/text.py +1 -1
  102. lfx/components/input_output/text_output.py +1 -1
  103. lfx/components/{data → input_output}/webhook.py +1 -1
  104. lfx/components/knowledge_bases/__init__.py +59 -4
  105. lfx/components/langchain_utilities/character.py +1 -1
  106. lfx/components/langchain_utilities/csv_agent.py +84 -16
  107. lfx/components/langchain_utilities/json_agent.py +67 -12
  108. lfx/components/langchain_utilities/language_recursive.py +1 -1
  109. lfx/components/llm_operations/__init__.py +46 -0
  110. lfx/components/{processing → llm_operations}/batch_run.py +17 -8
  111. lfx/components/{processing → llm_operations}/lambda_filter.py +1 -1
  112. lfx/components/{logic → llm_operations}/llm_conditional_router.py +1 -1
  113. lfx/components/{processing/llm_router.py → llm_operations/llm_selector.py} +3 -3
  114. lfx/components/{processing → llm_operations}/structured_output.py +1 -1
  115. lfx/components/logic/__init__.py +126 -0
  116. lfx/components/mem0/mem0_chat_memory.py +11 -0
  117. lfx/components/models/__init__.py +64 -9
  118. lfx/components/models_and_agents/__init__.py +49 -0
  119. lfx/components/{agents → models_and_agents}/agent.py +6 -4
  120. lfx/components/models_and_agents/embedding_model.py +353 -0
  121. lfx/components/models_and_agents/language_model.py +398 -0
  122. lfx/components/{agents → models_and_agents}/mcp_component.py +53 -44
  123. lfx/components/{helpers → models_and_agents}/memory.py +1 -1
  124. lfx/components/nvidia/system_assist.py +1 -1
  125. lfx/components/olivya/olivya.py +1 -1
  126. lfx/components/ollama/ollama.py +24 -5
  127. lfx/components/processing/__init__.py +9 -60
  128. lfx/components/processing/converter.py +1 -1
  129. lfx/components/processing/dataframe_operations.py +1 -1
  130. lfx/components/processing/parse_json_data.py +2 -2
  131. lfx/components/processing/parser.py +1 -1
  132. lfx/components/processing/split_text.py +1 -1
  133. lfx/components/qdrant/qdrant.py +1 -1
  134. lfx/components/redis/redis.py +1 -1
  135. lfx/components/twelvelabs/split_video.py +10 -0
  136. lfx/components/twelvelabs/video_file.py +12 -0
  137. lfx/components/utilities/__init__.py +43 -0
  138. lfx/components/{helpers → utilities}/calculator_core.py +1 -1
  139. lfx/components/{helpers → utilities}/current_date.py +1 -1
  140. lfx/components/{processing → utilities}/python_repl_core.py +1 -1
  141. lfx/components/vectorstores/local_db.py +9 -0
  142. lfx/components/youtube/youtube_transcripts.py +118 -30
  143. lfx/custom/custom_component/component.py +57 -1
  144. lfx/custom/custom_component/custom_component.py +68 -6
  145. lfx/custom/directory_reader/directory_reader.py +5 -2
  146. lfx/graph/edge/base.py +43 -20
  147. lfx/graph/state/model.py +15 -2
  148. lfx/graph/utils.py +6 -0
  149. lfx/graph/vertex/param_handler.py +10 -7
  150. lfx/helpers/__init__.py +12 -0
  151. lfx/helpers/flow.py +117 -0
  152. lfx/inputs/input_mixin.py +24 -1
  153. lfx/inputs/inputs.py +13 -1
  154. lfx/interface/components.py +161 -83
  155. lfx/log/logger.py +5 -3
  156. lfx/schema/image.py +2 -12
  157. lfx/services/database/__init__.py +5 -0
  158. lfx/services/database/service.py +25 -0
  159. lfx/services/deps.py +87 -22
  160. lfx/services/interfaces.py +5 -0
  161. lfx/services/manager.py +24 -10
  162. lfx/services/mcp_composer/service.py +1029 -162
  163. lfx/services/session.py +5 -0
  164. lfx/services/settings/auth.py +18 -11
  165. lfx/services/settings/base.py +56 -30
  166. lfx/services/settings/constants.py +8 -0
  167. lfx/services/storage/local.py +108 -46
  168. lfx/services/storage/service.py +171 -29
  169. lfx/template/field/base.py +3 -0
  170. lfx/utils/image.py +29 -11
  171. lfx/utils/ssrf_protection.py +384 -0
  172. lfx/utils/validate_cloud.py +26 -0
  173. {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/METADATA +38 -22
  174. {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/RECORD +189 -160
  175. {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/WHEEL +1 -1
  176. lfx/components/agents/altk_agent.py +0 -366
  177. lfx/components/agents/cuga_agent.py +0 -1013
  178. lfx/components/docling/docling_remote_vlm.py +0 -284
  179. lfx/components/logic/run_flow.py +0 -71
  180. lfx/components/models/embedding_model.py +0 -195
  181. lfx/components/models/language_model.py +0 -144
  182. lfx/components/processing/dataframe_to_toolset.py +0 -259
  183. /lfx/components/{data → data_source}/mock_data.py +0 -0
  184. /lfx/components/{knowledge_bases → files_and_knowledge}/ingestion.py +0 -0
  185. /lfx/components/{logic → flow_controls}/data_conditional_router.py +0 -0
  186. /lfx/components/{logic → flow_controls}/flow_tool.py +0 -0
  187. /lfx/components/{logic → flow_controls}/listen.py +0 -0
  188. /lfx/components/{logic → flow_controls}/notify.py +0 -0
  189. /lfx/components/{logic → flow_controls}/pass_message.py +0 -0
  190. /lfx/components/{logic → flow_controls}/sub_flow.py +0 -0
  191. /lfx/components/{processing → models_and_agents}/prompt.py +0 -0
  192. /lfx/components/{helpers → processing}/create_list.py +0 -0
  193. /lfx/components/{helpers → processing}/output_parser.py +0 -0
  194. /lfx/components/{helpers → processing}/store_message.py +0 -0
  195. /lfx/components/{helpers → utilities}/id_generator.py +0 -0
  196. {lfx_nightly-0.2.0.dev0.dist-info → lfx_nightly-0.2.0.dev41.dist-info}/entry_points.txt +0 -0
@@ -1,13 +1,18 @@
  """MCP Composer service for proxying and orchestrating MCP servers."""

  import asyncio
+ import json
  import os
+ import platform
  import re
  import select
  import socket
  import subprocess
+ import tempfile
+ import typing
  from collections.abc import Callable
  from functools import wraps
+ from pathlib import Path
  from typing import Any

  from lfx.log.logger import logger
@@ -68,20 +73,379 @@ class MCPComposerService(Service):

  def __init__(self):
  super().__init__()
- self.project_composers: dict[str, dict] = {} # project_id -> {process, host, port, sse_url, auth_config}
+ self.project_composers: dict[
+ str, dict
+ ] = {} # project_id -> {process, host, port, streamable_http_url, auth_config}
  self._start_locks: dict[
  str, asyncio.Lock
  ] = {} # Lock to prevent concurrent start operations for the same project
+ self._active_start_tasks: dict[
+ str, asyncio.Task
+ ] = {} # Track active start tasks to cancel them when new request arrives
+ self._port_to_project: dict[int, str] = {} # Track which project is using which port
+ self._pid_to_project: dict[int, str] = {} # Track which PID belongs to which project
+ self._last_errors: dict[str, str] = {} # Track last error message per project for UI display

- def _is_port_available(self, port: int) -> bool:
- """Check if a port is available by trying to bind to it."""
+ def get_last_error(self, project_id: str) -> str | None:
+ """Get the last error message for a project, if any."""
+ return self._last_errors.get(project_id)
+
+ def set_last_error(self, project_id: str, error_message: str) -> None:
+ """Set the last error message for a project."""
+ self._last_errors[project_id] = error_message
+
+ def clear_last_error(self, project_id: str) -> None:
+ """Clear the last error message for a project."""
+ self._last_errors.pop(project_id, None)
+
+ def _is_port_available(self, port: int, host: str = "localhost") -> bool:
+ """Check if a port is available by trying to bind to it.
+
+ Args:
+ port: Port number to check
+ host: Host to check (default: localhost)
+
+ Returns:
+ True if port is available (not in use), False if in use
+
+ Raises:
+ ValueError: If port is not in valid range (0-65535)
+ """
+ import errno
+
+ # Validate port range before attempting bind
+ max_port = 65535
+ if not isinstance(port, int) or port < 0 or port > max_port:
+ msg = f"Invalid port number: {port}. Port must be between 0 and {max_port}."
+ raise ValueError(msg)
+
+ # Check both IPv4 and IPv6 to ensure port is truly available
+ # MCP Composer tries to bind on both, so we need to check both
+
+ # Check IPv4
  try:
  with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- sock.bind(("0.0.0.0", port))
- return True # Port is available
+ # Don't use SO_REUSEADDR here as it can give false positives
+ sock.bind((host, port))
  except OSError:
- return False # Port is in use/bound
+ return False # Port is in use on IPv4
+
+ # Check IPv6 (if supported on this system)
+ try:
+ with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as sock:
+ # Don't use SO_REUSEADDR here as it can give false positives
+ # Use ::1 for localhost on IPv6
+ ipv6_host = "::1" if host in ("localhost", "127.0.0.1") else host
+ sock.bind((ipv6_host, port))
+ except OSError as e:
+ # Check if it's "address already in use" error
+ # errno.EADDRINUSE is 48 on macOS, 98 on Linux, 10048 on Windows (WSAEADDRINUSE)
+ # We check both the standard errno and Windows-specific error code
+ if e.errno in (errno.EADDRINUSE, 10048):
+ return False # Port is in use on IPv6
+ # For other errors (e.g., IPv6 not supported, EADDRNOTAVAIL), continue
+ # IPv6 might not be supported on this system, which is okay
+
+ return True # Port is available on both IPv4 and IPv6 (or IPv6 not supported)
+
+ async def _kill_process_on_port(self, port: int) -> bool:
+ """Kill the process using the specified port.
+
+ Cross-platform implementation supporting Windows, macOS, and Linux.
+
+ Args:
+ port: The port number to check
+
+ Returns:
+ True if a process was found and killed, False otherwise
+ """
+ try:
+ await logger.adebug(f"Checking for processes using port {port}...")
+ os_type = platform.system()
+
+ # Platform-specific command to find PID
+ if os_type == "Windows":
+ # Use netstat on Windows - use full path to avoid PATH issues
+ netstat_cmd = os.path.join(os.environ.get("SYSTEMROOT", "C:\\Windows"), "System32", "netstat.exe") # noqa: PTH118
+ result = await asyncio.to_thread(
+ subprocess.run,
+ [netstat_cmd, "-ano"],
+ capture_output=True,
+ text=True,
+ check=False,
+ )
+
+ if result.returncode == 0:
+ # Parse netstat output to find PID
+ # Format: TCP 0.0.0.0:PORT 0.0.0.0:0 LISTENING PID
+ windows_pids: list[int] = []
+ for line in result.stdout.split("\n"):
+ if f":{port}" in line and "LISTENING" in line:
+ parts = line.split()
+ if parts:
+ try:
+ pid = int(parts[-1])
+ windows_pids.append(pid)
+ except (ValueError, IndexError):
+ continue
+
+ await logger.adebug(f"Found {len(windows_pids)} process(es) using port {port}: {windows_pids}")
+
+ for pid in windows_pids:
+ try:
+ await logger.adebug(f"Attempting to kill process {pid} on port {port}...")
+ # Use taskkill on Windows - use full path to avoid PATH issues
+ taskkill_cmd = os.path.join( # noqa: PTH118
+ os.environ.get("SYSTEMROOT", "C:\\Windows"), "System32", "taskkill.exe"
+ )
+ kill_result = await asyncio.to_thread(
+ subprocess.run,
+ [taskkill_cmd, "/F", "/PID", str(pid)],
+ capture_output=True,
+ check=False,
+ )
+
+ if kill_result.returncode == 0:
+ await logger.adebug(f"Successfully killed process {pid} on port {port}")
+ return True
+ await logger.awarning(
+ f"taskkill returned {kill_result.returncode} for process {pid} on port {port}"
+ )
+ except Exception as e: # noqa: BLE001
+ await logger.aerror(f"Error killing PID {pid}: {e}")
+
+ return False
+ else:
+ # Use lsof on Unix-like systems (macOS, Linux)
+ result = await asyncio.to_thread(
+ subprocess.run,
+ ["lsof", "-ti", f":{port}"],
+ capture_output=True,
+ text=True,
+ check=False,
+ )
+
+ await logger.adebug(f"lsof returned code {result.returncode} for port {port}")
+
+ # Extract PIDs from lsof output
+ lsof_output = result.stdout.strip()
+ lsof_errors = result.stderr.strip()
+
+ if lsof_output:
+ await logger.adebug(f"lsof stdout: {lsof_output}")
+ if lsof_errors:
+ await logger.adebug(f"lsof stderr: {lsof_errors}")
+
+ if result.returncode == 0 and lsof_output:
+ unix_pids = lsof_output.split("\n")
+ await logger.adebug(f"Found {len(unix_pids)} process(es) using port {port}: {unix_pids}")
+
+ for pid_str in unix_pids:
+ try:
+ pid = int(pid_str.strip())
+ await logger.adebug(f"Attempting to kill process {pid} on port {port}...")
+
+ # Try to kill the process
+ kill_result = await asyncio.to_thread(
+ subprocess.run,
+ ["kill", "-9", str(pid)],
+ capture_output=True,
+ check=False,
+ )
+
+ if kill_result.returncode == 0:
+ await logger.adebug(f"Successfully sent kill signal to process {pid} on port {port}")
+ return True
+ await logger.awarning(
+ f"kill command returned {kill_result.returncode} for process {pid} on port {port}"
+ )
+ except (ValueError, ProcessLookupError) as e:
+ await logger.aerror(f"Error processing PID {pid_str}: {e}")
+
+ # If we get here, we found processes but couldn't kill any
+ return False
+ await logger.adebug(f"No process found using port {port}")
+ return False
+ except Exception as e: # noqa: BLE001
+ await logger.aerror(f"Error finding/killing process on port {port}: {e}")
+ return False
+ return False
+
+ async def _kill_zombie_mcp_processes(self, port: int) -> bool:
+ """Kill zombie MCP Composer processes that may be stuck.
+
+ On Windows, sometimes MCP Composer processes start but fail to bind to port.
+ These processes become "zombies" that need to be killed before retry.
+
+ Args:
+ port: The port that should be used
+
+ Returns:
+ True if zombie processes were found and killed
+ """
+ try:
+ os_type = platform.system()
+ if os_type != "Windows":
+ return False
+
+ await logger.adebug(f"Looking for zombie MCP Composer processes on Windows for port {port}...")
+
+ # First, try to find and kill any process using the port directly
+ # Use full path to netstat on Windows to avoid PATH issues
+ netstat_cmd = os.path.join(os.environ.get("SYSTEMROOT", "C:\\Windows"), "System32", "netstat.exe") # noqa: PTH118
+ netstat_result = await asyncio.to_thread(
+ subprocess.run,
+ [netstat_cmd, "-ano"],
+ capture_output=True,
+ text=True,
+ check=False,
+ )
+
+ killed_any = False
+ if netstat_result.returncode == 0:
+ # Parse netstat output to find PIDs using our port
+ pids_on_port: list[int] = []
+ for line in netstat_result.stdout.split("\n"):
+ if f":{port}" in line and "LISTENING" in line:
+ parts = line.split()
+ if parts:
+ try:
+ pid = int(parts[-1])
+ # Only kill if not tracked by us
+ if pid not in self._pid_to_project:
+ pids_on_port.append(pid)
+ else:
+ project = self._pid_to_project[pid]
+ await logger.adebug(
+ f"Process {pid} on port {port} is tracked, skipping (project: {project})"
+ )
+ except (ValueError, IndexError):
+ continue
+
+ if pids_on_port:
+ await logger.adebug(
+ f"Found {len(pids_on_port)} untracked process(es) on port {port}: {pids_on_port}"
+ )
+ for pid in pids_on_port:
+ try:
+ await logger.adebug(f"Killing process {pid} on port {port}...")
+ # Use full path to taskkill on Windows to avoid PATH issues
+ taskkill_cmd = os.path.join( # noqa: PTH118
+ os.environ.get("SYSTEMROOT", "C:\\Windows"), "System32", "taskkill.exe"
+ )
+ kill_result = await asyncio.to_thread(
+ subprocess.run,
+ [taskkill_cmd, "/F", "/PID", str(pid)],
+ capture_output=True,
+ check=False,
+ )
+ if kill_result.returncode == 0:
+ await logger.adebug(f"Successfully killed process {pid} on port {port}")
+ killed_any = True
+ else:
+ stderr_output = (
+ kill_result.stderr.decode()
+ if isinstance(kill_result.stderr, bytes)
+ else kill_result.stderr
+ )
+ await logger.awarning(f"Failed to kill process {pid} on port {port}: {stderr_output}")
+ except Exception as e: # noqa: BLE001
+ await logger.adebug(f"Error killing process {pid}: {e}")
+
+ # Also look for any orphaned mcp-composer processes (without checking port)
+ # This catches processes that failed to bind but are still running
+ # Use PowerShell instead of deprecated wmic.exe for Windows 10/11 compatibility
+ try:
+ # Use PowerShell to get Python processes with command line info
+ # Build PowerShell command to find MCP Composer processes
+ ps_filter = (
+ f"$_.Name -eq 'python.exe' -and $_.CommandLine -like '*mcp-composer*' "
+ f"-and ($_.CommandLine -like '*--port {port}*' -or $_.CommandLine -like '*--port={port}*')"
+ )
+ ps_cmd = (
+ f"Get-WmiObject Win32_Process | Where-Object {{ {ps_filter} }} | "
+ "Select-Object ProcessId,CommandLine | ConvertTo-Json"
+ )
+ powershell_cmd = ["powershell.exe", "-NoProfile", "-Command", ps_cmd]
+
+ ps_result = await asyncio.to_thread(
+ subprocess.run,
+ powershell_cmd,
+ capture_output=True,
+ text=True,
+ check=False,
+ timeout=5,
+ )
+
+ if ps_result.returncode == 0 and ps_result.stdout.strip():
+ try:
+ # PowerShell may return single object or array
+ processes = json.loads(ps_result.stdout)
+ if isinstance(processes, dict):
+ processes = [processes]
+ elif not isinstance(processes, list):
+ processes = []
+
+ for proc in processes:
+ try:
+ pid = int(proc.get("ProcessId", 0))
+ if pid <= 0 or pid in self._pid_to_project:
+ continue
+
+ await logger.adebug(
+ f"Found orphaned MCP Composer process {pid} for port {port}, killing it"
+ )
+ # Use full path to taskkill on Windows to avoid PATH issues
+ taskkill_cmd = os.path.join( # noqa: PTH118
+ os.environ.get("SYSTEMROOT", "C:\\Windows"), "System32", "taskkill.exe"
+ )
+ kill_result = await asyncio.to_thread(
+ subprocess.run,
+ [taskkill_cmd, "/F", "/PID", str(pid)],
+ capture_output=True,
+ check=False,
+ )
+ if kill_result.returncode == 0:
+ await logger.adebug(f"Successfully killed orphaned process {pid}")
+ killed_any = True
+
+ except (ValueError, KeyError) as e:
+ await logger.adebug(f"Error processing PowerShell result: {e}")
+ continue
+
+ except json.JSONDecodeError as e:
+ await logger.adebug(f"Failed to parse PowerShell output: {e}")
+
+ except asyncio.TimeoutError:
+ await logger.adebug("PowerShell command timed out while checking for orphaned processes")
+ except Exception as e: # noqa: BLE001
+ await logger.adebug(f"Error using PowerShell to find orphaned processes: {e}")
+
+ if killed_any:
+ # Give Windows time to clean up
+ await logger.adebug("Waiting 3 seconds for Windows to release port...")
+ await asyncio.sleep(3)
+
+ return killed_any # noqa: TRY300
+
+ except Exception as e: # noqa: BLE001
+ await logger.adebug(f"Error killing zombie processes: {e}")
+ return False
+
+ def _is_port_used_by_another_project(self, port: int, current_project_id: str) -> tuple[bool, str | None]:
+ """Check if a port is being used by another project.
+
+ Args:
+ port: The port to check
+ current_project_id: The current project ID
+
+ Returns:
+ Tuple of (is_used_by_other, other_project_id)
+ """
+ other_project_id = self._port_to_project.get(port)
+ if other_project_id and other_project_id != current_project_id:
+ return True, other_project_id
+ return False, None

  async def start(self):
  """Check if the MCP Composer service is enabled."""
@@ -125,47 +489,300 @@ class MCPComposerService(Service):
  composer_info = self.project_composers[project_id]
  process = composer_info.get("process")

- if process:
- try:
- # Check if process is still running before trying to terminate
- if process.poll() is None:
- await logger.adebug(f"Terminating MCP Composer process {process.pid} for project {project_id}")
- process.terminate()
+ try:
+ if process:
+ try:
+ # Check if process is still running before trying to terminate
+ if process.poll() is None:
+ await logger.adebug(f"Terminating MCP Composer process {process.pid} for project {project_id}")
+ process.terminate()

- # Wait longer for graceful shutdown
- try:
- await asyncio.wait_for(self._wait_for_process_exit(process), timeout=3.0)
- await logger.adebug(f"MCP Composer for project {project_id} terminated gracefully")
- except asyncio.TimeoutError:
- await logger.aerror(
- f"MCP Composer for project {project_id} did not terminate gracefully, force killing"
- )
- process.kill()
- # Wait a bit more for force kill to complete
+ # Wait longer for graceful shutdown
  try:
- await asyncio.wait_for(self._wait_for_process_exit(process), timeout=2.0)
+ await asyncio.wait_for(asyncio.to_thread(process.wait), timeout=2.0)
+ await logger.adebug(f"MCP Composer for project {project_id} terminated gracefully")
  except asyncio.TimeoutError:
  await logger.aerror(
- f"Failed to kill MCP Composer process {process.pid} for project {project_id}"
+ f"MCP Composer for project {project_id} did not terminate gracefully, force killing"
  )
- else:
- await logger.adebug(f"MCP Composer process for project {project_id} was already terminated")
-
- await logger.adebug(f"MCP Composer stopped for project {project_id}")
+ await asyncio.to_thread(process.kill)
+ await asyncio.to_thread(process.wait)
+ else:
+ await logger.adebug(f"MCP Composer process for project {project_id} was already terminated")

- except ProcessLookupError:
- # Process already terminated
- await logger.adebug(f"MCP Composer process for project {project_id} was already terminated")
- except Exception as e: # noqa: BLE001
- await logger.aerror(f"Error stopping MCP Composer for project {project_id}: {e}")
+ await logger.adebug(f"MCP Composer stopped for project {project_id}")

- # Remove from tracking
- del self.project_composers[project_id]
+ except ProcessLookupError:
+ # Process already terminated
+ await logger.adebug(f"MCP Composer process for project {project_id} was already terminated")
+ except Exception as e: # noqa: BLE001
+ await logger.aerror(f"Error stopping MCP Composer for project {project_id}: {e}")
+ finally:
+ # Always clean up tracking, even if stopping failed
+ port = composer_info.get("port")
+ if port and self._port_to_project.get(port) == project_id:
+ self._port_to_project.pop(port, None)
+ await logger.adebug(f"Released port {port} from project {project_id}")
+
+ # Clean up PID tracking
+ if process and process.pid:
+ self._pid_to_project.pop(process.pid, None)
+ await logger.adebug(f"Released PID {process.pid} tracking for project {project_id}")
+
+ # Remove from tracking
+ self.project_composers.pop(project_id, None)
+ await logger.adebug(f"Removed tracking for project {project_id}")

  async def _wait_for_process_exit(self, process):
  """Wait for a process to exit."""
  await asyncio.to_thread(process.wait)

+ async def _read_process_output_and_extract_error(
+ self,
+ process: subprocess.Popen,
+ oauth_server_url: str | None,
+ timeout: float = 2.0,
+ stdout_file=None,
+ stderr_file=None,
+ ) -> tuple[str, str, str]:
+ """Read process output and extract user-friendly error message.
+
+ Args:
+ process: The subprocess to read from
+ oauth_server_url: OAuth server URL for error messages
+ timeout: Timeout for reading output
+ stdout_file: Optional file handle for stdout (Windows)
+ stderr_file: Optional file handle for stderr (Windows)
+
+ Returns:
+ Tuple of (stdout, stderr, error_message)
+ """
+ stdout_content = ""
+ stderr_content = ""
+
+ try:
+ # On Windows with temp files, read from files instead of pipes
+ if stdout_file and stderr_file:
+ # Close file handles to flush and allow reading
+ try:
+ stdout_file.close()
+ stderr_file.close()
+ except Exception as e: # noqa: BLE001
+ await logger.adebug(f"Error closing temp files: {e}")
+
+ # Read from temp files using asyncio.to_thread
+ try:
+
+ def read_file(filepath):
+ return Path(filepath).read_bytes()
+
+ stdout_bytes = await asyncio.to_thread(read_file, stdout_file.name)
+ stdout_content = stdout_bytes.decode("utf-8", errors="replace") if stdout_bytes else ""
+ except Exception as e: # noqa: BLE001
+ await logger.adebug(f"Error reading stdout file: {e}")
+
+ try:
+
+ def read_file(filepath):
+ return Path(filepath).read_bytes()
+
+ stderr_bytes = await asyncio.to_thread(read_file, stderr_file.name)
+ stderr_content = stderr_bytes.decode("utf-8", errors="replace") if stderr_bytes else ""
+ except Exception as e: # noqa: BLE001
+ await logger.adebug(f"Error reading stderr file: {e}")
+
+ # Clean up temp files
+ try:
+ Path(stdout_file.name).unlink()
+ Path(stderr_file.name).unlink()
+ except Exception as e: # noqa: BLE001
+ await logger.adebug(f"Error removing temp files: {e}")
+ else:
+ # Use asyncio.to_thread to avoid blocking the event loop
+ # Process returns bytes, decode with error handling
+ stdout_bytes, stderr_bytes = await asyncio.to_thread(process.communicate, timeout=timeout)
+ stdout_content = stdout_bytes.decode("utf-8", errors="replace") if stdout_bytes else ""
+ stderr_content = stderr_bytes.decode("utf-8", errors="replace") if stderr_bytes else ""
+
+ except subprocess.TimeoutExpired:
+ process.kill()
+ error_msg = self._extract_error_message("", "", oauth_server_url)
+ return "", "", error_msg
+
+ error_msg = self._extract_error_message(stdout_content, stderr_content, oauth_server_url)
+ return stdout_content, stderr_content, error_msg
+
+ async def _read_stream_non_blocking(self, stream, stream_name: str) -> str:
+ """Read from a stream without blocking and log the content.
+
+ Args:
+ stream: The stream to read from (stdout or stderr)
+ stream_name: Name of the stream for logging ("stdout" or "stderr")
+
+ Returns:
+ The content read from the stream (empty string if nothing available)
+ """
+ if not stream:
+ return ""
+
+ try:
+ # On Windows, select.select() doesn't work with pipes (only sockets)
+ # Use platform-specific approach
+ os_type = platform.system()
+
+ if os_type == "Windows":
+ # On Windows, select.select() doesn't work with pipes
+ # Skip stream reading during monitoring - output will be captured when process terminates
+ # This prevents blocking on peek() which can cause the monitoring loop to hang
+ return ""
+ # On Unix-like systems, use select
+ if select.select([stream], [], [], 0)[0]:
+ line_bytes = stream.readline()
+ if line_bytes:
+ # Decode bytes with error handling
+ line = line_bytes.decode("utf-8", errors="replace") if isinstance(line_bytes, bytes) else line_bytes
+ stripped = line.strip()
+ if stripped:
+ # Log errors at error level, everything else at debug
+ if stream_name == "stderr" and ("ERROR" in stripped or "error" in stripped):
+ await logger.aerror(f"MCP Composer {stream_name}: {stripped}")
+ else:
+ await logger.adebug(f"MCP Composer {stream_name}: {stripped}")
+ return stripped
+ except Exception as e: # noqa: BLE001
+ await logger.adebug(f"Error reading {stream_name}: {e}")
+ return ""
+
+ async def _ensure_port_available(self, port: int, current_project_id: str) -> None:
+ """Ensure a port is available, only killing untracked processes.
+
+ Args:
+ port: The port number to ensure is available
+ current_project_id: The project ID requesting the port
+
+ Raises:
+ MCPComposerPortError: If port cannot be made available
+ MCPComposerConfigError: If port is invalid
+ """
+ try:
+ is_port_available = self._is_port_available(port)
+ await logger.adebug(f"Port {port} availability check: {is_port_available}")
+ except (ValueError, OverflowError, TypeError) as e:
+ # Port validation failed - invalid port number or type
+ # ValueError: from our validation
+ # OverflowError: from socket.bind() when port > 65535
+ # TypeError: when port is not an integer
+ error_msg = f"Invalid port number: {port}. Port must be an integer between 0 and 65535."
+ await logger.aerror(f"Invalid port for project {current_project_id}: {e}")
+ raise MCPComposerConfigError(error_msg, current_project_id) from e
+
+ if not is_port_available:
+ # Check if the port is being used by a tracked project
+ is_used_by_other, other_project_id = self._is_port_used_by_another_project(port, current_project_id)
+
+ if is_used_by_other and other_project_id:
+ # Port is being used by another tracked project
+ # Check if we can take ownership (e.g., the other project is failing)
+ other_composer = self.project_composers.get(other_project_id)
+ if other_composer and other_composer.get("process"):
+ other_process = other_composer["process"]
+ # If the other process is still running and healthy, don't kill it
+ if other_process.poll() is None:
+ await logger.aerror(
+ f"Port {port} requested by project {current_project_id} is already in use by "
+ f"project {other_project_id}. Will not kill active MCP Composer process."
+ )
+ port_error_msg = (
+ f"Port {port} is already in use by another project. "
+ f"Please choose a different port (e.g., {port + 1}) "
+ f"or disable OAuth on the other project first."
+ )
+ raise MCPComposerPortError(port_error_msg, current_project_id)
+
+ # Process died but port tracking wasn't cleaned up - allow takeover
+ await logger.adebug(
+ f"Port {port} was tracked to project {other_project_id} but process died. "
+ f"Allowing project {current_project_id} to take ownership."
+ )
+ # Clean up the old tracking
+ await self._do_stop_project_composer(other_project_id)
+
+ # Check if port is used by a process owned by the current project (e.g., stuck in startup loop)
+ port_owner_project = self._port_to_project.get(port)
+ if port_owner_project == current_project_id:
+ # Port is owned by current project - safe to kill
+ await logger.adebug(
+ f"Port {port} is in use by current project {current_project_id} (likely stuck in startup). "
+ f"Killing process to retry."
+ )
+ killed = await self._kill_process_on_port(port)
+ if killed:
+ await logger.adebug(
+ f"Successfully killed own process on port {port}. Waiting for port to be released..."
+ )
+ await asyncio.sleep(2)
+ is_port_available = self._is_port_available(port)
+ if not is_port_available:
+ await logger.aerror(f"Port {port} is still in use after killing own process.")
+ port_error_msg = f"Port {port} is still in use after killing process"
+ raise MCPComposerPortError(port_error_msg)
+ else:
+ # Port is in use by unknown process - don't kill it (security concern)
+ await logger.aerror(
+ f"Port {port} is in use by an unknown process (not owned by Langflow). "
+ f"Will not kill external application for security reasons."
+ )
+ port_error_msg = (
+ f"Port {port} is already in use by another application. "
+ f"Please choose a different port (e.g., {port + 1}) or free up the port manually."
+ )
+ raise MCPComposerPortError(port_error_msg, current_project_id)
+
+ await logger.adebug(f"Port {port} is available, proceeding with MCP Composer startup")
+
+ async def _log_startup_error_details(
+ self,
+ project_id: str,
+ cmd: list[str],
+ host: str,
+ port: int,
+ stdout: str = "",
+ stderr: str = "",
+ error_msg: str = "",
+ exit_code: int | None = None,
+ pid: int | None = None,
+ ) -> None:
+ """Log detailed startup error information.
+
+ Args:
+ project_id: The project ID
+ cmd: The command that was executed
+ host: Target host
+ port: Target port
+ stdout: Standard output from the process
+ stderr: Standard error from the process
+ error_msg: User-friendly error message
+ exit_code: Process exit code (if terminated)
+ pid: Process ID (if still running)
+ """
+ await logger.aerror(f"MCP Composer startup failed for project {project_id}:")
+ if exit_code is not None:
+ await logger.aerror(f" - Process died with exit code: {exit_code}")
+ if pid is not None:
+ await logger.aerror(f" - Process is running (PID: {pid}) but failed to bind to port {port}")
+ await logger.aerror(f" - Target: {host}:{port}")
+
+ # Obfuscate secrets in command before logging
+ safe_cmd = self._obfuscate_command_secrets(cmd)
+ await logger.aerror(f" - Command: {' '.join(safe_cmd)}")
+
+ if stderr.strip():
+ await logger.aerror(f" - Error output: {stderr.strip()}")
+ if stdout.strip():
+ await logger.aerror(f" - Standard output: {stdout.strip()}")
+ if error_msg:
+ await logger.aerror(f" - Error message: {error_msg}")
+
  def _validate_oauth_settings(self, auth_config: dict[str, Any]) -> None:
  """Validate that all required OAuth settings are present and non-empty.

@@ -205,6 +822,18 @@ class MCPComposerService(Service):
  config_error_msg = f"Invalid OAuth configuration: {'; '.join(error_parts)}"
  raise MCPComposerConfigError(config_error_msg)

+ @staticmethod
+ def _normalize_config_value(value: Any) -> Any:
+ """Normalize a config value (None or empty string becomes None).
+
+ Args:
+ value: The value to normalize
+
+ Returns:
+ None if value is None or empty string, otherwise the value
+ """
+ return None if (value is None or value == "") else value
+
  def _has_auth_config_changed(self, existing_auth: dict[str, Any] | None, new_auth: dict[str, Any] | None) -> bool:
  """Check if auth configuration has changed in a way that requires restart."""
  if not existing_auth and not new_auth:
@@ -230,12 +859,8 @@ class MCPComposerService(Service):

  # Compare relevant fields
  for field in fields_to_check:
- old_val = existing_auth.get(field)
- new_val = new_auth.get(field)
-
- # Convert None and empty string to None for comparison
- old_normalized = None if (old_val is None or old_val == "") else old_val
- new_normalized = None if (new_val is None or new_val == "") else new_val
+ old_normalized = self._normalize_config_value(existing_auth.get(field))
+ new_normalized = self._normalize_config_value(new_auth.get(field))

  if old_normalized != new_normalized:
  return True
@@ -252,23 +877,30 @@ class MCPComposerService(Service):
  List of command arguments with secrets replaced with ***REDACTED***
  """
  safe_cmd = []
- skip_next = False
+ i = 0

- for i, arg in enumerate(cmd):
- if skip_next:
- skip_next = False
- safe_cmd.append("***REDACTED***")
- continue
+ while i < len(cmd):
+ arg = cmd[i]

+ # Check if this is --env followed by a secret key
  if arg == "--env" and i + 2 < len(cmd):
- # Check if next env var is a secret
  env_key = cmd[i + 1]
+ env_value = cmd[i + 2]
+
  if any(secret in env_key.lower() for secret in ["secret", "key", "token"]):
- safe_cmd.extend([arg, env_key]) # Keep env key, redact value
- skip_next = True
+ # Redact the value
+ safe_cmd.extend([arg, env_key, "***REDACTED***"])
+ i += 3 # Skip all three: --env, key, and value
  continue

+ # Not a secret, keep as-is
+ safe_cmd.extend([arg, env_key, env_value])
+ i += 3
+ continue
+
+ # Regular argument
  safe_cmd.append(arg)
+ i += 1

  return safe_cmd

@@ -319,16 +951,93 @@ class MCPComposerService(Service):
  async def start_project_composer(
  self,
  project_id: str,
- sse_url: str,
+ streamable_http_url: str,
  auth_config: dict[str, Any] | None,
- max_startup_checks: int = 5,
+ max_retries: int = 3,
+ max_startup_checks: int = 40,
  startup_delay: float = 2.0,
+ *,
+ legacy_sse_url: str | None = None,
  ) -> None:
  """Start an MCP Composer instance for a specific project.

+ Args:
+ project_id: The project ID
+ streamable_http_url: Streamable HTTP endpoint for the remote Langflow MCP server
+ auth_config: Authentication configuration
+ max_retries: Maximum number of retry attempts (default: 3)
+ max_startup_checks: Number of checks per retry attempt (default: 40)
+ startup_delay: Delay between checks in seconds (default: 2.0)
+ legacy_sse_url: Optional legacy SSE URL used for backward compatibility
+
  Raises:
  MCPComposerError: Various specific errors if startup fails
  """
+ # Cancel any active start operation for this project
+ if project_id in self._active_start_tasks:
+ active_task = self._active_start_tasks[project_id]
+ if not active_task.done():
+ await logger.adebug(f"Cancelling previous MCP Composer start operation for project {project_id}")
+ active_task.cancel()
+ try:
+ await active_task
+ except asyncio.CancelledError:
+ await logger.adebug(f"Previous start operation for project {project_id} cancelled successfully")
+ finally:
+ # Clean up the cancelled task from tracking
+ del self._active_start_tasks[project_id]
+
+ # Create and track the current task
+ current_task = asyncio.current_task()
+ if not current_task:
+ await logger.awarning(
+ f"Could not get current task for project {project_id}. "
+ f"Concurrent start operations may not be properly cancelled."
+ )
+ else:
+ self._active_start_tasks[project_id] = current_task
+
+ try:
+ await self._do_start_project_composer(
+ project_id,
+ streamable_http_url,
+ auth_config,
+ max_retries,
+ max_startup_checks,
+ startup_delay,
+ legacy_sse_url=legacy_sse_url,
+ )
+ finally:
+ # Clean up the task reference when done
+ if project_id in self._active_start_tasks and self._active_start_tasks[project_id] == current_task:
+ del self._active_start_tasks[project_id]
+
+ async def _do_start_project_composer(
+ self,
+ project_id: str,
+ streamable_http_url: str,
+ auth_config: dict[str, Any] | None,
+ max_retries: int = 3,
+ max_startup_checks: int = 40,
+ startup_delay: float = 2.0,
+ *,
+ legacy_sse_url: str | None = None,
+ ) -> None:
+ """Internal method to start an MCP Composer instance.
+
+ Args:
+ project_id: The project ID
+ streamable_http_url: Streamable HTTP endpoint for the remote Langflow MCP server
+ auth_config: Authentication configuration
+ max_retries: Maximum number of retry attempts (default: 3)
+ max_startup_checks: Number of checks per retry attempt (default: 40)
+ startup_delay: Delay between checks in seconds (default: 2.0)
+ legacy_sse_url: Optional legacy SSE URL used for backward compatibility
+
+ Raises:
+ MCPComposerError: Various specific errors if startup fails
+ """
+ legacy_sse_url = legacy_sse_url or f"{streamable_http_url.rstrip('/')}/sse"
  if not auth_config:
  no_auth_error_msg = "No auth settings provided"
  raise MCPComposerConfigError(no_auth_error_msg, project_id)
@@ -366,11 +1075,13 @@ class MCPComposerService(Service):
  composer_info = self.project_composers[project_id]
  process = composer_info.get("process")
  existing_auth = composer_info.get("auth_config", {})
+ existing_port = composer_info.get("port")

  # Check if process is still running
  if process and process.poll() is None:
  # Process is running - only restart if config changed
  auth_changed = self._has_auth_config_changed(existing_auth, auth_config)
+
  if auth_changed:
  await logger.adebug(f"Config changed for project {project_id}, restarting MCP Composer")
  await self._do_stop_project_composer(project_id)
@@ -383,48 +1094,181 @@ class MCPComposerService(Service):
  # Process died or never started properly, restart it
  await logger.adebug(f"MCP Composer process died for project {project_id}, restarting")
  await self._do_stop_project_composer(project_id)
+ # Also kill any process that might be using the old port
+ if existing_port:
+ try:
+ await asyncio.wait_for(self._kill_process_on_port(existing_port), timeout=5.0)
+ except asyncio.TimeoutError:
+ await logger.aerror(f"Timeout while killing process on port {existing_port}")

- is_port_available = self._is_port_available(project_port)
- if not is_port_available:
- await logger.awarning(f"Port {project_port} is already in use.")
- port_error_msg = f"Port {project_port} is already in use"
- raise MCPComposerPortError(port_error_msg)
+ # Retry loop: try starting the process multiple times
+ last_error = None
+ try:
+ # Before first attempt, try to kill any zombie MCP Composer processes
+ # This is a best-effort operation - don't fail startup if it errors
+ try:
+ await logger.adebug(
+ f"Checking for zombie MCP Composer processes on port {project_port} before startup..."
+ )
+ zombies_killed = await self._kill_zombie_mcp_processes(project_port)
+ if zombies_killed:
+ await logger.adebug(f"Killed zombie processes, port {project_port} should now be free")
+ except Exception as zombie_error: # noqa: BLE001
+ # Log but continue - zombie cleanup is optional
+ await logger.awarning(
+ f"Failed to check/kill zombie processes (non-fatal): {zombie_error}. Continuing with startup..."
+ )

- # Start the MCP Composer process (single attempt, no outer retry loop)
- process = await self._start_project_composer_process(
- project_id, project_host, project_port, sse_url, auth_config, max_startup_checks, startup_delay
- )
- self.project_composers[project_id] = {
- "process": process,
- "host": project_host,
- "port": project_port,
- "sse_url": sse_url,
- "auth_config": auth_config,
- }
+ # Ensure port is available (only kill untracked processes)
+ try:
+ await self._ensure_port_available(project_port, project_id)
+ except (MCPComposerPortError, MCPComposerConfigError) as e:
+ # Port/config error before starting - store and raise immediately (no retries)
+ self._last_errors[project_id] = e.message
+ raise
+ for retry_attempt in range(1, max_retries + 1):
+ try:
+ await logger.adebug(
+ f"Starting MCP Composer for project {project_id} (attempt {retry_attempt}/{max_retries})"
+ )

- await logger.adebug(
- f"MCP Composer started for project {project_id} on port {project_port} (PID: {process.pid})"
- )
+ # Re-check port availability before each attempt to prevent race conditions
+ if retry_attempt > 1:
+ await logger.adebug(f"Re-checking port {project_port} availability before retry...")
+ await self._ensure_port_available(project_port, project_id)
+
+ process = await self._start_project_composer_process(
+ project_id,
+ project_host,
+ project_port,
+ streamable_http_url,
+ auth_config,
+ max_startup_checks,
+ startup_delay,
+ legacy_sse_url=legacy_sse_url,
+ )
+
+ except MCPComposerError as e:
+ last_error = e
+ await logger.aerror(
+ f"MCP Composer startup attempt {retry_attempt}/{max_retries} failed "
+ f"for project {project_id}: {e.message}"
+ )
+
+ # For config/port errors, don't retry - fail immediately
+ if isinstance(e, (MCPComposerConfigError, MCPComposerPortError)):
+ await logger.aerror(
+ f"Configuration or port error for project {project_id}, not retrying: {e.message}"
+ )
+ raise # Re-raise to exit retry loop immediately
+
+ # Clean up any partially started process before retrying
+ if project_id in self.project_composers:
+ await self._do_stop_project_composer(project_id)
+
+ # If not the last attempt, wait and try to clean up zombie processes
+ if retry_attempt < max_retries:
+ await logger.adebug(f"Waiting 2 seconds before retry attempt {retry_attempt + 1}...")
+ await asyncio.sleep(2)
+
+ # On Windows, try to kill any zombie MCP Composer processes for this port
+ # This is a best-effort operation - don't fail retry if it errors
+ try:
+ msg = f"Checking for zombie MCP Composer processes on port {project_port}"
+ await logger.adebug(msg)
+ zombies_killed = await self._kill_zombie_mcp_processes(project_port)
+ if zombies_killed:
+ await logger.adebug(f"Killed zombie processes, port {project_port} should be free")
+ except Exception as retry_zombie_error: # noqa: BLE001
+ # Log but continue - zombie cleanup is optional
+ msg = f"Failed to check/kill zombie processes during retry: {retry_zombie_error}"
+ await logger.awarning(msg)
+
+ else:
+ # Success! Store the composer info and register the port and PID
+ self.project_composers[project_id] = {
+ "process": process,
+ "host": project_host,
+ "port": project_port,
+ "streamable_http_url": streamable_http_url,
+ "legacy_sse_url": legacy_sse_url,
+ "sse_url": legacy_sse_url,
+ "auth_config": auth_config,
+ }
+ self._port_to_project[project_port] = project_id
+ self._pid_to_project[process.pid] = project_id
+ # Clear any previous error on success
+ self.clear_last_error(project_id)
+
+ await logger.adebug(
+ f"MCP Composer started for project {project_id} on port {project_port} "
+ f"(PID: {process.pid}) after {retry_attempt} attempt(s)"
+ )
+ return # Success!
+
+ # All retries failed, raise the last error
+ if last_error:
+ await logger.aerror(
+ f"MCP Composer failed to start for project {project_id} after {max_retries} attempts"
+ )
+ # Store the error message for later retrieval
+ self._last_errors[project_id] = last_error.message
+ raise last_error
+
+ except asyncio.CancelledError:
+ # Operation was cancelled, clean up any started process
+ await logger.adebug(f"MCP Composer start operation for project {project_id} was cancelled")
+ if project_id in self.project_composers:
+ await self._do_stop_project_composer(project_id)
+ raise # Re-raise to propagate cancellation

  async def _start_project_composer_process(
  self,
  project_id: str,
  host: str,
  port: int,
- sse_url: str,
+ streamable_http_url: str,
  auth_config: dict[str, Any] | None = None,
- max_startup_checks: int = 5,
+ max_startup_checks: int = 40,
  startup_delay: float = 2.0,
+ *,
+ legacy_sse_url: str | None = None,
  ) -> subprocess.Popen:
- """Start the MCP Composer subprocess for a specific project."""
+ """Start the MCP Composer subprocess for a specific project.
+
+ Args:
+ project_id: The project ID
+ host: Host to bind to
+ port: Port to bind to
+ streamable_http_url: Streamable HTTP endpoint to connect to
+ auth_config: Authentication configuration
+ max_startup_checks: Number of port binding checks (default: 40)
+ startup_delay: Delay between checks in seconds (default: 2.0)
+ legacy_sse_url: Optional legacy SSE URL used for backward compatibility when required by tooling
+
+ Returns:
+ The started subprocess
+
+ Raises:
+ MCPComposerStartupError: If startup fails
+ """
  settings = get_settings_service().settings
+ # Some composer tooling still uses the --sse-url flag for backwards compatibility even in HTTP mode.
+ effective_legacy_sse_url = legacy_sse_url or f"{streamable_http_url.rstrip('/')}/sse"
+
  cmd = [
  "uvx",
  f"mcp-composer{settings.mcp_composer_version}",
+ "--port",
+ str(port),
+ "--host",
+ host,
  "--mode",
- "sse",
+ "http",
+ "--endpoint",
+ streamable_http_url,
  "--sse-url",
- sse_url,
+ effective_legacy_sse_url,
  "--disable-composer-tools",
  ]

@@ -442,6 +1286,8 @@ class MCPComposerService(Service):
  cmd.extend(["--env", "ENABLE_OAUTH", "True"])

  # Map auth config to environment variables for OAuth
+ # Note: oauth_host and oauth_port are passed both via --host/--port CLI args
+ # (for server binding) and as environment variables (for OAuth flow)
  oauth_env_mapping = {
  "oauth_host": "OAUTH_HOST",
  "oauth_port": "OAUTH_PORT",
@@ -462,68 +1308,102 @@ class MCPComposerService(Service):
             if value is not None and str(value).strip():
                 cmd.extend(["--env", env_key, str(value)])
 
+        # Log the command being executed (with secrets obfuscated)
+        safe_cmd = self._obfuscate_command_secrets(cmd)
+        await logger.adebug(f"Starting MCP Composer with command: {' '.join(safe_cmd)}")
+
         # Start the subprocess with both stdout and stderr captured
-        process = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)  # noqa: ASYNC220, S603
+        # On Windows, use temp files to avoid pipe buffering issues that can cause process to hang
+        stdout_handle: int | typing.IO[bytes] = subprocess.PIPE
+        stderr_handle: int | typing.IO[bytes] = subprocess.PIPE
+        stdout_file = None
+        stderr_file = None
+
+        if platform.system() == "Windows":
+            # Create temp files for stdout/stderr on Windows to avoid pipe deadlocks
+            # Note: We intentionally don't use context manager as we need files to persist
+            # for the subprocess and be cleaned up manually later
+            stdout_file = tempfile.NamedTemporaryFile(  # noqa: SIM115
+                mode="w+b", delete=False, prefix=f"mcp_composer_{project_id}_stdout_", suffix=".log"
+            )
+            stderr_file = tempfile.NamedTemporaryFile(  # noqa: SIM115
+                mode="w+b", delete=False, prefix=f"mcp_composer_{project_id}_stderr_", suffix=".log"
+            )
+            stdout_handle = stdout_file
+            stderr_handle = stderr_file
+            stdout_name = stdout_file.name
+            stderr_name = stderr_file.name
+            await logger.adebug(f"Using temp files for MCP Composer logs: stdout={stdout_name}, stderr={stderr_name}")
+
+        process = subprocess.Popen(cmd, env=env, stdout=stdout_handle, stderr=stderr_handle)  # noqa: ASYNC220, S603
 
         # Monitor the process startup with multiple checks
         process_running = False
         port_bound = False
 
-        await logger.adebug(f"Monitoring MCP Composer startup for project {project_id} (PID: {process.pid})")
+        await logger.adebug(
+            f"MCP Composer process started with PID {process.pid}, monitoring startup for project {project_id}..."
+        )
 
-        for check in range(max_startup_checks):
-            await asyncio.sleep(startup_delay)
+        try:
+            for check in range(max_startup_checks):
+                await asyncio.sleep(startup_delay)
 
-            # Check if process is still running
-            poll_result = process.poll()
+                # Check if process is still running
+                poll_result = process.poll()
 
-            startup_error_msg = None
-            if poll_result is not None:
-                # Process terminated, get the error output
-                await logger.aerror(f"MCP Composer process {process.pid} terminated with exit code: {poll_result}")
-                try:
-                    stdout_content, stderr_content = process.communicate(timeout=2)
-                    # Log the full error details for debugging
-                    await logger.aerror(f"MCP Composer startup failed for project {project_id}")
-                    await logger.aerror(f"MCP Composer stdout:\n{stdout_content}")
-                    await logger.aerror(f"MCP Composer stderr:\n{stderr_content}")
-                    safe_cmd = self._obfuscate_command_secrets(cmd)
-                    await logger.aerror(f"Command that failed: {' '.join(safe_cmd)}")
-
-                    # Extract meaningful error message
-                    startup_error_msg = self._extract_error_message(stdout_content, stderr_content, oauth_server_url)
-                    raise MCPComposerStartupError(startup_error_msg, project_id)
-                except subprocess.TimeoutExpired:
-                    process.kill()
-                    await logger.aerror(
-                        f"MCP Composer process {process.pid} terminated unexpectedly for project {project_id}"
+                startup_error_msg = None
+                if poll_result is not None:
+                    # Process terminated, get the error output
+                    (
+                        stdout_content,
+                        stderr_content,
+                        startup_error_msg,
+                    ) = await self._read_process_output_and_extract_error(
+                        process, oauth_server_url, stdout_file=stdout_file, stderr_file=stderr_file
                     )
-                    startup_error_msg = self._extract_error_message("", "", oauth_server_url)
-                    raise MCPComposerStartupError(startup_error_msg, project_id) from None
+                    await self._log_startup_error_details(
+                        project_id, cmd, host, port, stdout_content, stderr_content, startup_error_msg, poll_result
+                    )
+                    raise MCPComposerStartupError(startup_error_msg, project_id)
 
-            # Process is still running, check if port is bound
-            port_bound = not self._is_port_available(port)
+                # Process is still running, check if port is bound
+                port_bound = not self._is_port_available(port)
 
-            if port_bound:
+                if port_bound:
+                    await logger.adebug(
+                        f"MCP Composer for project {project_id} bound to port {port} "
+                        f"(check {check + 1}/{max_startup_checks})"
+                    )
+                    process_running = True
+                    break
                 await logger.adebug(
-                    f"MCP Composer for project {project_id} bound to port {port} "
+                    f"MCP Composer for project {project_id} not yet bound to port {port} "
                     f"(check {check + 1}/{max_startup_checks})"
                 )
-                process_running = True
-                break
+
+                # Try to read any available stderr/stdout without blocking to see what's happening
+                await self._read_stream_non_blocking(process.stderr, "stderr")
+                await self._read_stream_non_blocking(process.stdout, "stdout")
+
+        except asyncio.CancelledError:
+            # Operation was cancelled, kill the process and cleanup
             await logger.adebug(
-                f"MCP Composer for project {project_id} not yet bound to port {port} "
-                f"(check {check + 1}/{max_startup_checks})"
+                f"MCP Composer process startup cancelled for project {project_id}, terminating process {process.pid}"
             )
-
-            # Try to read any available stderr without blocking (only log if there's an error)
-            if process.stderr and select.select([process.stderr], [], [], 0)[0]:
+            try:
+                process.terminate()
+                # Wait for graceful termination with timeout
                 try:
-                    stderr_line = process.stderr.readline()
-                    if stderr_line and "ERROR" in stderr_line:
-                        await logger.aerror(f"MCP Composer error: {stderr_line.strip()}")
-                except Exception:  # noqa: BLE001
-                    pass
+                    await asyncio.wait_for(asyncio.to_thread(process.wait), timeout=2.0)
+                except asyncio.TimeoutError:
+                    # Force kill if graceful termination times out
+                    await logger.adebug(f"Process {process.pid} did not terminate gracefully, force killing")
+                    await asyncio.to_thread(process.kill)
+                    await asyncio.to_thread(process.wait)
+            except Exception as e:  # noqa: BLE001
+                await logger.adebug(f"Error terminating process during cancellation: {e}")
+            raise  # Re-raise to propagate cancellation
 
         # After all checks
         if not process_running or not port_bound:
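The cancellation branch added above escalates from terminate to kill so a cancelled startup never leaks a child process, and it uses `asyncio.to_thread` to keep the blocking `process.wait()` off the event loop. A self-contained sketch of that pattern (the function name is illustrative, not a helper from this service):

    import asyncio
    import subprocess

    async def stop_subprocess(process: subprocess.Popen, grace_period: float = 2.0) -> None:
        """Ask a child process to exit, then force-kill it if it ignores the request."""
        process.terminate()  # SIGTERM on POSIX, TerminateProcess on Windows
        try:
            # process.wait() blocks, so run it in a worker thread to keep the event loop responsive.
            await asyncio.wait_for(asyncio.to_thread(process.wait), timeout=grace_period)
        except asyncio.TimeoutError:
            process.kill()  # escalate once the grace period expires
            await asyncio.to_thread(process.wait)  # reap the process to avoid a zombie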
@@ -532,56 +1412,43 @@ class MCPComposerService(Service):
 
             if poll_result is not None:
                 # Process died
-                startup_error_msg = None
-                try:
-                    stdout_content, stderr_content = process.communicate(timeout=2)
-                    # Extract meaningful error message
-                    startup_error_msg = self._extract_error_message(stdout_content, stderr_content, oauth_server_url)
-                    await logger.aerror(f"MCP Composer startup failed for project {project_id}:")
-                    await logger.aerror(f" - Process died with exit code: {poll_result}")
-                    await logger.aerror(f" - Target: {host}:{port}")
-                    # Obfuscate secrets in command before logging
-                    safe_cmd = self._obfuscate_command_secrets(cmd)
-                    await logger.aerror(f" - Command: {' '.join(safe_cmd)}")
-                    if stderr_content.strip():
-                        await logger.aerror(f" - Error output: {stderr_content.strip()}")
-                    if stdout_content.strip():
-                        await logger.aerror(f" - Standard output: {stdout_content.strip()}")
-                    await logger.aerror(f" - Error message: {startup_error_msg}")
-                except subprocess.TimeoutExpired:
-                    await logger.aerror(f"MCP Composer for project {project_id} died but couldn't read output")
-                    process.kill()
-
+                stdout_content, stderr_content, startup_error_msg = await self._read_process_output_and_extract_error(
+                    process, oauth_server_url, stdout_file=stdout_file, stderr_file=stderr_file
+                )
+                await self._log_startup_error_details(
+                    project_id, cmd, host, port, stdout_content, stderr_content, startup_error_msg, poll_result
+                )
                 raise MCPComposerStartupError(startup_error_msg, project_id)
             # Process running but port not bound
-            await logger.aerror(f"MCP Composer startup failed for project {project_id}:")
-            await logger.aerror(f" - Process is running (PID: {process.pid}) but failed to bind to port {port}")
             await logger.aerror(
                 f" - Checked {max_startup_checks} times over {max_startup_checks * startup_delay} seconds"
             )
-            await logger.aerror(f" - Target: {host}:{port}")
 
             # Get any available output before terminating
-            startup_error_msg = None
-            try:
-                process.terminate()
-                stdout_content, stderr_content = process.communicate(timeout=2)
-                startup_error_msg = self._extract_error_message(stdout_content, stderr_content, oauth_server_url)
-                if stderr_content.strip():
-                    await logger.aerror(f" - Process stderr: {stderr_content.strip()}")
-                if stdout_content.strip():
-                    await logger.aerror(f" - Process stdout: {stdout_content.strip()}")
-            except Exception:  # noqa: BLE001
-                process.kill()
-                await logger.aerror(" - Could not retrieve process output before termination")
-
+            process.terminate()
+            stdout_content, stderr_content, startup_error_msg = await self._read_process_output_and_extract_error(
+                process, oauth_server_url, stdout_file=stdout_file, stderr_file=stderr_file
+            )
+            await self._log_startup_error_details(
+                project_id, cmd, host, port, stdout_content, stderr_content, startup_error_msg, pid=process.pid
+            )
            raise MCPComposerStartupError(startup_error_msg, project_id)
 
-        # Close the pipes if everything is successful
-        if process.stdout:
-            process.stdout.close()
-        if process.stderr:
-            process.stderr.close()
+        # Close the pipes/files if everything is successful
+        if stdout_file and stderr_file:
+            # Clean up temp files on success
+            try:
+                stdout_file.close()
+                stderr_file.close()
+                Path(stdout_file.name).unlink()
+                Path(stderr_file.name).unlink()
+            except Exception as e:  # noqa: BLE001
+                await logger.adebug(f"Error cleaning up temp files on success: {e}")
+        else:
+            if process.stdout:
+                process.stdout.close()
+            if process.stderr:
+                process.stderr.close()
 
         return process
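Throughout the startup loop, readiness is inferred from the target port no longer being free rather than from process output. The diff does not show `_is_port_available` itself; one plausible shape for such a helper, given here as an assumption rather than the service's actual implementation, is a short connect test:

    import socket

    def is_port_available(port: int, host: str = "127.0.0.1") -> bool:
        """Return True while nothing is accepting connections on (host, port)."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(0.5)
            # connect_ex returns 0 when something is listening, a non-zero error code otherwise.
            return sock.connect_ex((host, port)) != 0

    # Mirrors the polling loop above: the composer counts as started once the port stops being available.
    # port_bound = not is_port_available(port)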