mcp-mesh 0.6.0__py3-none-any.whl → 0.6.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. _mcp_mesh/__init__.py +1 -1
  2. _mcp_mesh/engine/decorator_registry.py +26 -2
  3. _mcp_mesh/engine/dependency_injector.py +14 -1
  4. _mcp_mesh/engine/llm_config.py +11 -7
  5. _mcp_mesh/engine/mesh_llm_agent.py +247 -61
  6. _mcp_mesh/engine/mesh_llm_agent_injector.py +174 -0
  7. _mcp_mesh/engine/provider_handlers/__init__.py +20 -0
  8. _mcp_mesh/engine/provider_handlers/base_provider_handler.py +122 -0
  9. _mcp_mesh/engine/provider_handlers/claude_handler.py +138 -0
  10. _mcp_mesh/engine/provider_handlers/generic_handler.py +156 -0
  11. _mcp_mesh/engine/provider_handlers/openai_handler.py +163 -0
  12. _mcp_mesh/engine/provider_handlers/provider_handler_registry.py +167 -0
  13. _mcp_mesh/engine/response_parser.py +3 -38
  14. _mcp_mesh/engine/tool_schema_builder.py +3 -2
  15. _mcp_mesh/generated/.openapi-generator/FILES +3 -0
  16. _mcp_mesh/generated/.openapi-generator-ignore +0 -1
  17. _mcp_mesh/generated/mcp_mesh_registry_client/__init__.py +51 -97
  18. _mcp_mesh/generated/mcp_mesh_registry_client/models/__init__.py +42 -72
  19. _mcp_mesh/generated/mcp_mesh_registry_client/models/agent_info.py +11 -1
  20. _mcp_mesh/generated/mcp_mesh_registry_client/models/dependency_resolution_info.py +108 -0
  21. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_provider.py +95 -0
  22. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter.py +37 -58
  23. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter_filter_inner.py +32 -63
  24. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter_filter_inner_one_of.py +30 -29
  25. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_info.py +41 -59
  26. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_agent_registration.py +51 -98
  27. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_registration_response.py +70 -85
  28. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_tool_registration.py +51 -84
  29. _mcp_mesh/generated/mcp_mesh_registry_client/models/resolved_llm_provider.py +112 -0
  30. _mcp_mesh/pipeline/api_heartbeat/api_dependency_resolution.py +54 -21
  31. _mcp_mesh/pipeline/mcp_heartbeat/dependency_resolution.py +43 -26
  32. _mcp_mesh/pipeline/mcp_heartbeat/fast_heartbeat_check.py +3 -3
  33. _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_orchestrator.py +35 -10
  34. _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_pipeline.py +1 -1
  35. _mcp_mesh/pipeline/mcp_heartbeat/llm_tools_resolution.py +77 -39
  36. _mcp_mesh/pipeline/mcp_startup/fastapiserver_setup.py +118 -35
  37. _mcp_mesh/pipeline/mcp_startup/fastmcpserver_discovery.py +1 -1
  38. _mcp_mesh/pipeline/mcp_startup/heartbeat_preparation.py +48 -3
  39. _mcp_mesh/pipeline/mcp_startup/server_discovery.py +77 -48
  40. _mcp_mesh/pipeline/mcp_startup/startup_orchestrator.py +2 -2
  41. _mcp_mesh/shared/health_check_cache.py +246 -0
  42. _mcp_mesh/shared/registry_client_wrapper.py +29 -2
  43. {mcp_mesh-0.6.0.dist-info → mcp_mesh-0.6.2.dist-info}/METADATA +1 -1
  44. {mcp_mesh-0.6.0.dist-info → mcp_mesh-0.6.2.dist-info}/RECORD +50 -39
  45. mesh/__init__.py +12 -2
  46. mesh/decorators.py +105 -39
  47. mesh/helpers.py +259 -0
  48. mesh/types.py +53 -4
  49. {mcp_mesh-0.6.0.dist-info → mcp_mesh-0.6.2.dist-info}/WHEEL +0 -0
  50. {mcp_mesh-0.6.0.dist-info → mcp_mesh-0.6.2.dist-info}/licenses/LICENSE +0 -0
@@ -255,12 +255,14 @@ class DependencyResolutionStep(PipelineStep):
255
255
  # Get current agent ID for self-dependency detection
256
256
  import os
257
257
 
258
- from ...engine.self_dependency_proxy import SelfDependencyProxy
258
+ from ...engine.self_dependency_proxy import \
259
+ SelfDependencyProxy
259
260
 
260
261
  # Get current agent ID from DecoratorRegistry (single source of truth)
261
262
  current_agent_id = None
262
263
  try:
263
- from ...engine.decorator_registry import DecoratorRegistry
264
+ from ...engine.decorator_registry import \
265
+ DecoratorRegistry
264
266
 
265
267
  config = DecoratorRegistry.get_resolved_agent_config()
266
268
  current_agent_id = config["agent_id"]
@@ -293,36 +295,51 @@ class DependencyResolutionStep(PipelineStep):
293
295
  )
294
296
 
295
297
  if is_self_dependency:
296
- # Create self-dependency proxy with cached function reference
297
- original_func = injector.find_original_function(
298
- dep_function_name
299
- )
300
- if original_func:
301
- new_proxy = SelfDependencyProxy(
302
- original_func, dep_function_name
298
+ # Create self-dependency proxy with WRAPPER function (not original)
299
+ # The wrapper has dependency injection logic, so calling it ensures
300
+ # the target function's dependencies are also injected properly.
301
+ wrapper_func = None
302
+ if dep_function_name in mesh_tools:
303
+ wrapper_func = mesh_tools[dep_function_name].function
304
+ self.logger.debug(
305
+ f"🔍 Found wrapper for '{dep_function_name}' in DecoratorRegistry"
303
306
  )
304
- self.logger.warning(
305
- f"⚠️ SELF-DEPENDENCY: Using direct function call for '{capability}' "
306
- f"instead of HTTP to avoid deadlock. Consider refactoring to "
307
- f"eliminate self-dependencies if possible."
307
+
308
+ if wrapper_func:
309
+ new_proxy = SelfDependencyProxy(
310
+ wrapper_func, dep_function_name
308
311
  )
309
312
  self.logger.info(
310
- f"🔄 Updated to SelfDependencyProxy: '{capability}'"
313
+ f"🔄 SELF-DEPENDENCY: Using wrapper for '{capability}' "
314
+ f"(local call with full DI support)"
311
315
  )
312
316
  else:
313
- self.logger.error(
314
- f"❌ Cannot create SelfDependencyProxy for '{capability}': "
315
- f"original function '{dep_function_name}' not found, falling back to HTTP"
316
- )
317
- # Use unified proxy for fallback
318
- new_proxy = EnhancedUnifiedMCPProxy(
319
- endpoint,
320
- dep_function_name,
321
- kwargs_config=kwargs_config,
322
- )
323
- self.logger.debug(
324
- f"🔧 Created EnhancedUnifiedMCPProxy (fallback): {kwargs_config}"
317
+ # Fallback to original function if wrapper not found
318
+ original_func = injector.find_original_function(
319
+ dep_function_name
325
320
  )
321
+ if original_func:
322
+ new_proxy = SelfDependencyProxy(
323
+ original_func, dep_function_name
324
+ )
325
+ self.logger.warning(
326
+ f"⚠️ SELF-DEPENDENCY: Using original function for '{capability}' "
327
+ f"(wrapper not found, DI may not work for nested deps)"
328
+ )
329
+ else:
330
+ self.logger.error(
331
+ f"❌ Cannot create SelfDependencyProxy for '{capability}': "
332
+ f"neither wrapper nor original function '{dep_function_name}' found, falling back to HTTP"
333
+ )
334
+ # Use unified proxy for fallback
335
+ new_proxy = EnhancedUnifiedMCPProxy(
336
+ endpoint,
337
+ dep_function_name,
338
+ kwargs_config=kwargs_config,
339
+ )
340
+ self.logger.debug(
341
+ f"🔧 Created EnhancedUnifiedMCPProxy (fallback): {kwargs_config}"
342
+ )
326
343
  else:
327
344
  # Create cross-service proxy using unified proxy
328
345
  new_proxy = EnhancedUnifiedMCPProxy(
@@ -72,15 +72,15 @@ class FastHeartbeatStep(PipelineStep):
72
72
 
73
73
  # Log status and action
74
74
  if status == FastHeartbeatStatus.NO_CHANGES:
75
- self.logger.info(
75
+ self.logger.debug(
76
76
  f"✅ Fast heartbeat: No changes detected for agent '{agent_id}'"
77
77
  )
78
78
  elif status == FastHeartbeatStatus.TOPOLOGY_CHANGED:
79
- self.logger.info(
79
+ self.logger.debug(
80
80
  f"🔄 Fast heartbeat: Topology changed for agent '{agent_id}' - full refresh needed"
81
81
  )
82
82
  elif status == FastHeartbeatStatus.AGENT_UNKNOWN:
83
- self.logger.info(
83
+ self.logger.debug(
84
84
  f"❓ Fast heartbeat: Agent '{agent_id}' unknown - re-registration needed"
85
85
  )
86
86
  elif status == FastHeartbeatStatus.REGISTRY_ERROR:
@@ -8,7 +8,7 @@ context management and error handling.
8
8
  import json
9
9
  import logging
10
10
  from datetime import UTC, datetime
11
- from typing import Any, Dict, Optional
11
+ from typing import Any, Optional
12
12
 
13
13
  from ...shared.support_types import HealthStatus, HealthStatusType
14
14
  from .heartbeat_pipeline import HeartbeatPipeline
@@ -44,9 +44,8 @@ class HeartbeatOrchestrator:
44
44
 
45
45
  try:
46
46
 
47
-
48
47
  # Prepare heartbeat context with validation
49
- heartbeat_context = self._prepare_heartbeat_context(agent_id, context)
48
+ heartbeat_context = await self._prepare_heartbeat_context(agent_id, context)
50
49
 
51
50
  # Validate required context before proceeding
52
51
  if not self._validate_heartbeat_context(heartbeat_context):
@@ -55,6 +54,16 @@ class HeartbeatOrchestrator:
55
54
  )
56
55
  return False
57
56
 
57
+ # Check if health status is unhealthy - skip heartbeat if so
58
+ health_status = heartbeat_context.get("health_status")
59
+ if health_status and health_status.status == HealthStatusType.UNHEALTHY:
60
+ self.logger.warning(
61
+ f"⚠️ Heartbeat #{self._heartbeat_count} skipped for agent '{agent_id}': Health status is UNHEALTHY"
62
+ )
63
+ self.logger.warning(f" Health checks failed: {health_status.checks}")
64
+ self.logger.warning(f" Errors: {health_status.errors}")
65
+ return False
66
+
58
67
  # Log heartbeat request details for debugging
59
68
  self._log_heartbeat_request(heartbeat_context, self._heartbeat_count)
60
69
 
@@ -104,13 +113,13 @@ class HeartbeatOrchestrator:
104
113
  )
105
114
  return False
106
115
 
107
- def _prepare_heartbeat_context(
116
+ async def _prepare_heartbeat_context(
108
117
  self, agent_id: str, startup_context: dict[str, Any]
109
118
  ) -> dict[str, Any]:
110
119
  """Prepare context for heartbeat pipeline execution."""
111
120
 
112
121
  # Build health status from startup context
113
- health_status = self._build_health_status_from_context(
122
+ health_status = await self._build_health_status_from_context(
114
123
  startup_context, agent_id
115
124
  )
116
125
 
@@ -151,12 +160,30 @@ class HeartbeatOrchestrator:
151
160
 
152
161
  return True
153
162
 
154
- def _build_health_status_from_context(
163
+ async def _build_health_status_from_context(
155
164
  self, startup_context: dict[str, Any], agent_id: str
156
165
  ) -> HealthStatus:
157
- """Build health status object from startup context."""
166
+ """Build health status object from startup context with optional user health check."""
167
+
168
+ agent_config = startup_context.get("agent_config", {})
158
169
 
159
- # Get existing health status from context or build from current state
170
+ # Check if user provided a health_check function
171
+ health_check_fn = agent_config.get("health_check")
172
+ health_check_ttl = agent_config.get("health_check_ttl", 15)
173
+
174
+ # If health check is configured, use the cache
175
+ if health_check_fn:
176
+ from ...shared.health_check_cache import get_health_status_with_cache
177
+
178
+ return await get_health_status_with_cache(
179
+ agent_id=agent_id,
180
+ health_check_fn=health_check_fn,
181
+ agent_config=agent_config,
182
+ startup_context=startup_context,
183
+ ttl=health_check_ttl,
184
+ )
185
+
186
+ # No health check configured - use existing logic
160
187
  existing_health_status = startup_context.get("health_status")
161
188
 
162
189
  if existing_health_status:
@@ -166,8 +193,6 @@ class HeartbeatOrchestrator:
166
193
  return existing_health_status
167
194
 
168
195
  # Build minimal health status from context if none exists
169
- agent_config = startup_context.get("agent_config", {})
170
-
171
196
  return HealthStatus(
172
197
  agent_name=agent_id,
173
198
  status=HealthStatusType.HEALTHY,
@@ -189,7 +189,7 @@ class HeartbeatPipeline(MeshPipeline):
189
189
  # NO_CHANGES - skip for optimization
190
190
  should_execute_remaining = False
191
191
  reason = "optimization (no changes detected)"
192
- self.logger.info(
192
+ self.logger.debug(
193
193
  f"🚀 Skipping remaining steps for optimization: {reason}"
194
194
  )
195
195
  elif FastHeartbeatStatusUtil.should_skip_for_resilience(
@@ -56,28 +56,32 @@ class LLMToolsResolutionStep(PipelineStep):
56
56
  # Use hash-based change detection and processing logic
57
57
  await self.process_llm_tools_from_heartbeat(heartbeat_response)
58
58
 
59
- # Extract LLM tools count for context
59
+ # Extract LLM tools and providers count for context
60
60
  llm_tools = heartbeat_response.get("llm_tools", {})
61
+ llm_providers = heartbeat_response.get("llm_providers", {})
61
62
  function_count = len(llm_tools)
62
63
  tool_count = sum(
63
64
  len(tools) if isinstance(tools, list) else 0
64
65
  for tools in llm_tools.values()
65
66
  )
67
+ provider_count = len(llm_providers)
66
68
 
67
- # Store processed LLM tools info for context
69
+ # Store processed LLM tools and providers info for context
68
70
  result.add_context("llm_function_count", function_count)
69
71
  result.add_context("llm_tool_count", tool_count)
72
+ result.add_context("llm_provider_count", provider_count)
70
73
  result.add_context("llm_tools", llm_tools)
74
+ result.add_context("llm_providers", llm_providers)
71
75
 
72
- result.message = "LLM tools resolution completed (efficient hash-based)"
76
+ result.message = "LLM tools and providers resolution completed (efficient hash-based)"
73
77
 
74
- if function_count > 0:
78
+ if function_count > 0 or provider_count > 0:
75
79
  self.logger.info(
76
- f"🤖 LLM tools resolved: {function_count} functions, {tool_count} tools"
80
+ f"🤖 LLM state resolved: {function_count} functions, {tool_count} tools, {provider_count} providers"
77
81
  )
78
82
 
79
83
  self.logger.debug(
80
- "🤖 LLM tools resolution step completed using hash-based change detection"
84
+ "🤖 LLM tools and providers resolution step completed using hash-based change detection"
81
85
  )
82
86
 
83
87
  except Exception as e:
@@ -90,31 +94,48 @@ class LLMToolsResolutionStep(PipelineStep):
90
94
 
91
95
  def _extract_llm_tools_state(
92
96
  self, heartbeat_response: dict[str, Any]
93
- ) -> dict[str, list[dict[str, Any]]]:
94
- """Extract LLM tools state structure from heartbeat response.
97
+ ) -> dict[str, Any]:
98
+ """Extract LLM tools and providers state structure from heartbeat response.
95
99
 
96
100
  Preserves array structure and order from registry.
97
101
 
98
102
  Returns:
99
- {function_id: [{function_name, capability, endpoint, input_schema, ...}, ...]}
103
+ {
104
+ "llm_tools": {function_id: [{function_name, capability, endpoint, input_schema, ...}, ...]},
105
+ "llm_providers": {function_id: {name, endpoint, agent_id, capability, tags, ...}}
106
+ }
100
107
  """
101
108
  llm_tools = heartbeat_response.get("llm_tools", {})
109
+ llm_providers = heartbeat_response.get("llm_providers", {})
102
110
 
103
111
  if not isinstance(llm_tools, dict):
104
112
  self.logger.warning(f"llm_tools is not a dict, type={type(llm_tools)}")
105
- return {}
113
+ llm_tools = {}
106
114
 
107
- # Return as-is since registry already provides the correct structure
108
- # Filter out non-dict or non-list values for safety
109
- state = {}
115
+ if not isinstance(llm_providers, dict):
116
+ self.logger.warning(f"llm_providers is not a dict, type={type(llm_providers)}")
117
+ llm_providers = {}
118
+
119
+ # Build state with both llm_tools and llm_providers
120
+ # This ensures hash changes when EITHER tools OR providers change
121
+ state = {
122
+ "llm_tools": {},
123
+ "llm_providers": llm_providers # Include providers directly
124
+ }
125
+
126
+ # Filter out non-list values for llm_tools
110
127
  for function_id, tools in llm_tools.items():
111
128
  if isinstance(tools, list):
112
- state[function_id] = tools
129
+ state["llm_tools"][function_id] = tools
113
130
 
114
131
  return state
115
132
 
116
133
  def _hash_llm_tools_state(self, state: dict) -> str:
117
- """Create hash of LLM tools state structure."""
134
+ """Create hash of LLM tools and providers state structure.
135
+
136
+ This hash includes BOTH llm_tools and llm_providers to ensure
137
+ rewiring happens when either changes.
138
+ """
118
139
  import hashlib
119
140
 
120
141
  # Convert to sorted JSON string for consistent hashing
@@ -129,64 +150,71 @@ class LLMToolsResolutionStep(PipelineStep):
129
150
  ) -> None:
130
151
  """Process heartbeat response to update LLM agent injection.
131
152
 
132
- Uses hash-based comparison to efficiently detect when ANY LLM tools change
153
+ Uses hash-based comparison to efficiently detect when ANY LLM tools OR providers change
133
154
  and then updates ALL affected LLM agents in one operation.
134
155
 
135
156
  Resilience logic:
136
- - No response (connection error, 5xx) → Skip entirely (keep existing tools)
137
- - 2xx response with empty llm_tools → Clear all LLM tools
138
- - 2xx response with partial llm_tools → Update to match registry exactly
157
+ - No response (connection error, 5xx) → Skip entirely (keep existing state)
158
+ - 2xx response with empty llm_tools/llm_providers → Clear all LLM state
159
+ - 2xx response with partial llm_tools/llm_providers → Update to match registry exactly
160
+
161
+ The hash includes both llm_tools and llm_providers to ensure rewiring happens
162
+ when either changes (e.g., provider failover from Claude to OpenAI).
139
163
  """
140
164
  try:
141
165
  if not heartbeat_response:
142
166
  # No response from registry (connection error, timeout, 5xx)
143
- # → Skip entirely for resilience (keep existing LLM tools)
167
+ # → Skip entirely for resilience (keep existing LLM tools and providers)
144
168
  self.logger.debug(
145
- "No heartbeat response - skipping LLM tools processing for resilience"
169
+ "No heartbeat response - skipping LLM state processing for resilience"
146
170
  )
147
171
  return
148
172
 
149
- # Extract current LLM tools state
173
+ # Extract current LLM tools and providers state
150
174
  current_state = self._extract_llm_tools_state(heartbeat_response)
151
175
 
152
- # IMPORTANT: Empty state from successful response means "no LLM tools"
176
+ # IMPORTANT: Empty state from successful response means "no LLM tools or providers"
153
177
  # This is different from "no response" which means "keep existing for resilience"
154
178
 
155
- # Hash the current state (including empty state)
179
+ # Hash the current state (including both llm_tools and llm_providers)
156
180
  current_hash = self._hash_llm_tools_state(current_state)
157
181
 
158
182
  # Compare with previous state (use global variable)
159
183
  global _last_llm_tools_hash
160
184
  if current_hash == _last_llm_tools_hash:
161
185
  self.logger.debug(
162
- f"🔄 LLM tools state unchanged (hash: {current_hash}), skipping processing"
186
+ f"🔄 LLM state unchanged (hash: {current_hash}), skipping processing"
163
187
  )
164
188
  return
165
189
 
166
190
  # State changed - determine what changed
167
- function_count = len(current_state)
168
- total_tools = sum(len(tools) for tools in current_state.values())
191
+ llm_tools = current_state.get("llm_tools", {})
192
+ llm_providers = current_state.get("llm_providers", {})
193
+
194
+ function_count = len(llm_tools)
195
+ total_tools = sum(len(tools) for tools in llm_tools.values())
196
+ provider_count = len(llm_providers)
169
197
 
170
198
  if _last_llm_tools_hash is None:
171
- if function_count > 0:
199
+ if function_count > 0 or provider_count > 0:
172
200
  self.logger.info(
173
- f"🤖 Initial LLM tools state detected: {function_count} functions, {total_tools} tools"
201
+ f"🤖 Initial LLM state detected: {function_count} functions, {total_tools} tools, {provider_count} providers"
174
202
  )
175
203
  else:
176
204
  self.logger.info(
177
- "🤖 Initial LLM tools state detected: no LLM tools"
205
+ "🤖 Initial LLM state detected: no LLM tools or providers"
178
206
  )
179
207
  else:
180
208
  self.logger.info(
181
- f"🤖 LLM tools state changed (hash: {_last_llm_tools_hash} → {current_hash})"
209
+ f"🤖 LLM state changed (hash: {_last_llm_tools_hash} → {current_hash})"
182
210
  )
183
- if function_count > 0:
211
+ if function_count > 0 or provider_count > 0:
184
212
  self.logger.info(
185
- f"🤖 Updating LLM tools for {function_count} functions ({total_tools} total tools)"
213
+ f"🤖 Updating LLM state: {function_count} functions ({total_tools} tools), {provider_count} providers"
186
214
  )
187
215
  else:
188
216
  self.logger.info(
189
- "🤖 Registry reports no LLM tools - clearing all existing LLM tools"
217
+ "🤖 Registry reports no LLM tools or providers - clearing all existing state"
190
218
  )
191
219
 
192
220
  injector = get_global_injector()
@@ -197,22 +225,32 @@ class LLMToolsResolutionStep(PipelineStep):
197
225
  self.logger.debug(
198
226
  "🤖 Initial LLM tools processing - calling process_llm_tools()"
199
227
  )
200
- injector.process_llm_tools(current_state)
228
+ injector.process_llm_tools(llm_tools)
201
229
  else:
202
230
  # Update - use update_llm_tools
203
231
  self.logger.debug("🤖 LLM tools update - calling update_llm_tools()")
204
- injector.update_llm_tools(current_state)
232
+ injector.update_llm_tools(llm_tools)
233
+
234
+ # Process LLM providers (v0.6.1 mesh delegation)
235
+ # Now part of hash-based change detection, so this always runs when state changes
236
+ if llm_providers:
237
+ self.logger.info(
238
+ f"🔌 Processing LLM providers for {len(llm_providers)} functions"
239
+ )
240
+ injector.process_llm_providers(llm_providers)
241
+ else:
242
+ self.logger.debug("🔌 No llm_providers in current state")
205
243
 
206
244
  # Store new hash for next comparison (use global variable)
207
245
  _last_llm_tools_hash = current_hash
208
246
 
209
- if function_count > 0:
247
+ if function_count > 0 or provider_count > 0:
210
248
  self.logger.info(
211
- f"✅ Successfully processed LLM tools for {function_count} functions ({total_tools} tools, state hash: {current_hash})"
249
+ f"✅ Successfully processed LLM state: {function_count} functions ({total_tools} tools), {provider_count} providers (hash: {current_hash})"
212
250
  )
213
251
  else:
214
252
  self.logger.info(
215
- f"✅ LLM tools state synchronized (no tools, state hash: {current_hash})"
253
+ f"✅ LLM state synchronized (no tools or providers, hash: {current_hash})"
216
254
  )
217
255
 
218
256
  except Exception as e: