mcp-mesh 0.7.11__py3-none-any.whl → 0.7.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. _mcp_mesh/__init__.py +1 -1
  2. _mcp_mesh/engine/__init__.py +1 -22
  3. _mcp_mesh/engine/async_mcp_client.py +88 -25
  4. _mcp_mesh/engine/decorator_registry.py +10 -9
  5. _mcp_mesh/engine/dependency_injector.py +64 -53
  6. _mcp_mesh/engine/mesh_llm_agent.py +119 -5
  7. _mcp_mesh/engine/mesh_llm_agent_injector.py +30 -0
  8. _mcp_mesh/engine/session_aware_client.py +3 -3
  9. _mcp_mesh/engine/unified_mcp_proxy.py +82 -90
  10. _mcp_mesh/pipeline/api_heartbeat/api_dependency_resolution.py +0 -89
  11. _mcp_mesh/pipeline/api_heartbeat/api_fast_heartbeat_check.py +3 -3
  12. _mcp_mesh/pipeline/api_heartbeat/api_heartbeat_pipeline.py +30 -28
  13. _mcp_mesh/pipeline/mcp_heartbeat/dependency_resolution.py +16 -18
  14. _mcp_mesh/pipeline/mcp_heartbeat/fast_heartbeat_check.py +5 -5
  15. _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_orchestrator.py +3 -3
  16. _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_pipeline.py +6 -6
  17. _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_send.py +1 -1
  18. _mcp_mesh/pipeline/mcp_heartbeat/llm_tools_resolution.py +15 -11
  19. _mcp_mesh/pipeline/mcp_heartbeat/registry_connection.py +3 -3
  20. _mcp_mesh/pipeline/mcp_startup/fastapiserver_setup.py +37 -268
  21. _mcp_mesh/pipeline/mcp_startup/lifespan_factory.py +142 -0
  22. _mcp_mesh/pipeline/mcp_startup/startup_orchestrator.py +57 -93
  23. _mcp_mesh/pipeline/shared/registry_connection.py +1 -1
  24. _mcp_mesh/shared/health_check_manager.py +313 -0
  25. _mcp_mesh/shared/logging_config.py +205 -14
  26. _mcp_mesh/shared/registry_client_wrapper.py +8 -8
  27. _mcp_mesh/shared/sse_parser.py +19 -17
  28. _mcp_mesh/tracing/execution_tracer.py +26 -1
  29. _mcp_mesh/tracing/fastapi_tracing_middleware.py +3 -4
  30. _mcp_mesh/tracing/trace_context_helper.py +25 -6
  31. {mcp_mesh-0.7.11.dist-info → mcp_mesh-0.7.13.dist-info}/METADATA +1 -1
  32. {mcp_mesh-0.7.11.dist-info → mcp_mesh-0.7.13.dist-info}/RECORD +38 -39
  33. mesh/__init__.py +3 -1
  34. mesh/decorators.py +81 -43
  35. mesh/helpers.py +72 -4
  36. mesh/types.py +48 -4
  37. _mcp_mesh/engine/full_mcp_proxy.py +0 -641
  38. _mcp_mesh/engine/mcp_client_proxy.py +0 -457
  39. _mcp_mesh/shared/health_check_cache.py +0 -246
  40. {mcp_mesh-0.7.11.dist-info → mcp_mesh-0.7.13.dist-info}/WHEEL +0 -0
  41. {mcp_mesh-0.7.11.dist-info → mcp_mesh-0.7.13.dist-info}/licenses/LICENSE +0 -0
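A pattern worth noting before the hunks: most of the changes below demote chatty `self.logger.debug(...)` calls to `self.logger.trace(...)`. Python's standard `logging` module has no TRACE level, so this only works if `logging_config.py` (which gains ~205 lines in this release) registers a custom level below DEBUG. The following is a minimal sketch of how such a level is conventionally registered; the actual body of `logging_config.py` is not shown in this diff, so treat names and the level number as assumptions.

```python
# Hypothetical sketch of a custom TRACE level; mcp-mesh's real
# logging_config.py is not part of this diff.
import logging

TRACE = 5  # below logging.DEBUG (10)
logging.addLevelName(TRACE, "TRACE")

def _trace(self: logging.Logger, message: str, *args, **kwargs) -> None:
    """Log 'message' at TRACE level if the logger is enabled for it."""
    if self.isEnabledFor(TRACE):
        self._log(TRACE, message, args, **kwargs)

logging.Logger.trace = _trace  # makes self.logger.trace(...) available

logging.basicConfig(level=TRACE)
logging.getLogger("demo").trace("visible only when level <= TRACE")
```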
_mcp_mesh/pipeline/api_heartbeat/api_heartbeat_pipeline.py

@@ -1,8 +1,8 @@
 """
 API heartbeat pipeline for FastAPI service health monitoring.
 
-Provides structured execution of API service heartbeat operations with proper
-error handling and logging. Runs periodically to maintain registry communication
+Provides structured execution of API service heartbeat operations with proper
+error handling and logging. Runs periodically to maintain registry communication
 and service health status for FastAPI applications using @mesh.route decorators.
 """
 
@@ -12,11 +12,11 @@ from typing import Any
 from ...shared.fast_heartbeat_status import FastHeartbeatStatus, FastHeartbeatStatusUtil
 from ..shared.mesh_pipeline import MeshPipeline
 from ..shared.pipeline_types import PipelineStatus
-from .api_registry_connection import APIRegistryConnectionStep
-from .api_health_check import APIHealthCheckStep
+from .api_dependency_resolution import APIDependencyResolutionStep
 from .api_fast_heartbeat_check import APIFastHeartbeatStep
+from .api_health_check import APIHealthCheckStep
 from .api_heartbeat_send import APIHeartbeatSendStep
-from .api_dependency_resolution import APIDependencyResolutionStep
+from .api_registry_connection import APIRegistryConnectionStep
 
 logger = logging.getLogger(__name__)
 
@@ -50,23 +50,21 @@ class APIHeartbeatPipeline(MeshPipeline):
         """Setup the API heartbeat pipeline steps with fast optimization."""
         # API heartbeat steps with fast optimization pattern
         steps = [
-            APIRegistryConnectionStep(),  # Prepare registry communication
-            APIHealthCheckStep(),  # Check FastAPI app health status
-            APIFastHeartbeatStep(),  # Fast heartbeat check (HEAD request)
-            APIHeartbeatSendStep(),  # Conditional heartbeat send (POST request)
+            APIRegistryConnectionStep(),  # Prepare registry communication
+            APIHealthCheckStep(),  # Check FastAPI app health status
+            APIFastHeartbeatStep(),  # Fast heartbeat check (HEAD request)
+            APIHeartbeatSendStep(),  # Conditional heartbeat send (POST request)
             APIDependencyResolutionStep(),  # Conditional dependency resolution
         ]
 
         self.add_steps(steps)
-        self.logger.debug(f"API heartbeat pipeline configured with {len(steps)} steps")
-
+        self.logger.trace(f"API heartbeat pipeline configured with {len(steps)} steps")
+
         # Log the pipeline strategy
         self.logger.info(
-            f"🌐 API Heartbeat Pipeline initialized: fast optimization for FastAPI apps"
-        )
-        self.logger.debug(
-            f"📋 Pipeline steps: {[step.name for step in steps]}"
+            "🌐 API Heartbeat Pipeline initialized: fast optimization for FastAPI apps"
         )
+        self.logger.trace(f"📋 Pipeline steps: {[step.name for step in steps]}")
 
     async def execute_api_heartbeat_cycle(
         self, heartbeat_context: dict[str, Any]
@@ -75,13 +73,13 @@ class APIHeartbeatPipeline(MeshPipeline):
         Execute a complete API heartbeat cycle with fast optimization and enhanced error handling.
 
         Args:
-            heartbeat_context: Context containing registry_wrapper, service_id,
+            heartbeat_context: Context containing registry_wrapper, service_id,
                 health_status, fastapi_app, etc.
 
         Returns:
             PipelineResult with execution status and any context updates
         """
-        self.logger.debug("Starting API heartbeat pipeline execution")
+        self.logger.trace("Starting API heartbeat pipeline execution")
 
         # Initialize pipeline context with heartbeat-specific data
         self.context.clear()
@@ -92,7 +90,7 @@ class APIHeartbeatPipeline(MeshPipeline):
             result = await self._execute_with_conditional_logic()
 
             if result.is_success():
-                self.logger.debug("✅ API heartbeat pipeline completed successfully")
+                self.logger.trace("✅ API heartbeat pipeline completed successfully")
             elif result.status == PipelineStatus.PARTIAL:
                 self.logger.warning(
                     f"⚠️ API heartbeat pipeline completed partially: {result.message}"
@@ -113,7 +111,7 @@ class APIHeartbeatPipeline(MeshPipeline):
         except Exception as e:
             # Log detailed error information for debugging
             import traceback
-
+
             self.logger.error(
                 f"❌ API heartbeat pipeline failed with exception: {e}\n"
                 f"Context keys: {list(self.context.keys())}\n"
@@ -122,7 +120,7 @@ class APIHeartbeatPipeline(MeshPipeline):
 
             # Create failure result with detailed context
             from ..shared.pipeline_types import PipelineResult
-
+
             failure_result = PipelineResult(
                 status=PipelineStatus.FAILED,
                 message=f"API heartbeat pipeline exception: {str(e)[:200]}...",
@@ -138,7 +136,7 @@ class APIHeartbeatPipeline(MeshPipeline):
 
         Always executes:
         - APIRegistryConnectionStep
-        - APIHealthCheckStep
+        - APIHealthCheckStep
         - APIFastHeartbeatStep
 
         Conditionally executes based on fast heartbeat status:
@@ -150,7 +148,7 @@ class APIHeartbeatPipeline(MeshPipeline):
             PipelineResult with execution status and context
         """
         from ..shared.pipeline_types import PipelineResult
-
+
         overall_result = PipelineResult(
             message="API heartbeat pipeline execution completed"
         )
@@ -170,7 +168,7 @@ class APIHeartbeatPipeline(MeshPipeline):
 
         # Execute mandatory steps
         for step in mandatory_steps:
-            self.logger.debug(f"Executing mandatory step: {step.name}")
+            self.logger.trace(f"Executing mandatory step: {step.name}")
 
             step_result = await step.execute(self.context)
             executed_steps.append(step.name)
@@ -226,7 +224,9 @@ class APIHeartbeatPipeline(MeshPipeline):
                 # TOPOLOGY_CHANGED, AGENT_UNKNOWN - execute full pipeline
                 should_execute_remaining = True
                 reason = "changes detected or re-registration needed"
-                self.logger.info(f"🔄 API heartbeat: Executing remaining steps: {reason}")
+                self.logger.info(
+                    f"🔄 API heartbeat: Executing remaining steps: {reason}"
+                )
             else:
                 # Unknown status - fallback to full execution
                 self.logger.warning(
@@ -238,7 +238,7 @@ class APIHeartbeatPipeline(MeshPipeline):
         # Execute or skip conditional steps based on decision
         if should_execute_remaining:
             for step in conditional_steps:
-                self.logger.debug(f"Executing conditional step: {step.name}")
+                self.logger.trace(f"Executing conditional step: {step.name}")
 
                 step_result = await step.execute(self.context)
                 executed_steps.append(step.name)
@@ -269,7 +269,7 @@ class APIHeartbeatPipeline(MeshPipeline):
             # Mark skipped steps
             for step in conditional_steps:
                 skipped_steps.append(step.name)
-
+
             # For skipped heartbeat due to NO_CHANGES, set success context
             if fast_heartbeat_status == FastHeartbeatStatus.NO_CHANGES:
                 overall_result.add_context("heartbeat_success", True)
@@ -300,10 +300,12 @@ class APIHeartbeatPipeline(MeshPipeline):
         except Exception as e:
             # Handle unexpected exceptions
             overall_result.status = PipelineStatus.FAILED
-            overall_result.message = f"API pipeline execution failed with exception: {e}"
+            overall_result.message = (
+                f"API pipeline execution failed with exception: {e}"
+            )
             overall_result.add_error(str(e))
             for key, value in self.context.items():
                 overall_result.add_context(key, value)
 
             self.logger.error(f"❌ API conditional pipeline execution failed: {e}")
-        return overall_result
+        return overall_result
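The hunks above implement a two-tier heartbeat: mandatory steps (registry connection, health check, fast HEAD-request check) always run, while the expensive steps (POST heartbeat send, dependency resolution) run only when the fast check reports a change. A rough sketch of that skip/execute decision follows, using the four statuses visible in these hunks; the real `FastHeartbeatStatusUtil` helpers are not part of this diff, so the enum values and the helper function are illustrative.

```python
# Illustrative sketch of the conditional-execution decision; not the
# package's actual FastHeartbeatStatusUtil API.
from enum import Enum

class FastHeartbeatStatus(Enum):
    NO_CHANGES = "no_changes"              # skip POST + dependency resolution
    TOPOLOGY_CHANGED = "topology_changed"  # run full pipeline
    AGENT_UNKNOWN = "agent_unknown"        # re-registration: run full pipeline
    REGISTRY_ERROR = "registry_error"      # skip for resilience, keep old wiring

def should_execute_remaining(status: FastHeartbeatStatus) -> bool:
    """True when the conditional steps (heartbeat send, dependency
    resolution) should run in this cycle."""
    return status in (
        FastHeartbeatStatus.TOPOLOGY_CHANGED,
        FastHeartbeatStatus.AGENT_UNKNOWN,
    )
```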
_mcp_mesh/pipeline/mcp_heartbeat/dependency_resolution.py

@@ -34,7 +34,7 @@ class DependencyResolutionStep(PipelineStep):
 
     async def execute(self, context: dict[str, Any]) -> PipelineResult:
         """Process dependency resolution with hash-based change detection."""
-        self.logger.debug("Processing dependency resolution...")
+        self.logger.trace("Processing dependency resolution...")
 
         result = PipelineResult(message="Dependency resolution processed")
 
@@ -68,7 +68,7 @@ class DependencyResolutionStep(PipelineStep):
         result.add_context("dependencies_resolved", dependencies_resolved)
 
         result.message = "Dependency resolution completed (efficient hash-based)"
-        self.logger.debug(
+        self.logger.trace(
             "🔗 Dependency resolution step completed using hash-based change detection"
         )
 
@@ -147,7 +147,7 @@ class DependencyResolutionStep(PipelineStep):
         if not heartbeat_response:
             # No response from registry (connection error, timeout, 5xx)
             # → Skip entirely for resilience (keep existing dependencies)
-            self.logger.debug(
+            self.logger.trace(
                 "No heartbeat response - skipping rewiring for resilience"
             )
             return
@@ -164,7 +164,7 @@ class DependencyResolutionStep(PipelineStep):
         # Compare with previous state (use global variable)
         global _last_dependency_hash
         if current_hash == _last_dependency_hash:
-            self.logger.debug(
+            self.logger.trace(
                 f"🔄 Dependency state unchanged (hash: {current_hash}), skipping rewiring"
            )
            return
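This step skips rewiring when a digest of the registry's dependency state matches the previous cycle. The hashing helper itself is not in this diff; below is a minimal sketch of the pattern, assuming the digest is taken over canonical JSON (the real function name and algorithm may differ).

```python
# Minimal sketch of hash-based change detection; the package's actual
# hashing helper is not shown in this diff.
import hashlib
import json

_last_dependency_hash: str | None = None

def compute_state_hash(state: dict) -> str:
    """Stable digest of a JSON-serializable state snapshot."""
    canonical = json.dumps(state, sort_keys=True, separators=(",", ":"))
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()

def state_changed(state: dict) -> bool:
    """Compare against the previous cycle and remember the new hash."""
    global _last_dependency_hash
    current = compute_state_hash(state)
    if current == _last_dependency_hash:
        return False  # unchanged: skip rewiring, as the step above does
    _last_dependency_hash = current
    return True
```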
@@ -255,29 +255,27 @@ class DependencyResolutionStep(PipelineStep):
             # Get current agent ID for self-dependency detection
             import os
 
-            from ...engine.self_dependency_proxy import \
-                SelfDependencyProxy
+            from ...engine.self_dependency_proxy import SelfDependencyProxy
 
             # Get current agent ID from DecoratorRegistry (single source of truth)
             current_agent_id = None
             try:
-                from ...engine.decorator_registry import \
-                    DecoratorRegistry
+                from ...engine.decorator_registry import DecoratorRegistry
 
                 config = DecoratorRegistry.get_resolved_agent_config()
                 current_agent_id = config["agent_id"]
-                self.logger.debug(
+                self.logger.trace(
                     f"🔍 Current agent ID from DecoratorRegistry: '{current_agent_id}'"
                 )
             except Exception as e:
                 # Fallback to environment variable
                 current_agent_id = os.getenv("MCP_MESH_AGENT_ID")
-                self.logger.debug(
+                self.logger.trace(
                     f"🔍 Current agent ID from environment: '{current_agent_id}' (fallback due to: {e})"
                 )
 
             target_agent_id = dep_info.get("agent_id")
-            self.logger.debug(
+            self.logger.trace(
                 f"🔍 Target agent ID from registry: '{target_agent_id}'"
             )
 
@@ -288,7 +286,7 @@ class DependencyResolutionStep(PipelineStep):
                 and current_agent_id == target_agent_id
             )
 
-            self.logger.debug(
+            self.logger.trace(
                 f"🔍 Self-dependency check for '{capability}': "
                 f"current='{current_agent_id}' vs target='{target_agent_id}' "
                 f"→ {'SELF' if is_self_dependency else 'CROSS'}-dependency"
@@ -301,7 +299,7 @@ class DependencyResolutionStep(PipelineStep):
             wrapper_func = None
             if dep_function_name in mesh_tools:
                 wrapper_func = mesh_tools[dep_function_name].function
-                self.logger.debug(
+                self.logger.trace(
                     f"🔍 Found wrapper for '{dep_function_name}' in DecoratorRegistry"
                 )
 
@@ -309,7 +307,7 @@ class DependencyResolutionStep(PipelineStep):
                 new_proxy = SelfDependencyProxy(
                     wrapper_func, dep_function_name
                 )
-                self.logger.info(
+                self.logger.debug(
                     f"🔄 SELF-DEPENDENCY: Using wrapper for '{capability}' "
                     f"(local call with full DI support)"
                 )
@@ -337,7 +335,7 @@ class DependencyResolutionStep(PipelineStep):
                     dep_function_name,
                     kwargs_config=kwargs_config,
                 )
-                self.logger.debug(
+                self.logger.trace(
                     f"🔧 Created EnhancedUnifiedMCPProxy (fallback): {kwargs_config}"
                 )
             else:
@@ -347,7 +345,7 @@ class DependencyResolutionStep(PipelineStep):
                     dep_function_name,
                     kwargs_config=kwargs_config,
                 )
-                self.logger.info(
+                self.logger.debug(
                     f"🔄 Updated to EnhancedUnifiedMCPProxy: '{capability}' -> {endpoint}/{dep_function_name}, "
                     f"timeout={kwargs_config.get('timeout', 30)}s, streaming={kwargs_config.get('streaming', False)}"
                 )
@@ -356,12 +354,12 @@ class DependencyResolutionStep(PipelineStep):
                 dep_key = f"{func_id}:dep_{dep_index}"
                 await injector.register_dependency(dep_key, new_proxy)
                 updated_count += 1
-                self.logger.debug(
+                self.logger.trace(
                     f"🔗 Registered dependency '{capability}' at position {dep_index} with key '{dep_key}' (func_id: {func_id})"
                 )
             else:
                 if status != "available":
-                    self.logger.debug(
+                    self.logger.trace(
                         f"⚠️ Dependency '{capability}' at position {dep_index} not available: {status}"
                     )
                 else:
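Dependencies are registered under positional keys of the form `{func_id}:dep_{dep_index}`, so each parameter slot of a decorated function can be rewired independently. A stripped-down illustration of that keying scheme follows; the real `DependencyInjector` also handles locking and live rewiring, none of which is shown here.

```python
# Rough sketch of positional dependency registration as used in the hunk
# above; class and method shapes are illustrative, not the actual injector.
class MiniInjector:
    def __init__(self) -> None:
        self._deps: dict[str, object] = {}

    async def register_dependency(self, dep_key: str, proxy: object) -> None:
        # Keyed "<func_id>:dep_<index>", so each decorated function's
        # positional dependency slot can be swapped without touching others.
        self._deps[dep_key] = proxy

    def lookup(self, func_id: str, dep_index: int) -> object | None:
        return self._deps.get(f"{func_id}:dep_{dep_index}")
```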
_mcp_mesh/pipeline/mcp_heartbeat/fast_heartbeat_check.py

@@ -41,7 +41,7 @@ class FastHeartbeatStep(PipelineStep):
         Returns:
             PipelineResult with fast_heartbeat_status in context
         """
-        self.logger.debug("Starting fast heartbeat check...")
+        self.logger.trace("Starting fast heartbeat check...")
 
         result = PipelineResult(message="Fast heartbeat check completed")
 
@@ -56,7 +56,7 @@ class FastHeartbeatStep(PipelineStep):
         if not registry_wrapper:
             raise ValueError("registry_wrapper is required in context")
 
-        self.logger.debug(
+        self.logger.trace(
             f"🚀 Performing fast heartbeat check for agent '{agent_id}'"
         )
 
@@ -72,15 +72,15 @@ class FastHeartbeatStep(PipelineStep):
 
         # Log status and action
         if status == FastHeartbeatStatus.NO_CHANGES:
-            self.logger.debug(
+            self.logger.trace(
                 f"✅ Fast heartbeat: No changes detected for agent '{agent_id}'"
             )
         elif status == FastHeartbeatStatus.TOPOLOGY_CHANGED:
-            self.logger.debug(
+            self.logger.trace(
                 f"🔄 Fast heartbeat: Topology changed for agent '{agent_id}' - full refresh needed"
             )
         elif status == FastHeartbeatStatus.AGENT_UNKNOWN:
-            self.logger.debug(
+            self.logger.trace(
                 f"❓ Fast heartbeat: Agent '{agent_id}' unknown - re-registration needed"
             )
         elif status == FastHeartbeatStatus.REGISTRY_ERROR:
_mcp_mesh/pipeline/mcp_heartbeat/heartbeat_orchestrator.py

@@ -68,7 +68,7 @@ class HeartbeatOrchestrator:
         self._log_heartbeat_request(heartbeat_context, self._heartbeat_count)
 
         # Execute heartbeat pipeline with timeout protection
-        self.logger.info(
+        self.logger.trace(
             f"💓 Executing heartbeat #{self._heartbeat_count} for agent '{agent_id}'"
         )
 
@@ -173,7 +173,7 @@ class HeartbeatOrchestrator:
 
         # If health check is configured, use the cache
         if health_check_fn:
-            from ...shared.health_check_cache import get_health_status_with_cache
+            from ...shared.health_check_manager import get_health_status_with_cache
 
             return await get_health_status_with_cache(
                 agent_id=agent_id,
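This import moves `get_health_status_with_cache` from the deleted `health_check_cache` module into the new `health_check_manager` (+313 lines). Only the `agent_id` argument and the `health_check_fn` guard are visible in this hunk, so the following TTL-cache sketch is an assumption about the function's shape, not the actual implementation.

```python
# Hedged sketch of a TTL-cached async health check in the spirit of
# get_health_status_with_cache; parameter names beyond agent_id and the
# return shape are assumptions.
import time
from collections.abc import Awaitable, Callable

_cache: dict[str, tuple[float, dict]] = {}

async def get_health_status_with_cache(
    agent_id: str,
    health_check_fn: Callable[[], Awaitable[dict]],
    ttl_seconds: float = 10.0,
) -> dict:
    """Run the user health check at most once per TTL window per agent."""
    now = time.monotonic()
    cached = _cache.get(agent_id)
    if cached and now - cached[0] < ttl_seconds:
        return cached[1]  # still fresh: reuse last result
    status = await health_check_fn()
    _cache[agent_id] = (now, status)
    return status
```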
@@ -260,7 +260,7 @@ class HeartbeatOrchestrator:
             fast_heartbeat_status
         ):
             # Fast heartbeat optimization - no changes detected
-            self.logger.info(
+            self.logger.debug(
                 f"🚀 Heartbeat #{heartbeat_count} optimized for agent '{agent_id}' - no changes detected"
             )
             return True
_mcp_mesh/pipeline/mcp_heartbeat/heartbeat_pipeline.py

@@ -51,7 +51,7 @@ class HeartbeatPipeline(MeshPipeline):
         ]
 
         self.add_steps(steps)
-        self.logger.debug(f"Heartbeat pipeline configured with {len(steps)} steps")
+        self.logger.trace(f"Heartbeat pipeline configured with {len(steps)} steps")
 
     async def execute_heartbeat_cycle(
         self, heartbeat_context: dict[str, Any]
@@ -65,7 +65,7 @@ class HeartbeatPipeline(MeshPipeline):
         Returns:
             PipelineResult with execution status and any context updates
         """
-        self.logger.debug("Starting heartbeat pipeline execution")
+        self.logger.trace("Starting heartbeat pipeline execution")
 
         # Initialize pipeline context with heartbeat-specific data
         self.context.clear()
@@ -76,7 +76,7 @@ class HeartbeatPipeline(MeshPipeline):
             result = await self._execute_with_conditional_logic()
 
             if result.is_success():
-                self.logger.debug("✅ Heartbeat pipeline completed successfully")
+                self.logger.trace("✅ Heartbeat pipeline completed successfully")
             elif result.status == PipelineStatus.PARTIAL:
                 self.logger.warning(
                     f"⚠️ Heartbeat pipeline completed partially: {result.message}"
@@ -149,7 +149,7 @@ class HeartbeatPipeline(MeshPipeline):
 
         # Execute mandatory steps
         for step in mandatory_steps:
-            self.logger.debug(f"Executing mandatory step: {step.name}")
+            self.logger.trace(f"Executing mandatory step: {step.name}")
 
             step_result = await step.execute(self.context)
             executed_steps.append(step.name)
@@ -189,7 +189,7 @@ class HeartbeatPipeline(MeshPipeline):
                 # NO_CHANGES - skip for optimization
                 should_execute_remaining = False
                 reason = "optimization (no changes detected)"
-                self.logger.debug(
+                self.logger.trace(
                     f"🚀 Skipping remaining steps for optimization: {reason}"
                 )
             elif FastHeartbeatStatusUtil.should_skip_for_resilience(
@@ -217,7 +217,7 @@ class HeartbeatPipeline(MeshPipeline):
 
         # Execute or skip conditional steps based on decision
         if should_execute_remaining:
             for step in conditional_steps:
-                self.logger.debug(f"Executing conditional step: {step.name}")
+                self.logger.trace(f"Executing conditional step: {step.name}")
 
                 step_result = await step.execute(self.context)
                 executed_steps.append(step.name)
_mcp_mesh/pipeline/mcp_heartbeat/heartbeat_send.py

@@ -41,7 +41,7 @@ class HeartbeatSendStep(PipelineStep):
             raise ValueError("Health status not available in context")
 
         # Prepare heartbeat for registry
-        self.logger.debug(f"🔍 Preparing heartbeat for agent '{agent_id}'")
+        self.logger.trace(f"🔍 Preparing heartbeat for agent '{agent_id}'")
 
         # Send actual HTTP request to registry
         registry_wrapper = context.get("registry_wrapper")
_mcp_mesh/pipeline/mcp_heartbeat/llm_tools_resolution.py

@@ -39,7 +39,7 @@ class LLMToolsResolutionStep(PipelineStep):
 
     async def execute(self, context: dict[str, Any]) -> PipelineResult:
         """Process LLM tools resolution with hash-based change detection."""
-        self.logger.debug("Processing LLM tools resolution...")
+        self.logger.trace("Processing LLM tools resolution...")
 
         result = PipelineResult(message="LLM tools resolution processed")
 
@@ -50,7 +50,7 @@ class LLMToolsResolutionStep(PipelineStep):
         if heartbeat_response is None:
             result.status = PipelineStatus.SUCCESS
             result.message = "No heartbeat response - completed successfully"
-            self.logger.debug("ℹ️ No heartbeat response to process - this is normal")
+            self.logger.trace("ℹ️ No heartbeat response to process - this is normal")
             return result
 
         # Use hash-based change detection and processing logic
@@ -73,14 +73,16 @@ class LLMToolsResolutionStep(PipelineStep):
         result.add_context("llm_tools", llm_tools)
         result.add_context("llm_providers", llm_providers)
 
-        result.message = "LLM tools and providers resolution completed (efficient hash-based)"
+        result.message = (
+            "LLM tools and providers resolution completed (efficient hash-based)"
+        )
 
         if function_count > 0 or provider_count > 0:
             self.logger.info(
                 f"🤖 LLM state resolved: {function_count} functions, {tool_count} tools, {provider_count} providers"
             )
 
-        self.logger.debug(
+        self.logger.trace(
             "🤖 LLM tools and providers resolution step completed using hash-based change detection"
         )
 
@@ -113,14 +115,16 @@ class LLMToolsResolutionStep(PipelineStep):
         llm_tools = {}
 
         if not isinstance(llm_providers, dict):
-            self.logger.warning(f"llm_providers is not a dict, type={type(llm_providers)}")
+            self.logger.warning(
+                f"llm_providers is not a dict, type={type(llm_providers)}"
+            )
             llm_providers = {}
 
         # Build state with both llm_tools and llm_providers
         # This ensures hash changes when EITHER tools OR providers change
         state = {
             "llm_tools": {},
-            "llm_providers": llm_providers  # Include providers directly
+            "llm_providers": llm_providers,  # Include providers directly
         }
 
         # Filter out non-list values for llm_tools
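The comment in this hunk explains why `llm_providers` is hashed alongside `llm_tools`: a provider-only change must still produce a new digest, or it would be silently skipped by the change detector. A tiny check of that property, reusing the `compute_state_hash` sketch from earlier (the state values here are purely illustrative):

```python
# Tools identical, providers differ - digests must differ too.
state_a = {"llm_tools": {"fn": ["tool_a"]}, "llm_providers": {}}
state_b = {"llm_tools": {"fn": ["tool_a"]}, "llm_providers": {"p1": {"kind": "llm"}}}

# Hashing only llm_tools would make these collide; hashing the combined
# state keeps them distinct, so a provider-only change is reprocessed.
assert compute_state_hash(state_a) != compute_state_hash(state_b)
```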
@@ -165,7 +169,7 @@ class LLMToolsResolutionStep(PipelineStep):
         if not heartbeat_response:
             # No response from registry (connection error, timeout, 5xx)
             # → Skip entirely for resilience (keep existing LLM tools and providers)
-            self.logger.debug(
+            self.logger.trace(
                 "No heartbeat response - skipping LLM state processing for resilience"
             )
             return
@@ -182,7 +186,7 @@ class LLMToolsResolutionStep(PipelineStep):
         # Compare with previous state (use global variable)
         global _last_llm_tools_hash
         if current_hash == _last_llm_tools_hash:
-            self.logger.debug(
+            self.logger.trace(
                 f"🔄 LLM state unchanged (hash: {current_hash}), skipping processing"
             )
             return
@@ -222,13 +226,13 @@ class LLMToolsResolutionStep(PipelineStep):
         # Determine if this is initial processing or an update
         if _last_llm_tools_hash is None:
             # Initial processing - use process_llm_tools
-            self.logger.debug(
+            self.logger.trace(
                 "🤖 Initial LLM tools processing - calling process_llm_tools()"
             )
             injector.process_llm_tools(llm_tools)
         else:
             # Update - use update_llm_tools
-            self.logger.debug("🤖 LLM tools update - calling update_llm_tools()")
+            self.logger.trace("🤖 LLM tools update - calling update_llm_tools()")
             injector.update_llm_tools(llm_tools)
 
         # Process LLM providers (v0.6.1 mesh delegation)
@@ -239,7 +243,7 @@ class LLMToolsResolutionStep(PipelineStep):
             )
             injector.process_llm_providers(llm_providers)
         else:
-            self.logger.debug("🔌 No llm_providers in current state")
+            self.logger.trace("🔌 No llm_providers in current state")
 
         # Store new hash for next comparison (use global variable)
         _last_llm_tools_hash = current_hash
_mcp_mesh/pipeline/mcp_heartbeat/registry_connection.py

@@ -33,7 +33,7 @@ class RegistryConnectionStep(PipelineStep):
 
     async def execute(self, context: dict[str, Any]) -> PipelineResult:
         """Establish registry connection or reuse existing one."""
-        self.logger.debug("Checking registry connection...")
+        self.logger.trace("Checking registry connection...")
 
         result = PipelineResult(message="Registry connection ready")
 
@@ -45,7 +45,7 @@ class RegistryConnectionStep(PipelineStep):
             # Reuse existing connection for efficiency
             result.add_context("registry_wrapper", existing_wrapper)
             result.message = "Reusing existing registry connection"
-            self.logger.debug("🔄 Reusing existing registry connection")
+            self.logger.trace("🔄 Reusing existing registry connection")
             return result
 
         # Create new connection if none exists
@@ -64,7 +64,7 @@ class RegistryConnectionStep(PipelineStep):
             result.add_context("registry_wrapper", registry_wrapper)
 
             result.message = f"Connected to registry at {registry_url}"
-            self.logger.info(f"🔗 Registry connection established: {registry_url}")
+            self.logger.trace(f"🔗 Registry connection established: {registry_url}")
 
         except Exception as e:
             result.status = PipelineStatus.FAILED