chuk-tool-processor 0.6.9__py3-none-any.whl → 0.6.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of chuk-tool-processor might be problematic.

--- a/chuk_tool_processor/mcp/transport/stdio_transport.py
+++ b/chuk_tool_processor/mcp/transport/stdio_transport.py
@@ -1,9 +1,11 @@
-# chuk_tool_processor/mcp/transport/stdio_transport.py
+# chuk_tool_processor/mcp/transport/stdio_transport.py - ENHANCED
 from __future__ import annotations
 
 import asyncio
 import json
 import time
+import signal
+import psutil
 from typing import Dict, Any, List, Optional
 import logging
 
@@ -23,22 +25,24 @@ class StdioTransport(MCPBaseTransport):
     """
     STDIO transport for MCP communication using process pipes.
 
-    This transport uses subprocess communication via stdin/stdout pipes
-    to communicate with MCP servers.
+    ENHANCED: Now matches SSE transport robustness with improved process
+    management, health monitoring, and comprehensive error handling.
     """
 
     def __init__(self, server_params,
                  connection_timeout: float = 30.0,
                  default_timeout: float = 30.0,
-                 enable_metrics: bool = True):
+                 enable_metrics: bool = True,
+                 process_monitor: bool = True):  # NEW
         """
-        Initialize STDIO transport.
+        Initialize STDIO transport with enhanced configuration.
 
         Args:
             server_params: Server parameters (dict or StdioParameters object)
             connection_timeout: Timeout for initial connection setup
             default_timeout: Default timeout for operations
             enable_metrics: Whether to track performance metrics
+            process_monitor: Whether to monitor subprocess health (NEW)
         """
         # Convert dict to StdioParameters if needed
         if isinstance(server_params, dict):
@@ -53,13 +57,21 @@ class StdioTransport(MCPBaseTransport):
         self.connection_timeout = connection_timeout
         self.default_timeout = default_timeout
         self.enable_metrics = enable_metrics
+        self.process_monitor = process_monitor  # NEW
 
         # Connection state
         self._context = None
         self._streams = None
         self._initialized = False
 
-        # Performance metrics (consistent with other transports)
+        # Process monitoring (NEW - like SSE's health monitoring)
+        self._process_id = None
+        self._process_start_time = None
+        self._last_successful_ping = None
+        self._consecutive_failures = 0
+        self._max_consecutive_failures = 3
+
+        # Enhanced performance metrics (like SSE)
         self._metrics = {
             "total_calls": 0,
             "successful_calls": 0,
@@ -69,14 +81,76 @@ class StdioTransport(MCPBaseTransport):
             "last_ping_time": None,
             "initialization_time": None,
             "process_restarts": 0,
-            "pipe_errors": 0
+            "pipe_errors": 0,
+            "process_crashes": 0,  # NEW
+            "recovery_attempts": 0,  # NEW
+            "memory_usage_mb": 0.0,  # NEW
+            "cpu_percent": 0.0,  # NEW
         }
 
         logger.debug("STDIO transport initialized for command: %s",
                      getattr(self.server_params, 'command', 'unknown'))
 
+    async def _get_process_info(self) -> Optional[Dict[str, Any]]:
+        """Get process information for monitoring (NEW)."""
+        if not self._process_id or not self.process_monitor:
+            return None
+
+        try:
+            # FIXED: Validate PID is a real integer before using psutil
+            if not isinstance(self._process_id, int) or self._process_id <= 0:
+                return None
+
+            process = psutil.Process(self._process_id)
+            if process.is_running():
+                memory_info = process.memory_info()
+                return {
+                    "pid": self._process_id,
+                    "status": process.status(),
+                    "memory_mb": memory_info.rss / 1024 / 1024,
+                    "cpu_percent": process.cpu_percent(),
+                    "create_time": process.create_time(),
+                    "uptime": time.time() - self._process_start_time if self._process_start_time else 0
+                }
+        except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError, TypeError, ValueError):
+            # FIXED: Handle all possible errors including TypeError from mock objects
+            pass
+        return None
+
+    async def _monitor_process_health(self) -> bool:
+        """Monitor subprocess health (NEW - like SSE's health monitoring)."""
+        if not self.process_monitor:
+            return True
+
+        # FIXED: Check if process_id is valid before monitoring
+        if not self._process_id or not isinstance(self._process_id, int) or self._process_id <= 0:
+            return True  # No monitoring if no valid PID
+
+        process_info = await self._get_process_info()
+        if not process_info:
+            logger.debug("Process monitoring unavailable (may be in test environment)")
+            return True  # Don't fail in test environments
+
+        # Update metrics with process info
+        if self.enable_metrics:
+            self._metrics["memory_usage_mb"] = process_info["memory_mb"]
+            self._metrics["cpu_percent"] = process_info["cpu_percent"]
+
+        # Check for concerning process states
+        status = process_info.get("status", "unknown")
+        if status in ["zombie", "dead"]:
+            logger.error("Process is in %s state", status)
+            return False
+
+        # Check for excessive memory usage (warn at 1GB)
+        memory_mb = process_info.get("memory_mb", 0)
+        if memory_mb > 1024:
+            logger.warning("Process using excessive memory: %.1f MB", memory_mb)
+
+        return True
+
     async def initialize(self) -> bool:
-        """Initialize by delegating to chuk-mcp with timeout protection."""
+        """Enhanced initialization with process monitoring."""
         if self._initialized:
             logger.warning("Transport already initialized")
             return True
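
For orientation, the new _get_process_info / _monitor_process_health helpers above rely entirely on psutil's process API (Process.status(), memory_info().rss, cpu_percent(), create_time()). A minimal standalone sketch of the same sampling pattern follows; sample_process is a hypothetical helper name used only for illustration, not part of chuk-tool-processor:

# Standalone sketch of the psutil sampling pattern used above.
# sample_process is a hypothetical helper, not part of chuk-tool-processor.
import time

import psutil


def sample_process(pid: int) -> dict | None:
    """Return basic health stats for a PID, or None if it cannot be inspected."""
    if not isinstance(pid, int) or pid <= 0:
        return None
    try:
        proc = psutil.Process(pid)
        if not proc.is_running():
            return None
        return {
            "status": proc.status(),                        # e.g. "running", "sleeping", "zombie"
            "memory_mb": proc.memory_info().rss / 1024 / 1024,
            "cpu_percent": proc.cpu_percent(interval=0.1),  # short sampling window
            "uptime_s": time.time() - proc.create_time(),
        }
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        return None


if __name__ == "__main__":
    import os

    print(sample_process(os.getpid()))
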
@@ -93,6 +167,12 @@ class StdioTransport(MCPBaseTransport):
                 timeout=self.connection_timeout
             )
 
+            # Capture process information for monitoring (NEW)
+            if self.process_monitor and hasattr(self._context, '_process'):
+                self._process_id = getattr(self._context._process, 'pid', None)
+                self._process_start_time = time.time()
+                logger.debug("Subprocess PID: %s", self._process_id)
+
             # Send initialize message with timeout
             init_result = await asyncio.wait_for(
                 send_initialize(*self._streams),
@@ -100,14 +180,36 @@ class StdioTransport(MCPBaseTransport):
             )
 
             if init_result:
-                self._initialized = True
+                # Enhanced health verification (like SSE)
+                logger.debug("Verifying connection with ping...")
+                ping_start = time.time()
+                ping_success = await asyncio.wait_for(
+                    send_ping(*self._streams),
+                    timeout=10.0
+                )
+                ping_time = time.time() - ping_start
 
-                if self.enable_metrics:
-                    init_time = time.time() - start_time
-                    self._metrics["initialization_time"] = init_time
-
-                logger.debug("STDIO transport initialized successfully in %.3fs", time.time() - start_time)
-                return True
+                if ping_success:
+                    self._initialized = True
+                    self._last_successful_ping = time.time()
+                    self._consecutive_failures = 0
+
+                    if self.enable_metrics:
+                        init_time = time.time() - start_time
+                        self._metrics["initialization_time"] = init_time
+                        self._metrics["last_ping_time"] = ping_time
+
+                    logger.debug("STDIO transport initialized successfully in %.3fs (ping: %.3fs)",
+                                 time.time() - start_time, ping_time)
+                    return True
+                else:
+                    logger.warning("STDIO connection established but ping failed")
+                    # Still consider it initialized
+                    self._initialized = True
+                    self._consecutive_failures = 1
+                    if self.enable_metrics:
+                        self._metrics["initialization_time"] = time.time() - start_time
+                    return True
             else:
                 logger.error("STDIO initialization failed")
                 await self._cleanup()
@@ -116,24 +218,54 @@ class StdioTransport(MCPBaseTransport):
         except asyncio.TimeoutError:
             logger.error("STDIO initialization timed out after %ss", self.connection_timeout)
             await self._cleanup()
+            if self.enable_metrics:
+                self._metrics["process_crashes"] += 1
             return False
         except Exception as e:
             logger.error("Error initializing STDIO transport: %s", e)
             await self._cleanup()
+            if self.enable_metrics:
+                self._metrics["process_crashes"] += 1
+            return False
+
+    async def _attempt_recovery(self) -> bool:
+        """Attempt to recover from process/connection issues (NEW)."""
+        if self.enable_metrics:
+            self._metrics["recovery_attempts"] += 1
+            self._metrics["process_restarts"] += 1
+
+        logger.warning("Attempting STDIO process recovery...")
+
+        try:
+            # Force cleanup of existing process
+            await self._cleanup()
+
+            # Brief delay before restart
+            await asyncio.sleep(1.0)
+
+            # Re-initialize
+            return await self.initialize()
+        except Exception as e:
+            logger.error("Recovery attempt failed: %s", e)
             return False
 
     async def close(self) -> None:
-        """Close by delegating to chuk-mcp context manager with enhanced cleanup."""
+        """Enhanced close with process monitoring and metrics."""
         if not self._initialized:
             return
 
-        # Log final metrics
+        # Enhanced metrics logging (like SSE)
         if self.enable_metrics and self._metrics["total_calls"] > 0:
+            success_rate = (self._metrics["successful_calls"] / self._metrics["total_calls"] * 100)
             logger.debug(
-                "STDIO transport closing - Total calls: %d, Success rate: %.1f%%, Avg response time: %.3fs",
+                "STDIO transport closing - Calls: %d, Success: %.1f%%, "
+                "Avg time: %.3fs, Restarts: %d, Crashes: %d, Memory: %.1f MB",
                 self._metrics["total_calls"],
-                (self._metrics["successful_calls"] / self._metrics["total_calls"] * 100),
-                self._metrics["avg_response_time"]
+                success_rate,
+                self._metrics["avg_response_time"],
+                self._metrics["process_restarts"],
+                self._metrics["process_crashes"],
+                self._metrics["memory_usage_mb"]
             )
 
         if self._context:
@@ -146,16 +278,44 @@ class StdioTransport(MCPBaseTransport):
         await self._cleanup()
 
     async def _cleanup(self) -> None:
-        """Clean up internal state."""
+        """Enhanced cleanup with process termination."""
+        # Attempt graceful process termination if we have a PID
+        if self._process_id and self.process_monitor:
+            try:
+                # FIXED: Validate PID is a real integer before using psutil
+                if isinstance(self._process_id, int) and self._process_id > 0:
+                    process = psutil.Process(self._process_id)
+                    if process.is_running():
+                        logger.debug("Terminating subprocess %s", self._process_id)
+                        process.terminate()
+
+                        # Wait briefly for graceful termination
+                        try:
+                            process.wait(timeout=2.0)
+                        except psutil.TimeoutExpired:
+                            logger.warning("Process did not terminate gracefully, killing...")
+                            process.kill()
+            except (psutil.NoSuchProcess, psutil.AccessDenied, TypeError, ValueError):
+                # FIXED: Handle all possible errors including TypeError from mock objects
+                logger.debug("Could not terminate process %s (may be mock or already dead)", self._process_id)
+
         self._context = None
         self._streams = None
         self._initialized = False
+        self._process_id = None
+        self._process_start_time = None
 
     async def send_ping(self) -> bool:
-        """Send ping with performance tracking."""
+        """Enhanced ping with process health monitoring."""
         if not self._initialized:
             return False
 
+        # Check process health first (NEW) - but only if we have a real process
+        if self.process_monitor and self._process_id and isinstance(self._process_id, int):
+            if not await self._monitor_process_health():
+                self._consecutive_failures += 1
+                return False
+
         start_time = time.time()
         try:
             result = await asyncio.wait_for(
@@ -163,27 +323,45 @@ class StdioTransport(MCPBaseTransport):
                 timeout=self.default_timeout
             )
 
+            success = bool(result)
+
+            if success:
+                self._last_successful_ping = time.time()
+                self._consecutive_failures = 0
+            else:
+                self._consecutive_failures += 1
+
             if self.enable_metrics:
                 ping_time = time.time() - start_time
                 self._metrics["last_ping_time"] = ping_time
-                logger.debug("STDIO ping completed in %.3fs: %s", ping_time, result)
+                logger.debug("STDIO ping completed in %.3fs: %s", ping_time, success)
 
-            return bool(result)
+            return success
         except asyncio.TimeoutError:
             logger.error("STDIO ping timed out")
+            self._consecutive_failures += 1
             return False
         except Exception as e:
             logger.error("STDIO ping failed: %s", e)
+            self._consecutive_failures += 1
             if self.enable_metrics:
                 self._metrics["pipe_errors"] += 1
             return False
 
     def is_connected(self) -> bool:
-        """Check connection status."""
-        return self._initialized and self._streams is not None
+        """Enhanced connection status check (like SSE)."""
+        if not self._initialized or not self._streams:
+            return False
+
+        # Check for too many consecutive failures (like SSE)
+        if self._consecutive_failures >= self._max_consecutive_failures:
+            logger.warning("Connection marked unhealthy after %d failures", self._consecutive_failures)
+            return False
+
+        return True
 
     async def get_tools(self) -> List[Dict[str, Any]]:
-        """Get tools list with performance tracking."""
+        """Enhanced tools retrieval with recovery."""
         if not self._initialized:
             logger.error("Cannot get tools: transport not initialized")
             return []
@@ -204,6 +382,9 @@ class StdioTransport(MCPBaseTransport):
                 logger.warning("Unexpected tools response type: %s", type(response))
                 tools = []
 
+            # Reset failure count on success
+            self._consecutive_failures = 0
+
             if self.enable_metrics:
                 response_time = time.time() - start_time
                 logger.debug("Retrieved %d tools in %.3fs", len(tools), response_time)
@@ -212,16 +393,18 @@ class StdioTransport(MCPBaseTransport):
 
         except asyncio.TimeoutError:
             logger.error("Get tools timed out")
+            self._consecutive_failures += 1
             return []
         except Exception as e:
             logger.error("Error getting tools: %s", e)
+            self._consecutive_failures += 1
             if self.enable_metrics:
                 self._metrics["pipe_errors"] += 1
             return []
 
     async def call_tool(self, tool_name: str, arguments: Dict[str, Any],
                         timeout: Optional[float] = None) -> Dict[str, Any]:
-        """Call tool with timeout support and performance tracking."""
+        """Enhanced tool calling with recovery and process monitoring."""
         if not self._initialized:
             return {"isError": True, "error": "Transport not initialized"}
 
@@ -229,11 +412,22 @@ class StdioTransport(MCPBaseTransport):
         start_time = time.time()
 
         if self.enable_metrics:
-            self._metrics["total_calls"] += 1  # FIXED: INCREMENT FIRST
+            self._metrics["total_calls"] += 1
 
         try:
             logger.debug("Calling tool '%s' with timeout %ss", tool_name, tool_timeout)
 
+            # Enhanced connection check with recovery attempt
+            if not self.is_connected():
+                logger.warning("Connection unhealthy, attempting recovery...")
+                if not await self._attempt_recovery():
+                    if self.enable_metrics:
+                        self._update_metrics(time.time() - start_time, False)
+                    return {
+                        "isError": True,
+                        "error": "Failed to recover connection"
+                    }
+
             response = await asyncio.wait_for(
                 send_tools_call(*self._streams, tool_name, arguments),
                 timeout=tool_timeout
@@ -242,18 +436,24 @@ class StdioTransport(MCPBaseTransport):
             response_time = time.time() - start_time
             result = self._normalize_mcp_response(response)
 
+            # Reset failure count and update health on success
+            self._consecutive_failures = 0
+            self._last_successful_ping = time.time()
+
             if self.enable_metrics:
                 self._update_metrics(response_time, not result.get("isError", False))
 
             if not result.get("isError", False):
                 logger.debug("Tool '%s' completed successfully in %.3fs", tool_name, response_time)
             else:
-                logger.warning("Tool '%s' failed in %.3fs: %s", tool_name, response_time, result.get('error', 'Unknown error'))
+                logger.warning("Tool '%s' failed in %.3fs: %s", tool_name, response_time,
+                               result.get('error', 'Unknown error'))
 
             return result
 
         except asyncio.TimeoutError:
             response_time = time.time() - start_time
+            self._consecutive_failures += 1
             if self.enable_metrics:
                 self._update_metrics(response_time, False)
 
@@ -265,10 +465,20 @@ class StdioTransport(MCPBaseTransport):
             }
         except Exception as e:
             response_time = time.time() - start_time
+            self._consecutive_failures += 1
             if self.enable_metrics:
                 self._update_metrics(response_time, False)
                 self._metrics["pipe_errors"] += 1
 
+            # Enhanced process error detection
+            error_str = str(e).lower()
+            if any(indicator in error_str for indicator in
+                   ["broken pipe", "process", "eof", "connection", "died"]):
+                logger.warning("Process error detected: %s", e)
+                self._initialized = False
+                if self.enable_metrics:
+                    self._metrics["process_crashes"] += 1
+
             error_msg = f"Tool execution failed: {str(e)}"
             logger.error("Tool '%s' error: %s", tool_name, error_msg)
             return {
@@ -277,14 +487,13 @@ class StdioTransport(MCPBaseTransport):
             }
 
     def _update_metrics(self, response_time: float, success: bool) -> None:
-        """Update performance metrics."""
+        """Enhanced metrics tracking (like SSE)."""
         if success:
             self._metrics["successful_calls"] += 1
         else:
             self._metrics["failed_calls"] += 1
 
         self._metrics["total_time"] += response_time
-        # FIXED: Only calculate average if we have total calls
         if self._metrics["total_calls"] > 0:
             self._metrics["avg_response_time"] = (
                 self._metrics["total_time"] / self._metrics["total_calls"]
@@ -292,10 +501,10 @@ class StdioTransport(MCPBaseTransport):
 
     def _normalize_mcp_response(self, response: Dict[str, Any]) -> Dict[str, Any]:
         """
-        Normalize response using shared base class logic with STDIO-specific handling.
+        Enhanced response normalization with STDIO-specific handling.
 
-        STDIO has special requirements for preserving string representations
-        of numeric values for backward compatibility.
+        STDIO preserves string representations of numeric values for
+        backward compatibility with existing tests.
         """
         # Handle explicit error in response
         if "error" in response:
@@ -318,7 +527,7 @@ class StdioTransport(MCPBaseTransport):
 
     def _extract_stdio_content(self, content_list: Any) -> Any:
         """
-        Extract content with STDIO-specific string preservation logic.
+        Enhanced content extraction with STDIO-specific string preservation.
 
         STDIO transport preserves string representations of numeric values
         for backward compatibility with existing tests.
@@ -348,7 +557,7 @@ class StdioTransport(MCPBaseTransport):
         return content_list
 
     async def list_resources(self) -> Dict[str, Any]:
-        """List resources with error handling."""
+        """Enhanced resource listing with error handling."""
         if not self._initialized:
             return {}
         try:
@@ -356,16 +565,19 @@ class StdioTransport(MCPBaseTransport):
                 send_resources_list(*self._streams),
                 timeout=self.default_timeout
             )
+            self._consecutive_failures = 0  # Reset on success
             return response if isinstance(response, dict) else {}
         except asyncio.TimeoutError:
             logger.error("List resources timed out")
+            self._consecutive_failures += 1
             return {}
         except Exception as e:
             logger.debug("Error listing resources: %s", e)
+            self._consecutive_failures += 1
             return {}
 
     async def list_prompts(self) -> Dict[str, Any]:
-        """List prompts with error handling."""
+        """Enhanced prompt listing with error handling."""
         if not self._initialized:
             return {}
         try:
@@ -373,12 +585,15 @@ class StdioTransport(MCPBaseTransport):
                 send_prompts_list(*self._streams),
                 timeout=self.default_timeout
             )
+            self._consecutive_failures = 0  # Reset on success
             return response if isinstance(response, dict) else {}
         except asyncio.TimeoutError:
             logger.error("List prompts timed out")
+            self._consecutive_failures += 1
             return {}
         except Exception as e:
             logger.debug("Error listing prompts: %s", e)
+            self._consecutive_failures += 1
             return {}
 
     async def read_resource(self, uri: str) -> Dict[str, Any]:
@@ -390,9 +605,15 @@ class StdioTransport(MCPBaseTransport):
                 send_resources_read(*self._streams, uri),
                 timeout=self.default_timeout
             )
+            self._consecutive_failures = 0  # Reset on success
             return response if isinstance(response, dict) else {}
+        except asyncio.TimeoutError:
+            logger.error("Read resource timed out")
+            self._consecutive_failures += 1
+            return {}
         except Exception as e:
             logger.debug("Error reading resource: %s", e)
+            self._consecutive_failures += 1
             return {}
 
     async def get_prompt(self, name: str, arguments: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
@@ -404,49 +625,63 @@ class StdioTransport(MCPBaseTransport):
                 send_prompts_get(*self._streams, name, arguments or {}),
                 timeout=self.default_timeout
             )
+            self._consecutive_failures = 0  # Reset on success
             return response if isinstance(response, dict) else {}
+        except asyncio.TimeoutError:
+            logger.error("Get prompt timed out")
+            self._consecutive_failures += 1
+            return {}
         except Exception as e:
             logger.debug("Error getting prompt: %s", e)
+            self._consecutive_failures += 1
             return {}
 
-    # ------------------------------------------------------------------ #
-    # Metrics and monitoring (now consistent with other transports) #
-    # ------------------------------------------------------------------ #
     def get_metrics(self) -> Dict[str, Any]:
-        """Get performance and connection metrics."""
-        return self._metrics.copy()
+        """Enhanced metrics with process and health information."""
+        metrics = self._metrics.copy()
+        metrics.update({
+            "is_connected": self.is_connected(),
+            "consecutive_failures": self._consecutive_failures,
+            "last_successful_ping": self._last_successful_ping,
+            "max_consecutive_failures": self._max_consecutive_failures,
+            "process_id": self._process_id,
+            "process_uptime": (time.time() - self._process_start_time) if self._process_start_time else 0,
+        })
+        return metrics
 
     def reset_metrics(self) -> None:
-        """Reset performance metrics."""
+        """Enhanced metrics reset preserving health and process state."""
+        preserved_init_time = self._metrics.get("initialization_time")
+        preserved_last_ping = self._metrics.get("last_ping_time")
+        preserved_restarts = self._metrics.get("process_restarts", 0)
+
         self._metrics = {
             "total_calls": 0,
             "successful_calls": 0,
             "failed_calls": 0,
             "total_time": 0.0,
             "avg_response_time": 0.0,
-            "last_ping_time": self._metrics.get("last_ping_time"),
-            "initialization_time": self._metrics.get("initialization_time"),
-            "process_restarts": self._metrics.get("process_restarts", 0),
-            "pipe_errors": 0
+            "last_ping_time": preserved_last_ping,
+            "initialization_time": preserved_init_time,
+            "process_restarts": preserved_restarts,
+            "pipe_errors": 0,
+            "process_crashes": 0,
+            "recovery_attempts": 0,
+            "memory_usage_mb": 0.0,
+            "cpu_percent": 0.0,
         }
 
-    # ------------------------------------------------------------------ #
-    # Backward compatibility #
-    # ------------------------------------------------------------------ #
     def get_streams(self) -> List[tuple]:
-        """Provide streams for backward compatibility."""
+        """Enhanced streams access with connection check."""
         return [self._streams] if self._streams else []
 
-    # ------------------------------------------------------------------ #
-    # Context manager support (now uses base class with fixed error) #
-    # ------------------------------------------------------------------ #
     async def __aenter__(self):
-        """Context manager support."""
+        """Enhanced context manager entry."""
        success = await self.initialize()
        if not success:
-            raise RuntimeError("Failed to initialize StdioTransport")  # FIXED: message
+            raise RuntimeError("Failed to initialize StdioTransport")
        return self
 
     async def __aexit__(self, exc_type, exc_val, exc_tb):
-        """Context manager cleanup."""
+        """Enhanced context manager cleanup."""
         await self.close()
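
Taken together, the enhanced transport keeps the same public surface as 0.6.9 (initialize/close, the async context manager, call_tool, get_tools, get_metrics), so existing call sites should not need changes. A rough usage sketch against this version follows; the server command, the "args" key, and the tool name are illustrative assumptions, not values taken from the package:

# Rough usage sketch of the enhanced StdioTransport (server and tool names are illustrative).
import asyncio

from chuk_tool_processor.mcp.transport.stdio_transport import StdioTransport


async def main() -> None:
    transport = StdioTransport(
        {"command": "python", "args": ["-m", "my_mcp_server"]},  # hypothetical MCP server
        connection_timeout=30.0,
        default_timeout=30.0,
        process_monitor=True,  # new in 0.6.11: psutil-based health checks
    )
    async with transport:  # __aenter__ calls initialize(), __aexit__ calls close()
        tools = await transport.get_tools()
        print([t.get("name") for t in tools])
        result = await transport.call_tool("echo", {"text": "hi"})  # hypothetical tool
        print(result)
        print(transport.get_metrics())  # now includes process_id, memory_usage_mb, consecutive_failures, ...


if __name__ == "__main__":
    asyncio.run(main())
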
--- chuk_tool_processor-0.6.9.dist-info/METADATA
+++ chuk_tool_processor-0.6.11.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chuk-tool-processor
-Version: 0.6.9
+Version: 0.6.11
 Summary: Async-native framework for registering, discovering, and executing tools referenced in LLM responses
 Author-email: CHUK Team <chrishayuk@somejunkmailbox.com>
 Maintainer-email: CHUK Team <chrishayuk@somejunkmailbox.com>
@@ -22,6 +22,7 @@ Requires-Python: >=3.11
 Description-Content-Type: text/markdown
 Requires-Dist: chuk-mcp>=0.5.2
 Requires-Dist: dotenv>=0.9.9
+Requires-Dist: psutil>=7.0.0
 Requires-Dist: pydantic>=2.11.3
 Requires-Dist: uuid>=1.30
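
Aside from the version bump, the only metadata change is the new hard dependency on psutil (>=7.0.0) pulled in by the process monitoring above. A quick sanity check that an installed environment satisfies it, assuming the packaging library is available, might look like:

# Sanity check for the new psutil requirement (assumes the packaging library is installed).
from importlib.metadata import version

from packaging.version import Version

assert Version(version("psutil")) >= Version("7.0.0"), "psutil too old for chuk-tool-processor 0.6.11"
print("psutil", version("psutil"), "satisfies >=7.0.0")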