pytest-fastcollect 0.5.2__cp312-cp312-musllinux_1_2_i686.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pytest_fastcollect/__init__.py +12 -0
- pytest_fastcollect/cache.py +171 -0
- pytest_fastcollect/constants.py +89 -0
- pytest_fastcollect/daemon.py +943 -0
- pytest_fastcollect/daemon_client.py +581 -0
- pytest_fastcollect/filter.py +204 -0
- pytest_fastcollect/plugin.py +601 -0
- pytest_fastcollect/py.typed +0 -0
- pytest_fastcollect/pytest_fastcollect.cpython-312-i386-linux-musl.so +0 -0
- pytest_fastcollect/socket_strategy.py +217 -0
- pytest_fastcollect-0.5.2.dist-info/METADATA +747 -0
- pytest_fastcollect-0.5.2.dist-info/RECORD +16 -0
- pytest_fastcollect-0.5.2.dist-info/WHEEL +4 -0
- pytest_fastcollect-0.5.2.dist-info/entry_points.txt +2 -0
- pytest_fastcollect-0.5.2.dist-info/licenses/LICENSE +21 -0
- pytest_fastcollect.libs/libgcc_s-27e5a392.so.1 +0 -0
pytest_fastcollect/daemon_client.py
@@ -0,0 +1,581 @@
+"""
+Daemon Client: Communicates with Collection Daemon for instant collection.
+
+Production Features:
+- Comprehensive error handling and retries
+- Connection pooling and timeouts
+- Request validation
+- Detailed error messages
+- Health checking
+"""
+
+import json
+import socket
+import os
+import time
+import logging
+from pathlib import Path
+from typing import Dict, Any, Optional, Set
+
+from .socket_strategy import create_socket_strategy
+from .constants import (
+    DEFAULT_MAX_RETRIES,
+    DEFAULT_REQUEST_TIMEOUT_SECONDS,
+    HEALTH_CHECK_TIMEOUT_SECONDS,
+    HEALTH_CHECK_RETRIES,
+    STOP_COMMAND_TIMEOUT_SECONDS,
+    RETRY_BACKOFF_BASE_SECONDS,
+    STOP_COMMAND_SLEEP_SECONDS,
+    SIGTERM_WAIT_SECONDS,
+    SIGKILL_WAIT_SECONDS,
+    TASKLIST_TIMEOUT_SECONDS,
+    SOCKET_PATH_HASH_LENGTH,
+)
+
+# Configure logger
+logger = logging.getLogger('pytest_fastcollect.daemon_client')
+
+
+class ClientError(Exception):
+    """Base exception for client errors."""
+    pass
+
+
+class ConnectionError(ClientError):
+    """Raised when cannot connect to daemon."""
+    pass
+
+
+class TimeoutError(ClientError):
+    """Raised when request times out."""
+    pass
+
+
+class ValidationError(ClientError):
+    """Raised when request validation fails."""
+    pass
+
+
+class DaemonClient:
+    """Client for communicating with Collection Daemon.
+
+    Production Features:
+    - Automatic retries with exponential backoff
+    - Comprehensive error handling
+    - Request validation
+    - Connection timeout management
+    - Detailed logging
+    """
+
+    def __init__(self, socket_path: str, max_retries: int = DEFAULT_MAX_RETRIES):
+        """Initialize daemon client.
+
+        Args:
+            socket_path: Path to Unix domain socket (or base path for TCP mode)
+            max_retries: Maximum number of retry attempts for failed requests
+        """
+        self.socket_path = socket_path
+        self.max_retries = max_retries
+
+        # Validate socket path
+        if not isinstance(socket_path, str) or not socket_path:
+            raise ValidationError("Invalid socket path")
+
+        # Create socket strategy for cross-platform support
+        self.socket_strategy = create_socket_strategy(socket_path)
+
+    def is_daemon_running(self) -> bool:
+        """Check if daemon is running and responsive.
+
+        Returns:
+            True if daemon is healthy and responding, False otherwise
+
+        Note:
+            Uses health check if available, falls back to status check
+        """
+        try:
+            # Try health check first
+            response = self.send_request(
+                {"command": "health"},
+                timeout=HEALTH_CHECK_TIMEOUT_SECONDS,
+                retries=HEALTH_CHECK_RETRIES
+            )
+            return response.get("status") in ("healthy", "degraded")
+        except:
+            # Fall back to status check
+            try:
+                response = self.send_request(
+                    {"command": "status"},
+                    timeout=HEALTH_CHECK_TIMEOUT_SECONDS,
+                    retries=HEALTH_CHECK_RETRIES
+                )
+                return response.get("status") == "running"
+            except:
+                return False
+
+    def _validate_request(self, request: Dict[str, Any]) -> None:
+        """Validate request before sending.
+
+        Args:
+            request: Request dictionary to validate
+
+        Raises:
+            ValidationError: If validation fails
+        """
+        if not isinstance(request, dict):
+            raise ValidationError("Request must be a dictionary")
+
+        if "command" not in request:
+            raise ValidationError("Request missing 'command' field")
+
+        if not isinstance(request["command"], str):
+            raise ValidationError("Command must be a string")
+
+    def send_request(
+        self,
+        request: Dict[str, Any],
+        timeout: float = DEFAULT_REQUEST_TIMEOUT_SECONDS,
+        retries: Optional[int] = None
+    ) -> Dict[str, Any]:
+        """Send request to daemon and get response with automatic retries.
+
+        Args:
+            request: Request dictionary with command and parameters
+            timeout: Request timeout in seconds
+            retries: Number of retry attempts (uses self.max_retries if not specified)
+
+        Returns:
+            Response dictionary from daemon
+
+        Raises:
+            ValidationError: If request validation fails
+            ConnectionError: If cannot connect to daemon after all retries
+            TimeoutError: If daemon doesn't respond in time
+            ClientError: For other errors
+
+        Note:
+            - Automatically retries failed requests with exponential backoff
+            - Validates requests before sending
+            - Provides detailed error messages
+        """
+        # Validate request
+        self._validate_request(request)
+
+        # Use default retries if not specified
+        if retries is None:
+            retries = self.max_retries
+
+        last_exception = None
+        for attempt in range(retries + 1):
+            try:
+                return self._send_request_once(request, timeout)
+
+            except socket.timeout as e:
+                last_exception = TimeoutError(
+                    f"Daemon request timed out after {timeout}s"
+                )
+                logger.warning(
+                    f"Request timed out (attempt {attempt + 1}/{retries + 1})"
+                )
+
+            except socket.error as e:
+                if e.errno == 2 or e.errno == 111:  # ENOENT or ECONNREFUSED
+                    last_exception = ConnectionError(
+                        f"Cannot connect to daemon at {self.socket_path}. "
+                        f"Is the daemon running?"
+                    )
+                else:
+                    last_exception = ConnectionError(
+                        f"Socket error connecting to daemon: {e}"
+                    )
+                logger.warning(
+                    f"Connection failed (attempt {attempt + 1}/{retries + 1}): {e}"
+                )
+
+            except ClientError as e:
+                # Already a client error (ConnectionError, TimeoutError, ValidationError, etc.)
+                # Preserve it and continue retrying
+                last_exception = e
+                logger.warning(
+                    f"Client error (attempt {attempt + 1}/{retries + 1}): {e}"
+                )
+
+            except Exception as e:
+                last_exception = ClientError(f"Unexpected error: {e}")
+                logger.error(
+                    f"Unexpected error (attempt {attempt + 1}/{retries + 1}): {e}",
+                    exc_info=True
+                )
+
+            # Don't sleep after last attempt
+            if attempt < retries:
+                # Exponential backoff: 0.1s, 0.2s, 0.4s, ...
+                sleep_time = RETRY_BACKOFF_BASE_SECONDS * (2 ** attempt)
+                logger.debug(f"Retrying in {sleep_time}s...")
+                time.sleep(sleep_time)
+
+        # All retries exhausted
+        if last_exception:
+            raise last_exception
+        else:
+            raise ClientError("All retry attempts failed")
+
+    def _send_request_once(
+        self,
+        request: Dict[str, Any],
+        timeout: float
+    ) -> Dict[str, Any]:
+        """Send a single request without retries.
+
+        Args:
+            request: Request dictionary
+            timeout: Request timeout in seconds
+
+        Returns:
+            Response dictionary from daemon
+
+        Raises:
+            Various socket exceptions that will be caught by send_request
+        """
+        # Create and connect socket using strategy
+        sock = self.socket_strategy.create_client_socket(timeout)
+
+        try:
+
+            # Send request
+            request_data = json.dumps(request).encode('utf-8')
+            sock.sendall(request_data)
+            logger.debug(f"Sent request: {request.get('command')}")
+
+            # Shutdown write side to signal end of request
+            sock.shutdown(socket.SHUT_WR)
+
+            # Receive response
+            response_data = b""
+            while True:
+                chunk = sock.recv(4096)
+                if not chunk:
+                    break
+                response_data += chunk
+
+            if not response_data:
+                raise ConnectionError("Empty response from daemon")
+
+            # Parse response
+            try:
+                response = json.loads(response_data.decode('utf-8'))
+            except json.JSONDecodeError as e:
+                raise ClientError(f"Invalid JSON response from daemon: {e}")
+
+            logger.debug(f"Received response: {response.get('status')}")
+            return response
+
+        finally:
+            try:
+                sock.close()
+            except:
+                pass
+
+    def collect(
+        self,
+        root_path: str,
+        filters: Optional[Dict[str, str]] = None
+    ) -> Dict[str, Any]:
+        """Request collection from daemon.
+
+        Args:
+            root_path: Root directory for collection
+            filters: Optional filters for collection
+
+        Returns:
+            Collection results with timing and module information
+
+        Note:
+            Since modules are pre-imported in daemon, this is nearly instant!
+        """
+        request = {
+            "command": "collect",
+            "root_path": root_path,
+            "filters": filters or {},
+        }
+
+        logger.info(f"Requesting collection for {root_path}")
+        response = self.send_request(request)
+        logger.info(
+            f"Collection completed in {response.get('collection_time', 0):.4f}s"
+        )
+        return response
+
+    def get_status(self) -> Dict[str, Any]:
+        """Get comprehensive daemon status.
+
+        Returns:
+            Status dict including:
+            - status: "running" or error
+            - pid: Process ID
+            - uptime: Seconds since start
+            - cached_modules: Number of imported modules
+            - metrics: Request statistics
+        """
+        logger.debug("Requesting daemon status")
+        return self.send_request({"command": "status"})
+
+    def get_health(self) -> Dict[str, Any]:
+        """Get daemon health check.
+
+        Returns:
+            Health check result with diagnostics
+        """
+        logger.debug("Requesting daemon health check")
+        return self.send_request({"command": "health"})
+
+    def reload(self, file_paths: Set[str]) -> Dict[str, Any]:
+        """Request daemon to reload specified modules.
+
+        Args:
+            file_paths: Set of file paths to reload
+
+        Returns:
+            Reload result with timing and counts
+
+        Note:
+            This clears cached modules and re-imports them
+        """
+        if not file_paths:
+            raise ValidationError("file_paths cannot be empty")
+
+        request = {
+            "command": "reload",
+            "file_paths": list(file_paths),
+        }
+
+        logger.info(f"Requesting reload of {len(file_paths)} modules")
+        response = self.send_request(request)
+        logger.info(
+            f"Reload completed: {response.get('modules_reloaded', 0)} modules "
+            f"in {response.get('reload_time', 0):.2f}s"
+        )
+        return response
+
+    def stop(self) -> Dict[str, Any]:
+        """Stop the daemon gracefully.
+
+        Returns:
+            Stop confirmation
+
+        Note:
+            Daemon will clean up resources and exit
+        """
+        logger.info("Requesting daemon stop")
+        return self.send_request({"command": "stop"}, timeout=STOP_COMMAND_TIMEOUT_SECONDS)
+
+
+def get_socket_path(root_path: str) -> str:
+    """Get socket path for a project.
+
+    Each project gets its own daemon socket based on root path hash.
+
+    Args:
+        root_path: Project root directory path
+
+    Returns:
+        Path to Unix domain socket for this project
+
+    Note:
+        Uses MD5 hash of root path to ensure unique socket per project
+    """
+    # Use hash of root path to avoid collisions
+    import hashlib
+
+    # Resolve to absolute path for consistency
+    resolved_path = str(Path(root_path).resolve())
+    # MD5 used only for socket path generation, not security
+    path_hash = hashlib.md5(resolved_path.encode(), usedforsecurity=False).hexdigest()[:SOCKET_PATH_HASH_LENGTH]
+
+    # Store in temp directory
+    socket_path = f"/tmp/pytest-fastcollect-{path_hash}.sock"
+
+    return socket_path
+
+
+def get_pid_file(socket_path: str) -> str:
+    """Get PID file path for daemon.
+
+    Args:
+        socket_path: Path to daemon socket
+
+    Returns:
+        Path to PID file (socket_path + ".pid")
+    """
+    return socket_path + ".pid"
+
+
+def save_daemon_pid(socket_path: str, pid: int) -> None:
+    """Save daemon PID to file.
+
+    Args:
+        socket_path: Path to daemon socket
+        pid: Process ID to save
+
+    Note:
+        Creates PID file next to socket file
+    """
+    pid_file = get_pid_file(socket_path)
+    try:
+        with open(pid_file, 'w') as f:
+            f.write(str(pid))
+        logger.debug(f"Saved daemon PID {pid} to {pid_file}")
+    except Exception as e:
+        logger.error(f"Failed to save PID file: {e}")
+        raise
+
+
+def get_daemon_pid(socket_path: str) -> Optional[int]:
+    """Get daemon PID from file.
+
+    Args:
+        socket_path: Path to daemon socket
+
+    Returns:
+        PID if found and valid, None otherwise
+
+    Note:
+        Returns None if PID file doesn't exist or contains invalid data
+    """
+    pid_file = get_pid_file(socket_path)
+
+    if not os.path.exists(pid_file):
+        return None
+
+    try:
+        with open(pid_file, 'r') as f:
+            pid_str = f.read().strip()
+        pid = int(pid_str)
+        if pid > 0:
+            return pid
+        else:
+            logger.warning(f"Invalid PID in file: {pid}")
+            return None
+    except (ValueError, OSError) as e:
+        logger.warning(f"Failed to read PID file: {e}")
+        return None
+
+
+def is_process_running(pid: int) -> bool:
+    """Check if process with given PID is running.
+
+    Args:
+        pid: Process ID to check
+
+    Returns:
+        True if process is running, False otherwise
+
+    Note:
+        Uses platform-specific methods to check process existence
+    """
+    if pid <= 0:
+        return False
+
+    import sys
+
+    if sys.platform == 'win32':
+        # On Windows, use tasklist command
+        try:
+            import subprocess
+            result = subprocess.run(
+                ['tasklist', '/FI', f'PID eq {pid}'],
+                capture_output=True,
+                text=True,
+                timeout=TASKLIST_TIMEOUT_SECONDS
+            )
+            return str(pid) in result.stdout
+        except Exception:
+            # If tasklist fails, assume process doesn't exist
+            return False
+    else:
+        # On Unix, use os.kill with signal 0
+        try:
+            os.kill(pid, 0)
+            return True
+        except OSError:
+            return False
+
+
+def stop_daemon(socket_path: str) -> bool:
+    """Stop daemon gracefully or forcefully.
+
+    Tries multiple approaches in order:
+    1. Send stop command via socket (graceful)
+    2. Send SIGTERM to process (graceful)
+    3. Send SIGKILL to process (forceful)
+    4. Clean up stale files
+
+    Args:
+        socket_path: Path to daemon socket
+
+    Returns:
+        True if daemon was stopped (or wasn't running), False on failure
+
+    Note:
+        Always cleans up stale socket and PID files
+    """
+    logger.info(f"Stopping daemon at {socket_path}")
+
+    daemon_was_running = False
+
+    # Try graceful shutdown via socket first
+    try:
+        client = DaemonClient(socket_path, max_retries=1)
+        client.stop()
+        time.sleep(STOP_COMMAND_SLEEP_SECONDS)
+        daemon_was_running = True
+        logger.info("Daemon stopped via stop command")
+
+        # Check if actually stopped
+        if not os.path.exists(socket_path):
+            return True
+    except Exception as e:
+        logger.debug(f"Could not stop via socket: {e}")
+
+    # Try killing process
+    pid = get_daemon_pid(socket_path)
+    if pid and is_process_running(pid):
+        daemon_was_running = True
+        try:
+            # Send SIGTERM (graceful shutdown)
+            logger.info(f"Sending SIGTERM to daemon PID {pid}")
+            os.kill(pid, 15)
+            time.sleep(SIGTERM_WAIT_SECONDS)
+
+            # Check if still running
+            if is_process_running(pid):
+                # Send SIGKILL (forced shutdown)
+                logger.warning(f"Daemon didn't stop, sending SIGKILL to PID {pid}")
+                os.kill(pid, 9)
+                time.sleep(SIGKILL_WAIT_SECONDS)
+
+            logger.info(f"Daemon process {pid} stopped")
+        except OSError as e:
+            logger.error(f"Failed to kill daemon process: {e}")
+
+    # Clean up stale files
+    cleaned = False
+
+    if os.path.exists(socket_path):
+        try:
+            os.remove(socket_path)
+            logger.debug(f"Removed socket file: {socket_path}")
+            cleaned = True
+        except Exception as e:
+            logger.error(f"Failed to remove socket file: {e}")
+
+    pid_file = get_pid_file(socket_path)
+    if os.path.exists(pid_file):
+        try:
+            os.remove(pid_file)
+            logger.debug(f"Removed PID file: {pid_file}")
+            cleaned = True
+        except Exception as e:
+            logger.error(f"Failed to remove PID file: {e}")
+
+    return daemon_was_running or cleaned