maqet-0.0.1.4-py3-none-any.whl → maqet-0.0.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. maqet/__init__.py +50 -6
  2. maqet/__main__.py +96 -0
  3. maqet/__version__.py +3 -0
  4. maqet/api/__init__.py +35 -0
  5. maqet/api/decorators.py +184 -0
  6. maqet/api/metadata.py +147 -0
  7. maqet/api/registry.py +182 -0
  8. maqet/cli.py +71 -0
  9. maqet/config/__init__.py +26 -0
  10. maqet/config/merger.py +237 -0
  11. maqet/config/parser.py +198 -0
  12. maqet/config/validators.py +519 -0
  13. maqet/config_handlers.py +684 -0
  14. maqet/constants.py +200 -0
  15. maqet/exceptions.py +226 -0
  16. maqet/formatters.py +294 -0
  17. maqet/generators/__init__.py +12 -0
  18. maqet/generators/base_generator.py +101 -0
  19. maqet/generators/cli_generator.py +635 -0
  20. maqet/generators/python_generator.py +247 -0
  21. maqet/generators/rest_generator.py +58 -0
  22. maqet/handlers/__init__.py +12 -0
  23. maqet/handlers/base.py +108 -0
  24. maqet/handlers/init.py +147 -0
  25. maqet/handlers/stage.py +196 -0
  26. maqet/ipc/__init__.py +29 -0
  27. maqet/ipc/retry.py +265 -0
  28. maqet/ipc/runner_client.py +285 -0
  29. maqet/ipc/unix_socket_server.py +239 -0
  30. maqet/logger.py +160 -55
  31. maqet/machine.py +884 -0
  32. maqet/managers/__init__.py +7 -0
  33. maqet/managers/qmp_manager.py +333 -0
  34. maqet/managers/snapshot_coordinator.py +327 -0
  35. maqet/managers/vm_manager.py +683 -0
  36. maqet/maqet.py +1120 -0
  37. maqet/os_interactions.py +46 -0
  38. maqet/process_spawner.py +395 -0
  39. maqet/qemu_args.py +76 -0
  40. maqet/qmp/__init__.py +10 -0
  41. maqet/qmp/commands.py +92 -0
  42. maqet/qmp/keyboard.py +311 -0
  43. maqet/qmp/qmp.py +17 -0
  44. maqet/snapshot.py +473 -0
  45. maqet/state.py +958 -0
  46. maqet/storage.py +702 -162
  47. maqet/validation/__init__.py +9 -0
  48. maqet/validation/config_validator.py +170 -0
  49. maqet/vm_runner.py +523 -0
  50. maqet-0.0.5.dist-info/METADATA +237 -0
  51. maqet-0.0.5.dist-info/RECORD +55 -0
  52. {maqet-0.0.1.4.dist-info → maqet-0.0.5.dist-info}/WHEEL +1 -1
  53. maqet-0.0.5.dist-info/entry_points.txt +2 -0
  54. maqet-0.0.5.dist-info/licenses/LICENSE +21 -0
  55. {maqet-0.0.1.4.dist-info → maqet-0.0.5.dist-info}/top_level.txt +0 -1
  56. maqet/core.py +0 -411
  57. maqet/functions.py +0 -104
  58. maqet-0.0.1.4.dist-info/METADATA +0 -6
  59. maqet-0.0.1.4.dist-info/RECORD +0 -33
  60. qemu/machine/__init__.py +0 -36
  61. qemu/machine/console_socket.py +0 -142
  62. qemu/machine/machine.py +0 -954
  63. qemu/machine/py.typed +0 -0
  64. qemu/machine/qtest.py +0 -191
  65. qemu/qmp/__init__.py +0 -59
  66. qemu/qmp/error.py +0 -50
  67. qemu/qmp/events.py +0 -717
  68. qemu/qmp/legacy.py +0 -319
  69. qemu/qmp/message.py +0 -209
  70. qemu/qmp/models.py +0 -146
  71. qemu/qmp/protocol.py +0 -1057
  72. qemu/qmp/py.typed +0 -0
  73. qemu/qmp/qmp_client.py +0 -655
  74. qemu/qmp/qmp_shell.py +0 -618
  75. qemu/qmp/qmp_tui.py +0 -655
  76. qemu/qmp/util.py +0 -219
  77. qemu/utils/__init__.py +0 -162
  78. qemu/utils/accel.py +0 -84
  79. qemu/utils/py.typed +0 -0
  80. qemu/utils/qemu_ga_client.py +0 -323
  81. qemu/utils/qom.py +0 -273
  82. qemu/utils/qom_common.py +0 -175
  83. qemu/utils/qom_fuse.py +0 -207
maqet/handlers/stage.py ADDED
@@ -0,0 +1,196 @@
+ # from subprocess import check_output
+ import os
+ import pty
+ import signal
+ import subprocess
+ from time import sleep
+
+ from maqet.handlers.base import Handler, HandlerError
+ from maqet.logger import LOG
+ from maqet.qmp import commands as qmp_commands
+ from maqet.qmp.keyboard import KeyboardEmulator as kb
+
+
+ class StageHandler(Handler):
+     """
+     Handles execution of pipeline stages. Methods are tasks
+     """
+
+
+ @StageHandler.method
+ def launch(state: dict):
+     state.vm.launch()
+
+
+ @StageHandler.method
+ def shutdown(state,
+              hard: bool = False,
+              timeout: int = 30) -> None:
+     state.vm.shutdown(hard=hard, timeout=timeout)
+
+
+ @StageHandler.method
+ def wait_for_input(state, prompt: str = ""):
+     input(prompt)
+
+
+ @StageHandler.method
+ def wait_for_shutdown(state):
+     while state.vm.is_running():
+         sleep(3)
+
+
+ @StageHandler.method
+ def qmp_key(state, keys: [str],
+             hold_time: int = 1,
+             **kwargs):
+     command = kb.press_keys(*keys, hold_time=hold_time)
+     qmp_run_command(state.vm, **command)
+
+
+ def qmp_run_command(vm,
+                     command: str,
+                     arguments: dict = None):
+     r = vm.qmp(cmd=command,
+                args_dict=arguments)
+     LOG.info(f"QMP: {command} {arguments} {r}")
+
+
+ @StageHandler.method
+ def qmp_type(state, text, type_delay: int = 10,
+              hold_time: int = 1, **kwargs):
+     for command in kb.type_string(string=text,
+                                   hold_time=hold_time):
+         qmp_run_command(state.vm, **command)
+         sleep(type_delay/1000)
+
+
+ @StageHandler.method
+ def qmp(state,
+         cmd: str = None,
+         args_dict: dict = None,
+         command: str = None,
+         arguments: dict = None):
+     # Support both old and new parameter names for backward compatibility
+     command = command or cmd
+     arguments = arguments or args_dict
+     qmp_run_command(
+         vm=state.vm,
+         command=command,
+         arguments=arguments
+     )
+
+
+ @StageHandler.method
+ def qmp_screendump(state, filename: str):
+     command = qmp_commands.qmp_screendump(filename)
+     qmp_run_command(state.vm, **command)
+
+
+ @StageHandler.method
+ def qmp_stop(state):
+     command = qmp_commands.qmp_stop()
+     qmp_run_command(state.vm, **command)
+
+
+ @StageHandler.method
+ def qmp_cont(state):
+     command = qmp_commands.qmp_cont()
+     qmp_run_command(state.vm, **command)
+
+
+ @StageHandler.method
+ def qmp_pmemsave(state, filename: str):
+     # Get the memory size from the machine's memory attribute
+     size_bytes = state.vm.memory
+     # Set address to 0 to dump all memory
+     address = 0
+     command = qmp_commands.qmp_pmemsave(address, size_bytes, filename)
+     qmp_run_command(state.vm, **command)
+
+
+ @StageHandler.method
+ def wait(state, time: float):
+     sleep(float(time))
+
+
+ @StageHandler.method
+ def bash(state, script: str, silent=False, blocking=True, fatal=True, **kwargs):
+     LOG.debug(f"Executing bash script: {script}")
+
+     if not blocking:
+         raise HandlerError(
+             "Non-blocking bash tasks are not currently supported.")
+
+     process = None
+     try:
+         # For silent execution, redirect stdout and stderr
+         stdout = subprocess.DEVNULL if silent else None
+         stderr = subprocess.DEVNULL if silent else None
+
+         # Use Popen for better control over signal handling
+         # Don't create a new process group - let signals propagate naturally
+         process = subprocess.Popen(
+             script,
+             shell=True,
+             stdout=stdout,
+             stderr=stderr
+         )
+
+         # Wait for the process to complete
+         returncode = process.wait()
+
+         if fatal and returncode != 0:
+             raise subprocess.CalledProcessError(returncode, script)
+
+         LOG.debug(f"Bash script exited with return code {returncode}")
+
+     except subprocess.CalledProcessError as e:
+         LOG.critical(f"A critical error occurred in a bash script, which exited with code {
+             e.returncode}.")
+         LOG.critical(f"Script: {script}")
+         # The exception will be caught by the main loop, which will shut down the VM
+         raise HandlerError(f"Bash script failed with exit code {e.returncode}")
+     except FileNotFoundError:
+         raise HandlerError(
+             "Could not find the specified shell or command to execute the script.")
+     except KeyboardInterrupt:
+         LOG.info("Bash script interrupted by user (Ctrl+C)")
+         if process:
+             try:
+                 # Immediately send SIGKILL to the entire process group for immediate termination
+                 os.killpg(process.pid, signal.SIGKILL)
+                 # Wait briefly for the process to die
+                 process.wait(timeout=2)
+             except (OSError, subprocess.TimeoutExpired):
+                 # Process might already be dead
+                 pass
+         raise
+
+
+ @StageHandler.method
+ def echo(state, text: str):
+     print(text)
+
+
+ @StageHandler.method
+ def snapshot(state, drive: str, name: str,
+              overwrite: bool = False, **kwargs):
+     if drive not in state.storage:
+         raise HandlerError(f"Drive {drive} not exists")
+
+     state.storage[drive].snapshot(name, overwrite)
+
+
+ @StageHandler.method
+ def device_add(state, driver: str, id: str, **kwargs):
+     """Adds a device to the VM using QMP."""
+     command = qmp_commands.qmp_device_add(driver, id, **kwargs)
+     qmp_run_command(state.vm, **command)
+
+
+ @StageHandler.method
+ def device_del(state, id: str):
+     """Removes a device from the VM using QMP."""
+     command = qmp_commands.qmp_device_del(id)
+     qmp_run_command(state.vm, **command)
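The tasks above are registered on StageHandler through the `StageHandler.method` decorator, which comes from `maqet.handlers.base` and is not part of this hunk. As a rough sketch only (the registry internals below are an assumption, not maqet's actual Handler implementation), such a decorator-based task registry typically looks like this:

    # Hypothetical sketch of a decorator-based task registry; maqet's real
    # Handler class in maqet/handlers/base.py may be implemented differently.
    class Handler:
        _tasks = {}  # name -> callable registry (shared here for brevity)

        @classmethod
        def method(cls, func):
            # Register the decorated function under its name and return it unchanged.
            cls._tasks[func.__name__] = func
            return func

        @classmethod
        def run(cls, name, state, **kwargs):
            # Dispatch a named task against the shared pipeline state.
            return cls._tasks[name](state, **kwargs)

Under a registry like this, a stage entry naming "launch" resolves to the launch() task above and receives the pipeline state object as its first argument.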
maqet/ipc/__init__.py ADDED
@@ -0,0 +1,29 @@
+ """
+ IPC (Inter-Process Communication) Module
+
+ Provides Unix domain socket-based IPC for communication between
+ CLI processes and VM runner processes.
+
+ Components:
+ - UnixSocketIPCServer: Server for VM runner processes
+ - RunnerClient: Client for CLI processes
+ - retry: Retry logic with exponential backoff and circuit breaker
+
+ Protocol: JSON-RPC over Unix domain sockets
+ """
+
+ from .unix_socket_server import UnixSocketIPCServer
+ from .runner_client import RunnerClient
+ from .retry import (
+     retry_with_backoff,
+     async_retry_with_backoff,
+     CircuitBreaker,
+ )
+
+ __all__ = [
+     "UnixSocketIPCServer",
+     "RunnerClient",
+     "retry_with_backoff",
+     "async_retry_with_backoff",
+     "CircuitBreaker",
+ ]
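The module docstring states the protocol is JSON-RPC over Unix domain sockets, but the concrete message schema and framing used by UnixSocketIPCServer and RunnerClient live in files not reproduced here. Purely as an illustration of the general pattern (the socket path, method name, and newline framing are assumptions, not maqet's actual protocol):

    # Generic JSON-RPC 2.0 call over a Unix domain socket (illustrative only).
    import asyncio
    import json

    async def call(socket_path: str, method: str, params: dict) -> dict:
        reader, writer = await asyncio.open_unix_connection(socket_path)
        request = {"jsonrpc": "2.0", "id": 1, "method": method, "params": params}
        writer.write(json.dumps(request).encode() + b"\n")  # newline framing (assumed)
        await writer.drain()
        response = json.loads(await reader.readline())
        writer.close()
        await writer.wait_closed()
        return response

    # Example (names hypothetical): asyncio.run(call("/run/maqet/vm.sock", "status", {}))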
maqet/ipc/retry.py ADDED
@@ -0,0 +1,265 @@
+ """
+ IPC Retry Logic with Exponential Backoff
+
+ Provides decorators and utilities for retrying IPC operations that may fail
+ due to transient network issues, temporary unavailability, or timing issues.
+
+ Features:
+ - Exponential backoff with configurable base delay
+ - Configurable max attempts
+ - Selective exception handling (retry only on specific exceptions)
+ - Debug logging for retry attempts
+ - Synchronous and asynchronous decorator support
+ """
+
+ import asyncio
+ import functools
+ import time
+ from typing import Callable, Tuple, Type, Union
+
+ from ..logger import LOG
+
+
+ def retry_with_backoff(
+     max_attempts: int = 3,
+     backoff_base: float = 0.5,
+     exceptions: Tuple[Type[Exception], ...] = (
+         ConnectionRefusedError,
+         FileNotFoundError,
+     ),
+ ):
+     """
+     Decorator for synchronous functions with exponential backoff retry logic.
+
+     Retries function on specified exceptions using exponential backoff:
+     - Attempt 1: Immediate
+     - Attempt 2: Wait backoff_base seconds (0.5s default)
+     - Attempt 3: Wait backoff_base * 2 seconds (1s default)
+     - Attempt 4: Wait backoff_base * 4 seconds (2s default)
+     - etc.
+
+     Args:
+         max_attempts: Maximum number of attempts (default: 3)
+         backoff_base: Base delay in seconds for exponential backoff (default: 0.5)
+         exceptions: Tuple of exception types to retry on
+             (default: ConnectionRefusedError, FileNotFoundError)
+
+     Returns:
+         Decorated function that retries on transient failures
+
+     Example:
+         @retry_with_backoff(max_attempts=3, backoff_base=0.5)
+         def connect_to_service():
+             return service.connect()
+
+     Note:
+         - Only retries on specified exceptions (transient errors)
+         - Other exceptions propagate immediately (permanent errors)
+         - Logs retry attempts at DEBUG level
+         - Final failure logs at WARNING level
+     """
+
+     def decorator(func: Callable) -> Callable:
+         @functools.wraps(func)
+         def wrapper(*args, **kwargs):
+             last_exception = None
+
+             for attempt in range(1, max_attempts + 1):
+                 try:
+                     return func(*args, **kwargs)
+
+                 except exceptions as e:
+                     last_exception = e
+
+                     if attempt < max_attempts:
+                         # Calculate delay with exponential backoff
+                         delay = backoff_base * (2 ** (attempt - 1))
+                         LOG.debug(
+                             f"IPC retry attempt {attempt}/{max_attempts} failed: {e}. "
+                             f"Retrying in {delay:.2f}s..."
+                         )
+                         time.sleep(delay)
+                     else:
+                         # Max attempts reached
+                         LOG.warning(
+                             f"IPC operation failed after {max_attempts} attempts: {e}"
+                         )
+
+                 except Exception as e:
+                     # Non-retryable exception, propagate immediately
+                     LOG.debug(f"Non-retryable exception in IPC operation: {e}")
+                     raise
+
+             # If we get here, all attempts failed
+             raise last_exception
+
+         return wrapper
+
+     return decorator
+
+
+ def async_retry_with_backoff(
+     max_attempts: int = 3,
+     backoff_base: float = 0.5,
+     exceptions: Tuple[Type[Exception], ...] = (
+         ConnectionRefusedError,
+         FileNotFoundError,
+         OSError,
+     ),
+ ):
+     """
+     Decorator for asynchronous functions with exponential backoff retry logic.
+
+     Async version of retry_with_backoff(). Retries async function on specified
+     exceptions using exponential backoff with asyncio.sleep.
+
+     Args:
+         max_attempts: Maximum number of attempts (default: 3)
+         backoff_base: Base delay in seconds for exponential backoff (default: 0.5)
+         exceptions: Tuple of exception types to retry on
+             (default: ConnectionRefusedError, FileNotFoundError, OSError)
+
+     Returns:
+         Decorated async function that retries on transient failures
+
+     Example:
+         @async_retry_with_backoff(max_attempts=3, backoff_base=0.5)
+         async def connect_to_socket():
+             return await asyncio.open_unix_connection("/path/to/socket")
+
+     Note:
+         - Only retries on specified exceptions (transient errors)
+         - Other exceptions propagate immediately (permanent errors)
+         - Logs retry attempts at DEBUG level
+         - Final failure logs at WARNING level
+         - Uses asyncio.sleep for non-blocking delays
+     """
+
+     def decorator(func: Callable) -> Callable:
+         @functools.wraps(func)
+         async def wrapper(*args, **kwargs):
+             last_exception = None
+
+             for attempt in range(1, max_attempts + 1):
+                 try:
+                     return await func(*args, **kwargs)
+
+                 except exceptions as e:
+                     last_exception = e
+
+                     if attempt < max_attempts:
+                         # Calculate delay with exponential backoff
+                         delay = backoff_base * (2 ** (attempt - 1))
+                         LOG.debug(
+                             f"IPC retry attempt {attempt}/{max_attempts} failed: {e}. "
+                             f"Retrying in {delay:.2f}s..."
+                         )
+                         await asyncio.sleep(delay)
+                     else:
+                         # Max attempts reached
+                         LOG.warning(
+                             f"IPC operation failed after {max_attempts} attempts: {e}"
+                         )
+
+                 except Exception as e:
+                     # Non-retryable exception, propagate immediately
+                     LOG.debug(f"Non-retryable exception in IPC operation: {e}")
+                     raise
+
+             # If we get here, all attempts failed
+             raise last_exception
+
+         return wrapper
+
+     return decorator
+
+
+ class CircuitBreaker:
+     """
+     Circuit breaker pattern for IPC operations.
+
+     Prevents repeated attempts to connect to a failing service by
+     "opening the circuit" after a threshold of failures.
+
+     States:
+     - CLOSED: Normal operation, requests pass through
+     - OPEN: Too many failures, requests fail immediately
+     - HALF_OPEN: Testing if service recovered, allow one request
+
+     Usage:
+         breaker = CircuitBreaker(failure_threshold=5, timeout=60)
+
+         if breaker.is_open():
+             raise Exception("Circuit breaker open")
+
+         try:
+             result = connect_to_service()
+             breaker.record_success()
+         except Exception:
+             breaker.record_failure()
+             raise
+
+     Note:
+         - Circuit opens after failure_threshold consecutive failures
+         - After timeout seconds, circuit enters HALF_OPEN state
+         - One successful request in HALF_OPEN state closes circuit
+         - Thread-safe for concurrent access
+     """
+
+     def __init__(self, failure_threshold: int = 5, timeout: float = 60.0):
+         """
+         Initialize circuit breaker.
+
+         Args:
+             failure_threshold: Number of failures before opening circuit
+             timeout: Seconds to wait before attempting recovery (HALF_OPEN state)
+         """
+         self.failure_threshold = failure_threshold
+         self.timeout = timeout
+         self.failure_count = 0
+         self.last_failure_time = None
+         self.state = "CLOSED"  # CLOSED, OPEN, HALF_OPEN
+
+     def is_open(self) -> bool:
+         """
+         Check if circuit is open (blocking requests).
+
+         Returns:
+             True if circuit is open and requests should be blocked
+         """
+         if self.state == "OPEN":
+             # Check if timeout elapsed, transition to HALF_OPEN
+             if (
+                 self.last_failure_time
+                 and time.time() - self.last_failure_time >= self.timeout
+             ):
+                 LOG.debug("Circuit breaker entering HALF_OPEN state")
+                 self.state = "HALF_OPEN"
+                 return False
+             return True
+         return False
+
+     def record_success(self) -> None:
+         """Record successful operation, reset failure count."""
+         if self.state == "HALF_OPEN":
+             LOG.debug("Circuit breaker closing (recovered)")
+         self.failure_count = 0
+         self.state = "CLOSED"
+
+     def record_failure(self) -> None:
+         """Record failed operation, potentially open circuit."""
+         self.failure_count += 1
+         self.last_failure_time = time.time()
+
+         if self.failure_count >= self.failure_threshold:
+             LOG.warning(
+                 f"Circuit breaker opening after {self.failure_count} failures"
+             )
+             self.state = "OPEN"
+
+     def reset(self) -> None:
+         """Manually reset circuit breaker to CLOSED state."""
+         LOG.debug("Circuit breaker manually reset")
+         self.failure_count = 0
+         self.last_failure_time = None
+         self.state = "CLOSED"
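The retry decorators and the circuit breaker are independent building blocks. Based only on the signatures shown above, a caller could combine them roughly as follows (connect_runner() and its failure mode are placeholders, not maqet code):

    # Combining retry_with_backoff and CircuitBreaker as exported by maqet.ipc.
    # connect_runner() is a placeholder for the real IPC connection attempt.
    from maqet.ipc import CircuitBreaker, retry_with_backoff

    breaker = CircuitBreaker(failure_threshold=5, timeout=60.0)

    @retry_with_backoff(max_attempts=3, backoff_base=0.5)
    def connect_runner():
        raise ConnectionRefusedError("runner socket not ready")  # placeholder failure

    def guarded_connect():
        if breaker.is_open():
            raise ConnectionRefusedError("circuit open, not attempting connection")
        try:
            result = connect_runner()
            breaker.record_success()
            return result
        except Exception:
            breaker.record_failure()
            raise

With the defaults shown here, a transient failure costs at most three attempts (0.5 s, then 1 s of backoff) before the exception propagates, and five consecutive guarded_connect() failures open the circuit for 60 seconds before a HALF_OPEN probe is allowed.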