base-deployment-controller 0.2.0__tar.gz → 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/PKG-INFO +1 -1
  2. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/pyproject.toml +1 -1
  3. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/__init__.py +6 -1
  4. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/builder.py +6 -1
  5. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/models/events.py +10 -1
  6. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/routers/deployment.py +87 -2
  7. base_deployment_controller-0.3.0/src/base_deployment_controller/services/deployment_status_monitor.py +278 -0
  8. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/services/status_event_manager.py +4 -4
  9. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller.egg-info/PKG-INFO +1 -1
  10. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller.egg-info/SOURCES.txt +3 -1
  11. base_deployment_controller-0.3.0/tests/test_deployment.py +102 -0
  12. base_deployment_controller-0.3.0/tests/test_status_event_manager.py +138 -0
  13. base_deployment_controller-0.2.0/tests/test_deployment.py +0 -76
  14. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/README.md +0 -0
  15. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/setup.cfg +0 -0
  16. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/models/__init__.py +0 -0
  17. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/models/api.py +0 -0
  18. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/models/compose.py +0 -0
  19. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/models/container.py +0 -0
  20. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/models/deployment.py +0 -0
  21. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/models/environment.py +0 -0
  22. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/models/task.py +0 -0
  23. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/routers/__init__.py +0 -0
  24. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/routers/api.py +0 -0
  25. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/routers/container.py +0 -0
  26. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/routers/environment.py +0 -0
  27. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/services/__init__.py +0 -0
  28. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/services/config.py +0 -0
  29. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller/services/task_manager.py +0 -0
  30. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller.egg-info/dependency_links.txt +0 -0
  31. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller.egg-info/requires.txt +0 -0
  32. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/src/base_deployment_controller.egg-info/top_level.txt +0 -0
  33. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/tests/test_api.py +0 -0
  34. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/tests/test_containers.py +0 -0
  35. {base_deployment_controller-0.2.0 → base_deployment_controller-0.3.0}/tests/test_envs.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: base-deployment-controller
3
- Version: 0.2.0
3
+ Version: 0.3.0
4
4
  Summary: REST API to control the basic operations of a deployment
5
5
  Author: Tknika
6
6
  License: Creative Commons Atribución-NoComercial (CC BY-NC)
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "base-deployment-controller"
7
- version = "0.2.0"
7
+ version = "0.3.0"
8
8
  description = "REST API to control the basic operations of a deployment"
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.8"
@@ -10,6 +10,7 @@ from .routers.api import APIRoutes
10
10
  from .routers.environment import EnvRoutes
11
11
  from .routers.container import ContainerRoutes
12
12
  from .services.status_event_manager import StatusEventManager
13
+ from .services.deployment_status_monitor import DeploymentStatusMonitor
13
14
  from .routers.deployment import DeploymentRoutes
14
15
  from .builder import AppBuilder
15
16
 
@@ -65,8 +66,12 @@ def create_app(
65
66
  api_routes = APIRoutes()
66
67
  env_routes = EnvRoutes(config_service, task_manager)
67
68
  status_events = StatusEventManager(config_service)
69
+ deployment_status_monitor = DeploymentStatusMonitor(
70
+ config_service,
71
+ status_events=status_events,
72
+ )
68
73
  container_routes = ContainerRoutes(config_service, task_manager, status_events)
69
- deployment_routes = DeploymentRoutes(config_service, task_manager)
74
+ deployment_routes = DeploymentRoutes(config_service, task_manager, deployment_status_monitor)
70
75
 
71
76
  app.include_router(api_routes.router)
72
77
  app.include_router(env_routes.router)
@@ -6,6 +6,7 @@ from fastapi import APIRouter, FastAPI
6
6
  from .services.config import ConfigService
7
7
  from .services.task_manager import TaskManager
8
8
  from .services.status_event_manager import StatusEventManager
9
+ from .services.deployment_status_monitor import DeploymentStatusMonitor
9
10
  from .routers.api import APIRoutes
10
11
  from .routers.environment import EnvRoutes
11
12
  from .routers.container import ContainerRoutes
@@ -81,11 +82,15 @@ class AppBuilder:
81
82
  config_service = ConfigService(self.compose_file, self.env_file)
82
83
  task_manager = TaskManager(ttl=3600)
83
84
  status_events = StatusEventManager(config_service)
85
+ deployment_status_monitor = DeploymentStatusMonitor(
86
+ config_service,
87
+ status_events=status_events,
88
+ )
84
89
 
85
90
  api_routes = APIRoutes()
86
91
  env_routes = EnvRoutes(config_service, task_manager)
87
92
  container_routes = ContainerRoutes(config_service, task_manager, status_events)
88
- deployment_routes = DeploymentRoutes(config_service, task_manager)
93
+ deployment_routes = DeploymentRoutes(config_service, task_manager, deployment_status_monitor)
89
94
 
90
95
  app.include_router(api_routes.router)
91
96
  app.include_router(env_routes.router)
@@ -1,11 +1,12 @@
1
1
  """
2
- Event models for container status streaming via SSE.
2
+ Event models for container and deployment status streaming via SSE.
3
3
  """
4
4
  from datetime import datetime
5
5
  from enum import Enum
6
6
  from typing import Optional
7
7
 
8
8
  from pydantic import BaseModel, Field
9
+ from .deployment import DeploymentStatus
9
10
 
10
11
 
11
12
  class ServiceState(str, Enum):
@@ -32,3 +33,11 @@ class ContainerStatusEvent(BaseModel):
32
33
  prev_state: Optional[ServiceState] = Field(None, description="Previous state if known")
33
34
  action: str = Field(..., description="Docker event action that triggered the state change")
34
35
  timestamp: datetime = Field(..., description="Event timestamp")
36
+
37
+
38
class DeploymentStatusEvent(BaseModel):
    """Deployment status change event for SSE streaming.

    Serialized with ``model_dump_json()`` and sent as the ``data:`` payload
    of a Server-Sent Event on the ``GET /deployment/status`` stream.
    """

    # Current aggregated deployment status at the time of the event.
    status: DeploymentStatus = Field(..., description="Current deployment status")
    # None for the initial event sent right after a client subscribes.
    previous_status: Optional[DeploymentStatus] = Field(None, description="Previous deployment status if known")
    timestamp: datetime = Field(..., description="Event timestamp")
@@ -5,15 +5,18 @@ Manages deployment-wide operations: status, up, stop, down, restart.
5
5
  import asyncio
6
6
  import json
7
7
  import logging
8
- from typing import AsyncIterator, Dict, Set
8
+ from datetime import datetime, timezone
9
+ from typing import AsyncIterator, Dict, Set, Optional
9
10
 
10
11
  from fastapi import APIRouter, HTTPException, Request
11
12
  from fastapi.responses import Response, StreamingResponse
12
13
 
13
14
  from ..models.deployment import DeploymentInfoResponse
14
15
  from ..models.environment import EnvVariable
16
+ from ..models.events import DeploymentStatusEvent
15
17
  from ..models.task import TaskResponse, TaskDetail, TaskStatus
16
18
  from ..services.config import ConfigService
19
+ from ..services.deployment_status_monitor import DeploymentStatusMonitor
17
20
  from ..services.task_manager import TaskManager
18
21
 
19
22
  logger = logging.getLogger(__name__)
@@ -25,6 +28,7 @@ class DeploymentRoutes:
25
28
 
26
29
  Manages deployment-wide operations:
27
30
  - GET /deployment - Get deployment status with metadata and env-vars
31
+ - GET /deployment/status - SSE stream of deployment status changes
28
32
  - POST /deployment/up - Start deployment (async)
29
33
  - POST /deployment/stop - Stop deployment (async)
30
34
  - POST /deployment/down - Down deployment (async)
@@ -36,23 +40,32 @@ class DeploymentRoutes:
36
40
  Args:
37
41
  config: Instance of `ConfigService` for Compose and Docker access.
38
42
  task_manager: Instance of `TaskManager` for async operations.
43
+ status_monitor: Instance of `DeploymentStatusMonitor` for status monitoring.
39
44
 
40
45
  Attributes:
41
46
  config: Injected configuration service.
42
47
  task_manager: Injected task manager service.
48
+ status_monitor: Injected deployment status monitor.
43
49
  router: Instance of `APIRouter` with /deployment endpoints.
44
50
  """
45
51
 
46
- def __init__(self, config: ConfigService, task_manager: TaskManager) -> None:
52
+ def __init__(
53
+ self,
54
+ config: ConfigService,
55
+ task_manager: TaskManager,
56
+ status_monitor: DeploymentStatusMonitor,
57
+ ) -> None:
47
58
  """
48
59
  Initialize deployment routes.
49
60
 
50
61
  Args:
51
62
  config: Configuration service instance for dependency injection.
52
63
  task_manager: Task manager instance for dependency injection.
64
+ status_monitor: Deployment status monitor instance for dependency injection.
53
65
  """
54
66
  self.config = config
55
67
  self.task_manager = task_manager
68
+ self.status_monitor = status_monitor
56
69
  self.router = self._build_router()
57
70
 
58
71
  def _build_router(self) -> APIRouter:
@@ -71,6 +84,12 @@ class DeploymentRoutes:
71
84
  methods=["GET"],
72
85
  response_model=DeploymentInfoResponse,
73
86
  )
87
+ # GET /deployment/status - SSE stream of deployment status changes
88
+ router.add_api_route(
89
+ "/status",
90
+ self.stream_deployment_status,
91
+ methods=["GET"],
92
+ )
74
93
  # POST /deployment/up - start deployment
75
94
  router.add_api_route(
76
95
  "/up",
@@ -412,6 +431,72 @@ class DeploymentRoutes:
412
431
  },
413
432
  )
414
433
 
434
+ async def stream_deployment_status(self) -> StreamingResponse:
435
+ """
436
+ Stream deployment status changes via Server-Sent Events (SSE).
437
+
438
+ Monitors deployment status and sends updates only when the status changes.
439
+ Sends the current status immediately upon connection.
440
+
441
+ Returns:
442
+ StreamingResponse with SSE stream of deployment status updates.
443
+ """
444
+ logger.info("Client connected to deployment status stream")
445
+
446
+ async def event_generator() -> AsyncIterator[str]:
447
+ """Generate SSE events for deployment status changes."""
448
+ q: Optional[asyncio.Queue] = None
449
+ try:
450
+ # Subscribe and get current status
451
+ q, current_status = await self.status_monitor.subscribe()
452
+
453
+ # Send current status immediately if available
454
+ if current_status:
455
+ event = DeploymentStatusEvent(
456
+ status=current_status,
457
+ previous_status=None,
458
+ timestamp=datetime.now(timezone.utc),
459
+ )
460
+ yield f"data: {event.model_dump_json()}\n\n"
461
+ logger.debug(f"Sent initial deployment status: {current_status}")
462
+
463
+ # Stream status changes
464
+ while True:
465
+ try:
466
+ # Get next event from queue (with timeout to allow cancellation)
467
+ event = await asyncio.wait_for(q.get(), timeout=5.0)
468
+ yield f"data: {event.model_dump_json()}\n\n"
469
+ logger.debug(f"Sent deployment status change: {event.status}")
470
+ except asyncio.TimeoutError:
471
+ # Send keep-alive comment to prevent connection timeout
472
+ yield ": keep-alive\n\n"
473
+ except asyncio.CancelledError:
474
+ logger.info("Deployment status stream cancelled by client")
475
+ break
476
+ except Exception as e:
477
+ logger.error(f"Error in deployment status stream: {e}")
478
+ yield f"event: error\ndata: {json.dumps({'error': str(e)})}\n\n"
479
+ break
480
+
481
+ except Exception as e:
482
+ logger.error(f"Error subscribing to deployment status: {e}")
483
+ yield f"event: error\ndata: {json.dumps({'error': str(e)})}\n\n"
484
+ finally:
485
+ # Unsubscribe when client disconnects
486
+ if q is not None:
487
+ await self.status_monitor.unsubscribe(q)
488
+ logger.info("Client disconnected from deployment status stream")
489
+
490
+ return StreamingResponse(
491
+ event_generator(),
492
+ media_type="text/event-stream",
493
+ headers={
494
+ "Cache-Control": "no-cache",
495
+ "Connection": "keep-alive",
496
+ "X-Accel-Buffering": "no",
497
+ },
498
+ )
499
+
415
500
  def _execute_compose_up(self, task_id: str) -> None:
416
501
  """
417
502
  Execute docker compose up.
@@ -0,0 +1,278 @@
1
+ """
2
+ DeploymentStatusMonitor: on-demand deployment status monitor with SSE subscribers.
3
+
4
+ Monitors deployment status changes by polling get_deployment_status at regular intervals
5
+ and broadcasts changes to subscribed SSE clients. Starts when the first subscriber connects
6
+ and stops when there are no subscribers.
7
+ """
8
+ import asyncio
9
+ import logging
10
+ from datetime import datetime, timezone
11
+ from typing import Dict, List, Optional, Set
12
+
13
+ from ..models.deployment import DeploymentStatus
14
+ from ..models.events import DeploymentStatusEvent, ServiceState
15
+ from ..services.config import ConfigService
16
+ from ..services.status_event_manager import StatusEventManager
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
+
21
class DeploymentStatusMonitor:
    """
    Manages deployment status monitoring and broadcasts status change events
    to subscribed SSE clients. Starts when the first subscriber connects and stops
    when there are no subscribers.

    The deployment-wide status is derived from per-container states. Container
    events are consumed from a shared `StatusEventManager` (single Docker events
    subscription) rather than by polling. Uses asyncio to process container
    events without blocking the event loop.
    """

    def __init__(
        self,
        config: ConfigService,
        status_events: StatusEventManager,
        error_backoff_seconds: float = 0.25,
    ) -> None:
        """
        Initialize the deployment status monitor.

        Args:
            config: ConfigService instance for accessing deployment status.
            status_events: Shared StatusEventManager (single Docker events subscription).
            error_backoff_seconds: Sleep time after errors in the event loop.
        """
        self.config = config
        self.status_events = status_events
        self._task: Optional[asyncio.Task] = None
        self._subscribers: List[asyncio.Queue] = []
        self._lock = asyncio.Lock()
        self._last_status: Optional[DeploymentStatus] = None
        self._error_backoff_seconds = error_backoff_seconds
        self._container_states: Dict[str, ServiceState] = {}
        self._relevant_containers: Set[str] = set()
        # Queue for this monitor's own subscription to the Docker events manager.
        self._docker_subscriber_q: Optional[object] = None

        # Resolve the container names this deployment manages; a compose
        # service may override its container name via `container_name`.
        services = self.config.compose_services or {}
        for service_name, service_config in services.items():
            container_name = service_config.get("container_name", service_name)
            self._relevant_containers.add(container_name)

        logger.info(
            f"DeploymentStatusMonitor: initialized for {len(self._relevant_containers)} containers"
        )

    async def _ensure_started(self) -> None:
        """Ensure the monitor task is running."""
        async with self._lock:
            if self._task and not self._task.done():
                return
            # Start monitor task
            self._task = asyncio.create_task(self._monitor_loop())
            logger.debug("DeploymentStatusMonitor: monitor started")

    async def _maybe_stop(self) -> None:
        """Stop the monitor if there are no subscribers."""
        async with self._lock:
            if self._subscribers:
                return
            if self._task and not self._task.done():
                self._task.cancel()
                try:
                    await self._task
                except asyncio.CancelledError:
                    pass
                self._task = None
                logger.info("DeploymentStatusMonitor: monitor stopped")

    async def subscribe(self) -> tuple[asyncio.Queue, Optional[DeploymentStatus]]:
        """
        Add a new subscriber and ensure the monitor is running.

        Returns:
            Tuple of (queue for receiving events, current deployment status).
            The current status is returned immediately so the client has the initial state.
        """
        q: asyncio.Queue = asyncio.Queue()

        # Ensure we have a snapshot for immediate, correct initial state.
        if self._last_status is None:
            await self._refresh_snapshot()

        async with self._lock:
            self._subscribers.append(q)
            current_status = self._last_status

        await self._ensure_started()
        return q, current_status

    async def _refresh_snapshot(self) -> None:
        """Build an initial container state snapshot via Docker inspection."""

        def _snapshot_sync() -> Dict[str, ServiceState]:
            # Runs in a worker thread: docker inspection is blocking I/O.
            services = self.config.compose_services or {}
            if not services:
                return {}

            client = self.config.get_docker_client()
            snapshot: Dict[str, ServiceState] = {}
            for service_name, service_config in services.items():
                container_name = service_config.get("container_name", service_name)
                try:
                    if not client.container.exists(container_name):
                        snapshot[container_name] = ServiceState.REMOVED
                        continue
                    inspect = client.container.inspect(container_name)
                    if getattr(inspect.state, "status", None) == "running":
                        snapshot[container_name] = ServiceState.STARTED
                    else:
                        snapshot[container_name] = ServiceState.STOPPED
                except Exception:
                    snapshot[container_name] = ServiceState.ERROR
            return snapshot

        async with self._lock:
            # Avoid doing expensive snapshots repeatedly.
            if self._last_status is not None:
                return

        snapshot = await asyncio.to_thread(_snapshot_sync)
        new_status = self._compute_deployment_status(snapshot)

        async with self._lock:
            # Re-check under the lock: the monitor loop (or a concurrent
            # subscriber) may have produced a fresher status while the
            # snapshot was being taken — never clobber it with stale data.
            if self._last_status is None:
                self._container_states = snapshot
                self._last_status = new_status
                logger.info(f"DeploymentStatusMonitor: initial snapshot status={new_status}")

    def _compute_deployment_status(self, states: Dict[str, ServiceState]) -> DeploymentStatus:
        """Compute deployment status from relevant container states."""
        if not self._relevant_containers:
            return DeploymentStatus.UNKNOWN

        started = 0
        stopped = 0
        transitional = 0
        unknown = 0
        total = len(self._relevant_containers)

        for name in self._relevant_containers:
            state = states.get(name)
            if state in (ServiceState.STARTED, ServiceState.STARTING):
                started += 1
            elif state in (ServiceState.STOPPED, ServiceState.REMOVED, ServiceState.NOT_STARTED):
                stopped += 1
            elif state in (ServiceState.CREATING, ServiceState.STOPPING, ServiceState.PULLING, ServiceState.PULLED):
                # REMOVED was previously listed here too, but that check was
                # unreachable: it is already counted as "stopped" above.
                transitional += 1
            elif state is None:
                unknown += 1
            else:
                # Error/other states
                logger.warning(f"DeploymentStatusMonitor: container '{name}' in state '{state}' treated as unknown")
                unknown += 1

        if started == total:
            return DeploymentStatus.RUNNING
        if stopped == total:
            return DeploymentStatus.STOPPED
        if started == 0 and stopped == 0 and unknown == total:
            return DeploymentStatus.UNKNOWN
        return DeploymentStatus.PARTIALLY_RUNNING

    async def unsubscribe(self, q: asyncio.Queue) -> None:
        """
        Remove subscriber and stop monitor if none left.

        Args:
            q: The queue to unsubscribe.
        """
        async with self._lock:
            if q in self._subscribers:
                self._subscribers.remove(q)
        # Must be called outside the lock: asyncio.Lock is not reentrant
        # and _maybe_stop acquires it again.
        await self._maybe_stop()

    async def _broadcast(self, event: DeploymentStatusEvent) -> None:
        """
        Broadcast an event to all subscribers.

        Args:
            event: The deployment status event to broadcast.
        """
        # Snapshot subscribers to avoid holding lock while putting
        async with self._lock:
            subscribers = list(self._subscribers)

        for q in subscribers:
            try:
                await asyncio.wait_for(q.put(event), timeout=0.1)
            except asyncio.TimeoutError:
                logger.warning("DeploymentStatusMonitor: timeout broadcasting to subscriber")
            except Exception as e:
                logger.error(f"DeploymentStatusMonitor: error broadcasting to subscriber: {e}")

    async def _monitor_loop(self) -> None:
        """
        Background task: process container events and broadcast deployment status changes.

        Runs continuously while there are subscribers.
        Only broadcasts when the computed deployment status changes.
        """
        try:
            logger.debug("DeploymentStatusMonitor: starting monitor loop (docker events)")

            # Internal subscription to the shared Docker events manager.
            self._docker_subscriber_q = self.status_events.subscribe()

            while True:
                try:
                    container_event = await self.status_events.get_event(self._docker_subscriber_q)
                    name = container_event.container_name

                    if name not in self._relevant_containers:
                        # Yield to the event loop before the next blocking get.
                        await asyncio.sleep(0)
                        continue

                    async with self._lock:
                        self._container_states[name] = container_event.state
                        current_status = self._compute_deployment_status(self._container_states)
                        prev_status = self._last_status

                        if current_status == prev_status:
                            continue

                        self._last_status = current_status

                    logger.info(
                        f"DeploymentStatusMonitor: status changed from {prev_status} to {current_status}"
                    )

                    event = DeploymentStatusEvent(
                        status=current_status,
                        previous_status=prev_status,
                        timestamp=datetime.now(timezone.utc),
                    )
                    await self._broadcast(event)

                except asyncio.CancelledError:
                    logger.debug("DeploymentStatusMonitor: monitor loop cancelled")
                    raise
                except Exception as e:
                    logger.error(
                        f"DeploymentStatusMonitor: error processing docker events: {e}",
                        exc_info=True,
                    )
                    await asyncio.sleep(self._error_backoff_seconds)

        except asyncio.CancelledError:
            logger.debug("DeploymentStatusMonitor: monitor loop exiting")
        except Exception as e:
            logger.error(
                f"DeploymentStatusMonitor: fatal error in monitor loop: {e}",
                exc_info=True
            )
        finally:
            # Ensure we unsubscribe from Docker events.
            try:
                if self._docker_subscriber_q is not None:
                    self.status_events.unsubscribe(self._docker_subscriber_q)
            except Exception:
                pass
            logger.debug("DeploymentStatusMonitor: monitor loop stopped")
@@ -99,7 +99,7 @@ class StatusEventManager:
99
99
  def _monitor_loop(self) -> None:
100
100
  """Background thread: listen to Docker events and broadcast mapped state events."""
101
101
  try:
102
- logger.info("StatusEventManager: starting Docker event monitor")
102
+ logger.debug("StatusEventManager: starting Docker event monitor")
103
103
  docker = self.config.get_docker_client()
104
104
  action_to_state = {
105
105
  "kill": ServiceState.STOPPING,
@@ -113,10 +113,10 @@ class StatusEventManager:
113
113
  "build": ServiceState.CREATING,
114
114
  }
115
115
 
116
- logger.info("StatusEventManager: listening to docker.system.events()")
116
+ logger.debug("StatusEventManager: listening to docker.system.events()")
117
117
  for event in docker.system.events(filters={"type": "container"}):
118
118
  if self._stop_event.is_set():
119
- logger.info("StatusEventManager: stop event received, breaking")
119
+ logger.debug("StatusEventManager: stop event received, breaking")
120
120
  break
121
121
  try:
122
122
  action = getattr(event, "action", "").lower()
@@ -136,7 +136,7 @@ class StatusEventManager:
136
136
  prev_state = self._last_state.get(name)
137
137
  self._last_state[name] = new_state
138
138
 
139
- logger.info(f"StatusEventManager: {name} state={new_state} (action={action})")
139
+ logger.debug(f"StatusEventManager: {name} state={new_state} (action={action})")
140
140
 
141
141
  ev = ContainerStatusEvent(
142
142
  container_name=name,
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: base-deployment-controller
3
- Version: 0.2.0
3
+ Version: 0.3.0
4
4
  Summary: REST API to control the basic operations of a deployment
5
5
  Author: Tknika
6
6
  License: Creative Commons Atribución-NoComercial (CC BY-NC)
@@ -22,9 +22,11 @@ src/base_deployment_controller/routers/deployment.py
22
22
  src/base_deployment_controller/routers/environment.py
23
23
  src/base_deployment_controller/services/__init__.py
24
24
  src/base_deployment_controller/services/config.py
25
+ src/base_deployment_controller/services/deployment_status_monitor.py
25
26
  src/base_deployment_controller/services/status_event_manager.py
26
27
  src/base_deployment_controller/services/task_manager.py
27
28
  tests/test_api.py
28
29
  tests/test_containers.py
29
30
  tests/test_deployment.py
30
- tests/test_envs.py
31
+ tests/test_envs.py
32
+ tests/test_status_event_manager.py
@@ -0,0 +1,102 @@
1
+ """
2
+ Test cases for the Deployment endpoints (/deployment).
3
+
4
+ Tests the full deployment lifecycle:
5
+ - Deploy UP with real-time SSE monitoring
6
+ - Deploy DOWN with real-time SSE monitoring
7
+ """
8
+ import time
9
+ import threading
10
+ from tests.utils import stream_task_updates, monitor_deployment_status
11
+
12
+
13
+ class TestDeploymentEndpoints:
14
+ """Deployment endpoint tests with SSE monitoring."""
15
+
16
+ def test_deployment_lifecycle_with_sse_monitoring(self, api_client, api_url, api_server, check_dependencies):
17
+ """
18
+ Test complete deployment lifecycle with real-time SSE monitoring.
19
+
20
+ Sequence:
21
+ 1. Start background monitor for /deployment/status
22
+ 2. POST /deployment/up to start deployment
23
+ 3. Monitor SSE stream to track container state transitions
24
+ 4. Verify all containers reach STARTED state
25
+ 5. POST /deployment/down to down deployment
26
+ 6. Monitor SSE stream for shutdown progression
27
+ 7. Verify all containers reach REMOVED state
28
+ """
29
+ print("\n=== DEPLOYMENT LIFECYCLE TEST ===\n")
30
+
31
+ # Start background monitor for overall deployment status
32
+ stop_monitor = threading.Event()
33
+ monitor_thread = threading.Thread(
34
+ target=monitor_deployment_status,
35
+ args=(api_url, stop_monitor),
36
+ daemon=True
37
+ )
38
+ monitor_thread.start()
39
+
40
+ # Give monitor a moment to connect
41
+ time.sleep(1)
42
+
43
+ try:
44
+ # PHASE 1: Start deployment
45
+ print("PHASE 1: Starting deployment (compose up)...")
46
+ resp_up = api_client.post(f"{api_url}/deployment/up")
47
+
48
+ assert resp_up.status_code == 202, "UP should return 202 Accepted"
49
+ data_up = resp_up.json()
50
+ assert "task_id" in data_up, "Response should contain task_id"
51
+ assert data_up.get("status") == "running", "Initial status should be 'running'"
52
+
53
+ task_id_up = data_up["task_id"]
54
+ sse_endpoint_up = f"/deployment/tasks/{task_id_up}/stream"
55
+
56
+ # Stream and monitor UP progress
57
+ print(f"Monitoring UP task {task_id_up[:8]}... via SSE...")
58
+ up_start_time = time.monotonic()
59
+ final_state_up = stream_task_updates(
60
+ api_url, task_id_up, sse_endpoint_up, timeout=120
61
+ )
62
+ up_elapsed = time.monotonic() - up_start_time
63
+ print(f"UP completed in {up_elapsed:.2f} seconds")
64
+
65
+ # Verify all containers are in STARTED state
66
+ print(f"Final UP task: {final_state_up}")
67
+ assert final_state_up.get("task_status") == "completed", "UP task should be completed"
68
+ assert final_state_up.get("operation") == "up"
69
+
70
+ time.sleep(2) # Give containers time to settle
71
+
72
+ # PHASE 2: Stop deployment
73
+ print("\nPHASE 2: Stopping deployment (compose down)...")
74
+ resp_down = api_client.post(f"{api_url}/deployment/down")
75
+
76
+ assert resp_down.status_code == 202, "DOWN should return 202 Accepted"
77
+ data_down = resp_down.json()
78
+ assert "task_id" in data_down, "Response should contain task_id"
79
+
80
+ task_id_down = data_down["task_id"]
81
+ sse_endpoint_down = f"/deployment/tasks/{task_id_down}/stream"
82
+
83
+ # Stream and monitor DOWN progress
84
+ print(f"Monitoring DOWN task {task_id_down[:8]}... via SSE...")
85
+ down_start_time = time.monotonic()
86
+ final_state_down = stream_task_updates(
87
+ api_url, task_id_down, sse_endpoint_down, timeout=120
88
+ )
89
+ down_elapsed = time.monotonic() - down_start_time
90
+ print(f"DOWN completed in {down_elapsed:.2f} seconds")
91
+
92
+ # Verify all containers are in REMOVED state
93
+ print(f"Final DOWN task: {final_state_down}")
94
+ assert final_state_down.get("task_status") == "completed", "DOWN task should be completed"
95
+ assert final_state_down.get("operation") == "down"
96
+
97
+ print("\n✓ Deployment lifecycle test passed")
98
+
99
+ finally:
100
+ # Stop the monitor thread
101
+ stop_monitor.set()
102
+ monitor_thread.join(timeout=5)
@@ -0,0 +1,138 @@
1
+ """
2
+ Unit tests for StatusEventManager and ContainerStatusEvent.
3
+ """
4
+ import pytest
5
+ from datetime import datetime, timezone
6
+ from queue import Queue
7
+
8
+ from src.base_deployment_controller.models.events import ContainerStatusEvent, ServiceState
9
+ from src.base_deployment_controller.services.status_event_manager import StatusEventManager
10
+ from src.base_deployment_controller.services.config import ConfigService
11
+
12
+
13
+ def test_container_status_event_creation():
14
+ """Test that ContainerStatusEvent can be created and serialized."""
15
+ event = ContainerStatusEvent(
16
+ container_name="test-container",
17
+ state=ServiceState.STARTED,
18
+ prev_state=ServiceState.STARTING,
19
+ action="start",
20
+ timestamp=datetime.now(timezone.utc)
21
+ )
22
+
23
+ assert event.container_name == "test-container"
24
+ assert event.state == ServiceState.STARTED
25
+ assert event.prev_state == ServiceState.STARTING
26
+ assert event.action == "start"
27
+ assert event.timestamp is not None
28
+
29
+ # Test JSON serialization
30
+ json_str = event.model_dump_json()
31
+ assert "test-container" in json_str
32
+ assert "started" in json_str.lower()
33
+ print(f"✓ Event JSON: {json_str}")
34
+
35
+
36
+ def test_container_status_event_json_format():
37
+ """Test that JSON matches expected SSE format."""
38
+ event = ContainerStatusEvent(
39
+ container_name="web",
40
+ state=ServiceState.STOPPED,
41
+ prev_state=None,
42
+ action="stop",
43
+ timestamp=datetime(2026, 1, 20, 10, 30, 0, tzinfo=timezone.utc)
44
+ )
45
+
46
+ json_str = event.model_dump_json()
47
+
48
+ # Should be valid JSON
49
+ import json
50
+ data = json.loads(json_str)
51
+
52
+ assert data["container_name"] == "web"
53
+ assert data["state"] == "stopped"
54
+ assert data["action"] == "stop"
55
+ assert data["prev_state"] is None
56
+
57
+ print(f"✓ JSON structure is valid: {json_str}")
58
+
59
+
60
+ def test_status_event_manager_creation():
61
+ """Test that StatusEventManager can be instantiated."""
62
+ config = ConfigService("data/compose.yaml", "data/.env")
63
+ manager = StatusEventManager(config)
64
+
65
+ assert manager is not None
66
+ assert manager._thread is None # Not started yet
67
+ assert len(manager._subscribers) == 0
68
+ print("✓ StatusEventManager created successfully")
69
+
70
+
71
+ def test_status_event_manager_subscribe_unsubscribe():
72
+ """Test subscribe/unsubscribe functionality."""
73
+ config = ConfigService("data/compose.yaml", "data/.env")
74
+ manager = StatusEventManager(config)
75
+
76
+ # Subscribe
77
+ q1 = manager.subscribe()
78
+ assert isinstance(q1, Queue)
79
+ assert len(manager._subscribers) == 1
80
+ print("✓ First subscriber connected")
81
+
82
+ # Subscribe again
83
+ q2 = manager.subscribe()
84
+ assert len(manager._subscribers) == 2
85
+ print("✓ Second subscriber connected")
86
+
87
+ # Unsubscribe first
88
+ manager.unsubscribe(q1)
89
+ assert len(manager._subscribers) == 1
90
+ print("✓ First subscriber disconnected")
91
+
92
+ # Unsubscribe second
93
+ manager.unsubscribe(q2)
94
+ assert len(manager._subscribers) == 0
95
+ print("✓ Second subscriber disconnected")
96
+
97
+
98
+ def test_status_event_manager_broadcast():
99
+ """Test that events are broadcast to all subscribers."""
100
+ config = ConfigService("data/compose.yaml", "data/.env")
101
+ manager = StatusEventManager(config)
102
+
103
+ # Subscribe two clients
104
+ q1 = manager.subscribe()
105
+ q2 = manager.subscribe()
106
+
107
+ # Broadcast an event
108
+ event = ContainerStatusEvent(
109
+ container_name="test",
110
+ state=ServiceState.STARTED,
111
+ prev_state=None,
112
+ action="start",
113
+ timestamp=datetime.now(timezone.utc)
114
+ )
115
+ manager._broadcast(event)
116
+
117
+ # Both queues should receive the event
118
+ received1 = q1.get(timeout=1)
119
+ received2 = q2.get(timeout=1)
120
+
121
+ assert received1.container_name == "test"
122
+ assert received2.container_name == "test"
123
+ print("✓ Events broadcast to all subscribers")
124
+
125
+ manager.unsubscribe(q1)
126
+ manager.unsubscribe(q2)
127
+
128
+
129
+ if __name__ == "__main__":
130
+ print("Running StatusEventManager tests...\n")
131
+
132
+ test_container_status_event_creation()
133
+ test_container_status_event_json_format()
134
+ test_status_event_manager_creation()
135
+ test_status_event_manager_subscribe_unsubscribe()
136
+ test_status_event_manager_broadcast()
137
+
138
+ print("\n✓ All tests passed!")
@@ -1,76 +0,0 @@
1
- """
2
- Test cases for the Deployment endpoints (/deployment).
3
-
4
- Tests the full deployment lifecycle:
5
- - Deploy UP with real-time SSE monitoring
6
- - Deploy DOWN with real-time SSE monitoring
7
- """
8
- import time
9
- from tests.utils import stream_task_updates
10
-
11
-
12
- class TestDeploymentEndpoints:
13
- """Deployment endpoint tests with SSE monitoring."""
14
-
15
- def test_deployment_lifecycle_with_sse_monitoring(self, api_client, api_url, api_server, check_dependencies):
16
- """
17
- Test complete deployment lifecycle with real-time SSE monitoring.
18
-
19
- Sequence:
20
- 1. POST /deployment/up to start deployment
21
- 2. Monitor SSE stream to track container state transitions
22
- 3. Verify all containers reach STARTED state
23
- 4. POST /deployment/down to down deployment
24
- 5. Monitor SSE stream for shutdown progression
25
- 6. Verify all containers reach REMOVED state
26
- """
27
- print("\n=== DEPLOYMENT LIFECYCLE TEST ===\n")
28
-
29
- # PHASE 1: Start deployment
30
- print("PHASE 1: Starting deployment (compose up)...")
31
- resp_up = api_client.post(f"{api_url}/deployment/up")
32
-
33
- assert resp_up.status_code == 202, "UP should return 202 Accepted"
34
- data_up = resp_up.json()
35
- assert "task_id" in data_up, "Response should contain task_id"
36
- assert data_up.get("status") == "running", "Initial status should be 'running'"
37
-
38
- task_id_up = data_up["task_id"]
39
- sse_endpoint_up = f"/deployment/tasks/{task_id_up}/stream"
40
-
41
- # Stream and monitor UP progress
42
- print(f"Monitoring UP task {task_id_up[:8]}... via SSE...")
43
- final_state_up = stream_task_updates(
44
- api_url, task_id_up, sse_endpoint_up, timeout=120
45
- )
46
-
47
- # Verify all containers are in STARTED state
48
- print(f"Final UP task: {final_state_up}")
49
- assert final_state_up.get("task_status") == "completed", "UP task should be completed"
50
- assert final_state_up.get("operation") == "up"
51
-
52
- time.sleep(2) # Give containers time to settle
53
-
54
- # PHASE 2: Stop deployment
55
- print("\nPHASE 2: Stopping deployment (compose down)...")
56
- resp_down = api_client.post(f"{api_url}/deployment/down")
57
-
58
- assert resp_down.status_code == 202, "DOWN should return 202 Accepted"
59
- data_down = resp_down.json()
60
- assert "task_id" in data_down, "Response should contain task_id"
61
-
62
- task_id_down = data_down["task_id"]
63
- sse_endpoint_down = f"/deployment/tasks/{task_id_down}/stream"
64
-
65
- # Stream and monitor DOWN progress
66
- print(f"Monitoring DOWN task {task_id_down[:8]}... via SSE...")
67
- final_state_down = stream_task_updates(
68
- api_url, task_id_down, sse_endpoint_down, timeout=120
69
- )
70
-
71
- # Verify all containers are in REMOVED state
72
- print(f"Final DOWN task: {final_state_down}")
73
- assert final_state_down.get("task_status") == "completed", "DOWN task should be completed"
74
- assert final_state_down.get("operation") == "down"
75
-
76
- print("\n✓ Deployment lifecycle test passed")