pactown-0.1.4-py3-none-any.whl → pactown-0.1.47-py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
pactown/orchestrator.py CHANGED
@@ -1,22 +1,19 @@
  """Orchestrator for managing pactown service ecosystems."""

- import asyncio
  import time
- from dataclasses import dataclass, field
+ from dataclasses import dataclass
  from pathlib import Path
- from typing import Optional, Callable
- import httpx
+ from typing import Optional

+ import httpx
  from rich.console import Console
- from rich.table import Table
- from rich.live import Live
  from rich.panel import Panel
+ from rich.table import Table

  from .config import EcosystemConfig, load_config
+ from .network import ServiceRegistry
  from .resolver import DependencyResolver
  from .sandbox_manager import SandboxManager, ServiceProcess
- from .network import ServiceRegistry, ServiceEndpoint
-

  console = Console()

@@ -33,7 +30,7 @@ class ServiceHealth:

  class Orchestrator:
  """Orchestrates the lifecycle of a pactown ecosystem."""
-
+
  def __init__(
  self,
  config: EcosystemConfig,
@@ -48,15 +45,15 @@ class Orchestrator:
  self.resolver = DependencyResolver(config)
  self.sandbox_manager = SandboxManager(config.sandbox_root)
  self._running: dict[str, ServiceProcess] = {}
-
+
  # Service registry for dynamic port allocation and discovery
  registry_path = Path(config.sandbox_root) / ".pactown-services.json"
  self.service_registry = ServiceRegistry(storage_path=registry_path)
-
+
  @classmethod
  def from_file(
- cls,
- config_path: str | Path,
+ cls,
+ config_path: str | Path,
  verbose: bool = True,
  dynamic_ports: bool = True,
  ) -> "Orchestrator":
@@ -64,7 +61,7 @@ class Orchestrator:
  config_path = Path(config_path)
  config = load_config(config_path)
  return cls(config, base_path=config_path.parent, verbose=verbose, dynamic_ports=dynamic_ports)
-
+
  def _get_readme_path(self, service_name: str) -> Path:
  """Get the README path for a service."""
  service = self.config.services[service_name]
@@ -72,33 +69,33 @@ class Orchestrator:
  if not readme_path.exists():
  raise FileNotFoundError(f"README not found: {readme_path}")
  return readme_path
-
+
  def validate(self) -> bool:
  """Validate the ecosystem configuration."""
  issues = self.resolver.validate()
-
+
  for name, service in self.config.services.items():
  readme_path = self.base_path / service.readme
  if not readme_path.exists():
  issues.append(f"README not found for '{name}': {readme_path}")
-
+
  if issues:
  console.print("[red]Validation failed:[/red]")
  for issue in issues:
  console.print(f" • {issue}")
  return False
-
+
  console.print("[green]✓ Ecosystem configuration is valid[/green]")
  return True
-
+
  def start_service(self, service_name: str) -> ServiceProcess:
  """Start a single service."""
  if service_name not in self.config.services:
  raise ValueError(f"Unknown service: {service_name}")
-
+
  service = self.config.services[service_name]
  readme_path = self._get_readme_path(service_name)
-
+
  # Register service and get allocated port
  if self.dynamic_ports:
  endpoint = self.service_registry.register(
@@ -107,43 +104,43 @@ class Orchestrator:
  health_check=service.health_check,
  )
  actual_port = endpoint.port
-
+
  if self.verbose and actual_port != service.port:
  console.print(f" [yellow]Port {service.port} busy, using {actual_port}[/yellow]")
  else:
  actual_port = service.port
-
+
  # Get dependencies from config
  dep_names = [d.name for d in service.depends_on]
-
+
  # Build environment with service discovery
  env = self.service_registry.get_environment(service_name, dep_names)
-
+
  # Add any extra env from config
  env.update(service.env)
  env["PACTOWN_ECOSYSTEM"] = self.config.name
-
+
  # Override port in service config for this run
  service_copy = service
  if actual_port != service.port:
  from dataclasses import replace
  service_copy = replace(service, port=actual_port)
-
+
  process = self.sandbox_manager.start_service(
  service_copy, readme_path, env, verbose=self.verbose
  )
  self._running[service_name] = process
  return process
-
+
  def start_all(
- self,
+ self,
  wait_for_health: bool = True,
  parallel: bool = True,
  max_workers: int = 4,
  ) -> dict[str, ServiceProcess]:
  """
  Start all services in dependency order.
-
+
  Args:
  wait_for_health: Wait for health checks
  parallel: Use parallel execution for independent services
@@ -153,86 +150,86 @@ class Orchestrator:
  return self._start_all_parallel(wait_for_health, max_workers)
  else:
  return self._start_all_sequential(wait_for_health)
-
+
  def _start_all_sequential(self, wait_for_health: bool = True) -> dict[str, ServiceProcess]:
  """Start all services sequentially in dependency order."""
  order = self.resolver.get_startup_order()
-
+
  if self.verbose:
  console.print(f"\n[bold]Starting ecosystem: {self.config.name}[/bold]")
  console.print(f"Startup order: {' → '.join(order)}\n")
-
+
  for name in order:
  try:
  self.start_service(name)
-
+
  if wait_for_health:
  service = self.config.services[name]
  if service.health_check:
  self._wait_for_health(name, timeout=service.timeout)
  else:
  time.sleep(1)
-
+
  except Exception as e:
  console.print(f"[red]Failed to start {name}: {e}[/red]")
  self.stop_all()
  raise
-
+
  if self.verbose:
  self.print_status()
-
+
  return self._running
-
+
  def _start_all_parallel(
- self,
+ self,
  wait_for_health: bool = True,
  max_workers: int = 4,
  ) -> dict[str, ServiceProcess]:
  """
  Start services in parallel waves based on dependencies.
-
+
  Services with no unmet dependencies start together in parallel.
  Once a wave completes, the next wave starts.
  """
  from concurrent.futures import ThreadPoolExecutor, as_completed
-
+
  if self.verbose:
  console.print(f"\n[bold]Starting ecosystem: {self.config.name} (parallel)[/bold]")
-
+
  # Build dependency map
  deps_map: dict[str, list[str]] = {}
  for name, service in self.config.services.items():
  deps_map[name] = [d.name for d in service.depends_on if d.name in self.config.services]
-
+
  started = set()
  remaining = set(self.config.services.keys())
  wave_num = 0
-
+
  while remaining:
  # Find services ready to start (all deps satisfied)
  ready = [
  name for name in remaining
  if all(d in started for d in deps_map.get(name, []))
  ]
-
+
  if not ready:
  raise ValueError(f"Cannot resolve dependencies for: {remaining}")
-
+
  wave_num += 1
  if self.verbose:
  console.print(f"\n[cyan]Wave {wave_num}:[/cyan] {', '.join(ready)}")
-
+
  # Start ready services in parallel
  wave_results = {}
  wave_errors = {}
-
+
  with ThreadPoolExecutor(max_workers=min(max_workers, len(ready))) as executor:
  futures = {}
-
+
  for name in ready:
  future = executor.submit(self._start_service_with_health, name, wait_for_health)
  futures[future] = name
-
+
  for future in as_completed(futures):
  name = futures[future]
  try:
@@ -244,101 +241,101 @@ class Orchestrator:
  except Exception as e:
  wave_errors[name] = str(e)
  remaining.remove(name)
-
+
  # Report wave results
  for name in wave_results:
  if self.verbose:
  console.print(f" [green]✓[/green] {name} started")
-
+
  for name, error in wave_errors.items():
  console.print(f" [red]✗[/red] {name}: {error}")
-
+
  # Stop on any failure
  if wave_errors:
- console.print(f"\n[red]Stopping due to errors...[/red]")
+ console.print("\n[red]Stopping due to errors...[/red]")
  self.stop_all()
  raise RuntimeError(f"Failed to start services: {wave_errors}")
-
+
  if self.verbose:
  console.print()
  self.print_status()
-
+
  return self._running
-
+
  def _start_service_with_health(self, service_name: str, wait_for_health: bool) -> ServiceProcess:
  """Start a service and optionally wait for health check."""
  proc = self.start_service(service_name)
-
+
  if wait_for_health:
  service = self.config.services[service_name]
  if service.health_check:
  self._wait_for_health(service_name, timeout=service.timeout)
  else:
  time.sleep(0.5)
-
+
  return proc
-
+
  def stop_service(self, service_name: str) -> bool:
  """Stop a single service."""
  if service_name not in self._running:
  return False
-
+
  if self.verbose:
  console.print(f"Stopping {service_name}...")
-
+
  success = self.sandbox_manager.stop_service(service_name)
  if success:
  del self._running[service_name]
  return success
-
+
  def stop_all(self) -> None:
  """Stop all services in reverse dependency order."""
  order = self.resolver.get_shutdown_order()
-
+
  if self.verbose:
  console.print(f"\n[bold]Stopping ecosystem: {self.config.name}[/bold]")
-
+
  for name in order:
  if name in self._running:
  self.stop_service(name)
  # Unregister from service registry
  self.service_registry.unregister(name)
-
+
  self.sandbox_manager.stop_all()
  self._running.clear()
-
+
  def restart_service(self, service_name: str) -> ServiceProcess:
  """Restart a single service."""
  self.stop_service(service_name)
  time.sleep(0.5)
  return self.start_service(service_name)
-
+
  def check_health(self, service_name: str) -> ServiceHealth:
  """Check health of a service."""
  if service_name not in self.config.services:
  return ServiceHealth(name=service_name, healthy=False, error="Unknown service")
-
+
  service = self.config.services[service_name]
-
+
  if service_name not in self._running:
  return ServiceHealth(name=service_name, healthy=False, error="Not running")
-
+
  if not self._running[service_name].is_running:
  return ServiceHealth(name=service_name, healthy=False, error="Process died")
-
+
  if not service.health_check:
  return ServiceHealth(name=service_name, healthy=True)
-
+
  # Get actual port from registry (may differ from config if dynamic)
  endpoint = self.service_registry.get(service_name)
  port = endpoint.port if endpoint else service.port
  url = f"http://localhost:{port}{service.health_check}"
-
+
  try:
  start = time.time()
  response = httpx.get(url, timeout=5.0)
  elapsed = (time.time() - start) * 1000
-
+
  return ServiceHealth(
  name=service_name,
  healthy=response.status_code < 400,
@@ -351,20 +348,20 @@ class Orchestrator:
  healthy=False,
  error=str(e),
  )
-
+
  def _wait_for_health(self, service_name: str, timeout: int = 60) -> bool:
  """Wait for a service to become healthy."""
  service = self.config.services[service_name]
-
+
  if not service.health_check:
  return True
-
+
  # Get actual port from registry
  endpoint = self.service_registry.get(service_name)
  port = endpoint.port if endpoint else service.port
  url = f"http://localhost:{port}{service.health_check}"
  deadline = time.time() + timeout
-
+
  while time.time() < deadline:
  try:
  response = httpx.get(url, timeout=2.0)
@@ -375,10 +372,10 @@ class Orchestrator:
  except Exception:
  pass
  time.sleep(0.5)
-
+
  console.print(f" [yellow]⚠[/yellow] {service_name} health check timed out")
  return False
-
+
  def print_status(self) -> None:
  """Print status of all services."""
  table = Table(title=f"Ecosystem: {self.config.name}")
@@ -387,17 +384,17 @@ class Orchestrator:
  table.add_column("Status", style="green")
  table.add_column("PID")
  table.add_column("Health")
-
+
  for name, service in self.config.services.items():
  # Get actual port from registry
  endpoint = self.service_registry.get(name)
  actual_port = endpoint.port if endpoint else service.port
-
+
  if name in self._running:
  proc = self._running[name]
  running = "🟢 Running" if proc.is_running else "🔴 Stopped"
  pid = str(proc.pid)
-
+
  health = self.check_health(name)
  if health.healthy:
  health_str = f"✓ {health.response_time_ms:.0f}ms" if health.response_time_ms else "✓"
@@ -408,7 +405,7 @@ class Orchestrator:
  pid = "-"
  health_str = "-"
  actual_port = service.port # Use config port if not registered
-
+
  table.add_row(
  name,
  str(actual_port) if actual_port else "-",
@@ -416,18 +413,18 @@ class Orchestrator:
  pid,
  health_str,
  )
-
+
  console.print(table)
-
+
  def print_graph(self) -> None:
  """Print dependency graph."""
  console.print(Panel(self.resolver.print_graph(), title="Dependency Graph"))
-
+
  def get_logs(self, service_name: str, lines: int = 100) -> Optional[str]:
  """Get recent logs from a service (if available)."""
  if service_name not in self._running:
  return None
-
+
  proc = self._running[service_name]
  if proc.process and proc.process.stdout:
  return proc.process.stdout.read()
@@ -437,12 +434,12 @@ class Orchestrator:
  def run_ecosystem(config_path: str | Path, wait: bool = True) -> Orchestrator:
  """Convenience function to start an ecosystem."""
  orch = Orchestrator.from_file(config_path)
-
+
  if not orch.validate():
  raise ValueError("Invalid ecosystem configuration")
-
+
  orch.start_all()
-
+
  if wait:
  try:
  console.print("\n[dim]Press Ctrl+C to stop all services[/dim]\n")
@@ -451,5 +448,5 @@ def run_ecosystem(config_path: str | Path, wait: bool = True) -> Orchestrator:
  except KeyboardInterrupt:
  console.print("\n[yellow]Shutting down...[/yellow]")
  orch.stop_all()
-
+
  return orch