@kapeta/local-cluster-service 0.76.1 → 0.76.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,11 @@
+## [0.76.2](https://github.com/kapetacom/local-cluster-service/compare/v0.76.1...v0.76.2) (2024-09-30)
+
+
+### Bug Fixes
+
+* gracefully handle missing docker ([973c2d3](https://github.com/kapetacom/local-cluster-service/commit/973c2d383b0315e7fd6aa376b248417f76bd8f62))
+* port conflict cant prevent server start ([0aac354](https://github.com/kapetacom/local-cluster-service/commit/0aac354742c3ccadd9da769a8802bea5b38df736))
+
 ## [0.76.1](https://github.com/kapetacom/local-cluster-service/compare/v0.76.0...v0.76.1) (2024-09-30)
 
 
@@ -427,6 +427,10 @@ class InstanceManager {
         if (!blockAsset) {
             throw new Error('Block not found: ' + blockRef);
         }
+        const isAlive = containerManager_1.containerManager.isAlive();
+        if (!isAlive) {
+            throw new Error('Docker is not running or is not responding');
+        }
         if (checkForSingleton && (await this.isSingletonOperator(blockAsset))) {
             const instances = await this.getAllInstancesForKind(systemId, blockAsset.data.kind);
             if (instances.length > 1) {
@@ -584,6 +588,10 @@ class InstanceManager {
         //console.log('\n## Checking instances:');
         let changed = false;
         const all = [...this._instances];
+        if (!containerManager_1.containerManager.isAlive()) {
+            // No need to check anything if docker is not running
+            return;
+        }
         while (all.length > 0) {
             // Check a few instances at a time - docker doesn't like too many concurrent requests
             const chunk = all.splice(0, 30);
@@ -712,6 +720,10 @@ class InstanceManager {
     }
     async getExternalStatus(instance) {
         if (instance.type === types_1.InstanceType.DOCKER) {
+            if (!containerManager_1.containerManager.isAlive()) {
+                // Consider making this "unknown"
+                return types_1.InstanceStatus.STOPPED;
+            }
             const containerName = await (0, utils_1.getBlockInstanceContainerName)(instance.systemId, instance.instanceId);
             const container = await containerManager_1.containerManager.getContainerByName(containerName);
             if (!container) {
@@ -73,13 +73,16 @@ router.post('/:systemId/:instanceId/start', async (req, res) => {
                 taskId: result.id,
             });
         }
-        else {
+        else if (result) {
             res.status(202).send({
                 ok: true,
                 pid: result.pid,
                 type: result.type,
             });
         }
+        else {
+            res.status(500).send({ ok: false, error: 'Failed to start instance' });
+        }
     }
     catch (e) {
         res.status(500).send({ ok: false, error: e.message });
@@ -24,10 +24,15 @@ class ServiceManager {
         if (!this._systems) {
             this._systems = {};
         }
-        lodash_1.default.forEach(this._systems, (system) => {
+        lodash_1.default.forEach(this._systems, (system, systemId) => {
             lodash_1.default.forEach(system, (services) => {
                 lodash_1.default.forEach(services, (portInfo) => {
-                    clusterService_1.clusterService.reservePort(portInfo.port);
+                    try {
+                        clusterService_1.clusterService.reservePort(portInfo.port);
+                    }
+                    catch (e) {
+                        console.warn('Failed to reserve port', systemId, portInfo.port, e);
+                    }
                 });
             });
         });
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@kapeta/local-cluster-service",
-    "version": "0.76.1",
+    "version": "0.76.2",
     "description": "Manages configuration, ports and service discovery for locally running Kapeta systems",
     "type": "commonjs",
     "exports": {
@@ -575,6 +575,11 @@ export class InstanceManager {
             throw new Error('Block not found: ' + blockRef);
         }
 
+        const isAlive = containerManager.isAlive();
+        if (!isAlive) {
+            throw new Error('Docker is not running or is not responding');
+        }
+
         if (checkForSingleton && (await this.isSingletonOperator(blockAsset))) {
             const instances = await this.getAllInstancesForKind(systemId, blockAsset.data.kind);
             if (instances.length > 1) {
@@ -767,6 +772,12 @@ export class InstanceManager {
         //console.log('\n## Checking instances:');
         let changed = false;
         const all = [...this._instances];
+
+        if (!containerManager.isAlive()) {
+            // No need to check anything if docker is not running
+            return;
+        }
+
         while (all.length > 0) {
             // Check a few instances at a time - docker doesn't like too many concurrent requests
             const chunk = all.splice(0, 30);
@@ -932,6 +943,11 @@ export class InstanceManager {
 
     private async getExternalStatus(instance: InstanceInfo): Promise<InstanceStatus> {
         if (instance.type === InstanceType.DOCKER) {
+            if (!containerManager.isAlive()) {
+                // Consider making this "unknown"
+                return InstanceStatus.STOPPED;
+            }
+
             const containerName = await getBlockInstanceContainerName(instance.systemId, instance.instanceId);
             const container = await containerManager.getContainerByName(containerName);
             if (!container) {
@@ -12,6 +12,7 @@ import { kapetaHeaders, KapetaRequest } from '../middleware/kapeta';
 import { stringBody } from '../middleware/stringBody';
 import { DesiredInstanceStatus, InstanceInfo, InstanceOwner, InstanceType, KapetaBodyRequest } from '../types';
 import { Task } from '../taskManager';
+import { containerManager } from '../containerManager';
 
 const router = Router();
 router.use('/', corsHandler);
@@ -76,12 +77,14 @@ router.post('/:systemId/:instanceId/start', async (req: Request, res: Response)
                 ok: true,
                 taskId: result.id,
             });
-        } else {
+        } else if (result) {
             res.status(202).send({
                 ok: true,
                 pid: result.pid,
                 type: result.type,
             });
+        } else {
+            res.status(500).send({ ok: false, error: 'Failed to start instance' });
         }
     } catch (e: any) {
         res.status(500).send({ ok: false, error: e.message });
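
Before this change the handler fell through to the 202 branch even when `result` was undefined, so a failed start would throw while reading `result.pid` and surface as an opaque error. With the `else if (result)` guard, a missing result now produces an explicit 500 with `ok: false`. A caller should therefore branch on the response body rather than assume success; a minimal sketch, where the `/instances` mount path and the base URL are assumptions:

```typescript
// Hedged sketch of a caller of the start endpoint; the base URL and mount path are assumptions.
interface StartResponse {
    ok: boolean;
    taskId?: string;
    pid?: number;
    type?: string;
    error?: string;
}

async function startInstance(baseUrl: string, systemId: string, instanceId: string): Promise<StartResponse> {
    const url = `${baseUrl}/instances/${encodeURIComponent(systemId)}/${encodeURIComponent(instanceId)}/start`;
    const res = await fetch(url, { method: 'POST' });
    const body = (await res.json()) as StartResponse;
    if (!res.ok || !body.ok) {
        // As of 0.76.2 a start that produces no result is reported as HTTP 500 with an error message.
        throw new Error(body.error ?? `Failed to start instance (HTTP ${res.status})`);
    }
    return body; // 202 with either { taskId } or { pid, type }
}
```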
@@ -26,10 +26,14 @@ class ServiceManager {
             this._systems = {};
         }
 
-        _.forEach(this._systems, (system) => {
+        _.forEach(this._systems, (system, systemId) => {
             _.forEach(system, (services) => {
                 _.forEach(services, (portInfo) => {
-                    clusterService.reservePort(portInfo.port);
+                    try {
+                        clusterService.reservePort(portInfo.port);
+                    } catch (e) {
+                        console.warn('Failed to reserve port', systemId, portInfo.port, e);
+                    }
                 });
             });
         });
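
This last hunk is the "port conflict cant prevent server start" fix: at startup `ServiceManager` replays previously persisted port reservations, and a single port that can no longer be reserved used to throw out of the nested `forEach` and keep the whole local cluster service from starting. The reservation is now wrapped in try/catch and the conflict is only logged together with the owning `systemId`. A minimal sketch of the failure mode, using a hypothetical `PortRegistry` in place of the real `clusterService`:

```typescript
// Hypothetical port registry -- illustrates why reservePort can throw on a duplicate reservation.
class PortRegistry {
    private reserved = new Set<number>();

    reservePort(port: number): void {
        if (this.reserved.has(port)) {
            // Before 0.76.2 an exception like this propagated out of ServiceManager's
            // startup loop and prevented the server from starting at all.
            throw new Error(`Port ${port} is already reserved`);
        }
        this.reserved.add(port);
    }
}

// With the fix, a conflict is logged and startup continues:
const registry = new PortRegistry();
for (const port of [40001, 40001, 40002]) {
    try {
        registry.reservePort(port);
    } catch (e) {
        console.warn('Failed to reserve port', port, e);
    }
}
```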