@kapeta/local-cluster-service 0.5.0 → 0.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/package.json +1 -1
- package/src/config/routes.js +8 -1
- package/src/configManager.js +5 -3
- package/src/containerManager.js +22 -4
- package/src/instanceManager.js +20 -6
- package/src/operatorManager.js +12 -2
- package/src/utils/BlockInstanceRunner.js +27 -11
package/CHANGELOG.md
CHANGED
@@ -1,3 +1,17 @@
+## [0.5.2](https://github.com/kapetacom/local-cluster-service/compare/v0.5.1...v0.5.2) (2023-06-06)
+
+
+### Bug Fixes
+
+* Use internal docker host when inside docker ([#28](https://github.com/kapetacom/local-cluster-service/issues/28)) ([3b0ae9d](https://github.com/kapetacom/local-cluster-service/commit/3b0ae9d7612ae54b38ec8e39f632932f8543206e))
+
+## [0.5.1](https://github.com/kapetacom/local-cluster-service/compare/v0.5.0...v0.5.1) (2023-06-06)
+
+
+### Bug Fixes
+
+* Improve starting and stopping local instances ([#27](https://github.com/kapetacom/local-cluster-service/issues/27)) ([83ff53a](https://github.com/kapetacom/local-cluster-service/commit/83ff53a31e98aa8984ff6a9a1e80ddb94653ce18))
+
 # [0.5.0](https://github.com/kapetacom/local-cluster-service/compare/v0.4.1...v0.5.0) (2023-06-02)
 
 
package/package.json
CHANGED
package/src/config/routes.js
CHANGED
@@ -86,6 +86,11 @@ router.get('/identity', async (req, res) => {
         instanceId: req.kapeta.instanceId
     };
 
+    if (!req.kapeta.blockRef) {
+        res.status(400).send({error: 'Missing required header "X-Kapeta-Block"'});
+        return;
+    }
+
     try {
 
         if (!identity.systemId ||
@@ -99,6 +104,7 @@ router.get('/identity', async (req, res) => {
 
         res.send(identity);
     } catch(err) {
+        console.warn('Failed to resolve identity', err);
        res.status(400).send({error: err.message});
    }
 });
@@ -128,7 +134,8 @@ router.get('/consumes/resource/:resourceType/:portType/:name', async (req, res)
        req.kapeta.instanceId,
        req.params.resourceType,
        req.params.portType,
-        req.params.name
+        req.params.name,
+        req.kapeta.environment
    );
 
    res.send(operatorInfo);
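With this change, the `/identity` endpoint rejects requests that omit the block reference header, and the `/consumes` route now forwards the caller's environment to the operator manager. A minimal sketch of a client call against the local cluster; the base URL and port are illustrative assumptions, while `X-Kapeta-Block` is the header named in the diff:

```js
// Illustrative client call; only the 'X-Kapeta-Block' header name is taken from the diff.
const BASE_URL = 'http://localhost:35100'; // assumed local-cluster-service address

const response = await fetch(`${BASE_URL}/identity`, {
    headers: {
        'X-Kapeta-Block': 'kapeta://myhandle/my-block:1.0.0', // required after this change
    },
});

if (response.status === 400) {
    // Returned when the block header is missing or identity resolution fails
    const { error } = await response.json();
    console.warn('Identity lookup failed:', error);
} else {
    console.log(await response.json());
}
```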
package/src/configManager.js
CHANGED
@@ -109,17 +109,19 @@ class ConfigManager {
 
     async verifyIdentity(blockRef, systemId, instanceId) {
         const planAssets = await assetManager.getPlans();
-
+        const systemUri = systemId ? parseKapetaUri(systemId) : null;
+        const blockUri = parseKapetaUri(blockRef);
         let found = false;
         planAssets.forEach((planAsset) => {
-            if (
+            if (systemUri &&
+                !parseKapetaUri(planAsset.ref).equals(systemUri)) {
                 //Skip plans that do not match systemid if provided
                 return;
             }
 
             planAsset.data.spec.blocks.forEach((blockInstance) => {
                 if (blockInstance.id === instanceId &&
-                    blockInstance.block.ref
+                    parseKapetaUri(blockInstance.block.ref).equals(blockUri)) {
                     found = true;
                 }
             });
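The identity check now compares parsed Kapeta URIs instead of raw ref strings, and plans are only skipped when a system id was actually provided. A rough sketch of the matching logic; `parseKapetaUri` is the helper already used in the diff (passed in here to keep the sketch self-contained), and the plan/block shapes mirror what the diff iterates over:

```js
// Sketch of the URI-based verification introduced here (shapes are illustrative).
function verifyIdentity(planAssets, blockRef, systemId, instanceId, parseKapetaUri) {
    const systemUri = systemId ? parseKapetaUri(systemId) : null;
    const blockUri = parseKapetaUri(blockRef);

    return planAssets.some((planAsset) => {
        // Skip plans that do not match the system id, if one was provided
        if (systemUri && !parseKapetaUri(planAsset.ref).equals(systemUri)) {
            return false;
        }
        return planAsset.data.spec.blocks.some(
            (blockInstance) =>
                blockInstance.id === instanceId &&
                parseKapetaUri(blockInstance.block.ref).equals(blockUri)
        );
    });
}
```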
package/src/containerManager.js
CHANGED
@@ -13,7 +13,10 @@ const LABEL_PORT_PREFIX = 'kapeta_port-';
 
 const NANO_SECOND = 1000000;
 const HEALTH_CHECK_INTERVAL = 2000;
-const HEALTH_CHECK_MAX =
+const HEALTH_CHECK_MAX = 30;
+const IMAGE_PULL_CACHE_TTL = 30 * 60 * 1000;
+const IMAGE_PULL_CACHE = {};
+
 
 const promisifyStream = (stream) =>
     new Promise((resolve, reject) => {
@@ -63,6 +66,8 @@ class ContainerManager {
                 // silently ignore bad configs
             }
         }
+
+        throw new Error('Could not connect to docker daemon. Please make sure docker is running and working.');
     }
 
     isAlive() {
@@ -116,12 +121,21 @@ class ContainerManager {
         });
     }
 
-    async pull(image) {
+    async pull(image, cacheForMS = IMAGE_PULL_CACHE_TTL) {
         let [imageName, tag] = image.split(/:/);
         if (!tag) {
             tag = 'latest';
         }
 
+        if (tag !== 'latest') {
+            if (IMAGE_PULL_CACHE[image]) {
+                const timeSince = Date.now() - IMAGE_PULL_CACHE[image];
+                if (timeSince < cacheForMS) {
+                    return;
+                }
+            }
+        }
+
         await this.docker()
             .image.create(
                 {},
@@ -131,6 +145,8 @@ class ContainerManager {
                 }
             )
             .then((stream) => promisifyStream(stream));
+
+        IMAGE_PULL_CACHE[image] = Date.now();
     }
 
     toDockerMounts(mounts) {
@@ -202,8 +218,6 @@ class ContainerManager {
 
         if (opts.health) {
             HealthCheck = this.toDockerHealth(opts.health);
-
-            console.log('Adding health check', HealthCheck);
         }
 
         const dockerContainer = await this.startContainer({
@@ -335,6 +349,10 @@ class ContainerInfo {
         this._container = dockerContainer;
     }
 
+    get native() {
+        return this._container;
+    }
+
     async isRunning() {
         const inspectResult = await this.getStatus();
 
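The new pull cache skips re-pulling pinned image tags that were fetched within the last 30 minutes, while `latest` is always pulled. A condensed sketch of that behavior with the actual docker-api call stubbed out; only the cache logic mirrors the diff:

```js
const IMAGE_PULL_CACHE_TTL = 30 * 60 * 1000; // 30 minutes
const IMAGE_PULL_CACHE = {};

// doPull stands in for the docker-api image.create call made by the real pull()
async function pull(image, doPull, cacheForMS = IMAGE_PULL_CACHE_TTL) {
    const tag = image.split(/:/)[1] ?? 'latest';

    // Pinned tags are treated as stable, so a recent pull can be reused
    if (tag !== 'latest' && IMAGE_PULL_CACHE[image]) {
        if (Date.now() - IMAGE_PULL_CACHE[image] < cacheForMS) {
            return;
        }
    }

    await doPull(image);
    IMAGE_PULL_CACHE[image] = Date.now();
}
```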
package/src/instanceManager.js
CHANGED
@@ -253,21 +253,23 @@ class InstanceManager {
             return [];
         }
 
-        let
+        let promises = [];
         let errors = [];
         for(let blockInstance of Object.values(plan.spec.blocks)) {
             try {
-
+                promises.push(this.createProcess(planRef, blockInstance.id));
             } catch (e) {
                 errors.push(e);
             }
         }
 
+        const settled = await Promise.allSettled(promises);
+
         if (errors.length > 0) {
             throw errors[0];
         }
 
-        return
+        return settled.map(p => p.value);
     }
 
     async _stopInstance(instance) {
@@ -283,7 +285,11 @@
         if (instance.type === 'docker') {
             const container = await containerManager.get(instance.pid);
             if (container) {
-
+                try {
+                    await container.stop();
+                } catch (e) {
+                    console.error('Failed to stop container', e);
+                }
             }
             return;
         }
@@ -294,11 +300,16 @@
     }
 
     async stopAllForPlan(planRef) {
+
         if (this._processes[planRef]) {
+            const promises = [];
+            console.log('Stopping all processes for plan', planRef);
             for(let instance of Object.values(this._processes[planRef])) {
-
+                promises.push(instance.stop());
             }
 
+            await Promise.all(promises);
+
             this._processes[planRef] = {};
         }
 
@@ -306,9 +317,12 @@
         const instancesForPlan = this._instances
             .filter(instance => instance.systemId === planRef);
 
+        const promises = [];
         for(let instance of instancesForPlan) {
-
+            promises.push(this._stopInstance(instance));
         }
+
+        await Promise.all(promises);
     }
 
     /**
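Starting and stopping block instances now happens concurrently: start promises are collected and settled together with `Promise.allSettled`, and stop calls are awaited as a group with `Promise.all`. A simplified sketch of the start path; `createProcess` stands in for the instance manager's own method:

```js
// Sketch of the concurrent start pattern used when starting all blocks in a plan
async function startAll(plan, planRef, createProcess) {
    const promises = [];
    const errors = [];

    for (const blockInstance of Object.values(plan.spec.blocks)) {
        try {
            promises.push(createProcess(planRef, blockInstance.id)); // kicked off, not awaited
        } catch (e) {
            errors.push(e); // synchronous failures are still collected
        }
    }

    // Wait for every start attempt to finish before deciding the outcome
    const settled = await Promise.allSettled(promises);

    if (errors.length > 0) {
        throw errors[0];
    }

    return settled.map((p) => p.value); // rejected entries yield undefined values
}
```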
package/src/operatorManager.js
CHANGED
@@ -71,7 +71,7 @@ class OperatorManager {
      * @param {string} name
      * @returns {Promise<{host: string, port: (*|string), type: *, protocol: *, credentials: *}>}
      */
-    async getResourceInfo(systemId, fromServiceId, resourceType, portType, name) {
+    async getResourceInfo(systemId, fromServiceId, resourceType, portType, name, environment) {
 
         const operator = this.getOperator(resourceType);
 
@@ -88,7 +88,7 @@
         const dbName = name + '_' + fromServiceId.replace(/[^a-z0-9]/gi, '');
 
         return {
-            host: 'localhost',
+            host: environment === 'docker' ? 'host.docker.internal' : 'localhost',
             port: portInfo.hostPort,
             type: portType,
             protocol: portInfo.protocol,
@@ -167,6 +167,16 @@
             });
         }
 
+        try {
+            if (operatorData.health) {
+                await containerManager.waitForHealthy(container.native);
+            } else {
+                await containerManager.waitForReady(container.native);
+            }
+        } catch (e) {
+            console.error(e.message);
+        }
+
         return container;
     }
 }
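Resource info now depends on where the consumer runs: a block inside a Docker container cannot reach the operator through `localhost` (that would be its own container), so the Docker host alias is returned instead. A small sketch of the selection; the wrapper function name is illustrative:

```js
// 'docker' is the environment value forwarded from the /consumes route
function resolveOperatorHost(environment) {
    return environment === 'docker' ? 'host.docker.internal' : 'localhost';
}

resolveOperatorHost('docker');    // 'host.docker.internal' - reachable from inside containers
resolveOperatorHost(undefined);   // 'localhost' - processes running directly on the host
```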
package/src/utils/BlockInstanceRunner.js
CHANGED
@@ -35,7 +35,7 @@ function getProvider(uri) {
 }
 
 function getProviderPorts(assetVersion) {
-    return assetVersion.definition?.spec?.providers
+    return assetVersion.definition?.spec?.providers?.map(provider => {
         return provider.spec?.port?.type
     }).filter(t => !!t) ?? [];
 }
@@ -168,7 +168,11 @@ class BlockInstanceRunner {
             );
         }
 
-
+        if (!assetVersion.definition.spec?.target?.kind) {
+            throw new Error('Missing target kind in block definition');
+        }
+
+        const kindUri = parseKapetaUri(assetVersion.definition.spec?.target?.kind);
 
         const targetVersion = getProvider(kindUri);
 
@@ -270,10 +274,14 @@
             ...dockerOpts
         });
 
-
-
-
-
+        try {
+            if (HealthCheck) {
+                await containerManager.waitForHealthy(container);
+            } else {
+                await containerManager.waitForReady(container);
+            }
+        } catch (e) {
+            logs.addLog(e.message, 'ERROR');
         }
 
         return this._handleContainer(container, logs);
@@ -404,7 +412,11 @@
             }
         });
 
-
+            try {
+                await containerManager.waitForReady(container);
+            } catch (e) {
+                logs.addLog(e.message, 'ERROR');
+            }
         }
 
         return this._handleContainer(container, logs);
@@ -539,10 +551,14 @@
             ]
         });
 
-
-
-
-
+            try {
+                if (HealthCheck) {
+                    await containerManager.waitForHealthy(container);
+                } else {
+                    await containerManager.waitForReady(container);
+                }
+            } catch (e) {
+                logs.addLog(e.message, 'ERROR');
             }
         }
 
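Both the runner and the operator manager now wait for the container after starting it: the Docker health check is used when one is configured, otherwise a plain readiness wait, and a failure is logged rather than aborting start-up. A small sketch of that pattern; the wrapper name is illustrative, while `waitForHealthy`, `waitForReady`, and `addLog` are the calls shown in the diff:

```js
// Sketch of the wait-and-log pattern added around container start-up
async function awaitContainer(containerManager, container, healthCheck, logs) {
    try {
        if (healthCheck) {
            await containerManager.waitForHealthy(container);
        } else {
            await containerManager.waitForReady(container);
        }
    } catch (e) {
        // Start-up continues; the failure is surfaced in the instance logs
        logs.addLog(e.message, 'ERROR');
    }
}
```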