@kapeta/local-cluster-service 0.3.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/package.json +1 -1
- package/src/config/routes.js +1 -3
- package/src/configManager.js +1 -1
- package/src/containerManager.js +34 -3
- package/src/instanceManager.js +1 -0
- package/src/utils/BlockInstanceRunner.js +153 -63
package/CHANGELOG.md
CHANGED
@@ -1,3 +1,10 @@
+# [0.4.0](https://github.com/kapetacom/local-cluster-service/compare/v0.3.0...v0.4.0) (2023-06-01)
+
+
+### Features
+
+* Change to always run local code in docker container ([#25](https://github.com/kapetacom/local-cluster-service/issues/25)) ([6e4021e](https://github.com/kapetacom/local-cluster-service/commit/6e4021e67968467555f1043f2972fc7a877aa3b7))
+
 # [0.3.0](https://github.com/kapetacom/local-cluster-service/compare/v0.2.1...v0.3.0) (2023-05-08)


package/package.json
CHANGED
package/src/config/routes.js
CHANGED
package/src/configManager.js
CHANGED
@@ -92,7 +92,7 @@ class ConfigManager {
                 throw new Error(`No uses of block "${blockRef}" was found in plan: "${systemId}"`)
             }

-            throw new Error(`No uses of block "${blockRef}" was found any known plan`);
+            throw new Error(`No uses of block "${blockRef}" was found in any known plan`);
         }

         if (matchingIdentities.length > 1) {
package/src/containerManager.js
CHANGED
@@ -227,12 +227,37 @@ class ContainerManager {

     async startContainer(opts) {
         const dockerContainer = await this.docker().container.create(opts);
-
         await dockerContainer.start();
-
         return dockerContainer;
     }

+    async waitForReady(container, attempt) {
+        if (!attempt) {
+            attempt = 0;
+        }
+
+        if (attempt >= HEALTH_CHECK_MAX) {
+            throw new Error(
+                'Container did not become ready within the timeout'
+            );
+        }
+
+        if (await this._isReady(container)) {
+            return;
+        }
+
+        return new Promise((resolve, reject) => {
+            setTimeout(async () => {
+                try {
+                    await this.waitForReady(container, attempt + 1);
+                    resolve();
+                } catch (err) {
+                    reject(err);
+                }
+            }, HEALTH_CHECK_INTERVAL);
+        });
+    }
+
     async waitForHealthy(container, attempt) {
         if (!attempt) {
             attempt = 0;
@@ -260,6 +285,13 @@
         });
     }

+    async _isReady(container) {
+        const info = await container.status();
+        if (info?.data?.State?.Status === 'exited') {
+            throw new Error('Container exited unexpectedly');
+        }
+        return info?.data?.State?.Running;
+    }
     async _isHealthy(container) {
         const info = await container.status();
         return info?.data?.State?.Health?.Status === 'healthy';
@@ -278,7 +310,6 @@
             await dockerContainer.status();
         } catch (err) {
             //Ignore
-            console.log('Container not available - creating it: %s', name);
             dockerContainer = null;
         }

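The new `waitForReady` method gives `ContainerManager` a readiness check for containers that have no Docker healthcheck: it polls `_isReady` (which inspects `State.Running` and throws if the container has exited) on an interval until the attempt limit is reached. A minimal usage sketch, assuming the existing `startContainer` wrapper and the module's `HEALTH_CHECK_MAX`/`HEALTH_CHECK_INTERVAL` constants:

```js
// Illustrative only - not part of the diff.
// Create and start a container, then block until Docker reports it as running.
async function startAndWaitForRunning(containerManager, createOpts) {
    const container = await containerManager.startContainer(createOpts);

    // Polls container.status() until State.Running is true, throwing if the
    // container exits or the retry budget (HEALTH_CHECK_MAX) is exhausted.
    await containerManager.waitForReady(container);

    return container;
}
```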
package/src/instanceManager.js
CHANGED
package/src/utils/BlockInstanceRunner.js
CHANGED
@@ -27,6 +27,19 @@ const DOCKER_ENV_VARS = [
 ]


+function getProvider(uri) {
+    return ClusterConfig.getProviderDefinitions().find(provider => {
+        const ref = `${provider.definition.metadata.name}:${provider.version}`
+        return parseKapetaUri(ref).id === uri.id;
+    });
+}
+
+function getProviderPorts(assetVersion) {
+    return assetVersion.definition?.spec?.providers.map(provider => {
+        return provider.spec?.port?.type
+    }).filter(t => !!t) ?? [];
+}
+
 class BlockInstanceRunner {
     /**
      * @param {string} [planReference]
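`getProvider` and `getProviderPorts` lift two lookups out of `_execute` (see the hunks below) into reusable module-level helpers: the first resolves a kind URI to an installed provider definition by comparing `name:version` ids, the second collects the port types declared by a block's providers. A rough usage sketch, with a hypothetical kind reference:

```js
// Illustrative only - identifiers come from the module above; the ref is hypothetical.
const kindUri = parseKapetaUri('kapeta/block-type-service:1.0.0');

// Matches against ClusterConfig.getProviderDefinitions() by "<name>:<version>" id.
const providerVersion = getProvider(kindUri);

// For a block definition (assetVersion), returns e.g. ['rest', 'web'] based on
// definition.spec.providers[*].spec.port.type.
const portTypes = getProviderPorts(assetVersion);
```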
@@ -66,7 +79,7 @@ class BlockInstanceRunner {
      * @private
      */
     async _execute(blockInstance) {
-        const env =
+        const env = {};

         if (this._systemId) {
             env[KAPETA_SYSTEM_ID] = this._systemId;
@@ -86,23 +99,20 @@
             blockUri.version = 'local';
         }

-        const
+        const assetVersion = ClusterConfig.getDefinitions().find(definitions => {
             const ref = `${definitions.definition.metadata.name}:${definitions.version}`
             return parseKapetaUri(ref).id === blockUri.id;
         });

-        if (!
+        if (!assetVersion) {
             throw new Error(`Block definition not found: ${blockUri.id}`);
         }

-        const kindUri = parseKapetaUri(
+        const kindUri = parseKapetaUri(assetVersion.definition.kind);

-        const
-            const ref = `${provider.definition.metadata.name}:${provider.version}`
-            return parseKapetaUri(ref).id === kindUri.id;
-        });
+        const providerVersion = getProvider(kindUri);

-        if (!
+        if (!providerVersion) {
             throw new Error(`Kind not found: ${kindUri.id}`);
         }

@@ -111,15 +121,14 @@
          */
         let processDetails;

-        if (
-            processDetails = await this._startOperatorProcess(blockInstance, blockUri,
+        if (providerVersion.definition.kind === KIND_BLOCK_TYPE_OPERATOR) {
+            processDetails = await this._startOperatorProcess(blockInstance, blockUri, providerVersion, env);
         } else {
             //We need a port type to know how to connect to the block consistently
-            const portTypes =
-
-            }).filter(t => !!t) ?? [];
+            const portTypes = getProviderPorts(assetVersion);
+
             if (blockUri.version === 'local') {
-                processDetails = await this._startLocalProcess(blockInstance, blockUri, env);
+                processDetails = await this._startLocalProcess(blockInstance, blockUri, env, assetVersion);
             } else {
                 processDetails = await this._startDockerProcess(blockInstance, blockUri, env);
             }
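With this change `_execute` dispatches on the resolved provider kind: operator blocks still go to `_startOperatorProcess`, regular blocks at version `local` now go through the Docker-based `_startLocalProcess` (passing the resolved `assetVersion`), and published versions continue through `_startDockerProcess`. Condensed, the decision reads roughly as:

```js
// Simplified restatement of the dispatch above, not the literal source.
if (providerVersion.definition.kind === KIND_BLOCK_TYPE_OPERATOR) {
    processDetails = await this._startOperatorProcess(blockInstance, blockUri, providerVersion, env);
} else if (blockUri.version === 'local') {
    processDetails = await this._startLocalProcess(blockInstance, blockUri, env, assetVersion);
} else {
    processDetails = await this._startDockerProcess(blockInstance, blockUri, env);
}
```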
@@ -141,10 +150,11 @@
      * @param {BlockInstanceInfo} blockInstance
      * @param {BlockInfo} blockInfo
      * @param {EnvironmentVariables} env
+     * @param assetVersion
      * @return {ProcessDetails}
      * @private
      */
-    _startLocalProcess(blockInstance, blockInfo, env) {
+    async _startLocalProcess(blockInstance, blockInfo, env, assetVersion) {
         const baseDir = ClusterConfig.getRepositoryAssetPath(
             blockInfo.handle,
             blockInfo.name,
@@ -158,61 +168,125 @@
             );
         }

-        const
-
-
-
-
-        )
+        const kindUri = parseKapetaUri(assetVersion.definition.spec.target.kind);
+
+        const targetVersion = getProvider(kindUri);
+
+        if (!targetVersion) {
+            throw new Error(`Target not found: ${kindUri.id}`);
         }

+        const localContainer = targetVersion.definition.spec.local;
+
+        if (!localContainer) {
+            throw new Error(`Missing local container information from target: ${kindUri.id}`);
+        }
+
+        const dockerImage = localContainer.image;
+        if (!dockerImage) {
+            throw new Error(`Missing docker image information: ${JSON.stringify(localContainer)}`);
+        }
+
+        const containerName = `kapeta-block-instance-${blockInstance.id}`;
         const logs = new LogData();
-
-
-
-            detached: true,
-            stdio: [
-                'pipe', 'pipe', 'pipe'
-            ]
-        });
+        logs.addLog(`Starting block ${blockInstance.ref}`);
+        let container = await containerManager.getContainerByName(containerName);
+        console.log('Starting dev container', containerName);

-
-
-
-
-
-
-
-
-            pid: childProcess.pid,
-            output: outputEvents,
-            stderr: childProcess.stderr,
-            logs: () => {
-                return logs.getLogs();
-            },
-            stop: () => {
-                childProcess.kill('SIGTERM');
+        if (container) {
+            console.log(`Container already exists. Deleting...`);
+            try {
+                await container.delete({
+                    force: true
+                })
+            } catch (e) {
+                throw new Error('Failed to delete existing container: ' + e.message);
             }
-
+            container = null;
+        }

-
-
-
-
+        logs.addLog(`Creating new container for block: ${containerName}`);
+        console.log('Creating new dev container', containerName, dockerImage);
+        await containerManager.pull(dockerImage);
+
+        const startCmd = localContainer.handlers?.onCreate ? localContainer.handlers.onCreate : '';
+        const dockerOpts = localContainer.options ?? {};
+        const homeDir = localContainer.homeDir ? localContainer.homeDir : '/root';
+        const workingDir = localContainer.workingDir ? localContainer.workingDir : '/workspace';
+
+        const ExposedPorts = {};
+        const addonEnv = {};
+        const PortBindings = {};
+
+        const portTypes = getProviderPorts(assetVersion);
+        let port = 80;
+        const promises = portTypes
+            .map(async (portType) => {
+                const publicPort = await serviceManager.ensureServicePort(this._systemId, blockInstance.id, portType);
+                const thisPort = port++; //TODO: Not sure how we should handle multiple ports or non-HTTP ports
+                const dockerPort = `${thisPort}/tcp`;
+                ExposedPorts[dockerPort] = {};
+                addonEnv[`KAPETA_LOCAL_SERVER_PORT_${portType.toUpperCase()}`] = thisPort;
+
+                PortBindings[dockerPort] = [
+                    {
+                        HostIp: "127.0.0.1", //No public
+                        HostPort: `${publicPort}`
+                    }
+                ];
+            });

-
-            logs.addLog(data.toString());
-            outputEvents.emit('data', data);
-        });
+        await Promise.all(promises);

-
-
-
+        let HealthCheck = undefined;
+        if (localContainer.healthcheck) {
+            HealthCheck = containerManager.toDockerHealth({cmd: localContainer.healthcheck});
+        }
+
+        container = await containerManager.startContainer({
+            Image: dockerImage,
+            name: containerName,
+            WorkingDir: workingDir,
+            Labels: {
+                'instance': blockInstance.id
+            },
+            HealthCheck,
+            ExposedPorts,
+            Cmd: startCmd ? startCmd.split(/\s+/g) : [],
+            Env: [
+                ...DOCKER_ENV_VARS,
+                ...Object.entries({
+                    ...env,
+                    ...addonEnv
+                }).map(([key, value]) => `${key}=${value}`)
+            ],
+            HostConfig: {
+                Binds: [
+                    `${ClusterConfig.getKapetaBasedir()}:${homeDir}/.kapeta`,
+                    `${baseDir}:${workingDir}` //We mount
+                ],
+                PortBindings
+            },
+            ...dockerOpts
         });

-
+        if (HealthCheck) {
+            await containerManager.waitForHealthy(container);
+        } else {
+            await containerManager.waitForReady(container);
+        }
+
+        return this._handleContainer(container, logs);
     }

+    /**
+     *
+     * @param container
+     * @param logs
+     * @param deleteOnExit
+     * @return {Promise<ProcessDetails>}
+     * @private
+     */
     async _handleContainer(container, logs , deleteOnExit = false) {
         const logStream = await container.logs({
             follow: true,
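`_startLocalProcess` no longer spawns a local child process; it runs the block's local source inside the dev image declared by the target's `spec.local`, mounting the Kapeta base dir and the block's source directory, and mapping each provider port type to a host port reserved via `serviceManager.ensureServicePort`. For a block with a single `rest` provider, the create options end up shaped roughly like this (illustrative values; the image tag, ports and paths are hypothetical):

```js
// Sketch of the options built above for one 'rest' port type - not literal output.
const exampleCreateOpts = {
    Image: 'kapeta/node-dev:latest',                 // localContainer.image (hypothetical tag)
    name: 'kapeta-block-instance-<instance-id>',
    WorkingDir: '/workspace',
    Env: [
        'KAPETA_LOCAL_SERVER_PORT_REST=80',          // in-container port handed to the block
        // ...plus DOCKER_ENV_VARS and the KAPETA_* variables assembled in _execute
    ],
    ExposedPorts: { '80/tcp': {} },
    HostConfig: {
        Binds: [
            '/home/user/.kapeta:/root/.kapeta',      // ClusterConfig.getKapetaBasedir() -> homeDir/.kapeta
            '/path/to/block:/workspace'              // baseDir -> workingDir (the local source code)
        ],
        PortBindings: {
            '80/tcp': [{ HostIp: '127.0.0.1', HostPort: '40004' }]  // bound to localhost only
        }
    }
};
```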
@@ -315,22 +389,36 @@
             container = await containerManager.startContainer({
                 Image: dockerImage,
                 name: containerName,
-                Binds: [
-                    `${ClusterConfig.getKapetaBasedir()}:${ClusterConfig.getKapetaBasedir()}`
-                ],
                 Labels: {
                     'instance': blockInstance.id
                 },
                 Env: [
                     ...DOCKER_ENV_VARS,
                     ...Object.entries(env).map(([key, value]) => `${key}=${value}`)
-                ]
+                ],
+                HostConfig: {
+                    Binds: [
+                        `${ClusterConfig.getKapetaBasedir()}:${ClusterConfig.getKapetaBasedir()}`
+                    ],
+
+                }
             });
+
+            await containerManager.waitForReady(container);
         }

         return this._handleContainer(container, logs);
     }

+    /**
+     *
+     * @param blockInstance
+     * @param blockUri
+     * @param providerDefinition
+     * @param {{[key:string]:string}} env
+     * @return {Promise<ProcessDetails>}
+     * @private
+     */
     async _startOperatorProcess(blockInstance, blockUri, providerDefinition, env) {
         const {assetFile} = ClusterConfig.getRepositoryAssetInfoPath(
             blockUri.handle,
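In `_startDockerProcess` the bind mounts move under `HostConfig`, which is where the Docker Engine create-container API expects them, and the runner now waits for the container to become ready before returning. The corrected option shape, in brief:

```js
// Minimal sketch of the corrected structure - values as in the hunk above.
const createOpts = {
    Image: dockerImage,
    name: containerName,
    Env: [...DOCKER_ENV_VARS, ...Object.entries(env).map(([key, value]) => `${key}=${value}`)],
    HostConfig: {
        // Binds is a HostConfig property in the Docker Engine API, not a top-level one.
        Binds: [`${ClusterConfig.getKapetaBasedir()}:${ClusterConfig.getKapetaBasedir()}`]
    }
};
```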
@@ -453,6 +541,8 @@

         if (HealthCheck) {
             await containerManager.waitForHealthy(container);
+        } else {
+            await containerManager.waitForReady(container);
         }
     }
