@kapeta/local-cluster-service 0.9.1 → 0.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/dist/cjs/src/containerManager.d.ts +4 -8
- package/dist/cjs/src/containerManager.js +69 -64
- package/dist/cjs/src/instanceManager.js +4 -2
- package/dist/cjs/src/operatorManager.js +40 -25
- package/dist/cjs/src/utils/BlockInstanceRunner.d.ts +1 -0
- package/dist/cjs/src/utils/BlockInstanceRunner.js +77 -169
- package/dist/esm/src/containerManager.d.ts +4 -8
- package/dist/esm/src/containerManager.js +69 -64
- package/dist/esm/src/instanceManager.js +4 -2
- package/dist/esm/src/operatorManager.js +42 -27
- package/dist/esm/src/utils/BlockInstanceRunner.d.ts +1 -0
- package/dist/esm/src/utils/BlockInstanceRunner.js +77 -169
- package/package.json +1 -1
- package/src/containerManager.ts +69 -73
- package/src/instanceManager.ts +8 -2
- package/src/operatorManager.ts +52 -26
- package/src/utils/BlockInstanceRunner.ts +86 -176
package/CHANGELOG.md
CHANGED
@@ -1,3 +1,10 @@
+# [0.10.0](https://github.com/kapetacom/local-cluster-service/compare/v0.9.1...v0.10.0) (2023-07-26)
+
+
+### Features
+
+* Auto-reuse containers ([#50](https://github.com/kapetacom/local-cluster-service/issues/50)) ([ecb396b](https://github.com/kapetacom/local-cluster-service/commit/ecb396b541f9184302e0681f4803d2404336138e))
+
 ## [0.9.1](https://github.com/kapetacom/local-cluster-service/compare/v0.9.0...v0.9.1) (2023-07-26)


@@ -41,6 +41,7 @@ interface Health {
     timeout?: number;
     retries?: number;
 }
+export declare const CONTAINER_LABEL_PORT_PREFIX = "kapeta_port-";
 export declare const HEALTH_CHECK_TIMEOUT: number;
 declare class ContainerManager {
     private _docker;
@@ -56,7 +57,7 @@ declare class ContainerManager {
     ping(): Promise<void>;
     docker(): Docker;
     getContainerByName(containerName: string): Promise<ContainerInfo | undefined>;
-    pull(image: string, cacheForMS?: number): Promise<
+    pull(image: string, cacheForMS?: number): Promise<boolean>;
     toDockerMounts(mounts: StringMap): DockerMounts[];
     toDockerHealth(health: Health): {
         Test: string[];
@@ -64,13 +65,8 @@ declare class ContainerManager {
         Timeout: number;
         Retries: number;
     };
-
-
-        mounts: {};
-        env: {};
-        cmd: string;
-        health: Health;
-    }): Promise<ContainerInfo>;
+    private applyHash;
+    ensureContainer(opts: any): Promise<Container>;
     startContainer(opts: any): Promise<Container>;
     waitForReady(container: Container, attempt?: number): Promise<void>;
     waitForHealthy(container: Container, attempt?: number): Promise<void>;
@@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
     return (mod && mod.__esModule) ? mod : { "default": mod };
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.containerManager = exports.toLocalBindVolume = exports.getExtraHosts = exports.ContainerInfo = exports.HEALTH_CHECK_TIMEOUT = void 0;
+exports.containerManager = exports.toLocalBindVolume = exports.getExtraHosts = exports.ContainerInfo = exports.HEALTH_CHECK_TIMEOUT = exports.CONTAINER_LABEL_PORT_PREFIX = void 0;
 const path_1 = __importDefault(require("path"));
 const storageService_1 = require("./storageService");
 const os_1 = __importDefault(require("os"));
@@ -12,9 +12,9 @@ const fs_extra_1 = __importDefault(require("fs-extra"));
 const node_docker_api_1 = require("node-docker-api");
 const nodejs_utils_1 = require("@kapeta/nodejs-utils");
 const local_cluster_config_1 = __importDefault(require("@kapeta/local-cluster-config"));
-const utils_1 = require("./utils/utils");
 const node_uuid_1 = __importDefault(require("node-uuid"));
-const
+const md5_1 = __importDefault(require("md5"));
+exports.CONTAINER_LABEL_PORT_PREFIX = 'kapeta_port-';
 const NANO_SECOND = 1000000;
 const HEALTH_CHECK_INTERVAL = 3000;
 const HEALTH_CHECK_MAX = 20;
@@ -147,22 +147,19 @@ class ContainerManager {
         if (!tag) {
             tag = 'latest';
         }
-        if (
-
-
-
-            return;
-        }
-        }
-        const imageTagList = (await this.docker().image.list())
-            .map((image) => image.data)
-            .filter((imageData) => !!imageData.RepoTags)
-            .map((imageData) => imageData.RepoTags);
-        if (imageTagList.some((imageTags) => imageTags.indexOf(image) > -1)) {
-            console.log('Image found: %s', image);
-            return;
+        if (IMAGE_PULL_CACHE[image]) {
+            const timeSince = Date.now() - IMAGE_PULL_CACHE[image];
+            if (timeSince < cacheForMS) {
+                return false;
             }
-
+        }
+        const imageTagList = (await this.docker().image.list())
+            .map((image) => image.data)
+            .filter((imageData) => !!imageData.RepoTags)
+            .map((imageData) => imageData.RepoTags);
+        if (imageTagList.some((imageTags) => imageTags.indexOf(image) > -1)) {
+            console.log('Image found: %s', image);
+            return false;
         }
         console.log('Pulling image: %s', image);
         await this.docker()
@@ -173,6 +170,7 @@ class ContainerManager {
             .then((stream) => promisifyStream(stream));
         IMAGE_PULL_CACHE[image] = Date.now();
         console.log('Image pulled: %s', image);
+        return true;
     }
     toDockerMounts(mounts) {
         const Mounts = [];
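For readers skimming the diff: the reworked `pull()` above keeps a module-level map of pull timestamps (`IMAGE_PULL_CACHE`) and now reports whether a pull actually happened, which the reuse logic further down relies on. The following is a hedged TypeScript sketch of that caching pattern, not the shipped code; `dockerPull` and `localTags` are stand-ins for the Docker API calls the real method makes.

```typescript
// Module-level cache of when each image was last pulled (mirrors IMAGE_PULL_CACHE in the diff).
const IMAGE_PULL_CACHE: Record<string, number> = {};

// Returns true only when an image was actually pulled, so callers can decide to recreate containers.
async function pullIfStale(
    image: string,
    dockerPull: (image: string) => Promise<void>, // stand-in for the real Docker pull call
    localTags: string[], // stand-in for the locally available RepoTags
    cacheForMS: number = 2 * 60 * 60 * 1000
): Promise<boolean> {
    const lastPull = IMAGE_PULL_CACHE[image];
    if (lastPull && Date.now() - lastPull < cacheForMS) {
        return false; // pulled recently enough - skip
    }
    if (localTags.includes(image)) {
        return false; // image already present locally - skip
    }
    await dockerPull(image);
    IMAGE_PULL_CACHE[image] = Date.now();
    return true;
}
```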
@@ -195,51 +193,58 @@ class ContainerManager {
             Retries: health.retries || 10,
         };
     }
-
-
-
-        const Labels = {
-            kapeta: 'true',
-        };
-        await this.pull(image);
-        const bindHost = (0, utils_1.getBindHost)();
-        const ExposedPorts = {};
-        lodash_1.default.forEach(opts.ports, (portInfo, containerPort) => {
-            ExposedPorts['' + containerPort] = {};
-            PortBindings['' + containerPort] = [
-                {
-                    HostPort: '' + portInfo.hostPort,
-                    HostIp: bindHost,
-                },
-            ];
-            Labels[LABEL_PORT_PREFIX + portInfo.hostPort] = portInfo.type;
-        });
-        const Mounts = this.toDockerMounts(opts.mounts);
-        lodash_1.default.forEach(opts.env, (value, name) => {
-            Env.push(name + '=' + value);
-        });
-        let HealthCheck = undefined;
-        if (opts.health) {
-            HealthCheck = this.toDockerHealth(opts.health);
-        }
-        const dockerContainer = await this.startContainer({
-            name: name,
-            Image: image,
-            Hostname: name + '.kapeta',
-            Labels,
-            Cmd: opts.cmd,
-            ExposedPorts,
-            Env,
-            HealthCheck,
-            HostConfig: {
-                PortBindings,
-                Mounts,
-            },
-        });
-        if (opts.health) {
-            await this.waitForHealthy(dockerContainer);
+    applyHash(dockerOpts) {
+        if (dockerOpts?.Labels?.HASH) {
+            delete dockerOpts.Labels.HASH;
         }
-
+        const hash = (0, md5_1.default)(JSON.stringify(dockerOpts));
+        if (!dockerOpts.Labels) {
+            dockerOpts.Labels = {};
+        }
+        dockerOpts.Labels.HASH = hash;
+    }
+    async ensureContainer(opts) {
+        let imagePulled = false;
+        try {
+            imagePulled = await this.pull(opts.Image);
+        }
+        catch (e) {
+            console.warn('Failed to pull image. Continuing...', e);
+        }
+        this.applyHash(opts);
+        if (!opts.name) {
+            console.log('Starting unnamed container: %s', opts.Image);
+            return this.startContainer(opts);
+        }
+        const containerInfo = await this.getContainerByName(opts.name);
+        if (imagePulled) {
+            console.log('New version of image was pulled: %s', opts.Image);
+        }
+        else {
+            // If image was pulled always recreate
+            if (!containerInfo) {
+                console.log('Starting new container: %s', opts.name);
+                return this.startContainer(opts);
+            }
+            const containerData = containerInfo.native.data;
+            if (containerData?.Labels?.HASH === opts.Labels.HASH) {
+                if (!(await containerInfo.isRunning())) {
+                    console.log('Starting previously created container: %s', opts.name);
+                    await containerInfo.start();
+                }
+                else {
+                    console.log('Previously created container already running: %s', opts.name);
+                }
+                return containerInfo.native;
+            }
+        }
+        if (containerInfo) {
+            // Remove the container and start a new one
+            console.log('Replacing previously created container: %s', opts.name);
+            await containerInfo.remove({ force: true });
+        }
+        console.log('Starting new container: %s', opts.name);
+        return this.startContainer(opts);
     }
     async startContainer(opts) {
         const extraHosts = getExtraHosts(this._version);
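The new `applyHash()`/`ensureContainer()` pair above is the core of the "Auto-reuse containers" feature from the changelog: the Docker create options are hashed (md5 of the JSON, with any previous hash excluded) and stored in a `HASH` label, and an existing named container is reused only when that stored hash matches and no newer image was just pulled. A minimal sketch of that decision follows; it uses Node's `crypto` instead of the `md5` package purely so the example is dependency-free, and the `decide()` helper is illustrative rather than an API of this package.

```typescript
import { createHash } from 'crypto';

// Shape of the Docker create options we hash (trimmed to what the sketch needs).
interface CreateOpts {
    name?: string;
    Image: string;
    Labels?: Record<string, string>;
    [key: string]: unknown;
}

// Hash everything except a previous HASH label, then store the result as a label.
function applyHash(opts: CreateOpts): void {
    if (opts.Labels && opts.Labels.HASH) {
        delete opts.Labels.HASH;
    }
    const hash = createHash('md5').update(JSON.stringify(opts)).digest('hex');
    opts.Labels = { ...(opts.Labels ?? {}), HASH: hash };
}

type Decision = 'reuse' | 'recreate' | 'create';

// Reuse only when the stored hash matches and no newer image arrived; otherwise recreate.
function decide(opts: CreateOpts, existingHash: string | undefined, imagePulled: boolean): Decision {
    if (!existingHash) {
        return 'create';
    }
    if (!imagePulled && existingHash === opts.Labels?.HASH) {
        return 'reuse';
    }
    return 'recreate';
}
```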
@@ -417,10 +422,10 @@ class ContainerInfo {
         const portTypes = {};
         const ports = {};
         lodash_1.default.forEach(inspectResult.Config.Labels, (portType, name) => {
-            if (!name.startsWith(
+            if (!name.startsWith(exports.CONTAINER_LABEL_PORT_PREFIX)) {
                 return;
             }
-            const hostPort = name.substr(
+            const hostPort = name.substr(exports.CONTAINER_LABEL_PORT_PREFIX.length);
             portTypes[hostPort] = portType;
         });
         lodash_1.default.forEach(inspectResult.HostConfig.PortBindings, (portBindings, containerPortSpec) => {
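The `CONTAINER_LABEL_PORT_PREFIX` constant ties these hunks together: port mappings are written onto the container as `kapeta_port-<hostPort>` labels at create time and read back from the inspect output later, so a reused container still reports its port types. An illustrative sketch of the round trip is below; the prefix and label shape come from the diff, while the helper names are ours.

```typescript
// Prefix and label shape come from the diff; the helper functions are illustrative.
const CONTAINER_LABEL_PORT_PREFIX = 'kapeta_port-';

interface PortInfo {
    hostPort: number;
    type: string; // port type, as in the diff's portInfo.type
}

// At create time: one label per host port, value is the port type.
function encodePortLabels(ports: PortInfo[]): Record<string, string> {
    const labels: Record<string, string> = { kapeta: 'true' };
    for (const portInfo of ports) {
        labels[CONTAINER_LABEL_PORT_PREFIX + portInfo.hostPort] = portInfo.type;
    }
    return labels;
}

// At inspect time: recover the hostPort -> portType mapping from the labels.
function decodePortLabels(labels: Record<string, string>): Record<string, string> {
    const portTypes: Record<string, string> = {};
    for (const [name, portType] of Object.entries(labels)) {
        if (!name.startsWith(CONTAINER_LABEL_PORT_PREFIX)) {
            continue;
        }
        portTypes[name.substring(CONTAINER_LABEL_PORT_PREFIX.length)] = portType;
    }
    return portTypes;
}
```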
@@ -453,7 +453,8 @@ class InstanceManager {
                 changed = true;
             }
         }
-        if (instance.desiredStatus === types_1.DesiredInstanceStatus.RUN &&
+        if (instance.desiredStatus === types_1.DesiredInstanceStatus.RUN &&
+            [types_1.InstanceStatus.STOPPED, types_1.InstanceStatus.FAILED, types_1.InstanceStatus.STOPPING].includes(newStatus)) {
             //If the instance is stopped but we want it to run, start it
             try {
                 await this.start(instance.systemId, instance.instanceId);
@@ -463,7 +464,8 @@ class InstanceManager {
             }
             return;
         }
-        if (instance.desiredStatus === types_1.DesiredInstanceStatus.STOP &&
+        if (instance.desiredStatus === types_1.DesiredInstanceStatus.STOP &&
+            [types_1.InstanceStatus.READY, types_1.InstanceStatus.STARTING, types_1.InstanceStatus.UNHEALTHY].includes(newStatus)) {
             //If the instance is running but we want it to stop, stop it
             try {
                 await this.stop(instance.systemId, instance.instanceId);
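Both instanceManager hunks widen the status lists used when reconciling the desired state against the observed one: a RUN instance is (re)started from STOPPED, FAILED or STOPPING, and a STOP instance is stopped from READY, STARTING or UNHEALTHY. A hedged sketch of that reconciliation step, where the enum string values and the `actions` callbacks are illustrative stand-ins:

```typescript
// Status names mirror the diff; the string values and callbacks are illustrative.
enum DesiredInstanceStatus {
    RUN = 'run',
    STOP = 'stop',
}

enum InstanceStatus {
    STOPPED = 'stopped',
    FAILED = 'failed',
    STOPPING = 'stopping',
    READY = 'ready',
    STARTING = 'starting',
    UNHEALTHY = 'unhealthy',
}

interface Actions {
    start: () => Promise<void>;
    stop: () => Promise<void>;
}

// Compare what we want with what we observed and take the corrective action.
async function reconcile(desired: DesiredInstanceStatus, actual: InstanceStatus, actions: Actions): Promise<void> {
    const startable = [InstanceStatus.STOPPED, InstanceStatus.FAILED, InstanceStatus.STOPPING];
    const stoppable = [InstanceStatus.READY, InstanceStatus.STARTING, InstanceStatus.UNHEALTHY];
    if (desired === DesiredInstanceStatus.RUN && startable.includes(actual)) {
        await actions.start(); // stopped, failed or stopping, but we want it running
        return;
    }
    if (desired === DesiredInstanceStatus.STOP && stoppable.includes(actual)) {
        await actions.stop(); // running (or on its way up), but we want it stopped
    }
}
```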
@@ -13,6 +13,7 @@ const containerManager_1 = require("./containerManager");
 const fs_extra_1 = __importDefault(require("fs-extra"));
 const definitionsManager_1 = require("./definitionsManager");
 const utils_1 = require("./utils/utils");
+const lodash_1 = __importDefault(require("lodash"));
 const KIND_OPERATOR = 'core/resource-type-operator';
 class Operator {
     _data;
@@ -138,32 +139,46 @@ class OperatorManager {
         }
         const mounts = containerManager_1.containerManager.createMounts(resourceType, operatorData.mounts);
         const containerName = containerBaseName + '-' + (0, md5_1.default)(nameParts.join('_'));
-
-        const
-
-
-        }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        const PortBindings = {};
+        const Env = [];
+        const Labels = {
+            kapeta: 'true',
+        };
+        const bindHost = (0, utils_1.getBindHost)();
+        const ExposedPorts = {};
+        lodash_1.default.forEach(ports, (portInfo, containerPort) => {
+            ExposedPorts['' + containerPort] = {};
+            PortBindings['' + containerPort] = [
+                {
+                    HostPort: '' + portInfo.hostPort,
+                    HostIp: bindHost,
+                },
+            ];
+            Labels[containerManager_1.CONTAINER_LABEL_PORT_PREFIX + portInfo.hostPort] = portInfo.type;
+        });
+        const Mounts = containerManager_1.containerManager.toDockerMounts(mounts);
+        lodash_1.default.forEach(operatorData.env, (value, name) => {
+            Env.push(name + '=' + value);
+        });
+        let HealthCheck = undefined;
+        if (operatorData.health) {
+            HealthCheck = containerManager_1.containerManager.toDockerHealth(operatorData.health);
         }
-
+        const container = await containerManager_1.containerManager.ensureContainer({
+            name: containerName,
+            Image: operatorData.image,
+            Hostname: containerName + '.kapeta',
+            Labels,
+            Cmd: operatorData.cmd,
+            ExposedPorts,
+            Env,
+            HealthCheck,
+            HostConfig: {
+                PortBindings,
+                Mounts,
+            },
+        });
+        return new containerManager_1.ContainerInfo(container);
     }
 }
 exports.operatorManager = new OperatorManager();
@@ -134,27 +134,6 @@ class BlockInstanceRunner {
             throw new Error(`Missing docker image information: ${JSON.stringify(localContainer)}`);
         }
         const containerName = (0, utils_1.getBlockInstanceContainerName)(blockInstance.id);
-        const logs = new LogData_1.LogData();
-        logs.addLog(`Starting block ${blockInstance.ref}`);
-        let containerInfo = await containerManager_1.containerManager.getContainerByName(containerName);
-        let container = containerInfo?.native;
-        console.log('Starting dev container', containerName);
-        if (containerInfo) {
-            console.log(`Dev container already exists. Deleting...`);
-            try {
-                await containerInfo.remove({
-                    force: true,
-                });
-            }
-            catch (e) {
-                throw new Error('Failed to delete existing container: ' + e.message);
-            }
-            container = undefined;
-            containerInfo = undefined;
-        }
-        logs.addLog(`Creating new container for block: ${containerName}`);
-        console.log('Creating new dev container', containerName, dockerImage);
-        await containerManager_1.containerManager.pull(dockerImage);
         const startCmd = localContainer.handlers?.onCreate ? localContainer.handlers.onCreate : '';
         const dockerOpts = localContainer.options ?? {};
         const homeDir = localContainer.userHome ? localContainer.userHome : '/root';
@@ -183,8 +162,7 @@ class BlockInstanceRunner {
         if (localContainer.healthcheck) {
             HealthCheck = containerManager_1.containerManager.toDockerHealth({ cmd: localContainer.healthcheck });
         }
-
-        container = await containerManager_1.containerManager.startContainer({
+        return this.ensureContainer({
             Image: dockerImage,
             name: containerName,
             WorkingDir: workingDir,
@@ -211,8 +189,12 @@ class BlockInstanceRunner {
             },
             ...dockerOpts,
         });
+    }
+    async ensureContainer(opts) {
+        const logs = new LogData_1.LogData();
+        const container = await containerManager_1.containerManager.ensureContainer(opts);
         try {
-            if (HealthCheck) {
+            if (opts.HealthCheck) {
                 await containerManager_1.containerManager.waitForHealthy(container);
             }
             else {
@@ -290,45 +272,23 @@ class BlockInstanceRunner {
         }
         const containerName = (0, utils_1.getBlockInstanceContainerName)(blockInstance.id);
         const logs = new LogData_1.LogData();
-        const containerInfo = await containerManager_1.containerManager.getContainerByName(containerName);
-        let container = containerInfo?.native;
         // For windows we need to default to root
         const innerHome = process.platform === 'win32' ? '/root/.kapeta' : local_cluster_config_1.default.getKapetaBasedir();
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            Labels: {
-                instance: blockInstance.id,
-            },
-            Env: [
-                ...DOCKER_ENV_VARS,
-                `KAPETA_LOCAL_CLUSTER_PORT=${clusterService_1.clusterService.getClusterServicePort()}`,
-                ...Object.entries(env).map(([key, value]) => `${key}=${value}`),
-            ],
-            HostConfig: {
-                Binds: [`${(0, containerManager_1.toLocalBindVolume)(local_cluster_config_1.default.getKapetaBasedir())}:${innerHome}`],
-            },
-        });
-        try {
-            await containerManager_1.containerManager.waitForReady(container);
-        }
-        catch (e) {
-            logs.addLog(e.message, 'ERROR');
-        }
-        }
-        return this._handleContainer(container, logs);
+        return this.ensureContainer({
+            Image: dockerImage,
+            name: containerName,
+            Labels: {
+                instance: blockInstance.id,
+            },
+            Env: [
+                ...DOCKER_ENV_VARS,
+                `KAPETA_LOCAL_CLUSTER_PORT=${clusterService_1.clusterService.getClusterServicePort()}`,
+                ...Object.entries(env).map(([key, value]) => `${key}=${value}`),
+            ],
+            HostConfig: {
+                Binds: [`${(0, containerManager_1.toLocalBindVolume)(local_cluster_config_1.default.getKapetaBasedir())}:${innerHome}`],
+            },
+        });
     }
     /**
      *
@@ -351,120 +311,68 @@ class BlockInstanceRunner {
             throw new Error(`Provider did not have local image: ${providerRef}`);
         }
         const dockerImage = spec?.local?.image;
-        try {
-            await containerManager_1.containerManager.pull(dockerImage);
-        }
-        catch (e) {
-            console.warn('Failed to pull image. Continuing...', e);
-        }
         const containerName = (0, utils_1.getBlockInstanceContainerName)(blockInstance.id);
         const logs = new LogData_1.LogData();
-        const containerInfo = await containerManager_1.containerManager.getContainerByName(containerName);
-        let container = containerInfo?.native;
-        if (container) {
-            const containerData = container.data;
-            if (containerData.State === 'running') {
-                logs.addLog(`Found existing running container for block: ${containerName}`);
-            }
-            else {
-                if (containerData.State?.ExitCode > 0) {
-                    logs.addLog(`Container exited with code: ${containerData.State.ExitCode}. Deleting...`);
-                    try {
-                        await containerManager_1.containerManager.remove(container);
-                    }
-                    catch (e) { }
-                    container = undefined;
-                }
-                else {
-                    logs.addLog(`Found existing container for block: ${containerName}. Starting now`);
-                    try {
-                        await container.start();
-                    }
-                    catch (e) {
-                        console.warn('Failed to start container. Deleting...', e);
-                        try {
-                            await containerManager_1.containerManager.remove(container);
-                        }
-                        catch (e) { }
-                        container = undefined;
-                    }
-                }
-            }
-        }
         const bindHost = (0, utils_1.getBindHost)();
-
-
-
-
-
-
-        const
-
-
-
-
-
-
-
-                    HostPort: `${publicPort}`,
-                },
-            ];
-        });
-        await Promise.all(promises);
-        if (spec.local?.env) {
-            Object.entries(spec.local.env).forEach(([key, value]) => {
-                addonEnv[key] = value;
-            });
-        }
-        if (spec.local?.mounts) {
-            const mounts = containerManager_1.containerManager.createMounts(blockUri.id, spec.local.mounts);
-            Mounts = containerManager_1.containerManager.toDockerMounts(mounts);
-        }
-        if (spec.local?.health) {
-            HealthCheck = containerManager_1.containerManager.toDockerHealth(spec.local?.health);
-        }
-        // For windows we need to default to root
-        const innerHome = process.platform === 'win32' ? '/root/.kapeta' : local_cluster_config_1.default.getKapetaBasedir();
-        logs.addLog(`Creating new container for block: ${containerName}`);
-        container = await containerManager_1.containerManager.startContainer({
-            Image: dockerImage,
-            name: containerName,
-            ExposedPorts,
-            HealthCheck,
-            HostConfig: {
-                Binds: [
-                    `${(0, containerManager_1.toLocalBindVolume)(kapetaYmlPath)}:/kapeta.yml:ro`,
-                    `${(0, containerManager_1.toLocalBindVolume)(local_cluster_config_1.default.getKapetaBasedir())}:${innerHome}`,
-                ],
-                PortBindings,
-                Mounts,
-            },
-            Labels: {
-                instance: blockInstance.id,
+        const ExposedPorts = {};
+        const addonEnv = {};
+        const PortBindings = {};
+        let HealthCheck = undefined;
+        let Mounts = [];
+        const promises = Object.entries(spec.local.ports).map(async ([portType, value]) => {
+            const dockerPort = `${value.port}/${value.type}`;
+            ExposedPorts[dockerPort] = {};
+            addonEnv[`KAPETA_LOCAL_SERVER_PORT_${portType.toUpperCase()}`] = value.port;
+            const publicPort = await serviceManager_1.serviceManager.ensureServicePort(this._systemId, blockInstance.id, portType);
+            PortBindings[dockerPort] = [
+                {
+                    HostIp: bindHost,
+                    HostPort: `${publicPort}`,
                 },
-
-
-
-
-
-
-                    ...addonEnv,
-                }).map(([key, value]) => `${key}=${value}`),
-            ],
+            ];
+        });
+        await Promise.all(promises);
+        if (spec.local?.env) {
+            Object.entries(spec.local.env).forEach(([key, value]) => {
+                addonEnv[key] = value;
             });
-        try {
-            if (HealthCheck) {
-                await containerManager_1.containerManager.waitForHealthy(container);
-            }
-            else {
-                await containerManager_1.containerManager.waitForReady(container);
-            }
-        }
-        catch (e) {
-            logs.addLog(e.message, 'ERROR');
-        }
         }
-
+        if (spec.local?.mounts) {
+            const mounts = containerManager_1.containerManager.createMounts(blockUri.id, spec.local.mounts);
+            Mounts = containerManager_1.containerManager.toDockerMounts(mounts);
+        }
+        if (spec.local?.health) {
+            HealthCheck = containerManager_1.containerManager.toDockerHealth(spec.local?.health);
+        }
+        // For windows we need to default to root
+        const innerHome = process.platform === 'win32' ? '/root/.kapeta' : local_cluster_config_1.default.getKapetaBasedir();
+        logs.addLog(`Creating new container for block: ${containerName}`);
+        const out = await this.ensureContainer({
+            Image: dockerImage,
+            name: containerName,
+            ExposedPorts,
+            HealthCheck,
+            HostConfig: {
+                Binds: [
+                    `${(0, containerManager_1.toLocalBindVolume)(kapetaYmlPath)}:/kapeta.yml:ro`,
+                    `${(0, containerManager_1.toLocalBindVolume)(local_cluster_config_1.default.getKapetaBasedir())}:${innerHome}`,
+                ],
+                PortBindings,
+                Mounts,
+            },
+            Labels: {
+                instance: blockInstance.id,
+            },
+            Env: [
+                `KAPETA_INSTANCE_NAME=${blockInstance.ref}`,
+                `KAPETA_LOCAL_CLUSTER_PORT=${clusterService_1.clusterService.getClusterServicePort()}`,
+                ...DOCKER_ENV_VARS,
+                ...Object.entries({
+                    ...env,
+                    ...addonEnv,
+                }).map(([key, value]) => `${key}=${value}`),
+            ],
+        });
         const portTypes = spec.local.ports ? Object.keys(spec.local.ports) : [];
         if (portTypes.length > 0) {
             out.portType = portTypes[0];
@@ -41,6 +41,7 @@ interface Health {
     timeout?: number;
     retries?: number;
 }
+export declare const CONTAINER_LABEL_PORT_PREFIX = "kapeta_port-";
 export declare const HEALTH_CHECK_TIMEOUT: number;
 declare class ContainerManager {
     private _docker;
@@ -56,7 +57,7 @@ declare class ContainerManager {
     ping(): Promise<void>;
     docker(): Docker;
     getContainerByName(containerName: string): Promise<ContainerInfo | undefined>;
-    pull(image: string, cacheForMS?: number): Promise<
+    pull(image: string, cacheForMS?: number): Promise<boolean>;
    toDockerMounts(mounts: StringMap): DockerMounts[];
     toDockerHealth(health: Health): {
         Test: string[];
@@ -64,13 +65,8 @@ declare class ContainerManager {
         Timeout: number;
         Retries: number;
     };
-
-
-        mounts: {};
-        env: {};
-        cmd: string;
-        health: Health;
-    }): Promise<ContainerInfo>;
+    private applyHash;
+    ensureContainer(opts: any): Promise<Container>;
     startContainer(opts: any): Promise<Container>;
     waitForReady(container: Container, attempt?: number): Promise<void>;
     waitForHealthy(container: Container, attempt?: number): Promise<void>;
|