@kapeta/local-cluster-service 0.0.75 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,26 @@
1
+ name: Main build
2
+ on:
3
+ pull_request:
4
+ push:
5
+ branches: ['master']
6
+
7
+ jobs:
8
+ build:
9
+ runs-on: ubuntu-latest
10
+ steps:
11
+ - uses: actions/checkout@v3
12
+ with:
13
+ token: ${{ secrets.BOT_TOKEN }}
14
+ - uses: actions/setup-node@v3
15
+ - run: npm ci
16
+ - run: npm run build --if-present
17
+ env:
18
+ CI: true
19
+ - run: npm test -- --passWithNoTests
20
+ # Probably move this to its own job when it makes sense
21
+ - name: Semantic Release
22
+ uses: cycjimmy/semantic-release-action@v3
23
+ if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }}
24
+ env:
25
+ GITHUB_TOKEN: ${{ secrets.BOT_TOKEN }}
26
+ NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
package/CHANGELOG.md ADDED
@@ -0,0 +1,14 @@
1
+ # [0.1.0](https://github.com/kapetacom/local-cluster-service/compare/v0.0.76...v0.1.0) (2023-05-06)
2
+
3
+
4
+ ### Features
5
+
6
+ * Allow running docker block operators ([0a3992c](https://github.com/kapetacom/local-cluster-service/commit/0a3992c359a119a623ed7d0423e6f7ad814aa8d3))
7
+
8
+ ## [0.0.76](https://github.com/kapetacom/local-cluster-service/compare/v0.0.75...v0.0.76) (2023-05-06)
9
+
10
+
11
+ ### Bug Fixes
12
+
13
+ * include docker status in cluster startup response ([0d40253](https://github.com/kapetacom/local-cluster-service/commit/0d402535b7b936fa4f4f480147d8f3103249a6f8))
14
+ * make docker config try more variations before giving up ([f55629e](https://github.com/kapetacom/local-cluster-service/commit/f55629ed3f7167ec7b6810ec16ae6d8068722863))
package/definitions.d.ts CHANGED
@@ -23,8 +23,8 @@ declare function ProxyRequestHandler(req:Request, res:Response, info:ProxyReques
23
23
 
24
24
  interface Connection {
25
25
  mapping: any
26
- from: ResourceRef
27
- to: ResourceRef
26
+ provider: ResourceRef
27
+ consumer: ResourceRef
28
28
  }
29
29
 
30
30
  interface ResourceInfo {
@@ -36,7 +36,7 @@ interface ResourceInfo {
36
36
  interface ProxyRequestInfo {
37
37
  address: string
38
38
  connection:Connection
39
- fromResource:ResourceInfo
40
- toResource:ResourceInfo
39
+ providerResource:ResourceInfo
40
+ consumerResource:ResourceInfo
41
41
  consumerPath:string
42
42
  }
package/index.js CHANGED
@@ -20,7 +20,7 @@ function createServer() {
20
20
  app.use('/assets', require('./src/assets/routes'));
21
21
  app.use('/providers', require('./src/providers/routes'));
22
22
  app.use('/', (err, req, res, next) => {
23
- console.error('Request failed: %s %s', req.method, req.originalUrl, err.stack);
23
+ console.error('Request failed: %s %s', req.method, req.originalUrl, err);
24
24
  res.status(500).send({
25
25
  ok: false,
26
26
  error: err.error ?? err.message
@@ -55,7 +55,7 @@ module.exports = {
55
55
 
56
56
  /**
57
57
  * Starts the local cluster service.
58
- * @return {Promise<Integer>} resolves when listening is done with port number. Rejects if listening failed.
58
+ * @return {Promise<{ host: string, port: number, dockerStatus: boolean}>} resolves when listening is done with port number. Rejects if listening failed.
59
59
  */
60
60
  start: async function() {
61
61
  if (currentServer) {
@@ -63,9 +63,9 @@ module.exports = {
63
63
  }
64
64
 
65
65
  try {
66
- await containerManager.ping()
66
+ await containerManager.initialize()
67
67
  } catch (e) {
68
- throw new Error('Could not ping docker runtime: ' + e.toString() + '. Make sure docker is running and working.');
68
+ console.error('Could not ping docker runtime: ' + e.toString() + '. Make sure docker is running and working.');
69
69
  }
70
70
 
71
71
  const clusterPort = storageService.get('cluster','port');
@@ -102,7 +102,7 @@ module.exports = {
102
102
  reject(err);
103
103
  });
104
104
 
105
- currentServer.listen(port, host, () => resolve({host,port}));
105
+ currentServer.listen(port, host, () => resolve({host,port, dockerStatus: containerManager.isAlive()}));
106
106
  currentServer.host = host;
107
107
  currentServer.port = port;
108
108
  });
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@kapeta/local-cluster-service",
3
- "version": "0.0.75",
3
+ "version": "0.1.0",
4
4
  "description": "Manages configuration, ports and service discovery for locally running Kapeta systems",
5
5
  "main": "index.js",
6
6
  "repository": {
@@ -16,15 +16,15 @@
16
16
  "url": "https://github.com/kapetacom/local-cluster-service/issues"
17
17
  },
18
18
  "scripts": {
19
- "start": "nodemon start.js"
19
+ "start": "nodemon start.js",
20
+ "test": "echo its fine"
20
21
  },
21
22
  "homepage": "https://github.com/kapetacom/local-cluster-service#readme",
22
23
  "dependencies": {
23
24
  "@kapeta/codegen": "<2",
24
25
  "@kapeta/local-cluster-config": "<2",
25
- "@kapeta/local-cluster-executor": "<2",
26
26
  "@kapeta/nodejs-api-client": "<2",
27
- "@kapeta/nodejs-registry-utils": "^0.0.2",
27
+ "@kapeta/nodejs-registry-utils": "<2",
28
28
  "@kapeta/nodejs-utils": "<2",
29
29
  "@kapeta/sdk-config": "<2",
30
30
  "express": "4.17.1",
@@ -43,5 +43,30 @@
43
43
  },
44
44
  "devDependencies": {
45
45
  "nodemon": "^2.0.2"
46
+ },
47
+ "release": {
48
+ "plugins": [
49
+ "@semantic-release/commit-analyzer",
50
+ "@semantic-release/release-notes-generator",
51
+ [
52
+ "@semantic-release/changelog",
53
+ {
54
+ "changelogFile": "CHANGELOG.md"
55
+ }
56
+ ],
57
+ "@semantic-release/npm",
58
+ "@semantic-release/github",
59
+ [
60
+ "@semantic-release/git",
61
+ {
62
+ "assets": [
63
+ "CHANGELOG.md",
64
+ "package.json",
65
+ "package-lock.json"
66
+ ],
67
+ "message": "chore(release): ${nextRelease.version} [skip ci]\n\n${nextRelease.notes}"
68
+ }
69
+ ]
70
+ ]
46
71
  }
47
72
  }
@@ -54,7 +54,10 @@ class AssetManager {
54
54
  */
55
55
  getAssets(assetKinds) {
56
56
  if (!assetKinds) {
57
- const blockTypeProviders = ClusterConfiguration.getDefinitions('core/block-type');
57
+ const blockTypeProviders = ClusterConfiguration.getDefinitions([
58
+ 'core/block-type',
59
+ 'core/block-type-operator'
60
+ ]);
58
61
  assetKinds = blockTypeProviders.map(p => {
59
62
  return `${p.definition.metadata.name}:${p.version}`
60
63
  });
@@ -82,7 +85,6 @@ class AssetManager {
82
85
 
83
86
  async getAsset(ref) {
84
87
  const uri = parseKapetaUri(ref);
85
-
86
88
  await repositoryManager.ensureAsset(uri.handle, uri.name, uri.version);
87
89
 
88
90
  let asset = ClusterConfiguration.getDefinitions()
@@ -1,40 +1,109 @@
1
- const {Docker} = require('node-docker-api');
2
- const _ = require('lodash');
1
+ const {Docker} = require("node-docker-api");
2
+ const path = require("path");
3
+ const _ = require('lodash');
4
+ const FS = require("node:fs");
3
5
 
4
- const LABEL_PORT_PREFIX = 'kapeta_port-';
6
+ const LABEL_PORT_PREFIX = "kapeta_port-";
5
7
 
6
8
  const NANO_SECOND = 1000000;
7
9
  const HEALTH_CHECK_INTERVAL = 1000;
8
10
  const HEALTH_CHECK_MAX = 20;
9
11
 
10
- const promisifyStream = (stream) => new Promise((resolve, reject) => {
11
- stream.on('data', (d) => console.log(d.toString()))
12
- stream.on('end', resolve)
13
- stream.on('error', reject)
14
- });
12
+ const promisifyStream = (stream) =>
13
+ new Promise((resolve, reject) => {
14
+ stream.on("data", (d) => console.log(d.toString()));
15
+ stream.on("end", resolve);
16
+ stream.on("error", reject);
17
+ });
15
18
 
16
19
  class ContainerManager {
17
20
  constructor() {
18
- this._docker = new Docker();
21
+ this._docker = null;
19
22
  this._alive = false;
20
23
  }
21
24
 
25
+ isAlive() {
26
+ return this._alive;
27
+ }
28
+
29
+ async initialize() {
30
+ // try each connection option in order until one responds to ping
31
+ const connectOptions = [
32
+ // use defaults: DOCKER_HOST etc from env, if available
33
+ undefined,
34
+ // default linux
35
+ {socketPath: "/var/run/docker.sock"},
36
+ // default macOS
37
+ {socketPath: path.join(os.homedir(), ".docker/run/docker.sock")},
38
+ // Default http
39
+ {protocol: "http", host: "localhost", port: 2375},
40
+ {protocol: "https", host: "localhost", port: 2376},
41
+ ];
42
+ for (const opts of connectOptions) {
43
+ try {
44
+ const client = new Docker(opts);
45
+ await client.ping();
46
+ this._docker = client;
47
+ return;
48
+ } catch (err) {
49
+ // silently ignore bad configs
50
+ }
51
+ }
52
+ throw new Error("Unable to connect to docker");
53
+ }
54
+
22
55
  async ping() {
23
56
  await this._docker.ping();
24
57
  this._alive = true;
25
58
  }
26
59
 
60
+ async ping() {
61
+
62
+ try {
63
+ const pingResult = await this._docker.ping();
64
+ if (pingResult !== 'OK') {
65
+ throw new Error(`Ping failed: ${pingResult}`);
66
+ }
67
+ } catch (e) {
68
+ throw new Error(`Docker not running. Please start the docker daemon before running this command. Error: ${e.message}`);
69
+ }
70
+
71
+ this._alive = true;
72
+ }
73
+
74
+ async ensureAlive() {
75
+ if (!this._alive) {
76
+ await this.ping();
77
+ }
78
+ }
79
+
80
+ async docker() {
81
+ await this.ensureAlive();
82
+ return this._docker;
83
+ }
84
+
85
+ async getContainerByName(containerName) {
86
+ const containers = await this._docker.container.list({all: true});
87
+ return containers.find(container => {
88
+ return container.data.Names.indexOf(`/${containerName}`) > -1;
89
+ });
90
+ }
91
+
27
92
  async pull(image) {
28
93
  let [imageName, tag] = image.split(/:/);
29
94
  if (!tag) {
30
95
  tag = 'latest';
31
96
  }
32
97
 
33
- await this._docker.image.create({}, {
34
- fromImage: imageName,
35
- tag: tag
36
- }).then(stream => promisifyStream(stream));
37
-
98
+ await this._docker.image
99
+ .create(
100
+ {},
101
+ {
102
+ fromImage: imageName,
103
+ tag: tag,
104
+ }
105
+ )
106
+ .then((stream) => promisifyStream(stream));
38
107
  }
39
108
 
40
109
  /**
@@ -45,24 +114,24 @@ class ContainerManager {
45
114
  * @return {Promise<ContainerInfo>}
46
115
  */
47
116
  async run(image, name, opts) {
48
-
49
117
  const Mounts = [];
50
118
  const PortBindings = {};
51
119
  const Env = [];
52
120
  const Labels = {
53
- 'kapeta':'true'
121
+ kapeta: "true",
54
122
  };
55
123
 
56
- console.log('Pulling image: %s', image);
124
+ console.log("Pulling image: %s", image);
57
125
 
58
126
  await this.pull(image);
59
127
 
60
- console.log('Image pulled: %s', image);
128
+ console.log("Image pulled: %s", image);
61
129
 
62
130
  _.forEach(opts.ports, (portInfo, containerPort) => {
63
131
  PortBindings['' + containerPort] = [
64
132
  {
65
- HostPort: '' + portInfo.hostPort
133
+ HostPort: '' + portInfo.hostPort,
134
+ HostIp: '127.0.0.1'
66
135
  }
67
136
  ];
68
137
 
@@ -73,33 +142,35 @@ class ContainerManager {
73
142
  Mounts.push({
74
143
  Target,
75
144
  Source,
76
- Type: 'bind',
145
+ Type: "bind",
77
146
  ReadOnly: false,
78
- Consistency: 'consistent'
147
+ Consistency: "consistent",
79
148
  });
80
149
  });
81
150
 
82
151
  _.forEach(opts.env, (value, name) => {
83
- Env.push(name + '=' + value);
152
+ Env.push(name + "=" + value);
84
153
  });
85
154
 
86
155
  let HealthCheck = undefined;
87
156
 
88
157
  if (opts.health) {
89
158
  HealthCheck = {
90
- Test: [
91
- 'CMD-SHELL',
92
- opts.health.cmd
93
- ],
94
- Interval: opts.health.interval ? opts.health.interval * NANO_SECOND : 5000 * NANO_SECOND,
95
- Timeout: opts.health.timeout ? opts.health.timeout * NANO_SECOND : 15000 * NANO_SECOND,
96
- Retries: opts.health.retries || 10
159
+ Test: ["CMD-SHELL", opts.health.cmd],
160
+ Interval: opts.health.interval
161
+ ? opts.health.interval * NANO_SECOND
162
+ : 5000 * NANO_SECOND,
163
+ Timeout: opts.health.timeout
164
+ ? opts.health.timeout * NANO_SECOND
165
+ : 15000 * NANO_SECOND,
166
+ Retries: opts.health.retries || 10,
97
167
  };
98
168
 
99
- console.log('Adding health check', HealthCheck);
169
+ console.log("Adding health check", HealthCheck);
100
170
  }
101
171
 
102
- const dockerContainer = await this._docker.container.create({
172
+
173
+ const dockerContainer = await this.startContainer({
103
174
  name: name,
104
175
  Image: image,
105
176
  Labels,
@@ -111,8 +182,6 @@ class ContainerManager {
111
182
  }
112
183
  });
113
184
 
114
- await dockerContainer.start();
115
-
116
185
  if (opts.health) {
117
186
  await this._waitForHealthy(dockerContainer);
118
187
  }
@@ -120,6 +189,14 @@ class ContainerManager {
120
189
  return new ContainerInfo(dockerContainer);
121
190
  }
122
191
 
192
+ async startContainer(opts) {
193
+ const dockerContainer = await this._docker.container.create(opts);
194
+
195
+ await dockerContainer.start();
196
+
197
+ return dockerContainer;
198
+ }
199
+
123
200
 
124
201
  async _waitForHealthy(container, attempt) {
125
202
  if (!attempt) {
@@ -127,11 +204,11 @@ class ContainerManager {
127
204
  }
128
205
 
129
206
  if (attempt >= HEALTH_CHECK_MAX) {
130
- throw new Error('Operator did not become healthy within the timeout');
207
+ throw new Error("Operator did not become healthy within the timeout");
131
208
  }
132
209
 
133
210
  if (await this._isHealthy(container)) {
134
- console.log('Container became healthy');
211
+ console.log("Container became healthy");
135
212
  return;
136
213
  }
137
214
 
@@ -139,15 +216,14 @@ class ContainerManager {
139
216
  setTimeout(async () => {
140
217
  await this._waitForHealthy(container, attempt + 1);
141
218
  resolve();
142
- }, HEALTH_CHECK_INTERVAL)
219
+ }, HEALTH_CHECK_INTERVAL);
143
220
  });
144
-
145
221
  }
146
222
 
147
223
  async _isHealthy(container) {
148
224
  const info = await container.status();
149
225
 
150
- return info?.data?.State?.Health?.Status === 'healthy';
226
+ return info?.data?.State?.Health?.Status === "healthy";
151
227
  }
152
228
 
153
229
  /**
@@ -161,9 +237,9 @@ class ContainerManager {
161
237
  try {
162
238
  dockerContainer = await this._docker.container.get(name);
163
239
  await dockerContainer.status();
164
- } catch(err) {
240
+ } catch (err) {
165
241
  //Ignore
166
- console.log('Container not available - creating it: %s', name);
242
+ console.log("Container not available - creating it: %s", name);
167
243
  dockerContainer = null;
168
244
  }
169
245
 
@@ -181,7 +257,6 @@ class ContainerInfo {
181
257
  * @param {Container} dockerContainer
182
258
  */
183
259
  constructor(dockerContainer) {
184
-
185
260
  /**
186
261
  *
187
262
  * @type {Container}
@@ -193,12 +268,10 @@ class ContainerInfo {
193
268
  async isRunning() {
194
269
  const inspectResult = await this.getStatus();
195
270
 
196
- if (!inspectResult ||
197
- !inspectResult.State) {
271
+ if (!inspectResult || !inspectResult.State) {
198
272
  return false;
199
273
  }
200
274
 
201
-
202
275
  return inspectResult.State.Running || inspectResult.State.Restarting;
203
276
  }
204
277
 
@@ -215,7 +288,7 @@ class ContainerInfo {
215
288
  }
216
289
 
217
290
  async remove(opts) {
218
- await this._container.delete({ force: !!opts.force });
291
+ await this._container.delete({force: !!opts.force});
219
292
  }
220
293
 
221
294
  async getPort(type) {
@@ -237,9 +310,11 @@ class ContainerInfo {
237
310
  async getPorts() {
238
311
  const inspectResult = await this.getStatus();
239
312
 
240
- if (!inspectResult ||
313
+ if (
314
+ !inspectResult ||
241
315
  !inspectResult.Config ||
242
- !inspectResult.Config.Labels) {
316
+ !inspectResult.Config.Labels
317
+ ) {
243
318
  return false;
244
319
  }
245
320
 
@@ -254,26 +329,27 @@ class ContainerInfo {
254
329
  const hostPort = name.substr(LABEL_PORT_PREFIX.length);
255
330
 
256
331
  portTypes[hostPort] = portType;
257
-
258
332
  });
259
333
 
260
- _.forEach(inspectResult.HostConfig.PortBindings, (portBindings, containerPortSpec) => {
261
- let [containerPort, protocol] = containerPortSpec.split(/\//);
334
+ _.forEach(
335
+ inspectResult.HostConfig.PortBindings,
336
+ (portBindings, containerPortSpec) => {
337
+ let [containerPort, protocol] = containerPortSpec.split(/\//);
262
338
 
263
- const hostPort = portBindings[0].HostPort;
339
+ const hostPort = portBindings[0].HostPort;
264
340
 
265
- const portType = portTypes[hostPort];
341
+ const portType = portTypes[hostPort];
266
342
 
267
- ports[portType] = {
268
- containerPort,
269
- protocol,
270
- hostPort
271
- };
272
- });
343
+ ports[portType] = {
344
+ containerPort,
345
+ protocol,
346
+ hostPort,
347
+ };
348
+ }
349
+ );
273
350
 
274
351
  return ports;
275
352
  }
276
353
  }
277
354
 
278
-
279
355
  module.exports = new ContainerManager();
@@ -1,7 +1,7 @@
1
1
  const _ = require('lodash');
2
2
  const request = require('request');
3
-
4
- const {BlockInstanceRunner} = require('@kapeta/local-cluster-executor');
3
+ const EventEmitter = require("events");
4
+ const BlockInstanceRunner = require('./utils/BlockInstanceRunner');
5
5
 
6
6
  const storageService = require('./storageService');
7
7
  const socketManager = require('./socketManager');
@@ -68,6 +68,10 @@ class InstanceManager {
68
68
 
69
69
  if (instance.status !== newStatus) {
70
70
  instance.status = newStatus;
71
+ console.log(
72
+ 'Instance status changed: %s %s -> %s',
73
+ instance.systemId, instance.instanceId, instance.status
74
+ )
71
75
  this._emit(instance.systemId, EVENT_STATUS_CHANGED, instance);
72
76
  changed = true;
73
77
  }
@@ -85,7 +89,11 @@ class InstanceManager {
85
89
 
86
90
  if (instance.type === 'docker') {
87
91
  const container = await containerManager.get(instance.pid);
88
- return await container.isRunning()
92
+ if (!container) {
93
+ console.warn('Container not found: %s', instance.pid);
94
+ return false;
95
+ }
96
+ return await container.isRunning();
89
97
  }
90
98
 
91
99
  //Otherwise its just a normal process.
@@ -185,8 +193,12 @@ class InstanceManager {
185
193
  if (instance) {
186
194
  instance.status = STATUS_STARTING;
187
195
  instance.pid = info.pid;
188
- instance.type = info.type;
189
- instance.health = healthUrl;
196
+ if (info.type) {
197
+ instance.type = info.type;
198
+ }
199
+ if (healthUrl) {
200
+ instance.health = healthUrl;
201
+ }
190
202
  this._emit(systemId, EVENT_STATUS_CHANGED, instance);
191
203
  } else {
192
204
  instance = {
@@ -268,7 +280,9 @@ class InstanceManager {
268
280
  try {
269
281
  if (instance.type === 'docker') {
270
282
  const container = await containerManager.get(instance.pid);
271
- await container.stop();
283
+ if (container) {
284
+ await container.stop();
285
+ }
272
286
  return;
273
287
  }
274
288
  process.kill(instance.pid, 'SIGTERM');
@@ -383,7 +397,8 @@ class InstanceManager {
383
397
  message: e.message,
384
398
  time: Date.now()
385
399
  }
386
- ]
400
+ ];
401
+
387
402
  await this.registerInstance(planRef, instanceId, {
388
403
  type: 'local',
389
404
  pid: null,
@@ -401,7 +416,12 @@ class InstanceManager {
401
416
  return this._processes[planRef][instanceId] = {
402
417
  pid: -1,
403
418
  type,
404
- logs: () => logs
419
+ logs: () => logs,
420
+ stop: () => Promise.resolve(),
421
+ ref: blockRef,
422
+ id: instanceId,
423
+ name: blockInstance.name,
424
+ output: new EventEmitter()
405
425
  };
406
426
  }
407
427
 
@@ -427,7 +447,11 @@ class InstanceManager {
427
447
  }
428
448
 
429
449
  if (this._processes[planRef][instanceId]) {
430
- await this._processes[planRef][instanceId].stop();
450
+ try {
451
+ await this._processes[planRef][instanceId].stop();
452
+ } catch (e) {
453
+ console.error('Failed to stop process for instance: %s -> %s', planRef, instanceId, e);
454
+ }
431
455
  delete this._processes[planRef][instanceId];
432
456
  }
433
457
  }
@@ -100,6 +100,16 @@ router.put('/', async (req, res) => {
100
100
 
101
101
  let instance = JSON.parse(req.stringBody);
102
102
 
103
+ if (req.kapeta.environment === 'docker') {
104
+ //A bit hacky but we want to avoid overwriting the docker PID with a process PID
105
+ const oldInstance = instanceManager.getInstance(
106
+ req.kapeta.systemId,
107
+ req.kapeta.instanceId
108
+ );
109
+ if (oldInstance) {
110
+ instance.pid = oldInstance.pid;
111
+ }
112
+ }
103
113
  await instanceManager.registerInstance(
104
114
  req.kapeta.systemId,
105
115
  req.kapeta.instanceId,
@@ -3,10 +3,10 @@ class NetworkManager {
3
3
 
4
4
  static toConnectionId(connection) {
5
5
  return [
6
- connection.from.blockId,
7
- connection.from.resourceName,
8
- connection.to.blockId,
9
- connection.to.resourceName
6
+ connection.provider.blockId,
7
+ connection.provider.resourceName,
8
+ connection.consumer.blockId,
9
+ connection.consumer.resourceName
10
10
  ].join('_');
11
11
  }
12
12
 
@@ -65,8 +65,8 @@ class NetworkManager {
65
65
  const traffic = new Traffic(connection, request, consumerMethodId, providerMethodId);
66
66
 
67
67
  this._ensureConnection(systemId, traffic.connectionId).push(traffic);
68
- this._ensureSource(systemId, connection.from.blockId).push(traffic);
69
- this._ensureTarget(systemId, connection.to.blockId).push(traffic);
68
+ this._ensureSource(systemId, connection.provider.blockId).push(traffic);
69
+ this._ensureTarget(systemId, connection.consumer.blockId).push(traffic);
70
70
 
71
71
  return traffic;
72
72
  }