teraslice 0.87.1 → 0.89.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/cluster-service.js +24 -18
  2. package/dist/src/index.js +42 -0
  3. package/package.json +10 -14
  4. package/service.js +4 -6
  5. package/worker-service.js +6 -6
  6. package/index.js +0 -21
  7. package/lib/cluster/cluster_master.js +0 -164
  8. package/lib/cluster/node_master.js +0 -393
  9. package/lib/cluster/services/api.js +0 -581
  10. package/lib/cluster/services/assets.js +0 -211
  11. package/lib/cluster/services/cluster/backends/kubernetes/deployments/worker.hbs +0 -86
  12. package/lib/cluster/services/cluster/backends/kubernetes/index.js +0 -225
  13. package/lib/cluster/services/cluster/backends/kubernetes/jobs/execution_controller.hbs +0 -69
  14. package/lib/cluster/services/cluster/backends/kubernetes/k8s.js +0 -450
  15. package/lib/cluster/services/cluster/backends/kubernetes/k8sResource.js +0 -443
  16. package/lib/cluster/services/cluster/backends/kubernetes/k8sState.js +0 -67
  17. package/lib/cluster/services/cluster/backends/kubernetes/utils.js +0 -58
  18. package/lib/cluster/services/cluster/backends/native/index.js +0 -611
  19. package/lib/cluster/services/cluster/backends/native/messaging.js +0 -563
  20. package/lib/cluster/services/cluster/backends/state-utils.js +0 -49
  21. package/lib/cluster/services/cluster/index.js +0 -15
  22. package/lib/cluster/services/execution.js +0 -459
  23. package/lib/cluster/services/jobs.js +0 -303
  24. package/lib/config/default-sysconfig.js +0 -47
  25. package/lib/config/index.js +0 -32
  26. package/lib/config/schemas/system.js +0 -333
  27. package/lib/processors/save_file/index.js +0 -9
  28. package/lib/processors/save_file/processor.js +0 -17
  29. package/lib/processors/save_file/schema.js +0 -17
  30. package/lib/processors/script.js +0 -130
  31. package/lib/processors/stdout/index.js +0 -9
  32. package/lib/processors/stdout/processor.js +0 -19
  33. package/lib/processors/stdout/schema.js +0 -18
  34. package/lib/storage/analytics.js +0 -106
  35. package/lib/storage/assets.js +0 -275
  36. package/lib/storage/backends/elasticsearch_store.js +0 -567
  37. package/lib/storage/backends/mappings/analytics.json +0 -49
  38. package/lib/storage/backends/mappings/asset.json +0 -40
  39. package/lib/storage/backends/mappings/ex.json +0 -55
  40. package/lib/storage/backends/mappings/job.json +0 -31
  41. package/lib/storage/backends/mappings/state.json +0 -37
  42. package/lib/storage/execution.js +0 -331
  43. package/lib/storage/index.js +0 -16
  44. package/lib/storage/jobs.js +0 -97
  45. package/lib/storage/state.js +0 -302
  46. package/lib/utils/api_utils.js +0 -173
  47. package/lib/utils/asset_utils.js +0 -117
  48. package/lib/utils/date_utils.js +0 -58
  49. package/lib/utils/encoding_utils.js +0 -29
  50. package/lib/utils/events.js +0 -7
  51. package/lib/utils/file_utils.js +0 -118
  52. package/lib/utils/id_utils.js +0 -19
  53. package/lib/utils/port_utils.js +0 -83
  54. package/lib/workers/assets/loader.js +0 -109
  55. package/lib/workers/assets/spawn.js +0 -78
  56. package/lib/workers/context/execution-context.js +0 -16
  57. package/lib/workers/context/terafoundation-context.js +0 -10
  58. package/lib/workers/execution-controller/execution-analytics.js +0 -211
  59. package/lib/workers/execution-controller/index.js +0 -1033
  60. package/lib/workers/execution-controller/recovery.js +0 -188
  61. package/lib/workers/execution-controller/scheduler.js +0 -461
  62. package/lib/workers/execution-controller/slice-analytics.js +0 -115
  63. package/lib/workers/helpers/job.js +0 -93
  64. package/lib/workers/helpers/op-analytics.js +0 -22
  65. package/lib/workers/helpers/terafoundation.js +0 -43
  66. package/lib/workers/helpers/worker-shutdown.js +0 -187
  67. package/lib/workers/metrics/index.js +0 -139
  68. package/lib/workers/worker/index.js +0 -344
  69. package/lib/workers/worker/slice.js +0 -143
@@ -1,333 +0,0 @@
1
- 'use strict';
2
-
3
- const ip = require('ip');
4
- const path = require('path');
5
- const {
6
- isPlainObject, isString, isArray, isInteger
7
- } = require('@terascope/utils');
8
-
9
- const workerCount = require('os').cpus().length;
10
-
11
- /**
12
- * This schema object is for the Teraslice configuration settings coming from
13
- * its configuration file.
14
- */
15
- const schema = {
16
- api_response_timeout: {
17
- doc: 'HTTP response timeout for the Teraslice API server',
18
- default: '5 minutes',
19
- format: 'duration'
20
- },
21
- assets_directory: {
22
- doc: 'directory to look for assets',
23
- default: path.join(process.cwd(), './assets'),
24
- format: (val) => {
25
- if (val) {
26
- if (isArray(val)) {
27
- const containStrings = val.every(isString);
28
- if (!containStrings) throw new Error('Invalid parameter assets_directory, if specified as an array, it must contain an array of strings');
29
- return;
30
- }
31
- if (!isString(val)) throw new Error('Invalid parameter assets_directory, it must either be a string or an array of strings');
32
- }
33
- }
34
- },
35
- assets_volume: {
36
- doc: 'name of shared asset volume (k8s)',
37
- default: '',
38
- format: 'optional_String'
39
- },
40
- autoload_directory: {
41
- doc: 'directory to look for assets to auto deploy when teraslice boots up',
42
- default: path.join(process.cwd(), './autoload'),
43
- format: 'optional_String'
44
- },
45
- hostname: {
46
- doc: 'IP or hostname for server',
47
- default: ip.address(),
48
- format: 'required_String'
49
- },
50
- workers: {
51
- doc: 'Number of workers per server',
52
- default: workerCount,
53
- format: 'nat'
54
- },
55
- master: {
56
- doc: 'boolean for determining if cluster_master should live on this node',
57
- default: false,
58
- format: Boolean
59
- },
60
- master_hostname: {
61
- doc:
62
- 'hostname where the cluster_master resides, used to notify all node_masters where to connect',
63
- default: 'localhost',
64
- format: 'required_String'
65
- },
66
- port: {
67
- doc: 'port for the cluster_master to listen on',
68
- default: 5678,
69
- format: 'port'
70
- },
71
- name: {
72
- doc: 'Name for the cluster itself, its used for naming log files/indices',
73
- default: 'teracluster',
74
- format: 'elasticsearch_Name'
75
- },
76
- state: {
77
- doc: 'Elasticsearch cluster where job state, analytics and logs are stored',
78
- default: { connection: 'default' },
79
- format(val) {
80
- if (!val.connection) {
81
- throw new Error('state parameter must be an object with a key named "connection"');
82
- }
83
- if (typeof val.connection !== 'string') {
84
- throw new Error(
85
- 'state parameter object with a key "connection" must be of type String as the value'
86
- );
87
- }
88
- }
89
- },
90
- index_settings: {
91
- analytics: {
92
- number_of_shards: {
93
- doc: 'The number of shards for the analytics index',
94
- default: 5
95
- },
96
- number_of_replicas: {
97
- doc: 'The number of replicas for the analytics index',
98
- default: 1
99
- }
100
- },
101
- assets: {
102
- number_of_shards: {
103
- doc: 'The number of shards for the assets index',
104
- default: 5
105
- },
106
- number_of_replicas: {
107
- doc: 'The number of replicas for the assets index',
108
- default: 1
109
- }
110
- },
111
- jobs: {
112
- number_of_shards: {
113
- doc: 'The number of shards for the jobs index',
114
- default: 5
115
- },
116
- number_of_replicas: {
117
- doc: 'The number of replicas for the jobs index',
118
- default: 1
119
- }
120
- },
121
- execution: {
122
- number_of_shards: {
123
- doc: 'The number of shards for the execution index',
124
- default: 5
125
- },
126
- number_of_replicas: {
127
- doc: 'The number of replicas for the execution index',
128
- default: 1
129
- }
130
- },
131
- state: {
132
- number_of_shards: {
133
- doc: 'The number of shards for the state index',
134
- default: 5
135
- },
136
- number_of_replicas: {
137
- doc: 'The number of replicas for the state index',
138
- default: 1
139
- }
140
- }
141
- },
142
- shutdown_timeout: {
143
- doc:
144
- 'time in milliseconds for workers and slicers to finish operations before forcefully shutting down',
145
- default: '1 minute',
146
- format: 'duration'
147
- },
148
- node_disconnect_timeout: {
149
- doc:
150
- 'time in milliseconds that the cluster will wait untill it drops that node from state and attempts to provision the lost workers',
151
- default: '5 minutes',
152
- format: 'duration'
153
- },
154
- worker_disconnect_timeout: {
155
- doc:
156
- 'time in milliseconds that the slicer will wait after all workers have disconnected before terminating the job',
157
- default: '5 minutes',
158
- format: 'duration'
159
- },
160
- slicer_timeout: {
161
- doc:
162
- 'time in milliseconds that the slicer will wait for worker connection before terminating the job',
163
- default: '3 minutes',
164
- format: 'duration'
165
- },
166
- action_timeout: {
167
- doc:
168
- 'time in milliseconds for waiting for a network message (pause/stop job, etc) to complete before throwing an error',
169
- default: '2 minutes',
170
- format: 'duration'
171
- },
172
- network_latency_buffer: {
173
- doc:
174
- 'time in milliseconds buffer which is combined with action_timeout to determine how long a network message will wait till it throws an error',
175
- default: '15 seconds',
176
- format: 'duration'
177
- },
178
- node_state_interval: {
179
- doc:
180
- 'time in milliseconds that indicates when the cluster master will ping nodes for their state',
181
- default: '5 seconds',
182
- format: 'duration'
183
- },
184
- analytics_rate: {
185
- doc: 'time in milliseconds in which to push analytics to cluster master',
186
- default: '1 minute',
187
- format: 'duration'
188
- },
189
- slicer_allocation_attempts: {
190
- doc: 'The number of times a slicer will try to be allocated before failing',
191
- default: 3,
192
- format: 'nat', // integer >=0 (natural number)
193
- },
194
- slicer_port_range: {
195
- doc: 'range of ports that slicers will use per node',
196
- default: '45679:46678',
197
- format(val) {
198
- const arr = val.split(':');
199
- if (arr.length !== 2) {
200
- throw new Error('slicer_port_range is formatted incorrectly');
201
- }
202
- arr.forEach((value) => {
203
- if (isInteger(value) !== false) {
204
- throw new Error(
205
- 'values specified in slicer_port_range must be a number specified as a string'
206
- );
207
- }
208
- });
209
- }
210
- },
211
- index_rollover_frequency: {
212
- state: {
213
- doc: 'How frequently the teraslice state indices are created',
214
- default: 'monthly',
215
- format: ['daily', 'monthly', 'yearly']
216
- },
217
- analytics: {
218
- doc: 'How frequently the analytics indices are created',
219
- default: 'monthly',
220
- format: ['daily', 'monthly', 'yearly']
221
- }
222
- },
223
- cluster_manager_type: {
224
- doc: 'determines which cluster system should be used',
225
- default: 'native',
226
- format: ['native', 'kubernetes']
227
- },
228
- cpu: {
229
- doc: 'number of cpus to reserve per teraslice worker in kubernetes',
230
- default: undefined,
231
- format: 'Number'
232
- },
233
- cpu_execution_controller: {
234
- doc: 'number of cpus to reserve per teraslice execution controller in kubernetes',
235
- default: 0.5,
236
- format: 'Number'
237
- },
238
- ephemeral_storage: {
239
- doc: 'Add ephemeral storage volume to worker and execution controller pods',
240
- default: false,
241
- format: Boolean
242
- },
243
- memory: {
244
- doc: 'memory, in bytes, to reserve per teraslice worker in kubernetes',
245
- default: undefined,
246
- format: 'Number'
247
- },
248
- memory_execution_controller: {
249
- doc: 'memory, in bytes, to reserve per teraslice execution controller in kubernetes',
250
- default: 512000000,
251
- format: 'Number'
252
- },
253
- env_vars: {
254
- default: {},
255
- doc: 'default environment variables to set on each the teraslice worker, in the format, { "EXAMPLE": "test" }',
256
- format(obj) {
257
- if (!isPlainObject(obj)) {
258
- throw new Error('must be object');
259
- }
260
- Object.entries(obj).forEach(([key, val]) => {
261
- if (key == null || key === '') {
262
- throw new Error('key must be not empty');
263
- }
264
-
265
- if (val == null || val === '') {
266
- throw new Error(`value for key "${key}" must be not empty`);
267
- }
268
- });
269
- },
270
- },
271
- execution_controller_targets: {
272
- default: null,
273
- doc: 'Specify an array of {"key": ..., "value": ...} targets for execution controllers',
274
- format(arr) {
275
- if (arr != null) {
276
- if (!Array.isArray(arr)) {
277
- throw new Error('labels is required to be an array');
278
- // FIXME: improve input and error handling
279
- }
280
- }
281
- }
282
- },
283
- kubernetes_api_poll_delay: {
284
- doc: 'Specify the delay between attempts to poll the kubernetes API',
285
- default: '1 second',
286
- format: 'duration'
287
- },
288
- kubernetes_image: {
289
- doc: 'Specify a custom image name for kubernetes, this only applies to kubernetes systems',
290
- default: 'terascope/teraslice',
291
- format: 'optional_String'
292
- },
293
- kubernetes_namespace: {
294
- doc: 'Specify a custom kubernetes namespace, this only applies to kubernetes systems',
295
- default: 'default',
296
- format: 'optional_String'
297
- },
298
- kubernetes_overrides_enabled: {
299
- doc: '',
300
- default: false,
301
- format: Boolean
302
- },
303
- kubernetes_priority_class_name: {
304
- doc: 'Priority class that the Teraslice master, execution controller, and stateful workers should run with',
305
- default: undefined,
306
- format: 'optional_String'
307
- },
308
- kubernetes_config_map_name: {
309
- doc: 'Specify the name of the Kubernetes ConfigMap used to configure worker pods',
310
- default: 'teraslice-worker',
311
- format: 'optional_String'
312
- },
313
- kubernetes_image_pull_secret: {
314
- doc: 'Name of Kubernetes secret used to pull docker images from private repository',
315
- default: '',
316
- format: 'optional_String'
317
- },
318
- kubernetes_worker_antiaffinity: {
319
- doc: 'Enable Teraslice woker pod AntiAffinity in Kubernetes',
320
- default: false,
321
- format: Boolean
322
- }
323
- };
324
-
325
- function configSchema() {
326
- return { teraslice: schema };
327
- }
328
-
329
- module.exports = {
330
- configSchema,
331
- config_schema: configSchema,
332
- schema
333
- };
@@ -1,9 +0,0 @@
1
- 'use strict';
2
-
3
- const { legacyProcessorShim } = require('@terascope/job-components');
4
- const Processor = require('./processor');
5
- const Schema = require('./schema');
6
-
7
- // This file for backwards compatibility and functionality will be limited
8
- // but it should allow you to write processors using the new way today
9
- module.exports = legacyProcessorShim(Processor, Schema);
@@ -1,17 +0,0 @@
1
- 'use strict';
2
-
3
- const fs = require('fs');
4
- const { EachProcessor } = require('@terascope/job-components');
5
-
6
- class SaveFile extends EachProcessor {
7
- constructor(...args) {
8
- super(...args);
9
- this.filePath = this.opConfig.file_path;
10
- }
11
-
12
- async forEach(record) {
13
- fs.appendFileSync(this.filePath, `${JSON.stringify(record)}\n`);
14
- }
15
- }
16
-
17
- module.exports = SaveFile;
@@ -1,17 +0,0 @@
1
- 'use strict';
2
-
3
- const { ConvictSchema } = require('@terascope/job-components');
4
-
5
- class Schema extends ConvictSchema {
6
- build() {
7
- return {
8
- file_path: {
9
- doc: 'Specify a number > 0 to limit the number of results printed to the console log.'
10
- + 'This prints results from the beginning of the result set.',
11
- default: __dirname
12
- }
13
- };
14
- }
15
- }
16
-
17
- module.exports = Schema;
@@ -1,130 +0,0 @@
1
- 'use strict';
2
-
3
- const { spawn } = require('child_process');
4
- const path = require('path');
5
- const { TSError } = require('@terascope/utils');
6
-
7
- function newProcessor(context, opConfig) {
8
- const { args, options } = opConfig;
9
-
10
- let command = '';
11
-
12
- return new Promise(((resolve, reject) => {
13
- if (opConfig.asset === undefined || opConfig.asset === '' || opConfig.asset === 'echo') {
14
- // this would be used when a path is defined to the asset in the job
15
- ({ command } = opConfig);
16
- resolve(procData);
17
- } else {
18
- context.apis.assets.getPath(opConfig.asset)
19
- .then((apath) => {
20
- command = path.join(apath, opConfig.command);
21
- resolve(procData);
22
- })
23
- .catch((err) => {
24
- reject(new TSError(err, {
25
- reason: 'asset not in specified path'
26
- }));
27
- });
28
- }
29
- }));
30
-
31
- function procData(data) {
32
- return new Promise(((resolve, reject) => {
33
- let inData = '';
34
- try {
35
- inData = JSON.stringify(data);
36
- } catch (err) {
37
- reject(new TSError(err, {
38
- reason: 'failed to convert input data to string',
39
- }));
40
- return;
41
- }
42
-
43
- let outErrors = '';
44
- let outData = '';
45
- let childProcess;
46
-
47
- try {
48
- childProcess = spawn(command, args, options);
49
- } catch (err) {
50
- reject(new TSError(err, {
51
- reason: 'when trying to run command'
52
- }));
53
- return;
54
- }
55
-
56
- childProcess.stdin.setEncoding('utf-8');
57
- childProcess.stdin.write(`${inData}\n`);
58
- childProcess.stdin.end();
59
-
60
- childProcess.on('error', (err) => {
61
- reject(err);
62
- });
63
-
64
- childProcess.stdout.on('data', (outDataItem) => {
65
- outData += outDataItem;
66
- });
67
-
68
- childProcess.stdout.on('end', () => {
69
- if (outErrors) {
70
- reject(outErrors);
71
- } else {
72
- try {
73
- const final = JSON.parse(outData);
74
- resolve(final);
75
- } catch (err) {
76
- reject(new TSError(err, {
77
- reason: 'processing script stdout pipe'
78
- }));
79
- }
80
- }
81
- });
82
-
83
- childProcess.stderr.on('data', (outError) => {
84
- outErrors += outError;
85
- });
86
-
87
- childProcess.on('close', (code) => {
88
- if (code === 0) return;
89
- reject(new Error('child process non-zero exit'));
90
- });
91
-
92
- childProcess.on('error', (err) => {
93
- reject(err);
94
- });
95
- }));
96
- }
97
- }
98
-
99
- function schema() {
100
- return {
101
- command: {
102
- doc: 'what command to run',
103
- default: 'echo',
104
- format: 'required_String'
105
- },
106
- args: {
107
- doc: 'arguments to pass along with the command',
108
- default: [],
109
- format(val) {
110
- if (!Array.isArray(val)) {
111
- throw new Error('args for script must be an array');
112
- }
113
- }
114
- },
115
- options: {
116
- doc: 'Obj containing options to pass into the process env',
117
- default: {}
118
- },
119
- asset: {
120
- doc: 'name of asset to use for op',
121
- default: 'echo',
122
- format: 'optional_String'
123
- }
124
- };
125
- }
126
-
127
- module.exports = {
128
- newProcessor,
129
- schema
130
- };
@@ -1,9 +0,0 @@
1
- 'use strict';
2
-
3
- const { legacyProcessorShim } = require('@terascope/job-components');
4
- const Processor = require('./processor');
5
- const Schema = require('./schema');
6
-
7
- // This file for backwards compatibility and functionality will be limited
8
- // but it should allow you to write processors using the new way today
9
- module.exports = legacyProcessorShim(Processor, Schema);
@@ -1,19 +0,0 @@
1
- 'use strict';
2
-
3
- /* eslint-disable no-console */
4
-
5
- const _ = require('lodash');
6
- const { BatchProcessor } = require('@terascope/job-components');
7
-
8
- class Stdout extends BatchProcessor {
9
- async onBatch(data) {
10
- if (this.opConfig.limit === 0) {
11
- console.log(data);
12
- } else {
13
- console.log(_.take(data, this.opConfig.limit));
14
- }
15
- return data;
16
- }
17
- }
18
-
19
- module.exports = Stdout;
@@ -1,18 +0,0 @@
1
- 'use strict';
2
-
3
- const { ConvictSchema } = require('@terascope/job-components');
4
-
5
- class Schema extends ConvictSchema {
6
- build() {
7
- return {
8
- limit: {
9
- doc: 'Specify a number > 0 to limit the number of results printed to the console log.'
10
- + 'This prints results from the beginning of the result set.',
11
- default: 0,
12
- format: 'nat'
13
- }
14
- };
15
- }
16
- }
17
-
18
- module.exports = Schema;
@@ -1,106 +0,0 @@
1
- 'use strict';
2
-
3
- const { makeLogger } = require('../workers/helpers/terafoundation');
4
- const { timeseriesIndex } = require('../utils/date_utils');
5
- const elasticsearchBackend = require('./backends/elasticsearch_store');
6
-
7
- // Module to manager job states in Elasticsearch.
8
- // All functions in this module return promises that must be resolved to
9
- // get the final result.
10
- module.exports = async function analyticsService(context) {
11
- const logger = makeLogger(context, 'analytics_storage');
12
- const config = context.sysconfig.teraslice;
13
- const workerId = `${context.sysconfig.teraslice.hostname}__${context.cluster.worker.id}`;
14
- const _index = `${config.name}__analytics`;
15
- // making this to pass down to backend for dynamic index searches
16
- const indexName = `${_index}*`;
17
- const timeseriesFormat = config.index_rollover_frequency.analytics;
18
-
19
- const backendConfig = {
20
- context,
21
- indexName,
22
- recordType: 'analytics',
23
- idField: '_id',
24
- fullResponse: false,
25
- logRecord: false,
26
- forceRefresh: false,
27
- storageName: 'analytics',
28
- };
29
-
30
- const backend = await elasticsearchBackend(backendConfig);
31
-
32
- async function log(job, sliceInfo, stats, state = 'completed') {
33
- const indexData = timeseriesIndex(timeseriesFormat, _index);
34
- const esIndex = indexData.index;
35
- const { timestamp } = indexData;
36
-
37
- // These records are uniquely identified by ex_id + slice_id
38
- const results = job.config.operations.map((op, index) => backend.bulk(
39
- {
40
- '@timestamp': timestamp,
41
- ex_id: job.config.ex_id,
42
- job_id: job.config.job_id,
43
- worker_id: workerId,
44
- slice_id: sliceInfo.slice_id,
45
- slicer_id: sliceInfo.slicer_id,
46
- op: op._op,
47
- state,
48
- order: index,
49
- count: stats.size[index],
50
- time: stats.time[index],
51
- memory: stats.memory[index],
52
- },
53
- 'index',
54
- esIndex
55
- ));
56
-
57
- return Promise.all(results);
58
- }
59
-
60
- async function getRecord(recordId, index) {
61
- return backend.get(recordId, index);
62
- }
63
-
64
- async function search(query, from, size, sort, fields) {
65
- return backend.search(query, from, size, sort, fields);
66
- }
67
-
68
- async function update(recordId, updateSpec, index) {
69
- return backend.update(recordId, updateSpec, index);
70
- }
71
-
72
- async function remove(recordId, index) {
73
- return backend.remove(recordId, index);
74
- }
75
-
76
- async function shutdown(forceShutdown) {
77
- logger.info('shutting down.');
78
- return backend.shutdown(forceShutdown);
79
- }
80
-
81
- async function refresh() {
82
- const { index } = timeseriesIndex(timeseriesFormat, _index);
83
- return backend.refresh(index);
84
- }
85
-
86
- function verifyClient() {
87
- return backend.verifyClient();
88
- }
89
-
90
- async function waitForClient() {
91
- return backend.waitForClient();
92
- }
93
-
94
- logger.info('analytics storage initialized');
95
- return {
96
- log,
97
- get: getRecord,
98
- search,
99
- update,
100
- remove,
101
- shutdown,
102
- refresh,
103
- waitForClient,
104
- verifyClient,
105
- };
106
- };