@certik/skynet 0.10.6 → 0.10.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.editorconfig +5 -5
- package/.eslintrc.js +13 -13
- package/.prettierrc.js +3 -3
- package/CHANGELOG.md +372 -372
- package/README.md +23 -23
- package/abi.js +353 -353
- package/ably.js +29 -0
- package/address.js +18 -18
- package/api.js +105 -105
- package/app.js +709 -709
- package/availability.js +58 -58
- package/block.js +83 -83
- package/cli.js +53 -53
- package/const.js +92 -92
- package/deploy.js +676 -676
- package/dynamodb.js +444 -444
- package/env.js +90 -90
- package/examples/api +73 -73
- package/examples/consumer +47 -47
- package/examples/indexer +65 -65
- package/examples/mode-indexer +82 -82
- package/examples/producer +80 -80
- package/indexer.js +595 -595
- package/inquiry.js +14 -14
- package/kafka.js +443 -443
- package/labelling.js +90 -90
- package/log.js +29 -29
- package/metric.js +65 -65
- package/monitor.js +191 -191
- package/opsgenie.js +55 -55
- package/package.json +37 -35
- package/price.js +48 -48
- package/primitive.js +77 -77
- package/proxy.js +157 -157
- package/rateLimit.js +21 -21
- package/s3.js +122 -122
- package/scan.js +74 -74
- package/selector.js +53 -53
- package/slack.js +87 -87
- package/snowflake.js +36 -36
- package/sqs.js +12 -12
- package/token.js +46 -46
- package/transaction.js +41 -41
- package/util.js +67 -67
- package/web3.js +117 -117
package/deploy.js
CHANGED
|
@@ -1,676 +1,676 @@
|
|
|
1
|
-
const fs = require("fs/promises");
|
|
2
|
-
const fso = require("fs");
|
|
3
|
-
const execa = require("execa");
|
|
4
|
-
const meow = require("meow");
|
|
5
|
-
const chalk = require("chalk");
|
|
6
|
-
const which = require("which");
|
|
7
|
-
const { getSelectorFlags, getSelectorDesc } = require("./selector");
|
|
8
|
-
const { getEnvOrThrow } = require("./env");
|
|
9
|
-
const { getBinaryName, detectSkynetDirectory } = require("./cli");
|
|
10
|
-
|
|
11
|
-
const INTERVAL_ALIASES = {
|
|
12
|
-
secondly: "*/1 * * * * * *",
|
|
13
|
-
"@secondly": "*/1 * * * * * *",
|
|
14
|
-
minutely: "0 * * * * * *",
|
|
15
|
-
"@minutely": "0 * * * * * *",
|
|
16
|
-
hourly: "0 0 * * * * *",
|
|
17
|
-
"@hourly": "0 0 * * * * *",
|
|
18
|
-
daily: "0 0 0 * * * *",
|
|
19
|
-
"@daily": "0 0 0 * * * *",
|
|
20
|
-
weekly: "0 0 0 * * 0 *",
|
|
21
|
-
"@weekly": "0 0 0 * * 0 *",
|
|
22
|
-
};
|
|
23
|
-
|
|
24
|
-
function buildEnvTemplate(additionalEnv, isProduction) {
|
|
25
|
-
return Object.keys(additionalEnv)
|
|
26
|
-
.map((key) => {
|
|
27
|
-
return `${key}="${additionalEnv[key] || getEnvironmentVariableValue(key, isProduction)}"`;
|
|
28
|
-
})
|
|
29
|
-
.join("\n");
|
|
30
|
-
}
|
|
31
|
-
|
|
32
|
-
function getEnvironmentVariableValue(name, isProduction) {
|
|
33
|
-
if (isProduction) {
|
|
34
|
-
return `{{key "secrets/${name}"}}`;
|
|
35
|
-
} else {
|
|
36
|
-
if (!process.env[name]) {
|
|
37
|
-
return "";
|
|
38
|
-
}
|
|
39
|
-
|
|
40
|
-
return process.env[name];
|
|
41
|
-
}
|
|
42
|
-
}
|
|
43
|
-
|
|
44
|
-
const genConfig = ({
|
|
45
|
-
jobName,
|
|
46
|
-
workingDirectory,
|
|
47
|
-
cmd,
|
|
48
|
-
cron,
|
|
49
|
-
restart,
|
|
50
|
-
cpu,
|
|
51
|
-
mem,
|
|
52
|
-
service,
|
|
53
|
-
additionalEnv = [],
|
|
54
|
-
region = "us-east-1",
|
|
55
|
-
isProduction,
|
|
56
|
-
}) => `job "${jobName}" {
|
|
57
|
-
datacenters = ["${region}"]
|
|
58
|
-
|
|
59
|
-
type = "batch"
|
|
60
|
-
|
|
61
|
-
${
|
|
62
|
-
cron
|
|
63
|
-
? `# Triggers periodically
|
|
64
|
-
periodic {
|
|
65
|
-
cron = "${cron}"
|
|
66
|
-
prohibit_overlap = true
|
|
67
|
-
}`
|
|
68
|
-
: ""
|
|
69
|
-
}
|
|
70
|
-
|
|
71
|
-
constraint {
|
|
72
|
-
attribute = "\${meta.has_nodejs}"
|
|
73
|
-
value = "true"
|
|
74
|
-
}
|
|
75
|
-
|
|
76
|
-
constraint {
|
|
77
|
-
attribute = "\${meta.has_skynet}"
|
|
78
|
-
value = "true"
|
|
79
|
-
}
|
|
80
|
-
|
|
81
|
-
group "default" {
|
|
82
|
-
reschedule {
|
|
83
|
-
attempts = 0
|
|
84
|
-
unlimited = false
|
|
85
|
-
}
|
|
86
|
-
|
|
87
|
-
${
|
|
88
|
-
service
|
|
89
|
-
? `# Setup Service Network
|
|
90
|
-
network {
|
|
91
|
-
port "http" {
|
|
92
|
-
static = ${service.port}
|
|
93
|
-
}
|
|
94
|
-
}`
|
|
95
|
-
: ""
|
|
96
|
-
}
|
|
97
|
-
|
|
98
|
-
task "log-shipper" {
|
|
99
|
-
driver = "raw_exec"
|
|
100
|
-
|
|
101
|
-
config {
|
|
102
|
-
command = "sh"
|
|
103
|
-
args = [
|
|
104
|
-
"-c",
|
|
105
|
-
"cd \${meta.skynet_code_path}/infra-nomad/log-shipper && yarn install --silent && exec bin/shipper --path ${jobName}"
|
|
106
|
-
]
|
|
107
|
-
}
|
|
108
|
-
|
|
109
|
-
env {
|
|
110
|
-
SKYNET_ENVIRONMENT="${isProduction ? "prd" : "dev"}"
|
|
111
|
-
AWS_REGION="${region}"
|
|
112
|
-
}
|
|
113
|
-
|
|
114
|
-
kill_timeout = "120s"
|
|
115
|
-
|
|
116
|
-
resources {
|
|
117
|
-
cpu = 100 # MHz
|
|
118
|
-
memory = 100 # MB
|
|
119
|
-
}
|
|
120
|
-
|
|
121
|
-
restart {
|
|
122
|
-
attempts = 0
|
|
123
|
-
mode = "fail"
|
|
124
|
-
}
|
|
125
|
-
}
|
|
126
|
-
|
|
127
|
-
task "main" {
|
|
128
|
-
driver = "raw_exec"
|
|
129
|
-
|
|
130
|
-
config {
|
|
131
|
-
command = "sh"
|
|
132
|
-
args = [
|
|
133
|
-
"-c",
|
|
134
|
-
"cd \${meta.skynet_code_path}/${workingDirectory} && yarn install --silent && exec ${cmd}"
|
|
135
|
-
]
|
|
136
|
-
}
|
|
137
|
-
|
|
138
|
-
${
|
|
139
|
-
service
|
|
140
|
-
? `# Setup API Routes
|
|
141
|
-
service {
|
|
142
|
-
name = "${jobName}"
|
|
143
|
-
port = "http"
|
|
144
|
-
|
|
145
|
-
tags = [
|
|
146
|
-
"urlprefix-${service.prefix} strip=${service.prefix}",
|
|
147
|
-
]
|
|
148
|
-
|
|
149
|
-
check {
|
|
150
|
-
type = "http"
|
|
151
|
-
path = "/"
|
|
152
|
-
interval = "10s"
|
|
153
|
-
timeout = "2s"
|
|
154
|
-
}
|
|
155
|
-
}
|
|
156
|
-
`
|
|
157
|
-
: ""
|
|
158
|
-
}
|
|
159
|
-
|
|
160
|
-
template {
|
|
161
|
-
change_mode = "restart"
|
|
162
|
-
destination = "secrets/context.env"
|
|
163
|
-
env = true
|
|
164
|
-
|
|
165
|
-
data = <<EOH
|
|
166
|
-
${buildEnvTemplate(additionalEnv, isProduction)}
|
|
167
|
-
EOH
|
|
168
|
-
}
|
|
169
|
-
|
|
170
|
-
# It is possible to set environment variables which will be
|
|
171
|
-
# available to the task when it runs.
|
|
172
|
-
env {
|
|
173
|
-
SKYNET_ENVIRONMENT="${isProduction ? "prd" : "dev"}"
|
|
174
|
-
AWS_REGION="${region}"
|
|
175
|
-
}
|
|
176
|
-
|
|
177
|
-
# Specify the maximum resources required to run the task,
|
|
178
|
-
# include CPU and memory.
|
|
179
|
-
resources {
|
|
180
|
-
cpu = ${cpu} # MHz
|
|
181
|
-
memory = ${mem} # MB
|
|
182
|
-
}
|
|
183
|
-
|
|
184
|
-
# Setting the server task as the leader of the task group allows Nomad to
|
|
185
|
-
# signal the log shipper task to gracefully shutdown when the server exits.
|
|
186
|
-
leader = true
|
|
187
|
-
|
|
188
|
-
${
|
|
189
|
-
restart
|
|
190
|
-
? `
|
|
191
|
-
# Restart the job if it fails
|
|
192
|
-
restart {
|
|
193
|
-
attempts = ${restart.attempts ?? 2}
|
|
194
|
-
mode = "${restart.mode ?? "fail"}"
|
|
195
|
-
interval = "${restart.interval ?? "30m"}"
|
|
196
|
-
delay = "${restart.delay ?? "15s"}"
|
|
197
|
-
}
|
|
198
|
-
`
|
|
199
|
-
: `
|
|
200
|
-
# do not retry from the periodical job will reschedule anyway
|
|
201
|
-
restart {
|
|
202
|
-
attempts = 0
|
|
203
|
-
mode = "fail"
|
|
204
|
-
}`
|
|
205
|
-
}
|
|
206
|
-
}
|
|
207
|
-
}
|
|
208
|
-
}`;
|
|
209
|
-
|
|
210
|
-
function normalizeSelectorValue(v) {
|
|
211
|
-
return v.replace(/[^A-Za-z0-9]+/g, "-");
|
|
212
|
-
}
|
|
213
|
-
|
|
214
|
-
function getJobName(name, selectorFlags, mode = null) {
|
|
215
|
-
const selectorNamePart = Object.keys(selectorFlags)
|
|
216
|
-
.sort()
|
|
217
|
-
.map((name) => selectorFlags[name])
|
|
218
|
-
.join("-");
|
|
219
|
-
|
|
220
|
-
let jobName = name;
|
|
221
|
-
|
|
222
|
-
if (mode) {
|
|
223
|
-
jobName += `-${mode}`;
|
|
224
|
-
}
|
|
225
|
-
|
|
226
|
-
if (selectorNamePart.length > 0) {
|
|
227
|
-
// handle special case
|
|
228
|
-
jobName += `-${normalizeSelectorValue(selectorNamePart)}`;
|
|
229
|
-
}
|
|
230
|
-
|
|
231
|
-
return jobName;
|
|
232
|
-
}
|
|
233
|
-
|
|
234
|
-
async function prepareNomad(isProduction) {
|
|
235
|
-
if (isProduction) {
|
|
236
|
-
console.log("Deploy to Production");
|
|
237
|
-
} else {
|
|
238
|
-
const skynetDir = detectSkynetDirectory();
|
|
239
|
-
|
|
240
|
-
if (!fso.existsSync("/tmp/skynet")) {
|
|
241
|
-
await execa("ln", ["-s", skynetDir, "/tmp/skynet"]);
|
|
242
|
-
}
|
|
243
|
-
|
|
244
|
-
console.log("Deploy locally, please start nomad server in a separate terminal");
|
|
245
|
-
console.log(`You can start nomad server by running ${chalk.inverse(`${skynetDir}/infra-nomad/dev/start.sh`)}`);
|
|
246
|
-
console.log(
|
|
247
|
-
`Then you can visit ${chalk.underline("http://localhost:4646/ui/jobs")} to check submitted dev jobs.\n`
|
|
248
|
-
);
|
|
249
|
-
}
|
|
250
|
-
}
|
|
251
|
-
|
|
252
|
-
async function getNomadAddr(isProduction) {
|
|
253
|
-
let nomadAddr;
|
|
254
|
-
|
|
255
|
-
if (isProduction) {
|
|
256
|
-
nomadAddr = getEnvOrThrow("SKYNET_NOMAD_PRODUCTION_ADDR");
|
|
257
|
-
} else {
|
|
258
|
-
nomadAddr = "http://127.0.0.1:4646";
|
|
259
|
-
}
|
|
260
|
-
|
|
261
|
-
return nomadAddr;
|
|
262
|
-
}
|
|
263
|
-
|
|
264
|
-
async function getNomadPath() {
|
|
265
|
-
try {
|
|
266
|
-
return await which("nomad");
|
|
267
|
-
} catch (missingNomad) {
|
|
268
|
-
console.log(
|
|
269
|
-
`Deploy requires ${chalk.bold("nomad")} binary, please follow ${chalk.underline(
|
|
270
|
-
"https://learn.hashicorp.com/tutorials/nomad/get-started-install"
|
|
271
|
-
)} for installation`
|
|
272
|
-
);
|
|
273
|
-
|
|
274
|
-
throw new Error("missing nomad binary");
|
|
275
|
-
}
|
|
276
|
-
}
|
|
277
|
-
|
|
278
|
-
async function runNomadJob(nomadPath, nomadAddr, jobName, nomadJobDefinition, isStop, isDryRun) {
|
|
279
|
-
try {
|
|
280
|
-
if (isStop) {
|
|
281
|
-
const nomad = execa(nomadPath, ["job", "stop", jobName], {
|
|
282
|
-
env: {
|
|
283
|
-
NOMAD_ADDR: nomadAddr,
|
|
284
|
-
},
|
|
285
|
-
});
|
|
286
|
-
nomad.stdout.pipe(process.stdout);
|
|
287
|
-
await nomad;
|
|
288
|
-
|
|
289
|
-
console.log(chalk.green(`Stopped nomad job ${jobName} in ${nomadAddr}`));
|
|
290
|
-
} else if (isDryRun) {
|
|
291
|
-
console.log("Definition for", jobName);
|
|
292
|
-
console.log("========================================");
|
|
293
|
-
console.log(nomadJobDefinition);
|
|
294
|
-
} else {
|
|
295
|
-
const jobFileName = `/tmp/job-${jobName}`;
|
|
296
|
-
|
|
297
|
-
await fs.writeFile(jobFileName, nomadJobDefinition);
|
|
298
|
-
|
|
299
|
-
const nomad = execa(nomadPath, ["job", "run", jobFileName], {
|
|
300
|
-
env: {
|
|
301
|
-
NOMAD_ADDR: nomadAddr,
|
|
302
|
-
},
|
|
303
|
-
});
|
|
304
|
-
nomad.stdout.pipe(process.stdout);
|
|
305
|
-
await nomad;
|
|
306
|
-
|
|
307
|
-
console.log(chalk.green(`Deployed nomad job ${jobName} to ${nomadAddr}`));
|
|
308
|
-
}
|
|
309
|
-
} catch (nomadExecErr) {
|
|
310
|
-
console.log("Nomad Execution Error:");
|
|
311
|
-
console.log(nomadExecErr.message);
|
|
312
|
-
console.log("");
|
|
313
|
-
|
|
314
|
-
console.log(
|
|
315
|
-
`Failed to run ${chalk.bold("nomad")} commands, please ensure nomad server is accessible at ${chalk.bold(
|
|
316
|
-
nomadAddr
|
|
317
|
-
)}`
|
|
318
|
-
);
|
|
319
|
-
|
|
320
|
-
throw new Error("nomad execution error");
|
|
321
|
-
}
|
|
322
|
-
}
|
|
323
|
-
|
|
324
|
-
function createModeDeploy({
|
|
325
|
-
binaryName,
|
|
326
|
-
name,
|
|
327
|
-
workingDirectory,
|
|
328
|
-
bin = "bin/indexer",
|
|
329
|
-
selector = {},
|
|
330
|
-
env = {},
|
|
331
|
-
region = "us-east-1",
|
|
332
|
-
check,
|
|
333
|
-
deltaSchedule,
|
|
334
|
-
validateSchedule,
|
|
335
|
-
deltaCpu,
|
|
336
|
-
deltaMem,
|
|
337
|
-
rebuildCpu,
|
|
338
|
-
rebuildMem,
|
|
339
|
-
validateCpu,
|
|
340
|
-
validateMem,
|
|
341
|
-
}) {
|
|
342
|
-
async function deployMode({
|
|
343
|
-
mode,
|
|
344
|
-
from,
|
|
345
|
-
to,
|
|
346
|
-
stop,
|
|
347
|
-
production,
|
|
348
|
-
dryRun,
|
|
349
|
-
verbose,
|
|
350
|
-
schedule: cmdSchedule,
|
|
351
|
-
...selectorFlags
|
|
352
|
-
}) {
|
|
353
|
-
if (mode === "delta") {
|
|
354
|
-
// delta mode will ignore from/to flags
|
|
355
|
-
from = 0;
|
|
356
|
-
to = 0;
|
|
357
|
-
}
|
|
358
|
-
|
|
359
|
-
const isPeriodic = from === 0 && to === 0 && ["delta", "validate"].includes(mode);
|
|
360
|
-
|
|
361
|
-
const jobName = getJobName(name, selectorFlags, mode);
|
|
362
|
-
|
|
363
|
-
const selectorCmdPart = Object.keys(selectorFlags)
|
|
364
|
-
.sort()
|
|
365
|
-
.map((name) => `--${name} ${selectorFlags[name]}`)
|
|
366
|
-
.join(" ");
|
|
367
|
-
let args = `--mode ${mode} ${selectorCmdPart}`;
|
|
368
|
-
|
|
369
|
-
if (verbose) {
|
|
370
|
-
args += ` --verbose`;
|
|
371
|
-
}
|
|
372
|
-
|
|
373
|
-
let rangeArgs = "";
|
|
374
|
-
|
|
375
|
-
if (from > 0) {
|
|
376
|
-
rangeArgs += ` --from ${from}`;
|
|
377
|
-
}
|
|
378
|
-
|
|
379
|
-
if (to > 0) {
|
|
380
|
-
rangeArgs += ` --to ${to}`;
|
|
381
|
-
}
|
|
382
|
-
|
|
383
|
-
const modeResouces = {
|
|
384
|
-
rebuild: { cpu: rebuildCpu, mem: rebuildMem },
|
|
385
|
-
"resume-rebuild": { cpu: rebuildCpu, mem: rebuildMem },
|
|
386
|
-
validate: { cpu: validateCpu || rebuildCpu, mem: validateMem || rebuildMem },
|
|
387
|
-
delta: { cpu: deltaCpu, mem: deltaMem },
|
|
388
|
-
};
|
|
389
|
-
|
|
390
|
-
// by default use delta cpu/mem settings
|
|
391
|
-
const { cpu, mem } = modeResouces[mode] || modeResouces.delta;
|
|
392
|
-
|
|
393
|
-
let deltaCron = typeof deltaSchedule === "function" ? deltaSchedule(jobName) : deltaSchedule;
|
|
394
|
-
|
|
395
|
-
if (deltaSchedule && cmdSchedule) {
|
|
396
|
-
deltaCron = cmdSchedule;
|
|
397
|
-
}
|
|
398
|
-
|
|
399
|
-
let validateCron = typeof validateSchedule === "function" ? validateSchedule(jobName) : validateSchedule;
|
|
400
|
-
|
|
401
|
-
if (validateSchedule && cmdSchedule) {
|
|
402
|
-
validateCron = cmdSchedule;
|
|
403
|
-
}
|
|
404
|
-
|
|
405
|
-
const modeIntervals = {
|
|
406
|
-
delta: INTERVAL_ALIASES[deltaCron] || deltaCron,
|
|
407
|
-
validate: INTERVAL_ALIASES[validateCron] || validateCron,
|
|
408
|
-
};
|
|
409
|
-
|
|
410
|
-
const mainJobDefinition = genConfig({
|
|
411
|
-
jobName,
|
|
412
|
-
cron: isPeriodic && modeIntervals[mode],
|
|
413
|
-
workingDirectory,
|
|
414
|
-
additionalEnv: env,
|
|
415
|
-
region,
|
|
416
|
-
cmd: `${bin} ${args} ${rangeArgs}`,
|
|
417
|
-
cpu,
|
|
418
|
-
mem,
|
|
419
|
-
isProduction: production,
|
|
420
|
-
});
|
|
421
|
-
|
|
422
|
-
const nomadPath = await getNomadPath();
|
|
423
|
-
await prepareNomad(production);
|
|
424
|
-
const nomadAddr = await getNomadAddr(production);
|
|
425
|
-
|
|
426
|
-
await runNomadJob(nomadPath, nomadAddr, jobName, mainJobDefinition, stop, dryRun);
|
|
427
|
-
|
|
428
|
-
if (check && check.bin) {
|
|
429
|
-
console.log("");
|
|
430
|
-
|
|
431
|
-
const monitorJobName = `${jobName}-monitor`;
|
|
432
|
-
const monitorJobDefinition = genConfig({
|
|
433
|
-
jobName: monitorJobName,
|
|
434
|
-
cron: INTERVAL_ALIASES[check.schedule] || check.schedule,
|
|
435
|
-
workingDirectory,
|
|
436
|
-
additionalEnv: {
|
|
437
|
-
...env,
|
|
438
|
-
SKYNET_NOMAD_PRODUCTION_ADDR: null,
|
|
439
|
-
SKYNET_SLACK_TOKEN: null,
|
|
440
|
-
OPSGENIE_API_KEY: null,
|
|
441
|
-
OPSGENIE_END_POINT: null,
|
|
442
|
-
},
|
|
443
|
-
region,
|
|
444
|
-
cmd: `${check.bin} ${args} ${production ? "--production" : ""}`,
|
|
445
|
-
cpu: check.cpu || 100,
|
|
446
|
-
mem: check.mem || 100,
|
|
447
|
-
isProduction: production,
|
|
448
|
-
});
|
|
449
|
-
|
|
450
|
-
await runNomadJob(nomadPath, nomadAddr, monitorJobName, monitorJobDefinition, stop, dryRun);
|
|
451
|
-
}
|
|
452
|
-
}
|
|
453
|
-
|
|
454
|
-
function deploy() {
|
|
455
|
-
if (!binaryName) {
|
|
456
|
-
binaryName = getBinaryName();
|
|
457
|
-
}
|
|
458
|
-
|
|
459
|
-
const cli = meow(
|
|
460
|
-
`
|
|
461
|
-
Usage
|
|
462
|
-
|
|
463
|
-
$ ${binaryName} <options>
|
|
464
|
-
|
|
465
|
-
Options
|
|
466
|
-
${getSelectorDesc(selector)}
|
|
467
|
-
--mode could be delta/rebuild/resume-rebuild/validate/one/range/reset
|
|
468
|
-
--from min id to build
|
|
469
|
-
--to max id to build
|
|
470
|
-
--stop stop job instead of running the job
|
|
471
|
-
--production deploy to production, default is development
|
|
472
|
-
--schedule override default schedule, support aliases: secondly, minutely, hourly, daily, weekly
|
|
473
|
-
--verbose Output debug messages
|
|
474
|
-
--dry-run print nomad job file but do not really execute it
|
|
475
|
-
|
|
476
|
-
Examples
|
|
477
|
-
${binaryName} --mode delta
|
|
478
|
-
${binaryName} --mode rebuild
|
|
479
|
-
${binaryName} --mode validate
|
|
480
|
-
`,
|
|
481
|
-
{
|
|
482
|
-
description: false,
|
|
483
|
-
version: false,
|
|
484
|
-
flags: {
|
|
485
|
-
...getSelectorFlags(selector),
|
|
486
|
-
mode: {
|
|
487
|
-
type: "string",
|
|
488
|
-
default: "delta",
|
|
489
|
-
},
|
|
490
|
-
from: {
|
|
491
|
-
alias: "since",
|
|
492
|
-
type: "number",
|
|
493
|
-
default: 0,
|
|
494
|
-
},
|
|
495
|
-
to: {
|
|
496
|
-
alias: "until",
|
|
497
|
-
type: "number",
|
|
498
|
-
default: 0,
|
|
499
|
-
},
|
|
500
|
-
schedule: {
|
|
501
|
-
type: "string",
|
|
502
|
-
},
|
|
503
|
-
verbose: {
|
|
504
|
-
type: "boolean",
|
|
505
|
-
default: false,
|
|
506
|
-
},
|
|
507
|
-
production: {
|
|
508
|
-
alias: "prd",
|
|
509
|
-
type: "boolean",
|
|
510
|
-
default: false,
|
|
511
|
-
},
|
|
512
|
-
dryRun: {
|
|
513
|
-
type: "boolean",
|
|
514
|
-
default: false,
|
|
515
|
-
},
|
|
516
|
-
stop: {
|
|
517
|
-
type: "boolean",
|
|
518
|
-
default: false,
|
|
519
|
-
},
|
|
520
|
-
},
|
|
521
|
-
}
|
|
522
|
-
);
|
|
523
|
-
|
|
524
|
-
deployMode(cli.flags).catch((err) => {
|
|
525
|
-
console.error(err);
|
|
526
|
-
process.exit(1);
|
|
527
|
-
});
|
|
528
|
-
}
|
|
529
|
-
|
|
530
|
-
return { deploy };
|
|
531
|
-
}
|
|
532
|
-
|
|
533
|
-
function createDeploy({
|
|
534
|
-
binaryName,
|
|
535
|
-
name,
|
|
536
|
-
workingDirectory,
|
|
537
|
-
bin = "bin/indexer",
|
|
538
|
-
selector = {},
|
|
539
|
-
region = "us-east-1",
|
|
540
|
-
env = {},
|
|
541
|
-
check,
|
|
542
|
-
schedule,
|
|
543
|
-
restart,
|
|
544
|
-
cpu,
|
|
545
|
-
mem,
|
|
546
|
-
service,
|
|
547
|
-
}) {
|
|
548
|
-
async function deployModeless({ production, stop, dryRun, verbose, schedule: cmdSchedule, ...selectorFlags }) {
|
|
549
|
-
const jobName = getJobName(name, selectorFlags, null);
|
|
550
|
-
|
|
551
|
-
const selectorCmdPart = Object.keys(selectorFlags)
|
|
552
|
-
.sort()
|
|
553
|
-
.map((name) => `--${name} ${selectorFlags[name]}`)
|
|
554
|
-
.join(" ");
|
|
555
|
-
let args = `${selectorCmdPart}`;
|
|
556
|
-
|
|
557
|
-
if (verbose) {
|
|
558
|
-
args += ` --verbose`;
|
|
559
|
-
}
|
|
560
|
-
|
|
561
|
-
let cron = typeof schedule === "function" ? schedule(jobName) : schedule;
|
|
562
|
-
|
|
563
|
-
if (schedule && cmdSchedule) {
|
|
564
|
-
// cmd schedule has higher priority
|
|
565
|
-
cron = cmdSchedule;
|
|
566
|
-
}
|
|
567
|
-
|
|
568
|
-
const nomadJobDefinition = genConfig({
|
|
569
|
-
jobName,
|
|
570
|
-
cron: INTERVAL_ALIASES[cron] || cron,
|
|
571
|
-
restart,
|
|
572
|
-
workingDirectory,
|
|
573
|
-
additionalEnv: env,
|
|
574
|
-
region,
|
|
575
|
-
cmd: `${bin} ${args}`,
|
|
576
|
-
cpu,
|
|
577
|
-
mem,
|
|
578
|
-
service,
|
|
579
|
-
isProduction: production,
|
|
580
|
-
});
|
|
581
|
-
|
|
582
|
-
const nomadPath = await getNomadPath();
|
|
583
|
-
await prepareNomad(production);
|
|
584
|
-
const nomadAddr = await getNomadAddr(production);
|
|
585
|
-
|
|
586
|
-
await runNomadJob(nomadPath, nomadAddr, jobName, nomadJobDefinition, stop, dryRun);
|
|
587
|
-
|
|
588
|
-
if (check && check.bin) {
|
|
589
|
-
console.log("");
|
|
590
|
-
|
|
591
|
-
const monitorJobName = `${jobName}-monitor`;
|
|
592
|
-
const monitorJobDefinition = genConfig({
|
|
593
|
-
jobName: monitorJobName,
|
|
594
|
-
cron: INTERVAL_ALIASES[check.schedule] || check.schedule,
|
|
595
|
-
workingDirectory,
|
|
596
|
-
additionalEnv: {
|
|
597
|
-
...env,
|
|
598
|
-
SKYNET_NOMAD_PRODUCTION_ADDR: null,
|
|
599
|
-
SKYNET_SLACK_TOKEN: null,
|
|
600
|
-
OPSGENIE_API_KEY: null,
|
|
601
|
-
OPSGENIE_END_POINT: null,
|
|
602
|
-
},
|
|
603
|
-
region,
|
|
604
|
-
cmd: `${check.bin} ${args} ${production ? "--production" : ""}`,
|
|
605
|
-
cpu: check.cpu || 100,
|
|
606
|
-
mem: check.mem || 100,
|
|
607
|
-
isProduction: production,
|
|
608
|
-
});
|
|
609
|
-
|
|
610
|
-
await runNomadJob(nomadPath, nomadAddr, monitorJobName, monitorJobDefinition, stop, dryRun);
|
|
611
|
-
}
|
|
612
|
-
}
|
|
613
|
-
|
|
614
|
-
function deploy() {
|
|
615
|
-
if (!binaryName) {
|
|
616
|
-
binaryName = getBinaryName();
|
|
617
|
-
}
|
|
618
|
-
|
|
619
|
-
const cli = meow(
|
|
620
|
-
`
|
|
621
|
-
Usage
|
|
622
|
-
|
|
623
|
-
$ ${binaryName} <options>
|
|
624
|
-
|
|
625
|
-
Options
|
|
626
|
-
${getSelectorDesc(selector)}
|
|
627
|
-
--stop stop job instead of running the job
|
|
628
|
-
--production deploy to production, default is development
|
|
629
|
-
--schedule override default schedule, support aliases: secondly, minutely, hourly, daily, weekly
|
|
630
|
-
--verbose Output debug messages
|
|
631
|
-
--dry-run print nomad job file but do not really execute it
|
|
632
|
-
`,
|
|
633
|
-
{
|
|
634
|
-
description: false,
|
|
635
|
-
version: false,
|
|
636
|
-
flags: {
|
|
637
|
-
...getSelectorFlags(selector),
|
|
638
|
-
schedule: {
|
|
639
|
-
type: "string",
|
|
640
|
-
},
|
|
641
|
-
verbose: {
|
|
642
|
-
type: "boolean",
|
|
643
|
-
default: false,
|
|
644
|
-
},
|
|
645
|
-
production: {
|
|
646
|
-
alias: "prd",
|
|
647
|
-
type: "boolean",
|
|
648
|
-
default: false,
|
|
649
|
-
},
|
|
650
|
-
dryRun: {
|
|
651
|
-
type: "boolean",
|
|
652
|
-
default: false,
|
|
653
|
-
},
|
|
654
|
-
stop: {
|
|
655
|
-
type: "boolean",
|
|
656
|
-
default: false,
|
|
657
|
-
},
|
|
658
|
-
},
|
|
659
|
-
}
|
|
660
|
-
);
|
|
661
|
-
|
|
662
|
-
deployModeless(cli.flags).catch((err) => {
|
|
663
|
-
console.error(err);
|
|
664
|
-
process.exit(1);
|
|
665
|
-
});
|
|
666
|
-
}
|
|
667
|
-
|
|
668
|
-
return { deploy };
|
|
669
|
-
}
|
|
670
|
-
|
|
671
|
-
module.exports = {
|
|
672
|
-
getJobName,
|
|
673
|
-
getNomadAddr,
|
|
674
|
-
createModeDeploy,
|
|
675
|
-
createDeploy,
|
|
676
|
-
};
|
|
1
|
+
const fs = require("fs/promises");
|
|
2
|
+
const fso = require("fs");
|
|
3
|
+
const execa = require("execa");
|
|
4
|
+
const meow = require("meow");
|
|
5
|
+
const chalk = require("chalk");
|
|
6
|
+
const which = require("which");
|
|
7
|
+
const { getSelectorFlags, getSelectorDesc } = require("./selector");
|
|
8
|
+
const { getEnvOrThrow } = require("./env");
|
|
9
|
+
const { getBinaryName, detectSkynetDirectory } = require("./cli");
|
|
10
|
+
|
|
11
|
+
const INTERVAL_ALIASES = {
|
|
12
|
+
secondly: "*/1 * * * * * *",
|
|
13
|
+
"@secondly": "*/1 * * * * * *",
|
|
14
|
+
minutely: "0 * * * * * *",
|
|
15
|
+
"@minutely": "0 * * * * * *",
|
|
16
|
+
hourly: "0 0 * * * * *",
|
|
17
|
+
"@hourly": "0 0 * * * * *",
|
|
18
|
+
daily: "0 0 0 * * * *",
|
|
19
|
+
"@daily": "0 0 0 * * * *",
|
|
20
|
+
weekly: "0 0 0 * * 0 *",
|
|
21
|
+
"@weekly": "0 0 0 * * 0 *",
|
|
22
|
+
};
|
|
23
|
+
|
|
24
|
+
function buildEnvTemplate(additionalEnv, isProduction) {
|
|
25
|
+
return Object.keys(additionalEnv)
|
|
26
|
+
.map((key) => {
|
|
27
|
+
return `${key}="${additionalEnv[key] || getEnvironmentVariableValue(key, isProduction)}"`;
|
|
28
|
+
})
|
|
29
|
+
.join("\n");
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
function getEnvironmentVariableValue(name, isProduction) {
|
|
33
|
+
if (isProduction) {
|
|
34
|
+
return `{{key "secrets/${name}"}}`;
|
|
35
|
+
} else {
|
|
36
|
+
if (!process.env[name]) {
|
|
37
|
+
return "";
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
return process.env[name];
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
const genConfig = ({
|
|
45
|
+
jobName,
|
|
46
|
+
workingDirectory,
|
|
47
|
+
cmd,
|
|
48
|
+
cron,
|
|
49
|
+
restart,
|
|
50
|
+
cpu,
|
|
51
|
+
mem,
|
|
52
|
+
service,
|
|
53
|
+
additionalEnv = [],
|
|
54
|
+
region = "us-east-1",
|
|
55
|
+
isProduction,
|
|
56
|
+
}) => `job "${jobName}" {
|
|
57
|
+
datacenters = ["${region}"]
|
|
58
|
+
|
|
59
|
+
type = "batch"
|
|
60
|
+
|
|
61
|
+
${
|
|
62
|
+
cron
|
|
63
|
+
? `# Triggers periodically
|
|
64
|
+
periodic {
|
|
65
|
+
cron = "${cron}"
|
|
66
|
+
prohibit_overlap = true
|
|
67
|
+
}`
|
|
68
|
+
: ""
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
constraint {
|
|
72
|
+
attribute = "\${meta.has_nodejs}"
|
|
73
|
+
value = "true"
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
constraint {
|
|
77
|
+
attribute = "\${meta.has_skynet}"
|
|
78
|
+
value = "true"
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
group "default" {
|
|
82
|
+
reschedule {
|
|
83
|
+
attempts = 0
|
|
84
|
+
unlimited = false
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
${
|
|
88
|
+
service
|
|
89
|
+
? `# Setup Service Network
|
|
90
|
+
network {
|
|
91
|
+
port "http" {
|
|
92
|
+
static = ${service.port}
|
|
93
|
+
}
|
|
94
|
+
}`
|
|
95
|
+
: ""
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
task "log-shipper" {
|
|
99
|
+
driver = "raw_exec"
|
|
100
|
+
|
|
101
|
+
config {
|
|
102
|
+
command = "sh"
|
|
103
|
+
args = [
|
|
104
|
+
"-c",
|
|
105
|
+
"cd \${meta.skynet_code_path}/infra-nomad/log-shipper && yarn install --silent && exec bin/shipper --path ${jobName}"
|
|
106
|
+
]
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
env {
|
|
110
|
+
SKYNET_ENVIRONMENT="${isProduction ? "prd" : "dev"}"
|
|
111
|
+
AWS_REGION="${region}"
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
kill_timeout = "120s"
|
|
115
|
+
|
|
116
|
+
resources {
|
|
117
|
+
cpu = 100 # MHz
|
|
118
|
+
memory = 100 # MB
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
restart {
|
|
122
|
+
attempts = 0
|
|
123
|
+
mode = "fail"
|
|
124
|
+
}
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
task "main" {
|
|
128
|
+
driver = "raw_exec"
|
|
129
|
+
|
|
130
|
+
config {
|
|
131
|
+
command = "sh"
|
|
132
|
+
args = [
|
|
133
|
+
"-c",
|
|
134
|
+
"cd \${meta.skynet_code_path}/${workingDirectory} && yarn install --silent && exec ${cmd}"
|
|
135
|
+
]
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
${
|
|
139
|
+
service
|
|
140
|
+
? `# Setup API Routes
|
|
141
|
+
service {
|
|
142
|
+
name = "${jobName}"
|
|
143
|
+
port = "http"
|
|
144
|
+
|
|
145
|
+
tags = [
|
|
146
|
+
"urlprefix-${service.prefix} strip=${service.prefix}",
|
|
147
|
+
]
|
|
148
|
+
|
|
149
|
+
check {
|
|
150
|
+
type = "http"
|
|
151
|
+
path = "/"
|
|
152
|
+
interval = "10s"
|
|
153
|
+
timeout = "2s"
|
|
154
|
+
}
|
|
155
|
+
}
|
|
156
|
+
`
|
|
157
|
+
: ""
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
template {
|
|
161
|
+
change_mode = "restart"
|
|
162
|
+
destination = "secrets/context.env"
|
|
163
|
+
env = true
|
|
164
|
+
|
|
165
|
+
data = <<EOH
|
|
166
|
+
${buildEnvTemplate(additionalEnv, isProduction)}
|
|
167
|
+
EOH
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
# It is possible to set environment variables which will be
|
|
171
|
+
# available to the task when it runs.
|
|
172
|
+
env {
|
|
173
|
+
SKYNET_ENVIRONMENT="${isProduction ? "prd" : "dev"}"
|
|
174
|
+
AWS_REGION="${region}"
|
|
175
|
+
}
|
|
176
|
+
|
|
177
|
+
# Specify the maximum resources required to run the task,
|
|
178
|
+
# include CPU and memory.
|
|
179
|
+
resources {
|
|
180
|
+
cpu = ${cpu} # MHz
|
|
181
|
+
memory = ${mem} # MB
|
|
182
|
+
}
|
|
183
|
+
|
|
184
|
+
# Setting the server task as the leader of the task group allows Nomad to
|
|
185
|
+
# signal the log shipper task to gracefully shutdown when the server exits.
|
|
186
|
+
leader = true
|
|
187
|
+
|
|
188
|
+
${
|
|
189
|
+
restart
|
|
190
|
+
? `
|
|
191
|
+
# Restart the job if it fails
|
|
192
|
+
restart {
|
|
193
|
+
attempts = ${restart.attempts ?? 2}
|
|
194
|
+
mode = "${restart.mode ?? "fail"}"
|
|
195
|
+
interval = "${restart.interval ?? "30m"}"
|
|
196
|
+
delay = "${restart.delay ?? "15s"}"
|
|
197
|
+
}
|
|
198
|
+
`
|
|
199
|
+
: `
|
|
200
|
+
# do not retry from the periodical job will reschedule anyway
|
|
201
|
+
restart {
|
|
202
|
+
attempts = 0
|
|
203
|
+
mode = "fail"
|
|
204
|
+
}`
|
|
205
|
+
}
|
|
206
|
+
}
|
|
207
|
+
}
|
|
208
|
+
}`;
|
|
209
|
+
|
|
210
|
+
function normalizeSelectorValue(v) {
|
|
211
|
+
return v.replace(/[^A-Za-z0-9]+/g, "-");
|
|
212
|
+
}
|
|
213
|
+
|
|
214
|
+
function getJobName(name, selectorFlags, mode = null) {
|
|
215
|
+
const selectorNamePart = Object.keys(selectorFlags)
|
|
216
|
+
.sort()
|
|
217
|
+
.map((name) => selectorFlags[name])
|
|
218
|
+
.join("-");
|
|
219
|
+
|
|
220
|
+
let jobName = name;
|
|
221
|
+
|
|
222
|
+
if (mode) {
|
|
223
|
+
jobName += `-${mode}`;
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
if (selectorNamePart.length > 0) {
|
|
227
|
+
// handle special case
|
|
228
|
+
jobName += `-${normalizeSelectorValue(selectorNamePart)}`;
|
|
229
|
+
}
|
|
230
|
+
|
|
231
|
+
return jobName;
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
async function prepareNomad(isProduction) {
|
|
235
|
+
if (isProduction) {
|
|
236
|
+
console.log("Deploy to Production");
|
|
237
|
+
} else {
|
|
238
|
+
const skynetDir = detectSkynetDirectory();
|
|
239
|
+
|
|
240
|
+
if (!fso.existsSync("/tmp/skynet")) {
|
|
241
|
+
await execa("ln", ["-s", skynetDir, "/tmp/skynet"]);
|
|
242
|
+
}
|
|
243
|
+
|
|
244
|
+
console.log("Deploy locally, please start nomad server in a separate terminal");
|
|
245
|
+
console.log(`You can start nomad server by running ${chalk.inverse(`${skynetDir}/infra-nomad/dev/start.sh`)}`);
|
|
246
|
+
console.log(
|
|
247
|
+
`Then you can visit ${chalk.underline("http://localhost:4646/ui/jobs")} to check submitted dev jobs.\n`
|
|
248
|
+
);
|
|
249
|
+
}
|
|
250
|
+
}
|
|
251
|
+
|
|
252
|
+
async function getNomadAddr(isProduction) {
|
|
253
|
+
let nomadAddr;
|
|
254
|
+
|
|
255
|
+
if (isProduction) {
|
|
256
|
+
nomadAddr = getEnvOrThrow("SKYNET_NOMAD_PRODUCTION_ADDR");
|
|
257
|
+
} else {
|
|
258
|
+
nomadAddr = "http://127.0.0.1:4646";
|
|
259
|
+
}
|
|
260
|
+
|
|
261
|
+
return nomadAddr;
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
async function getNomadPath() {
|
|
265
|
+
try {
|
|
266
|
+
return await which("nomad");
|
|
267
|
+
} catch (missingNomad) {
|
|
268
|
+
console.log(
|
|
269
|
+
`Deploy requires ${chalk.bold("nomad")} binary, please follow ${chalk.underline(
|
|
270
|
+
"https://learn.hashicorp.com/tutorials/nomad/get-started-install"
|
|
271
|
+
)} for installation`
|
|
272
|
+
);
|
|
273
|
+
|
|
274
|
+
throw new Error("missing nomad binary");
|
|
275
|
+
}
|
|
276
|
+
}
|
|
277
|
+
|
|
278
|
+
async function runNomadJob(nomadPath, nomadAddr, jobName, nomadJobDefinition, isStop, isDryRun) {
|
|
279
|
+
try {
|
|
280
|
+
if (isStop) {
|
|
281
|
+
const nomad = execa(nomadPath, ["job", "stop", jobName], {
|
|
282
|
+
env: {
|
|
283
|
+
NOMAD_ADDR: nomadAddr,
|
|
284
|
+
},
|
|
285
|
+
});
|
|
286
|
+
nomad.stdout.pipe(process.stdout);
|
|
287
|
+
await nomad;
|
|
288
|
+
|
|
289
|
+
console.log(chalk.green(`Stopped nomad job ${jobName} in ${nomadAddr}`));
|
|
290
|
+
} else if (isDryRun) {
|
|
291
|
+
console.log("Definition for", jobName);
|
|
292
|
+
console.log("========================================");
|
|
293
|
+
console.log(nomadJobDefinition);
|
|
294
|
+
} else {
|
|
295
|
+
const jobFileName = `/tmp/job-${jobName}`;
|
|
296
|
+
|
|
297
|
+
await fs.writeFile(jobFileName, nomadJobDefinition);
|
|
298
|
+
|
|
299
|
+
const nomad = execa(nomadPath, ["job", "run", jobFileName], {
|
|
300
|
+
env: {
|
|
301
|
+
NOMAD_ADDR: nomadAddr,
|
|
302
|
+
},
|
|
303
|
+
});
|
|
304
|
+
nomad.stdout.pipe(process.stdout);
|
|
305
|
+
await nomad;
|
|
306
|
+
|
|
307
|
+
console.log(chalk.green(`Deployed nomad job ${jobName} to ${nomadAddr}`));
|
|
308
|
+
}
|
|
309
|
+
} catch (nomadExecErr) {
|
|
310
|
+
console.log("Nomad Execution Error:");
|
|
311
|
+
console.log(nomadExecErr.message);
|
|
312
|
+
console.log("");
|
|
313
|
+
|
|
314
|
+
console.log(
|
|
315
|
+
`Failed to run ${chalk.bold("nomad")} commands, please ensure nomad server is accessible at ${chalk.bold(
|
|
316
|
+
nomadAddr
|
|
317
|
+
)}`
|
|
318
|
+
);
|
|
319
|
+
|
|
320
|
+
throw new Error("nomad execution error");
|
|
321
|
+
}
|
|
322
|
+
}
|
|
323
|
+
|
|
/**
 * Factory for a mode-based indexer deploy CLI.
 *
 * Returns `{ deploy }`, where `deploy()` parses command-line flags (via meow)
 * and submits one nomad job (plus an optional monitor job) built from the
 * selected mode (delta / rebuild / resume-rebuild / validate / one / range /
 * reset).
 *
 * @param {string} binaryName - CLI name shown in help; derived via
 *   getBinaryName() when omitted.
 * @param {string} name - base job name, combined with selector flags and mode
 *   by getJobName().
 * @param {string} workingDirectory - working directory for the nomad task.
 * @param {string} bin - indexer command to run inside the job.
 * @param {Object} selector - selector spec consumed by getSelectorFlags() /
 *   getSelectorDesc() (defined in ./selector).
 * @param {Object} env - extra environment variables for the job.
 * @param {string} region - deployment region.
 * @param {Object} check - optional monitor job spec: { bin, schedule, cpu, mem }.
 * @param {string|Function} deltaSchedule - cron (or jobName => cron) for delta
 *   mode.
 * @param {string|Function} validateSchedule - cron (or jobName => cron) for
 *   validate mode.
 * @param {number} deltaCpu,deltaMem,rebuildCpu,rebuildMem,validateCpu,validateMem
 *   - per-mode resource settings.
 */
function createModeDeploy({
  binaryName,
  name,
  workingDirectory,
  bin = "bin/indexer",
  selector = {},
  env = {},
  region = "us-east-1",
  check,
  deltaSchedule,
  validateSchedule,
  deltaCpu,
  deltaMem,
  rebuildCpu,
  rebuildMem,
  validateCpu,
  validateMem,
}) {
  // Build the nomad job definition(s) for the requested mode and submit them.
  // Flags come straight from meow; any non-reserved flag is treated as a
  // selector flag and forwarded to the indexer command line.
  async function deployMode({
    mode,
    from,
    to,
    stop,
    production,
    dryRun,
    verbose,
    schedule: cmdSchedule,
    ...selectorFlags
  }) {
    if (mode === "delta") {
      // delta mode will ignore from/to flags
      from = 0;
      to = 0;
    }

    // Only full-range delta/validate jobs get a cron schedule; bounded runs
    // are one-shot.
    const isPeriodic = from === 0 && to === 0 && ["delta", "validate"].includes(mode);

    const jobName = getJobName(name, selectorFlags, mode);

    // Sort selector flags so the generated command (and thus the job
    // definition) is stable regardless of flag order on the CLI.
    const selectorCmdPart = Object.keys(selectorFlags)
      .sort()
      .map((name) => `--${name} ${selectorFlags[name]}`)
      .join(" ");
    let args = `--mode ${mode} ${selectorCmdPart}`;

    if (verbose) {
      args += ` --verbose`;
    }

    let rangeArgs = "";

    if (from > 0) {
      rangeArgs += ` --from ${from}`;
    }

    if (to > 0) {
      rangeArgs += ` --to ${to}`;
    }

    // Per-mode cpu/mem; validate falls back to the rebuild settings when not
    // given explicitly. (Note: "Resouces" is a pre-existing local-name typo.)
    const modeResouces = {
      rebuild: { cpu: rebuildCpu, mem: rebuildMem },
      "resume-rebuild": { cpu: rebuildCpu, mem: rebuildMem },
      validate: { cpu: validateCpu || rebuildCpu, mem: validateMem || rebuildMem },
      delta: { cpu: deltaCpu, mem: deltaMem },
    };

    // by default use delta cpu/mem settings
    const { cpu, mem } = modeResouces[mode] || modeResouces.delta;

    // Schedules may be static strings or functions of the job name.
    let deltaCron = typeof deltaSchedule === "function" ? deltaSchedule(jobName) : deltaSchedule;

    // An explicit --schedule flag overrides the configured schedule, but only
    // when a configured schedule exists in the first place.
    if (deltaSchedule && cmdSchedule) {
      deltaCron = cmdSchedule;
    }

    let validateCron = typeof validateSchedule === "function" ? validateSchedule(jobName) : validateSchedule;

    if (validateSchedule && cmdSchedule) {
      validateCron = cmdSchedule;
    }

    // INTERVAL_ALIASES (defined elsewhere in this file) maps aliases like
    // "hourly" to cron expressions; unknown values pass through as-is.
    const modeIntervals = {
      delta: INTERVAL_ALIASES[deltaCron] || deltaCron,
      validate: INTERVAL_ALIASES[validateCron] || validateCron,
    };

    // cron is `false` for non-periodic runs — presumably genConfig then omits
    // the periodic stanza (genConfig is defined elsewhere; verify).
    const mainJobDefinition = genConfig({
      jobName,
      cron: isPeriodic && modeIntervals[mode],
      workingDirectory,
      additionalEnv: env,
      region,
      cmd: `${bin} ${args} ${rangeArgs}`,
      cpu,
      mem,
      isProduction: production,
    });

    const nomadPath = await getNomadPath();
    await prepareNomad(production);
    const nomadAddr = await getNomadAddr(production);

    await runNomadJob(nomadPath, nomadAddr, jobName, mainJobDefinition, stop, dryRun);

    // Optionally deploy a companion "<job>-monitor" job that runs check.bin on
    // its own schedule.
    if (check && check.bin) {
      console.log("");

      const monitorJobName = `${jobName}-monitor`;
      const monitorJobDefinition = genConfig({
        jobName: monitorJobName,
        cron: INTERVAL_ALIASES[check.schedule] || check.schedule,
        workingDirectory,
        additionalEnv: {
          ...env,
          // null values — presumably placeholders resolved by genConfig or the
          // runtime environment; TODO confirm genConfig's null handling.
          SKYNET_NOMAD_PRODUCTION_ADDR: null,
          SKYNET_SLACK_TOKEN: null,
          OPSGENIE_API_KEY: null,
          OPSGENIE_END_POINT: null,
        },
        region,
        cmd: `${check.bin} ${args} ${production ? "--production" : ""}`,
        cpu: check.cpu || 100,
        mem: check.mem || 100,
        isProduction: production,
      });

      await runNomadJob(nomadPath, nomadAddr, monitorJobName, monitorJobDefinition, stop, dryRun);
    }
  }

  // CLI entry point: parse flags, run deployMode, exit non-zero on failure.
  function deploy() {
    if (!binaryName) {
      binaryName = getBinaryName();
    }

    const cli = meow(
      `
  Usage

    $ ${binaryName} <options>

  Options
    ${getSelectorDesc(selector)}
    --mode could be delta/rebuild/resume-rebuild/validate/one/range/reset
    --from min id to build
    --to max id to build
    --stop stop job instead of running the job
    --production deploy to production, default is development
    --schedule override default schedule, support aliases: secondly, minutely, hourly, daily, weekly
    --verbose Output debug messages
    --dry-run print nomad job file but do not really execute it

  Examples
    ${binaryName} --mode delta
    ${binaryName} --mode rebuild
    ${binaryName} --mode validate
`,
      {
        description: false,
        version: false,
        flags: {
          ...getSelectorFlags(selector),
          mode: {
            type: "string",
            default: "delta",
          },
          from: {
            alias: "since",
            type: "number",
            default: 0,
          },
          to: {
            alias: "until",
            type: "number",
            default: 0,
          },
          schedule: {
            type: "string",
          },
          verbose: {
            type: "boolean",
            default: false,
          },
          production: {
            alias: "prd",
            type: "boolean",
            default: false,
          },
          dryRun: {
            type: "boolean",
            default: false,
          },
          stop: {
            type: "boolean",
            default: false,
          },
        },
      }
    );

    deployMode(cli.flags).catch((err) => {
      console.error(err);
      process.exit(1);
    });
  }

  return { deploy };
}
532
|
+
|
|
/**
 * Factory for a modeless (single-command) deploy CLI.
 *
 * Returns `{ deploy }`, where `deploy()` parses command-line flags (via meow)
 * and submits one nomad job running `bin` (plus an optional monitor job).
 * Unlike createModeDeploy, there is no --mode/--from/--to handling.
 *
 * @param {string} binaryName - CLI name shown in help; derived via
 *   getBinaryName() when omitted.
 * @param {string} name - base job name, combined with selector flags by
 *   getJobName() (mode is null here).
 * @param {string} workingDirectory - working directory for the nomad task.
 * @param {string} bin - command to run inside the job.
 * @param {Object} selector - selector spec consumed by getSelectorFlags() /
 *   getSelectorDesc() (defined in ./selector).
 * @param {string} region - deployment region.
 * @param {Object} env - extra environment variables for the job.
 * @param {Object} check - optional monitor job spec: { bin, schedule, cpu, mem }.
 * @param {string|Function} schedule - cron (or jobName => cron) for the job.
 * @param {*} restart - restart policy forwarded to genConfig.
 * @param {number} cpu - cpu allocation forwarded to genConfig.
 * @param {number} mem - memory allocation forwarded to genConfig.
 * @param {*} service - service spec forwarded to genConfig.
 */
function createDeploy({
  binaryName,
  name,
  workingDirectory,
  bin = "bin/indexer",
  selector = {},
  region = "us-east-1",
  env = {},
  check,
  schedule,
  restart,
  cpu,
  mem,
  service,
}) {
  // Build the nomad job definition and submit it. Any flag not listed here is
  // treated as a selector flag and forwarded to the command line.
  async function deployModeless({ production, stop, dryRun, verbose, schedule: cmdSchedule, ...selectorFlags }) {
    const jobName = getJobName(name, selectorFlags, null);

    // Sort selector flags so the generated command is stable regardless of
    // flag order on the CLI.
    const selectorCmdPart = Object.keys(selectorFlags)
      .sort()
      .map((name) => `--${name} ${selectorFlags[name]}`)
      .join(" ");
    let args = `${selectorCmdPart}`;

    if (verbose) {
      args += ` --verbose`;
    }

    // Schedule may be a static string or a function of the job name.
    let cron = typeof schedule === "function" ? schedule(jobName) : schedule;

    if (schedule && cmdSchedule) {
      // cmd schedule has higher priority
      cron = cmdSchedule;
    }

    // INTERVAL_ALIASES (defined elsewhere in this file) maps aliases like
    // "hourly" to cron expressions; unknown values pass through as-is.
    const nomadJobDefinition = genConfig({
      jobName,
      cron: INTERVAL_ALIASES[cron] || cron,
      restart,
      workingDirectory,
      additionalEnv: env,
      region,
      cmd: `${bin} ${args}`,
      cpu,
      mem,
      service,
      isProduction: production,
    });

    const nomadPath = await getNomadPath();
    await prepareNomad(production);
    const nomadAddr = await getNomadAddr(production);

    await runNomadJob(nomadPath, nomadAddr, jobName, nomadJobDefinition, stop, dryRun);

    // Optionally deploy a companion "<job>-monitor" job that runs check.bin on
    // its own schedule.
    if (check && check.bin) {
      console.log("");

      const monitorJobName = `${jobName}-monitor`;
      const monitorJobDefinition = genConfig({
        jobName: monitorJobName,
        cron: INTERVAL_ALIASES[check.schedule] || check.schedule,
        workingDirectory,
        additionalEnv: {
          ...env,
          // null values — presumably placeholders resolved by genConfig or the
          // runtime environment; TODO confirm genConfig's null handling.
          SKYNET_NOMAD_PRODUCTION_ADDR: null,
          SKYNET_SLACK_TOKEN: null,
          OPSGENIE_API_KEY: null,
          OPSGENIE_END_POINT: null,
        },
        region,
        cmd: `${check.bin} ${args} ${production ? "--production" : ""}`,
        cpu: check.cpu || 100,
        mem: check.mem || 100,
        isProduction: production,
      });

      await runNomadJob(nomadPath, nomadAddr, monitorJobName, monitorJobDefinition, stop, dryRun);
    }
  }

  // CLI entry point: parse flags, run deployModeless, exit non-zero on failure.
  function deploy() {
    if (!binaryName) {
      binaryName = getBinaryName();
    }

    const cli = meow(
      `
  Usage

    $ ${binaryName} <options>

  Options
    ${getSelectorDesc(selector)}
    --stop stop job instead of running the job
    --production deploy to production, default is development
    --schedule override default schedule, support aliases: secondly, minutely, hourly, daily, weekly
    --verbose Output debug messages
    --dry-run print nomad job file but do not really execute it
`,
      {
        description: false,
        version: false,
        flags: {
          ...getSelectorFlags(selector),
          schedule: {
            type: "string",
          },
          verbose: {
            type: "boolean",
            default: false,
          },
          production: {
            alias: "prd",
            type: "boolean",
            default: false,
          },
          dryRun: {
            type: "boolean",
            default: false,
          },
          stop: {
            type: "boolean",
            default: false,
          },
        },
      }
    );

    deployModeless(cli.flags).catch((err) => {
      console.error(err);
      process.exit(1);
    });
  }

  return { deploy };
}
670
|
+
|
|
// Public API: job-name/address helpers plus the two deploy-CLI factories
// (mode-based indexer deploys vs. modeless single-command deploys).
module.exports = {
  getJobName,
  getNomadAddr,
  createModeDeploy,
  createDeploy,
};