@certik/skynet 0.24.0 → 0.25.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/CHANGELOG.md +5 -0
  2. package/examples/api.ts +0 -0
  3. package/examples/indexer.ts +0 -0
  4. package/examples/mode-indexer.ts +0 -0
  5. package/package.json +1 -10
  6. package/src/graphql.ts +14 -4
  7. package/src/por.ts +18 -23
  8. package/.vscode/settings.json +0 -5
  9. package/dist/abi.d.ts +0 -111
  10. package/dist/abi.js +0 -571
  11. package/dist/address.d.ts +0 -2
  12. package/dist/address.js +0 -24
  13. package/dist/api.d.ts +0 -31
  14. package/dist/api.js +0 -260
  15. package/dist/app.d.ts +0 -101
  16. package/dist/app.js +0 -2077
  17. package/dist/availability.d.ts +0 -23
  18. package/dist/availability.js +0 -133
  19. package/dist/cli.d.ts +0 -5
  20. package/dist/cli.js +0 -41
  21. package/dist/const.d.ts +0 -34
  22. package/dist/const.js +0 -162
  23. package/dist/databricks.d.ts +0 -3
  24. package/dist/databricks.js +0 -208
  25. package/dist/date.d.ts +0 -5
  26. package/dist/date.js +0 -56
  27. package/dist/deploy.d.ts +0 -75
  28. package/dist/deploy.js +0 -587
  29. package/dist/dynamodb.d.ts +0 -16
  30. package/dist/dynamodb.js +0 -479
  31. package/dist/env.d.ts +0 -6
  32. package/dist/env.js +0 -26
  33. package/dist/goalert.d.ts +0 -19
  34. package/dist/goalert.js +0 -43
  35. package/dist/graphql.d.ts +0 -5
  36. package/dist/graphql.js +0 -28
  37. package/dist/indexer.d.ts +0 -69
  38. package/dist/indexer.js +0 -1099
  39. package/dist/log.d.ts +0 -13
  40. package/dist/log.js +0 -63
  41. package/dist/object-hash.d.ts +0 -1
  42. package/dist/object-hash.js +0 -61
  43. package/dist/por.d.ts +0 -37
  44. package/dist/por.js +0 -120
  45. package/dist/s3.d.ts +0 -20
  46. package/dist/s3.js +0 -122
  47. package/dist/search.d.ts +0 -5
  48. package/dist/search.js +0 -105
  49. package/dist/selector.d.ts +0 -17
  50. package/dist/selector.js +0 -44
  51. package/dist/slack.d.ts +0 -14
  52. package/dist/slack.js +0 -29
  53. package/dist/util.d.ts +0 -4
  54. package/dist/util.js +0 -27
  55. package/src/databricks.ts +0 -82
package/dist/deploy.d.ts DELETED
@@ -1,75 +0,0 @@
1
- import { getJobName } from "./selector";
2
- import type { Selector } from "./selector";
3
- export type Env = {
4
- [key: string]: string | null;
5
- };
6
- type Service = {
7
- prefix: string;
8
- port: number;
9
- };
10
- type Schedule = string | ((jobName: string) => string);
11
- type Check = {
12
- func: ({ protocol, state, verbose }: {
13
- protocol: string;
14
- state: string;
15
- verbose: boolean;
16
- }) => {
17
- type: string;
18
- message: string;
19
- };
20
- schedule: Schedule;
21
- slackChannel: string;
22
- killTimeout?: string;
23
- cpu: number;
24
- mem: number;
25
- };
26
- type Restart = {
27
- attempts: number;
28
- mode: string;
29
- interval: string;
30
- delay: string;
31
- };
32
- declare function getNomadAddr(isProduction: boolean): string;
33
- declare function createModeDeploy({ binaryName, name, workingDirectory, bin, selector, env, region, deltaSchedule, validateSchedule, deltaKillTimeout, deltaCpu, deltaMem, rebuildKillTimeout, rebuildCpu, rebuildMem, validateKillTimeout, validateCpu, validateMem, }: {
34
- binaryName: string;
35
- name: string;
36
- workingDirectory: string;
37
- bin?: string;
38
- selector?: Selector;
39
- env?: Env;
40
- region?: string;
41
- check?: Check;
42
- deltaSchedule?: Schedule;
43
- validateSchedule?: Schedule;
44
- deltaKillTimeout?: string;
45
- deltaCpu: number;
46
- deltaMem: number;
47
- rebuildKillTimeout?: string;
48
- rebuildCpu?: number;
49
- rebuildMem?: number;
50
- validateKillTimeout?: string;
51
- validateCpu?: number;
52
- validateMem?: number;
53
- }): {
54
- deploy: () => Promise<void>;
55
- };
56
- declare function createDeploy({ binaryName, name, workingDirectory, bin, selector, region, type, env, count, schedule, restart, killTimeout, cpu, mem, service, }: {
57
- binaryName: string;
58
- name: string;
59
- workingDirectory: string;
60
- bin?: string;
61
- selector?: Selector;
62
- env?: Env;
63
- region?: string;
64
- type?: string;
65
- count?: number;
66
- schedule?: Schedule;
67
- restart?: Restart;
68
- killTimeout?: string;
69
- cpu: number;
70
- mem: number;
71
- service?: Service;
72
- }): {
73
- deploy: () => Promise<void>;
74
- };
75
- export { getJobName, getNomadAddr, createModeDeploy, createDeploy };
package/dist/deploy.js DELETED
@@ -1,587 +0,0 @@
1
- // src/selector.ts
2
- function getSelectorDesc(selector) {
3
- return Object.keys(selector).map((name) => {
4
- return ` --${name.padEnd(14)}${selector[name].desc || selector[name].description || ""}`;
5
- }).join(`
6
- `);
7
- }
8
- function getSelectorFlags(selector) {
9
- return Object.keys(selector).reduce((acc, name) => {
10
- const flag = {
11
- type: selector[name].type || "string",
12
- ...selector[name]
13
- };
14
- if (!selector[name].optional && selector[name].isRequired !== false) {
15
- flag.isRequired = true;
16
- }
17
- return { ...acc, [name]: flag };
18
- }, {});
19
- }
20
- function toSelectorString(selectorFlags, delim = ",") {
21
- return Object.keys(selectorFlags).sort().map((flag) => {
22
- return `${flag}=${selectorFlags[flag]}`;
23
- }).join(delim);
24
- }
25
- function normalizeSelectorValue(v) {
26
- return v.replace(/[^A-Za-z0-9]+/g, "-");
27
- }
28
- function getJobName(name, selectorFlags, mode) {
29
- const selectorNamePart = Object.keys(selectorFlags).sort().map((name2) => selectorFlags[name2]).join("-");
30
- let jobName = name;
31
- if (mode) {
32
- jobName += `-${mode}`;
33
- }
34
- if (selectorNamePart.length > 0) {
35
- jobName += `-${normalizeSelectorValue(selectorNamePart)}`;
36
- }
37
- return jobName;
38
- }
39
- // src/env.ts
40
- function ensureAndGet(envName, defaultValue) {
41
- return process.env[envName] || defaultValue;
42
- }
43
- function getEnvironment() {
44
- return ensureAndGet("SKYNET_ENVIRONMENT", "dev");
45
- }
46
- function getEnvOrThrow(envName) {
47
- if (!process.env[envName]) {
48
- throw new Error(`Must set environment variable ${envName}`);
49
- }
50
- return process.env[envName];
51
- }
52
- function isProduction() {
53
- return getEnvironment() === "prd";
54
- }
55
- function isDev() {
56
- return getEnvironment() === "dev";
57
- }
58
- // src/cli.ts
59
- import path from "path";
60
- import fs from "fs";
61
- function getBinaryName() {
62
- const binaryNameParts = process.argv[1].split(path.sep);
63
- const binaryName = binaryNameParts[binaryNameParts.length - 1];
64
- return binaryName;
65
- }
66
- function detectSkynetDirectory() {
67
- return detectDirectory(process.argv[1], "SkynetAPIDefinitions.yml");
68
- }
69
- function detectWorkingDirectory() {
70
- const wd = detectDirectory(process.argv[1], "package.json");
71
- const skynetd = detectDirectory(process.argv[1], "SkynetAPIDefinitions.yml");
72
- return wd.slice(skynetd.length + path.sep.length).replace(path.sep, "/");
73
- }
74
- function detectDirectory(fullBinPath, sentinel = "package.json") {
75
- let parentFolder = path.dirname(fullBinPath);
76
- while (parentFolder) {
77
- const sentinelPath = path.join(parentFolder, sentinel);
78
- if (fs.existsSync(sentinelPath)) {
79
- return parentFolder;
80
- }
81
- const newParentFolder = path.dirname(parentFolder);
82
- if (newParentFolder === parentFolder) {
83
- break;
84
- }
85
- parentFolder = newParentFolder;
86
- }
87
- throw new Error("Cannot detect current working directory");
88
- }
89
- function detectBin() {
90
- const wd = detectDirectory(process.argv[1], "package.json");
91
- return process.argv[1].slice(wd.length + path.sep.length).replace(path.sep, "/");
92
- }
93
- // src/deploy.ts
94
- import fs2 from "fs/promises";
95
- import fso from "fs";
96
- import { execa } from "execa";
97
- import meow from "meow";
98
- import chalk from "chalk";
99
- import which from "which";
100
- var INTERVAL_ALIASES = {
101
- secondly: "*/1 * * * * * *",
102
- "@secondly": "*/1 * * * * * *",
103
- minutely: "0 * * * * * *",
104
- "@minutely": "0 * * * * * *",
105
- hourly: "0 0 * * * * *",
106
- "@hourly": "0 0 * * * * *",
107
- daily: "0 0 0 * * * *",
108
- "@daily": "0 0 0 * * * *",
109
- weekly: "0 0 0 * * 0 *",
110
- "@weekly": "0 0 0 * * 0 *"
111
- };
112
- var genConfig = ({
113
- jobName,
114
- workingDirectory,
115
- cmd,
116
- cron,
117
- count,
118
- restart,
119
- killTimeout,
120
- cpu,
121
- mem,
122
- service,
123
- additionalEnv = {},
124
- type = "batch",
125
- region = "skynet-dc1",
126
- isProduction: isProduction2
127
- }) => `job "${jobName}" {
128
- datacenters = ["${region}"]
129
-
130
- type = "${type}"
131
-
132
- ${cron ? `# Triggers periodically
133
- periodic {
134
- crons = ["${cron}"]
135
- prohibit_overlap = true
136
- }` : ""}
137
-
138
- constraint {
139
- attribute = "\${meta.has_nodejs}"
140
- value = "true"
141
- }
142
-
143
- constraint {
144
- attribute = "\${meta.has_skynet}"
145
- value = "true"
146
- }
147
-
148
- group "default" {
149
- ${count && count > 1 ? `count = ${count}` : ""}
150
- ${count && count > 1 ? `# Rolling Update
151
- update {
152
- max_parallel = 1
153
- min_healthy_time = "10s"
154
- }` : ""}
155
-
156
- reschedule {
157
- attempts = 0
158
- unlimited = false
159
- }
160
-
161
- ${service ? `# Setup Service Network
162
- network {
163
- port "http" {
164
- static = ${service.port}
165
- }
166
- }` : ""}
167
-
168
- task "main" {
169
- driver = "raw_exec"
170
-
171
- config {
172
- command = "sh"
173
- args = [
174
- "-c",
175
- "cd \${meta.skynet_code_path}/${workingDirectory} && if [ -e bun.lockb ]; then bun install --silent; else yarn install --silent; fi && exec ${cmd}"
176
- ]
177
- }
178
-
179
- ${service ? `# Setup API Routes
180
- service {
181
- name = "${jobName}"
182
- port = "http"
183
-
184
- tags = [
185
- "urlprefix-${service.prefix} strip=${service.prefix}",
186
- ]
187
-
188
- check {
189
- type = "http"
190
- path = "/"
191
- interval = "10s"
192
- timeout = "2s"
193
- }
194
- }
195
- ` : ""}
196
-
197
- # doppler integration support
198
- # it is always there but a project can decide to not use it
199
- template {
200
- change_mode = "restart"
201
- destination = "secrets/context.env"
202
- env = true
203
- data = "DOPPLER_TOKEN={{key \\"infra-nomad/doppler-token\\"}}"
204
- }
205
-
206
- # always update SKYNET_DEPLOYED_AT so that new deployment always triggers
207
- env {
208
- SKYNET_DEPLOYED_AT="${new Date().toISOString()}"
209
- HOME="/root"
210
- DOPPLER_PROJECT="${workingDirectory}"
211
- DOPPLER_CONFIG="${isProduction2 ? "prd" : "dev"}"
212
- SKYNET_ENVIRONMENT="${isProduction2 ? "prd" : "dev"}"
213
- ${Object.entries(additionalEnv).filter((kv) => !!kv[1]).map(([key, value]) => `${key}="${value}"`).join(`
214
- `)}
215
- }
216
-
217
- kill_timeout = "${killTimeout || "60s"}"
218
-
219
- # Specify the maximum resources required to run the task,
220
- # include CPU and memory.
221
- resources {
222
- cpu = ${cpu} # MHz
223
- memory = ${mem} # MB
224
- }
225
-
226
- # Setting the server task as the leader of the task group allows Nomad to
227
- # signal the log shipper task to gracefully shutdown when the server exits.
228
- leader = true
229
-
230
- ${restart ? `
231
- # Restart the job if it fails
232
- restart {
233
- attempts = ${restart.attempts ?? 2}
234
- mode = "${restart.mode ?? "fail"}"
235
- interval = "${restart.interval ?? "30m"}"
236
- delay = "${restart.delay ?? "15s"}"
237
- }
238
- ` : `
239
- # do not retry from the periodical job will reschedule anyway
240
- restart {
241
- attempts = 0
242
- mode = "fail"
243
- }`}
244
- }
245
- }
246
- }`;
247
- async function prepareNomad(isProduction2) {
248
- if (isProduction2) {
249
- console.log("Deploy to Production");
250
- } else {
251
- const skynetDir = detectSkynetDirectory();
252
- if (!fso.existsSync("/tmp/skynet")) {
253
- await execa("ln", ["-s", skynetDir, "/tmp/skynet"]);
254
- }
255
- console.log("Deploy locally, please start nomad server in a separate terminal");
256
- console.log(`You can start nomad server by running ${chalk.inverse(`${skynetDir}/infra-nomad/dev/start.sh`)}`);
257
- console.log(`Then you can visit ${chalk.underline("http://localhost:4646/ui/jobs")} to check submitted dev jobs.
258
- `);
259
- }
260
- }
261
- function getNomadAddr(isProduction2) {
262
- return isProduction2 ? getEnvOrThrow("NOMAD_ADDR") : "http://127.0.0.1:4646";
263
- }
264
- async function getNomadPath() {
265
- try {
266
- return await which("nomad");
267
- } catch (missingNomad) {
268
- console.log(`Deploy requires ${chalk.bold("nomad")} binary, please follow ${chalk.underline("https://learn.hashicorp.com/tutorials/nomad/get-started-install")} for installation`, missingNomad);
269
- throw new Error("missing nomad binary");
270
- }
271
- }
272
- async function runNomadJob(nomadPath, nomadAddr, jobName, nomadJobDefinition, isStop, isDryRun) {
273
- try {
274
- if (isStop) {
275
- const nomad = execa(nomadPath, ["job", "stop", jobName], {
276
- env: {
277
- NOMAD_ADDR: nomadAddr
278
- }
279
- });
280
- nomad.stdout.pipe(process.stdout);
281
- await nomad;
282
- console.log(chalk.green(`Stopped nomad job ${jobName} in ${nomadAddr}`));
283
- } else if (isDryRun) {
284
- console.log("Definition for", jobName);
285
- console.log("========================================");
286
- console.log(nomadJobDefinition);
287
- } else {
288
- const jobFileName = `/tmp/job-${jobName}`;
289
- await fs2.writeFile(jobFileName, nomadJobDefinition);
290
- const nomad = execa(nomadPath, ["job", "run", jobFileName], {
291
- env: {
292
- NOMAD_ADDR: nomadAddr
293
- }
294
- });
295
- nomad.stdout.pipe(process.stdout);
296
- await nomad;
297
- console.log(chalk.green(`Deployed nomad job ${jobName} to ${nomadAddr}`));
298
- }
299
- } catch (nomadExecErr) {
300
- if (nomadExecErr instanceof Error) {
301
- console.log("Nomad Execution Error:");
302
- console.log(nomadExecErr.message);
303
- console.log("");
304
- }
305
- console.log(`Failed to run ${chalk.bold("nomad")} commands, please ensure nomad server is accessible at ${chalk.bold(nomadAddr)}`);
306
- throw new Error("nomad execution error");
307
- }
308
- }
309
- function createModeDeploy({
310
- binaryName,
311
- name,
312
- workingDirectory,
313
- bin = "bin/indexer",
314
- selector = {},
315
- env = {},
316
- region = "skynet-dc1",
317
- deltaSchedule,
318
- validateSchedule,
319
- deltaKillTimeout,
320
- deltaCpu,
321
- deltaMem,
322
- rebuildKillTimeout,
323
- rebuildCpu,
324
- rebuildMem,
325
- validateKillTimeout,
326
- validateCpu,
327
- validateMem
328
- }) {
329
- async function deployMode({
330
- mode,
331
- from,
332
- to,
333
- stop,
334
- production,
335
- dryRun,
336
- verbose,
337
- schedule: cmdSchedule,
338
- ...selectorFlags
339
- }) {
340
- if (mode === "delta") {
341
- from = 0;
342
- to = 0;
343
- }
344
- const isPeriodic = from === 0 && to === 0 && ["delta", "validate"].includes(mode);
345
- const jobName = getJobName(name, selectorFlags, mode);
346
- const selectorCmdPart = Object.entries(selectorFlags).sort().map(([name2, value]) => `--${name2} ${value}`).join(" ");
347
- let args = `--mode ${mode} ${selectorCmdPart}`;
348
- if (verbose) {
349
- args += ` --verbose`;
350
- }
351
- let rangeArgs = "";
352
- if (from > 0) {
353
- rangeArgs += ` --from ${from}`;
354
- }
355
- if (to > 0) {
356
- rangeArgs += ` --to ${to}`;
357
- }
358
- const modeResouces = {
359
- rebuild: { cpu: rebuildCpu, mem: rebuildMem, killTimeout: rebuildKillTimeout },
360
- "resume-rebuild": { cpu: rebuildCpu, mem: rebuildMem, killTimeout: rebuildKillTimeout },
361
- validate: {
362
- cpu: validateCpu || rebuildCpu,
363
- mem: validateMem || rebuildMem,
364
- killTimeout: validateKillTimeout || rebuildKillTimeout
365
- },
366
- delta: { cpu: deltaCpu, mem: deltaMem, killTimeout: deltaKillTimeout }
367
- };
368
- const cpu = modeResouces[mode]?.cpu || deltaCpu;
369
- const mem = modeResouces[mode]?.mem || deltaMem;
370
- const killTimeout = modeResouces[mode]?.killTimeout || deltaKillTimeout;
371
- let deltaCron = typeof deltaSchedule === "function" ? deltaSchedule(jobName) : deltaSchedule;
372
- if (deltaSchedule && cmdSchedule) {
373
- deltaCron = cmdSchedule;
374
- }
375
- let validateCron = typeof validateSchedule === "function" ? validateSchedule(jobName) : validateSchedule;
376
- if (validateSchedule && cmdSchedule) {
377
- validateCron = cmdSchedule;
378
- }
379
- const modeIntervals = {
380
- delta: deltaCron ? INTERVAL_ALIASES[deltaCron] || deltaCron : undefined,
381
- validate: validateCron ? INTERVAL_ALIASES[validateCron] || validateCron : undefined
382
- };
383
- const mainJobDefinition = genConfig({
384
- jobName,
385
- cron: isPeriodic ? modeIntervals[mode] : undefined,
386
- workingDirectory,
387
- additionalEnv: env,
388
- region,
389
- cmd: `${bin} ${args} ${rangeArgs}`,
390
- killTimeout,
391
- cpu,
392
- mem,
393
- isProduction: production
394
- });
395
- const nomadPath = await getNomadPath();
396
- await prepareNomad(production);
397
- const nomadAddr = getNomadAddr(production);
398
- await runNomadJob(nomadPath, nomadAddr, jobName, mainJobDefinition, stop, dryRun);
399
- }
400
- async function deploy() {
401
- if (!binaryName) {
402
- binaryName = getBinaryName();
403
- }
404
- const cli = meow(`
405
- Usage
406
-
407
- $ ${binaryName} <options>
408
-
409
- Options
410
- ${getSelectorDesc(selector)}
411
- --mode could be delta/rebuild/resume-rebuild/validate/one/range/reset
412
- --from min id to build
413
- --to max id to build
414
- --stop stop job instead of running the job
415
- --production deploy to production, default is development
416
- --schedule override default schedule, support aliases: secondly, minutely, hourly, daily, weekly
417
- --verbose Output debug messages
418
- --dry-run print nomad job file but do not really execute it
419
-
420
- Examples
421
- ${binaryName} --mode delta
422
- ${binaryName} --mode rebuild
423
- ${binaryName} --mode validate
424
- `, {
425
- importMeta: import.meta,
426
- description: false,
427
- flags: {
428
- ...getSelectorFlags(selector),
429
- mode: {
430
- type: "string",
431
- default: "delta"
432
- },
433
- from: {
434
- aliases: ["since"],
435
- type: "number",
436
- default: 0
437
- },
438
- to: {
439
- aliases: ["until"],
440
- type: "number",
441
- default: 0
442
- },
443
- schedule: {
444
- type: "string"
445
- },
446
- verbose: {
447
- type: "boolean",
448
- default: false
449
- },
450
- production: {
451
- aliases: ["prd"],
452
- type: "boolean",
453
- default: false
454
- },
455
- dryRun: {
456
- type: "boolean",
457
- default: false
458
- },
459
- stop: {
460
- type: "boolean",
461
- default: false
462
- }
463
- }
464
- });
465
- try {
466
- return deployMode(cli.flags);
467
- } catch (err) {
468
- console.error(err);
469
- process.exit(1);
470
- }
471
- }
472
- return { deploy };
473
- }
474
- function createDeploy({
475
- binaryName,
476
- name,
477
- workingDirectory,
478
- bin = "bin/indexer",
479
- selector = {},
480
- region = "skynet-dc1",
481
- type = "batch",
482
- env = {},
483
- count,
484
- schedule,
485
- restart,
486
- killTimeout,
487
- cpu,
488
- mem,
489
- service
490
- }) {
491
- async function deployModeless({
492
- production,
493
- stop,
494
- dryRun,
495
- verbose,
496
- schedule: cmdSchedule,
497
- ...selectorFlags
498
- }) {
499
- const jobName = getJobName(name, selectorFlags);
500
- const selectorCmdPart = Object.entries(selectorFlags).sort().map(([name2, value]) => `--${name2} ${value}`).join(" ");
501
- let args = `${selectorCmdPart}`;
502
- if (verbose) {
503
- args += ` --verbose`;
504
- }
505
- let cron = typeof schedule === "function" ? schedule(jobName) : schedule;
506
- if (schedule && cmdSchedule) {
507
- cron = cmdSchedule;
508
- }
509
- const nomadJobDefinition = genConfig({
510
- jobName,
511
- cron: cron ? INTERVAL_ALIASES[cron] || cron : undefined,
512
- count,
513
- restart,
514
- workingDirectory,
515
- additionalEnv: env,
516
- region,
517
- type,
518
- cmd: `${bin} ${args}`,
519
- killTimeout,
520
- cpu,
521
- mem,
522
- service,
523
- isProduction: production
524
- });
525
- const nomadPath = await getNomadPath();
526
- await prepareNomad(production);
527
- const nomadAddr = getNomadAddr(production);
528
- await runNomadJob(nomadPath, nomadAddr, jobName, nomadJobDefinition, stop, dryRun);
529
- }
530
- async function deploy() {
531
- if (!binaryName) {
532
- binaryName = getBinaryName();
533
- }
534
- const cli = meow(`
535
- Usage
536
-
537
- $ ${binaryName} <options>
538
-
539
- Options
540
- ${getSelectorDesc(selector)}
541
- --stop stop job instead of running the job
542
- --production deploy to production, default is development
543
- --schedule override default schedule, support aliases: secondly, minutely, hourly, daily, weekly
544
- --verbose Output debug messages
545
- --dry-run print nomad job file but do not really execute it
546
- `, {
547
- importMeta: import.meta,
548
- description: false,
549
- flags: {
550
- ...getSelectorFlags(selector),
551
- schedule: {
552
- type: "string"
553
- },
554
- verbose: {
555
- type: "boolean",
556
- default: false
557
- },
558
- production: {
559
- aliases: ["prd"],
560
- type: "boolean",
561
- default: false
562
- },
563
- dryRun: {
564
- type: "boolean",
565
- default: false
566
- },
567
- stop: {
568
- type: "boolean",
569
- default: false
570
- }
571
- }
572
- });
573
- try {
574
- return deployModeless(cli.flags);
575
- } catch (err) {
576
- console.error(err);
577
- process.exit(1);
578
- }
579
- }
580
- return { deploy };
581
- }
582
- export {
583
- getNomadAddr,
584
- getJobName,
585
- createModeDeploy,
586
- createDeploy
587
- };
@@ -1,16 +0,0 @@
1
- import { DynamoDBDocumentClient, ScanCommand, BatchWriteCommand, GetCommand, PutCommand, QueryCommand, UpdateCommand } from "@aws-sdk/lib-dynamodb";
2
- import type { ScanCommandInput, PutCommandInput } from "@aws-sdk/lib-dynamodb";
3
- declare function getDocClient(forceNew?: boolean): DynamoDBDocumentClient;
4
- declare function scanWholeTable<T>(options: ScanCommandInput): Promise<{
5
- Items: T[];
6
- Count: number;
7
- ScannedCount: number;
8
- }>;
9
- declare function batchCreateRecords(tableName: string, records: Record<string, unknown>[], maxWritingCapacity?: number, verbose?: boolean): Promise<void>;
10
- declare function createRecord(tableName: string, fields: PutCommandInput["Item"], verbose?: boolean): Promise<import("@aws-sdk/lib-dynamodb").PutCommandOutput>;
11
- declare function getRecordsByKey<TReturn, TKey extends Record<string, unknown> = Record<string, unknown>>(tableName: string, keys: TKey, indexName?: string): Promise<TReturn[] | null>;
12
- declare function getRecordByKey<T>(tableName: string, keys: Record<string, unknown>, indexName?: string): Promise<T | null>;
13
- declare function updateRecordByKey(tableName: string, idKey: Record<string, unknown>, fields: Record<string, unknown>, conditionExpressions?: null, verbose?: boolean): Promise<Record<string, any> | undefined>;
14
- declare function batchDeleteRecords(tableName: string, keys: Record<string, unknown>[]): Promise<void>;
15
- declare function deleteRecordsByHashKey(tableName: string, indexName: string | undefined, hashKeyValue: string, verbose?: boolean): Promise<number>;
16
- export { getDocClient, ScanCommand, BatchWriteCommand, GetCommand, PutCommand, QueryCommand, UpdateCommand, scanWholeTable, batchCreateRecords, createRecord, getRecordsByKey, getRecordByKey, updateRecordByKey, batchDeleteRecords, deleteRecordsByHashKey, };