@platforma-sdk/bootstrap 3.5.18 → 3.5.20
This diff reflects the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- package/dist/block.cjs +172 -0
- package/dist/block.cjs.map +1 -0
- package/dist/block.d.ts +0 -1
- package/dist/block.js +151 -0
- package/dist/block.js.map +1 -0
- package/dist/cmd-opts.cjs +205 -0
- package/dist/cmd-opts.cjs.map +1 -0
- package/dist/cmd-opts.d.ts +36 -38
- package/dist/cmd-opts.js +181 -0
- package/dist/cmd-opts.js.map +1 -0
- package/dist/commands/create-block.cjs +22 -0
- package/dist/commands/create-block.cjs.map +1 -0
- package/dist/commands/create-block.d.ts +1 -3
- package/dist/commands/create-block.js +20 -0
- package/dist/commands/create-block.js.map +1 -0
- package/dist/commands/reset.cjs +23 -0
- package/dist/commands/reset.cjs.map +1 -0
- package/dist/commands/reset.d.ts +1 -3
- package/dist/commands/reset.js +21 -0
- package/dist/commands/reset.js.map +1 -0
- package/dist/commands/start/docker/s3.cjs +69 -0
- package/dist/commands/start/docker/s3.cjs.map +1 -0
- package/dist/commands/start/docker/s3.d.ts +21 -23
- package/dist/commands/start/docker/s3.js +67 -0
- package/dist/commands/start/docker/s3.js.map +1 -0
- package/dist/commands/start/docker.cjs +69 -0
- package/dist/commands/start/docker.cjs.map +1 -0
- package/dist/commands/start/docker.d.ts +21 -23
- package/dist/commands/start/docker.js +67 -0
- package/dist/commands/start/docker.js.map +1 -0
- package/dist/commands/start/local/s3.cjs +135 -0
- package/dist/commands/start/local/s3.cjs.map +1 -0
- package/dist/commands/start/local/s3.d.ts +25 -27
- package/dist/commands/start/local/s3.js +114 -0
- package/dist/commands/start/local/s3.js.map +1 -0
- package/dist/commands/start/local.cjs +122 -0
- package/dist/commands/start/local.cjs.map +1 -0
- package/dist/commands/start/local.d.ts +23 -25
- package/dist/commands/start/local.js +101 -0
- package/dist/commands/start/local.js.map +1 -0
- package/dist/commands/start.cjs +23 -0
- package/dist/commands/start.cjs.map +1 -0
- package/dist/commands/start.d.ts +1 -3
- package/dist/commands/start.js +21 -0
- package/dist/commands/start.js.map +1 -0
- package/dist/commands/stop.cjs +29 -0
- package/dist/commands/stop.cjs.map +1 -0
- package/dist/commands/stop.d.ts +1 -3
- package/dist/commands/stop.js +27 -0
- package/dist/commands/stop.js.map +1 -0
- package/dist/commands/svc/create/docker/s3.cjs +75 -0
- package/dist/commands/svc/create/docker/s3.cjs.map +1 -0
- package/dist/commands/svc/create/docker/s3.d.ts +22 -24
- package/dist/commands/svc/create/docker/s3.js +73 -0
- package/dist/commands/svc/create/docker/s3.js.map +1 -0
- package/dist/commands/svc/create/docker.cjs +72 -0
- package/dist/commands/svc/create/docker.cjs.map +1 -0
- package/dist/commands/svc/create/docker.d.ts +22 -24
- package/dist/commands/svc/create/docker.js +70 -0
- package/dist/commands/svc/create/docker.js.map +1 -0
- package/dist/commands/svc/create/local/s3.cjs +121 -0
- package/dist/commands/svc/create/local/s3.cjs.map +1 -0
- package/dist/commands/svc/create/local/s3.d.ts +26 -28
- package/dist/commands/svc/create/local/s3.js +100 -0
- package/dist/commands/svc/create/local/s3.js.map +1 -0
- package/dist/commands/svc/create/local.cjs +117 -0
- package/dist/commands/svc/create/local.cjs.map +1 -0
- package/dist/commands/svc/create/local.d.ts +24 -26
- package/dist/commands/svc/create/local.js +96 -0
- package/dist/commands/svc/create/local.js.map +1 -0
- package/dist/commands/svc/delete.cjs +40 -0
- package/dist/commands/svc/delete.cjs.map +1 -0
- package/dist/commands/svc/delete.d.ts +3 -5
- package/dist/commands/svc/delete.js +38 -0
- package/dist/commands/svc/delete.js.map +1 -0
- package/dist/commands/svc/down.cjs +32 -0
- package/dist/commands/svc/down.cjs.map +1 -0
- package/dist/commands/svc/down.d.ts +2 -4
- package/dist/commands/svc/down.js +30 -0
- package/dist/commands/svc/down.js.map +1 -0
- package/dist/commands/svc/list.cjs +31 -0
- package/dist/commands/svc/list.cjs.map +1 -0
- package/dist/commands/svc/list.d.ts +0 -1
- package/dist/commands/svc/list.js +29 -0
- package/dist/commands/svc/list.js.map +1 -0
- package/dist/commands/svc/up.cjs +51 -0
- package/dist/commands/svc/up.cjs.map +1 -0
- package/dist/commands/svc/up.d.ts +2 -4
- package/dist/commands/svc/up.js +49 -0
- package/dist/commands/svc/up.js.map +1 -0
- package/dist/core.cjs +742 -0
- package/dist/core.cjs.map +1 -0
- package/dist/core.d.ts +0 -1
- package/dist/core.js +737 -0
- package/dist/core.js.map +1 -0
- package/dist/index.cjs +42 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.ts +0 -1
- package/dist/index.js +39 -126
- package/dist/index.js.map +1 -1
- package/dist/package.cjs +27 -0
- package/dist/package.cjs.map +1 -0
- package/dist/package.d.ts +0 -1
- package/dist/package.js +22 -0
- package/dist/package.js.map +1 -0
- package/dist/platforma.cjs +169 -0
- package/dist/platforma.cjs.map +1 -0
- package/dist/platforma.d.ts +0 -1
- package/dist/platforma.js +141 -0
- package/dist/platforma.js.map +1 -0
- package/dist/run.cjs +68 -0
- package/dist/run.cjs.map +1 -0
- package/dist/run.d.ts +0 -1
- package/dist/run.js +64 -0
- package/dist/run.js.map +1 -0
- package/dist/state.cjs +143 -0
- package/dist/state.cjs.map +1 -0
- package/dist/state.d.ts +0 -1
- package/dist/state.js +141 -0
- package/dist/state.js.map +1 -0
- package/dist/templates/compose.cjs +67 -0
- package/dist/templates/compose.cjs.map +1 -0
- package/dist/templates/compose.d.ts +0 -1
- package/dist/templates/compose.js +65 -0
- package/dist/templates/compose.js.map +1 -0
- package/dist/templates/pl-config.cjs +264 -0
- package/dist/templates/pl-config.cjs.map +1 -0
- package/dist/templates/pl-config.d.ts +0 -1
- package/dist/templates/pl-config.js +260 -0
- package/dist/templates/pl-config.js.map +1 -0
- package/dist/templates/types.cjs +31 -0
- package/dist/templates/types.cjs.map +1 -0
- package/dist/templates/types.d.ts +0 -1
- package/dist/templates/types.js +28 -0
- package/dist/templates/types.js.map +1 -0
- package/dist/util.cjs +98 -0
- package/dist/util.cjs.map +1 -0
- package/dist/util.d.ts +0 -1
- package/dist/util.js +89 -0
- package/dist/util.js.map +1 -0
- package/package.json +10 -13
- package/dist/block.d.ts.map +0 -1
- package/dist/cmd-opts.d.ts.map +0 -1
- package/dist/commands/create-block.d.ts.map +0 -1
- package/dist/commands/reset.d.ts.map +0 -1
- package/dist/commands/start/docker/s3.d.ts.map +0 -1
- package/dist/commands/start/docker.d.ts.map +0 -1
- package/dist/commands/start/local/s3.d.ts.map +0 -1
- package/dist/commands/start/local.d.ts.map +0 -1
- package/dist/commands/start.d.ts.map +0 -1
- package/dist/commands/stop.d.ts.map +0 -1
- package/dist/commands/svc/create/docker/s3.d.ts.map +0 -1
- package/dist/commands/svc/create/docker.d.ts.map +0 -1
- package/dist/commands/svc/create/local/s3.d.ts.map +0 -1
- package/dist/commands/svc/create/local.d.ts.map +0 -1
- package/dist/commands/svc/delete.d.ts.map +0 -1
- package/dist/commands/svc/down.d.ts.map +0 -1
- package/dist/commands/svc/list.d.ts.map +0 -1
- package/dist/commands/svc/up.d.ts.map +0 -1
- package/dist/core.d.ts.map +0 -1
- package/dist/index.d.ts.map +0 -1
- package/dist/index.mjs +0 -1945
- package/dist/index.mjs.map +0 -1
- package/dist/package.d.ts.map +0 -1
- package/dist/platforma.d.ts.map +0 -1
- package/dist/run.d.ts.map +0 -1
- package/dist/state.d.ts.map +0 -1
- package/dist/templates/compose.d.ts.map +0 -1
- package/dist/templates/pl-config.d.ts.map +0 -1
- package/dist/templates/types.d.ts.map +0 -1
- package/dist/util.d.ts.map +0 -1
package/dist/index.mjs
DELETED
@@ -1,1945 +0,0 @@
|
|
|
1
|
-
var Sr = Object.defineProperty;
|
|
2
|
-
var $r = (a, e, t) => e in a ? Sr(a, e, { enumerable: !0, configurable: !0, writable: !0, value: t }) : a[e] = t;
|
|
3
|
-
var l = (a, e, t) => $r(a, typeof e != "symbol" ? e + "" : e, t);
|
|
4
|
-
import { Flags as P, Command as $, Args as Q } from "@oclif/core";
|
|
5
|
-
import * as me from "node:os";
|
|
6
|
-
import _ from "node:os";
|
|
7
|
-
import f, { createWriteStream as Lr } from "node:fs";
|
|
8
|
-
import m, { resolve as Ar } from "node:path";
|
|
9
|
-
import { execSync as xe, spawn as Ir, spawnSync as Or } from "node:child_process";
|
|
10
|
-
import fe from "winston";
|
|
11
|
-
import { randomBytes as Er } from "node:crypto";
|
|
12
|
-
import re from "readline-sync";
|
|
13
|
-
import * as M from "node:fs/promises";
|
|
14
|
-
import { Writable as Rr } from "node:stream";
|
|
15
|
-
import { z as C } from "zod";
|
|
16
|
-
import xr from "decompress";
|
|
17
|
-
import { getDefaultPlVersion as he } from "@milaboratories/pl-deployments";
|
|
18
|
-
import ur from "yaml";
|
|
19
|
-
import Tr from "node:https";
|
|
20
|
-
import * as Fr from "tar";
|
|
21
|
-
const L = {
|
|
22
|
-
"log-level": P.string({
|
|
23
|
-
description: "logging level",
|
|
24
|
-
default: "info",
|
|
25
|
-
options: ["error", "warn", "info", "debug"],
|
|
26
|
-
required: !1
|
|
27
|
-
})
|
|
28
|
-
};
|
|
29
|
-
P.string({
|
|
30
|
-
description: "name of instance",
|
|
31
|
-
required: !1
|
|
32
|
-
});
|
|
33
|
-
const ye = {
|
|
34
|
-
image: P.string({
|
|
35
|
-
description: "use custom docker image to run platforma"
|
|
36
|
-
})
|
|
37
|
-
}, j = {
|
|
38
|
-
version: P.string({
|
|
39
|
-
description: "use custom platforma release (official docker image or binary package)"
|
|
40
|
-
})
|
|
41
|
-
}, be = {
|
|
42
|
-
arch: P.string({
|
|
43
|
-
description: "override architecture. You can start amd64 linux image on arm-based host (say, Apple M family processor). I.e. arm64, amd64, amd64/v2"
|
|
44
|
-
})
|
|
45
|
-
}, B = {
|
|
46
|
-
license: P.string({
|
|
47
|
-
description: 'pass a license code. The license can be got from "https://licensing.milaboratories.com".'
|
|
48
|
-
}),
|
|
49
|
-
"license-file": P.file({
|
|
50
|
-
exists: !0,
|
|
51
|
-
description: "specify a path to the file with a license. The license can be got from 'https://licensing.milaboratories.com'."
|
|
52
|
-
})
|
|
53
|
-
}, U = {
|
|
54
|
-
"grpc-port": P.integer({
|
|
55
|
-
description: "port for Platforma Backend gRPC API. Default is 6345",
|
|
56
|
-
env: "PLATFORMA_GRPC_PORT"
|
|
57
|
-
}),
|
|
58
|
-
"grpc-listen": P.string({
|
|
59
|
-
description: "full listen addr for Platforma Backend gRPC API. Default is 127.0.0.1:6345",
|
|
60
|
-
env: "PLATFORMA_GRPC_LISTEN"
|
|
61
|
-
}),
|
|
62
|
-
"monitoring-port": P.integer({
|
|
63
|
-
description: "port for Platforma Backend monitoring API. Default is 9090",
|
|
64
|
-
env: "PLATFORMA_MONITORING_PORT"
|
|
65
|
-
}),
|
|
66
|
-
"monitoring-listen": P.string({
|
|
67
|
-
description: "full listen addr for Platforma Backend monitoring API. Default is 127.0.0.1:9090",
|
|
68
|
-
env: "PLATFORMA_MONITORING_LISTEN"
|
|
69
|
-
}),
|
|
70
|
-
"debug-port": P.integer({
|
|
71
|
-
description: "port for Platforma Backend debug API. Default is 9091",
|
|
72
|
-
env: "PLATFORMA_DEBUG_PORT"
|
|
73
|
-
}),
|
|
74
|
-
"debug-listen": P.string({
|
|
75
|
-
description: "full listen addr for Platforma Backend debug API. Default is 127.0.0.1:9091",
|
|
76
|
-
env: "PLATFORMA_DEBUG_LISTEN"
|
|
77
|
-
})
|
|
78
|
-
}, we = {
|
|
79
|
-
"s3-port": P.integer({
|
|
80
|
-
description: "port that S3 will listen, default is 9000",
|
|
81
|
-
default: 9e3,
|
|
82
|
-
env: "PLATFORMA_S3_PORT"
|
|
83
|
-
}),
|
|
84
|
-
"s3-console-port": P.integer({
|
|
85
|
-
description: "port that a console of S3 will listen, default is 9001",
|
|
86
|
-
default: 9001,
|
|
87
|
-
env: "PLATFORMA_S3_CONSOLE_PORT"
|
|
88
|
-
})
|
|
89
|
-
}, G = {
|
|
90
|
-
storage: P.string({
|
|
91
|
-
description: "specify path on host to be used as storage for all Platforma Backend data"
|
|
92
|
-
})
|
|
93
|
-
}, yr = {
|
|
94
|
-
"minio-presign-host": P.boolean({
|
|
95
|
-
description: "use 'minio' host instead of 'localhost' in presign URLs"
|
|
96
|
-
})
|
|
97
|
-
}, pe = {
|
|
98
|
-
mount: P.string({
|
|
99
|
-
multiple: !0,
|
|
100
|
-
description: "things to be mounted into platforma docker container. Targets will appear inside the container under the same absolute paths"
|
|
101
|
-
})
|
|
102
|
-
}, Pe = {
|
|
103
|
-
"pl-log-file": P.file({
|
|
104
|
-
description: "specify path for Platforma Backend log file"
|
|
105
|
-
})
|
|
106
|
-
}, ve = {
|
|
107
|
-
"pl-workdir": P.file({
|
|
108
|
-
description: "specify working directory for Platforma Backend process"
|
|
109
|
-
})
|
|
110
|
-
}, ke = {
|
|
111
|
-
"pl-binary": P.file({
|
|
112
|
-
description: "start given Platforma Backend binary instead of automatically downloaded version"
|
|
113
|
-
})
|
|
114
|
-
}, Se = {
|
|
115
|
-
"pl-sources": P.file({
|
|
116
|
-
description: "path to pl repository root: build Platforma Backend from sources and start the resulting binary"
|
|
117
|
-
})
|
|
118
|
-
}, $e = {
|
|
119
|
-
config: P.string({
|
|
120
|
-
description: "use custom Platforma Backend config"
|
|
121
|
-
})
|
|
122
|
-
};
|
|
123
|
-
P.file({
|
|
124
|
-
description: "specify path on host to be used as 'primary' storage"
|
|
125
|
-
});
|
|
126
|
-
const oe = {
|
|
127
|
-
"storage-work": P.file({
|
|
128
|
-
description: "specify path on host to be used as 'work' storage"
|
|
129
|
-
})
|
|
130
|
-
};
|
|
131
|
-
P.file({
|
|
132
|
-
description: "specify path on host to be used as 'library' storage"
|
|
133
|
-
});
|
|
134
|
-
const le = {
|
|
135
|
-
"storage-primary": P.string({
|
|
136
|
-
description: `specify 'primary' storage destination URL.
|
|
137
|
-
file:/path/to/dir for directory on local FS
|
|
138
|
-
s3://<bucket>/?region=<name> for real AWS bucket
|
|
139
|
-
s3e://<endpoint>/<bucket>/?region=<name> for bucket behind custom endpoint via http
|
|
140
|
-
s3es://<endpoint>/<bucket>/?region=<name> for bucket behind custom endpoint via https`
|
|
141
|
-
})
|
|
142
|
-
}, de = {
|
|
143
|
-
"storage-library": P.string({
|
|
144
|
-
description: `specify 'library' storage destination URL.
|
|
145
|
-
file:/path/to/dir for directory on local FS
|
|
146
|
-
s3://<bucket>/?region=<name> for real AWS bucket
|
|
147
|
-
s3e://<endpoint>/<bucket>/?region=<name> for bucket behind custom endpoint via http
|
|
148
|
-
s3es://<endpoint>/<bucket>/?region=<name> for bucket behind custom endpoint via https`
|
|
149
|
-
})
|
|
150
|
-
}, Dr = {
|
|
151
|
-
"auth-enabled": P.boolean({
|
|
152
|
-
description: "enable authorization"
|
|
153
|
-
})
|
|
154
|
-
}, Nr = {
|
|
155
|
-
"auth-htpasswd-file": P.file({
|
|
156
|
-
description: "path to .htpasswd file with Platforma users (static user DB auth source)"
|
|
157
|
-
})
|
|
158
|
-
}, _r = {
|
|
159
|
-
"auth-ldap-server": P.string({
|
|
160
|
-
description: "address of LDAP server to use for auth in Platforma (auth source)"
|
|
161
|
-
})
|
|
162
|
-
}, Cr = {
|
|
163
|
-
"auth-ldap-default-dn": P.string({
|
|
164
|
-
description: "DN to use when checking user with LDAP bind operation: e.g. cn=%u,ou=users,dc=example,dc=com"
|
|
165
|
-
})
|
|
166
|
-
}, J = {
|
|
167
|
-
...Dr,
|
|
168
|
-
...Nr,
|
|
169
|
-
..._r,
|
|
170
|
-
...Cr
|
|
171
|
-
};
|
|
172
|
-
function Mr(a) {
|
|
173
|
-
return re.question(`${a} [y/N] `).toLowerCase() === "y";
|
|
174
|
-
}
|
|
175
|
-
function H(a) {
|
|
176
|
-
throw new Error("this should never happen");
|
|
177
|
-
}
|
|
178
|
-
function A(a = "debug") {
|
|
179
|
-
return fe.createLogger({
|
|
180
|
-
level: a,
|
|
181
|
-
format: fe.format.printf(({ level: e, message: t }) => {
|
|
182
|
-
const r = " ".repeat(e.length + 2);
|
|
183
|
-
if (typeof t != "string") {
|
|
184
|
-
const i = JSON.stringify(t);
|
|
185
|
-
throw Error(`logger message ${i} is not a string`);
|
|
186
|
-
}
|
|
187
|
-
const n = t.split(`
|
|
188
|
-
`).map((i, c) => c === 0 ? i : r + i).join(`
|
|
189
|
-
`);
|
|
190
|
-
return `${((i) => fe.format.colorize().colorize(i, i))(e)}: ${n}`;
|
|
191
|
-
}),
|
|
192
|
-
transports: [
|
|
193
|
-
new fe.transports.Console({
|
|
194
|
-
stderrLevels: ["error", "warn", "info", "debug"],
|
|
195
|
-
handleExceptions: !0
|
|
196
|
-
})
|
|
197
|
-
]
|
|
198
|
-
});
|
|
199
|
-
}
|
|
200
|
-
function jr(a) {
|
|
201
|
-
return Er(Math.ceil(a / 2)).toString("hex").slice(0, a);
|
|
202
|
-
}
|
|
203
|
-
function Br(a) {
|
|
204
|
-
return a.startsWith("~") ? m.join(_.homedir(), a.slice(1)) : a;
|
|
205
|
-
}
|
|
206
|
-
function Ae(a, e) {
|
|
207
|
-
f.existsSync(a) || (f.mkdirSync(a, { recursive: !0 }), e != null && e.mode && f.chmodSync(a, e.mode));
|
|
208
|
-
}
|
|
209
|
-
function Ur(a) {
|
|
210
|
-
try {
|
|
211
|
-
if (_.platform() !== "win32")
|
|
212
|
-
return xe(`ps -p ${a} -o comm=`, { encoding: "utf8" }).trim();
|
|
213
|
-
const e = `wmic process where processid=${a} get Caption`, t = xe(e, { encoding: "utf8" }).split(`
|
|
214
|
-
`);
|
|
215
|
-
return t.length <= 1 ? "" : t[1].trim();
|
|
216
|
-
} catch {
|
|
217
|
-
return "";
|
|
218
|
-
}
|
|
219
|
-
}
|
|
220
|
-
function Gr(a) {
|
|
221
|
-
const e = xe(`docker compose ls --filter name=${a} --format json`, { encoding: "utf8" }).trim(), t = JSON.parse(e);
|
|
222
|
-
for (const r of t)
|
|
223
|
-
if (r.Name === a)
|
|
224
|
-
return r;
|
|
225
|
-
}
|
|
226
|
-
const Te = ["Python"], br = ["Tengo", "Python"], Jr = C.union([C.literal("Tengo"), C.literal("Python")]), Hr = C.object({
|
|
227
|
-
npmOrgName: C.string().min(1),
|
|
228
|
-
orgName: C.string().min(1, { message: "Organization name must be provided" }),
|
|
229
|
-
blockName: C.string().min(1, { message: "Block name must be provided" }),
|
|
230
|
-
softwarePlatforms: C.array(Jr).refine((a) => new Set(a).size === a.length, {
|
|
231
|
-
message: "Must be an array of unique software platforms"
|
|
232
|
-
})
|
|
233
|
-
});
|
|
234
|
-
async function Wr(a) {
|
|
235
|
-
const { npmOrgName: e, orgName: t, blockName: r, softwarePlatforms: n } = qr(), s = m.join(process.cwd(), r);
|
|
236
|
-
a.info("Downloading boilerplate code..."), await zr(
|
|
237
|
-
// 'https://github.com/milaboratory/platforma-block-boilerplate/archive/refs/heads/software_platforms.zip',
|
|
238
|
-
// 'platforma-block-boilerplate-software_platforms',
|
|
239
|
-
"https://github.com/milaboratory/platforma-block-boilerplate/archive/refs/heads/main.zip",
|
|
240
|
-
"platforma-block-boilerplate-main",
|
|
241
|
-
s
|
|
242
|
-
);
|
|
243
|
-
const i = br.filter((o) => n.indexOf(o) < 0), c = Te.length == i.length;
|
|
244
|
-
a.info(`Keep platforms '${n}', remove: '${i}'. Will remove all platforms? ${c}`);
|
|
245
|
-
for (const o of i)
|
|
246
|
-
await Yr(s, o);
|
|
247
|
-
c && await Vr(s), a.info("Replace everything in the template with provided options..."), Kr(s, [
|
|
248
|
-
// '@' literal ensures only npm org name will be renamed,
|
|
249
|
-
// as public registry for software also is called platforma-open, but without '@'.
|
|
250
|
-
// Also, don't rename an organization for runenv-python-3 package.
|
|
251
|
-
{ from: /@platforma-open(?!.*runenv-python-3)/g, to: `@${e}` },
|
|
252
|
-
{ from: /my-org/g, to: t },
|
|
253
|
-
{ from: /block-boilerplate/g, to: r }
|
|
254
|
-
]);
|
|
255
|
-
}
|
|
256
|
-
function qr() {
|
|
257
|
-
let a = re.question(
|
|
258
|
-
'Write an organization name for npm. Default is "platforma-open": '
|
|
259
|
-
);
|
|
260
|
-
a === "" && (a = "platforma-open");
|
|
261
|
-
let e = "";
|
|
262
|
-
for (; e.length < 1; )
|
|
263
|
-
e = re.question('Write an organization name, e.g. "my-org": ');
|
|
264
|
-
let t = "";
|
|
265
|
-
for (; t.length < 1; )
|
|
266
|
-
t = re.question('Write a name of the block, e.g. "hello-world": ');
|
|
267
|
-
const r = re.keyInYN("Create package for block's software?");
|
|
268
|
-
let n = ["Tengo"];
|
|
269
|
-
if (r)
|
|
270
|
-
for (; n.length < br.length; ) {
|
|
271
|
-
const i = re.keyInSelect(Te, "Choose software platform:");
|
|
272
|
-
if (i < 0) break;
|
|
273
|
-
n.push(Te[i]);
|
|
274
|
-
}
|
|
275
|
-
n = Array.from(new Set(n)).sort();
|
|
276
|
-
const s = Hr.safeParse({ npmOrgName: a, orgName: e, blockName: t, softwarePlatforms: n });
|
|
277
|
-
if (!s.success && s.error.issues.length)
|
|
278
|
-
throw new Error(s.error.issues.map((i) => i.message).join("; "));
|
|
279
|
-
return s.data;
|
|
280
|
-
}
|
|
281
|
-
async function zr(a, e, t) {
|
|
282
|
-
const n = await (await fetch(a)).blob(), s = await M.mkdtemp(m.join(_.tmpdir(), "create-repo")), i = m.join(s, "packed-repo.zip"), c = Rr.toWeb(Lr(i));
|
|
283
|
-
await n.stream().pipeTo(c);
|
|
284
|
-
const o = m.join(s, "unpacked-repo");
|
|
285
|
-
await M.mkdir(o), await xr(i, o), await M.cp(m.join(o, e), t, { recursive: !0 });
|
|
286
|
-
}
|
|
287
|
-
async function Yr(a, e) {
|
|
288
|
-
const t = e.toLowerCase();
|
|
289
|
-
await te(
|
|
290
|
-
m.join(a, "ui", "src", "pages", "MainPage.vue"),
|
|
291
|
-
new RegExp(`.*${t}Message.*\\n`, "g")
|
|
292
|
-
), await te(
|
|
293
|
-
m.join(a, "model", "src", "index.ts"),
|
|
294
|
-
new RegExp(`.*${t}Message.*\\n\\n`, "g")
|
|
295
|
-
), await te(
|
|
296
|
-
m.join(a, "workflow", "src", "main.tpl.tengo"),
|
|
297
|
-
new RegExp(`.*${t}.*exec.builder.*[\\s\\S]*?\\n\\n`, "g")
|
|
298
|
-
), await te(
|
|
299
|
-
m.join(a, "workflow", "src", "main.tpl.tengo"),
|
|
300
|
-
new RegExp(`.*${t}Message.*\\n`, "g")
|
|
301
|
-
), await te(
|
|
302
|
-
m.join(a, "workflow", "src", "wf.test.ts"),
|
|
303
|
-
new RegExp(`.*${t}Message.*\\n.*expect.*\\n\\n`, "g")
|
|
304
|
-
), await M.rm(m.join(a, "software", `src_${t}`), { recursive: !0 }), await We(
|
|
305
|
-
m.join(a, "software", "package.json"),
|
|
306
|
-
(r) => {
|
|
307
|
-
const n = JSON.parse(r);
|
|
308
|
-
return delete n["block-software"].artifacts[`hello-${t}-artifact`], delete n["block-software"].entrypoints[`hello-world-${t}`], JSON.stringify(n, null, 2);
|
|
309
|
-
}
|
|
310
|
-
);
|
|
311
|
-
}
|
|
312
|
-
async function Vr(a) {
|
|
313
|
-
await M.rm(m.join(a, "software"), { recursive: !0 }), await We(
|
|
314
|
-
m.join(a, "workflow", "package.json"),
|
|
315
|
-
(e) => {
|
|
316
|
-
const t = JSON.parse(e);
|
|
317
|
-
return delete t.dependencies["@platforma-open/my-org.block-boilerplate.software"], JSON.stringify(t, null, 2);
|
|
318
|
-
}
|
|
319
|
-
), await te(
|
|
320
|
-
m.join(a, "pnpm-workspace.yaml"),
|
|
321
|
-
/.*- software$\n/gm
|
|
322
|
-
);
|
|
323
|
-
}
|
|
324
|
-
async function Kr(a, e) {
|
|
325
|
-
const t = await Xr(a);
|
|
326
|
-
for (const { from: r, to: n } of e)
|
|
327
|
-
for (const s of t)
|
|
328
|
-
await wr(s, r, n);
|
|
329
|
-
}
|
|
330
|
-
async function Xr(a) {
|
|
331
|
-
return (await M.readdir(a, {
|
|
332
|
-
withFileTypes: !0,
|
|
333
|
-
recursive: !0
|
|
334
|
-
})).filter((t) => t.isFile()).map((t) => m.join(t.parentPath, t.name));
|
|
335
|
-
}
|
|
336
|
-
async function We(a, e) {
|
|
337
|
-
const t = await M.readFile(a), r = e(t.toString());
|
|
338
|
-
await M.writeFile(a, r);
|
|
339
|
-
}
|
|
340
|
-
async function wr(a, e, t) {
|
|
341
|
-
return await We(a, (r) => r.replaceAll(e, t));
|
|
342
|
-
}
|
|
343
|
-
async function te(a, e) {
|
|
344
|
-
return await wr(a, e, "");
|
|
345
|
-
}
|
|
346
|
-
const ae = class ae extends $ {
|
|
347
|
-
async run() {
|
|
348
|
-
const { flags: e } = await this.parse(ae), t = A(e["log-level"]);
|
|
349
|
-
await Wr(t);
|
|
350
|
-
}
|
|
351
|
-
};
|
|
352
|
-
l(ae, "description", "Helps to create a new block by downloading a block's template."), l(ae, "examples", ["<%= name %>"]), l(ae, "flags", {
|
|
353
|
-
...L
|
|
354
|
-
});
|
|
355
|
-
let Fe = ae;
|
|
356
|
-
function pr(...a) {
|
|
357
|
-
return Ar(__dirname, "..", ...a);
|
|
358
|
-
}
|
|
359
|
-
function Z(...a) {
|
|
360
|
-
return pr("assets", ...a);
|
|
361
|
-
}
|
|
362
|
-
function Ie(...a) {
|
|
363
|
-
return f.readFileSync(pr(...a));
|
|
364
|
-
}
|
|
365
|
-
function fr(a) {
|
|
366
|
-
return a || (a = he()), `quay.io/milaboratories/platforma:${a}`;
|
|
367
|
-
}
|
|
368
|
-
const W = class W {
|
|
369
|
-
constructor(e) {
|
|
370
|
-
l(this, "state", {
|
|
371
|
-
currentInstance: ""
|
|
372
|
-
});
|
|
373
|
-
l(this, "filePath");
|
|
374
|
-
l(this, "dirPath");
|
|
375
|
-
e = e ?? m.resolve(_.homedir(), ".config", "pl-bootstrap");
|
|
376
|
-
const t = m.join(e, "state.json");
|
|
377
|
-
this.dirPath = e, this.filePath = t, f.existsSync(e) || f.mkdirSync(e, { recursive: !0 }), f.existsSync(t) && (this.state = JSON.parse(Ie(t).toString()));
|
|
378
|
-
}
|
|
379
|
-
static getStateInstance() {
|
|
380
|
-
return W.instance || (W.instance = new W()), W.instance;
|
|
381
|
-
}
|
|
382
|
-
path(...e) {
|
|
383
|
-
return m.join(this.dirPath, ...e);
|
|
384
|
-
}
|
|
385
|
-
instanceDir(e, ...t) {
|
|
386
|
-
return e ? this.path("data", e, ...t) : this.path("data");
|
|
387
|
-
}
|
|
388
|
-
binaries(...e) {
|
|
389
|
-
return this.path("binaries", ...e);
|
|
390
|
-
}
|
|
391
|
-
writeState() {
|
|
392
|
-
f.writeFileSync(this.filePath, JSON.stringify(this.state));
|
|
393
|
-
}
|
|
394
|
-
get instanceList() {
|
|
395
|
-
return f.existsSync(this.instanceDir()) ? f.readdirSync(this.instanceDir()).filter((t) => this.instanceExists(t)) : [];
|
|
396
|
-
}
|
|
397
|
-
instanceExists(e) {
|
|
398
|
-
return f.existsSync(this.instanceDir(e, "instance.json"));
|
|
399
|
-
}
|
|
400
|
-
getInstanceInfo(e) {
|
|
401
|
-
const t = this.instanceDir(e, "instance.json");
|
|
402
|
-
if (!f.existsSync(t))
|
|
403
|
-
throw new Error(`platforma backend instance '${e}' does not exist or is corrupted`);
|
|
404
|
-
const r = JSON.parse(Ie(t).toString());
|
|
405
|
-
return {
|
|
406
|
-
name: e,
|
|
407
|
-
...r
|
|
408
|
-
};
|
|
409
|
-
}
|
|
410
|
-
setInstanceInfo(e, t) {
|
|
411
|
-
f.existsSync(this.instanceDir(e)) || f.mkdirSync(this.instanceDir(e), { recursive: !0 });
|
|
412
|
-
const r = this.instanceDir(e, "instance.json");
|
|
413
|
-
let n = {};
|
|
414
|
-
f.existsSync(r) && (n = JSON.parse(Ie(r).toString())), f.writeFileSync(r, JSON.stringify({ ...n, ...t }));
|
|
415
|
-
}
|
|
416
|
-
isInstanceActive(e) {
|
|
417
|
-
switch (e.type) {
|
|
418
|
-
case "docker": {
|
|
419
|
-
const r = Gr(`pl-${e.name}`);
|
|
420
|
-
return r ? r.Status.trim().startsWith("running") : !1;
|
|
421
|
-
}
|
|
422
|
-
case "process":
|
|
423
|
-
return e.pid ? mr(e.pid) : !1;
|
|
424
|
-
default:
|
|
425
|
-
throw H(), new Error("cli logic error: unknown service type, cannot check its state");
|
|
426
|
-
}
|
|
427
|
-
}
|
|
428
|
-
get isActive() {
|
|
429
|
-
for (const e of this.instanceList) {
|
|
430
|
-
const t = this.getInstanceInfo(e);
|
|
431
|
-
if (this.isInstanceActive(t))
|
|
432
|
-
return !0;
|
|
433
|
-
}
|
|
434
|
-
return !1;
|
|
435
|
-
}
|
|
436
|
-
isValidPID(e) {
|
|
437
|
-
return mr(e);
|
|
438
|
-
}
|
|
439
|
-
get currentInstance() {
|
|
440
|
-
const e = this.state.currentInstance;
|
|
441
|
-
if (e && this.instanceExists(e))
|
|
442
|
-
return this.getInstanceInfo(e);
|
|
443
|
-
}
|
|
444
|
-
get currentInstanceName() {
|
|
445
|
-
return this.state.currentInstance;
|
|
446
|
-
}
|
|
447
|
-
set currentInstanceName(e) {
|
|
448
|
-
this.state.currentInstance = e, this.writeState();
|
|
449
|
-
}
|
|
450
|
-
selectInstance(e) {
|
|
451
|
-
if (!this.instanceExists(e))
|
|
452
|
-
throw new Error(`instance '${e}' does not exist`);
|
|
453
|
-
this.state.currentInstance = e, this.writeState();
|
|
454
|
-
}
|
|
455
|
-
};
|
|
456
|
-
l(W, "instance");
|
|
457
|
-
let De = W;
|
|
458
|
-
function mr(a) {
|
|
459
|
-
const e = Ur(a);
|
|
460
|
-
return e === "platforma" || e.endsWith("/platforma") || e.endsWith("\\platforma");
|
|
461
|
-
}
|
|
462
|
-
const u = De.getStateInstance();
|
|
463
|
-
function Oe(a, e, t) {
|
|
464
|
-
const r = [], n = [];
|
|
465
|
-
for (const s of e) {
|
|
466
|
-
const i = {
|
|
467
|
-
cwd: s.workdir,
|
|
468
|
-
env: {
|
|
469
|
-
...s.envs,
|
|
470
|
-
...t == null ? void 0 : t.env
|
|
471
|
-
},
|
|
472
|
-
...s.runOpts,
|
|
473
|
-
...t
|
|
474
|
-
};
|
|
475
|
-
if (s.async) {
|
|
476
|
-
const c = Qr(a, s.cmd, s.args, i);
|
|
477
|
-
n.push(c);
|
|
478
|
-
} else {
|
|
479
|
-
const c = Zr(a, s.cmd, s.args, i);
|
|
480
|
-
if (r.push(c), c.error || c.status !== 0)
|
|
481
|
-
break;
|
|
482
|
-
}
|
|
483
|
-
}
|
|
484
|
-
return {
|
|
485
|
-
executed: r,
|
|
486
|
-
spawned: n
|
|
487
|
-
};
|
|
488
|
-
}
|
|
489
|
-
function Qr(a, e, t, r) {
|
|
490
|
-
var c;
|
|
491
|
-
a.debug(
|
|
492
|
-
`Running:
|
|
493
|
-
cmd: ${JSON.stringify([e, ...t])}
|
|
494
|
-
wd: ${(c = r.cwd) == null ? void 0 : c.toString()}`
|
|
495
|
-
), r.env = { ...process.env, ...r.env }, a.debug(" spawning child process");
|
|
496
|
-
const n = Ir(e, t, r);
|
|
497
|
-
let s = !1;
|
|
498
|
-
const i = () => {
|
|
499
|
-
n.kill("SIGINT"), s = !0;
|
|
500
|
-
};
|
|
501
|
-
return a.debug(" setting up signal handler"), process.on("SIGINT", i), n.on("close", (o) => {
|
|
502
|
-
process.removeListener("SIGINT", i), s && process.exit(o);
|
|
503
|
-
}), n;
|
|
504
|
-
}
|
|
505
|
-
function Zr(a, e, t, r) {
|
|
506
|
-
return a.debug(
|
|
507
|
-
`Running:
|
|
508
|
-
cmd: ${JSON.stringify([e, ...t])}
|
|
509
|
-
opts: ${JSON.stringify(r)}`
|
|
510
|
-
), r.env = { ...process.env, ...r.env }, Or(e, t, r);
|
|
511
|
-
}
|
|
512
|
-
function Ee(a, e, t, r, n) {
|
|
513
|
-
const s = f.readFileSync(a, { encoding: "utf-8" }), i = ur.parse(s.toString());
|
|
514
|
-
if (!i.services)
|
|
515
|
-
throw new Error(`file '${a}' seems to be not a docker-compose file or has unsupported version`);
|
|
516
|
-
if (r)
|
|
517
|
-
for (const c of Object.keys(i.services))
|
|
518
|
-
r.has(c) || delete i.services[c];
|
|
519
|
-
i.name = t;
|
|
520
|
-
for (const [c, o] of (r == null ? void 0 : r.entries()) ?? []) {
|
|
521
|
-
const g = i.services[c];
|
|
522
|
-
if (!g)
|
|
523
|
-
throw new Error(`docker compose '${a}' has no declaration of service '${c}'`);
|
|
524
|
-
if (o.platform && (g.platform = o.platform), o.envs) {
|
|
525
|
-
g.environment || (g.environment = []);
|
|
526
|
-
for (let d = 0; d < ((g == null ? void 0 : g.environment.length) ?? 0); ) {
|
|
527
|
-
const b = g.environment[d].split("=")[0];
|
|
528
|
-
if (o.envs[b]) {
|
|
529
|
-
const w = g.environment.pop();
|
|
530
|
-
w && g.environment.length !== d && (g.environment[d] = w);
|
|
531
|
-
} else
|
|
532
|
-
d++;
|
|
533
|
-
}
|
|
534
|
-
for (const [d, h] of Object.entries(o.envs))
|
|
535
|
-
g.environment.push(`${d}=${h}`);
|
|
536
|
-
}
|
|
537
|
-
if (o.mounts) {
|
|
538
|
-
g.volumes || (g.volumes = []);
|
|
539
|
-
for (const d of o.mounts)
|
|
540
|
-
g.volumes.push(`${d.hostPath}:${d.containerPath}`);
|
|
541
|
-
}
|
|
542
|
-
}
|
|
543
|
-
n != null && n.dropVolumes && delete i.volumes, f.writeFileSync(e, ur.stringify(i));
|
|
544
|
-
}
|
|
545
|
-
function et(a) {
|
|
546
|
-
return {
|
|
547
|
-
id: a,
|
|
548
|
-
type: "S3",
|
|
549
|
-
indexCachePeriod: "0s",
|
|
550
|
-
endpoint: "",
|
|
551
|
-
region: "",
|
|
552
|
-
bucketName: "",
|
|
553
|
-
createBucket: !1,
|
|
554
|
-
forcePathStyle: !1,
|
|
555
|
-
key: "",
|
|
556
|
-
secret: "",
|
|
557
|
-
keyPrefix: "",
|
|
558
|
-
accessPrefixes: [],
|
|
559
|
-
uploadKeyPrefix: ""
|
|
560
|
-
};
|
|
561
|
-
}
|
|
562
|
-
function Pr(a) {
|
|
563
|
-
return {
|
|
564
|
-
id: a,
|
|
565
|
-
type: "FS",
|
|
566
|
-
indexCachePeriod: "0s",
|
|
567
|
-
rootPath: ""
|
|
568
|
-
};
|
|
569
|
-
}
|
|
570
|
-
function ee(a, e, t) {
|
|
571
|
-
a = Br(a);
|
|
572
|
-
const r = new URL(a, `file:${e}`);
|
|
573
|
-
switch (r.protocol) {
|
|
574
|
-
case "s3:": {
|
|
575
|
-
const n = r.hostname, s = r.searchParams.get("region"), i = r.pathname.slice(1);
|
|
576
|
-
return {
|
|
577
|
-
...t,
|
|
578
|
-
type: "S3",
|
|
579
|
-
bucketName: n,
|
|
580
|
-
region: s,
|
|
581
|
-
keyPrefix: i
|
|
582
|
-
};
|
|
583
|
-
}
|
|
584
|
-
case "s3e:": {
|
|
585
|
-
const n = r.pathname.split("/").slice(1), s = n[0], i = n.length > 1 ? n.slice(1).join("/") : "";
|
|
586
|
-
return {
|
|
587
|
-
...t,
|
|
588
|
-
type: "S3",
|
|
589
|
-
endpoint: `http://${r.host}/`,
|
|
590
|
-
bucketName: s,
|
|
591
|
-
keyPrefix: i,
|
|
592
|
-
region: r.searchParams.get("region"),
|
|
593
|
-
key: r.username ? `static:${r.username}` : "",
|
|
594
|
-
secret: r.password ? `static:${r.password}` : ""
|
|
595
|
-
};
|
|
596
|
-
}
|
|
597
|
-
case "s3es:": {
|
|
598
|
-
const n = r.pathname.split("/").slice(1), s = n[0], i = n.length > 1 ? n.slice(1).join("/") : "";
|
|
599
|
-
return {
|
|
600
|
-
...t,
|
|
601
|
-
type: "S3",
|
|
602
|
-
endpoint: `https://${r.host}/`,
|
|
603
|
-
bucketName: s,
|
|
604
|
-
keyPrefix: i,
|
|
605
|
-
region: r.searchParams.get("region"),
|
|
606
|
-
key: r.username ? `static:${r.username}` : "",
|
|
607
|
-
secret: r.password ? `static:${r.password}` : ""
|
|
608
|
-
};
|
|
609
|
-
}
|
|
610
|
-
case "file:":
|
|
611
|
-
return {
|
|
612
|
-
type: "FS",
|
|
613
|
-
rootPath: r.pathname
|
|
614
|
-
};
|
|
615
|
-
default:
|
|
616
|
-
throw new Error(`storage protocol '${r.protocol}' is not supported`);
|
|
617
|
-
}
|
|
618
|
-
}
|
|
619
|
-
function rt(a, e) {
|
|
620
|
-
var w, y, k, S, I, p, v, E, ge, R, qe, ze, Ye, Ve, Ke, Xe, Qe, Ze, er, rr, tr, ar, nr, sr, ir, cr, or, lr, dr, gr;
|
|
621
|
-
const t = (e == null ? void 0 : e.localRoot) ?? u.instanceDir("default"), r = {
|
|
622
|
-
level: ((w = e == null ? void 0 : e.log) == null ? void 0 : w.level) ?? "info",
|
|
623
|
-
path: ((y = e == null ? void 0 : e.log) == null ? void 0 : y.path) ?? `${t}/logs/platforma.log`
|
|
624
|
-
}, n = {
|
|
625
|
-
listen: ((k = e == null ? void 0 : e.grpc) == null ? void 0 : k.listen) ?? "localhost:6345",
|
|
626
|
-
tls: {
|
|
627
|
-
enable: ue((I = (S = e == null ? void 0 : e.grpc) == null ? void 0 : S.tls) == null ? void 0 : I.enable, !1),
|
|
628
|
-
clientAuthMode: ((v = (p = e == null ? void 0 : e.grpc) == null ? void 0 : p.tls) == null ? void 0 : v.clientAuthMode) ?? "NoAuth",
|
|
629
|
-
certFile: ((ge = (E = e == null ? void 0 : e.grpc) == null ? void 0 : E.tls) == null ? void 0 : ge.certFile) ?? `${t}/certs/tls.cert`,
|
|
630
|
-
keyFile: ((qe = (R = e == null ? void 0 : e.grpc) == null ? void 0 : R.tls) == null ? void 0 : qe.keyFile) ?? `${t}/certs/tls.key`,
|
|
631
|
-
...(ze = e == null ? void 0 : e.grpc) == null ? void 0 : ze.tls
|
|
632
|
-
}
|
|
633
|
-
}, s = {
|
|
634
|
-
auth: {
|
|
635
|
-
enabled: ((Ve = (Ye = e == null ? void 0 : e.core) == null ? void 0 : Ye.auth) == null ? void 0 : Ve.enabled) ?? !1,
|
|
636
|
-
drivers: ((Xe = (Ke = e == null ? void 0 : e.core) == null ? void 0 : Ke.auth) == null ? void 0 : Xe.drivers) ?? [
|
|
637
|
-
{ driver: "jwt", key: a },
|
|
638
|
-
{ driver: "htpasswd", path: `${t}/users.htpasswd` }
|
|
639
|
-
]
|
|
640
|
-
},
|
|
641
|
-
db: {
|
|
642
|
-
path: `${t}/db`
|
|
643
|
-
}
|
|
644
|
-
}, i = hr(
|
|
645
|
-
"main",
|
|
646
|
-
`${t}/storages/main`,
|
|
647
|
-
"main-bucket",
|
|
648
|
-
(Qe = e == null ? void 0 : e.storages) == null ? void 0 : Qe.primary
|
|
649
|
-
);
|
|
650
|
-
let c;
|
|
651
|
-
switch ((er = (Ze = e == null ? void 0 : e.storages) == null ? void 0 : Ze.work) == null ? void 0 : er.type) {
|
|
652
|
-
case void 0:
|
|
653
|
-
case "FS":
|
|
654
|
-
c = Pr("work"), c.rootPath = ((tr = (rr = e == null ? void 0 : e.storages) == null ? void 0 : rr.work) == null ? void 0 : tr.rootPath) ?? `${t}/storages/work`, c.indexCachePeriod = ((nr = (ar = e == null ? void 0 : e.storages) == null ? void 0 : ar.work) == null ? void 0 : nr.indexCachePeriod) ?? "1m";
|
|
655
|
-
break;
|
|
656
|
-
default:
|
|
657
|
-
throw new Error("work storage MUST have 'FS' type as it is used for working directories management");
|
|
658
|
-
}
|
|
659
|
-
const g = hr(
|
|
660
|
-
"library",
|
|
661
|
-
`${t}/storages/library`,
|
|
662
|
-
"library-bucket",
|
|
663
|
-
(sr = e == null ? void 0 : e.storages) == null ? void 0 : sr.library
|
|
664
|
-
), d = {
|
|
665
|
-
enabled: ue((ir = e == null ? void 0 : e.monitoring) == null ? void 0 : ir.enabled, !0),
|
|
666
|
-
listen: ((cr = e == null ? void 0 : e.monitoring) == null ? void 0 : cr.listen) ?? "127.0.0.1:9090"
|
|
667
|
-
}, h = {
|
|
668
|
-
enabled: ue((or = e == null ? void 0 : e.debug) == null ? void 0 : or.enabled, !0),
|
|
669
|
-
listen: ((lr = e == null ? void 0 : e.debug) == null ? void 0 : lr.listen) ?? "127.0.0.1:9091"
|
|
670
|
-
}, b = {
|
|
671
|
-
value: ((dr = e == null ? void 0 : e.license) == null ? void 0 : dr.value) ?? "",
|
|
672
|
-
file: ((gr = e == null ? void 0 : e.license) == null ? void 0 : gr.file) ?? ""
|
|
673
|
-
};
|
|
674
|
-
return {
|
|
675
|
-
localRoot: t,
|
|
676
|
-
license: b,
|
|
677
|
-
log: r,
|
|
678
|
-
grpc: n,
|
|
679
|
-
core: s,
|
|
680
|
-
monitoring: d,
|
|
681
|
-
debug: h,
|
|
682
|
-
numCpu: e == null ? void 0 : e.numCpu,
|
|
683
|
-
storages: { primary: i, work: c, library: g },
|
|
684
|
-
hacks: { libraryDownloadable: !0 }
|
|
685
|
-
};
|
|
686
|
-
}
|
|
687
|
-
function hr(a, e, t, r) {
|
|
688
|
-
let n;
|
|
689
|
-
switch (r == null ? void 0 : r.type) {
|
|
690
|
-
case void 0:
|
|
691
|
-
case "FS":
|
|
692
|
-
n = Pr(a), n.rootPath = (r == null ? void 0 : r.rootPath) ?? e;
|
|
693
|
-
break;
|
|
694
|
-
case "S3":
|
|
695
|
-
n = et(a), n.endpoint = r == null ? void 0 : r.endpoint, n.region = r == null ? void 0 : r.region, n.presignEndpoint = (r == null ? void 0 : r.presignEndpoint) ?? (r == null ? void 0 : r.endpoint), n.bucketName = (r == null ? void 0 : r.bucketName) ?? t, n.createBucket = ue(r == null ? void 0 : r.createBucket, !0), n.forcePathStyle = ue(r == null ? void 0 : r.forcePathStyle, !0), n.key = (r == null ? void 0 : r.key) ?? "", n.secret = (r == null ? void 0 : r.secret) ?? "", n.keyPrefix = (r == null ? void 0 : r.keyPrefix) ?? "", n.accessPrefixes = (r == null ? void 0 : r.accessPrefixes) ?? [""], n.uploadKeyPrefix = (r == null ? void 0 : r.uploadKeyPrefix) ?? "";
|
|
696
|
-
break;
|
|
697
|
-
default:
|
|
698
|
-
throw H(), new Error("unknown storage type");
|
|
699
|
-
}
|
|
700
|
-
return n;
|
|
701
|
-
}
|
|
702
|
-
function tt(a) {
|
|
703
|
-
const e = a.monitoring.enabled ? "" : " disabled", t = a.debug.enabled ? "" : " disabled", r = a.hacks.libraryDownloadable ? "true" : "false";
|
|
704
|
-
let n = a.license.value;
|
|
705
|
-
a.license.file != "" && (n = f.readFileSync(a.license.file).toString().trimEnd());
|
|
706
|
-
let s = "";
|
|
707
|
-
return a.numCpu && (s = `
|
|
708
|
-
resources:
|
|
709
|
-
cpu: ${a.numCpu}
|
|
710
|
-
`), `
|
|
711
|
-
license:
|
|
712
|
-
value: '${a.license.value}'
|
|
713
|
-
file: '${a.license.file}'
|
|
714
|
-
|
|
715
|
-
logging:
|
|
716
|
-
level: '${a.log.level}'
|
|
717
|
-
destinations:
|
|
718
|
-
- path: '${a.log.path}'
|
|
719
|
-
rotation:
|
|
720
|
-
maxSize: 1GiB
|
|
721
|
-
maxBackups: 15
|
|
722
|
-
compress: true
|
|
723
|
-
|
|
724
|
-
monitoring${e}:
|
|
725
|
-
listen: '${a.monitoring.listen}'
|
|
726
|
-
|
|
727
|
-
debug${t}:
|
|
728
|
-
listen: '${a.debug.listen}'
|
|
729
|
-
|
|
730
|
-
core:
|
|
731
|
-
logging:
|
|
732
|
-
extendedInfo: true
|
|
733
|
-
dumpResourceData: false
|
|
734
|
-
|
|
735
|
-
grpc:
|
|
736
|
-
listen: '${a.grpc.listen}'
|
|
737
|
-
|
|
738
|
-
tlsEnabled: ${JSON.stringify(a.grpc.tls.enable)}
|
|
739
|
-
tls:
|
|
740
|
-
clientAuthMode: '${a.grpc.tls.clientAuthMode}'
|
|
741
|
-
certificates:
|
|
742
|
-
- certFile: '${a.grpc.tls.certFile}'
|
|
743
|
-
keyFile: '${a.grpc.tls.keyFile}'
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
authEnabled: ${JSON.stringify(a.core.auth.enabled)}
|
|
747
|
-
auth: ${JSON.stringify(a.core.auth.drivers)}
|
|
748
|
-
db:
|
|
749
|
-
path: '${a.core.db.path}'
|
|
750
|
-
|
|
751
|
-
controllers:
|
|
752
|
-
data:
|
|
753
|
-
main:
|
|
754
|
-
storages:
|
|
755
|
-
main:
|
|
756
|
-
mode: primary
|
|
757
|
-
downloadable: true
|
|
758
|
-
|
|
759
|
-
library:
|
|
760
|
-
mode: passive
|
|
761
|
-
downloadable: ${r}
|
|
762
|
-
|
|
763
|
-
work:
|
|
764
|
-
mode: active
|
|
765
|
-
downloadable: false
|
|
766
|
-
|
|
767
|
-
storages:
|
|
768
|
-
- ${JSON.stringify(a.storages.primary)}
|
|
769
|
-
- ${JSON.stringify(a.storages.library)}
|
|
770
|
-
- ${JSON.stringify(a.storages.work)}
|
|
771
|
-
|
|
772
|
-
runner:
|
|
773
|
-
type: local
|
|
774
|
-
storageRoot: '${a.storages.work.rootPath}'
|
|
775
|
-
workdirCacheOnSuccess: 20m
|
|
776
|
-
workdirCacheOnFailure: 1h
|
|
777
|
-
${s}
|
|
778
|
-
secrets:
|
|
779
|
-
- map:
|
|
780
|
-
MI_LICENSE: ${JSON.stringify(n)}
|
|
781
|
-
|
|
782
|
-
packageLoader:
|
|
783
|
-
packagesRoot: '${a.localRoot}/packages'
|
|
784
|
-
|
|
785
|
-
workflows:
|
|
786
|
-
features:
|
|
787
|
-
pureFutureFields: true
|
|
788
|
-
commandExpressions: true
|
|
789
|
-
workdirLimits: true
|
|
790
|
-
computeLimits: false
|
|
791
|
-
`;
|
|
792
|
-
}
|
|
793
|
-
function ue(a, e) {
|
|
794
|
-
return a === void 0 ? e : a;
|
|
795
|
-
}
|
|
796
|
-
const at = ["linux", "macos", "windows"];
|
|
797
|
-
function nt(a) {
|
|
798
|
-
const e = _.platform();
|
|
799
|
-
switch (e) {
|
|
800
|
-
case "darwin":
|
|
801
|
-
return "macos";
|
|
802
|
-
case "linux":
|
|
803
|
-
return "linux";
|
|
804
|
-
case "win32":
|
|
805
|
-
return "windows";
|
|
806
|
-
default:
|
|
807
|
-
throw new Error(
|
|
808
|
-
`operating system '${e}' is not currently supported by Platforma ecosystem. The list of OSes supported: ` + JSON.stringify(at)
|
|
809
|
-
);
|
|
810
|
-
}
|
|
811
|
-
}
|
|
812
|
-
const st = ["amd64", "arm64"];
|
|
813
|
-
function vr(a) {
|
|
814
|
-
const e = _.arch();
|
|
815
|
-
switch (e) {
|
|
816
|
-
case "arm64":
|
|
817
|
-
return "arm64";
|
|
818
|
-
case "x64":
|
|
819
|
-
return "amd64";
|
|
820
|
-
default:
|
|
821
|
-
throw new Error(
|
|
822
|
-
`processor architecture '${e}' is not currently supported by Platforma ecosystem. The list of architectures supported: ` + JSON.stringify(st)
|
|
823
|
-
);
|
|
824
|
-
}
|
|
825
|
-
}
|
|
826
|
-
function it(a, e) {
|
|
827
|
-
const t = (e == null ? void 0 : e.version) ?? he(), r = (e == null ? void 0 : e.showProgress) ?? process.stdout.isTTY, n = `pl-${t}-${vr()}.tgz`, s = (e == null ? void 0 : e.downloadURL) ?? `https://cdn.platforma.bio/software/pl/${nt()}/${n}`, i = (e == null ? void 0 : e.saveTo) ?? u.binaries(n);
|
|
828
|
-
if (f.existsSync(i))
|
|
829
|
-
return a.info(`Platforma Backend archive download skipped: '${i}' already exists`), Promise.resolve(i);
|
|
830
|
-
f.mkdirSync(m.dirname(i), { recursive: !0 }), a.info(`Downloading Platforma Backend archive:
|
|
831
|
-
URL: ${s}
|
|
832
|
-
Save to: ${i}`);
|
|
833
|
-
const c = Tr.get(s);
|
|
834
|
-
return new Promise((o, g) => {
|
|
835
|
-
c.on("response", (d) => {
|
|
836
|
-
if (!d.statusCode) {
|
|
837
|
-
const y = new Error("failed to download archive: no HTTP status code in response from server");
|
|
838
|
-
c.destroy(), g(y);
|
|
839
|
-
return;
|
|
840
|
-
}
|
|
841
|
-
if (d.statusCode !== 200) {
|
|
842
|
-
const y = new Error(`failed to download archive: ${d.statusCode} ${d.statusMessage}`);
|
|
843
|
-
c.destroy(), g(y);
|
|
844
|
-
return;
|
|
845
|
-
}
|
|
846
|
-
const h = parseInt(d.headers["content-length"] || "0", 10);
|
|
847
|
-
let b = 0;
|
|
848
|
-
const w = f.createWriteStream(i);
|
|
849
|
-
d.pipe(w), d.on("data", (y) => {
|
|
850
|
-
b += y.length;
|
|
851
|
-
const k = b / h * 100;
|
|
852
|
-
r && process.stdout.write(` downloading: ${k.toFixed(2)}%\r`);
|
|
853
|
-
}), d.on("error", (y) => {
|
|
854
|
-
f.unlinkSync(i), a.error(`Failed to download Platforma Binary: ${y.message}`), c.destroy(), g(y);
|
|
855
|
-
}), w.on("finish", () => {
|
|
856
|
-
w.close(), a.info(" ... download done."), c.destroy(), o(i);
|
|
857
|
-
});
|
|
858
|
-
});
|
|
859
|
-
});
|
|
860
|
-
}
|
|
861
|
-
function ct(a, e) {
|
|
862
|
-
a.debug("extracting archive...");
|
|
863
|
-
const t = (e == null ? void 0 : e.version) ?? he();
|
|
864
|
-
a.debug(` version: '${t}'`);
|
|
865
|
-
const r = `${kr({ version: t })}.tgz`, n = (e == null ? void 0 : e.archivePath) ?? u.binaries(r);
|
|
866
|
-
a.debug(` archive path: '${n}'`);
|
|
867
|
-
const s = (e == null ? void 0 : e.extractTo) ?? lt(n);
|
|
868
|
-
if (a.debug(` target dir: '${s}'`), f.existsSync(s))
|
|
869
|
-
return a.info(`Platforma Backend binaries unpack skipped: '${s}' exists`), s;
|
|
870
|
-
if (!f.existsSync(n)) {
|
|
871
|
-
const i = `Platforma Backend binary archive not found at '${n}'`;
|
|
872
|
-
throw a.error(i), new Error(i);
|
|
873
|
-
}
|
|
874
|
-
return f.existsSync(s) || (a.debug(` creating target dir '${s}'`), f.mkdirSync(s, { recursive: !0 })), a.info(`Unpacking Platforma Backend archive:
|
|
875
|
-
Archive: ${n}
|
|
876
|
-
Target dir: ${s}`), Fr.x({
|
|
877
|
-
file: n,
|
|
878
|
-
cwd: s,
|
|
879
|
-
gzip: !0,
|
|
880
|
-
sync: !0
|
|
881
|
-
}), a.info(" ... unpack done."), s;
|
|
882
|
-
}
|
|
883
|
-
function Le(a, e) {
|
|
884
|
-
return it(a, e).then((t) => ct(a, { archivePath: t }));
|
|
885
|
-
}
|
|
886
|
-
function kr(a) {
|
|
887
|
-
return `pl-${(a == null ? void 0 : a.version) ?? he()}-${vr()}`;
|
|
888
|
-
}
|
|
889
|
-
function ot(a, ...e) {
|
|
890
|
-
return u.binaries(kr({ version: a }), ...e);
|
|
891
|
-
}
|
|
892
|
-
function lt(a) {
|
|
893
|
-
const e = a.lastIndexOf(".");
|
|
894
|
-
return e === -1 ? a : a.slice(0, e);
|
|
895
|
-
}
|
|
896
|
-
class O {
|
|
897
|
-
constructor(e) {
|
|
898
|
-
this.logger = e;
|
|
899
|
-
}
|
|
900
|
-
startLast() {
|
|
901
|
-
const e = u.currentInstance;
|
|
902
|
-
if (!e)
|
|
903
|
-
throw this.logger.error("failed to bring back Platforma Backend in the last started configuration: no last configuration found"), new Error("no previous run info found");
|
|
904
|
-
return this.startInstance(e);
|
|
905
|
-
}
|
|
906
|
-
startInstance(e) {
|
|
907
|
-
if (e.runInfo) {
|
|
908
|
-
const r = this.renderRunInfo(e.runInfo);
|
|
909
|
-
this.logger.info(`Starting platforma backend instance '${e.name}':
|
|
910
|
-
${r}`);
|
|
911
|
-
}
|
|
912
|
-
const t = Oe(
|
|
913
|
-
this.logger,
|
|
914
|
-
e.upCommands
|
|
915
|
-
);
|
|
916
|
-
return Re(t.executed), t.spawned.length > 0 && e.type === "process" && (e.pid = t.spawned[t.spawned.length - 1].pid, u.setInstanceInfo(e.name, e), this.logger.info(`instance '${e.name}' started`)), u.currentInstanceName = e.name, t.spawned;
|
|
917
|
-
}
|
|
918
|
-
stopInstance(e) {
|
|
919
|
-
this.logger.info(`stopping platforma backend instance '${e.name}'...`);
|
|
920
|
-
const t = Oe(this.logger, e.downCommands);
|
|
921
|
-
switch (Re(t.executed), e.type) {
|
|
922
|
-
case "docker":
|
|
923
|
-
return;
|
|
924
|
-
case "process": {
|
|
925
|
-
e.pid && u.isInstanceActive(e) && process.kill(e.pid);
|
|
926
|
-
return;
|
|
927
|
-
}
|
|
928
|
-
default:
|
|
929
|
-
H();
|
|
930
|
-
}
|
|
931
|
-
}
|
|
932
|
-
switchInstance(e) {
|
|
933
|
-
for (const t of u.instanceList)
|
|
934
|
-
if (t !== e.name) {
|
|
935
|
-
const r = u.getInstanceInfo(t);
|
|
936
|
-
u.isInstanceActive(r) && this.stopInstance(r);
|
|
937
|
-
}
|
|
938
|
-
return this.startInstance(e);
|
|
939
|
-
}
|
|
940
|
-
createLocal(e, t) {
|
|
941
|
-
var d, h, b, w, y, k, S, I, p, v, E, ge;
|
|
942
|
-
let r = ot(t == null ? void 0 : t.version, "binaries", "platforma");
|
|
943
|
-
t != null && t.sourcesPath && (r = m.join(_.tmpdir(), "platforma-custom-build")), t != null && t.binaryPath && (r = t.binaryPath);
|
|
944
|
-
let n = t == null ? void 0 : t.configPath;
|
|
945
|
-
const s = (t == null ? void 0 : t.workdir) ?? (n ? process.cwd() : u.instanceDir(e));
|
|
946
|
-
t != null && t.primaryURL && (t.configOptions = {
|
|
947
|
-
...t.configOptions,
|
|
948
|
-
storages: {
|
|
949
|
-
...(d = t.configOptions) == null ? void 0 : d.storages,
|
|
950
|
-
primary: ee(t.primaryURL, s, (b = (h = t.configOptions) == null ? void 0 : h.storages) == null ? void 0 : b.primary)
|
|
951
|
-
}
|
|
952
|
-
}), t != null && t.libraryURL && (t.configOptions = {
|
|
953
|
-
...t.configOptions,
|
|
954
|
-
storages: {
|
|
955
|
-
...(w = t.configOptions) == null ? void 0 : w.storages,
|
|
956
|
-
library: ee(t.libraryURL, s, (k = (y = t.configOptions) == null ? void 0 : y.storages) == null ? void 0 : k.library)
|
|
957
|
-
}
|
|
958
|
-
});
|
|
959
|
-
const i = rt(this.getLastJwt(), t == null ? void 0 : t.configOptions);
|
|
960
|
-
this.logger.debug(" checking license..."), this.checkLicense((I = (S = t == null ? void 0 : t.configOptions) == null ? void 0 : S.license) == null ? void 0 : I.value, (v = (p = t == null ? void 0 : t.configOptions) == null ? void 0 : p.license) == null ? void 0 : v.file);
|
|
961
|
-
const c = [
|
|
962
|
-
`${i.localRoot}/packages`,
|
|
963
|
-
`${i.localRoot}/packages-local`,
|
|
964
|
-
`${i.localRoot}/blocks-local`
|
|
965
|
-
];
|
|
966
|
-
i.storages.primary.type === "FS" && c.push(i.storages.primary.rootPath), i.storages.library.type === "FS" && (c.push(i.storages.library.rootPath), i.hacks.libraryDownloadable = !1), i.storages.work.type === "FS" && c.push(i.storages.work.rootPath), this.logger.debug(" creating pl state directories...");
|
|
967
|
-
for (const R of c)
|
|
968
|
-
f.existsSync(R) || (this.logger.debug(` '${R}'`), f.mkdirSync(R, { recursive: !0 }));
|
|
969
|
-
for (const R of i.core.auth.drivers)
|
|
970
|
-
R.driver === "htpasswd" && (f.existsSync(R.path) || (this.logger.debug(` installing default 'users.htpasswd' to ${R.path}...`), f.copyFileSync(Z("users.htpasswd"), R.path)));
|
|
971
|
-
n || (n = m.join(i.localRoot, "config.yaml"), this.logger.debug(` rendering configuration '${n}'...`), f.writeFileSync(n, tt(i)));
|
|
972
|
-
const o = [];
|
|
973
|
-
t != null && t.sourcesPath && o.push({
|
|
974
|
-
cmd: "go",
|
|
975
|
-
args: ["build", "-o", r, "."],
|
|
976
|
-
workdir: m.resolve(t.sourcesPath, "cmd", "platforma"),
|
|
977
|
-
runOpts: {
|
|
978
|
-
stdio: "inherit"
|
|
979
|
-
}
|
|
980
|
-
});
|
|
981
|
-
const g = {
|
|
982
|
-
async: !0,
|
|
983
|
-
cmd: r,
|
|
984
|
-
args: ["--quiet", "--config", n],
|
|
985
|
-
workdir: s,
|
|
986
|
-
runOpts: {
|
|
987
|
-
stdio: "inherit"
|
|
988
|
-
}
|
|
989
|
-
};
|
|
990
|
-
return (E = t == null ? void 0 : t.configOptions) != null && E.numCpu && (g.runOpts.env = {
|
|
991
|
-
GOMAXPROCS: String((ge = t == null ? void 0 : t.configOptions) == null ? void 0 : ge.numCpu)
|
|
992
|
-
}), o.push(g), u.setInstanceInfo(e, {
|
|
993
|
-
type: "process",
|
|
994
|
-
upCommands: o,
|
|
995
|
-
downCommands: [],
|
|
996
|
-
cleanupCommands: [],
|
|
997
|
-
runInfo: {
|
|
998
|
-
configPath: n,
|
|
999
|
-
dbPath: i.core.db.path,
|
|
1000
|
-
apiAddr: i.grpc.listen,
|
|
1001
|
-
logPath: i.log.path,
|
|
1002
|
-
primary: i.storages.primary,
|
|
1003
|
-
work: i.storages.work,
|
|
1004
|
-
library: i.storages.library
|
|
1005
|
-
}
|
|
1006
|
-
}), u.getInstanceInfo(e);
|
|
1007
|
-
}
|
|
1008
|
-
createLocalS3(e, t) {
|
|
1009
|
-
var c;
|
|
1010
|
-
this.logger.debug("creating platforma instance in 'local s3' mode...");
|
|
1011
|
-
const r = (t == null ? void 0 : t.minioPort) ?? 9e3, n = this.createLocal(e, {
|
|
1012
|
-
...t,
|
|
1013
|
-
primaryURL: (t == null ? void 0 : t.primaryURL) ?? `s3e://testuser:testpassword@localhost:${r}/main-bucket/?region=no-region`,
|
|
1014
|
-
libraryURL: (t == null ? void 0 : t.libraryURL) ?? `s3e://testuser:testpassword@localhost:${r}/library-bucket/?region=no-region`
|
|
1015
|
-
}), s = (c = t == null ? void 0 : t.configOptions) == null ? void 0 : c.localRoot, i = this.createMinio(e, {
|
|
1016
|
-
minioPort: r,
|
|
1017
|
-
minioConsolePort: t == null ? void 0 : t.minioConsolePort,
|
|
1018
|
-
storage: s ? m.join(s, "minio") : void 0
|
|
1019
|
-
});
|
|
1020
|
-
return n.upCommands = [
|
|
1021
|
-
i.start,
|
|
1022
|
-
...n.upCommands
|
|
1023
|
-
], n.downCommands = [
|
|
1024
|
-
i.stop,
|
|
1025
|
-
...n.downCommands
|
|
1026
|
-
], n.cleanupCommands = [
|
|
1027
|
-
i.cleanup,
|
|
1028
|
-
...n.cleanupCommands
|
|
1029
|
-
], u.setInstanceInfo(e, n), n;
|
|
1030
|
-
}
|
|
1031
|
-
createMinio(e, t) {
|
|
1032
|
-
this.logger.debug(" creating docker compose for minio service...");
|
|
1033
|
-
const r = Z("compose-backend.yaml"), n = u.instanceDir(e, "compose-minio.yaml");
|
|
1034
|
-
Ee(
|
|
1035
|
-
r,
|
|
1036
|
-
n,
|
|
1037
|
-
`pl-${e}-minio`,
|
|
1038
|
-
/* @__PURE__ */ new Map([
|
|
1039
|
-
["minio", {}]
|
|
1040
|
-
]),
|
|
1041
|
-
{ dropVolumes: !0 }
|
|
1042
|
-
);
|
|
1043
|
-
const s = t != null && t.version ? `:${t.version}` : "";
|
|
1044
|
-
this.logger.debug(` minio version: ${s}`);
|
|
1045
|
-
const i = (t == null ? void 0 : t.image) ?? `quay.io/minio/minio${s}`;
|
|
1046
|
-
this.logger.debug(` minio image: ${i}`);
|
|
1047
|
-
const c = (t == null ? void 0 : t.storage) ?? u.instanceDir(e, "minio");
|
|
1048
|
-
Ae(c, { mode: "0775" });
|
|
1049
|
-
const o = (t == null ? void 0 : t.minioPort) ?? 9e3, g = (t == null ? void 0 : t.minioConsolePort) ?? 9001, d = {
|
|
1050
|
-
MINIO_IMAGE: i,
|
|
1051
|
-
MINIO_STORAGE: m.resolve(c),
|
|
1052
|
-
MINIO_PORT: o.toString(),
|
|
1053
|
-
MINIO_CONSOLE_PORT: g.toString()
|
|
1054
|
-
};
|
|
1055
|
-
return {
|
|
1056
|
-
start: {
|
|
1057
|
-
cmd: "docker",
|
|
1058
|
-
args: ["compose", `--file=${n}`, "up", "--detach", "--remove-orphans", "--pull=missing"],
|
|
1059
|
-
envs: d,
|
|
1060
|
-
workdir: u.instanceDir(e),
|
|
1061
|
-
runOpts: { stdio: "inherit" }
|
|
1062
|
-
},
|
|
1063
|
-
stop: {
|
|
1064
|
-
cmd: "docker",
|
|
1065
|
-
args: ["compose", `--file=${n}`, "down"],
|
|
1066
|
-
envs: d,
|
|
1067
|
-
workdir: u.instanceDir(e),
|
|
1068
|
-
runOpts: { stdio: "inherit" }
|
|
1069
|
-
},
|
|
1070
|
-
cleanup: {
|
|
1071
|
-
cmd: "docker",
|
|
1072
|
-
args: ["compose", `--file=${n}`, "down", "--volumes", "--remove-orphans"],
|
|
1073
|
-
envs: d,
|
|
1074
|
-
workdir: u.instanceDir(e),
|
|
1075
|
-
runOpts: { stdio: "inherit" }
|
|
1076
|
-
}
|
|
1077
|
-
};
|
|
1078
|
-
}
  createDockerS3(e, t, r) {
    this.logger.debug("creating platforma instance in 'docker s3' mode...");
    const n = Z("compose-backend.yaml"), s = (r == null ? void 0 : r.image) ?? fr(r == null ? void 0 : r.version);
    this.checkLicense(r == null ? void 0 : r.license, r == null ? void 0 : r.licenseFile);
    const i = (...v) => m.join(t, ...v), c = (v) => {
      const E = i(v);
      return Ae(E, { mode: "0775" }), E;
    }, o = i("logs", "platforma.log");
    f.existsSync(o) || (f.mkdirSync(m.dirname(o), { recursive: !0 }), f.writeFileSync(o, ""));
    const g = (r == null ? void 0 : r.presignHost) ?? "localhost", d = (r == null ? void 0 : r.s3Port) ?? 9e3, h = ee(`s3e://testuser:testpassword@minio:${d}/main-bucket`);
    if (h.type !== "S3")
      throw new Error("primary storage must have 'S3' type in 'docker s3' configuration");
    h.presignEndpoint = `http://${g}:${d}`;
    const b = ee(`s3e://testuser:testpassword@minio:${d}/library-bucket`);
    if (b.type !== "S3")
      throw new Error(`${b.type} storage type is not supported for library storage`);
    b.presignEndpoint = `http://${g}:${d}`;
    const w = c("db"), y = c("work"), k = i("users.htpasswd");
    f.existsSync(k) || f.copyFileSync(Z("users.htpasswd"), k);
    const S = i("compose.yaml");
    f.existsSync(S) && this.logger.info(`replacing docker compose file ${S}`);
    const I = [];
    for (const v of (r == null ? void 0 : r.customMounts) ?? [])
      I.push({
        hostPath: v.hostPath,
        containerPath: v.containerPath ?? v.hostPath
      });
    Ee(n, S, `pl-${e}`, /* @__PURE__ */ new Map([
      ["minio", {}],
      ["backend", {
        platform: r == null ? void 0 : r.platformOverride,
        mounts: I
      }]
    ]));
    const p = {
      MINIO_IMAGE: "quay.io/minio/minio",
      MINIO_STORAGE: c("minio"),
      PL_IMAGE: s,
      PL_AUTH_HTPASSWD_PATH: k,
      PL_LICENSE: r == null ? void 0 : r.license,
      PL_LICENSE_FILE: r == null ? void 0 : r.licenseFile,
      PL_LOG_LEVEL: (r == null ? void 0 : r.logLevel) ?? "info",
      PL_LOG_DIR: m.dirname(o),
      PL_LOG_ROTATION_ENABLED: "true",
      PL_RUNNER_WD_CACHE_ON_FAILURE: "1h",
      PL_DATA_DB_ROOT: w,
      PL_DATA_PRIMARY_ROOT: c("primary"),
      PL_DATA_LIBRARY_ROOT: c("library"),
      PL_DATA_WORKDIR_ROOT: y,
      // Mount packages storage as volume, because APFS is case-insensitive on Mac OS X and this breaks some pl software installation.
      // PL_DATA_PACKAGE_ROOT: storageDir('packages'),
      ...this.configureDockerStorage("primary", h),
      ...this.configureDockerStorage("library", b)
    };
    if (r != null && r.grpcAddr && (p.PL_GRPC_ADDR = r.grpcAddr), r != null && r.grpcPort && (p.PL_GRPC_PORT = r.grpcPort.toString()), r != null && r.monitoringAddr && (p.PL_MONITORING_ADDR = r.monitoringAddr), r != null && r.monitoringPort && (p.PL_MONITORING_PORT = r.monitoringPort.toString()), r != null && r.debugAddr && (p.PL_DEBUG_ADDR = r.debugAddr), r != null && r.debugPort && (p.PL_DEBUG_PORT = r.debugPort.toString()), r != null && r.s3Port && (p.MINIO_PORT = r.s3Port.toString()), r != null && r.s3ConsolePort && (p.MINIO_CONSOLE_PORT = r.s3ConsolePort.toString()), r != null && r.auth && (r.auth.enabled && (p.PL_AUTH_ENABLED = "true"), r.auth.drivers)) {
      for (const v of r.auth.drivers)
        v.driver === "htpasswd" && (p.PL_AUTH_HTPASSWD_PATH = m.resolve(v.path), v.path = "/etc/platforma/users.htpasswd");
      p.PL_AUTH_DRIVERS = JSON.stringify(r.auth.drivers);
    }
    return u.setInstanceInfo(e, {
      type: "docker",
      upCommands: [{
        cmd: "docker",
        args: ["compose", `--file=${S}`, "up", "--detach", "--remove-orphans", "--pull=missing"],
        envs: p,
        runOpts: { stdio: "inherit" }
      }],
      downCommands: [{
        cmd: "docker",
        args: ["compose", `--file=${S}`, "down"],
        envs: p,
        runOpts: { stdio: "inherit" }
      }],
      cleanupCommands: [{
        cmd: "docker",
        args: ["compose", `--file=${S}`, "down", "--volumes", "--remove-orphans"],
        envs: p,
        runOpts: { stdio: "inherit" }
      }],
      runInfo: {
        apiPort: r == null ? void 0 : r.grpcPort,
        apiAddr: r == null ? void 0 : r.grpcAddr,
        logPath: o,
        primary: h,
        work: { type: "FS", rootPath: y },
        library: b,
        dbPath: w
      }
    }), u.getInstanceInfo(e);
  }
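  // createDocker: same docker-compose flow as above, but primary/library storages default to
  // local FS directories (or the URLs passed via primaryStorageURL/libraryStorageURL); the
  // resulting PL_* environment variables parameterize the rendered compose.yaml.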
  createDocker(e, t, r) {
    this.logger.debug("creating platforma instance in 'docker' mode...");
    const n = Z("compose-backend.yaml"), s = (r == null ? void 0 : r.image) ?? fr(r == null ? void 0 : r.version);
    this.checkLicense(r == null ? void 0 : r.license, r == null ? void 0 : r.licenseFile);
    const i = (...v) => m.join(t, ...v), c = (v) => {
      const E = i(v);
      return Ae(E, { mode: "0775" }), E;
    }, o = i("logs", "platforma.log");
    f.existsSync(o) || (f.mkdirSync(m.dirname(o), { recursive: !0 }), f.writeFileSync(o, ""));
    const g = c("db"), d = c("primary"), h = c("library"), b = c("work"), w = i("users.htpasswd");
    f.existsSync(w) || f.copyFileSync(Z("users.htpasswd"), w);
    const y = i("compose.yaml");
    f.existsSync(y) && this.logger.info(`replacing docker compose file ${y}`);
    const k = [];
    for (const v of (r == null ? void 0 : r.customMounts) ?? [])
      k.push({
        hostPath: v.hostPath,
        containerPath: v.containerPath ?? v.hostPath
      });
    this.logger.debug(`Rendering docker compose file '${y}' using '${n}' as base template`), Ee(n, y, `pl-${e}`, /* @__PURE__ */ new Map([
      ["backend", {
        platform: r == null ? void 0 : r.platformOverride,
        mounts: k
      }]
    ]));
    const S = ee((r == null ? void 0 : r.primaryStorageURL) ?? `file:${d}`, "."), I = ee((r == null ? void 0 : r.libraryStorageURL) ?? `file:${h}`, "."), p = {
      PL_IMAGE: s,
      PL_AUTH_HTPASSWD_PATH: w,
      PL_LICENSE: r == null ? void 0 : r.license,
      PL_LICENSE_FILE: r == null ? void 0 : r.licenseFile,
      PL_LOG_LEVEL: "info",
      PL_LOG_DIR: m.dirname(o),
      PL_LOG_ROTATION_ENABLED: "true",
      PL_RUNNER_WD_CACHE_ON_FAILURE: "1h",
      PL_DATA_DB_ROOT: g,
      PL_DATA_PRIMARY_ROOT: d,
      PL_DATA_LIBRARY_ROOT: h,
      PL_DATA_WORKDIR_ROOT: b,
      PL_DATA_PACKAGE_ROOT: c("packages"),
      ...this.configureDockerStorage("primary", S),
      ...this.configureDockerStorage("library", I)
    };
    if (r != null && r.grpcAddr && (p.PL_GRPC_ADDR = r.grpcAddr), r != null && r.grpcPort && (p.PL_GRPC_PORT = r.grpcPort.toString()), r != null && r.monitoringAddr && (p.PL_MONITORING_ADDR = r.monitoringAddr), r != null && r.monitoringPort && (p.PL_MONITORING_PORT = r.monitoringPort.toString()), r != null && r.debugAddr && (p.PL_DEBUG_ADDR = r.debugAddr), r != null && r.debugPort && (p.PL_DEBUG_PORT = r.debugPort.toString()), r != null && r.auth && (r.auth.enabled && (p.PL_AUTH_ENABLED = "true"), r.auth.drivers)) {
      for (const v of r.auth.drivers)
        v.driver === "htpasswd" && (p.PL_AUTH_HTPASSWD_PATH = m.resolve(v.path), v.path = "/etc/platforma/users.htpasswd");
      p.PL_AUTH_DRIVERS = JSON.stringify(r.auth.drivers);
    }
    return u.setInstanceInfo(e, {
      type: "docker",
      upCommands: [{
        cmd: "docker",
        args: ["compose", `--file=${y}`, "up", "--detach", "--remove-orphans", "--pull=missing"],
        envs: p,
        runOpts: { stdio: "inherit" }
      }],
      downCommands: [{
        cmd: "docker",
        args: ["compose", `--file=${y}`, "down"],
        envs: p,
        runOpts: { stdio: "inherit" }
      }],
      cleanupCommands: [{
        cmd: "docker",
        args: ["compose", `--file=${y}`, "down", "--volumes", "--remove-orphans"],
        envs: p,
        runOpts: { stdio: "inherit" }
      }],
      runInfo: {
        apiPort: r == null ? void 0 : r.grpcPort,
        apiAddr: r == null ? void 0 : r.grpcAddr,
        logPath: o,
        primary: S,
        work: { type: "FS", rootPath: b },
        library: I,
        dbPath: g
      }
    }), u.getInstanceInfo(e);
  }
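  // cleanupInstance: asks for confirmation, runs each instance's cleanupCommands
  // ('docker compose down --volumes --remove-orphans' for docker instances) and removes the
  // instance data directories; without a name it wipes the whole state directory.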
  cleanupInstance(e) {
    const t = [], r = /* @__PURE__ */ new Map();
    let n = "";
    if (e) {
      const s = u.getInstanceInfo(e);
      switch (r.set(e, s), s.type) {
        case "docker": {
          t.push(`docker service 'pl-${e}', including all its volumes and data in '${u.instanceDir(e)}' will be destroyed`);
          break;
        }
        case "process": {
          t.push(`directory '${u.instanceDir(e)}' would be deleted`), s.downCommands && t.push("associated docker service, including all volumes and data will be destroyed");
          break;
        }
        default:
          H();
      }
      e === u.currentInstanceName && t.push(
        "last command run cache ('pl-dev start' shorthand will stop working until next full start command call)"
      ), n = `
You are going to reset the state of platforma service '${e}':
- ${t.join(`
- `)}
`;
    } else {
      for (const s of u.instanceList)
        r.set(s, u.getInstanceInfo(s));
      t.push(
        "last command run cache ('pl-dev start' shorthand will stop working until next full start command call)",
        `all service configurations stored in: ${u.instanceDir()} (including all associated docker containers and volumes)`
      ), n = `
You are going to reset the state of all platforma services configured with pl-bootstrap package.
- ${t.join(`
- `)}
`;
    }
    if (this.logger.warn(n), !Mr("Are you sure?")) {
      this.logger.info("Reset action was canceled");
      return;
    }
    for (const [s, i] of r.entries()) {
      if (i.cleanupCommands.length) {
        this.logger.info(`Wiping instance ${s} services`);
        const c = Oe(this.logger, i.cleanupCommands);
        Re(c.executed, `failed to wipe instance ${s} services`);
      }
      this.logger.info(`Destroying instance '${s}' data directory`), f.rmSync(u.instanceDir(s), { recursive: !0, force: !0 });
    }
    e || (this.logger.info(`Destroying state dir '${u.path()}'`), f.rmSync(u.path(), { recursive: !0, force: !0 })), this.logger.info(
      `
If you want to remove all downloaded platforma binaries, delete '${u.binaries()}' dir manually
`
    );
  }
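  // mergeLicenseEnvs: fills the 'license'/'license-file' flags from MI_LICENSE / PL_LICENSE,
  // then MI_LICENSE_FILE / PL_LICENSE_FILE, falling back to '$HOME/.pl.license' if it exists.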
  mergeLicenseEnvs(e) {
    e.license === void 0 && ((process.env.MI_LICENSE ?? "") != "" ? e.license = process.env.MI_LICENSE : (process.env.PL_LICENSE ?? "") != "" && (e.license = process.env.PL_LICENSE)), e["license-file"] === void 0 && e.license === void 0 && ((process.env.MI_LICENSE_FILE ?? "") != "" ? e["license-file"] = process.env.MI_LICENSE_FILE : (process.env.PL_LICENSE_FILE ?? "") != "" ? e["license-file"] = process.env.PL_LICENSE_FILE : f.existsSync(m.resolve(_.homedir(), ".pl.license")) && (e["license-file"] = m.resolve(_.homedir(), ".pl.license")));
  }
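  // initAuthDriversList: builds the auth driver list from the 'auth-htpasswd-file' and
  // 'auth-ldap-*' flags and, when any driver is configured, prepends a 'jwt' driver backed
  // by the locally stored secret (see getLastJwt below).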
  initAuthDriversList(e, t) {
    const r = [];
    if (e["auth-htpasswd-file"] && r.push({
      driver: "htpasswd",
      path: m.resolve(t, e["auth-htpasswd-file"])
    }), !!e["auth-ldap-server"] != !!e["auth-ldap-default-dn"])
      throw new Error("LDAP auth settings require both 'server' and 'default DN' options to be set");
    if (e["auth-ldap-server"] && r.push({
      driver: "ldap",
      serverUrl: e["auth-ldap-server"],
      defaultDN: e["auth-ldap-default-dn"]
    }), r.length !== 0)
      return [{ driver: "jwt", key: this.getLastJwt() }, ...r];
  }
  /** Gets the last stored JWT secret key or generates it and stores in a file. */
  getLastJwt() {
    const e = u.path("auth.jwt"), t = "utf-8";
    let r = "";
    return f.existsSync(e) && (r = f.readFileSync(e, { encoding: t })), r == "" && (r = jr(64), f.writeFileSync(e, r, { encoding: t })), r;
  }
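  // checkLicense: fails fast with usage hints when neither a license value nor a license
  // file was resolved.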
  checkLicense(e, t) {
    if (!(e !== void 0 && e != "") && !(t !== void 0 && t != ""))
      throw this.logger.error(`A license for Platforma Backend must be set.

You can provide the license directly using the '--license' flag
or use the '--license-file' flag if the license is stored in a file.

Alternatively, you can set it via the environment variables 'MI_LICENSE' or 'PL_LICENSE'.

The license file can also be set with the variables 'MI_LICENSE_FILE' or 'PL_LICENSE_FILE',
or stored in '$HOME/.pl.license'.

You can obtain the license from "https://licensing.milaboratories.com".`), new Error("The license was not provided.");
  }
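  // configureDockerStorage: translates a parsed storage settings object into the
  // PL_DATA_<PRIMARY|LIBRARY>_* environment variables consumed by the compose template
  // (e.g. PL_DATA_PRIMARY_S3_BUCKET, PL_DATA_LIBRARY_TYPE).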
  configureDockerStorage(e, t) {
    const r = {}, n = t.type;
    switch (e = e.toUpperCase(), n) {
      case "S3":
        return r[`PL_DATA_${e}_TYPE`] = "S3", r[`PL_DATA_${e}_S3_BUCKET`] = t.bucketName, t.endpoint && (r[`PL_DATA_${e}_S3_ENDPOINT`] = t.endpoint), t.presignEndpoint && (r[`PL_DATA_${e}_S3_PRESIGN_ENDPOINT`] = t.presignEndpoint), t.region && (r[`PL_DATA_${e}_S3_REGION`] = t.region), t.key && (r[`PL_DATA_${e}_S3_KEY`] = t.key), t.secret && (r[`PL_DATA_${e}_S3_SECRET`] = t.secret), r;
      case "FS":
        return r[`PL_DATA_${e}_TYPE`] = "FS", r;
      default:
        H();
    }
    return {};
  }
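  // renderRunInfo: formats the instance summary printed after start-up (API address,
  // log path, primary/library/work storage locations and the database path).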
  renderRunInfo(e, t = 10) {
    var c, o;
    const r = [], n = (g) => g.padStart(t, " ");
    switch (e.configPath && r.push(`${n("config")}: ${e.configPath}`), e.apiAddr ? r.push(`${n("API")}: ${e.apiAddr}`) : e.apiPort ? r.push(`${n("API")}: 127.0.0.1:${e.apiPort.toString()}`) : r.push(`${n("API")}: 127.0.0.1:6345`), e.logPath && r.push(`${n("log")}: ${e.logPath}`), (c = e.primary) == null ? void 0 : c.type) {
      case void 0:
        break;
      case "FS":
        r.push(`${n("primary")}: ${e.primary.rootPath}`);
        break;
      case "S3":
        r.push(
          `${n("primary")}: S3 at '${e.primary.endpoint ?? "AWS"}', bucket '${e.primary.bucketName}', prefix: '${e.primary.keyPrefix ?? ""}'`
        );
        break;
      default:
        H();
    }
    switch ((o = e.library) == null ? void 0 : o.type) {
      case void 0:
        break;
      case "FS":
        r.push(`${n("library")}: ${e.library.rootPath}`);
        break;
      case "S3":
        r.push(
          `${n("library")}: S3 at '${e.library.endpoint ?? "AWS"}', bucket '${e.library.bucketName}', prefix: '${e.library.keyPrefix ?? ""}'`
        );
        break;
      default:
        H();
    }
    return e.work && r.push(`${n("workdirs")}: ${e.work.rootPath}`), e.dbPath && r.push(`${n("db")}: ${e.dbPath}`), r.join(`
`);
  }
}
function Re(a, e) {
  for (const t of a) {
    if (t.error)
      throw t.error;
    const r = e ?? "failed to run command";
    if (t.status !== 0)
      throw new Error(`${r}, process exited with code '${t.status}'`);
  }
}
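// The classes below are the command definitions ('reset', 'start', 'stop', 'start:docker',
// 'start:local', the 'svc:*' family, ...) with their 'description', 'examples', 'flags' and
// 'args' metadata; they are collected into the COMMANDS map exported at the end of this file.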
const ne = class ne extends $ {
  async run() {
    const { flags: e } = await this.parse(ne), t = A(e["log-level"]);
    new O(t).cleanupInstance();
  }
};
l(ne, "description", "Clear service state (forget last run command, destroy docker services, volumes and so on)"), l(ne, "examples", ["<%= config.bin %> <%= command.id %>"]), l(ne, "flags", {
  ...L
});
let Ne = ne;
const se = class se extends $ {
  async run() {
    const { flags: e } = await this.parse(se), t = A(e["log-level"]);
    new O(t).startLast();
  }
};
l(se, "description", "Start last run service configuraiton"), l(se, "examples", ["<%= config.bin %> <%= command.id %>"]), l(se, "flags", {
  ...L
});
let _e = se;
const ie = class ie extends $ {
  async run() {
    const { flags: e } = await this.parse(ie), t = A(e["log-level"]), r = new O(t);
    u.currentInstance ? r.stopInstance(u.currentInstance) : t.warn("up/start command was not called for any instance, nothing to stop");
  }
};
l(ie, "description", "Stop platforma service"), l(ie, "examples", ["<%= config.bin %> <%= command.id %>"]), l(ie, "flags", {
  ...L
});
let Ce = ie;
var T;
let dt = (T = class extends $ {
  async run() {
    const { flags: e } = await this.parse(T), t = A(e["log-level"]), r = new O(t);
    r.mergeLicenseEnvs(e);
    const n = "docker", s = e["auth-enabled"], i = s ? {
      enabled: s,
      drivers: r.initAuthDriversList(e, ".")
    } : void 0, c = e.storage ? m.join(".", e.storage) : u.instanceDir(n), o = [];
    for (const h of e.mount ?? [])
      o.push({ hostPath: h });
    const g = e.arch ? `linux/${e.arch}` : void 0, d = r.createDocker(n, c, {
      primaryStorageURL: e["storage-primary"],
      workStoragePath: e["storage-work"],
      libraryStorageURL: e["storage-library"],
      image: e.image,
      version: e.version,
      platformOverride: g,
      customMounts: o,
      license: e.license,
      licenseFile: e["license-file"],
      auth: i,
      grpcAddr: e["grpc-listen"],
      grpcPort: e["grpc-port"],
      monitoringAddr: e["monitoring-listen"],
      monitoringPort: e["monitoring-port"],
      debugAddr: e["debug-listen"],
      debugPort: e["debug-port"]
    });
    r.switchInstance(d);
  }
}, l(T, "description", "Run platforma backend service with 'FS' primary storage type"), l(T, "examples", ["<%= config.bin %> <%= command.id %>"]), l(T, "flags", {
  ...L,
  ...U,
  ...ye,
  ...j,
  ...be,
  ...J,
  ...B,
  ...pe,
  ...G,
  ...le,
  ...oe,
  ...de
}), T);
var F;
let gt = (F = class extends $ {
  async run() {
    const { flags: e } = await this.parse(F), t = A(e["log-level"]), r = new O(t);
    r.mergeLicenseEnvs(e);
    const n = "local", s = e["pl-workdir"] ?? ".", i = e.storage ? m.join(s, e.storage) : u.instanceDir(n), c = e["pl-log-file"] ? m.join(s, e["pl-log-file"]) : void 0, o = r.initAuthDriversList(e, s), g = e["auth-enabled"] ?? o !== void 0;
    let d = "127.0.0.1:6345";
    e["grpc-listen"] ? d = e["grpc-listen"] : e["grpc-port"] && (d = `127.0.0.1:${e["grpc-port"]}`);
    let h = "127.0.0.1:9090";
    e["monitoring-listen"] ? h = e["monitoring-listen"] : e["monitoring-port"] && (h = `127.0.0.1:${e["monitoring-port"]}`);
    let b = "127.0.0.1:9091";
    e["debug-listen"] ? b = e["debug-listen"] : e["debug-port"] && (b = `127.0.0.1:${e["debug-port"]}`);
    const w = {
      sourcesPath: e["pl-sources"],
      binaryPath: e["pl-binary"],
      version: e.version,
      configPath: e.config,
      workdir: e["pl-workdir"],
      primaryURL: e["storage-primary"],
      libraryURL: e["storage-library"],
      configOptions: {
        grpc: { listen: d },
        monitoring: { listen: h },
        debug: { listen: b },
        license: { value: e.license, file: e["license-file"] },
        log: { path: c },
        localRoot: i,
        core: { auth: { enabled: g, drivers: o } },
        storages: {
          work: { type: "FS", rootPath: e["storage-work"] }
        },
        // Backend could consume a lot of CPU power,
        // we want to keep at least a couple for UI and other apps to work.
        numCpu: Math.max(me.cpus().length - 2, 1)
      }
    }, y = r.createLocal(n, w);
    w.binaryPath || w.sourcesPath ? r.switchInstance(y) : Le(t, { version: e.version }).then(() => {
      const k = r.switchInstance(y);
      setTimeout(() => {
        for (const S of k)
          S.unref();
      }, 1e3);
    }).catch(function(k) {
      t.error(k.message);
    });
  }
}, l(F, "description", "Run Platforma Backend service as local process on current host (no docker container)"), l(F, "examples", ["<%= config.bin %> <%= command.id %>"]), l(F, "flags", {
  ...L,
  ...j,
  ...U,
  ...ke,
  ...Se,
  ...$e,
  ...B,
  ...G,
  ...le,
  ...oe,
  ...de,
  ...Pe,
  ...ve,
  ...J
}), F);
const q = class q extends $ {
  async run() {
    const { flags: e, args: t } = await this.parse(q), r = A(e["log-level"]), n = new O(r), s = t.name;
    e.all && (n.cleanupInstance(), process.exit(0)), s || (r.error("Please, specify name of instance to be removed or set '--all' flag instead"), process.exit(1)), n.cleanupInstance(s);
  }
};
l(q, "description", "List available instances"), l(q, "examples", ["<%= config.bin %> <%= command.id %>"]), l(q, "flags", {
  ...L,
  all: P.boolean({
    description: "remove all known instances",
    required: !1
  })
}), l(q, "args", {
  name: Q.string({ required: !1 })
});
let Me = q;
const z = class z extends $ {
  async run() {
    const { flags: e, args: t } = await this.parse(z), r = A(e["log-level"]), n = new O(r), s = t.name ?? u.currentInstanceName;
    s || (r.info("no pl service instance selected. No service was stopped"), process.exit(0)), n.stopInstance(u.getInstanceInfo(s));
  }
};
l(z, "description", "List available instances"), l(z, "examples", ["<%= config.bin %> <%= command.id %>"]), l(z, "flags", {
  ...L
}), l(z, "args", {
  name: Q.string({ required: !1 })
});
let je = z;
const ce = class ce extends $ {
  async run() {
    await this.parse(ce);
    const e = u.instanceList, t = u.currentInstanceName;
    for (const r of e) {
      const n = [], s = u.getInstanceInfo(r);
      u.isInstanceActive(s) && n.push("status:up"), n.push(`type:${s.type}`), console.log(r === t ? ` * ${r} (${n.join(", ")})` : ` ${r} (${n.join(", ")})`);
    }
  }
};
l(ce, "description", "List available instances"), l(ce, "examples", ["<%= config.bin %> <%= command.id %>"]), l(ce, "flags", {});
let Be = ce;
const Y = class Y extends $ {
  async run() {
    const { flags: e, args: t } = await this.parse(Y), r = A(e["log-level"]), n = new O(r), s = t.name ?? u.currentInstanceName;
    s || (r.error("no pl service instance is selected. Select instance with 'select' command or provide name to 'up'"), process.exit(1));
    const i = n.switchInstance(u.getInstanceInfo(s)), c = [];
    for (const o of i)
      c.push(new Promise((g, d) => {
        o.on("close", g), o.on("error", d);
      }));
    await Promise.all(c);
  }
};
l(Y, "description", "List available instances"), l(Y, "examples", ["<%= config.bin %> <%= command.id %>"]), l(Y, "flags", {
  ...L
}), l(Y, "args", {
  name: Q.string({ required: !1 })
});
let Ue = Y;
var D;
let ut = (D = class extends $ {
  async run() {
    const { flags: e } = await this.parse(D), t = A(e["log-level"]), r = new O(t);
    r.mergeLicenseEnvs(e);
    const n = "docker-s3", s = e["auth-enabled"], i = s ? {
      enabled: s,
      drivers: r.initAuthDriversList(e, ".")
    } : void 0, c = e.storage ? m.join(".", e.storage) : u.instanceDir(n), o = [];
    for (const b of e.mount ?? [])
      o.push({ hostPath: b });
    const g = e.arch ? `linux/${e.arch}` : void 0, d = e["minio-presign-host"] ? "minio" : "localhost", h = r.createDockerS3(n, c, {
      image: e.image,
      version: e.version,
      license: e.license,
      licenseFile: e["license-file"],
      platformOverride: g,
      customMounts: o,
      auth: i,
      grpcAddr: e["grpc-listen"],
      grpcPort: e["grpc-port"],
      monitoringAddr: e["monitoring-listen"],
      monitoringPort: e["monitoring-port"],
      debugAddr: e["debug-listen"],
      debugPort: e["debug-port"],
      s3Port: e["s3-port"],
      s3ConsolePort: e["s3-console-port"],
      presignHost: d
    });
    r.switchInstance(h);
  }
}, l(D, "description", "Run platforma backend service with 'S3' primary storage type"), l(D, "examples", ["<%= config.bin %> <%= command.id %>"]), l(D, "flags", {
  ...L,
  ...U,
  ...we,
  ...ye,
  ...j,
  ...be,
  ...J,
  ...B,
  ...pe,
  ...G,
  ...yr
}), D);
var N;
let ft = (N = class extends $ {
  async run() {
    const { flags: e } = await this.parse(N), t = A(e["log-level"]), r = new O(t);
    r.mergeLicenseEnvs(e);
    const n = "local-s3", s = e["pl-workdir"] ?? ".", i = e.storage ? m.join(s, e.storage) : u.instanceDir(n), c = e["pl-log-file"] ? m.join(s, e["pl-log-file"]) : void 0, o = r.initAuthDriversList(e, s), g = e["auth-enabled"] ?? o !== void 0;
    let d = "127.0.0.1:6345";
    e["grpc-listen"] ? d = e["grpc-listen"] : e["grpc-port"] && (d = `127.0.0.1:${e["grpc-port"]}`);
    let h = "127.0.0.1:9090";
    e["monitoring-listen"] ? h = e["monitoring-listen"] : e["monitoring-port"] && (h = `127.0.0.1:${e["monitoring-port"]}`);
    let b = "127.0.0.1:9091";
    e["debug-listen"] ? b = e["debug-listen"] : e["debug-port"] && (b = `127.0.0.1:${e["debug-port"]}`);
    const w = {
      sourcesPath: e["pl-sources"],
      binaryPath: e["pl-binary"],
      version: e.version,
      configPath: e.config,
      workdir: e["pl-workdir"],
      primaryURL: e["storage-primary"],
      libraryURL: e["storage-library"],
      minioPort: e["s3-port"],
      minioConsolePort: e["s3-console-port"],
      configOptions: {
        grpc: { listen: d },
        monitoring: { listen: h },
        debug: { listen: b },
        license: { value: e.license, file: e["license-file"] },
        log: { path: c },
        localRoot: i,
        core: {
          auth: { enabled: g, drivers: o }
        },
        // Backend could consume a lot of CPU power,
        // we want to keep at least a couple for UI and other apps to work.
        numCpu: Math.max(me.cpus().length - 2, 1),
        storages: {
          work: { type: "FS", rootPath: e["storage-work"] }
        }
      }
    }, y = r.createLocalS3(n, w);
    w.binaryPath || w.sourcesPath ? r.switchInstance(y) : await Le(t, { version: e.version }).then(() => {
      const k = r.switchInstance(y), S = [];
      for (const I of k)
        S.push(new Promise((p, v) => {
          I.on("close", p), I.on("error", v);
        }));
      return Promise.all(S);
    }).catch(function(k) {
      t.error(k.message);
    });
  }
}, l(N, "description", "Run Platforma Backend service as local process on current host (no docker container)"), l(N, "examples", ["<%= config.bin %> <%= command.id %>"]), l(N, "flags", {
  ...L,
  ...j,
  ...U,
  ...we,
  ...ke,
  ...Se,
  ...$e,
  ...B,
  ...G,
  ...le,
  ...oe,
  ...de,
  ...Pe,
  ...ve,
  ...J
}), N);
const V = class V extends $ {
  async run() {
    const { flags: e, args: t } = await this.parse(V), r = A(e["log-level"]), n = new O(r);
    n.mergeLicenseEnvs(e);
    const s = t.name, i = e["auth-enabled"], c = i ? {
      enabled: i,
      drivers: n.initAuthDriversList(e, ".")
    } : void 0, o = e.storage ? m.join(".", e.storage) : u.instanceDir(s), g = [];
    for (const h of e.mount ?? [])
      g.push({ hostPath: h });
    const d = e.arch ? `linux/${e.arch}` : void 0;
    n.createDocker(s, o, {
      primaryStorageURL: e["storage-primary"],
      workStoragePath: e["storage-work"],
      libraryStorageURL: e["storage-library"],
      image: e.image,
      version: e.version,
      platformOverride: d,
      customMounts: g,
      license: e.license,
      licenseFile: e["license-file"],
      auth: c,
      grpcAddr: e["grpc-listen"],
      grpcPort: e["grpc-port"],
      monitoringAddr: e["monitoring-listen"],
      monitoringPort: e["monitoring-port"],
      debugAddr: e["debug-listen"],
      debugPort: e["debug-port"]
    }), r.info(`Instance '${s}' was created. To start it run 'up' command`);
  }
};
l(V, "description", "Run Platforma Backend service as docker container on current host"), l(V, "examples", ["<%= config.bin %> <%= command.id %>"]), l(V, "flags", {
  ...L,
  ...U,
  ...ye,
  ...j,
  ...be,
  ...J,
  ...B,
  ...pe,
  ...G,
  ...le,
  ...oe,
  ...de
}), l(V, "args", {
  name: Q.string({ required: !0 })
});
let Ge = V;
var x;
let mt = (x = class extends $ {
  async run() {
    const { flags: e, args: t } = await this.parse(x), r = A(e["log-level"]), n = new O(r);
    n.mergeLicenseEnvs(e);
    const s = t.name, i = e["pl-workdir"] ?? ".", c = e.storage ? m.join(i, e.storage) : u.instanceDir(s), o = e["pl-log-file"] ? m.join(i, e["pl-log-file"]) : void 0, g = n.initAuthDriversList(e, i), d = e["auth-enabled"] ?? g !== void 0;
    let h = "127.0.0.1:6345";
    e["grpc-listen"] ? h = e["grpc-listen"] : e["grpc-port"] && (h = `127.0.0.1:${e["grpc-port"]}`);
    let b = "127.0.0.1:9090";
    e["monitoring-listen"] ? b = e["monitoring-listen"] : e["monitoring-port"] && (b = `127.0.0.1:${e["monitoring-port"]}`);
    let w = "127.0.0.1:9091";
    e["debug-listen"] ? w = e["debug-listen"] : e["debug-port"] && (w = `127.0.0.1:${e["debug-port"]}`);
    const y = {
      sourcesPath: e["pl-sources"],
      binaryPath: e["pl-binary"],
      version: e.version,
      configPath: e.config,
      workdir: e["pl-workdir"],
      primaryURL: e["storage-primary"],
      libraryURL: e["storage-library"],
      configOptions: {
        grpc: { listen: h },
        monitoring: { listen: b },
        debug: { listen: w },
        license: { value: e.license, file: e["license-file"] },
        log: { path: o },
        localRoot: c,
        core: { auth: { enabled: d, drivers: g } },
        storages: {
          work: { type: "FS", rootPath: e["storage-work"] }
        },
        // Backend could consume a lot of CPU power,
        // we want to keep at least a couple for UI and other apps to work.
        numCpu: Math.max(me.cpus().length - 2, 1)
      }
    };
    if (n.createLocal(s, y), y.binaryPath || y.sourcesPath) {
      r.info(`Instance '${s}' was created. To start it run 'up' command`);
      return;
    }
    Le(r, { version: e.version }).then(() => r.info(`Instance '${s}' was created. To start it run 'svc up' command`)).catch(function(k) {
      r.error(k.message);
    });
  }
}, l(x, "description", "Run Platforma Backend service as local process on current host (no docker container)"), l(x, "examples", ["<%= config.bin %> <%= command.id %>"]), l(x, "flags", {
  ...L,
  ...j,
  ...U,
  ...ke,
  ...Se,
  ...$e,
  ...B,
  ...G,
  ...le,
  ...oe,
  ...de,
  ...Pe,
  ...ve,
  ...J
}), l(x, "args", {
  name: Q.string({ required: !0 })
}), x);
const K = class K extends $ {
  async run() {
    const { flags: e, args: t } = await this.parse(K), r = A(e["log-level"]), n = new O(r);
    n.mergeLicenseEnvs(e);
    const s = t.name, i = e["auth-enabled"], c = i ? {
      enabled: i,
      drivers: n.initAuthDriversList(e, ".")
    } : void 0, o = e.storage ? m.join(".", e.storage) : u.instanceDir(s), g = [];
    for (const b of e.mount ?? [])
      g.push({ hostPath: b });
    const d = e.arch ? `linux/${e.arch}` : void 0, h = e["minio-presign-host"] ? "minio" : "localhost";
    n.createDockerS3(s, o, {
      image: e.image,
      version: e.version,
      license: e.license,
      licenseFile: e["license-file"],
      platformOverride: d,
      customMounts: g,
      auth: c,
      grpcAddr: e["grpc-listen"],
      grpcPort: e["grpc-port"],
      monitoringAddr: e["monitoring-listen"],
      monitoringPort: e["monitoring-port"],
      debugAddr: e["debug-listen"],
      debugPort: e["debug-port"],
      s3Port: e["s3-port"],
      s3ConsolePort: e["s3-console-port"],
      presignHost: h
    }), r.info(`Instance '${s}' was created. To start it run 'up' command`), e["minio-presign-host"] && r.info(" NOTE: make sure you have 'minio' host in your hosts file as 127.0.0.1 address");
  }
};
l(K, "description", "Run Platforma Backend service as docker container on current host with MinIO as local S3 storage"), l(K, "examples", ["<%= config.bin %> <%= command.id %>"]), l(K, "flags", {
  ...L,
  ...U,
  ...we,
  ...ye,
  ...j,
  ...be,
  ...J,
  ...B,
  ...pe,
  ...G,
  ...yr
}), l(K, "args", {
  name: Q.string({ required: !0 })
});
let Je = K;
const X = class X extends $ {
  async run() {
    const { flags: e, args: t } = await this.parse(X), r = A(e["log-level"]), n = new O(r);
    n.mergeLicenseEnvs(e);
    const s = t.name, i = e["pl-workdir"] ?? ".", c = e.storage ? m.join(i, e.storage) : u.instanceDir(s), o = e["pl-log-file"] ? m.join(i, e["pl-log-file"]) : void 0, g = n.initAuthDriversList(e, i), d = e["auth-enabled"] ?? g !== void 0;
    let h = "127.0.0.1:6345";
    e["grpc-listen"] ? h = e["grpc-listen"] : e["grpc-port"] && (h = `127.0.0.1:${e["grpc-port"]}`);
    let b = "127.0.0.1:9090";
    e["monitoring-listen"] ? b = e["monitoring-listen"] : e["monitoring-port"] && (b = `127.0.0.1:${e["monitoring-port"]}`);
    let w = "127.0.0.1:9091";
    e["debug-listen"] ? w = e["debug-listen"] : e["debug-port"] && (w = `127.0.0.1:${e["debug-port"]}`);
    const y = {
      sourcesPath: e["pl-sources"],
      binaryPath: e["pl-binary"],
      version: e.version,
      configPath: e.config,
      workdir: e["pl-workdir"],
      primaryURL: e["storage-primary"],
      libraryURL: e["storage-library"],
      minioPort: e["s3-port"],
      minioConsolePort: e["s3-console-port"],
      configOptions: {
        grpc: { listen: h },
        monitoring: { listen: b },
        debug: { listen: w },
        license: { value: e.license, file: e["license-file"] },
        log: { path: o },
        localRoot: c,
        core: { auth: { enabled: d, drivers: g } },
        storages: {
          work: { type: "FS", rootPath: e["storage-work"] }
        },
        // Backend could consume a lot of CPU power,
        // we want to keep at least a couple for UI and other apps to work.
        numCpu: Math.max(me.cpus().length - 2, 1)
      }
    };
    if (r.info("Creating instance configuration, data directory and other stuff..."), n.createLocalS3(s, y), y.binaryPath || y.sourcesPath) {
      r.info(`Instance '${s}' was created. To start it run 'up' command`);
      return;
    }
    Le(r, { version: e.version }).then(() => r.info(`Instance '${s}' was created. To start it run 'svc up' command`)).catch(function(k) {
      r.error(k.message);
    });
  }
};
l(X, "description", "Run Platforma Backend service as local process on current host (no docker container)"), l(X, "examples", ["<%= config.bin %> <%= command.id %>"]), l(X, "flags", {
  ...L,
  ...j,
  ...we,
  ...U,
  ...ke,
  ...Se,
  ...$e,
  ...B,
  ...G,
  ...le,
  ...oe,
  ...de,
  ...Pe,
  ...ve,
  ...J
}), l(X, "args", {
  name: Q.string({ required: !0 })
});
let He = X;
const Rt = {
  "create-block": Fe,
  reset: Ne,
  start: _e,
  stop: Ce,
  "start:docker": dt,
  "start:local": gt,
  "svc:delete": Me,
  "svc:down": je,
  "svc:list": Be,
  "svc:up": Ue,
  "start:docker:s3": ut,
  "start:local:s3": ft,
  "svc:create:docker": Ge,
  "svc:create:local": mt,
  "svc:create:docker:s3": Je,
  "svc:create:local:s3": He
};
export {
  Rt as COMMANDS
};
//# sourceMappingURL=index.mjs.map
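For orientation, the shape of the options object consumed by the `createDocker`/`createDockerS3` calls above can be reconstructed from the property accesses in this bundle. A minimal TypeScript sketch follows; the interface names and field types are assumptions inferred from the minified code, not the SDK's published typings.

// Sketch only: names are taken from the minified bundle, types are guesses.
interface CustomMount {
  hostPath: string;
  containerPath?: string; // defaults to hostPath when omitted
}

interface AuthOptions {
  enabled?: boolean;
  drivers?: Array<Record<string, unknown>>; // e.g. htpasswd, ldap and jwt driver configs
}

interface CreateDockerInstanceOptions {
  image?: string;             // PL_IMAGE; otherwise derived from `version`
  version?: string;
  license?: string;           // PL_LICENSE
  licenseFile?: string;       // PL_LICENSE_FILE
  logLevel?: string;          // PL_LOG_LEVEL, defaults to "info"
  platformOverride?: string;  // e.g. "linux/amd64"
  customMounts?: CustomMount[];
  auth?: AuthOptions;
  grpcAddr?: string;
  grpcPort?: number;
  monitoringAddr?: string;
  monitoringPort?: number;
  debugAddr?: string;
  debugPort?: number;
  // plain 'docker' mode
  primaryStorageURL?: string;
  libraryStorageURL?: string;
  workStoragePath?: string;
  // 'docker s3' mode
  s3Port?: number;            // MinIO port, defaults to 9000
  s3ConsolePort?: number;
  presignHost?: string;       // defaults to "localhost"
}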