@platforma-sdk/bootstrap 3.1.2 → 3.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +23 -0
- package/dist/commands/svc/create/local/s3.d.ts +38 -0
- package/dist/commands/svc/create/local/s3.d.ts.map +1 -0
- package/dist/commands/svc/create/local.d.ts +0 -3
- package/dist/commands/svc/create/local.d.ts.map +1 -1
- package/dist/commands/svc/up.d.ts.map +1 -1
- package/dist/index.d.ts +2 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +17 -17
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +679 -630
- package/dist/index.mjs.map +1 -1
- package/package.json +2 -2
package/dist/index.mjs
CHANGED
679 additions and 630 deletions across the following hunks:

@@ -1,183 +1,183 @@
@@ -186,64 +186,64 @@ function I(a = "debug") {
@@ -252,45 +252,45 @@ async function Jr(a) {
@@ -298,75 +298,75 @@ async function qr(a, e) {
@@ -378,40 +378,40 @@ const C = class C {
@@ -423,7 +423,7 @@ const C = class C {
@@ -442,14 +442,14 @@ const C = class C {
@@ -461,10 +461,10 @@ function we(a, e, t) {
@@ -473,7 +473,7 @@ function we(a, e, t) {
@@ -490,15 +490,15 @@ function Kr(a, e, t, r) {
@@ -506,31 +506,31 @@ function ve(a, e, t, r, n) {
@@ -548,7 +548,7 @@ function Xr(a) {
@@ -556,8 +556,8 @@ function wr(a) {
@@ -601,24 +601,24 @@ function Q(a, e, t) {
@@ -626,67 +626,67 @@ function Zr(a, e) {
@@ -762,12 +762,12 @@ controllers:
@@ -777,13 +777,13 @@ function tt(a) {
@@ -791,77 +791,77 @@ function vr(a) {
@@ -870,7 +870,7 @@ class O {
@@ -881,69 +881,69 @@ class O {
@@ -965,7 +965,7 @@ ${r}`);
@@ -988,12 +988,12 @@ ${r}`);
@@ -1006,75 +1006,75 @@ ${r}`);
@@ -1085,27 +1085,27 @@ ${r}`);
@@ -1130,78 +1130,78 @@ ${r}`);
@@ -1209,30 +1209,30 @@ ${r}`);
@@ -1240,37 +1240,37 @@ You are going to reset the state of platforma service '${e}':
|
|
1248
1248
|
), n = `
|
|
1249
1249
|
You are going to reset the state of all platforma services configured with pl-bootstrap package.
|
|
1250
1250
|
- ${t.join(`
|
|
1251
1251
|
- `)}
|
|
1252
1252
|
`;
|
|
1253
1253
|
}
|
|
1254
|
-
if (this.logger.warn(n), !
|
|
1254
|
+
if (this.logger.warn(n), !Cr("Are you sure?")) {
|
|
1255
1255
|
this.logger.info("Reset action was canceled");
|
|
1256
1256
|
return;
|
|
1257
1257
|
}
|
|
1258
1258
|
for (const [s, i] of r.entries()) {
|
|
1259
1259
|
if (i.cleanupCommands.length) {
|
|
1260
1260
|
this.logger.info(`Wiping instance ${s} services`);
|
|
1261
|
-
const c =
|
|
1262
|
-
|
|
1261
|
+
const c = Ie(this.logger, i.cleanupCommands);
|
|
1262
|
+
ue(c.executed, `failed to wipe instance ${s} services`);
|
|
1263
1263
|
}
|
|
1264
|
-
this.logger.info(`Destroying instance '${s}' data directory`),
|
|
1264
|
+
this.logger.info(`Destroying instance '${s}' data directory`), f.rmSync(u.instanceDir(s), { recursive: !0, force: !0 });
|
|
1265
1265
|
}
|
|
1266
|
-
e || (this.logger.info(`Destroying state dir '${
|
|
1266
|
+
e || (this.logger.info(`Destroying state dir '${u.path()}'`), f.rmSync(u.path(), { recursive: !0, force: !0 })), this.logger.info(
|
|
1267
1267
|
`
|
|
1268
|
-
If you want to remove all downloaded platforma binaries, delete '${
|
|
1268
|
+
If you want to remove all downloaded platforma binaries, delete '${u.binaries()}' dir manually
|
|
1269
1269
|
`
|
|
1270
1270
|
);
|
|
1271
1271
|
}
|
|
1272
1272
|
mergeLicenseEnvs(e) {
|
|
1273
|
-
e.license === void 0 && ((process.env.MI_LICENSE ?? "") != "" ? e.license = process.env.MI_LICENSE : (process.env.PL_LICENSE ?? "") != "" && (e.license = process.env.PL_LICENSE)), e["license-file"] === void 0 && e.license === void 0 && ((process.env.MI_LICENSE_FILE ?? "") != "" ? e["license-file"] = process.env.MI_LICENSE_FILE : (process.env.PL_LICENSE_FILE ?? "") != "" ? e["license-file"] = process.env.PL_LICENSE_FILE :
|
|
1273
|
+
e.license === void 0 && ((process.env.MI_LICENSE ?? "") != "" ? e.license = process.env.MI_LICENSE : (process.env.PL_LICENSE ?? "") != "" && (e.license = process.env.PL_LICENSE)), e["license-file"] === void 0 && e.license === void 0 && ((process.env.MI_LICENSE_FILE ?? "") != "" ? e["license-file"] = process.env.MI_LICENSE_FILE : (process.env.PL_LICENSE_FILE ?? "") != "" ? e["license-file"] = process.env.PL_LICENSE_FILE : f.existsSync(m.resolve(D.homedir(), ".pl.license")) && (e["license-file"] = m.resolve(D.homedir(), ".pl.license")));
|
|
1274
1274
|
}
|
|
1275
1275
|
initAuthDriversList(e, t) {
|
|
1276
1276
|
const r = [];
|
|
@@ -1288,9 +1288,9 @@ If you want to remove all downloaded platforma binaries, delete '${d.binaries()}
|
|
|
1288
1288
|
}
|
|
1289
1289
|
/** Gets the last stored JWT secret key or generates it and stores in a file. */
|
|
1290
1290
|
getLastJwt() {
|
|
1291
|
-
const e =
|
|
1291
|
+
const e = u.path("auth.jwt"), t = "utf-8";
|
|
1292
1292
|
let r = "";
|
|
1293
|
-
return
|
|
1293
|
+
return f.existsSync(e) && (r = f.readFileSync(e, { encoding: t })), r == "" && (r = Mr(64), f.writeFileSync(e, r, { encoding: t })), r;
|
|
1294
1294
|
}
|
|
1295
1295
|
checkLicense(e, t) {
|
|
1296
1296
|
if (!(e !== void 0 && e != "") && !(t !== void 0 && t != ""))
|
|
@@ -1314,13 +1314,13 @@ You can obtain the license from "https://licensing.milaboratories.com".`), new E
|
|
|
1314
1314
|
case "FS":
|
|
1315
1315
|
return r[`PL_DATA_${e}_TYPE`] = "FS", r;
|
|
1316
1316
|
default:
|
|
1317
|
-
|
|
1317
|
+
G();
|
|
1318
1318
|
}
|
|
1319
1319
|
return {};
|
|
1320
1320
|
}
|
|
1321
1321
|
renderRunInfo(e, t = 10) {
|
|
1322
1322
|
var c, o;
|
|
1323
|
-
const r = [], n = (
|
|
1323
|
+
const r = [], n = (d) => d.padStart(t, " ");
|
|
1324
1324
|
switch (e.configPath && r.push(`${n("config")}: ${e.configPath}`), e.apiAddr ? r.push(`${n("API")}: ${e.apiAddr}`) : e.apiPort ? r.push(`${n("API")}: 127.0.0.1:${e.apiPort.toString()}`) : r.push(`${n("API")}: 127.0.0.1:6345`), e.logPath && r.push(`${n("log")}: ${e.logPath}`), (c = e.primary) == null ? void 0 : c.type) {
|
|
1325
1325
|
case void 0:
|
|
1326
1326
|
break;
|
|
@@ -1333,7 +1333,7 @@ You can obtain the license from "https://licensing.milaboratories.com".`), new E
|
|
|
1333
1333
|
);
|
|
1334
1334
|
break;
|
|
1335
1335
|
default:
|
|
1336
|
-
|
|
1336
|
+
G();
|
|
1337
1337
|
}
|
|
1338
1338
|
switch ((o = e.library) == null ? void 0 : o.type) {
|
|
1339
1339
|
case void 0:
|
|
@@ -1347,21 +1347,21 @@ You can obtain the license from "https://licensing.milaboratories.com".`), new E
|
|
|
1347
1347
|
);
|
|
1348
1348
|
break;
|
|
1349
1349
|
default:
|
|
1350
|
-
|
|
1350
|
+
G();
|
|
1351
1351
|
}
|
|
1352
1352
|
return e.work && r.push(`${n("workdirs")}: ${e.work.rootPath}`), e.dbPath && r.push(`${n("db")}: ${e.dbPath}`), r.join(`
|
|
1353
1353
|
`);
|
|
1354
1354
|
}
|
|
1355
1355
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
1356
1356
|
readComposeFile(e) {
|
|
1357
|
-
const t =
|
|
1358
|
-
return
|
|
1357
|
+
const t = f.readFileSync(e);
|
|
1358
|
+
return fe.parse(t.toString());
|
|
1359
1359
|
}
|
|
1360
1360
|
writeComposeFile(e, t) {
|
|
1361
|
-
|
|
1361
|
+
f.writeFileSync(e, fe.stringify(t));
|
|
1362
1362
|
}
|
|
1363
1363
|
}
|
|
1364
|
-
function
|
|
1364
|
+
function ue(a, e) {
|
|
1365
1365
|
for (const t of a) {
|
|
1366
1366
|
if (t.error)
|
|
1367
1367
|
throw t.error;
|
|
@@ -1370,54 +1370,54 @@ function ge(a, e) {
|
|
|
1370
1370
|
throw new Error(`${r}, process exited with code '${t.status}'`);
|
|
1371
1371
|
}
|
|
1372
1372
|
}
|
|
1373
|
-
const
|
|
1373
|
+
const te = class te extends L {
|
|
1374
1374
|
async run() {
|
|
1375
|
-
const { flags: e } = await this.parse(
|
|
1375
|
+
const { flags: e } = await this.parse(te), t = A(e["log-level"]);
|
|
1376
1376
|
new O(t).cleanupInstance();
|
|
1377
1377
|
}
|
|
1378
1378
|
};
|
|
1379
|
-
l(
|
|
1380
|
-
...
|
|
1379
|
+
l(te, "description", "Clear service state (forget last run command, destroy docker services, volumes and so on)"), l(te, "examples", ["<%= config.bin %> <%= command.id %>"]), l(te, "flags", {
|
|
1380
|
+
...I
|
|
1381
1381
|
});
|
|
1382
|
-
let
|
|
1383
|
-
const
|
|
1382
|
+
let Fe = te;
|
|
1383
|
+
const ae = class ae extends L {
|
|
1384
1384
|
async run() {
|
|
1385
|
-
const { flags: e } = await this.parse(
|
|
1385
|
+
const { flags: e } = await this.parse(ae), t = A(e["log-level"]);
|
|
1386
1386
|
new O(t).startLast();
|
|
1387
1387
|
}
|
|
1388
1388
|
};
|
|
1389
|
-
l(
|
|
1390
|
-
...
|
|
1389
|
+
l(ae, "description", "Start last run service configuraiton"), l(ae, "examples", ["<%= config.bin %> <%= command.id %>"]), l(ae, "flags", {
|
|
1390
|
+
...I
|
|
1391
1391
|
});
|
|
1392
|
-
let
|
|
1393
|
-
const
|
|
1392
|
+
let xe = ae;
|
|
1393
|
+
const ne = class ne extends L {
|
|
1394
1394
|
async run() {
|
|
1395
|
-
const { flags: e } = await this.parse(
|
|
1396
|
-
|
|
1395
|
+
const { flags: e } = await this.parse(ne), t = A(e["log-level"]), r = new O(t);
|
|
1396
|
+
u.currentInstance ? r.stopInstance(u.currentInstance) : t.warn("up/start command was not called for any instance, nothing to stop");
|
|
1397
1397
|
}
|
|
1398
1398
|
};
|
|
1399
|
-
l(
|
|
1400
|
-
...
|
|
1399
|
+
l(ne, "description", "Stop platforma service"), l(ne, "examples", ["<%= config.bin %> <%= command.id %>"]), l(ne, "flags", {
|
|
1400
|
+
...I
|
|
1401
1401
|
});
|
|
1402
|
-
let
|
|
1402
|
+
let De = ne;
|
|
1403
1403
|
var E;
|
|
1404
|
-
let
|
|
1404
|
+
let lt = (E = class extends L {
|
|
1405
1405
|
async run() {
|
|
1406
|
-
const { flags: e } = await this.parse(E), t =
|
|
1406
|
+
const { flags: e } = await this.parse(E), t = A(e["log-level"]), r = new O(t);
|
|
1407
1407
|
r.mergeLicenseEnvs(e);
|
|
1408
1408
|
const n = "docker", s = e["auth-enabled"], i = s ? {
|
|
1409
1409
|
enabled: s,
|
|
1410
1410
|
drivers: r.initAuthDriversList(e, ".")
|
|
1411
|
-
} : void 0, c = e.storage ? m.join(".", e.storage) :
|
|
1411
|
+
} : void 0, c = e.storage ? m.join(".", e.storage) : u.instanceDir(n), o = [];
|
|
1412
1412
|
for (const h of e.mount ?? [])
|
|
1413
1413
|
o.push({ hostPath: h });
|
|
1414
|
-
const
|
|
1414
|
+
const d = e.arch ? `linux/${e.arch}` : void 0, g = r.createDocker(n, c, {
|
|
1415
1415
|
primaryStorageURL: e["storage-primary"],
|
|
1416
1416
|
workStoragePath: e["storage-work"],
|
|
1417
1417
|
libraryStorageURL: e["storage-library"],
|
|
1418
1418
|
image: e.image,
|
|
1419
1419
|
version: e.version,
|
|
1420
|
-
platformOverride:
|
|
1420
|
+
platformOverride: d,
|
|
1421
1421
|
customMounts: o,
|
|
1422
1422
|
license: e.license,
|
|
1423
1423
|
licenseFile: e["license-file"],
|
|
@@ -1432,34 +1432,34 @@ let ot = (E = class extends L {
|
|
|
1432
1432
|
r.switchInstance(g);
|
|
1433
1433
|
}
|
|
1434
1434
|
}, l(E, "description", "Run platforma backend service with 'FS' primary storage type"), l(E, "examples", ["<%= config.bin %> <%= command.id %>"]), l(E, "flags", {
|
|
1435
|
-
...
|
|
1436
|
-
...
|
|
1437
|
-
...me,
|
|
1438
|
-
...W,
|
|
1435
|
+
...I,
|
|
1436
|
+
...j,
|
|
1439
1437
|
...he,
|
|
1440
|
-
...
|
|
1441
|
-
...
|
|
1438
|
+
...C,
|
|
1439
|
+
...pe,
|
|
1440
|
+
...U,
|
|
1441
|
+
...M,
|
|
1442
1442
|
...ye,
|
|
1443
|
-
...
|
|
1444
|
-
...oe,
|
|
1443
|
+
...B,
|
|
1445
1444
|
...ce,
|
|
1446
|
-
...
|
|
1445
|
+
...ie,
|
|
1446
|
+
...oe
|
|
1447
1447
|
}), E);
|
|
1448
|
-
var
|
|
1449
|
-
let
|
|
1448
|
+
var T;
|
|
1449
|
+
let dt = (T = class extends L {
|
|
1450
1450
|
async run() {
|
|
1451
|
-
const { flags: e } = await this.parse(
|
|
1451
|
+
const { flags: e } = await this.parse(T), t = A(e["log-level"]), r = new O(t);
|
|
1452
1452
|
r.mergeLicenseEnvs(e);
|
|
1453
|
-
const n = "local", s = e["pl-workdir"] ?? ".", i = e.storage ? m.join(s, e.storage) :
|
|
1453
|
+
const n = "local", s = e["pl-workdir"] ?? ".", i = e.storage ? m.join(s, e.storage) : u.instanceDir(n), c = e["pl-log-file"] ? m.join(s, e["pl-log-file"]) : void 0, o = r.initAuthDriversList(e, s), d = e["auth-enabled"] ?? o !== void 0;
|
|
1454
1454
|
let g = e["pl-binary"];
|
|
1455
1455
|
e["pl-sources"] && (g = r.buildPlatforma({ repoRoot: e["pl-sources"] }));
|
|
1456
1456
|
let h = "127.0.0.1:6345";
|
|
1457
1457
|
e["grpc-listen"] ? h = e["grpc-listen"] : e["grpc-port"] && (h = `127.0.0.1:${e["grpc-port"]}`);
|
|
1458
|
-
let
|
|
1459
|
-
e["monitoring-listen"] ?
|
|
1460
|
-
let
|
|
1461
|
-
e["debug-listen"] ?
|
|
1462
|
-
const
|
|
1458
|
+
let b = "127.0.0.1:9090";
|
|
1459
|
+
e["monitoring-listen"] ? b = e["monitoring-listen"] : e["monitoring-port"] && (b = `127.0.0.1:${e["monitoring-port"]}`);
|
|
1460
|
+
let w = "127.0.0.1:9091";
|
|
1461
|
+
e["debug-listen"] ? w = e["debug-listen"] : e["debug-port"] && (w = `127.0.0.1:${e["debug-port"]}`);
|
|
1462
|
+
const p = {
|
|
1463
1463
|
binaryPath: g,
|
|
1464
1464
|
version: e.version,
|
|
1465
1465
|
configPath: e.config,
|
|
@@ -1468,18 +1468,18 @@ let lt = (R = class extends L {
|
|
|
1468
1468
|
libraryURL: e["storage-library"],
|
|
1469
1469
|
configOptions: {
|
|
1470
1470
|
grpc: { listen: h },
|
|
1471
|
-
monitoring: { listen:
|
|
1472
|
-
debug: { listen:
|
|
1471
|
+
monitoring: { listen: b },
|
|
1472
|
+
debug: { listen: w },
|
|
1473
1473
|
license: { value: e.license, file: e["license-file"] },
|
|
1474
1474
|
log: { path: c },
|
|
1475
1475
|
localRoot: i,
|
|
1476
|
-
core: { auth: { enabled:
|
|
1476
|
+
core: { auth: { enabled: d, drivers: o } },
|
|
1477
1477
|
storages: {
|
|
1478
1478
|
work: { type: "FS", rootPath: e["storage-work"] }
|
|
1479
1479
|
}
|
|
1480
1480
|
}
|
|
1481
|
-
}, P = r.createLocal(n,
|
|
1482
|
-
|
|
1481
|
+
}, P = r.createLocal(n, p);
|
|
1482
|
+
p.binaryPath ? r.switchInstance(P) : Se(t, { version: e.version }).then(() => {
|
|
1483
1483
|
const S = r.switchInstance(P);
|
|
1484
1484
|
setTimeout(() => {
|
|
1485
1485
|
for (const k of S)
|
|
@@ -1489,96 +1489,97 @@ let lt = (R = class extends L {
|
|
|
1489
1489
|
t.error(S.message);
|
|
1490
1490
|
});
|
|
1491
1491
|
}
|
|
1492
|
-
}, l(
|
|
1493
|
-
...
|
|
1494
|
-
...
|
|
1495
|
-
...
|
|
1496
|
-
...
|
|
1497
|
-
...
|
|
1498
|
-
...
|
|
1499
|
-
...
|
|
1500
|
-
...
|
|
1501
|
-
...oe,
|
|
1492
|
+
}, l(T, "description", "Run Platforma Backend service as local process on current host (no docker container)"), l(T, "examples", ["<%= config.bin %> <%= command.id %>"]), l(T, "flags", {
|
|
1493
|
+
...I,
|
|
1494
|
+
...C,
|
|
1495
|
+
...j,
|
|
1496
|
+
...ve,
|
|
1497
|
+
...Pe,
|
|
1498
|
+
...ke,
|
|
1499
|
+
...M,
|
|
1500
|
+
...B,
|
|
1502
1501
|
...ce,
|
|
1503
|
-
...
|
|
1504
|
-
...
|
|
1505
|
-
...
|
|
1506
|
-
...
|
|
1507
|
-
|
|
1508
|
-
|
|
1502
|
+
...ie,
|
|
1503
|
+
...oe,
|
|
1504
|
+
...be,
|
|
1505
|
+
...we,
|
|
1506
|
+
...U
|
|
1507
|
+
}), T);
|
|
1508
|
+
const H = class H extends L {
|
|
1509
1509
|
async run() {
|
|
1510
|
-
const { flags: e, args: t } = await this.parse(
|
|
1510
|
+
const { flags: e, args: t } = await this.parse(H), r = A(e["log-level"]), n = new O(r), s = t.name;
|
|
1511
1511
|
e.all && (n.cleanupInstance(), process.exit(0)), s || (r.error("Please, specify name of instance to be removed or set '--all' flag instead"), process.exit(1)), n.cleanupInstance(s);
|
|
1512
1512
|
}
|
|
1513
1513
|
};
|
|
1514
|
-
l(
|
|
1515
|
-
...
|
|
1516
|
-
all:
|
|
1514
|
+
l(H, "description", "List available instances"), l(H, "examples", ["<%= config.bin %> <%= command.id %>"]), l(H, "flags", {
|
|
1515
|
+
...I,
|
|
1516
|
+
all: v.boolean({
|
|
1517
1517
|
description: "remove all known instances",
|
|
1518
1518
|
required: !1
|
|
1519
1519
|
})
|
|
1520
|
-
}), l(
|
|
1521
|
-
name:
|
|
1520
|
+
}), l(H, "args", {
|
|
1521
|
+
name: K.string({ required: !1 })
|
|
1522
1522
|
});
|
|
1523
|
-
let
|
|
1524
|
-
const
|
|
1523
|
+
let Ne = H;
|
|
1524
|
+
const W = class W extends L {
|
|
1525
1525
|
async run() {
|
|
1526
|
-
const { flags: e, args: t } = await this.parse(
|
|
1527
|
-
s || (r.info("no pl service instance selected. No service was stopped"), process.exit(0)), n.stopInstance(
|
|
1526
|
+
const { flags: e, args: t } = await this.parse(W), r = A(e["log-level"]), n = new O(r), s = t.name ?? u.currentInstanceName;
|
|
1527
|
+
s || (r.info("no pl service instance selected. No service was stopped"), process.exit(0)), n.stopInstance(u.getInstanceInfo(s));
|
|
1528
1528
|
}
|
|
1529
1529
|
};
|
|
1530
|
-
l(
|
|
1531
|
-
...
|
|
1532
|
-
}), l(
|
|
1533
|
-
name:
|
|
1530
|
+
l(W, "description", "List available instances"), l(W, "examples", ["<%= config.bin %> <%= command.id %>"]), l(W, "flags", {
|
|
1531
|
+
...I
|
|
1532
|
+
}), l(W, "args", {
|
|
1533
|
+
name: K.string({ required: !1 })
|
|
1534
1534
|
});
|
|
1535
|
-
let
|
|
1536
|
-
const
|
|
1535
|
+
let _e = W;
|
|
1536
|
+
const se = class se extends L {
|
|
1537
1537
|
async run() {
|
|
1538
|
-
await this.parse(
|
|
1539
|
-
const e =
|
|
1538
|
+
await this.parse(se);
|
|
1539
|
+
const e = u.instanceList, t = u.currentInstanceName;
|
|
1540
1540
|
for (const r of e) {
|
|
1541
|
-
const n = [], s =
|
|
1542
|
-
|
|
1541
|
+
const n = [], s = u.getInstanceInfo(r);
|
|
1542
|
+
u.isInstanceActive(s) && n.push("status:up"), n.push(`type:${s.type}`), console.log(r === t ? ` * ${r} (${n.join(", ")})` : ` ${r} (${n.join(", ")})`);
|
|
1543
1543
|
}
|
|
1544
1544
|
}
|
|
1545
1545
|
};
|
|
1546
|
-
l(
|
|
1547
|
-
let
|
|
1548
|
-
const
|
|
1546
|
+
l(se, "description", "List available instances"), l(se, "examples", ["<%= config.bin %> <%= command.id %>"]), l(se, "flags", {});
|
|
1547
|
+
let Ce = se;
|
|
1548
|
+
const q = class q extends L {
|
|
1549
1549
|
async run() {
|
|
1550
|
-
const { flags: e, args: t } = await this.parse(
|
|
1550
|
+
const { flags: e, args: t } = await this.parse(q), r = A(e["log-level"]), n = new O(r), s = t.name ?? u.currentInstanceName;
|
|
1551
1551
|
s || (r.error("no pl service instance is selected. Select instance with 'select' command or provide name to 'up'"), process.exit(1));
|
|
1552
|
-
const i = n.switchInstance(
|
|
1553
|
-
|
|
1554
|
-
|
|
1555
|
-
|
|
1556
|
-
|
|
1552
|
+
const i = n.switchInstance(u.getInstanceInfo(s)), c = [];
|
|
1553
|
+
for (const o of i)
|
|
1554
|
+
c.push(new Promise((d, g) => {
|
|
1555
|
+
o.on("close", d), o.on("error", g);
|
|
1556
|
+
}));
|
|
1557
|
+
await Promise.all(c);
|
|
1557
1558
|
}
|
|
1558
1559
|
};
|
|
1559
|
-
l(
|
|
1560
|
-
...
|
|
1561
|
-
}), l(
|
|
1562
|
-
name:
|
|
1560
|
+
l(q, "description", "List available instances"), l(q, "examples", ["<%= config.bin %> <%= command.id %>"]), l(q, "flags", {
|
|
1561
|
+
...I
|
|
1562
|
+
}), l(q, "args", {
|
|
1563
|
+
name: K.string({ required: !1 })
|
|
1563
1564
|
});
|
|
1564
|
-
let
|
|
1565
|
-
var
|
|
1566
|
-
let
|
|
1565
|
+
let Me = q;
|
|
1566
|
+
var F;
|
|
1567
|
+
let gt = (F = class extends L {
|
|
1567
1568
|
async run() {
|
|
1568
|
-
const { flags: e } = await this.parse(
|
|
1569
|
+
const { flags: e } = await this.parse(F), t = A(e["log-level"]), r = new O(t);
|
|
1569
1570
|
r.mergeLicenseEnvs(e);
|
|
1570
1571
|
const n = "docker-s3", s = e["auth-enabled"], i = s ? {
|
|
1571
1572
|
enabled: s,
|
|
1572
1573
|
drivers: r.initAuthDriversList(e, ".")
|
|
1573
|
-
} : void 0, c = e.storage ? m.join(".", e.storage) :
|
|
1574
|
-
for (const
|
|
1575
|
-
o.push({ hostPath:
|
|
1576
|
-
const
|
|
1574
|
+
} : void 0, c = e.storage ? m.join(".", e.storage) : u.instanceDir(n), o = [];
|
|
1575
|
+
for (const b of e.mount ?? [])
|
|
1576
|
+
o.push({ hostPath: b });
|
|
1577
|
+
const d = e.arch ? `linux/${e.arch}` : void 0, g = e["minio-presign-host"] ? "minio" : "localhost", h = r.createDockerS3(n, c, {
|
|
1577
1578
|
image: e.image,
|
|
1578
1579
|
version: e.version,
|
|
1579
1580
|
license: e.license,
|
|
1580
1581
|
licenseFile: e["license-file"],
|
|
1581
|
-
platformOverride:
|
|
1582
|
+
platformOverride: d,
|
|
1582
1583
|
customMounts: o,
|
|
1583
1584
|
auth: i,
|
|
1584
1585
|
grpcAddr: e["grpc-listen"],
|
|
@@ -1591,33 +1592,33 @@ let dt = (T = class extends L {
|
|
|
1591
1592
|
});
|
|
1592
1593
|
r.switchInstance(h);
|
|
1593
1594
|
}
|
|
1594
|
-
}, l(
|
|
1595
|
-
...
|
|
1596
|
-
...
|
|
1597
|
-
...me,
|
|
1598
|
-
...W,
|
|
1595
|
+
}, l(F, "description", "Run platforma backend service with 'S3' primary storage type"), l(F, "examples", ["<%= config.bin %> <%= command.id %>"]), l(F, "flags", {
|
|
1596
|
+
...I,
|
|
1597
|
+
...j,
|
|
1599
1598
|
...he,
|
|
1600
|
-
...
|
|
1601
|
-
...
|
|
1599
|
+
...C,
|
|
1600
|
+
...pe,
|
|
1601
|
+
...U,
|
|
1602
|
+
...M,
|
|
1602
1603
|
...ye,
|
|
1603
|
-
...
|
|
1604
|
-
...
|
|
1605
|
-
}),
|
|
1606
|
-
var
|
|
1607
|
-
let
|
|
1604
|
+
...B,
|
|
1605
|
+
...pr
|
|
1606
|
+
}), F);
|
|
1607
|
+
var x;
|
|
1608
|
+
let ut = (x = class extends L {
|
|
1608
1609
|
async run() {
|
|
1609
|
-
const { flags: e } = await this.parse(
|
|
1610
|
+
const { flags: e } = await this.parse(x), t = A(e["log-level"]), r = new O(t);
|
|
1610
1611
|
r.mergeLicenseEnvs(e);
|
|
1611
|
-
const n = "local-s3", s = e["pl-workdir"] ?? ".", i = e.storage ? m.join(s, e.storage) :
|
|
1612
|
+
const n = "local-s3", s = e["pl-workdir"] ?? ".", i = e.storage ? m.join(s, e.storage) : u.instanceDir(n), c = e["pl-log-file"] ? m.join(s, e["pl-log-file"]) : void 0, o = r.initAuthDriversList(e, s), d = e["auth-enabled"] ?? o !== void 0;
|
|
1612
1613
|
let g = e["pl-binary"];
|
|
1613
1614
|
e["pl-sources"] && (g = r.buildPlatforma({ repoRoot: e["pl-sources"] }));
|
|
1614
1615
|
let h = "127.0.0.1:6345";
|
|
1615
1616
|
e["grpc-listen"] ? h = e["grpc-listen"] : e["grpc-port"] && (h = `127.0.0.1:${e["grpc-port"]}`);
|
|
1616
|
-
let
|
|
1617
|
-
e["monitoring-listen"] ?
|
|
1618
|
-
let
|
|
1619
|
-
e["debug-listen"] ?
|
|
1620
|
-
const
|
|
1617
|
+
let b = "127.0.0.1:9090";
|
|
1618
|
+
e["monitoring-listen"] ? b = e["monitoring-listen"] : e["monitoring-port"] && (b = `127.0.0.1:${e["monitoring-port"]}`);
|
|
1619
|
+
let w = "127.0.0.1:9091";
|
|
1620
|
+
e["debug-listen"] ? w = e["debug-listen"] : e["debug-port"] && (w = `127.0.0.1:${e["debug-port"]}`);
|
|
1621
|
+
const p = {
|
|
1621
1622
|
binaryPath: g,
|
|
1622
1623
|
version: e.version,
|
|
1623
1624
|
configPath: e.config,
|
|
@@ -1628,20 +1629,20 @@ let gt = (F = class extends L {
|
|
|
1628
1629
|
minioConsolePort: e["s3-console-port"],
|
|
1629
1630
|
configOptions: {
|
|
1630
1631
|
grpc: { listen: h },
|
|
1631
|
-
monitoring: { listen:
|
|
1632
|
-
debug: { listen:
|
|
1632
|
+
monitoring: { listen: b },
|
|
1633
|
+
debug: { listen: w },
|
|
1633
1634
|
license: { value: e.license, file: e["license-file"] },
|
|
1634
1635
|
log: { path: c },
|
|
1635
1636
|
localRoot: i,
|
|
1636
1637
|
core: {
|
|
1637
|
-
auth: { enabled:
|
|
1638
|
+
auth: { enabled: d, drivers: o }
|
|
1638
1639
|
},
|
|
1639
1640
|
storages: {
|
|
1640
1641
|
work: { type: "FS", rootPath: e["storage-work"] }
|
|
1641
1642
|
}
|
|
1642
1643
|
}
|
|
1643
|
-
}, P = r.createLocalS3(n,
|
|
1644
|
-
|
|
1644
|
+
}, P = r.createLocalS3(n, p);
|
|
1645
|
+
p.binaryPath ? r.switchInstance(P) : Se(t, { version: e.version }).then(() => {
|
|
1645
1646
|
const S = r.switchInstance(P);
|
|
1646
1647
|
setTimeout(() => {
|
|
1647
1648
|
for (const k of S)
|
|
@@ -1651,33 +1652,33 @@ let gt = (F = class extends L {
|
|
|
1651
1652
|
t.error(S.message);
|
|
1652
1653
|
});
|
|
1653
1654
|
}
|
|
1654
|
-
}, l(
|
|
1655
|
-
...
|
|
1656
|
-
...
|
|
1657
|
-
...
|
|
1658
|
-
...
|
|
1659
|
-
...
|
|
1660
|
-
...
|
|
1661
|
-
...
|
|
1662
|
-
...
|
|
1663
|
-
...
|
|
1664
|
-
...oe,
|
|
1655
|
+
}, l(x, "description", "Run Platforma Backend service as local process on current host (no docker container)"), l(x, "examples", ["<%= config.bin %> <%= command.id %>"]), l(x, "flags", {
|
|
1656
|
+
...I,
|
|
1657
|
+
...C,
|
|
1658
|
+
...j,
|
|
1659
|
+
...hr,
|
|
1660
|
+
...ve,
|
|
1661
|
+
...Pe,
|
|
1662
|
+
...ke,
|
|
1663
|
+
...M,
|
|
1664
|
+
...B,
|
|
1665
1665
|
...ce,
|
|
1666
|
-
...
|
|
1667
|
-
...
|
|
1668
|
-
...
|
|
1669
|
-
...
|
|
1670
|
-
|
|
1671
|
-
|
|
1666
|
+
...ie,
|
|
1667
|
+
...oe,
|
|
1668
|
+
...be,
|
|
1669
|
+
...we,
|
|
1670
|
+
...U
|
|
1671
|
+
}), x);
|
|
1672
|
+
const z = class z extends L {
|
|
1672
1673
|
async run() {
|
|
1673
|
-
const { flags: e, args: t } = await this.parse(
|
|
1674
|
+
const { flags: e, args: t } = await this.parse(z), r = A(e["log-level"]), n = new O(r);
|
|
1674
1675
|
n.mergeLicenseEnvs(e);
|
|
1675
1676
|
const s = t.name, i = e["auth-enabled"], c = i ? {
|
|
1676
1677
|
enabled: i,
|
|
1677
1678
|
drivers: n.initAuthDriversList(e, ".")
|
|
1678
|
-
} : void 0, o = e.storage ? m.join(".", e.storage) :
|
|
1679
|
+
} : void 0, o = e.storage ? m.join(".", e.storage) : u.instanceDir(s), d = [];
|
|
1679
1680
|
for (const h of e.mount ?? [])
|
|
1680
|
-
|
|
1681
|
+
d.push({ hostPath: h });
|
|
1681
1682
|
const g = e.arch ? `linux/${e.arch}` : void 0;
|
|
1682
1683
|
n.createDocker(s, o, {
|
|
1683
1684
|
primaryStorageURL: e["storage-primary"],
|
|
@@ -1686,7 +1687,7 @@ const G = class G extends L {
|
|
|
1686
1687
|
image: e.image,
|
|
1687
1688
|
version: e.version,
|
|
1688
1689
|
platformOverride: g,
|
|
1689
|
-
customMounts:
|
|
1690
|
+
customMounts: d,
|
|
1690
1691
|
license: e.license,
|
|
1691
1692
|
licenseFile: e["license-file"],
|
|
1692
1693
|
auth: c,
|
|
@@ -1699,36 +1700,37 @@ const G = class G extends L {
|
|
|
1699
1700
|
}), r.info(`Instance '${s}' was created. To start it run 'up' command`);
|
|
1700
1701
|
}
|
|
1701
1702
|
};
|
|
1702
|
-
l(
|
|
1703
|
-
...
|
|
1704
|
-
...
|
|
1705
|
-
...me,
|
|
1706
|
-
...W,
|
|
1703
|
+
l(z, "description", "Run Platforma Backend service as docker container on current host"), l(z, "examples", ["<%= config.bin %> <%= command.id %>"]), l(z, "flags", {
|
|
1704
|
+
...I,
|
|
1705
|
+
...j,
|
|
1707
1706
|
...he,
|
|
1708
|
-
...
|
|
1709
|
-
...
|
|
1707
|
+
...C,
|
|
1708
|
+
...pe,
|
|
1709
|
+
...U,
|
|
1710
|
+
...M,
|
|
1710
1711
|
...ye,
|
|
1711
|
-
...
|
|
1712
|
-
...oe,
|
|
1712
|
+
...B,
|
|
1713
1713
|
...ce,
|
|
1714
|
-
...
|
|
1715
|
-
|
|
1716
|
-
|
|
1714
|
+
...ie,
|
|
1715
|
+
...oe
|
|
1716
|
+
}), l(z, "args", {
|
|
1717
|
+
name: K.string({ required: !0 })
|
|
1717
1718
|
});
|
|
1718
|
-
let
|
|
1719
|
-
|
|
1719
|
+
let je = z;
|
|
1720
|
+
var R;
|
|
1721
|
+
let ft = (R = class extends L {
|
|
1720
1722
|
async run() {
|
|
1721
|
-
const { flags: e, args: t } = await this.parse(
|
|
1723
|
+
const { flags: e, args: t } = await this.parse(R), r = A(e["log-level"]), n = new O(r);
|
|
1722
1724
|
n.mergeLicenseEnvs(e);
|
|
1723
|
-
const s = t.name, i = e["pl-workdir"] ?? ".", c = e.storage ? m.join(i, e.storage) :
|
|
1725
|
+
const s = t.name, i = e["pl-workdir"] ?? ".", c = e.storage ? m.join(i, e.storage) : u.instanceDir(s), o = e["pl-log-file"] ? m.join(i, e["pl-log-file"]) : void 0, d = n.initAuthDriversList(e, i), g = e["auth-enabled"] ?? d !== void 0;
|
|
1724
1726
|
let h = e["pl-binary"];
|
|
1725
1727
|
e["pl-sources"] && (h = n.buildPlatforma({ repoRoot: e["pl-sources"] }));
|
|
1726
|
-
let
|
|
1727
|
-
e["grpc-listen"] ?
|
|
1728
|
-
let
|
|
1729
|
-
e["monitoring-listen"] ?
|
|
1730
|
-
let
|
|
1731
|
-
e["debug-listen"] ?
|
|
1728
|
+
let b = "127.0.0.1:6345";
|
|
1729
|
+
e["grpc-listen"] ? b = e["grpc-listen"] : e["grpc-port"] && (b = `127.0.0.1:${e["grpc-port"]}`);
|
|
1730
|
+
let w = "127.0.0.1:9090";
|
|
1731
|
+
e["monitoring-listen"] ? w = e["monitoring-listen"] : e["monitoring-port"] && (w = `127.0.0.1:${e["monitoring-port"]}`);
|
|
1732
|
+
let p = "127.0.0.1:9091";
|
|
1733
|
+
e["debug-listen"] ? p = e["debug-listen"] : e["debug-port"] && (p = `127.0.0.1:${e["debug-port"]}`);
|
|
1732
1734
|
const P = {
|
|
1733
1735
|
binaryPath: h,
|
|
1734
1736
|
version: e.version,
|
|
@@ -1737,72 +1739,54 @@ const U = class U extends L {
|
|
|
1737
1739
|
primaryURL: e["storage-primary"],
|
|
1738
1740
|
libraryURL: e["storage-library"],
|
|
1739
1741
|
configOptions: {
|
|
1740
|
-
grpc: { listen:
|
|
1741
|
-
monitoring: { listen:
|
|
1742
|
-
debug: { listen:
|
|
1742
|
+
grpc: { listen: b },
|
|
1743
|
+
monitoring: { listen: w },
|
|
1744
|
+
debug: { listen: p },
|
|
1743
1745
|
license: { value: e.license, file: e["license-file"] },
|
|
1744
1746
|
log: { path: o },
|
|
1745
1747
|
localRoot: c,
|
|
1746
|
-
core: { auth: { enabled: g, drivers:
|
|
1748
|
+
core: { auth: { enabled: g, drivers: d } },
|
|
1747
1749
|
storages: {
|
|
1748
1750
|
work: { type: "FS", rootPath: e["storage-work"] }
|
|
1749
1751
|
}
|
|
1750
1752
|
}
|
|
1751
1753
|
};
|
|
1752
|
-
|
|
1753
|
-
case "s3": {
|
|
1754
|
-
r.info("Creating instance configuration, data directory and other stuff..."), n.createLocalS3(s, {
|
|
1755
|
-
...P,
|
|
1756
|
-
minioPort: e["s3-port"],
|
|
1757
|
-
minioConsolePort: e["s3-console-port"]
|
|
1758
|
-
});
|
|
1759
|
-
break;
|
|
1760
|
-
}
|
|
1761
|
-
case void 0: {
|
|
1762
|
-
e["s3-port"] && r.warn("flag 's3-port' is only for 's3' mode"), e["s3-console-port"] && r.warn("flag 's3-console-port' is only for 's3' mode"), n.createLocal(s, P);
|
|
1763
|
-
break;
|
|
1764
|
-
}
|
|
1765
|
-
}
|
|
1766
|
-
if (P.binaryPath) {
|
|
1754
|
+
if (n.createLocal(s, P), P.binaryPath) {
|
|
1767
1755
|
r.info(`Instance '${s}' was created. To start it run 'up' command`);
|
|
1768
1756
|
return;
|
|
1769
1757
|
}
|
|
1770
|
-
|
|
1771
|
-
r.error(
|
|
1758
|
+
Se(r, { version: e.version }).then(() => r.info(`Instance '${s}' was created. To start it run 'pl up' command`)).catch(function(S) {
|
|
1759
|
+
r.error(S.message);
|
|
1772
1760
|
});
|
|
1773
1761
|
}
|
|
1774
|
-
}
|
|
1775
|
-
|
|
1776
|
-
...
|
|
1777
|
-
...
|
|
1778
|
-
...
|
|
1779
|
-
...
|
|
1780
|
-
...
|
|
1781
|
-
...
|
|
1782
|
-
...
|
|
1783
|
-
...q,
|
|
1784
|
-
...Y,
|
|
1785
|
-
...oe,
|
|
1762
|
+
}, l(R, "description", "Run Platforma Backend service as local process on current host (no docker container)"), l(R, "examples", ["<%= config.bin %> <%= command.id %>"]), l(R, "flags", {
|
|
1763
|
+
...I,
|
|
1764
|
+
...C,
|
|
1765
|
+
...j,
|
|
1766
|
+
...ve,
|
|
1767
|
+
...Pe,
|
|
1768
|
+
...ke,
|
|
1769
|
+
...M,
|
|
1770
|
+
...B,
|
|
1786
1771
|
...ce,
|
|
1787
|
-
...
|
|
1788
|
-
...
|
|
1789
|
-
...
|
|
1790
|
-
...
|
|
1791
|
-
|
|
1792
|
-
|
|
1793
|
-
|
|
1794
|
-
});
|
|
1795
|
-
|
|
1796
|
-
const J = class J extends L {
|
|
1772
|
+
...ie,
|
|
1773
|
+
...oe,
|
|
1774
|
+
...be,
|
|
1775
|
+
...we,
|
|
1776
|
+
...U
|
|
1777
|
+
}), l(R, "args", {
|
|
1778
|
+
name: K.string({ required: !0 })
|
|
1779
|
+
}), R);
|
|
1780
|
+
const Y = class Y extends L {
|
|
1797
1781
|
async run() {
|
|
1798
|
-
const { flags: e, args: t } = await this.parse(
|
|
1782
|
+
const { flags: e, args: t } = await this.parse(Y), r = A(e["log-level"]), n = new O(r);
|
|
1799
1783
|
n.mergeLicenseEnvs(e);
|
|
1800
1784
|
const s = t.name, i = e["auth-enabled"], c = i ? {
|
|
1801
1785
|
enabled: i,
|
|
1802
1786
|
drivers: n.initAuthDriversList(e, ".")
|
|
1803
|
-
} : void 0, o = e.storage ? m.join(".", e.storage) :
|
|
1804
|
-
for (const
|
|
1805
|
-
|
|
1787
|
+
} : void 0, o = e.storage ? m.join(".", e.storage) : u.instanceDir(s), d = [];
|
|
1788
|
+
for (const b of e.mount ?? [])
|
|
1789
|
+
d.push({ hostPath: b });
|
|
1806
1790
|
const g = e.arch ? `linux/${e.arch}` : void 0, h = e["minio-presign-host"] ? "minio" : "localhost";
|
|
1807
1791
|
n.createDockerS3(s, o, {
|
|
1808
1792
|
image: e.image,
|
|
@@ -1810,7 +1794,7 @@ const J = class J extends L {
|
|
|
1810
1794
|
license: e.license,
|
|
1811
1795
|
licenseFile: e["license-file"],
|
|
1812
1796
|
platformOverride: g,
|
|
1813
|
-
customMounts:
|
|
1797
|
+
customMounts: d,
|
|
1814
1798
|
auth: c,
|
|
1815
1799
|
grpcAddr: e["grpc-listen"],
|
|
1816
1800
|
grpcPort: e["grpc-port"],
|
|
@@ -1822,39 +1806,104 @@ const J = class J extends L {
|
|
|
1822
1806
|
}), r.info(`Instance '${s}' was created. To start it run 'up' command`), e["minio-presign-host"] && r.info(" NOTE: make sure you have 'minio' host in your hosts file as 127.0.0.1 address");
|
|
1823
1807
|
}
|
|
1824
1808
|
};
|
|
1825
|
-
l(
|
|
1826
|
-
...
|
|
1827
|
-
...
|
|
1828
|
-
...me,
|
|
1829
|
-
...W,
|
|
1809
|
+
l(Y, "description", "Run Platforma Backend service as docker container on current host with MinIO as local S3 storage"), l(Y, "examples", ["<%= config.bin %> <%= command.id %>"]), l(Y, "flags", {
|
|
1810
|
+
...I,
|
|
1811
|
+
...j,
|
|
1830
1812
|
...he,
|
|
1831
|
-
...
|
|
1832
|
-
...
|
|
1813
|
+
...C,
|
|
1814
|
+
...pe,
|
|
1815
|
+
...U,
|
|
1816
|
+
...M,
|
|
1833
1817
|
...ye,
|
|
1834
|
-
...
|
|
1835
|
-
...
|
|
1836
|
-
}), l(
|
|
1837
|
-
name:
|
|
1818
|
+
...B,
|
|
1819
|
+
...pr
|
|
1820
|
+
}), l(Y, "args", {
|
|
1821
|
+
name: K.string({ required: !0 })
|
|
1822
|
+
});
|
|
1823
|
+
let Be = Y;
|
|
1824
|
+
const V = class V extends L {
|
|
1825
|
+
async run() {
|
|
1826
|
+
const { flags: e, args: t } = await this.parse(V), r = A(e["log-level"]), n = new O(r);
|
|
1827
|
+
n.mergeLicenseEnvs(e);
|
|
1828
|
+
const s = t.name, i = e["pl-workdir"] ?? ".", c = e.storage ? m.join(i, e.storage) : u.instanceDir(s), o = e["pl-log-file"] ? m.join(i, e["pl-log-file"]) : void 0, d = n.initAuthDriversList(e, i), g = e["auth-enabled"] ?? d !== void 0;
|
|
1829
|
+
let h = e["pl-binary"];
|
|
1830
|
+
e["pl-sources"] && (h = n.buildPlatforma({ repoRoot: e["pl-sources"] }));
|
|
1831
|
+
let b = "127.0.0.1:6345";
|
|
1832
|
+
e["grpc-listen"] ? b = e["grpc-listen"] : e["grpc-port"] && (b = `127.0.0.1:${e["grpc-port"]}`);
|
|
1833
|
+
let w = "127.0.0.1:9090";
|
|
1834
|
+
e["monitoring-listen"] ? w = e["monitoring-listen"] : e["monitoring-port"] && (w = `127.0.0.1:${e["monitoring-port"]}`);
|
|
1835
|
+
let p = "127.0.0.1:9091";
|
|
1836
|
+
e["debug-listen"] ? p = e["debug-listen"] : e["debug-port"] && (p = `127.0.0.1:${e["debug-port"]}`);
|
|
1837
|
+
const P = {
|
|
1838
|
+
binaryPath: h,
|
|
1839
|
+
version: e.version,
|
|
1840
|
+
configPath: e.config,
|
|
1841
|
+
workdir: e["pl-workdir"],
|
|
1842
|
+
primaryURL: e["storage-primary"],
|
|
1843
|
+
libraryURL: e["storage-library"],
|
|
1844
|
+
minioPort: e["s3-port"],
|
|
1845
|
+
minioConsolePort: e["s3-console-port"],
|
|
1846
|
+
configOptions: {
|
|
1847
|
+
grpc: { listen: b },
|
|
1848
|
+
monitoring: { listen: w },
|
|
1849
|
+
debug: { listen: p },
|
|
1850
|
+
license: { value: e.license, file: e["license-file"] },
|
|
1851
|
+
log: { path: o },
|
|
1852
|
+
localRoot: c,
|
|
1853
|
+
core: { auth: { enabled: g, drivers: d } },
|
|
1854
|
+
storages: {
|
|
1855
|
+
work: { type: "FS", rootPath: e["storage-work"] }
|
|
1856
|
+
}
|
|
1857
|
+
}
|
|
1858
|
+
};
|
|
1859
|
+
if (r.info("Creating instance configuration, data directory and other stuff..."), n.createLocalS3(s, P), P.binaryPath) {
|
|
1860
|
+
r.info(`Instance '${s}' was created. To start it run 'up' command`);
|
|
1861
|
+
return;
|
|
1862
|
+
}
|
|
1863
|
+
Se(r, { version: e.version }).then(() => r.info(`Instance '${s}' was created. To start it run 'pl up' command`)).catch(function(S) {
|
|
1864
|
+
r.error(S.message);
|
|
1865
|
+
});
|
|
1866
|
+
}
|
|
1867
|
+
};
|
|
1868
|
+
l(V, "description", "Run Platforma Backend service as local process on current host (no docker container)"), l(V, "examples", ["<%= config.bin %> <%= command.id %>"]), l(V, "flags", {
|
|
1869
|
+
...I,
|
|
1870
|
+
...C,
|
|
1871
|
+
...hr,
|
|
1872
|
+
...j,
|
|
1873
|
+
...ve,
|
|
1874
|
+
...Pe,
|
|
1875
|
+
...ke,
|
|
1876
|
+
...M,
|
|
1877
|
+
...B,
|
|
1878
|
+
...ce,
|
|
1879
|
+
...ie,
|
|
1880
|
+
...oe,
|
|
1881
|
+
...be,
|
|
1882
|
+
...we,
|
|
1883
|
+
...U
|
|
1884
|
+
}), l(V, "args", {
|
|
1885
|
+
name: K.string({ required: !0 })
|
|
1838
1886
|
});
|
|
1839
|
-
let
|
|
1840
|
-
const
|
|
1841
|
-
"create-block":
|
|
1842
|
-
reset:
|
|
1843
|
-
start:
|
|
1844
|
-
stop:
|
|
1845
|
-
"start:docker":
|
|
1846
|
-
"start:local":
|
|
1847
|
-
"svc:delete":
|
|
1848
|
-
"svc:down":
|
|
1849
|
-
"svc:list":
|
|
1850
|
-
"svc:up":
|
|
1851
|
-
"start:docker:s3":
|
|
1852
|
-
"start:local:s3":
|
|
1853
|
-
"svc:create:docker":
|
|
1854
|
-
"svc:create:local":
|
|
1855
|
-
"svc:create:docker:s3":
|
|
1887
|
+
let Ue = V;
|
|
1888
|
+
const Rt = {
|
|
1889
|
+
"create-block": Ee,
|
|
1890
|
+
reset: Fe,
|
|
1891
|
+
start: xe,
|
|
1892
|
+
stop: De,
|
|
1893
|
+
"start:docker": lt,
|
|
1894
|
+
"start:local": dt,
|
|
1895
|
+
"svc:delete": Ne,
|
|
1896
|
+
"svc:down": _e,
|
|
1897
|
+
"svc:list": Ce,
|
|
1898
|
+
"svc:up": Me,
|
|
1899
|
+
"start:docker:s3": gt,
|
|
1900
|
+
"start:local:s3": ut,
|
|
1901
|
+
"svc:create:docker": je,
|
|
1902
|
+
"svc:create:local": ft,
|
|
1903
|
+
"svc:create:docker:s3": Be,
|
|
1904
|
+
"svc:create:local:s3": Ue
|
|
1856
1905
|
};
|
|
1857
1906
|
export {
|
|
1858
|
-
|
|
1907
|
+
Rt as COMMANDS
|
|
1859
1908
|
};
|
|
1860
1909
|
//# sourceMappingURL=index.mjs.map
|