@platforma-sdk/bootstrap 2.9.1 → 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/assets/compose-backend.yaml +12 -5
- package/dist/block.d.ts.map +1 -1
- package/dist/cmd-opts.d.ts +5 -2
- package/dist/cmd-opts.d.ts.map +1 -1
- package/dist/commands/start/docker/s3.d.ts.map +1 -1
- package/dist/commands/start/docker.d.ts.map +1 -1
- package/dist/commands/start/local/s3.d.ts +2 -2
- package/dist/commands/start/local/s3.d.ts.map +1 -1
- package/dist/commands/start/local.d.ts.map +1 -1
- package/dist/commands/start.d.ts.map +1 -1
- package/dist/commands/stop.d.ts.map +1 -1
- package/dist/commands/svc/create/docker/s3.d.ts +32 -0
- package/dist/commands/svc/create/docker/s3.d.ts.map +1 -0
- package/dist/commands/svc/create/docker.d.ts +34 -0
- package/dist/commands/svc/create/docker.d.ts.map +1 -0
- package/dist/commands/svc/create/local.d.ts +39 -0
- package/dist/commands/svc/create/local.d.ts.map +1 -0
- package/dist/commands/svc/delete.d.ts +15 -0
- package/dist/commands/svc/delete.d.ts.map +1 -0
- package/dist/commands/svc/down.d.ts +14 -0
- package/dist/commands/svc/down.d.ts.map +1 -0
- package/dist/commands/svc/list.d.ts +8 -0
- package/dist/commands/svc/list.d.ts.map +1 -0
- package/dist/commands/svc/up.d.ts +14 -0
- package/dist/commands/svc/up.d.ts.map +1 -0
- package/dist/core.d.ts +19 -13
- package/dist/core.d.ts.map +1 -1
- package/dist/index.d.ts +18 -4
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +31 -34
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1253 -950
- package/dist/index.mjs.map +1 -1
- package/dist/platforma.d.ts.map +1 -1
- package/dist/run.d.ts +11 -5
- package/dist/run.d.ts.map +1 -1
- package/dist/state.d.ts +43 -22
- package/dist/state.d.ts.map +1 -1
- package/dist/templates/compose.d.ts +3 -1
- package/dist/templates/compose.d.ts.map +1 -1
- package/dist/templates/types.d.ts.map +1 -1
- package/dist/util.d.ts +7 -1
- package/dist/util.d.ts.map +1 -1
- package/package.json +2 -2
package/dist/index.mjs
CHANGED
@@ -1,513 +1,540 @@
@@ -525,7 +552,7 @@ function jr(a) {
@@ -533,137 +560,137 @@ function ar(a) {
@@ -680,7 +707,7 @@ logging:
@@ -714,7 +741,7 @@ controllers:
@@ -731,7 +758,7 @@ controllers:
@@ -739,12 +766,12 @@ controllers:
@@ -754,13 +781,13 @@ function Hr(a) {
@@ -768,477 +795,509 @@ function sr(a) {
|
|
1070
|
+
h.presignEndpoint = `http://${f}:9000`;
|
|
1071
|
+
const w = c("db"), v = c("work"), y = i("users.htpasswd");
|
|
1072
|
+
u.existsSync(y) || u.copyFileSync(K("users.htpasswd"), y);
|
|
1073
|
+
const P = i("compose.yaml");
|
|
1074
|
+
u.existsSync(P) && this.logger.info(`replacing docker compose file ${P}`);
|
|
1075
|
+
const S = [];
|
|
1076
|
+
for (const p of (r == null ? void 0 : r.customMounts) ?? [])
|
|
1077
|
+
S.push({
|
|
1078
|
+
hostPath: p.hostPath,
|
|
1079
|
+
containerPath: p.containerPath ?? p.hostPath
|
|
991
1080
|
});
|
|
992
|
-
|
|
1081
|
+
ve(n, P, `pl-${e}`, /* @__PURE__ */ new Map([
|
|
993
1082
|
["minio", {}],
|
|
994
1083
|
["backend", {
|
|
995
1084
|
platform: r == null ? void 0 : r.platformOverride,
|
|
996
|
-
mounts:
|
|
1085
|
+
mounts: S
|
|
997
1086
|
}]
|
|
998
1087
|
]));
|
|
999
|
-
const
|
|
1088
|
+
const k = {
|
|
1000
1089
|
MINIO_IMAGE: "quay.io/minio/minio",
|
|
1001
|
-
MINIO_STORAGE:
|
|
1090
|
+
MINIO_STORAGE: c("minio"),
|
|
1002
1091
|
PL_IMAGE: s,
|
|
1003
|
-
PL_AUTH_HTPASSWD_PATH:
|
|
1092
|
+
PL_AUTH_HTPASSWD_PATH: y,
|
|
1004
1093
|
PL_LICENSE: r == null ? void 0 : r.license,
|
|
1005
1094
|
PL_LICENSE_FILE: r == null ? void 0 : r.licenseFile,
|
|
1006
1095
|
PL_LOG_LEVEL: (r == null ? void 0 : r.logLevel) ?? "info",
|
|
1007
|
-
PL_LOG_DIR:
|
|
1096
|
+
PL_LOG_DIR: m.dirname(o),
|
|
1008
1097
|
PL_LOG_ROTATION_ENABLED: "true",
|
|
1009
|
-
PL_DATA_DB_ROOT:
|
|
1010
|
-
PL_DATA_PRIMARY_ROOT:
|
|
1011
|
-
PL_DATA_LIBRARY_ROOT:
|
|
1098
|
+
PL_DATA_DB_ROOT: w,
|
|
1099
|
+
PL_DATA_PRIMARY_ROOT: c("primary"),
|
|
1100
|
+
PL_DATA_LIBRARY_ROOT: c("library"),
|
|
1012
1101
|
PL_DATA_WORKDIR_ROOT: v,
|
|
1013
|
-
|
|
1014
|
-
|
|
1015
|
-
...this.configureDockerStorage("
|
|
1102
|
+
// Mount packages storage as volume, because APFS is case-insensitive on Mac OS X and this breaks some pl software installation.
|
|
1103
|
+
// PL_DATA_PACKAGE_ROOT: storageDir('packages'),
|
|
1104
|
+
...this.configureDockerStorage("primary", g),
|
|
1105
|
+
...this.configureDockerStorage("library", h)
|
|
1016
1106
|
};
|
|
1017
|
-
if (r != null && r.grpcAddr && (
|
|
1018
|
-
for (const
|
|
1019
|
-
|
|
1020
|
-
|
|
1107
|
+
if (r != null && r.grpcAddr && (k.PL_GRPC_ADDR = r.grpcAddr), r != null && r.grpcPort && (k.PL_GRPC_PORT = r.grpcPort.toString()), r != null && r.monitoringAddr && (k.PL_MONITORING_ADDR = r.monitoringAddr), r != null && r.monitoringPort && (k.PL_MONITORING_PORT = r.monitoringPort.toString()), r != null && r.debugAddr && (k.PL_DEBUG_ADDR = r.debugAddr), r != null && r.debugPort && (k.PL_DEBUG_PORT = r.debugPort.toString()), r != null && r.auth && (r.auth.enabled && (k.PL_AUTH_ENABLED = "true"), r.auth.drivers)) {
|
|
1108
|
+
for (const p of r.auth.drivers)
|
|
1109
|
+
p.driver === "htpasswd" && (k.PL_AUTH_HTPASSWD_PATH = m.resolve(p.path), p.path = "/etc/platforma/users.htpasswd");
|
|
1110
|
+
k.PL_AUTH_DRIVERS = JSON.stringify(r.auth.drivers);
|
|
1021
1111
|
}
|
|
1022
|
-
|
|
1023
|
-
|
|
1024
|
-
[
|
|
1025
|
-
"
|
|
1026
|
-
`--file=${
|
|
1027
|
-
|
|
1028
|
-
"
|
|
1029
|
-
|
|
1030
|
-
|
|
1031
|
-
"
|
|
1032
|
-
"
|
|
1033
|
-
|
|
1034
|
-
|
|
1035
|
-
|
|
1036
|
-
|
|
1037
|
-
|
|
1038
|
-
|
|
1039
|
-
|
|
1040
|
-
|
|
1112
|
+
return d.setInstanceInfo(e, {
|
|
1113
|
+
type: "docker",
|
|
1114
|
+
upCommands: [{
|
|
1115
|
+
cmd: "docker",
|
|
1116
|
+
args: ["compose", `--file=${P}`, "up", "--detach", "--remove-orphans", "--pull=missing"],
|
|
1117
|
+
envs: k,
|
|
1118
|
+
runOpts: { stdio: "inherit" }
|
|
1119
|
+
}],
|
|
1120
|
+
downCommands: [{
|
|
1121
|
+
cmd: "docker",
|
|
1122
|
+
args: ["compose", `--file=${P}`, "down"],
|
|
1123
|
+
envs: k,
|
|
1124
|
+
runOpts: { stdio: "inherit" }
|
|
1125
|
+
}],
|
|
1126
|
+
cleanupCommands: [{
|
|
1127
|
+
cmd: "docker",
|
|
1128
|
+
args: ["compose", `--file=${P}`, "down", "--volumes", "--remove-orphans"],
|
|
1129
|
+
envs: k,
|
|
1130
|
+
runOpts: { stdio: "inherit" }
|
|
1131
|
+
}],
|
|
1132
|
+
runInfo: {
|
|
1133
|
+
apiPort: r == null ? void 0 : r.grpcPort,
|
|
1134
|
+
apiAddr: r == null ? void 0 : r.grpcAddr,
|
|
1135
|
+
logPath: o,
|
|
1136
|
+
primary: g,
|
|
1137
|
+
work: { type: "FS", rootPath: v },
|
|
1138
|
+
library: h,
|
|
1139
|
+
dbPath: w
|
|
1041
1140
|
}
|
|
1042
|
-
);
|
|
1043
|
-
z(k, "failed to start Platforma Backend in Docker"), d.isActive = !0;
|
|
1044
|
-
const R = this.renderRunInfo({
|
|
1045
|
-
apiPort: r == null ? void 0 : r.grpcPort,
|
|
1046
|
-
apiAddr: r == null ? void 0 : r.grpcAddr,
|
|
1047
|
-
logPath: c,
|
|
1048
|
-
primary: f,
|
|
1049
|
-
work: { type: "FS", rootPath: v },
|
|
1050
|
-
library: u,
|
|
1051
|
-
dbPath: b
|
|
1052
|
-
});
|
|
1053
|
-
this.logger.info(`Started platforma:
|
|
1054
|
-
${R}`);
|
|
1141
|
+
}), d.getInstanceInfo(e);
|
|
1055
1142
|
}
|
|
1056
|
-
|
|
1057
|
-
|
|
1143
|
+
createDocker(e, t, r) {
|
|
1144
|
+
this.logger.debug("creating platforma instance in 'docker' mode...");
|
|
1145
|
+
const n = K("compose-backend.yaml"), s = (r == null ? void 0 : r.image) ?? dr(r == null ? void 0 : r.version);
|
|
1058
1146
|
this.checkLicense(r == null ? void 0 : r.license, r == null ? void 0 : r.licenseFile);
|
|
1059
|
-
const i = (
|
|
1060
|
-
const
|
|
1061
|
-
return
|
|
1062
|
-
},
|
|
1063
|
-
|
|
1064
|
-
const
|
|
1065
|
-
|
|
1066
|
-
const
|
|
1067
|
-
|
|
1068
|
-
const
|
|
1069
|
-
for (const
|
|
1070
|
-
|
|
1071
|
-
hostPath:
|
|
1072
|
-
containerPath:
|
|
1147
|
+
const i = (...$) => m.join(t, ...$), c = ($) => {
|
|
1148
|
+
const se = i($);
|
|
1149
|
+
return pe(se, { mode: "0775" }), se;
|
|
1150
|
+
}, o = i("logs", "platforma.log");
|
|
1151
|
+
u.existsSync(o) || (u.mkdirSync(m.dirname(o), { recursive: !0 }), u.writeFileSync(o, ""));
|
|
1152
|
+
const f = c("db"), g = c("primary"), h = c("library"), w = c("work"), v = i("users.htpasswd");
|
|
1153
|
+
u.existsSync(v) || u.copyFileSync(K("users.htpasswd"), v);
|
|
1154
|
+
const y = i("compose.yaml");
|
|
1155
|
+
u.existsSync(y) && this.logger.info(`replacing docker compose file ${y}`);
|
|
1156
|
+
const P = [];
|
|
1157
|
+
for (const $ of (r == null ? void 0 : r.customMounts) ?? [])
|
|
1158
|
+
P.push({
|
|
1159
|
+
hostPath: $.hostPath,
|
|
1160
|
+
containerPath: $.containerPath ?? $.hostPath
|
|
1073
1161
|
});
|
|
1074
|
-
this.logger.debug(`Rendering docker compose file '${
|
|
1162
|
+
this.logger.debug(`Rendering docker compose file '${y}' using '${n}' as base template`), ve(n, y, `pl-${e}`, /* @__PURE__ */ new Map([
|
|
1075
1163
|
["backend", {
|
|
1076
1164
|
platform: r == null ? void 0 : r.platformOverride,
|
|
1077
|
-
mounts:
|
|
1165
|
+
mounts: P
|
|
1078
1166
|
}]
|
|
1079
1167
|
]));
|
|
1080
|
-
const
|
|
1168
|
+
const S = Q((r == null ? void 0 : r.primaryStorageURL) ?? `file:${g}`, "."), k = Q((r == null ? void 0 : r.libraryStorageURL) ?? `file:${h}`, "."), p = {
|
|
1081
1169
|
MINIO_IMAGE: "quay.io/minio/minio",
|
|
1082
|
-
MINIO_STORAGE:
|
|
1170
|
+
MINIO_STORAGE: c("minio"),
|
|
1083
1171
|
PL_IMAGE: s,
|
|
1084
1172
|
PL_AUTH_HTPASSWD_PATH: v,
|
|
1085
1173
|
PL_LICENSE: r == null ? void 0 : r.license,
|
|
1086
1174
|
PL_LICENSE_FILE: r == null ? void 0 : r.licenseFile,
|
|
1087
1175
|
PL_LOG_LEVEL: "info",
|
|
1088
|
-
PL_LOG_DIR:
|
|
1176
|
+
PL_LOG_DIR: m.dirname(o),
|
|
1089
1177
|
PL_LOG_ROTATION_ENABLED: "true",
|
|
1090
|
-
PL_DATA_DB_ROOT:
|
|
1091
|
-
PL_DATA_PRIMARY_ROOT:
|
|
1092
|
-
PL_DATA_LIBRARY_ROOT:
|
|
1093
|
-
PL_DATA_WORKDIR_ROOT:
|
|
1094
|
-
PL_DATA_PACKAGE_ROOT:
|
|
1095
|
-
...this.configureDockerStorage("primary",
|
|
1096
|
-
...this.configureDockerStorage("library",
|
|
1178
|
+
PL_DATA_DB_ROOT: f,
|
|
1179
|
+
PL_DATA_PRIMARY_ROOT: g,
|
|
1180
|
+
PL_DATA_LIBRARY_ROOT: h,
|
|
1181
|
+
PL_DATA_WORKDIR_ROOT: w,
|
|
1182
|
+
PL_DATA_PACKAGE_ROOT: c("packages"),
|
|
1183
|
+
...this.configureDockerStorage("primary", S),
|
|
1184
|
+
...this.configureDockerStorage("library", k)
|
|
1097
1185
|
};
|
|
1098
|
-
if (r != null && r.grpcAddr && (
|
|
1099
|
-
for (const
|
|
1100
|
-
|
|
1101
|
-
|
|
1186
|
+
if (r != null && r.grpcAddr && (p.PL_GRPC_ADDR = r.grpcAddr), r != null && r.grpcPort && (p.PL_GRPC_PORT = r.grpcPort.toString()), r != null && r.monitoringAddr && (p.PL_MONITORING_ADDR = r.monitoringAddr), r != null && r.monitoringPort && (p.PL_MONITORING_PORT = r.monitoringPort.toString()), r != null && r.debugAddr && (p.PL_DEBUG_ADDR = r.debugAddr), r != null && r.debugPort && (p.PL_DEBUG_PORT = r.debugPort.toString()), r != null && r.auth && (r.auth.enabled && (p.PL_AUTH_ENABLED = "true"), r.auth.drivers)) {
|
|
1187
|
+
for (const $ of r.auth.drivers)
|
|
1188
|
+
$.driver === "htpasswd" && (p.PL_AUTH_HTPASSWD_PATH = m.resolve($.path), $.path = "/etc/platforma/users.htpasswd");
|
|
1189
|
+
p.PL_AUTH_DRIVERS = JSON.stringify(r.auth.drivers);
|
|
1102
1190
|
}
|
|
1103
|
-
|
|
1104
|
-
|
|
1105
|
-
[
|
|
1106
|
-
|
|
1107
|
-
|
|
1108
|
-
|
|
1109
|
-
|
|
1110
|
-
|
|
1111
|
-
|
|
1112
|
-
|
|
1113
|
-
|
|
1114
|
-
|
|
1115
|
-
|
|
1191
|
+
return d.setInstanceInfo(e, {
|
|
1192
|
+
type: "docker",
|
|
1193
|
+
upCommands: [{
|
|
1194
|
+
cmd: "docker",
|
|
1195
|
+
args: ["compose", `--file=${y}`, "up", "--detach", "--remove-orphans", "--pull=missing"],
|
|
1196
|
+
envs: p,
|
|
1197
|
+
runOpts: { stdio: "inherit" }
|
|
1198
|
+
}],
|
|
1199
|
+
downCommands: [{
|
|
1200
|
+
cmd: "docker",
|
|
1201
|
+
args: ["compose", `--file=${y}`, "down"],
|
|
1202
|
+
envs: p,
|
|
1203
|
+
runOpts: { stdio: "inherit" }
|
|
1204
|
+
}],
|
|
1205
|
+
cleanupCommands: [{
|
|
1206
|
+
cmd: "docker",
|
|
1207
|
+
args: ["compose", `--file=${y}`, "down", "--volumes", "--remove-orphans"],
|
|
1208
|
+
envs: p,
|
|
1209
|
+
runOpts: { stdio: "inherit" }
|
|
1210
|
+
}],
|
|
1211
|
+
runInfo: {
|
|
1212
|
+
apiPort: r == null ? void 0 : r.grpcPort,
|
|
1213
|
+
apiAddr: r == null ? void 0 : r.grpcAddr,
|
|
1214
|
+
logPath: o,
|
|
1215
|
+
primary: S,
|
|
1216
|
+
work: { type: "FS", rootPath: w },
|
|
1217
|
+
library: k,
|
|
1218
|
+
dbPath: f
|
|
1116
1219
|
}
|
|
1117
|
-
);
|
|
1118
|
-
z(R, "failed to start Platforma Backend in Docker"), d.isActive = !0;
|
|
1119
|
-
const A = this.renderRunInfo({
|
|
1120
|
-
apiPort: r == null ? void 0 : r.grpcPort,
|
|
1121
|
-
apiAddr: r == null ? void 0 : r.grpcAddr,
|
|
1122
|
-
logPath: c,
|
|
1123
|
-
primary: L,
|
|
1124
|
-
work: { type: "FS", rootPath: b },
|
|
1125
|
-
library: w,
|
|
1126
|
-
dbPath: l
|
|
1127
|
-
});
|
|
1128
|
-
this.logger.info(`Started platforma:
|
|
1129
|
-
${A}`);
|
|
1220
|
+
}), d.getInstanceInfo(e);
|
|
1130
1221
|
}
|
|
1131
|
-
|
|
1132
|
-
|
|
1133
|
-
|
|
1134
|
-
|
|
1135
|
-
|
|
1136
|
-
|
|
1137
|
-
|
|
1138
|
-
|
|
1139
|
-
|
|
1140
|
-
|
|
1141
|
-
|
|
1142
|
-
|
|
1143
|
-
|
|
1144
|
-
|
|
1145
|
-
|
|
1146
|
-
|
|
1147
|
-
return;
|
|
1148
|
-
}
|
|
1149
|
-
case "process": {
|
|
1150
|
-
d.isValidPID && process.kill(e.process.pid), d.isActive = !1;
|
|
1151
|
-
return;
|
|
1222
|
+
cleanupInstance(e) {
|
|
1223
|
+
const t = [], r = /* @__PURE__ */ new Map();
|
|
1224
|
+
let n = "";
|
|
1225
|
+
if (e) {
|
|
1226
|
+
const s = d.getInstanceInfo(e);
|
|
1227
|
+
switch (r.set(e, s), s.type) {
|
|
1228
|
+
case "docker": {
|
|
1229
|
+
t.push(`docker service 'pl-${e}', including all its volumes and data in '${d.instanceDir(e)}' will be destroyed`);
|
|
1230
|
+
break;
|
|
1231
|
+
}
|
|
1232
|
+
case "process": {
|
|
1233
|
+
t.push(`directory '${d.instanceDir(e)}' would be deleted`), s.downCommands && t.push("associated docker service, including all volumes and data will be destroyed");
|
|
1234
|
+
break;
|
|
1235
|
+
}
|
|
1236
|
+
default:
|
|
1237
|
+
_();
|
|
1152
1238
|
}
|
|
1153
|
-
|
|
1154
|
-
|
|
1155
|
-
|
|
1156
|
-
|
|
1157
|
-
|
|
1158
|
-
var c, l, f, u, b, v, m, y, L, w, k, R, A, S;
|
|
1159
|
-
const e = [
|
|
1160
|
-
"last command run cache ('pl-service start' shorthand will stop working until next full start command call)",
|
|
1161
|
-
"'platforma' docker compose service containers and volumes"
|
|
1162
|
-
], r = d.data(), t = [r];
|
|
1163
|
-
if ((l = (c = d.lastRun) == null ? void 0 : c.docker) != null && l.primaryPath) {
|
|
1164
|
-
const p = (u = (f = d.lastRun) == null ? void 0 : f.docker) == null ? void 0 : u.primaryPath;
|
|
1165
|
-
p.startsWith(r) || t.push(p);
|
|
1166
|
-
}
|
|
1167
|
-
if ((v = (b = d.lastRun) == null ? void 0 : b.docker) != null && v.workPath) {
|
|
1168
|
-
const p = (y = (m = d.lastRun) == null ? void 0 : m.docker) == null ? void 0 : y.workPath;
|
|
1169
|
-
p.startsWith(r) || t.push(p);
|
|
1170
|
-
}
|
|
1171
|
-
if ((w = (L = d.lastRun) == null ? void 0 : L.process) != null && w.storagePath) {
|
|
1172
|
-
const p = (R = (k = d.lastRun) == null ? void 0 : k.process) == null ? void 0 : R.storagePath;
|
|
1173
|
-
p.startsWith(r) || t.push(p);
|
|
1174
|
-
}
|
|
1175
|
-
const s = t.length > 0 ? ` - storages (you'll loose all projects and calculation results stored in service instances):
|
|
1176
|
-
- ${t.join(`
|
|
1177
|
-
- `)}` : "", i = `
|
|
1178
|
-
You are going to reset the state of platforma service
|
|
1179
|
-
Things to be removed:
|
|
1180
|
-
- ${e.join(`
|
|
1239
|
+
e === d.currentInstanceName && t.push(
|
|
1240
|
+
"last command run cache ('pl-service start' shorthand will stop working until next full start command call)"
|
|
1241
|
+
), n = `
|
|
1242
|
+
You are going to reset the state of platforma service '${e}':
|
|
1243
|
+
- ${t.join(`
|
|
1181
1244
|
- `)}
|
|
1182
|
-
${s}
|
|
1183
1245
|
`;
|
|
1184
|
-
|
|
1246
|
+
} else {
|
|
1247
|
+
for (const s of d.instanceList)
|
|
1248
|
+
r.set(s, d.getInstanceInfo(s));
|
|
1249
|
+
t.push(
|
|
1250
|
+
"last command run cache ('pl-service start' shorthand will stop working until next full start command call)",
|
|
1251
|
+
`all service configurations stored in: ${d.instanceDir()} (including all associated docker containers and volumes)`
|
|
1252
|
+
), n = `
|
|
1253
|
+
You are going to reset the state of all platforma services configured with pl-bootstrap package.
|
|
1254
|
+
- ${t.join(`
|
|
1255
|
+
- `)}
|
|
1256
|
+
`;
|
|
1257
|
+
}
|
|
1258
|
+
if (this.logger.warn(n), !_r("Are you sure?")) {
|
|
1185
1259
|
this.logger.info("Reset action was canceled");
|
|
1186
1260
|
return;
|
|
1187
1261
|
}
|
|
1188
|
-
const
|
|
1189
|
-
|
|
1190
|
-
|
|
1191
|
-
|
|
1192
|
-
|
|
1193
|
-
|
|
1194
|
-
|
|
1262
|
+
for (const [s, i] of r.entries()) {
|
|
1263
|
+
if (i.cleanupCommands.length) {
|
|
1264
|
+
this.logger.info(`Wiping instance ${s} services`);
|
|
1265
|
+
const c = we(this.logger, i.cleanupCommands);
|
|
1266
|
+
ge(c.executed, `failed to wipe instance ${s} services`);
|
|
1267
|
+
}
|
|
1268
|
+
this.logger.info(`Destroying instance '${s}' data directory`), u.rmSync(d.instanceDir(s), { recursive: !0, force: !0 });
|
|
1269
|
+
}
|
|
1270
|
+
e || (this.logger.info(`Destroying state dir '${d.path()}'`), u.rmSync(d.path(), { recursive: !0, force: !0 })), this.logger.info(
|
|
1195
1271
|
`
|
|
1196
1272
|
If you want to remove all downloaded platforma binaries, delete '${d.binaries()}' dir manually
|
|
1197
1273
|
`
|
|
1198
1274
|
);
|
|
1199
1275
|
}
|
|
1200
1276
|
mergeLicenseEnvs(e) {
|
|
1201
|
-
e.license === void 0 && ((process.env.MI_LICENSE ?? "") != "" ? e.license = process.env.MI_LICENSE : (process.env.PL_LICENSE ?? "") != "" && (e.license = process.env.PL_LICENSE)), e["license-file"] === void 0 && e.license === void 0 && ((process.env.MI_LICENSE_FILE ?? "") != "" ? e["license-file"] = process.env.MI_LICENSE_FILE : (process.env.PL_LICENSE_FILE ?? "") != "" ? e["license-file"] = process.env.PL_LICENSE_FILE :
|
|
1277
|
+
e.license === void 0 && ((process.env.MI_LICENSE ?? "") != "" ? e.license = process.env.MI_LICENSE : (process.env.PL_LICENSE ?? "") != "" && (e.license = process.env.PL_LICENSE)), e["license-file"] === void 0 && e.license === void 0 && ((process.env.MI_LICENSE_FILE ?? "") != "" ? e["license-file"] = process.env.MI_LICENSE_FILE : (process.env.PL_LICENSE_FILE ?? "") != "" ? e["license-file"] = process.env.PL_LICENSE_FILE : u.existsSync(m.resolve(x.homedir(), ".pl.license")) && (e["license-file"] = m.resolve(x.homedir(), ".pl.license")));
|
|
1202
1278
|
}
|
|
1203
|
-
initAuthDriversList(e,
|
|
1204
|
-
const
|
|
1205
|
-
if (e["auth-htpasswd-file"] &&
|
|
1279
|
+
initAuthDriversList(e, t) {
|
|
1280
|
+
const r = [];
|
|
1281
|
+
if (e["auth-htpasswd-file"] && r.push({
|
|
1206
1282
|
driver: "htpasswd",
|
|
1207
|
-
path:
|
|
1283
|
+
path: m.resolve(t, e["auth-htpasswd-file"])
|
|
1208
1284
|
}), !!e["auth-ldap-server"] != !!e["auth-ldap-default-dn"])
|
|
1209
1285
|
throw new Error("LDAP auth settings require both 'server' and 'default DN' options to be set");
|
|
1210
|
-
if (e["auth-ldap-server"] &&
|
|
1286
|
+
if (e["auth-ldap-server"] && r.push({
|
|
1211
1287
|
driver: "ldap",
|
|
1212
1288
|
serverUrl: e["auth-ldap-server"],
|
|
1213
1289
|
defaultDN: e["auth-ldap-default-dn"]
|
|
1214
|
-
}),
|
|
1215
|
-
return [{ driver: "jwt", key: this.getLastJwt() }, ...
|
|
1290
|
+
}), r.length !== 0)
|
|
1291
|
+
return [{ driver: "jwt", key: this.getLastJwt() }, ...r];
|
|
1216
1292
|
}
|
|
1217
1293
|
/** Gets the last stored JWT secret key or generates it and stores in a file. */
|
|
1218
1294
|
getLastJwt() {
|
|
1219
|
-
const e = d.path("auth.jwt"),
|
|
1220
|
-
let
|
|
1221
|
-
return
|
|
1295
|
+
const e = d.path("auth.jwt"), t = "utf-8";
|
|
1296
|
+
let r = "";
|
|
1297
|
+
return u.existsSync(e) && (r = u.readFileSync(e, { encoding: t })), r == "" && (r = Cr(64), u.writeFileSync(e, r, { encoding: t })), r;
|
|
1222
1298
|
}
|
|
1223
|
-
|
|
1224
|
-
|
|
1225
|
-
env: {
|
|
1226
|
-
...process.env,
|
|
1227
|
-
PL_IMAGE: "scratch",
|
|
1228
|
-
PL_DATA_DB_ROOT: t,
|
|
1229
|
-
PL_DATA_PRIMARY_ROOT: t,
|
|
1230
|
-
PL_DATA_LIBRARY_ROOT: t,
|
|
1231
|
-
PL_DATA_WORKDIR_ROOT: t,
|
|
1232
|
-
PL_DATA_PACKAGE_ROOT: t,
|
|
1233
|
-
MINIO_IMAGE: "scratch",
|
|
1234
|
-
MINIO_STORAGE: t
|
|
1235
|
-
},
|
|
1236
|
-
stdio: "inherit"
|
|
1237
|
-
});
|
|
1238
|
-
s.status !== 0 && process.exit(s.status);
|
|
1239
|
-
}
|
|
1240
|
-
checkLicense(e, r) {
|
|
1241
|
-
if (!(e !== void 0 && e != "") && !(r !== void 0 && r != ""))
|
|
1299
|
+
checkLicense(e, t) {
|
|
1300
|
+
if (!(e !== void 0 && e != "") && !(t !== void 0 && t != ""))
|
|
1242
1301
|
throw this.logger.error(`A license for Platforma Backend must be set.
|
|
1243
1302
|
|
|
1244
1303
|
You can provide the license directly using the '--license' flag
|
|
@@ -1251,117 +1310,119 @@ or stored in '$HOME/.pl.license'.
|
|
|
1251
1310
|
|
|
1252
1311
|
You can obtain the license from "https://licensing.milaboratories.com".`), new Error("The license was not provided.");
|
|
1253
1312
|
}
|
|
1254
|
-
configureDockerStorage(e,
|
|
1255
|
-
const
|
|
1256
|
-
switch (e = e.toUpperCase(),
|
|
1313
|
+
configureDockerStorage(e, t) {
|
|
1314
|
+
const r = {}, n = t.type;
|
|
1315
|
+
switch (e = e.toUpperCase(), n) {
|
|
1257
1316
|
case "S3":
|
|
1258
|
-
return
|
|
1317
|
+
return r[`PL_DATA_${e}_TYPE`] = "S3", r[`PL_DATA_${e}_S3_BUCKET`] = t.bucketName, t.endpoint && (r[`PL_DATA_${e}_S3_ENDPOINT`] = t.endpoint), t.presignEndpoint && (r[`PL_DATA_${e}_S3_PRESIGN_ENDPOINT`] = t.presignEndpoint), t.region && (r[`PL_DATA_${e}_S3_REGION`] = t.region), t.key && (r[`PL_DATA_${e}_S3_KEY`] = t.key), t.secret && (r[`PL_DATA_${e}_S3_SECRET`] = t.secret), r;
|
|
1259
1318
|
case "FS":
|
|
1260
|
-
return
|
|
1319
|
+
return r[`PL_DATA_${e}_TYPE`] = "FS", r;
|
|
1261
1320
|
default:
|
|
1262
|
-
|
|
1321
|
+
_();
|
|
1263
1322
|
}
|
|
1264
1323
|
return {};
|
|
1265
1324
|
}
|
|
1266
|
-
renderRunInfo(e,
|
|
1267
|
-
var c,
|
|
1268
|
-
const
|
|
1269
|
-
switch (e.configPath &&
|
|
1325
|
+
renderRunInfo(e, t = 10) {
|
|
1326
|
+
var c, o;
|
|
1327
|
+
const r = [], n = (f) => f.padStart(t, " ");
|
|
1328
|
+
switch (e.configPath && r.push(`${n("config")}: ${e.configPath}`), e.apiAddr ? r.push(`${n("API")}: ${e.apiAddr}`) : e.apiPort ? r.push(`${n("API")}: 127.0.0.1:${e.apiPort.toString()}`) : r.push(`${n("API")}: 127.0.0.1:6345`), e.logPath && r.push(`${n("log")}: ${e.logPath}`), (c = e.primary) == null ? void 0 : c.type) {
|
|
1270
1329
|
case void 0:
|
|
1271
1330
|
break;
|
|
1272
1331
|
case "FS":
|
|
1273
|
-
|
|
1332
|
+
r.push(`${n("primary")}: ${e.primary.rootPath}`);
|
|
1274
1333
|
break;
|
|
1275
1334
|
case "S3":
|
|
1276
|
-
|
|
1277
|
-
`${
|
|
1335
|
+
r.push(
|
|
1336
|
+
`${n("primary")}: S3 at '${e.primary.endpoint ?? "AWS"}', bucket '${e.primary.bucketName}', prefix: '${e.primary.keyPrefix ?? ""}'`
|
|
1278
1337
|
);
|
|
1279
1338
|
break;
|
|
1280
1339
|
default:
|
|
1281
|
-
|
|
1340
|
+
_();
|
|
1282
1341
|
}
|
|
1283
|
-
switch ((
|
|
1342
|
+
switch ((o = e.library) == null ? void 0 : o.type) {
|
|
1284
1343
|
case void 0:
|
|
1285
1344
|
break;
|
|
1286
1345
|
case "FS":
|
|
1287
|
-
|
|
1346
|
+
r.push(`${n("library")}: ${e.library.rootPath}`);
|
|
1288
1347
|
break;
|
|
1289
1348
|
case "S3":
|
|
1290
|
-
|
|
1291
|
-
`${
|
|
1349
|
+
r.push(
|
|
1350
|
+
`${n("library")}: S3 at '${e.library.endpoint ?? "AWS"}', bucket '${e.library.bucketName}', prefix: '${e.library.keyPrefix ?? ""}'`
|
|
1292
1351
|
);
|
|
1293
1352
|
break;
|
|
1294
1353
|
default:
|
|
1295
|
-
|
|
1354
|
+
_();
|
|
1296
1355
|
}
|
|
1297
|
-
return e.work &&
|
|
1356
|
+
return e.work && r.push(`${n("workdirs")}: ${e.work.rootPath}`), e.dbPath && r.push(`${n("db")}: ${e.dbPath}`), r.join(`
|
|
1298
1357
|
`);
|
|
1299
1358
|
}
|
|
1300
1359
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
1301
1360
|
readComposeFile(e) {
|
|
1302
|
-
const
|
|
1303
|
-
return
|
|
1361
|
+
const t = u.readFileSync(e);
|
|
1362
|
+
return ue.parse(t.toString());
|
|
1304
1363
|
}
|
|
1305
|
-
writeComposeFile(e,
|
|
1306
|
-
|
|
1364
|
+
writeComposeFile(e, t) {
|
|
1365
|
+
u.writeFileSync(e, ue.stringify(t));
|
|
1307
1366
|
}
|
|
1308
1367
|
}
|
|
1309
|
-
function
|
|
1310
|
-
|
|
1311
|
-
|
|
1312
|
-
|
|
1313
|
-
|
|
1314
|
-
|
|
1368
|
+
function ge(a, e) {
|
|
1369
|
+
for (const t of a) {
|
|
1370
|
+
if (t.error)
|
|
1371
|
+
throw t.error;
|
|
1372
|
+
const r = e ?? "failed to run command";
|
|
1373
|
+
if (t.status !== 0)
|
|
1374
|
+
throw new Error(`${r}, process exited with code '${t.status}'`);
|
|
1375
|
+
}
|
|
1315
1376
|
}
|
|
1316
|
-
const
|
|
1377
|
+
const re = class re extends L {
|
|
1317
1378
|
async run() {
|
|
1318
|
-
const { flags: e } = await this.parse(
|
|
1319
|
-
new
|
|
1379
|
+
const { flags: e } = await this.parse(re), t = I(e["log-level"]);
|
|
1380
|
+
new O(t).cleanupInstance();
|
|
1320
1381
|
}
|
|
1321
1382
|
};
|
|
1322
|
-
|
|
1323
|
-
...
|
|
1383
|
+
l(re, "description", "Clear service state (forget last run command, destroy docker services, volumes and so on)"), l(re, "examples", ["<%= config.bin %> <%= command.id %>"]), l(re, "flags", {
|
|
1384
|
+
...A
|
|
1324
1385
|
});
|
|
1325
|
-
let
|
|
1326
|
-
const
|
|
1386
|
+
let Le = re;
|
|
1387
|
+
const te = class te extends L {
|
|
1327
1388
|
async run() {
|
|
1328
|
-
const { flags: e } = await this.parse(
|
|
1329
|
-
new
|
|
1389
|
+
const { flags: e } = await this.parse(te), t = I(e["log-level"]);
|
|
1390
|
+
new O(t).startLast();
|
|
1330
1391
|
}
|
|
1331
1392
|
};
|
|
1332
|
-
|
|
1333
|
-
...
|
|
1393
|
+
l(te, "description", "Start last run service configuraiton"), l(te, "examples", ["<%= config.bin %> <%= command.id %>"]), l(te, "flags", {
|
|
1394
|
+
...A
|
|
1334
1395
|
});
|
|
1335
|
-
let
|
|
1336
|
-
const
|
|
1396
|
+
let Ae = te;
|
|
1397
|
+
const ae = class ae extends L {
|
|
1337
1398
|
async run() {
|
|
1338
|
-
const { flags: e } = await this.parse(
|
|
1339
|
-
|
|
1399
|
+
const { flags: e } = await this.parse(ae), t = I(e["log-level"]), r = new O(t);
|
|
1400
|
+
d.currentInstance ? r.stopInstance(d.currentInstance) : t.warn("up/start command was not called for any instance, nothing to stop");
|
|
1340
1401
|
}
|
|
1341
1402
|
};
|
|
1342
|
-
|
|
1343
|
-
...
|
|
1403
|
+
l(ae, "description", "Stop platforma service"), l(ae, "examples", ["<%= config.bin %> <%= command.id %>"]), l(ae, "flags", {
|
|
1404
|
+
...A
|
|
1344
1405
|
});
|
|
1345
|
-
let
|
|
1346
|
-
|
|
1406
|
+
let Ie = ae;
|
|
1407
|
+
var E;
|
|
1408
|
+
let ot = (E = class extends L {
|
|
1347
1409
|
async run() {
|
|
1348
|
-
const { flags: e } = await this.parse(
|
|
1349
|
-
|
|
1350
|
-
const s = e["auth-enabled"], i = s ? {
|
|
1410
|
+
const { flags: e } = await this.parse(E), t = I(e["log-level"]), r = new O(t);
|
|
1411
|
+
r.mergeLicenseEnvs(e);
|
|
1412
|
+
const n = "docker", s = e["auth-enabled"], i = s ? {
|
|
1351
1413
|
enabled: s,
|
|
1352
|
-
drivers:
|
|
1353
|
-
} : void 0,
|
|
1354
|
-
for (const
|
|
1355
|
-
|
|
1356
|
-
const
|
|
1357
|
-
t.startDocker(n, {
|
|
1414
|
+
drivers: r.initAuthDriversList(e, ".")
|
|
1415
|
+
} : void 0, c = e.storage ? m.join(".", e.storage) : d.instanceDir(n), o = [];
|
|
1416
|
+
for (const h of e.mount ?? [])
|
|
1417
|
+
o.push({ hostPath: h });
|
|
1418
|
+
const f = e.arch ? `linux/${e.arch}` : void 0, g = r.createDocker(n, c, {
|
|
1358
1419
|
primaryStorageURL: e["storage-primary"],
|
|
1359
1420
|
workStoragePath: e["storage-work"],
|
|
1360
1421
|
libraryStorageURL: e["storage-library"],
|
|
1361
1422
|
image: e.image,
|
|
1362
1423
|
version: e.version,
|
|
1363
|
-
platformOverride:
|
|
1364
|
-
customMounts:
|
|
1424
|
+
platformOverride: f,
|
|
1425
|
+
customMounts: o,
|
|
1365
1426
|
license: e.license,
|
|
1366
1427
|
licenseFile: e["license-file"],
|
|
1367
1428
|
auth: i,
|
|
@@ -1372,97 +1433,157 @@ const J = class J extends F {
|
|
|
1372
1433
|
debugAddr: e["debug-listen"],
|
|
1373
1434
|
debugPort: e["debug-port"]
|
|
1374
1435
|
});
|
|
1436
|
+
r.switchInstance(g);
|
|
1375
1437
|
}
|
|
1376
|
-
}
|
|
1377
|
-
|
|
1378
|
-
...
|
|
1379
|
-
...
|
|
1380
|
-
...
|
|
1381
|
-
...
|
|
1382
|
-
...
|
|
1383
|
-
...
|
|
1384
|
-
...te,
|
|
1385
|
-
...ze,
|
|
1386
|
-
...se,
|
|
1387
|
-
...Pe,
|
|
1438
|
+
}, l(E, "description", "Run platforma backend service with 'FS' primary storage type"), l(E, "examples", ["<%= config.bin %> <%= command.id %>"]), l(E, "flags", {
|
|
1439
|
+
...A,
|
|
1440
|
+
...z,
|
|
1441
|
+
...me,
|
|
1442
|
+
...W,
|
|
1443
|
+
...he,
|
|
1444
|
+
...V,
|
|
1445
|
+
...q,
|
|
1388
1446
|
...ye,
|
|
1389
|
-
...
|
|
1390
|
-
|
|
1391
|
-
|
|
1392
|
-
|
|
1447
|
+
...Y,
|
|
1448
|
+
...oe,
|
|
1449
|
+
...ce,
|
|
1450
|
+
...le
|
|
1451
|
+
}), E);
|
|
1452
|
+
var R;
|
|
1453
|
+
let lt = (R = class extends L {
|
|
1393
1454
|
async run() {
|
|
1394
|
-
const { flags: e } = await this.parse(
|
|
1395
|
-
|
|
1396
|
-
const s = e["pl-workdir"] ?? ".", i = e.storage ?
|
|
1397
|
-
|
|
1398
|
-
e["pl-sources"] && (
|
|
1399
|
-
|
|
1400
|
-
e["grpc-listen"] ?
|
|
1401
|
-
|
|
1402
|
-
e["monitoring-listen"] ?
|
|
1403
|
-
|
|
1455
|
+
const { flags: e } = await this.parse(R), t = I(e["log-level"]), r = new O(t);
|
|
1456
|
+
r.mergeLicenseEnvs(e);
|
|
1457
|
+
const n = "local", s = e["pl-workdir"] ?? ".", i = e.storage ? m.join(s, e.storage) : d.instanceDir(n), c = e["pl-log-file"] ? m.join(s, e["pl-log-file"]) : void 0, o = r.initAuthDriversList(e, s), f = e["auth-enabled"] ?? o !== void 0;
|
|
1458
|
+
let g = e["pl-binary"];
|
|
1459
|
+
e["pl-sources"] && (g = r.buildPlatforma({ repoRoot: e["pl-sources"] }));
|
|
1460
|
+
let h = "127.0.0.1:6345";
|
|
1461
|
+
e["grpc-listen"] ? h = e["grpc-listen"] : e["grpc-port"] && (h = `127.0.0.1:${e["grpc-port"]}`);
|
|
1462
|
+
let w = "127.0.0.1:9090";
|
|
1463
|
+
e["monitoring-listen"] ? w = e["monitoring-listen"] : e["monitoring-port"] && (w = `127.0.0.1:${e["monitoring-port"]}`);
|
|
1464
|
+
let v = "127.0.0.1:9091";
|
|
1404
1465
|
e["debug-listen"] ? v = e["debug-listen"] : e["debug-port"] && (v = `127.0.0.1:${e["debug-port"]}`);
|
|
1405
|
-
const
|
|
1406
|
-
binaryPath:
|
|
1466
|
+
const y = {
|
|
1467
|
+
binaryPath: g,
|
|
1407
1468
|
version: e.version,
|
|
1408
1469
|
configPath: e.config,
|
|
1409
1470
|
workdir: e["pl-workdir"],
|
|
1410
1471
|
primaryURL: e["storage-primary"],
|
|
1411
1472
|
libraryURL: e["storage-library"],
|
|
1412
1473
|
configOptions: {
|
|
1413
|
-
grpc: { listen:
|
|
1414
|
-
monitoring: { listen:
|
|
1474
|
+
grpc: { listen: h },
|
|
1475
|
+
monitoring: { listen: w },
|
|
1415
1476
|
debug: { listen: v },
|
|
1416
1477
|
license: { value: e.license, file: e["license-file"] },
|
|
1417
|
-
log: { path:
|
|
1478
|
+
log: { path: c },
|
|
1418
1479
|
localRoot: i,
|
|
1419
|
-
core: { auth: { enabled:
|
|
1480
|
+
core: { auth: { enabled: f, drivers: o } },
|
|
1420
1481
|
storages: {
|
|
1421
1482
|
work: { type: "FS", rootPath: e["storage-work"] }
|
|
1422
1483
|
}
|
|
1423
1484
|
}
|
|
1424
|
-
};
|
|
1425
|
-
|
|
1426
|
-
r.
|
|
1485
|
+
}, P = r.createLocal(n, y);
|
|
1486
|
+
y.binaryPath ? r.switchInstance(P) : Ge(t, { version: e.version }).then(() => {
|
|
1487
|
+
const S = r.switchInstance(P);
|
|
1488
|
+
setTimeout(() => {
|
|
1489
|
+
for (const k of S)
|
|
1490
|
+
k.unref();
|
|
1491
|
+
}, 1e3);
|
|
1492
|
+
}).catch(function(S) {
|
|
1493
|
+
t.error(S.message);
|
|
1427
1494
|
});
|
|
1428
1495
|
}
|
|
1496
|
+
}, l(R, "description", "Run Platforma Backend service as local process on current host (no docker container)"), l(R, "examples", ["<%= config.bin %> <%= command.id %>"]), l(R, "flags", {
|
|
1497
|
+
...A,
|
|
1498
|
+
...W,
|
|
1499
|
+
...z,
|
|
1500
|
+
...Ce,
|
|
1501
|
+
...Me,
|
|
1502
|
+
...je,
|
|
1503
|
+
...q,
|
|
1504
|
+
...Y,
|
|
1505
|
+
...oe,
|
|
1506
|
+
...ce,
|
|
1507
|
+
...le,
|
|
1508
|
+
...Ne,
|
|
1509
|
+
..._e,
|
|
1510
|
+
...V
|
|
1511
|
+
}), R);
|
|
1512
|
+
const M = class M extends L {
|
|
1513
|
+
async run() {
|
|
1514
|
+
const { flags: e, args: t } = await this.parse(M), r = I(e["log-level"]), n = new O(r), s = t.name;
|
|
1515
|
+
e.all && (n.cleanupInstance(), process.exit(0)), s || (r.error("Please, specify name of instance to be removed or set '--all' flag instead"), process.exit(1)), n.cleanupInstance(s);
|
|
1516
|
+
}
|
|
1429
1517
|
};
|
|
1430
|
-
|
|
1431
|
-
...
|
|
1432
|
-
|
|
1433
|
-
|
|
1434
|
-
|
|
1435
|
-
|
|
1436
|
-
|
|
1437
|
-
|
|
1438
|
-
...se,
|
|
1439
|
-
...Pe,
|
|
1440
|
-
...ye,
|
|
1441
|
-
...be,
|
|
1442
|
-
...Ke,
|
|
1443
|
-
...qe,
|
|
1444
|
-
...ie
|
|
1518
|
+
l(M, "description", "List available instances"), l(M, "examples", ["<%= config.bin %> <%= command.id %>"]), l(M, "flags", {
|
|
1519
|
+
...A,
|
|
1520
|
+
all: b.boolean({
|
|
1521
|
+
description: "remove all known instances",
|
|
1522
|
+
required: !1
|
|
1523
|
+
})
|
|
1524
|
+
}), l(M, "args", {
|
|
1525
|
+
name: H.string({ required: !1 })
|
|
1445
1526
|
});
|
|
1446
|
-
let
|
|
1447
|
-
|
|
1448
|
-
let Qr = ($ = class extends F {
|
|
1527
|
+
let Oe = M;
|
|
1528
|
+
const j = class j extends L {
|
|
1449
1529
|
async run() {
|
|
1450
|
-
const { flags: e } = await this.parse(
|
|
1451
|
-
|
|
1452
|
-
|
|
1530
|
+
const { flags: e, args: t } = await this.parse(j), r = I(e["log-level"]), n = new O(r), s = t.name ?? d.currentInstanceName;
|
|
1531
|
+
s || (r.info("no pl service instance selected. No service was stopped"), process.exit(0)), n.stopInstance(d.getInstanceInfo(s));
|
|
1532
|
+
}
|
|
1533
|
+
};
|
|
1534
|
+
l(j, "description", "List available instances"), l(j, "examples", ["<%= config.bin %> <%= command.id %>"]), l(j, "flags", {
|
|
1535
|
+
...A
|
|
1536
|
+
}), l(j, "args", {
|
|
1537
|
+
name: H.string({ required: !1 })
|
|
1538
|
+
});
|
|
1539
|
+
let Ee = j;
|
|
1540
|
+
const ne = class ne extends L {
|
|
1541
|
+
async run() {
|
|
1542
|
+
await this.parse(ne);
|
|
1543
|
+
const e = d.instanceList, t = d.currentInstanceName;
|
|
1544
|
+
for (const r of e) {
|
|
1545
|
+
const n = [], s = d.getInstanceInfo(r);
|
|
1546
|
+
d.isInstanceActive(s) && n.push("status:up"), n.push(`type:${s.type}`), console.log(r === t ? ` * ${r} (${n.join(", ")})` : ` ${r} (${n.join(", ")})`);
|
|
1547
|
+
}
|
|
1548
|
+
}
|
|
1549
|
+
};
|
|
1550
|
+
l(ne, "description", "List available instances"), l(ne, "examples", ["<%= config.bin %> <%= command.id %>"]), l(ne, "flags", {});
|
|
1551
|
+
let Re = ne;
|
|
1552
|
+
const B = class B extends L {
|
|
1553
|
+
async run() {
|
|
1554
|
+
const { flags: e, args: t } = await this.parse(B), r = I(e["log-level"]), n = new O(r), s = t.name ?? d.currentInstanceName;
|
|
1555
|
+
s || (r.error("no pl service instance is selected. Select instance with 'select' command or provide name to 'up'"), process.exit(1));
|
|
1556
|
+
const i = n.switchInstance(d.getInstanceInfo(s));
|
|
1557
|
+
setTimeout(() => {
|
|
1558
|
+
for (const c of i)
|
|
1559
|
+
c.unref();
|
|
1560
|
+
}, 1e3);
|
|
1561
|
+
}
|
|
1562
|
+
};
|
|
1563
|
+
l(B, "description", "List available instances"), l(B, "examples", ["<%= config.bin %> <%= command.id %>"]), l(B, "flags", {
|
|
1564
|
+
...A
|
|
1565
|
+
}), l(B, "args", {
|
|
1566
|
+
name: H.string({ required: !1 })
|
|
1567
|
+
});
|
|
1568
|
+
let Te = B;
|
|
1569
|
+
var T;
|
|
1570
|
+
let dt = (T = class extends L {
|
|
1571
|
+
async run() {
|
|
1572
|
+
const { flags: e } = await this.parse(T), t = I(e["log-level"]), r = new O(t);
|
|
1573
|
+
r.mergeLicenseEnvs(e);
|
|
1574
|
+
const n = "docker-s3", s = e["auth-enabled"], i = s ? {
|
|
1453
1575
|
enabled: s,
|
|
1454
|
-
drivers:
|
|
1455
|
-
} : void 0,
|
|
1456
|
-
for (const
|
|
1457
|
-
|
|
1458
|
-
const
|
|
1459
|
-
t.startDockerS3(n, {
|
|
1576
|
+
drivers: r.initAuthDriversList(e, ".")
|
|
1577
|
+
} : void 0, c = e.storage ? m.join(".", e.storage) : d.instanceDir(n), o = [];
|
|
1578
|
+
for (const w of e.mount ?? [])
|
|
1579
|
+
o.push({ hostPath: w });
|
|
1580
|
+
const f = e.arch ? `linux/${e.arch}` : void 0, g = e["minio-presign-host"] ? "minio" : "localhost", h = r.createDockerS3(n, c, {
|
|
1460
1581
|
image: e.image,
|
|
1461
1582
|
version: e.version,
|
|
1462
1583
|
license: e.license,
|
|
1463
1584
|
licenseFile: e["license-file"],
|
|
1464
|
-
platformOverride:
|
|
1465
|
-
customMounts:
|
|
1585
|
+
platformOverride: f,
|
|
1586
|
+
customMounts: o,
|
|
1466
1587
|
auth: i,
|
|
1467
1588
|
grpcAddr: e["grpc-listen"],
|
|
1468
1589
|
grpcPort: e["grpc-port"],
|
|
@@ -1470,92 +1591,274 @@ let Qr = ($ = class extends F {
|
|
|
1470
1591
|
monitoringPort: e["monitoring-port"],
|
|
1471
1592
|
debugAddr: e["debug-listen"],
|
|
1472
1593
|
debugPort: e["debug-port"],
|
|
1473
|
-
presignHost:
|
|
1594
|
+
presignHost: g
|
|
1474
1595
|
});
|
|
1596
|
+
r.switchInstance(h);
|
|
1475
1597
|
}
|
|
1476
|
-
},
|
|
1477
|
-
...
|
|
1478
|
-
...
|
|
1479
|
-
...
|
|
1480
|
-
...
|
|
1481
|
-
...
|
|
1482
|
-
...
|
|
1483
|
-
...
|
|
1484
|
-
...
|
|
1485
|
-
...
|
|
1486
|
-
...
|
|
1487
|
-
}),
|
|
1488
|
-
|
|
1598
|
+
}, l(T, "description", "Run platforma backend service with 'S3' primary storage type"), l(T, "examples", ["<%= config.bin %> <%= command.id %>"]), l(T, "flags", {
|
|
1599
|
+
...A,
|
|
1600
|
+
...z,
|
|
1601
|
+
...me,
|
|
1602
|
+
...W,
|
|
1603
|
+
...he,
|
|
1604
|
+
...V,
|
|
1605
|
+
...q,
|
|
1606
|
+
...ye,
|
|
1607
|
+
...Y,
|
|
1608
|
+
...hr
|
|
1609
|
+
}), T);
|
|
1610
|
+
var F;
|
|
1611
|
+
let gt = (F = class extends L {
|
|
1489
1612
|
async run() {
|
|
1490
|
-
const { flags: e } = await this.parse(
|
|
1491
|
-
|
|
1492
|
-
const s = e["pl-workdir"] ?? ".", i = e.storage ?
|
|
1493
|
-
|
|
1494
|
-
e["pl-sources"] && (
|
|
1495
|
-
|
|
1496
|
-
e["grpc-listen"] ?
|
|
1497
|
-
|
|
1498
|
-
e["monitoring-listen"] ?
|
|
1499
|
-
|
|
1613
|
+
const { flags: e } = await this.parse(F), t = I(e["log-level"]), r = new O(t);
|
|
1614
|
+
r.mergeLicenseEnvs(e);
|
|
1615
|
+
const n = "local-s3", s = e["pl-workdir"] ?? ".", i = e.storage ? m.join(s, e.storage) : d.instanceDir(n), c = e["pl-log-file"] ? m.join(s, e["pl-log-file"]) : void 0, o = r.initAuthDriversList(e, s), f = e["auth-enabled"] ?? o !== void 0;
|
|
1616
|
+
let g = e["pl-binary"];
|
|
1617
|
+
e["pl-sources"] && (g = r.buildPlatforma({ repoRoot: e["pl-sources"] }));
|
|
1618
|
+
let h = "127.0.0.1:6345";
|
|
1619
|
+
e["grpc-listen"] ? h = e["grpc-listen"] : e["grpc-port"] && (h = `127.0.0.1:${e["grpc-port"]}`);
|
|
1620
|
+
let w = "127.0.0.1:9090";
|
|
1621
|
+
e["monitoring-listen"] ? w = e["monitoring-listen"] : e["monitoring-port"] && (w = `127.0.0.1:${e["monitoring-port"]}`);
|
|
1622
|
+
let v = "127.0.0.1:9091";
|
|
1500
1623
|
e["debug-listen"] ? v = e["debug-listen"] : e["debug-port"] && (v = `127.0.0.1:${e["debug-port"]}`);
|
|
1501
|
-
const
|
|
1502
|
-
binaryPath:
|
|
1624
|
+
const y = {
|
|
1625
|
+
binaryPath: g,
|
|
1503
1626
|
version: e.version,
|
|
1504
1627
|
configPath: e.config,
|
|
1505
1628
|
workdir: e["pl-workdir"],
|
|
1506
1629
|
primaryURL: e["storage-primary"],
|
|
1507
1630
|
libraryURL: e["storage-library"],
|
|
1508
|
-
minioPort: e["s3-
|
|
1509
|
-
minioConsolePort: e["s3-console-
|
|
1631
|
+
minioPort: e["s3-port"],
|
|
1632
|
+
minioConsolePort: e["s3-console-port"],
|
|
1510
1633
|
configOptions: {
|
|
1511
|
-
grpc: { listen:
|
|
1512
|
-
monitoring: { listen:
|
|
1634
|
+
grpc: { listen: h },
|
|
1635
|
+
monitoring: { listen: w },
|
|
1513
1636
|
debug: { listen: v },
|
|
1514
1637
|
license: { value: e.license, file: e["license-file"] },
|
|
1515
|
-
log: { path:
|
|
1638
|
+
log: { path: c },
|
|
1516
1639
|
localRoot: i,
|
|
1517
1640
|
core: {
|
|
1518
|
-
auth: { enabled:
|
|
1641
|
+
auth: { enabled: f, drivers: o }
|
|
1519
1642
|
},
|
|
1520
1643
|
storages: {
|
|
1521
1644
|
work: { type: "FS", rootPath: e["storage-work"] }
|
|
1522
1645
|
}
|
|
1523
1646
|
}
|
|
1647
|
+
}, P = r.createLocalS3(n, y);
|
|
1648
|
+
y.binaryPath ? r.switchInstance(P) : Ge(t, { version: e.version }).then(() => {
|
|
1649
|
+
const S = r.switchInstance(P);
|
|
1650
|
+
setTimeout(() => {
|
|
1651
|
+
for (const k of S)
|
|
1652
|
+
k.unref();
|
|
1653
|
+
}, 1e3);
|
|
1654
|
+
}).catch(function(S) {
|
|
1655
|
+
t.error(S.message);
|
|
1656
|
+
});
|
|
1657
|
+
}
|
|
1658
|
+
}, l(F, "description", "Run Platforma Backend service as local process on current host (no docker container)"), l(F, "examples", ["<%= config.bin %> <%= command.id %>"]), l(F, "flags", {
|
|
1659
|
+
...A,
|
|
1660
|
+
...W,
|
|
1661
|
+
...z,
|
|
1662
|
+
...mr,
|
|
1663
|
+
...Ce,
|
|
1664
|
+
...Me,
|
|
1665
|
+
...je,
|
|
1666
|
+
...q,
|
|
1667
|
+
...Y,
|
|
1668
|
+
...oe,
|
|
1669
|
+
...ce,
|
|
1670
|
+
...le,
|
|
1671
|
+
...Ne,
|
|
1672
|
+
..._e,
|
|
1673
|
+
...V
|
|
1674
|
+
}), F);
|
|
1675
|
+
const G = class G extends L {
|
|
1676
|
+
async run() {
|
|
1677
|
+
const { flags: e, args: t } = await this.parse(G), r = I(e["log-level"]), n = new O(r);
|
|
1678
|
+
n.mergeLicenseEnvs(e);
|
|
1679
|
+
const s = t.name, i = e["auth-enabled"], c = i ? {
|
|
1680
|
+
enabled: i,
|
|
1681
|
+
drivers: n.initAuthDriversList(e, ".")
|
|
1682
|
+
} : void 0, o = e.storage ? m.join(".", e.storage) : d.instanceDir(s), f = [];
|
|
1683
|
+
for (const h of e.mount ?? [])
|
|
1684
|
+
f.push({ hostPath: h });
|
|
1685
|
+
const g = e.arch ? `linux/${e.arch}` : void 0;
|
|
1686
|
+
n.createDocker(s, o, {
|
|
1687
|
+
primaryStorageURL: e["storage-primary"],
|
|
1688
|
+
workStoragePath: e["storage-work"],
|
|
1689
|
+
libraryStorageURL: e["storage-library"],
|
|
1690
|
+
image: e.image,
|
|
1691
|
+
version: e.version,
|
|
1692
|
+
platformOverride: g,
|
|
1693
|
+
customMounts: f,
|
|
1694
|
+
license: e.license,
|
|
1695
|
+
licenseFile: e["license-file"],
|
|
1696
|
+
auth: c,
|
|
1697
|
+
grpcAddr: e["grpc-listen"],
|
|
1698
|
+
grpcPort: e["grpc-port"],
|
|
1699
|
+
monitoringAddr: e["monitoring-listen"],
|
|
1700
|
+
monitoringPort: e["monitoring-port"],
|
|
1701
|
+
debugAddr: e["debug-listen"],
|
|
1702
|
+
debugPort: e["debug-port"]
|
|
1703
|
+
}), r.info(`Instance '${s}' was created. To start it run 'up' command`);
|
|
1704
|
+
}
|
|
1705
|
+
};
|
|
1706
|
+
l(G, "description", "Run Platforma Backend service as docker container on current host"), l(G, "examples", ["<%= config.bin %> <%= command.id %>"]), l(G, "flags", {
|
|
1707
|
+
...A,
|
|
1708
|
+
...z,
|
|
1709
|
+
...me,
|
|
1710
|
+
...W,
|
|
1711
|
+
...he,
|
|
1712
|
+
...V,
|
|
1713
|
+
...q,
|
|
1714
|
+
...ye,
|
|
1715
|
+
...Y,
|
|
1716
|
+
...oe,
|
|
1717
|
+
...ce,
|
|
1718
|
+
...le
|
|
1719
|
+
}), l(G, "args", {
|
|
1720
|
+
name: H.string({ required: !0 })
|
|
1721
|
+
});
|
|
1722
|
+
let Fe = G;
|
|
1723
|
+
const U = class U extends L {
|
|
1724
|
+
async run() {
|
|
1725
|
+
const { flags: e, args: t } = await this.parse(U), r = I(e["log-level"]), n = new O(r);
|
|
1726
|
+
n.mergeLicenseEnvs(e);
|
|
1727
|
+
const s = t.name, i = e["pl-workdir"] ?? ".", c = e.storage ? m.join(i, e.storage) : d.instanceDir(s), o = e["pl-log-file"] ? m.join(i, e["pl-log-file"]) : void 0, f = n.initAuthDriversList(e, i), g = e["auth-enabled"] ?? f !== void 0;
|
|
1728
|
+
let h = e["pl-binary"];
|
|
1729
|
+
e["pl-sources"] && (h = n.buildPlatforma({ repoRoot: e["pl-sources"] }));
|
|
1730
|
+
let w = "127.0.0.1:6345";
|
|
1731
|
+
e["grpc-listen"] ? w = e["grpc-listen"] : e["grpc-port"] && (w = `127.0.0.1:${e["grpc-port"]}`);
|
|
1732
|
+
let v = "127.0.0.1:9090";
|
|
1733
|
+
e["monitoring-listen"] ? v = e["monitoring-listen"] : e["monitoring-port"] && (v = `127.0.0.1:${e["monitoring-port"]}`);
|
|
1734
|
+
let y = "127.0.0.1:9091";
|
|
1735
|
+
e["debug-listen"] ? y = e["debug-listen"] : e["debug-port"] && (y = `127.0.0.1:${e["debug-port"]}`);
|
|
1736
|
+
const P = {
|
|
1737
|
+
binaryPath: h,
|
|
1738
|
+
version: e.version,
|
|
1739
|
+
configPath: e.config,
|
|
1740
|
+
workdir: e["pl-workdir"],
|
|
1741
|
+
primaryURL: e["storage-primary"],
|
|
1742
|
+
libraryURL: e["storage-library"],
|
|
1743
|
+
configOptions: {
|
|
1744
|
+
grpc: { listen: w },
|
|
1745
|
+
monitoring: { listen: v },
|
|
1746
|
+
debug: { listen: y },
|
|
1747
|
+
license: { value: e.license, file: e["license-file"] },
|
|
1748
|
+
log: { path: o },
|
|
1749
|
+
localRoot: c,
|
|
1750
|
+
core: { auth: { enabled: g, drivers: f } },
|
|
1751
|
+
storages: {
|
|
1752
|
+
work: { type: "FS", rootPath: e["storage-work"] }
|
|
1753
|
+
}
|
|
1754
|
+
}
|
|
1524
1755
|
};
|
|
1525
|
-
|
|
1526
|
-
|
|
1756
|
+
switch (t.mode) {
|
|
1757
|
+
case "s3": {
|
|
1758
|
+
r.info("Creating instance configuration, data directory and other stuff..."), n.createLocalS3(s, {
|
|
1759
|
+
...P,
|
|
1760
|
+
minioPort: e["s3-port"],
|
|
1761
|
+
minioConsolePort: e["s3-console-port"]
|
|
1762
|
+
});
|
|
1763
|
+
break;
|
|
1764
|
+
}
|
|
1765
|
+
case void 0: {
|
|
1766
|
+
e["s3-port"] && r.warn("flag 's3-port' is only for 's3' mode"), e["s3-console-port"] && r.warn("flag 's3-console-port' is only for 's3' mode"), n.createLocal(s, P);
|
|
1767
|
+
break;
|
|
1768
|
+
}
|
|
1769
|
+
}
|
|
1770
|
+
if (P.binaryPath) {
|
|
1771
|
+
r.info(`Instance '${s}' was created. To start it run 'up' command`);
|
|
1772
|
+
return;
|
|
1773
|
+
}
|
|
1774
|
+
Ge(r, { version: e.version }).then(() => r.info(`Instance '${s}' was created. To start it run 'pl up' command`)).catch(function(k) {
|
|
1775
|
+
r.error(k.message);
|
|
1527
1776
|
});
|
|
1528
1777
|
}
|
|
1529
1778
|
};
|
|
1530
|
-
|
|
1531
|
-
...
|
|
1532
|
-
...
|
|
1533
|
-
...
|
|
1534
|
-
...
|
|
1535
|
-
...
|
|
1536
|
-
...
|
|
1537
|
-
...
|
|
1538
|
-
...
|
|
1539
|
-
...
|
|
1540
|
-
...
|
|
1779
|
+
l(U, "description", "Run Platforma Backend service as local process on current host (no docker container)"), l(U, "examples", ["<%= config.bin %> <%= command.id %>"]), l(U, "flags", {
|
|
1780
|
+
...A,
|
|
1781
|
+
...W,
|
|
1782
|
+
...mr,
|
|
1783
|
+
...z,
|
|
1784
|
+
...Ce,
|
|
1785
|
+
...Me,
|
|
1786
|
+
...je,
|
|
1787
|
+
...q,
|
|
1788
|
+
...Y,
|
|
1789
|
+
...oe,
|
|
1790
|
+
...ce,
|
|
1791
|
+
...le,
|
|
1792
|
+
...Ne,
|
|
1793
|
+
..._e,
|
|
1794
|
+
...V
|
|
1795
|
+
}), l(U, "args", {
|
|
1796
|
+
name: H.string({ required: !0 }),
|
|
1797
|
+
mode: H.string({ options: ["s3"], required: !1 })
|
|
1798
|
+
});
|
|
1799
|
+
let xe = U;
|
|
1800
|
+
const J = class J extends L {
|
|
1801
|
+
async run() {
|
|
1802
|
+
const { flags: e, args: t } = await this.parse(J), r = I(e["log-level"]), n = new O(r);
|
|
1803
|
+
n.mergeLicenseEnvs(e);
|
|
1804
|
+
const s = t.name, i = e["auth-enabled"], c = i ? {
|
|
1805
|
+
enabled: i,
|
|
1806
|
+
drivers: n.initAuthDriversList(e, ".")
|
|
1807
|
+
} : void 0, o = e.storage ? m.join(".", e.storage) : d.instanceDir(s), f = [];
|
|
1808
|
+
for (const w of e.mount ?? [])
|
|
1809
|
+
f.push({ hostPath: w });
|
|
1810
|
+
const g = e.arch ? `linux/${e.arch}` : void 0, h = e["minio-presign-host"] ? "minio" : "localhost";
|
|
1811
|
+
n.createDockerS3(s, o, {
|
|
1812
|
+
image: e.image,
|
|
1813
|
+
version: e.version,
|
|
1814
|
+
license: e.license,
|
|
1815
|
+
licenseFile: e["license-file"],
|
|
1816
|
+
platformOverride: g,
|
|
1817
|
+
customMounts: f,
|
|
1818
|
+
auth: c,
|
|
1819
|
+
grpcAddr: e["grpc-listen"],
|
|
1820
|
+
grpcPort: e["grpc-port"],
|
|
1821
|
+
monitoringAddr: e["monitoring-listen"],
|
|
1822
|
+
monitoringPort: e["monitoring-port"],
|
|
1823
|
+
debugAddr: e["debug-listen"],
|
|
1824
|
+
debugPort: e["debug-port"],
|
|
1825
|
+
presignHost: h
|
|
1826
|
+
}), r.info(`Instance '${s}' was created. To start it run 'up' command`), e["minio-presign-host"] && r.info(" NOTE: make sure you have 'minio' host in your hosts file as 127.0.0.1 address");
|
|
1827
|
+
}
|
|
1828
|
+
};
|
|
1829
|
+
l(J, "description", "Run Platforma Backend service as docker container on current host with MinIO as local S3 storage"), l(J, "examples", ["<%= config.bin %> <%= command.id %>"]), l(J, "flags", {
|
|
1830
|
+
...A,
|
|
1831
|
+
...z,
|
|
1832
|
+
...me,
|
|
1833
|
+
...W,
|
|
1834
|
+
...he,
|
|
1835
|
+
...V,
|
|
1836
|
+
...q,
|
|
1541
1837
|
...ye,
|
|
1542
|
-
...
|
|
1543
|
-
...
|
|
1544
|
-
|
|
1545
|
-
|
|
1838
|
+
...Y,
|
|
1839
|
+
...hr
|
|
1840
|
+
}), l(J, "args", {
|
|
1841
|
+
name: H.string({ required: !0 })
|
|
1546
1842
|
});
|
|
1547
|
-
let
|
|
1548
|
-
const
|
|
1549
|
-
"create-block":
|
|
1550
|
-
reset:
|
|
1551
|
-
start:
|
|
1552
|
-
stop:
|
|
1553
|
-
"start:docker":
|
|
1554
|
-
"start:local":
|
|
1555
|
-
"
|
|
1556
|
-
"
|
|
1843
|
+
let De = J;
|
|
1844
|
+
const It = {
|
|
1845
|
+
"create-block": Se,
|
|
1846
|
+
reset: Le,
|
|
1847
|
+
start: Ae,
|
|
1848
|
+
stop: Ie,
|
|
1849
|
+
"start:docker": ot,
|
|
1850
|
+
"start:local": lt,
|
|
1851
|
+
"svc:delete": Oe,
|
|
1852
|
+
"svc:down": Ee,
|
|
1853
|
+
"svc:list": Re,
|
|
1854
|
+
"svc:up": Te,
|
|
1855
|
+
"start:docker:s3": dt,
|
|
1856
|
+
"start:local:s3": gt,
|
|
1857
|
+
"svc:create:docker": Fe,
|
|
1858
|
+
"svc:create:local": xe,
|
|
1859
|
+
"svc:create:docker:s3": De
|
|
1557
1860
|
};
|
|
1558
1861
|
export {
|
|
1559
|
-
|
|
1862
|
+
It as COMMANDS
|
|
1560
1863
|
};
|
|
1561
1864
|
//# sourceMappingURL=index.mjs.map
|