@platforma-sdk/bootstrap 2.9.0 → 3.0.0
This diff compares the published contents of the two package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/assets/compose-backend.yaml +12 -5
- package/dist/block.d.ts.map +1 -1
- package/dist/cmd-opts.d.ts +8 -2
- package/dist/cmd-opts.d.ts.map +1 -1
- package/dist/commands/start/docker/s3.d.ts +2 -1
- package/dist/commands/start/docker/s3.d.ts.map +1 -1
- package/dist/commands/start/docker.d.ts.map +1 -1
- package/dist/commands/start/local/s3.d.ts +2 -2
- package/dist/commands/start/local/s3.d.ts.map +1 -1
- package/dist/commands/start/local.d.ts.map +1 -1
- package/dist/commands/start.d.ts.map +1 -1
- package/dist/commands/stop.d.ts.map +1 -1
- package/dist/commands/svc/create/docker/s3.d.ts +32 -0
- package/dist/commands/svc/create/docker/s3.d.ts.map +1 -0
- package/dist/commands/svc/create/docker.d.ts +34 -0
- package/dist/commands/svc/create/docker.d.ts.map +1 -0
- package/dist/commands/svc/create/local.d.ts +39 -0
- package/dist/commands/svc/create/local.d.ts.map +1 -0
- package/dist/commands/svc/delete.d.ts +15 -0
- package/dist/commands/svc/delete.d.ts.map +1 -0
- package/dist/commands/svc/down.d.ts +14 -0
- package/dist/commands/svc/down.d.ts.map +1 -0
- package/dist/commands/svc/list.d.ts +8 -0
- package/dist/commands/svc/list.d.ts.map +1 -0
- package/dist/commands/svc/up.d.ts +14 -0
- package/dist/commands/svc/up.d.ts.map +1 -0
- package/dist/core.d.ts +20 -13
- package/dist/core.d.ts.map +1 -1
- package/dist/index.d.ts +18 -4
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +31 -34
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1259 -950
- package/dist/index.mjs.map +1 -1
- package/dist/platforma.d.ts.map +1 -1
- package/dist/run.d.ts +11 -5
- package/dist/run.d.ts.map +1 -1
- package/dist/state.d.ts +43 -22
- package/dist/state.d.ts.map +1 -1
- package/dist/templates/compose.d.ts +3 -1
- package/dist/templates/compose.d.ts.map +1 -1
- package/dist/templates/types.d.ts.map +1 -1
- package/dist/util.d.ts +7 -1
- package/dist/util.d.ts.map +1 -1
- package/package.json +2 -2
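The file list shows a new `dist/commands/svc/` tree (create/local, create/docker, create/docker/s3, list, up, down, delete), i.e. a new instance/service management command group, alongside reworked `state`, `run` and `core` modules. Only compiled declarations and the bundle are part of this diff, so the sketch below is illustrative only: the command name, flags and output are assumptions based on the new `dist/commands/svc/*.d.ts` files and the oclif usage visible in the bundle.

```ts
// Illustrative sketch only: not the package's actual source. The real commands
// ship compiled; command name, flags and log output here are assumptions.
import { Command, Flags } from '@oclif/core';

export default class SvcList extends Command {
  static description = 'list platforma backend instances known to pl-bootstrap';

  static flags = {
    'log-level': Flags.string({
      description: 'logging level',
      options: ['error', 'warn', 'info', 'debug'],
      default: 'info',
    }),
  };

  async run(): Promise<void> {
    const { flags } = await this.parse(SvcList);
    this.log(`log level: ${flags['log-level']}`);
    // A real implementation would read the instance registry kept as
    // per-instance instance.json files under the pl-bootstrap data directory
    // (see the state changes in the index.mjs diff below).
  }
}
```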
package/dist/index.mjs
CHANGED
@@ -1,509 +1,540 @@
(condensed) The new side of this hunk carries the re-bundled head of the module: the bundler's class-field helpers and the imports (@oclif/core, node:os, node:fs, node:path, node:child_process, winston, node:crypto, readline-sync, node:fs/promises, node:stream, zod, decompress, yaml, @milaboratories/pl-local, node:https, tar), followed by the oclif flag groups: log-level, an instance-name flag, image, version, arch, license and license-file, the gRPC/monitoring/debug port and listen flags, s3-port and s3-console-port, storage, a new minio-presign-host boolean ("use 'minio' host instead of 'localhost' in presign URLs"), mount, pl-log-file, pl-workdir, pl-binary, pl-sources, config, storage-work, the storage-primary and storage-library URL flags, and the auth-* flags.

It then defines the small helpers (a y/N prompt, an assert-never guard, a winston console-logger factory, random hex IDs, '~' expansion, recursive mkdir, process-name lookup via ps/wmic, and a new lookup of running compose projects via `docker compose ls --filter name=<project> --format json`), the block-boilerplate creation flow (zod-validated prompts, boilerplate download from github.com/milaboratory/platforma-block-boilerplate, pruning of unused software platforms, placeholder renaming) with its `create` command class, the asset-path helpers and the default image name `quay.io/milaboratories/platforma:<version>`.

The state class is reworked: instead of a single lastRun/isActive record it now keeps a currentInstance name plus per-instance instance.json files under the data directory, with instanceDir, instanceList, instanceExists, getInstanceInfo, setInstanceInfo, isInstanceActive (compose status for docker instances, pid check for process instances), currentInstance accessors and selectInstance. The previous lastRun-based docker/process runners are replaced by a generic runner that executes an instance's command list, spawning async commands and running sync ones via spawnSync, stopping at the first failure. The hunk ends with the docker-compose YAML rewriter (service filtering, project name, per-service platform/environment/volume overrides, optional dropping of named volumes) and the start of the S3 storage-settings factory.
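One of the new helpers above determines whether a compose-managed instance is running by parsing `docker compose ls --filter name=<project> --format json` and matching the project name. A minimal sketch of that check, assuming Docker Compose v2 is available; names and the example project are illustrative:

```ts
// Sketch of the compose-project lookup seen in the bundle (names are illustrative).
import { execSync } from 'node:child_process';

interface ComposeProject {
  Name: string;
  Status: string; // e.g. "running(2)"
}

function findComposeProject(name: string): ComposeProject | undefined {
  const out = execSync(
    `docker compose ls --filter name=${name} --format json`,
    { encoding: 'utf8' },
  ).trim();
  const projects: ComposeProject[] = out ? JSON.parse(out) : [];
  return projects.find((p) => p.Name === name);
}

// Usage: an instance named "default" is considered active when its compose
// project "pl-default" reports a running status.
const project = findComposeProject('pl-default');
const isActive = project ? project.Status.trim().startsWith('running') : false;
console.log(isActive);
```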
@@ -521,7 +552,7 @@ function Gr(a) {
 uploadKeyPrefix: ""
 };
 }
-function
+function wr(a) {
 return {
 id: a,
 type: "FS",
@@ -529,137 +560,137 @@ function ar(a) {
(condensed) The new side of this hunk defines the FS storage-settings factory and the storage-URL parser: the URL is '~'-expanded, resolved against a file: base and dispatched on its protocol. s3: takes the bucket from the hostname and the region from the query string; s3e: and s3es: take an http or https endpoint from the host, the bucket and key prefix from the path, the region from the query string and static key/secret credentials from the URL's user-info part; file: maps to an FS storage rooted at the pathname; any other protocol is rejected.

It then builds the default backend configuration rooted at the instance directory (defaulting to the 'default' instance): log file, gRPC listen address and TLS cert/key paths, jwt plus htpasswd auth drivers, the embedded database path, main/work/library storages (the work storage must be of type FS), monitoring and debug listen addresses and the license value/file. A per-storage helper fills FS or S3 settings (S3 defaults: endpoint and presign endpoint http://localhost:9000, createBucket and forcePathStyle enabled, the given default bucket name), and the hunk ends inside the YAML config renderer, which inlines the license value (or the trimmed contents of the license file) and the monitoring/debug sections.
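The storage flags document destination URLs of the form file:/path/to/dir, s3://<bucket>/?region=<name>, s3e://<endpoint>/<bucket>/?region=<name> and s3es://... for https. A minimal sketch of how such a URL can be decomposed with the standard WHATWG URL API; this is simplified relative to the bundled parser (which also handles '~' expansion, credentials and key prefixes), and all names are illustrative:

```ts
// Simplified sketch of the storage-URL scheme documented by the
// storage-primary/storage-library flags; not the package's exact parser.
type StorageSettings =
  | { type: 'FS'; rootPath: string }
  | { type: 'S3'; bucketName: string; region: string | null; endpoint?: string };

function parseStorageURL(raw: string): StorageSettings {
  const url = new URL(raw, 'file:/'); // bare paths resolve as file: URLs
  switch (url.protocol) {
    case 'file:':
      return { type: 'FS', rootPath: url.pathname };
    case 's3:': // s3://<bucket>/?region=<name>: the bucket is the host part
      return { type: 'S3', bucketName: url.hostname, region: url.searchParams.get('region') };
    case 's3e:':    // custom endpoint over http
    case 's3es:': { // custom endpoint over https
      const [bucket] = url.pathname.split('/').slice(1);
      const scheme = url.protocol === 's3e:' ? 'http' : 'https';
      return {
        type: 'S3',
        bucketName: bucket,
        region: url.searchParams.get('region'),
        endpoint: `${scheme}://${url.host}/`,
      };
    }
    default:
      throw new Error(`storage protocol '${url.protocol}' is not supported`);
  }
}

// Example:
// parseStorageURL('s3e://minio:9000/main-bucket/?region=no-region')
// -> { type: 'S3', bucketName: 'main-bucket', region: 'no-region', endpoint: 'http://minio:9000/' }
```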
@@ -676,7 +707,7 @@ logging:
 monitoring${e}:
 listen: '${a.monitoring.listen}'
 
-debug${
+debug${t}:
 listen: '${a.debug.listen}'
 
 core:
@@ -710,7 +741,7 @@ controllers:
 
 library:
 mode: passive
-downloadable: ${
+downloadable: ${r}
 
 work:
 mode: active
@@ -727,7 +758,7 @@ controllers:
 workdirCacheOnFailure: 1h
 secrets:
 - map:
-MI_LICENSE: ${JSON.stringify(
+MI_LICENSE: ${JSON.stringify(n)}
 
 packageLoader:
 packagesRoot: '${a.localRoot}/packages'
@@ -735,12 +766,12 @@ controllers:
 workflows: {}
 `;
 }
-function
+function ie(a, e) {
 return a === void 0 ? e : a;
 }
-const
-function
-const e =
+const rt = ["linux", "macos", "windows"];
+function tt(a) {
+const e = x.platform();
 switch (e) {
 case "darwin":
 return "macos";
@@ -750,13 +781,13 @@ function Jr(a) {
 return "windows";
 default:
 throw new Error(
-`operating system '${e}' is not currently supported by Platforma ecosystem. The list of OSes supported: ` + JSON.stringify(
+`operating system '${e}' is not currently supported by Platforma ecosystem. The list of OSes supported: ` + JSON.stringify(rt)
 );
 }
 }
-const
-function
-const e =
+const at = ["amd64", "arm64"];
+function vr(a) {
+const e = x.arch();
 switch (e) {
 case "arm64":
 return "arm64";
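The two hunks above show the OS and architecture normalisation used to pick a backend build (darwin maps to macos, arm64 stays arm64, unsupported values raise an error), and the following hunk composes those names into the download artifact. A small sketch of that mapping; the case labels not visible in the diff (linux, win32, x64) are assumptions based on the usual Node values, and the version is a placeholder:

```ts
// Sketch of the OS/arch normalisation and artifact naming seen in the bundle.
// Only the darwin/arm64 branches and the returned names are visible in the
// diff; the remaining case labels are assumptions.
import os from 'node:os';

function platformaOS(): 'macos' | 'linux' | 'windows' {
  switch (os.platform()) {
    case 'darwin': return 'macos';
    case 'linux': return 'linux';
    case 'win32': return 'windows';
    default:
      throw new Error(`operating system '${os.platform()}' is not currently supported`);
  }
}

function platformaArch(): 'amd64' | 'arm64' {
  switch (os.arch()) {
    case 'arm64': return 'arm64';
    case 'x64': return 'amd64';
    default:
      throw new Error(`processor architecture '${os.arch()}' is not currently supported`);
  }
}

// Artifact name/URL shape used by the download step in the next hunk:
const version = '1.2.3'; // placeholder
const archive = `pl-${version}-${platformaArch()}.tgz`;
const url = `https://cdn.platforma.bio/software/pl/${platformaOS()}/${archive}`;
console.log(url);
```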
@@ -764,477 +795,509 @@ function sr(a) {
|
|
|
764
795
|
return "amd64";
|
|
765
796
|
default:
|
|
766
797
|
throw new Error(
|
|
767
|
-
`processor architecture '${e}' is not currently supported by Platforma ecosystem. The list of architectures supported: ` + JSON.stringify(
|
|
798
|
+
`processor architecture '${e}' is not currently supported by Platforma ecosystem. The list of architectures supported: ` + JSON.stringify(at)
|
|
768
799
|
);
|
|
769
800
|
}
|
|
770
801
|
}
|
|
771
|
-
function
|
|
772
|
-
const
|
|
773
|
-
if (
|
|
774
|
-
return a.info(`Platforma Backend archive download skipped: '${
|
|
775
|
-
|
|
776
|
-
URL: ${
|
|
777
|
-
Save to: ${
|
|
778
|
-
const c =
|
|
779
|
-
return new Promise((
|
|
780
|
-
c.on("response", (
|
|
781
|
-
if (!
|
|
782
|
-
const
|
|
783
|
-
c.destroy(),
|
|
802
|
+
function nt(a, e) {
|
|
803
|
+
const t = (e == null ? void 0 : e.version) ?? fe(), r = (e == null ? void 0 : e.showProgress) ?? process.stdout.isTTY, n = `pl-${t}-${vr()}.tgz`, s = (e == null ? void 0 : e.downloadURL) ?? `https://cdn.platforma.bio/software/pl/${tt()}/${n}`, i = (e == null ? void 0 : e.saveTo) ?? d.binaries(n);
|
|
804
|
+
if (u.existsSync(i))
|
|
805
|
+
return a.info(`Platforma Backend archive download skipped: '${i}' already exists`), Promise.resolve(i);
|
|
806
|
+
u.mkdirSync(m.dirname(i), { recursive: !0 }), a.info(`Downloading Platforma Backend archive:
|
|
807
|
+
URL: ${s}
|
|
808
|
+
Save to: ${i}`);
|
|
809
|
+
const c = Rr.get(s);
|
|
810
|
+
return new Promise((o, f) => {
|
|
811
|
+
c.on("response", (g) => {
|
|
812
|
+
if (!g.statusCode) {
|
|
813
|
+
const y = new Error("failed to download archive: no HTTP status code in response from server");
|
|
814
|
+
c.destroy(), f(y);
|
|
784
815
|
return;
|
|
785
816
|
}
|
|
786
|
-
if (
|
|
787
|
-
const
|
|
788
|
-
c.destroy(),
|
|
817
|
+
if (g.statusCode !== 200) {
|
|
818
|
+
const y = new Error(`failed to download archive: ${g.statusCode} ${g.statusMessage}`);
|
|
819
|
+
c.destroy(), f(y);
|
|
789
820
|
return;
|
|
790
821
|
}
|
|
791
|
-
const
|
|
822
|
+
const h = parseInt(g.headers["content-length"] || "0", 10);
|
|
792
823
|
let w = 0;
|
|
793
|
-
const
|
|
794
|
-
|
|
795
|
-
w +=
|
|
796
|
-
const
|
|
797
|
-
|
|
798
|
-
}),
|
|
799
|
-
|
|
800
|
-
}),
|
|
801
|
-
|
|
824
|
+
const v = u.createWriteStream(i);
|
|
825
|
+
g.pipe(v), g.on("data", (y) => {
|
|
826
|
+
w += y.length;
|
|
827
|
+
const P = w / h * 100;
|
|
828
|
+
r && process.stdout.write(` downloading: ${P.toFixed(2)}%\r`);
|
|
829
|
+
}), g.on("error", (y) => {
|
|
830
|
+
u.unlinkSync(i), a.error(`Failed to download Platforma Binary: ${y.message}`), c.destroy(), f(y);
|
|
831
|
+
}), v.on("finish", () => {
|
|
832
|
+
v.close(), a.info(" ... download done."), c.destroy(), o(i);
|
|
802
833
|
});
|
|
803
834
|
});
|
|
804
835
|
});
|
|
805
836
|
}
|
|
806
|
-
function
|
|
837
|
+
function st(a, e) {
|
|
807
838
|
a.debug("extracting archive...");
|
|
808
|
-
const
|
|
809
|
-
a.debug(` version: '${
|
|
810
|
-
const
|
|
811
|
-
a.debug(` archive path: '${
|
|
812
|
-
const
|
|
813
|
-
if (a.debug(` target dir: '${
|
|
814
|
-
return a.info(`Platforma Backend binaries unpack skipped: '${
|
|
815
|
-
if (!
|
|
816
|
-
const
|
|
817
|
-
throw a.error(
|
|
839
|
+
const t = (e == null ? void 0 : e.version) ?? fe();
|
|
840
|
+
a.debug(` version: '${t}'`);
|
|
841
|
+
const r = `${Pr({ version: t })}.tgz`, n = (e == null ? void 0 : e.archivePath) ?? d.binaries(r);
|
|
842
|
+
a.debug(` archive path: '${n}'`);
|
|
843
|
+
const s = (e == null ? void 0 : e.extractTo) ?? ct(n);
|
|
844
|
+
if (a.debug(` target dir: '${s}'`), u.existsSync(s))
|
|
845
|
+
return a.info(`Platforma Backend binaries unpack skipped: '${s}' exists`), s;
|
|
846
|
+
if (!u.existsSync(n)) {
|
|
847
|
+
const i = `Platforma Backend binary archive not found at '${n}'`;
|
|
848
|
+
throw a.error(i), new Error(i);
|
|
818
849
|
}
|
|
819
|
-
return
|
|
820
|
-
Archive: ${
|
|
821
|
-
Target dir: ${
|
|
822
|
-
file:
|
|
823
|
-
cwd:
|
|
850
|
+
return u.existsSync(s) || (a.debug(` creating target dir '${s}'`), u.mkdirSync(s, { recursive: !0 })), a.info(`Unpacking Platforma Backend archive:
|
|
851
|
+
Archive: ${n}
|
|
852
|
+
Target dir: ${s}`), Tr.x({
|
|
853
|
+
file: n,
|
|
854
|
+
cwd: s,
|
|
824
855
|
gzip: !0,
|
|
825
856
|
sync: !0
|
|
826
|
-
}), a.info(" ... unpack done."),
|
|
857
|
+
}), a.info(" ... unpack done."), s;
|
|
827
858
|
}
|
|
828
|
-
function
|
|
829
|
-
return
|
|
859
|
+
function Ge(a, e) {
|
|
860
|
+
return nt(a, e).then((t) => st(a, { archivePath: t }));
|
|
830
861
|
}
|
|
831
|
-
function
|
|
832
|
-
return `pl-${(a == null ? void 0 : a.version) ??
|
|
862
|
+
function Pr(a) {
|
|
863
|
+
return `pl-${(a == null ? void 0 : a.version) ?? fe()}-${vr()}`;
|
|
833
864
|
}
|
|
834
|
-
function
|
|
835
|
-
return d.binaries(
|
|
865
|
+
function it(a, ...e) {
|
|
866
|
+
return d.binaries(Pr({ version: a }), ...e);
|
|
836
867
|
}
|
|
837
|
-
function
|
|
868
|
+
function ct(a) {
|
|
838
869
|
const e = a.lastIndexOf(".");
|
|
839
870
|
return e === -1 ? a : a.slice(0, e);
|
|
840
871
|
}
|
|
841
|
-
class
|
|
872
|
+
class O {
|
|
842
873
|
constructor(e) {
|
|
843
874
|
this.logger = e;
|
|
844
875
|
}
|
|
845
876
|
startLast() {
|
|
846
|
-
const e =
|
|
847
|
-
|
|
877
|
+
const e = d.currentInstance;
|
|
878
|
+
if (!e)
|
|
879
|
+
throw this.logger.error("failed to bring back Platforma Backend in the last started configuration: no last configuration found"), new Error("no previous run info found");
|
|
880
|
+
return this.startInstance(e);
|
|
881
|
+
}
|
|
882
|
+
startInstance(e) {
|
|
883
|
+
if (e.runInfo) {
|
|
884
|
+
const r = this.renderRunInfo(e.runInfo);
|
|
885
|
+
this.logger.info(`Starting platforma backend instance '${e.name}':
|
|
886
|
+
${r}`);
|
|
887
|
+
}
|
|
888
|
+
const t = we(
|
|
889
|
+
this.logger,
|
|
890
|
+
e.upCommands
|
|
891
|
+
);
|
|
892
|
+
return ge(t.executed), t.spawned.length > 0 && e.type === "process" && (e.pid = t.spawned[t.spawned.length - 1].pid, d.setInstanceInfo(e.name, e), this.logger.info(`instance '${e.name}' started`)), d.currentInstanceName = e.name, t.spawned;
|
|
893
|
+
}
|
|
894
|
+
stopInstance(e) {
|
|
895
|
+
if (!d.isInstanceActive(e)) {
|
|
896
|
+
this.logger.info(`instance '${e.name}' is not running`);
|
|
897
|
+
return;
|
|
898
|
+
}
|
|
899
|
+
this.logger.info(`stopping platforma backend instance '${e.name}'...`);
|
|
900
|
+
const t = we(this.logger, e.downCommands);
|
|
901
|
+
switch (ge(t.executed), e.type) {
|
|
902
|
+
case "docker":
|
|
903
|
+
return;
|
|
904
|
+
case "process": {
|
|
905
|
+
e.pid && d.isValidPID(e.pid) && process.kill(e.pid);
|
|
906
|
+
return;
|
|
907
|
+
}
|
|
908
|
+
default:
|
|
909
|
+
_();
|
|
910
|
+
}
|
|
848
911
|
}
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
|
|
855
|
-
|
|
912
|
+
switchInstance(e) {
|
|
913
|
+
for (const t of d.instanceList)
|
|
914
|
+
if (t !== e.name) {
|
|
915
|
+
const r = d.getInstanceInfo(t);
|
|
916
|
+
d.isInstanceActive(r) && this.stopInstance(r);
|
|
917
|
+
}
|
|
918
|
+
return this.startInstance(e);
|
|
919
|
+
}
|
|
920
|
+
createLocal(e, t) {
|
|
921
|
+
var o, f, g, h, w, v, y, P, S, k;
|
|
922
|
+
const r = (t == null ? void 0 : t.binaryPath) ?? it(t == null ? void 0 : t.version, "binaries", "platforma");
|
|
923
|
+
let n = t == null ? void 0 : t.configPath;
|
|
924
|
+
const s = (t == null ? void 0 : t.workdir) ?? (n ? process.cwd() : d.instanceDir(e));
|
|
925
|
+
t != null && t.primaryURL && (t.configOptions = {
|
|
926
|
+
...t.configOptions,
|
|
856
927
|
storages: {
|
|
857
|
-
...(
|
|
858
|
-
primary:
|
|
928
|
+
...(o = t.configOptions) == null ? void 0 : o.storages,
|
|
929
|
+
primary: Q(t.primaryURL, s, (g = (f = t.configOptions) == null ? void 0 : f.storages) == null ? void 0 : g.primary)
|
|
859
930
|
}
|
|
860
|
-
}),
|
|
861
|
-
...
|
|
931
|
+
}), t != null && t.libraryURL && (t.configOptions = {
|
|
932
|
+
...t.configOptions,
|
|
862
933
|
storages: {
|
|
863
|
-
...(
|
|
864
|
-
library:
|
|
934
|
+
...(h = t.configOptions) == null ? void 0 : h.storages,
|
|
935
|
+
library: Q(t.libraryURL, s, (v = (w = t.configOptions) == null ? void 0 : w.storages) == null ? void 0 : v.library)
|
|
865
936
|
}
|
|
866
937
|
});
|
|
867
|
-
const i =
|
|
868
|
-
this.logger.debug(" checking license..."), this.checkLicense((
|
|
869
|
-
const
|
|
938
|
+
const i = Zr(this.getLastJwt(), t == null ? void 0 : t.configOptions);
|
|
939
|
+
this.logger.debug(" checking license..."), this.checkLicense((P = (y = t == null ? void 0 : t.configOptions) == null ? void 0 : y.license) == null ? void 0 : P.value, (k = (S = t == null ? void 0 : t.configOptions) == null ? void 0 : S.license) == null ? void 0 : k.file);
|
|
940
|
+
const c = [
|
|
870
941
|
`${i.localRoot}/packages`,
|
|
871
942
|
`${i.localRoot}/packages-local`,
|
|
872
943
|
`${i.localRoot}/blocks-local`
|
|
873
944
|
];
|
|
874
|
-
i.storages.primary.type === "FS" &&
|
|
875
|
-
for (const
|
|
876
|
-
|
|
877
|
-
for (const
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
888
|
-
|
|
889
|
-
|
|
890
|
-
|
|
891
|
-
|
|
892
|
-
|
|
893
|
-
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
|
|
899
|
-
|
|
945
|
+
i.storages.primary.type === "FS" && c.push(i.storages.primary.rootPath), i.storages.library.type === "FS" && (c.push(i.storages.library.rootPath), i.hacks.libraryDownloadable = !1), i.storages.work.type === "FS" && c.push(i.storages.work.rootPath), this.logger.debug(" creating pl state directories...");
|
|
946
|
+
for (const p of c)
|
|
947
|
+
u.existsSync(p) || (this.logger.debug(` '${p}'`), u.mkdirSync(p, { recursive: !0 }));
|
|
948
|
+
for (const p of i.core.auth.drivers)
|
|
949
|
+
p.driver === "htpasswd" && (u.existsSync(p.path) || (this.logger.debug(` installing default 'users.htpasswd' to ${p.path}...`), u.copyFileSync(K("users.htpasswd"), p.path)));
|
|
950
|
+
return n || (n = m.join(i.localRoot, "config.yaml"), this.logger.debug(` rendering configuration '${n}'...`), u.writeFileSync(n, et(i))), d.setInstanceInfo(e, {
|
|
951
|
+
type: "process",
|
|
952
|
+
upCommands: [
|
|
953
|
+
{
|
|
954
|
+
async: !0,
|
|
955
|
+
cmd: r,
|
|
956
|
+
args: ["-config", n],
|
|
957
|
+
workdir: s,
|
|
958
|
+
runOpts: { stdio: "inherit" }
|
|
959
|
+
}
|
|
960
|
+
],
|
|
961
|
+
downCommands: [],
|
|
962
|
+
cleanupCommands: [],
|
|
963
|
+
runInfo: {
|
|
964
|
+
configPath: n,
|
|
965
|
+
dbPath: i.core.db.path,
|
|
966
|
+
apiAddr: i.grpc.listen,
|
|
967
|
+
logPath: i.log.path,
|
|
968
|
+
primary: i.storages.primary,
|
|
969
|
+
work: i.storages.work,
|
|
970
|
+
library: i.storages.library
|
|
900
971
|
}
|
|
901
|
-
);
|
|
972
|
+
}), d.getInstanceInfo(e);
|
|
902
973
|
}
|
|
903
|
-
|
|
904
|
-
var
|
|
905
|
-
this.logger.debug("
|
|
906
|
-
const r = (
|
|
907
|
-
|
|
974
|
+
createLocalS3(e, t) {
|
|
975
|
+
var c;
|
|
976
|
+
this.logger.debug("creating platforma instance in 'local s3' mode...");
|
|
977
|
+
const r = (t == null ? void 0 : t.minioPort) ?? 9e3, n = this.createLocal(e, {
|
|
978
|
+
...t,
|
|
979
|
+
primaryURL: (t == null ? void 0 : t.primaryURL) ?? `s3e://testuser:testpassword@localhost:${r}/main-bucket/?region=no-region`,
|
|
980
|
+
libraryURL: (t == null ? void 0 : t.libraryURL) ?? `s3e://testuser:testpassword@localhost:${r}/library-bucket/?region=no-region`
|
|
981
|
+
}), s = (c = t == null ? void 0 : t.configOptions) == null ? void 0 : c.localRoot, i = this.createMinio(e, {
|
|
908
982
|
minioPort: r,
|
|
909
|
-
minioConsolePort:
|
|
910
|
-
storage:
|
|
911
|
-
}), this.startLocal({
|
|
912
|
-
...e,
|
|
913
|
-
primaryURL: (e == null ? void 0 : e.primaryURL) ?? `s3e://testuser:testpassword@localhost:${r}/main-bucket/?region=no-region`,
|
|
914
|
-
libraryURL: (e == null ? void 0 : e.libraryURL) ?? `s3e://testuser:testpassword@localhost:${r}/library-bucket/?region=no-region`
|
|
983
|
+
minioConsolePort: t == null ? void 0 : t.minioConsolePort,
|
|
984
|
+
storage: s ? m.join(s, "minio") : void 0
|
|
915
985
|
});
|
|
986
|
+
return n.upCommands = [
|
|
987
|
+
i.start,
|
|
988
|
+
...n.upCommands
|
|
989
|
+
], n.downCommands = [
|
|
990
|
+
i.stop,
|
|
991
|
+
...n.downCommands
|
|
992
|
+
], n.cleanupCommands = [
|
|
993
|
+
i.cleanup,
|
|
994
|
+
...n.cleanupCommands
|
|
995
|
+
], d.setInstanceInfo(e, n), n;
|
|
916
996
|
}
-
- this.logger.debug("
- const r =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+ createMinio(e, t) {
+ this.logger.debug(" creating docker compose for minio service...");
+ const r = K("compose-backend.yaml"), n = d.instanceDir(e, "compose-minio.yaml");
+ ve(
+ r,
+ n,
+ `pl-${e}-minio`,
+ /* @__PURE__ */ new Map([
+ ["minio", {}]
+ ]),
+ { dropVolumes: !0 }
+ );
+ const s = t != null && t.version ? `:${t.version}` : "";
+ this.logger.debug(` minio version: ${s}`);
+ const i = (t == null ? void 0 : t.image) ?? `quay.io/minio/minio${s}`;
+ this.logger.debug(` minio image: ${i}`);
+ const c = (t == null ? void 0 : t.storage) ?? d.instanceDir(e, "minio");
+ pe(c, { mode: "0775" });
+ const o = (t == null ? void 0 : t.minioPort) ?? 9e3, f = (t == null ? void 0 : t.minioConsolePort) ?? 9001, g = {
+ MINIO_IMAGE: i,
+ MINIO_STORAGE: m.resolve(c),
+ MINIO_PORT: o.toString(),
+ MINIO_CONSOLE_PORT: f.toString()
  };
-
-
-
-
-
-
-
-
-
-
+ return {
+ start: {
+ cmd: "docker",
+ args: ["compose", `--file=${n}`, "up", "--detach", "--remove-orphans", "--pull=missing"],
+ envs: g,
+ workdir: d.instanceDir(e),
+ runOpts: { stdio: "inherit" }
+ },
+ stop: {
+ cmd: "docker",
+ args: ["compose", `--file=${n}`, "down"],
+ envs: g,
+ workdir: d.instanceDir(e),
+ runOpts: { stdio: "inherit" }
+ },
+ cleanup: {
+ cmd: "docker",
+ args: ["compose", `--file=${n}`, "down", "--volumes", "--remove-orphans"],
+ envs: g,
+ workdir: d.instanceDir(e),
+ runOpts: { stdio: "inherit" }
  }
-
- z(f, "failed to start MinIO service in docker");
+ };
  }
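`createMinio` now renders `compose-minio.yaml` and returns plain command descriptors instead of invoking Docker directly. The sketch below reproduces the three `docker compose` invocations visible in the hunk; the function name and parameter list are illustrative, not the package's API:

```ts
// Sketch of the start/stop/cleanup descriptors built above (assumed shape).
function minioComposeCommands(
  composeFile: string,
  envs: Record<string, string>,
  workdir: string,
) {
  const base = { envs, workdir, runOpts: { stdio: 'inherit' as const } };
  return {
    // Detached startup, pulling the image only if it is missing locally.
    start: {
      ...base,
      cmd: 'docker',
      args: ['compose', `--file=${composeFile}`, 'up', '--detach', '--remove-orphans', '--pull=missing'],
    },
    // Plain shutdown keeps the MinIO volume around.
    stop: { ...base, cmd: 'docker', args: ['compose', `--file=${composeFile}`, 'down'] },
    // Cleanup also drops volumes and orphaned containers.
    cleanup: {
      ...base,
      cmd: 'docker',
      args: ['compose', `--file=${composeFile}`, 'down', '--volumes', '--remove-orphans'],
    },
  };
}
```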
  buildPlatforma(e) {
- const
- this.logger.info("Building Platforma Backend binary from sources"), this.logger.info(` sources path: ${e.repoRoot}`), this.logger.info(` binary path: ${
- const
- cwd:
+ const t = m.resolve(e.repoRoot, "cmd", "platforma"), r = e.binPath ?? m.join(x.tmpdir(), "platforma-local-build");
+ this.logger.info("Building Platforma Backend binary from sources"), this.logger.info(` sources path: ${e.repoRoot}`), this.logger.info(` binary path: ${r}`);
+ const n = fr("go", ["build", "-o", r, "."], {
+ cwd: t,
  stdio: "inherit"
  });
- return
+ return ge([n], "failed to build platforma binary from sources using 'go build' command"), r;
  }
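`buildPlatforma` compiles the backend from a source checkout with `go build` and returns the output path. A self-contained sketch of the same flow, under the assumption of a standalone `buildPlatforma(repoRoot, binPath?)` helper:

```ts
import os from 'node:os';
import path from 'node:path';
import { spawnSync } from 'node:child_process';

// Compile cmd/platforma from a backend checkout into a temporary binary.
// binPath is optional, as in the hunk above.
function buildPlatforma(repoRoot: string, binPath?: string): string {
  const srcDir = path.resolve(repoRoot, 'cmd', 'platforma');
  const out = binPath ?? path.join(os.tmpdir(), 'platforma-local-build');
  const result = spawnSync('go', ['build', '-o', out, '.'], { cwd: srcDir, stdio: 'inherit' });
  if (result.error) throw result.error;
  if (result.status !== 0) throw new Error(`'go build' exited with code ${result.status}`);
  return out;
}
```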
-
-
+ createDockerS3(e, t, r) {
+ this.logger.debug("creating platforma instance in 'docker s3' mode...");
+ const n = K("compose-backend.yaml"), s = (r == null ? void 0 : r.image) ?? dr(r == null ? void 0 : r.version);
  this.checkLicense(r == null ? void 0 : r.license, r == null ? void 0 : r.licenseFile);
- const i = (...
- const
- return
- },
-
- const
- if (
+ const i = (...p) => m.join(t, ...p), c = (p) => {
+ const $ = i(p);
+ return pe($, { mode: "0775" }), $;
+ }, o = i("logs", "platforma.log");
+ u.existsSync(o) || (u.mkdirSync(m.dirname(o), { recursive: !0 }), u.writeFileSync(o, ""));
+ const f = (r == null ? void 0 : r.presignHost) ?? "localhost", g = Q("s3e://testuser:testpassword@minio:9000/main-bucket");
+ if (g.type !== "S3")
  throw new Error("primary storage must have 'S3' type in 'docker s3' configuration");
-
- const
- if (
- throw new Error(`${
-
- const
-
- const
-
- const
- for (const
-
- hostPath:
- containerPath:
+ g.presignEndpoint = `http://${f}:9000`;
+ const h = Q("s3e://testuser:testpassword@minio:9000/library-bucket");
+ if (h.type !== "S3")
+ throw new Error(`${h.type} storage type is not supported for library storage`);
+ h.presignEndpoint = `http://${f}:9000`;
+ const w = c("db"), v = c("work"), y = i("users.htpasswd");
+ u.existsSync(y) || u.copyFileSync(K("users.htpasswd"), y);
+ const P = i("compose.yaml");
+ u.existsSync(P) && this.logger.info(`replacing docker compose file ${P}`);
+ const S = [];
+ for (const p of (r == null ? void 0 : r.customMounts) ?? [])
+ S.push({
+ hostPath: p.hostPath,
+ containerPath: p.containerPath ?? p.hostPath
  });
-
+ ve(n, P, `pl-${e}`, /* @__PURE__ */ new Map([
  ["minio", {}],
  ["backend", {
  platform: r == null ? void 0 : r.platformOverride,
- mounts:
+ mounts: S
  }]
  ]));
  const k = {
  MINIO_IMAGE: "quay.io/minio/minio",
- MINIO_STORAGE:
+ MINIO_STORAGE: c("minio"),
  PL_IMAGE: s,
- PL_AUTH_HTPASSWD_PATH:
+ PL_AUTH_HTPASSWD_PATH: y,
  PL_LICENSE: r == null ? void 0 : r.license,
  PL_LICENSE_FILE: r == null ? void 0 : r.licenseFile,
  PL_LOG_LEVEL: (r == null ? void 0 : r.logLevel) ?? "info",
- PL_LOG_DIR:
+ PL_LOG_DIR: m.dirname(o),
  PL_LOG_ROTATION_ENABLED: "true",
- PL_DATA_DB_ROOT:
- PL_DATA_PRIMARY_ROOT:
- PL_DATA_LIBRARY_ROOT:
- PL_DATA_WORKDIR_ROOT:
-
-
- ...this.configureDockerStorage("
+ PL_DATA_DB_ROOT: w,
+ PL_DATA_PRIMARY_ROOT: c("primary"),
+ PL_DATA_LIBRARY_ROOT: c("library"),
+ PL_DATA_WORKDIR_ROOT: v,
+ // Mount packages storage as volume, because APFS is case-insensitive on Mac OS X and this breaks some pl software installation.
+ // PL_DATA_PACKAGE_ROOT: storageDir('packages'),
+ ...this.configureDockerStorage("primary", g),
+ ...this.configureDockerStorage("library", h)
  };
  if (r != null && r.grpcAddr && (k.PL_GRPC_ADDR = r.grpcAddr), r != null && r.grpcPort && (k.PL_GRPC_PORT = r.grpcPort.toString()), r != null && r.monitoringAddr && (k.PL_MONITORING_ADDR = r.monitoringAddr), r != null && r.monitoringPort && (k.PL_MONITORING_PORT = r.monitoringPort.toString()), r != null && r.debugAddr && (k.PL_DEBUG_ADDR = r.debugAddr), r != null && r.debugPort && (k.PL_DEBUG_PORT = r.debugPort.toString()), r != null && r.auth && (r.auth.enabled && (k.PL_AUTH_ENABLED = "true"), r.auth.drivers)) {
- for (const
-
+ for (const p of r.auth.drivers)
+ p.driver === "htpasswd" && (k.PL_AUTH_HTPASSWD_PATH = m.resolve(p.path), p.path = "/etc/platforma/users.htpasswd");
  k.PL_AUTH_DRIVERS = JSON.stringify(r.auth.drivers);
  }
-
-
- [
- "
- `--file=${
-
- "
-
-
- "
- "
-
-
-
-
-
-
-
-
+ return d.setInstanceInfo(e, {
+ type: "docker",
+ upCommands: [{
+ cmd: "docker",
+ args: ["compose", `--file=${P}`, "up", "--detach", "--remove-orphans", "--pull=missing"],
+ envs: k,
+ runOpts: { stdio: "inherit" }
+ }],
+ downCommands: [{
+ cmd: "docker",
+ args: ["compose", `--file=${P}`, "down"],
+ envs: k,
+ runOpts: { stdio: "inherit" }
+ }],
+ cleanupCommands: [{
+ cmd: "docker",
+ args: ["compose", `--file=${P}`, "down", "--volumes", "--remove-orphans"],
+ envs: k,
+ runOpts: { stdio: "inherit" }
+ }],
+ runInfo: {
+ apiPort: r == null ? void 0 : r.grpcPort,
+ apiAddr: r == null ? void 0 : r.grpcAddr,
+ logPath: o,
+ primary: g,
+ work: { type: "FS", rootPath: v },
+ library: h,
+ dbPath: w
  }
- );
- z(R, "failed to start Platforma Backend in Docker"), d.isActive = !0;
- const S = this.renderRunInfo({
- apiPort: r == null ? void 0 : r.grpcPort,
- apiAddr: r == null ? void 0 : r.grpcAddr,
- logPath: c,
- primary: l,
- work: { type: "FS", rootPath: b },
- library: u,
- dbPath: f
- });
- this.logger.info(`Started platforma:
- ${S}`);
+ }), d.getInstanceInfo(e);
  }
-
-
+ createDocker(e, t, r) {
+ this.logger.debug("creating platforma instance in 'docker' mode...");
+ const n = K("compose-backend.yaml"), s = (r == null ? void 0 : r.image) ?? dr(r == null ? void 0 : r.version);
  this.checkLicense(r == null ? void 0 : r.license, r == null ? void 0 : r.licenseFile);
- const i = (
- const
- return
- },
-
- const
-
- const
-
- const
- for (const
-
- hostPath:
- containerPath:
+ const i = (...$) => m.join(t, ...$), c = ($) => {
+ const se = i($);
+ return pe(se, { mode: "0775" }), se;
+ }, o = i("logs", "platforma.log");
+ u.existsSync(o) || (u.mkdirSync(m.dirname(o), { recursive: !0 }), u.writeFileSync(o, ""));
+ const f = c("db"), g = c("primary"), h = c("library"), w = c("work"), v = i("users.htpasswd");
+ u.existsSync(v) || u.copyFileSync(K("users.htpasswd"), v);
+ const y = i("compose.yaml");
+ u.existsSync(y) && this.logger.info(`replacing docker compose file ${y}`);
+ const P = [];
+ for (const $ of (r == null ? void 0 : r.customMounts) ?? [])
+ P.push({
+ hostPath: $.hostPath,
+ containerPath: $.containerPath ?? $.hostPath
  });
- this.logger.debug(`Rendering docker compose file '${
+ this.logger.debug(`Rendering docker compose file '${y}' using '${n}' as base template`), ve(n, y, `pl-${e}`, /* @__PURE__ */ new Map([
  ["backend", {
  platform: r == null ? void 0 : r.platformOverride,
- mounts:
+ mounts: P
  }]
  ]));
- const
+ const S = Q((r == null ? void 0 : r.primaryStorageURL) ?? `file:${g}`, "."), k = Q((r == null ? void 0 : r.libraryStorageURL) ?? `file:${h}`, "."), p = {
  MINIO_IMAGE: "quay.io/minio/minio",
- MINIO_STORAGE:
+ MINIO_STORAGE: c("minio"),
  PL_IMAGE: s,
- PL_AUTH_HTPASSWD_PATH:
+ PL_AUTH_HTPASSWD_PATH: v,
  PL_LICENSE: r == null ? void 0 : r.license,
  PL_LICENSE_FILE: r == null ? void 0 : r.licenseFile,
  PL_LOG_LEVEL: "info",
- PL_LOG_DIR:
+ PL_LOG_DIR: m.dirname(o),
  PL_LOG_ROTATION_ENABLED: "true",
- PL_DATA_DB_ROOT:
- PL_DATA_PRIMARY_ROOT:
- PL_DATA_LIBRARY_ROOT:
- PL_DATA_WORKDIR_ROOT:
- PL_DATA_PACKAGE_ROOT:
- ...this.configureDockerStorage("primary",
- ...this.configureDockerStorage("library",
+ PL_DATA_DB_ROOT: f,
+ PL_DATA_PRIMARY_ROOT: g,
+ PL_DATA_LIBRARY_ROOT: h,
+ PL_DATA_WORKDIR_ROOT: w,
+ PL_DATA_PACKAGE_ROOT: c("packages"),
+ ...this.configureDockerStorage("primary", S),
+ ...this.configureDockerStorage("library", k)
  };
- if (r != null && r.grpcAddr && (
- for (const
-
-
+ if (r != null && r.grpcAddr && (p.PL_GRPC_ADDR = r.grpcAddr), r != null && r.grpcPort && (p.PL_GRPC_PORT = r.grpcPort.toString()), r != null && r.monitoringAddr && (p.PL_MONITORING_ADDR = r.monitoringAddr), r != null && r.monitoringPort && (p.PL_MONITORING_PORT = r.monitoringPort.toString()), r != null && r.debugAddr && (p.PL_DEBUG_ADDR = r.debugAddr), r != null && r.debugPort && (p.PL_DEBUG_PORT = r.debugPort.toString()), r != null && r.auth && (r.auth.enabled && (p.PL_AUTH_ENABLED = "true"), r.auth.drivers)) {
+ for (const $ of r.auth.drivers)
+ $.driver === "htpasswd" && (p.PL_AUTH_HTPASSWD_PATH = m.resolve($.path), $.path = "/etc/platforma/users.htpasswd");
+ p.PL_AUTH_DRIVERS = JSON.stringify(r.auth.drivers);
  }
-
-
- [
-
-
-
-
-
-
-
-
-
-
+ return d.setInstanceInfo(e, {
+ type: "docker",
+ upCommands: [{
+ cmd: "docker",
+ args: ["compose", `--file=${y}`, "up", "--detach", "--remove-orphans", "--pull=missing"],
+ envs: p,
+ runOpts: { stdio: "inherit" }
+ }],
+ downCommands: [{
+ cmd: "docker",
+ args: ["compose", `--file=${y}`, "down"],
+ envs: p,
+ runOpts: { stdio: "inherit" }
+ }],
+ cleanupCommands: [{
+ cmd: "docker",
+ args: ["compose", `--file=${y}`, "down", "--volumes", "--remove-orphans"],
+ envs: p,
+ runOpts: { stdio: "inherit" }
+ }],
+ runInfo: {
+ apiPort: r == null ? void 0 : r.grpcPort,
+ apiAddr: r == null ? void 0 : r.grpcAddr,
+ logPath: o,
+ primary: S,
+ work: { type: "FS", rootPath: w },
+ library: k,
+ dbPath: f
  }
- );
- z(P, "failed to start Platforma Backend in Docker"), d.isActive = !0;
- const L = this.renderRunInfo({
- apiPort: r == null ? void 0 : r.grpcPort,
- apiAddr: r == null ? void 0 : r.grpcAddr,
- logPath: c,
- primary: k,
- work: { type: "FS", rootPath: b },
- library: R,
- dbPath: l
- });
- this.logger.info(`Started platforma:
- ${L}`);
+ }), d.getInstanceInfo(e);
  }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- return;
- }
- case "process": {
- d.isValidPID && process.kill(e.process.pid), d.isActive = !1;
-
+ cleanupInstance(e) {
+ const t = [], r = /* @__PURE__ */ new Map();
+ let n = "";
+ if (e) {
+ const s = d.getInstanceInfo(e);
+ switch (r.set(e, s), s.type) {
+ case "docker": {
+ t.push(`docker service 'pl-${e}', including all its volumes and data in '${d.instanceDir(e)}' will be destroyed`);
+ break;
+ }
+ case "process": {
+ t.push(`directory '${d.instanceDir(e)}' would be deleted`), s.downCommands && t.push("associated docker service, including all volumes and data will be destroyed");
+ break;
+ }
+ default:
+ _();
  }
-
-
-
-
-
- var c, l, u, f, b, w, m, v, k, R, S, P, L, A;
- const e = [
- "last command run cache ('pl-service start' shorthand will stop working until next full start command call)",
- "'platforma' docker compose service containers and volumes"
- ], r = d.data(), t = [r];
- if ((l = (c = d.lastRun) == null ? void 0 : c.docker) != null && l.primaryPath) {
- const p = (f = (u = d.lastRun) == null ? void 0 : u.docker) == null ? void 0 : f.primaryPath;
- p.startsWith(r) || t.push(p);
- }
- if ((w = (b = d.lastRun) == null ? void 0 : b.docker) != null && w.workPath) {
- const p = (v = (m = d.lastRun) == null ? void 0 : m.docker) == null ? void 0 : v.workPath;
- p.startsWith(r) || t.push(p);
- }
- if ((R = (k = d.lastRun) == null ? void 0 : k.process) != null && R.storagePath) {
- const p = (P = (S = d.lastRun) == null ? void 0 : S.process) == null ? void 0 : P.storagePath;
- p.startsWith(r) || t.push(p);
- }
- const s = t.length > 0 ? ` - storages (you'll loose all projects and calculation results stored in service instances):
- - ${t.join(`
- - `)}` : "", i = `
- You are going to reset the state of platforma service
- Things to be removed:
- - ${e.join(`
+ e === d.currentInstanceName && t.push(
+ "last command run cache ('pl-service start' shorthand will stop working until next full start command call)"
+ ), n = `
+ You are going to reset the state of platforma service '${e}':
+ - ${t.join(`
  - `)}
- ${s}
  `;
-
+ } else {
+ for (const s of d.instanceList)
+ r.set(s, d.getInstanceInfo(s));
+ t.push(
+ "last command run cache ('pl-service start' shorthand will stop working until next full start command call)",
+ `all service configurations stored in: ${d.instanceDir()} (including all associated docker containers and volumes)`
+ ), n = `
+ You are going to reset the state of all platforma services configured with pl-bootstrap package.
+ - ${t.join(`
+ - `)}
+ `;
+ }
+ if (this.logger.warn(n), !_r("Are you sure?")) {
  this.logger.info("Reset action was canceled");
  return;
  }
- const
-
-
-
-
-
-
+ for (const [s, i] of r.entries()) {
+ if (i.cleanupCommands.length) {
+ this.logger.info(`Wiping instance ${s} services`);
+ const c = we(this.logger, i.cleanupCommands);
+ ge(c.executed, `failed to wipe instance ${s} services`);
+ }
+ this.logger.info(`Destroying instance '${s}' data directory`), u.rmSync(d.instanceDir(s), { recursive: !0, force: !0 });
+ }
+ e || (this.logger.info(`Destroying state dir '${d.path()}'`), u.rmSync(d.path(), { recursive: !0, force: !0 })), this.logger.info(
  `
  If you want to remove all downloaded platforma binaries, delete '${d.binaries()}' dir manually
  `
  );
  }
}
|
|
1196
1276
|
mergeLicenseEnvs(e) {
|
|
1197
|
-
e.license === void 0 && ((process.env.MI_LICENSE ?? "") != "" ? e.license = process.env.MI_LICENSE : (process.env.PL_LICENSE ?? "") != "" && (e.license = process.env.PL_LICENSE)), e["license-file"] === void 0 && e.license === void 0 && ((process.env.MI_LICENSE_FILE ?? "") != "" ? e["license-file"] = process.env.MI_LICENSE_FILE : (process.env.PL_LICENSE_FILE ?? "") != "" ? e["license-file"] = process.env.PL_LICENSE_FILE :
|
|
1277
|
+
e.license === void 0 && ((process.env.MI_LICENSE ?? "") != "" ? e.license = process.env.MI_LICENSE : (process.env.PL_LICENSE ?? "") != "" && (e.license = process.env.PL_LICENSE)), e["license-file"] === void 0 && e.license === void 0 && ((process.env.MI_LICENSE_FILE ?? "") != "" ? e["license-file"] = process.env.MI_LICENSE_FILE : (process.env.PL_LICENSE_FILE ?? "") != "" ? e["license-file"] = process.env.PL_LICENSE_FILE : u.existsSync(m.resolve(x.homedir(), ".pl.license")) && (e["license-file"] = m.resolve(x.homedir(), ".pl.license")));
|
|
1198
1278
|
}
|
|
1199
|
-
initAuthDriversList(e,
|
|
1200
|
-
const
|
|
1201
|
-
if (e["auth-htpasswd-file"] &&
|
|
1279
|
+
initAuthDriversList(e, t) {
|
|
1280
|
+
const r = [];
|
|
1281
|
+
if (e["auth-htpasswd-file"] && r.push({
|
|
1202
1282
|
driver: "htpasswd",
|
|
1203
|
-
path:
|
|
1283
|
+
path: m.resolve(t, e["auth-htpasswd-file"])
|
|
1204
1284
|
}), !!e["auth-ldap-server"] != !!e["auth-ldap-default-dn"])
|
|
1205
1285
|
throw new Error("LDAP auth settings require both 'server' and 'default DN' options to be set");
|
|
1206
|
-
if (e["auth-ldap-server"] &&
|
|
1286
|
+
if (e["auth-ldap-server"] && r.push({
|
|
1207
1287
|
driver: "ldap",
|
|
1208
1288
|
serverUrl: e["auth-ldap-server"],
|
|
1209
1289
|
defaultDN: e["auth-ldap-default-dn"]
|
|
1210
|
-
}),
|
|
1211
|
-
return [{ driver: "jwt", key: this.getLastJwt() }, ...
|
|
1290
|
+
}), r.length !== 0)
|
|
1291
|
+
return [{ driver: "jwt", key: this.getLastJwt() }, ...r];
|
|
1212
1292
|
}
|
|
1213
1293
|
/** Gets the last stored JWT secret key or generates it and stores in a file. */
|
|
1214
1294
|
getLastJwt() {
|
|
1215
|
-
const e = d.path("auth.jwt"),
|
|
1216
|
-
let
|
|
1217
|
-
return
|
|
1295
|
+
const e = d.path("auth.jwt"), t = "utf-8";
|
|
1296
|
+
let r = "";
|
|
1297
|
+
return u.existsSync(e) && (r = u.readFileSync(e, { encoding: t })), r == "" && (r = Cr(64), u.writeFileSync(e, r, { encoding: t })), r;
|
|
1218
1298
|
}
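`getLastJwt` persists a JWT secret next to the service state and reuses it on later runs. A sketch of the same idea; generating the 64-character secret with `crypto.randomBytes` is an assumption about what the minified `Cr(64)` helper does:

```ts
import fs from 'node:fs';
import { randomBytes } from 'node:crypto';

// Reuse the secret stored next to the service state, otherwise generate one
// and persist it for future runs. The encoding of the generated secret is an
// assumption, not taken from the package.
function getOrCreateJwtSecret(secretPath: string): string {
  if (fs.existsSync(secretPath)) {
    const stored = fs.readFileSync(secretPath, { encoding: 'utf-8' });
    if (stored !== '') return stored;
  }
  const secret = randomBytes(48).toString('base64url').slice(0, 64);
  fs.writeFileSync(secretPath, secret, { encoding: 'utf-8' });
  return secret;
}
```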
-
-
- env: {
- ...process.env,
- PL_IMAGE: "scratch",
- PL_DATA_DB_ROOT: t,
- PL_DATA_PRIMARY_ROOT: t,
- PL_DATA_LIBRARY_ROOT: t,
- PL_DATA_WORKDIR_ROOT: t,
- PL_DATA_PACKAGE_ROOT: t,
- MINIO_IMAGE: "scratch",
- MINIO_STORAGE: t
- },
- stdio: "inherit"
- });
- s.status !== 0 && process.exit(s.status);
- }
- checkLicense(e, r) {
- if (!(e !== void 0 && e != "") && !(r !== void 0 && r != ""))
+ checkLicense(e, t) {
+ if (!(e !== void 0 && e != "") && !(t !== void 0 && t != ""))
  throw this.logger.error(`A license for Platforma Backend must be set.

  You can provide the license directly using the '--license' flag
@@ -1247,117 +1310,119 @@ or stored in '$HOME/.pl.license'.

  You can obtain the license from "https://licensing.milaboratories.com".`), new Error("The license was not provided.");
  }
- configureDockerStorage(e,
- const
- switch (e = e.toUpperCase(),
+ configureDockerStorage(e, t) {
+ const r = {}, n = t.type;
+ switch (e = e.toUpperCase(), n) {
  case "S3":
- return
+ return r[`PL_DATA_${e}_TYPE`] = "S3", r[`PL_DATA_${e}_S3_BUCKET`] = t.bucketName, t.endpoint && (r[`PL_DATA_${e}_S3_ENDPOINT`] = t.endpoint), t.presignEndpoint && (r[`PL_DATA_${e}_S3_PRESIGN_ENDPOINT`] = t.presignEndpoint), t.region && (r[`PL_DATA_${e}_S3_REGION`] = t.region), t.key && (r[`PL_DATA_${e}_S3_KEY`] = t.key), t.secret && (r[`PL_DATA_${e}_S3_SECRET`] = t.secret), r;
  case "FS":
- return
+ return r[`PL_DATA_${e}_TYPE`] = "FS", r;
  default:
-
+ _();
  }
  return {};
  }
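`configureDockerStorage` maps a storage descriptor onto the `PL_DATA_<NAME>_*` environment variables consumed by the compose template. A de-minified sketch of that mapping (type names here are illustrative):

```ts
type StorageSettings =
  | { type: 'FS'; rootPath: string }
  | {
      type: 'S3';
      bucketName: string;
      endpoint?: string;
      presignEndpoint?: string;
      region?: string;
      key?: string;
      secret?: string;
    };

// Build PL_DATA_<NAME>_* environment variables for one storage slot
// ("primary" or "library"), mirroring the S3/FS branches above.
function storageEnv(name: string, storage: StorageSettings): Record<string, string> {
  const prefix = `PL_DATA_${name.toUpperCase()}`;
  if (storage.type === 'FS') return { [`${prefix}_TYPE`]: 'FS' };
  const env: Record<string, string> = {
    [`${prefix}_TYPE`]: 'S3',
    [`${prefix}_S3_BUCKET`]: storage.bucketName,
  };
  if (storage.endpoint) env[`${prefix}_S3_ENDPOINT`] = storage.endpoint;
  if (storage.presignEndpoint) env[`${prefix}_S3_PRESIGN_ENDPOINT`] = storage.presignEndpoint;
  if (storage.region) env[`${prefix}_S3_REGION`] = storage.region;
  if (storage.key) env[`${prefix}_S3_KEY`] = storage.key;
  if (storage.secret) env[`${prefix}_S3_SECRET`] = storage.secret;
  return env;
}
```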
- renderRunInfo(e,
- var c,
- const
- switch (e.configPath &&
+ renderRunInfo(e, t = 10) {
+ var c, o;
+ const r = [], n = (f) => f.padStart(t, " ");
+ switch (e.configPath && r.push(`${n("config")}: ${e.configPath}`), e.apiAddr ? r.push(`${n("API")}: ${e.apiAddr}`) : e.apiPort ? r.push(`${n("API")}: 127.0.0.1:${e.apiPort.toString()}`) : r.push(`${n("API")}: 127.0.0.1:6345`), e.logPath && r.push(`${n("log")}: ${e.logPath}`), (c = e.primary) == null ? void 0 : c.type) {
  case void 0:
  break;
  case "FS":
-
+ r.push(`${n("primary")}: ${e.primary.rootPath}`);
  break;
  case "S3":
-
- `${
+ r.push(
+ `${n("primary")}: S3 at '${e.primary.endpoint ?? "AWS"}', bucket '${e.primary.bucketName}', prefix: '${e.primary.keyPrefix ?? ""}'`
  );
  break;
  default:
-
+ _();
  }
- switch ((
+ switch ((o = e.library) == null ? void 0 : o.type) {
  case void 0:
  break;
  case "FS":
-
+ r.push(`${n("library")}: ${e.library.rootPath}`);
  break;
  case "S3":
-
- `${
+ r.push(
+ `${n("library")}: S3 at '${e.library.endpoint ?? "AWS"}', bucket '${e.library.bucketName}', prefix: '${e.library.keyPrefix ?? ""}'`
  );
  break;
  default:
-
+ _();
  }
- return e.work &&
+ return e.work && r.push(`${n("workdirs")}: ${e.work.rootPath}`), e.dbPath && r.push(`${n("db")}: ${e.dbPath}`), r.join(`
  `);
  }
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  readComposeFile(e) {
- const
- return
+ const t = u.readFileSync(e);
+ return ue.parse(t.toString());
  }
- writeComposeFile(e,
-
+ writeComposeFile(e, t) {
+ u.writeFileSync(e, ue.stringify(t));
  }
  }
- function
-
-
-
-
-
+ function ge(a, e) {
+ for (const t of a) {
+ if (t.error)
+ throw t.error;
+ const r = e ?? "failed to run command";
+ if (t.status !== 0)
+ throw new Error(`${r}, process exited with code '${t.status}'`);
+ }
  }
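The new `ge` helper turns a batch of `spawnSync` results into thrown errors. An equivalent readable sketch, assuming the results come straight from `node:child_process`:

```ts
import type { SpawnSyncReturns } from 'node:child_process';

// Surface spawn errors and non-zero exit codes from a batch of spawnSync results.
function assertAllSucceeded(results: SpawnSyncReturns<Buffer>[], message?: string): void {
  for (const result of results) {
    if (result.error) throw result.error;
    if (result.status !== 0) {
      throw new Error(`${message ?? 'failed to run command'}, process exited with code '${result.status}'`);
    }
  }
}
```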
- const
+ const re = class re extends L {
  async run() {
- const { flags: e } = await this.parse(
- new
+ const { flags: e } = await this.parse(re), t = I(e["log-level"]);
+ new O(t).cleanupInstance();
  }
  };
-
- ...
+ l(re, "description", "Clear service state (forget last run command, destroy docker services, volumes and so on)"), l(re, "examples", ["<%= config.bin %> <%= command.id %>"]), l(re, "flags", {
+ ...A
  });
- let
- const
+ let Le = re;
+ const te = class te extends L {
  async run() {
- const { flags: e } = await this.parse(
- new
+ const { flags: e } = await this.parse(te), t = I(e["log-level"]);
+ new O(t).startLast();
  }
  };
-
- ...
+ l(te, "description", "Start last run service configuraiton"), l(te, "examples", ["<%= config.bin %> <%= command.id %>"]), l(te, "flags", {
+ ...A
  });
- let
- const
+ let Ae = te;
+ const ae = class ae extends L {
  async run() {
- const { flags: e } = await this.parse(
-
+ const { flags: e } = await this.parse(ae), t = I(e["log-level"]), r = new O(t);
+ d.currentInstance ? r.stopInstance(d.currentInstance) : t.warn("up/start command was not called for any instance, nothing to stop");
  }
  };
-
- ...
+ l(ae, "description", "Stop platforma service"), l(ae, "examples", ["<%= config.bin %> <%= command.id %>"]), l(ae, "flags", {
+ ...A
  });
- let
-
+ let Ie = ae;
+ var E;
+ let ot = (E = class extends L {
  async run() {
- const { flags: e } = await this.parse(
-
- const s = e["auth-enabled"], i = s ? {
+ const { flags: e } = await this.parse(E), t = I(e["log-level"]), r = new O(t);
+ r.mergeLicenseEnvs(e);
+ const n = "docker", s = e["auth-enabled"], i = s ? {
  enabled: s,
- drivers:
- } : void 0,
- for (const
-
- const
- t.startDocker(n, {
+ drivers: r.initAuthDriversList(e, ".")
+ } : void 0, c = e.storage ? m.join(".", e.storage) : d.instanceDir(n), o = [];
+ for (const h of e.mount ?? [])
+ o.push({ hostPath: h });
+ const f = e.arch ? `linux/${e.arch}` : void 0, g = r.createDocker(n, c, {
  primaryStorageURL: e["storage-primary"],
  workStoragePath: e["storage-work"],
  libraryStorageURL: e["storage-library"],
  image: e.image,
  version: e.version,
- platformOverride:
- customMounts:
+ platformOverride: f,
+ customMounts: o,
  license: e.license,
  licenseFile: e["license-file"],
  auth: i,
@@ -1368,188 +1433,432 @@ const J = class J extends F {
  debugAddr: e["debug-listen"],
  debugPort: e["debug-port"]
  });
+ r.switchInstance(g);
  }
- }
-
- ...
- ...
- ...
- ...
- ...
- ...
- ...te,
- ...ze,
- ...se,
- ...Pe,
+ }, l(E, "description", "Run platforma backend service with 'FS' primary storage type"), l(E, "examples", ["<%= config.bin %> <%= command.id %>"]), l(E, "flags", {
+ ...A,
+ ...z,
+ ...me,
+ ...W,
+ ...he,
+ ...V,
+ ...q,
  ...ye,
- ...
-
-
-
+ ...Y,
+ ...oe,
+ ...ce,
+ ...le
+ }), E);
+ var R;
+ let lt = (R = class extends L {
  async run() {
- const { flags: e } = await this.parse(
-
- const s = e["pl-workdir"] ?? ".", i = e.storage ?
-
- e["pl-sources"] && (
-
- e["grpc-listen"] ?
-
- e["monitoring-listen"] ?
-
- e["debug-listen"] ?
- const
- binaryPath:
+ const { flags: e } = await this.parse(R), t = I(e["log-level"]), r = new O(t);
+ r.mergeLicenseEnvs(e);
+ const n = "local", s = e["pl-workdir"] ?? ".", i = e.storage ? m.join(s, e.storage) : d.instanceDir(n), c = e["pl-log-file"] ? m.join(s, e["pl-log-file"]) : void 0, o = r.initAuthDriversList(e, s), f = e["auth-enabled"] ?? o !== void 0;
+ let g = e["pl-binary"];
+ e["pl-sources"] && (g = r.buildPlatforma({ repoRoot: e["pl-sources"] }));
+ let h = "127.0.0.1:6345";
+ e["grpc-listen"] ? h = e["grpc-listen"] : e["grpc-port"] && (h = `127.0.0.1:${e["grpc-port"]}`);
+ let w = "127.0.0.1:9090";
+ e["monitoring-listen"] ? w = e["monitoring-listen"] : e["monitoring-port"] && (w = `127.0.0.1:${e["monitoring-port"]}`);
+ let v = "127.0.0.1:9091";
+ e["debug-listen"] ? v = e["debug-listen"] : e["debug-port"] && (v = `127.0.0.1:${e["debug-port"]}`);
+ const y = {
+ binaryPath: g,
  version: e.version,
  configPath: e.config,
  workdir: e["pl-workdir"],
  primaryURL: e["storage-primary"],
  libraryURL: e["storage-library"],
  configOptions: {
- grpc: { listen:
- monitoring: { listen:
- debug: { listen:
+ grpc: { listen: h },
+ monitoring: { listen: w },
+ debug: { listen: v },
  license: { value: e.license, file: e["license-file"] },
- log: { path:
+ log: { path: c },
  localRoot: i,
- core: { auth: { enabled:
+ core: { auth: { enabled: f, drivers: o } },
  storages: {
  work: { type: "FS", rootPath: e["storage-work"] }
  }
  }
- };
-
- r.
+ }, P = r.createLocal(n, y);
+ y.binaryPath ? r.switchInstance(P) : Ge(t, { version: e.version }).then(() => {
+ const S = r.switchInstance(P);
+ setTimeout(() => {
+ for (const k of S)
+ k.unref();
+ }, 1e3);
+ }).catch(function(S) {
+ t.error(S.message);
  });
  }
+ }, l(R, "description", "Run Platforma Backend service as local process on current host (no docker container)"), l(R, "examples", ["<%= config.bin %> <%= command.id %>"]), l(R, "flags", {
+ ...A,
+ ...W,
+ ...z,
+ ...Ce,
+ ...Me,
+ ...je,
+ ...q,
+ ...Y,
+ ...oe,
+ ...ce,
+ ...le,
+ ...Ne,
+ ..._e,
+ ...V
+ }), R);
+ const M = class M extends L {
+ async run() {
+ const { flags: e, args: t } = await this.parse(M), r = I(e["log-level"]), n = new O(r), s = t.name;
+ e.all && (n.cleanupInstance(), process.exit(0)), s || (r.error("Please, specify name of instance to be removed or set '--all' flag instead"), process.exit(1)), n.cleanupInstance(s);
+ }
  };
-
- ...
-
-
-
-
-
-
- ...se,
- ...Pe,
- ...ye,
- ...be,
- ...Ke,
- ...qe,
- ...ie
+ l(M, "description", "List available instances"), l(M, "examples", ["<%= config.bin %> <%= command.id %>"]), l(M, "flags", {
+ ...A,
+ all: b.boolean({
+ description: "remove all known instances",
+ required: !1
+ })
+ }), l(M, "args", {
+ name: H.string({ required: !1 })
  });
- let
-
- let Vr = ($ = class extends F {
+ let Oe = M;
+ const j = class j extends L {
  async run() {
- const { flags: e } = await this.parse(
-
-
+ const { flags: e, args: t } = await this.parse(j), r = I(e["log-level"]), n = new O(r), s = t.name ?? d.currentInstanceName;
+ s || (r.info("no pl service instance selected. No service was stopped"), process.exit(0)), n.stopInstance(d.getInstanceInfo(s));
+ }
+ };
+ l(j, "description", "List available instances"), l(j, "examples", ["<%= config.bin %> <%= command.id %>"]), l(j, "flags", {
+ ...A
+ }), l(j, "args", {
+ name: H.string({ required: !1 })
+ });
+ let Ee = j;
+ const ne = class ne extends L {
+ async run() {
+ await this.parse(ne);
+ const e = d.instanceList, t = d.currentInstanceName;
+ for (const r of e) {
+ const n = [], s = d.getInstanceInfo(r);
+ d.isInstanceActive(s) && n.push("status:up"), n.push(`type:${s.type}`), console.log(r === t ? ` * ${r} (${n.join(", ")})` : `   ${r} (${n.join(", ")})`);
+ }
+ }
+ };
+ l(ne, "description", "List available instances"), l(ne, "examples", ["<%= config.bin %> <%= command.id %>"]), l(ne, "flags", {});
+ let Re = ne;
+ const B = class B extends L {
+ async run() {
+ const { flags: e, args: t } = await this.parse(B), r = I(e["log-level"]), n = new O(r), s = t.name ?? d.currentInstanceName;
+ s || (r.error("no pl service instance is selected. Select instance with 'select' command or provide name to 'up'"), process.exit(1));
+ const i = n.switchInstance(d.getInstanceInfo(s));
+ setTimeout(() => {
+ for (const c of i)
+ c.unref();
+ }, 1e3);
+ }
+ };
+ l(B, "description", "List available instances"), l(B, "examples", ["<%= config.bin %> <%= command.id %>"]), l(B, "flags", {
+ ...A
+ }), l(B, "args", {
+ name: H.string({ required: !1 })
+ });
+ let Te = B;
+ var T;
+ let dt = (T = class extends L {
+ async run() {
+ const { flags: e } = await this.parse(T), t = I(e["log-level"]), r = new O(t);
+ r.mergeLicenseEnvs(e);
+ const n = "docker-s3", s = e["auth-enabled"], i = s ? {
  enabled: s,
- drivers:
- } : void 0,
- for (const
-
- const
- t.startDockerS3(n, {
+ drivers: r.initAuthDriversList(e, ".")
+ } : void 0, c = e.storage ? m.join(".", e.storage) : d.instanceDir(n), o = [];
+ for (const w of e.mount ?? [])
+ o.push({ hostPath: w });
+ const f = e.arch ? `linux/${e.arch}` : void 0, g = e["minio-presign-host"] ? "minio" : "localhost", h = r.createDockerS3(n, c, {
  image: e.image,
  version: e.version,
  license: e.license,
  licenseFile: e["license-file"],
- platformOverride:
- customMounts:
+ platformOverride: f,
+ customMounts: o,
  auth: i,
  grpcAddr: e["grpc-listen"],
  grpcPort: e["grpc-port"],
  monitoringAddr: e["monitoring-listen"],
  monitoringPort: e["monitoring-port"],
  debugAddr: e["debug-listen"],
- debugPort: e["debug-port"]
+ debugPort: e["debug-port"],
+ presignHost: g
  });
+ r.switchInstance(h);
  }
- },
- ...
- ...
- ...
- ...
- ...
- ...
- ...
- ...
- ...
-
-
+ }, l(T, "description", "Run platforma backend service with 'S3' primary storage type"), l(T, "examples", ["<%= config.bin %> <%= command.id %>"]), l(T, "flags", {
+ ...A,
+ ...z,
+ ...me,
+ ...W,
+ ...he,
+ ...V,
+ ...q,
+ ...ye,
+ ...Y,
+ ...hr
+ }), T);
+ var F;
+ let gt = (F = class extends L {
  async run() {
- const { flags: e } = await this.parse(
-
- const s = e["pl-workdir"] ?? ".", i = e.storage ?
-
- e["pl-sources"] && (
-
- e["grpc-listen"] ?
-
- e["monitoring-listen"] ?
-
- e["debug-listen"] ?
- const
- binaryPath:
+ const { flags: e } = await this.parse(F), t = I(e["log-level"]), r = new O(t);
+ r.mergeLicenseEnvs(e);
+ const n = "local-s3", s = e["pl-workdir"] ?? ".", i = e.storage ? m.join(s, e.storage) : d.instanceDir(n), c = e["pl-log-file"] ? m.join(s, e["pl-log-file"]) : void 0, o = r.initAuthDriversList(e, s), f = e["auth-enabled"] ?? o !== void 0;
+ let g = e["pl-binary"];
+ e["pl-sources"] && (g = r.buildPlatforma({ repoRoot: e["pl-sources"] }));
+ let h = "127.0.0.1:6345";
+ e["grpc-listen"] ? h = e["grpc-listen"] : e["grpc-port"] && (h = `127.0.0.1:${e["grpc-port"]}`);
+ let w = "127.0.0.1:9090";
+ e["monitoring-listen"] ? w = e["monitoring-listen"] : e["monitoring-port"] && (w = `127.0.0.1:${e["monitoring-port"]}`);
+ let v = "127.0.0.1:9091";
+ e["debug-listen"] ? v = e["debug-listen"] : e["debug-port"] && (v = `127.0.0.1:${e["debug-port"]}`);
+ const y = {
+ binaryPath: g,
  version: e.version,
  configPath: e.config,
  workdir: e["pl-workdir"],
  primaryURL: e["storage-primary"],
  libraryURL: e["storage-library"],
- minioPort: e["s3-
- minioConsolePort: e["s3-console-
+ minioPort: e["s3-port"],
+ minioConsolePort: e["s3-console-port"],
  configOptions: {
- grpc: { listen:
- monitoring: { listen:
- debug: { listen:
+ grpc: { listen: h },
+ monitoring: { listen: w },
+ debug: { listen: v },
  license: { value: e.license, file: e["license-file"] },
- log: { path:
+ log: { path: c },
  localRoot: i,
  core: {
- auth: { enabled:
+ auth: { enabled: f, drivers: o }
  },
  storages: {
  work: { type: "FS", rootPath: e["storage-work"] }
  }
  }
+ }, P = r.createLocalS3(n, y);
+ y.binaryPath ? r.switchInstance(P) : Ge(t, { version: e.version }).then(() => {
+ const S = r.switchInstance(P);
+ setTimeout(() => {
+ for (const k of S)
+ k.unref();
+ }, 1e3);
+ }).catch(function(S) {
+ t.error(S.message);
+ });
+ }
+ }, l(F, "description", "Run Platforma Backend service as local process on current host (no docker container)"), l(F, "examples", ["<%= config.bin %> <%= command.id %>"]), l(F, "flags", {
+ ...A,
+ ...W,
+ ...z,
+ ...mr,
+ ...Ce,
+ ...Me,
+ ...je,
+ ...q,
+ ...Y,
+ ...oe,
+ ...ce,
+ ...le,
+ ...Ne,
+ ..._e,
+ ...V
+ }), F);
+ const G = class G extends L {
+ async run() {
+ const { flags: e, args: t } = await this.parse(G), r = I(e["log-level"]), n = new O(r);
+ n.mergeLicenseEnvs(e);
+ const s = t.name, i = e["auth-enabled"], c = i ? {
+ enabled: i,
+ drivers: n.initAuthDriversList(e, ".")
+ } : void 0, o = e.storage ? m.join(".", e.storage) : d.instanceDir(s), f = [];
+ for (const h of e.mount ?? [])
+ f.push({ hostPath: h });
+ const g = e.arch ? `linux/${e.arch}` : void 0;
+ n.createDocker(s, o, {
+ primaryStorageURL: e["storage-primary"],
+ workStoragePath: e["storage-work"],
+ libraryStorageURL: e["storage-library"],
+ image: e.image,
+ version: e.version,
+ platformOverride: g,
+ customMounts: f,
+ license: e.license,
+ licenseFile: e["license-file"],
+ auth: c,
+ grpcAddr: e["grpc-listen"],
+ grpcPort: e["grpc-port"],
+ monitoringAddr: e["monitoring-listen"],
+ monitoringPort: e["monitoring-port"],
+ debugAddr: e["debug-listen"],
+ debugPort: e["debug-port"]
+ }), r.info(`Instance '${s}' was created. To start it run 'up' command`);
+ }
+ };
+ l(G, "description", "Run Platforma Backend service as docker container on current host"), l(G, "examples", ["<%= config.bin %> <%= command.id %>"]), l(G, "flags", {
+ ...A,
+ ...z,
+ ...me,
+ ...W,
+ ...he,
+ ...V,
+ ...q,
+ ...ye,
+ ...Y,
+ ...oe,
+ ...ce,
+ ...le
+ }), l(G, "args", {
+ name: H.string({ required: !0 })
+ });
+ let Fe = G;
+ const U = class U extends L {
+ async run() {
+ const { flags: e, args: t } = await this.parse(U), r = I(e["log-level"]), n = new O(r);
+ n.mergeLicenseEnvs(e);
+ const s = t.name, i = e["pl-workdir"] ?? ".", c = e.storage ? m.join(i, e.storage) : d.instanceDir(s), o = e["pl-log-file"] ? m.join(i, e["pl-log-file"]) : void 0, f = n.initAuthDriversList(e, i), g = e["auth-enabled"] ?? f !== void 0;
+ let h = e["pl-binary"];
+ e["pl-sources"] && (h = n.buildPlatforma({ repoRoot: e["pl-sources"] }));
+ let w = "127.0.0.1:6345";
+ e["grpc-listen"] ? w = e["grpc-listen"] : e["grpc-port"] && (w = `127.0.0.1:${e["grpc-port"]}`);
+ let v = "127.0.0.1:9090";
+ e["monitoring-listen"] ? v = e["monitoring-listen"] : e["monitoring-port"] && (v = `127.0.0.1:${e["monitoring-port"]}`);
+ let y = "127.0.0.1:9091";
+ e["debug-listen"] ? y = e["debug-listen"] : e["debug-port"] && (y = `127.0.0.1:${e["debug-port"]}`);
+ const P = {
+ binaryPath: h,
+ version: e.version,
+ configPath: e.config,
+ workdir: e["pl-workdir"],
+ primaryURL: e["storage-primary"],
+ libraryURL: e["storage-library"],
+ configOptions: {
+ grpc: { listen: w },
+ monitoring: { listen: v },
+ debug: { listen: y },
+ license: { value: e.license, file: e["license-file"] },
+ log: { path: o },
+ localRoot: c,
+ core: { auth: { enabled: g, drivers: f } },
+ storages: {
+ work: { type: "FS", rootPath: e["storage-work"] }
+ }
+ }
  };
-
-
+ switch (t.mode) {
+ case "s3": {
+ r.info("Creating instance configuration, data directory and other stuff..."), n.createLocalS3(s, {
+ ...P,
+ minioPort: e["s3-port"],
+ minioConsolePort: e["s3-console-port"]
+ });
+ break;
+ }
+ case void 0: {
+ e["s3-port"] && r.warn("flag 's3-port' is only for 's3' mode"), e["s3-console-port"] && r.warn("flag 's3-console-port' is only for 's3' mode"), n.createLocal(s, P);
+ break;
+ }
+ }
+ if (P.binaryPath) {
+ r.info(`Instance '${s}' was created. To start it run 'up' command`);
+ return;
+ }
+ Ge(r, { version: e.version }).then(() => r.info(`Instance '${s}' was created. To start it run 'pl up' command`)).catch(function(k) {
+ r.error(k.message);
  });
  }
  };
-
- ...
- ...
- ...
- ...
- ...
- ...
- ...
- ...
- ...
- ...
+ l(U, "description", "Run Platforma Backend service as local process on current host (no docker container)"), l(U, "examples", ["<%= config.bin %> <%= command.id %>"]), l(U, "flags", {
+ ...A,
+ ...W,
+ ...mr,
+ ...z,
+ ...Ce,
+ ...Me,
+ ...je,
+ ...q,
+ ...Y,
+ ...oe,
+ ...ce,
+ ...le,
+ ...Ne,
+ ..._e,
+ ...V
+ }), l(U, "args", {
+ name: H.string({ required: !0 }),
+ mode: H.string({ options: ["s3"], required: !1 })
+ });
+ let xe = U;
+ const J = class J extends L {
+ async run() {
+ const { flags: e, args: t } = await this.parse(J), r = I(e["log-level"]), n = new O(r);
+ n.mergeLicenseEnvs(e);
+ const s = t.name, i = e["auth-enabled"], c = i ? {
+ enabled: i,
+ drivers: n.initAuthDriversList(e, ".")
+ } : void 0, o = e.storage ? m.join(".", e.storage) : d.instanceDir(s), f = [];
+ for (const w of e.mount ?? [])
+ f.push({ hostPath: w });
+ const g = e.arch ? `linux/${e.arch}` : void 0, h = e["minio-presign-host"] ? "minio" : "localhost";
+ n.createDockerS3(s, o, {
+ image: e.image,
+ version: e.version,
+ license: e.license,
+ licenseFile: e["license-file"],
+ platformOverride: g,
+ customMounts: f,
+ auth: c,
+ grpcAddr: e["grpc-listen"],
+ grpcPort: e["grpc-port"],
+ monitoringAddr: e["monitoring-listen"],
+ monitoringPort: e["monitoring-port"],
+ debugAddr: e["debug-listen"],
+ debugPort: e["debug-port"],
+ presignHost: h
+ }), r.info(`Instance '${s}' was created. To start it run 'up' command`), e["minio-presign-host"] && r.info(" NOTE: make sure you have 'minio' host in your hosts file as 127.0.0.1 address");
+ }
+ };
+ l(J, "description", "Run Platforma Backend service as docker container on current host with MinIO as local S3 storage"), l(J, "examples", ["<%= config.bin %> <%= command.id %>"]), l(J, "flags", {
+ ...A,
+ ...z,
+ ...me,
+ ...W,
+ ...he,
+ ...V,
+ ...q,
  ...ye,
- ...
- ...
-
-
+ ...Y,
+ ...hr
+ }), l(J, "args", {
+ name: H.string({ required: !0 })
  });
- let
- const
- "create-block":
- reset:
- start:
- stop:
- "start:docker":
- "start:local":
- "
- "
+ let De = J;
+ const It = {
+ "create-block": Se,
+ reset: Le,
+ start: Ae,
+ stop: Ie,
+ "start:docker": ot,
+ "start:local": lt,
+ "svc:delete": Oe,
+ "svc:down": Ee,
+ "svc:list": Re,
+ "svc:up": Te,
+ "start:docker:s3": dt,
+ "start:local:s3": gt,
+ "svc:create:docker": Fe,
+ "svc:create:local": xe,
+ "svc:create:docker:s3": De
  };
  export {
-
+ It as COMMANDS
  };
  //# sourceMappingURL=index.mjs.map
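For quick reference, the exported command map now covers the new `svc:*` family alongside the existing `start:*` commands. The list below is derived from the `COMMANDS` object above; it is a summary sketch, not part of the published package:

```ts
// Command IDs exported from dist/index.mjs in 3.0.0 (new svc:* entries included).
const commandIds = [
  'create-block',
  'reset',
  'start',
  'stop',
  'start:docker',
  'start:local',
  'start:docker:s3',
  'start:local:s3',
  'svc:create:docker',
  'svc:create:docker:s3',
  'svc:create:local',
  'svc:up',
  'svc:down',
  'svc:list',
  'svc:delete',
] as const;

type CommandId = (typeof commandIds)[number];
```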