@archlast/cli 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +141 -0
- package/dist/analyzer.d.ts +96 -0
- package/dist/analyzer.d.ts.map +1 -0
- package/dist/analyzer.js +404 -0
- package/dist/auth.d.ts +14 -0
- package/dist/auth.d.ts.map +1 -0
- package/dist/auth.js +106 -0
- package/dist/cli.d.ts +3 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +322875 -0
- package/dist/commands/build.d.ts +6 -0
- package/dist/commands/build.d.ts.map +1 -0
- package/dist/commands/build.js +36 -0
- package/dist/commands/config.d.ts +8 -0
- package/dist/commands/config.d.ts.map +1 -0
- package/dist/commands/config.js +23 -0
- package/dist/commands/data.d.ts +6 -0
- package/dist/commands/data.d.ts.map +1 -0
- package/dist/commands/data.js +300 -0
- package/dist/commands/deploy.d.ts +9 -0
- package/dist/commands/deploy.d.ts.map +1 -0
- package/dist/commands/deploy.js +59 -0
- package/dist/commands/dev.d.ts +10 -0
- package/dist/commands/dev.d.ts.map +1 -0
- package/dist/commands/dev.js +132 -0
- package/dist/commands/generate.d.ts +6 -0
- package/dist/commands/generate.d.ts.map +1 -0
- package/dist/commands/generate.js +100 -0
- package/dist/commands/init.d.ts +7 -0
- package/dist/commands/init.d.ts.map +1 -0
- package/dist/commands/logs.d.ts +10 -0
- package/dist/commands/logs.d.ts.map +1 -0
- package/dist/commands/logs.js +38 -0
- package/dist/commands/pull.d.ts +16 -0
- package/dist/commands/pull.d.ts.map +1 -0
- package/dist/commands/pull.js +415 -0
- package/dist/commands/restart.d.ts +11 -0
- package/dist/commands/restart.d.ts.map +1 -0
- package/dist/commands/restart.js +63 -0
- package/dist/commands/start.d.ts +11 -0
- package/dist/commands/start.d.ts.map +1 -0
- package/dist/commands/start.js +74 -0
- package/dist/commands/status.d.ts +8 -0
- package/dist/commands/status.d.ts.map +1 -0
- package/dist/commands/status.js +69 -0
- package/dist/commands/stop.d.ts +8 -0
- package/dist/commands/stop.d.ts.map +1 -0
- package/dist/commands/stop.js +23 -0
- package/dist/commands/upgrade.d.ts +12 -0
- package/dist/commands/upgrade.d.ts.map +1 -0
- package/dist/commands/upgrade.js +77 -0
- package/dist/docker/compose.d.ts +3 -0
- package/dist/docker/compose.d.ts.map +1 -0
- package/dist/docker/compose.js +47 -0
- package/dist/docker/config.d.ts +12 -0
- package/dist/docker/config.d.ts.map +1 -0
- package/dist/docker/config.js +183 -0
- package/dist/docker/manager.d.ts +19 -0
- package/dist/docker/manager.d.ts.map +1 -0
- package/dist/docker/manager.js +239 -0
- package/dist/docker/ports.d.ts +6 -0
- package/dist/docker/ports.d.ts.map +1 -0
- package/dist/docker/restart-on-deploy.d.ts +6 -0
- package/dist/docker/restart-on-deploy.d.ts.map +1 -0
- package/dist/docker/types.d.ts +36 -0
- package/dist/docker/types.d.ts.map +1 -0
- package/dist/docker/types.js +1 -0
- package/dist/events-listener.d.ts +19 -0
- package/dist/events-listener.d.ts.map +1 -0
- package/dist/events-listener.js +105 -0
- package/dist/generator.d.ts +44 -0
- package/dist/generator.d.ts.map +1 -0
- package/dist/generator.js +1816 -0
- package/dist/generators/di.d.ts +21 -0
- package/dist/generators/di.d.ts.map +1 -0
- package/dist/generators/di.js +100 -0
- package/dist/index.d.ts +7 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +4 -0
- package/dist/project.d.ts +18 -0
- package/dist/project.d.ts.map +1 -0
- package/dist/protocol.d.ts +58 -0
- package/dist/protocol.d.ts.map +1 -0
- package/dist/protocol.js +5 -0
- package/dist/uploader.d.ts +63 -0
- package/dist/uploader.d.ts.map +1 -0
- package/dist/uploader.js +255 -0
- package/dist/watcher.d.ts +13 -0
- package/dist/watcher.d.ts.map +1 -0
- package/dist/watcher.js +38 -0
- package/package.json +58 -0
- package/scripts/postinstall.cjs +65 -0
package/dist/docker/manager.js
@@ -0,0 +1,239 @@
import * as fs from "fs";
import { Readable } from "stream";
import Docker from "dockerode";
export class DockerManager {
    docker;
    constructor(docker) {
        this.docker = docker ?? new Docker();
    }
    async checkAvailable() {
        try {
            await this.docker.ping();
            return true;
        }
        catch {
            return false;
        }
    }
    async pullImage(imageTag, onProgress) {
        const stream = await this.docker.pull(imageTag);
        await new Promise((resolve, reject) => {
            this.docker.modem.followProgress(stream, (error) => {
                if (error) {
                    reject(error);
                    return;
                }
                resolve();
            }, onProgress);
        });
    }
    async ensureImage(imageTag) {
        try {
            await this.docker.getImage(imageTag).inspect();
        }
        catch (error) {
            await this.pullImage(imageTag);
        }
    }
    async ensureVolume(volumeName) {
        try {
            await this.docker.getVolume(volumeName).inspect();
        }
        catch (error) {
            await this.docker.createVolume({ Name: volumeName });
        }
    }
    async start(config) {
        const imageTag = `${config.image}:${config.tag}`;
        await this.ensureImage(imageTag);
        await this.ensureVolume(config.dataVolumeName);
        fs.mkdirSync(config.configDir, { recursive: true });
        fs.mkdirSync(config.deployDir, { recursive: true });
        const container = this.docker.getContainer(config.containerName);
        try {
            const info = await container.inspect();
            const imageMatches = info.Config?.Image === imageTag;
            const envMatches = (info.Config?.Env || []).includes(`ARCHLAST_PORT=${config.port}`);
            const portKey = `${config.port}/tcp`;
            const portBindings = info.HostConfig?.PortBindings || {};
            const portMatches = Array.isArray(portBindings[portKey]) &&
                portBindings[portKey]?.some((binding) => binding.HostPort === String(config.port));
            const bindSources = info.HostConfig?.Binds || [];
            const expectedBinds = [
                `${config.dataVolumeName}:/data`,
                `${formatBindPath(config.configDir)}:/config:ro`,
                `${formatBindPath(config.deployDir)}:/app/server/.archlast-deploy:ro`,
            ];
            const bindsMatch = expectedBinds.every((bind) => bindSources.includes(bind));
            if (!imageMatches || !envMatches || !portMatches || !bindsMatch) {
                await container.remove({ force: true });
            }
            else if (info.State?.Running) {
                return;
            }
            else {
                await container.start();
                return;
            }
        }
        catch (error) {
            if (error?.statusCode !== 404) {
                throw error;
            }
        }
        const env = Object.entries(config.env).map(([key, value]) => `${key}=${value}`);
        await this.docker.createContainer({
            name: config.containerName,
            Image: imageTag,
            Env: env,
            ExposedPorts: {
                [`${config.port}/tcp`]: {},
            },
            HostConfig: {
                Binds: [
                    `${config.dataVolumeName}:/data`,
                    `${formatBindPath(config.configDir)}:/config:ro`,
                    `${formatBindPath(config.deployDir)}:/app/server/.archlast-deploy:ro`,
                ],
                PortBindings: {
                    [`${config.port}/tcp`]: [{ HostPort: String(config.port) }],
                },
                RestartPolicy: {
                    Name: "unless-stopped",
                },
            },
        }).then((created) => created.start());
    }
    async stop(containerName) {
        const container = this.docker.getContainer(containerName);
        try {
            const info = await container.inspect();
            if (info.State?.Running) {
                await container.stop({ t: 10 });
            }
            await container.remove({ force: true });
            return true;
        }
        catch (error) {
            if (error?.statusCode === 404) {
                return false;
            }
            throw error;
        }
    }
    async restart(config) {
        await this.stop(config.containerName);
        await this.start(config);
    }
    async status(containerName) {
        const container = this.docker.getContainer(containerName);
        try {
            const info = await container.inspect();
            const ports = parsePorts(info.NetworkSettings?.Ports || {});
            const stats = await this.fetchStats(container);
            const startedAt = info.State?.StartedAt || undefined;
            const uptimeSeconds = startedAt
                ? Math.max(0, Math.floor((Date.now() - new Date(startedAt).getTime()) / 1000))
                : undefined;
            return {
                exists: true,
                state: info.State?.Status,
                status: info.State?.Status,
                health: info.State?.Health?.Status,
                startedAt,
                uptimeSeconds,
                ports,
                cpuPercent: stats?.cpuPercent,
                memoryUsageBytes: stats?.memoryUsageBytes,
                memoryLimitBytes: stats?.memoryLimitBytes,
            };
        }
        catch (error) {
            if (error?.statusCode === 404) {
                return { exists: false };
            }
            throw error;
        }
    }
    async logs(containerName, options = {}) {
        const container = this.docker.getContainer(containerName);
        const tail = normalizeTail(options.tail);
        const baseOptions = {
            stdout: true,
            stderr: true,
            timestamps: true,
            ...(tail !== undefined ? { tail } : {}),
        };
        if (options.follow === false) {
            const buffer = await container.logs({
                ...baseOptions,
                follow: false,
            });
            return Readable.from(buffer);
        }
        return container.logs({
            ...baseOptions,
            follow: true,
        });
    }
    demuxStream(stream, stdout, stderr) {
        this.docker.modem.demuxStream(stream, stdout, stderr);
    }
    async fetchStats(container) {
        try {
            const stats = await container.stats({ stream: false });
            const cpuDelta = stats.cpu_stats.cpu_usage.total_usage -
                stats.precpu_stats.cpu_usage.total_usage;
            const systemDelta = stats.cpu_stats.system_cpu_usage - stats.precpu_stats.system_cpu_usage;
            const cpuCount = stats.cpu_stats.online_cpus ||
                stats.cpu_stats.cpu_usage.percpu_usage?.length ||
                1;
            let cpuPercent;
            if (systemDelta > 0 && cpuDelta > 0) {
                cpuPercent = (cpuDelta / systemDelta) * cpuCount * 100;
            }
            return {
                cpuPercent,
                memoryUsageBytes: stats.memory_stats.usage,
                memoryLimitBytes: stats.memory_stats.limit,
            };
        }
        catch {
            return undefined;
        }
    }
}
function formatBindPath(input) {
    if (process.platform === "win32") {
        return input.replace(/\\/g, "/");
    }
    return input;
}
function parsePorts(ports) {
    const results = [];
    for (const [containerPort, bindings] of Object.entries(ports)) {
        const portNumber = parseInt(containerPort.split("/")[0], 10);
        if (!bindings || bindings.length === 0) {
            results.push({ containerPort: portNumber });
            continue;
        }
        for (const binding of bindings) {
            results.push({
                containerPort: portNumber,
                hostPort: parseInt(binding.HostPort, 10),
                hostIp: binding.HostIp,
            });
        }
    }
    return results;
}
function normalizeTail(value) {
    if (typeof value === "number") {
        return value;
    }
    if (typeof value === "string" && value.trim() !== "") {
        const parsed = Number(value);
        return Number.isNaN(parsed) ? undefined : parsed;
    }
    return undefined;
}
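manager.js is a thin lifecycle wrapper around dockerode: it pulls the image, ensures the data volume, reconciles any existing container against the expected image, env, port bindings, and mounts, and recreates the container when they drift. A minimal usage sketch, assuming a DockerConfig shaped like the interface declared in package/dist/docker/types.d.ts further down; the deep dist import path, image name, ports, and paths below are illustrative placeholders, not values taken from the package:

// Hypothetical usage sketch -- values are illustrative, not from @archlast/cli.
import { DockerManager } from "@archlast/cli/dist/docker/manager.js"; // assumed deep import path

const manager = new DockerManager(); // defaults to a local dockerode client

if (await manager.checkAvailable()) {
  await manager.start({
    projectPath: "/work/my-app",                      // placeholder project path
    image: "archlast/server",                         // placeholder image name
    tag: "latest",
    containerName: "archlast-dev",                    // placeholder container name
    port: 4000,
    dataVolumeName: "archlast-data",
    configDir: "/work/my-app/.archlast/config",
    deployDir: "/work/my-app/.archlast/deploy",
    composePath: "/work/my-app/.archlast/compose.yml",
    env: { ARCHLAST_PORT: "4000" },                   // start() checks for this env entry
    restartOnDeploy: true,
  });
  const status = await manager.status("archlast-dev");
  console.log(status.state, status.uptimeSeconds);
}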
package/dist/docker/ports.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"ports.d.ts","sourceRoot":"","sources":["../../src/docker/ports.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,YAAY,EAAE,MAAM,SAAS,CAAC;AAKvC,wBAAsB,mBAAmB,CAAC,MAAM,EAAE,YAAY,GAAG,OAAO,CAAC;IACrE,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,EAAE,OAAO,CAAC;CACpB,CAAC,CAmBD"}
package/dist/docker/restart-on-deploy.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"restart-on-deploy.d.ts","sourceRoot":"","sources":["../../src/docker/restart-on-deploy.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,YAAY,EAAE,MAAM,SAAS,CAAC;AAGvC,wBAAsB,qBAAqB,CACvC,MAAM,EAAE,YAAY,EACpB,SAAS,EAAE,MAAM,EACjB,aAAa,GAAE,OAAc,GAC9B,OAAO,CAAC;IAAE,SAAS,EAAE,OAAO,CAAC;IAAC,MAAM,CAAC,EAAE,MAAM,CAAA;CAAE,CAAC,CA8BlD"}
package/dist/docker/types.d.ts
@@ -0,0 +1,36 @@
export interface DockerConfig {
    projectPath: string;
    image: string;
    tag: string;
    containerName: string;
    port: number;
    dataVolumeName: string;
    configDir: string;
    deployDir: string;
    composePath: string;
    env: Record<string, string>;
    restartOnDeploy: boolean;
    configFilePath?: string;
    envFilePath?: string;
}
export interface DockerContainerStatus {
    exists: boolean;
    state?: string;
    status?: string;
    health?: string;
    startedAt?: string;
    uptimeSeconds?: number;
    ports?: Array<{
        containerPort: number;
        hostPort?: number;
        hostIp?: string;
    }>;
    cpuPercent?: number;
    memoryUsageBytes?: number;
    memoryLimitBytes?: number;
}
export interface DockerLogsOptions {
    follow?: boolean;
    tail?: number | string;
}
//# sourceMappingURL=types.d.ts.map
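DockerLogsOptions feeds DockerManager.logs(), which returns a live multiplexed Docker stream when following; demuxStream() splits it back into stdout and stderr. A small sketch of that pairing, with the container name and the deep dist import path as assumptions:

// Hypothetical sketch: follow container logs and split the multiplexed stream.
import { DockerManager } from "@archlast/cli/dist/docker/manager.js"; // assumed deep import path

const manager = new DockerManager();
// follow: true returns the live dockerode stream; tail bounds the initial backlog
const stream = await manager.logs("archlast-dev", { follow: true, tail: 100 });
// Docker multiplexes stdout/stderr into one stream; demuxStream splits them again
manager.demuxStream(stream, process.stdout, process.stderr);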
package/dist/docker/types.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/docker/types.ts"],"names":[],"mappings":"AAAA,MAAM,WAAW,YAAY;IACzB,WAAW,EAAE,MAAM,CAAC;IACpB,KAAK,EAAE,MAAM,CAAC;IACd,GAAG,EAAE,MAAM,CAAC;IACZ,aAAa,EAAE,MAAM,CAAC;IACtB,IAAI,EAAE,MAAM,CAAC;IACb,cAAc,EAAE,MAAM,CAAC;IACvB,SAAS,EAAE,MAAM,CAAC;IAClB,SAAS,EAAE,MAAM,CAAC;IAClB,WAAW,EAAE,MAAM,CAAC;IACpB,GAAG,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAC5B,eAAe,EAAE,OAAO,CAAC;IACzB,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,WAAW,CAAC,EAAE,MAAM,CAAC;CACxB;AAED,MAAM,WAAW,qBAAqB;IAClC,MAAM,EAAE,OAAO,CAAC;IAChB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,KAAK,CAAC,EAAE,KAAK,CAAC;QAAE,aAAa,EAAE,MAAM,CAAC;QAAC,QAAQ,CAAC,EAAE,MAAM,CAAC;QAAC,MAAM,CAAC,EAAE,MAAM,CAAA;KAAE,CAAC,CAAC;IAC7E,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,gBAAgB,CAAC,EAAE,MAAM,CAAC;IAC1B,gBAAgB,CAAC,EAAE,MAAM,CAAC;CAC7B;AAED,MAAM,WAAW,iBAAiB;IAC9B,MAAM,CAAC,EAAE,OAAO,CAAC;IACjB,IAAI,CAAC,EAAE,MAAM,GAAG,MAAM,CAAC;CAC1B"}
package/dist/docker/types.js
@@ -0,0 +1 @@
export {};
package/dist/events-listener.d.ts
@@ -0,0 +1,19 @@
/**
 * Server Events Listener
 * Listens to Server-Sent Events (SSE) from the Archlast server
 * Enables reactive deployments - when server changes happen, CLI responds
 */
export interface EventListenerOptions {
    serverUrl: string;
    adminToken?: string;
    onSchemaUpdated?: () => Promise<void> | void;
    onDeploymentComplete?: (type: "full" | "delta") => Promise<void> | void;
    onDeploymentFailed?: () => Promise<void> | void;
    onError?: (error: Error) => void;
}
/**
 * Listen to server deployment events via SSE
 * Returns a cleanup function to stop listening
 */
export declare function listenToServerEvents(options: EventListenerOptions): () => void;
//# sourceMappingURL=events-listener.d.ts.map
package/dist/events-listener.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"events-listener.d.ts","sourceRoot":"","sources":["../src/events-listener.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAYH,MAAM,WAAW,oBAAoB;IACjC,SAAS,EAAE,MAAM,CAAC;IAClB,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,eAAe,CAAC,EAAE,MAAM,OAAO,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC;IAC7C,oBAAoB,CAAC,EAAE,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,KAAK,OAAO,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC;IACxE,kBAAkB,CAAC,EAAE,MAAM,OAAO,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC;IAChD,OAAO,CAAC,EAAE,CAAC,KAAK,EAAE,KAAK,KAAK,IAAI,CAAC;CACpC;AAED;;;GAGG;AACH,wBAAgB,oBAAoB,CAAC,OAAO,EAAE,oBAAoB,GAAG,MAAM,IAAI,CA8G9E"}
package/dist/events-listener.js
@@ -0,0 +1,105 @@
/**
 * Server Events Listener
 * Listens to Server-Sent Events (SSE) from the Archlast server
 * Enables reactive deployments - when server changes happen, CLI responds
 */
import chalk from "chalk";
/**
 * Listen to server deployment events via SSE
 * Returns a cleanup function to stop listening
 */
export function listenToServerEvents(options) {
    const { serverUrl, adminToken, onSchemaUpdated, onDeploymentComplete, onDeploymentFailed, onError } = options;
    const eventsUrl = new URL("/_archlast/admin/deployments/events", serverUrl);
    // Build request headers
    const headers = {
        "Accept": "text/event-stream",
    };
    if (adminToken) {
        headers["X-Admin-Token"] = adminToken;
    }
    // Use EventSource for SSE (native browser API, need polyfill for Node)
    // Since Node doesn't have native EventSource, we'll use fetch with streaming
    let aborted = false;
    let abortController = null;
    async function connect() {
        if (aborted)
            return;
        abortController = new AbortController();
        try {
            const response = await fetch(eventsUrl.toString(), {
                headers,
                signal: abortController.signal,
            });
            if (!response.ok) {
                throw new Error(`SSE connection failed: ${response.status} ${response.statusText}`);
            }
            if (!response.body) {
                throw new Error("Response body is null");
            }
            const reader = response.body.getReader();
            const decoder = new TextDecoder();
            let buffer = "";
            while (!aborted) {
                const { done, value } = await reader.read();
                if (done) {
                    // Connection closed, try to reconnect
                    if (!aborted) {
                        setTimeout(connect, 3000);
                    }
                    break;
                }
                // Decode and process SSE data
                buffer += decoder.decode(value, { stream: true });
                const lines = buffer.split("\n");
                buffer = lines.pop() || ""; // Keep incomplete line in buffer
                for (const line of lines) {
                    if (line.startsWith("data: ")) {
                        const data = line.slice(6);
                        if (data.trim() === "" || data === ": heartbeat")
                            continue;
                        try {
                            const event = JSON.parse(data);
                            handleEvent(event);
                        }
                        catch (e) {
                            // Ignore invalid JSON
                        }
                    }
                }
            }
        }
        catch (error) {
            if (!aborted && error.name !== "AbortError") {
                onError?.(error);
                // Reconnect after delay
                setTimeout(connect, 5000);
            }
        }
    }
    async function handleEvent(event) {
        switch (event.type) {
            case "schema_updated":
                console.log(chalk.magenta("\n📊 Schema updated on server - regenerating types..."));
                await onSchemaUpdated?.();
                break;
            case "deployment_complete":
                const deploymentType = event.data?.deploymentType || "delta";
                await onDeploymentComplete?.(deploymentType);
                break;
            case "deployment_failed":
                console.log(chalk.red("\n❌ Deployment failed on server"));
                await onDeploymentFailed?.();
                break;
        }
    }
    // Start connection
    connect().catch((err) => {
        onError?.(err);
    });
    // Return cleanup function
    return () => {
        aborted = true;
        abortController?.abort();
    };
}
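listenToServerEvents() keeps a long-lived fetch open against /_archlast/admin/deployments/events, parses each "data:" SSE line as JSON, dispatches the schema_updated / deployment_complete / deployment_failed callbacks, and reconnects after a few seconds when the stream drops. A wiring sketch under stated assumptions: the server URL, the ARCHLAST_ADMIN_TOKEN variable name, and the deep dist import path are placeholders, not values from the package:

// Hypothetical wiring of the SSE listener; URL, token variable, and import path are placeholders.
import { listenToServerEvents } from "@archlast/cli/dist/events-listener.js";

const stop = listenToServerEvents({
  serverUrl: "http://localhost:4000",            // placeholder server URL
  adminToken: process.env.ARCHLAST_ADMIN_TOKEN,  // placeholder env variable name
  onSchemaUpdated: async () => {
    // e.g. re-run local type generation when the server reports a schema change
  },
  onDeploymentComplete: (type) => console.log(`deployment complete (${type})`),
  onDeploymentFailed: () => console.error("deployment failed on server"),
  onError: (error) => console.error(error.message),
});

// Later, stop listening and abort the underlying fetch:
stop();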
package/dist/generator.d.ts
@@ -0,0 +1,44 @@
import { AnalysisResult } from "./analyzer";
export declare class TypeGenerator {
    private archlastPath;
    constructor(archlastPath: string);
    generate(analysis: AnalysisResult): Promise<void>;
    private analyzeTypesWithTsMorph;
    private analyzeRpcTypesWithTsMorph;
    private extractRpcTypeInfo;
    private extractTypeInfo;
    private parseZodSchema;
    private inferReturnType;
    private generateDataModelFromSchema;
    /**
     * Scan schema folder for all .ts files
     */
    private scanSchemaFolder;
    private generateApiTypes;
    private generateRpcTypes;
    /**
     * Generate runtime tRPC router definition file
     * This creates a real t.router() that imports and wraps the actual RPC procedures
     */
    private generateTrpcRouter;
    private generateServerTypes;
    private generateIndexExport;
    /**
     * Generate CRUD handlers for all collections in the schema
     * Creates _generated/crud/ directory with individual collection files
     */
    generateCrudHandlers(generatedDir: string): Promise<void>;
    /**
     * Extract table names from the DataModel type string
     */
    private extractTableNames;
    /**
     * Generate CRUD handler code for a single table (public for CLI command)
     */
    generateCrudHandlerForTable(tableName: string): string;
    /**
     * Generate the index.ts file for CRUD handlers
     */
    private generateCrudIndex;
}
//# sourceMappingURL=generator.d.ts.map
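The TypeGenerator surface above takes an AnalysisResult from ./analyzer and emits generated types plus per-table CRUD handlers. A speculative sketch of driving that public surface; the constructor argument, directory paths, table name, and deep dist import paths are assumptions, and the analysis value is left abstract because its shape is not shown in this diff:

// Speculative sketch; paths, table name, and import paths are assumptions, not from the package.
import { TypeGenerator } from "@archlast/cli/dist/generator.js";
import type { AnalysisResult } from "@archlast/cli/dist/analyzer.js";

declare const analysis: AnalysisResult; // produced elsewhere by the package's analyzer

const generator = new TypeGenerator("/work/my-app/.archlast");             // assumed archlastPath
await generator.generate(analysis);                                        // emit generated types
await generator.generateCrudHandlers("/work/my-app/.archlast/_generated"); // per-collection CRUD files
const userCrudSource = generator.generateCrudHandlerForTable("users");     // single-table handler source
console.log(userCrudSource.length);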
package/dist/generator.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"generator.d.ts","sourceRoot":"","sources":["../src/generator.ts"],"names":[],"mappings":"AAKA,OAAO,EAAE,cAAc,EAAE,MAAM,YAAY,CAAC;AAqD5C,qBAAa,aAAa;IACtB,OAAO,CAAC,YAAY,CAAS;gBAEjB,YAAY,EAAE,MAAM;IAK1B,QAAQ,CAAC,QAAQ,EAAE,cAAc,GAAG,OAAO,CAAC,IAAI,CAAC;YAiDzC,uBAAuB;YAgDvB,0BAA0B;IA6CxC,OAAO,CAAC,kBAAkB;IA6H1B,OAAO,CAAC,eAAe;IA+GvB,OAAO,CAAC,cAAc;IAiDtB,OAAO,CAAC,eAAe;YA6RT,2BAA2B;IA4PzC;;OAEG;YACW,gBAAgB;YAyBhB,gBAAgB;YAuDhB,gBAAgB;IAgF9B;;;OAGG;YACW,kBAAkB;YA2GlB,mBAAmB;IA0iBjC,OAAO,CAAC,mBAAmB;IAS3B;;;OAGG;IACG,oBAAoB,CAAC,YAAY,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAoC/D;;OAEG;IACH,OAAO,CAAC,iBAAiB;IAuBzB;;OAEG;IACH,2BAA2B,CAAC,SAAS,EAAE,MAAM,GAAG,MAAM;IA2HtD;;OAEG;IACH,OAAO,CAAC,iBAAiB;CAkB5B"}