tova 0.12.0 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/analyzer/analyzer.js +37 -0
- package/src/cli/deploy.js +117 -5
- package/src/codegen/codegen.js +17 -51
- package/src/deploy/deploy.js +22 -20
- package/src/deploy/infer.js +13 -3
- package/src/deploy/provision.js +67 -50
- package/src/deploy/ssh-runner.js +330 -0
- package/src/parser/parser.js +7 -26
- package/src/version.js +1 -1
package/package.json
CHANGED
package/src/analyzer/analyzer.js
CHANGED
|
@@ -751,6 +751,11 @@ export class Analyzer {
|
|
|
751
751
|
// ─── Visitors ─────────────────────────────────────────────
|
|
752
752
|
|
|
753
753
|
visitProgram(node) {
|
|
754
|
+
// Hoist top-level function and type declarations so forward references
|
|
755
|
+
// within the same module (or merged directory group) resolve correctly —
|
|
756
|
+
// matches JavaScript hoisting semantics of the emitted code.
|
|
757
|
+
this._hoistTopLevel(node.body);
|
|
758
|
+
|
|
754
759
|
for (const stmt of node.body) {
|
|
755
760
|
if (this.tolerant) {
|
|
756
761
|
try { this.visitNode(stmt); } catch (e) { /* skip nodes that crash in tolerant mode */ }
|
|
@@ -760,6 +765,37 @@ export class Analyzer {
|
|
|
760
765
|
}
|
|
761
766
|
}
|
|
762
767
|
|
|
768
|
+
_hoistTopLevel(body) {
|
|
769
|
+
for (const stmt of body) {
|
|
770
|
+
const target = stmt && stmt.type === 'ExportDefault' ? stmt.declaration : stmt;
|
|
771
|
+
if (!target) continue;
|
|
772
|
+
if (target.type === 'FunctionDeclaration') {
|
|
773
|
+
if (this.currentScope.lookupLocal && this.currentScope.lookupLocal(target.name)) continue;
|
|
774
|
+
try {
|
|
775
|
+
const sym = new Symbol(target.name, 'function', target.returnType, false, target.loc);
|
|
776
|
+
sym._params = target.params.map(p => p.name);
|
|
777
|
+
sym._totalParamCount = target.params.length;
|
|
778
|
+
sym._requiredParamCount = target.params.filter(p => !p.defaultValue).length;
|
|
779
|
+
sym._paramTypes = target.params.map(p => p.typeAnnotation || null);
|
|
780
|
+
sym._typeParams = target.typeParams || [];
|
|
781
|
+
sym.isPublic = target.isPublic || false;
|
|
782
|
+
sym.isWasm = !!(target.decorators && target.decorators.some(d => d.name === 'wasm'));
|
|
783
|
+
sym._forward = true;
|
|
784
|
+
this.currentScope.define(target.name, sym);
|
|
785
|
+
} catch { /* duplicate — visitor will report */ }
|
|
786
|
+
} else if (target.type === 'TypeDeclaration' || target.type === 'TypeAlias') {
|
|
787
|
+
if (this.currentScope.lookupLocal && this.currentScope.lookupLocal(target.name)) continue;
|
|
788
|
+
try {
|
|
789
|
+
const sym = new Symbol(target.name, 'type', null, false, target.loc);
|
|
790
|
+
sym._typeParams = target.typeParams || [];
|
|
791
|
+
sym._forward = true;
|
|
792
|
+
sym.isPublic = target.isPublic || false;
|
|
793
|
+
this.currentScope.define(target.name, sym);
|
|
794
|
+
} catch { /* duplicate — visitor will report */ }
|
|
795
|
+
}
|
|
796
|
+
}
|
|
797
|
+
}
|
|
798
|
+
|
|
763
799
|
visitNode(node) {
|
|
764
800
|
if (!node) return;
|
|
765
801
|
|
|
@@ -3152,6 +3188,7 @@ export class Analyzer {
|
|
|
3152
3188
|
}
|
|
3153
3189
|
|
|
3154
3190
|
visitTypeAlias(node) {
|
|
3191
|
+
this._checkNamingConvention(node.name, 'type', node.loc);
|
|
3155
3192
|
try {
|
|
3156
3193
|
const typeSym = new Symbol(node.name, 'type', null, false, node.loc);
|
|
3157
3194
|
// Store type alias info for resolution
|
package/src/cli/deploy.js
CHANGED
|
@@ -1,7 +1,18 @@
|
|
|
1
|
-
import {
|
|
1
|
+
import { resolve } from 'path';
|
|
2
|
+
import { readFileSync, statSync } from 'fs';
|
|
3
|
+
import { createInterface } from 'readline';
|
|
4
|
+
import { color, findFiles } from './utils.js';
|
|
5
|
+
import { resolveConfig } from '../config/resolve.js';
|
|
6
|
+
import { Lexer } from '../lexer/lexer.js';
|
|
7
|
+
import { Parser } from '../parser/parser.js';
|
|
8
|
+
import { Program } from '../parser/ast.js';
|
|
9
|
+
import { CodeGenerator } from '../codegen/codegen.js';
|
|
2
10
|
|
|
3
11
|
export async function deployCommand(args) {
|
|
4
|
-
const { parseDeployArgs } = await import('../deploy/deploy.js');
|
|
12
|
+
const { parseDeployArgs, deploy } = await import('../deploy/deploy.js');
|
|
13
|
+
const { makeRunner, realExecutor, makeDryRunExecutor } = await import('../deploy/ssh-runner.js');
|
|
14
|
+
const { buildProject } = await import('./build.js');
|
|
15
|
+
|
|
5
16
|
const deployArgs = parseDeployArgs(args);
|
|
6
17
|
|
|
7
18
|
if (!deployArgs.envName && !deployArgs.list) {
|
|
@@ -9,7 +20,108 @@ export async function deployCommand(args) {
|
|
|
9
20
|
process.exit(1);
|
|
10
21
|
}
|
|
11
22
|
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
23
|
+
const projectDir = process.cwd();
|
|
24
|
+
const isDryRun = process.env.TOVA_DEPLOY_DRY_RUN === '1';
|
|
25
|
+
const executor = isDryRun ? makeDryRunExecutor() : realExecutor;
|
|
26
|
+
const runner = makeRunner(executor);
|
|
27
|
+
|
|
28
|
+
deployArgs.confirm = isDryRun ? null : promptConfirm;
|
|
29
|
+
|
|
30
|
+
if (isDryRun) {
|
|
31
|
+
console.log(color.cyan(' [dry-run] No SSH commands will be executed.'));
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
// --list with no env: skip parsing the project
|
|
35
|
+
if (deployArgs.list && !deployArgs.envName) {
|
|
36
|
+
if (!deployArgs.server) {
|
|
37
|
+
console.error(color.red('Error: --list requires --server <user@host> when no environment is specified'));
|
|
38
|
+
process.exit(1);
|
|
39
|
+
}
|
|
40
|
+
try {
|
|
41
|
+
await deploy({ type: 'Program', body: [] }, {}, deployArgs, projectDir, runner);
|
|
42
|
+
} catch (err) {
|
|
43
|
+
console.error(color.red(`Error: ${err.message}`));
|
|
44
|
+
process.exit(1);
|
|
45
|
+
}
|
|
46
|
+
return;
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
const config = resolveConfig(projectDir);
|
|
50
|
+
const entry = resolve(config.project?.entry || '.');
|
|
51
|
+
const scanDir = existsDir(entry) ? entry : projectDir;
|
|
52
|
+
// Run the production build using the resolved scanDir so the deploy command
|
|
53
|
+
// honours the same fallback (entry → project root) as the deploy parser.
|
|
54
|
+
deployArgs.buildProject = (extraArgs) => buildProject([scanDir, ...extraArgs]);
|
|
55
|
+
const tovaFiles = findFiles(scanDir, '.tova');
|
|
56
|
+
|
|
57
|
+
if (tovaFiles.length === 0) {
|
|
58
|
+
console.error(color.red(`Error: no .tova files found in ${scanDir}`));
|
|
59
|
+
process.exit(1);
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
let ast, output;
|
|
63
|
+
try {
|
|
64
|
+
({ ast, output } = parseAndGenerate(tovaFiles));
|
|
65
|
+
} catch (err) {
|
|
66
|
+
console.error(color.red(`Error parsing project: ${err.message}`));
|
|
67
|
+
process.exit(1);
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
if (!output.deploy || !output.deploy[deployArgs.envName]) {
|
|
71
|
+
const available = output.deploy ? Object.keys(output.deploy) : [];
|
|
72
|
+
const hint = available.length > 0
|
|
73
|
+
? ` Available environments: ${available.join(', ')}`
|
|
74
|
+
: ' No deploy blocks found in project.';
|
|
75
|
+
console.error(color.red(`Error: no deploy block named "${deployArgs.envName}".${hint}`));
|
|
76
|
+
process.exit(1);
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
try {
|
|
80
|
+
await deploy(ast, output, deployArgs, projectDir, runner);
|
|
81
|
+
} catch (err) {
|
|
82
|
+
console.error(color.red(`Error: ${err.message}`));
|
|
83
|
+
process.exit(1);
|
|
84
|
+
}
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
function existsDir(path) {
|
|
88
|
+
try {
|
|
89
|
+
return statSync(path).isDirectory();
|
|
90
|
+
} catch {
|
|
91
|
+
return false;
|
|
92
|
+
}
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
function parseAndGenerate(files) {
|
|
96
|
+
const mergedBody = [];
|
|
97
|
+
for (const file of files) {
|
|
98
|
+
const source = readFileSync(file, 'utf-8');
|
|
99
|
+
const lexer = new Lexer(source, file);
|
|
100
|
+
const tokens = lexer.tokenize();
|
|
101
|
+
const parser = new Parser(tokens, file);
|
|
102
|
+
const fileAst = parser.parse();
|
|
103
|
+
if (parser.errors && parser.errors.length > 0) {
|
|
104
|
+
throw new Error(`${file}: ${parser.errors[0].message || parser.errors[0]}`);
|
|
105
|
+
}
|
|
106
|
+
for (const node of fileAst.body) mergedBody.push(node);
|
|
107
|
+
}
|
|
108
|
+
const ast = new Program(mergedBody);
|
|
109
|
+
const gen = new CodeGenerator(ast);
|
|
110
|
+
const output = gen.generate();
|
|
111
|
+
return { ast, output };
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
function promptConfirm(message) {
|
|
115
|
+
// Without a TTY (e.g., CI, piped scripts), refuse the destructive action.
|
|
116
|
+
if (!process.stdin.isTTY) {
|
|
117
|
+
console.error(color.red(`${message} Refusing without a TTY — re-run interactively to confirm.`));
|
|
118
|
+
return Promise.resolve(false);
|
|
119
|
+
}
|
|
120
|
+
return new Promise(resolve => {
|
|
121
|
+
const rl = createInterface({ input: process.stdin, output: process.stdout });
|
|
122
|
+
rl.question(`${message} Type 'yes' to confirm: `, answer => {
|
|
123
|
+
rl.close();
|
|
124
|
+
resolve(answer.trim().toLowerCase() === 'yes');
|
|
125
|
+
});
|
|
126
|
+
});
|
|
15
127
|
}
|
package/src/codegen/codegen.js
CHANGED
|
@@ -5,57 +5,23 @@
|
|
|
5
5
|
import { SharedCodegen } from './shared-codegen.js';
|
|
6
6
|
import { BUILTIN_NAMES } from '../stdlib/inline.js';
|
|
7
7
|
import { BlockRegistry } from '../registry/register-all.js';
|
|
8
|
-
import {
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
function
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
}
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
function getSecurityCodegen() {
|
|
26
|
-
if (!_SecurityCodegen) _SecurityCodegen = _require('./security-codegen.js').SecurityCodegen;
|
|
27
|
-
return _SecurityCodegen;
|
|
28
|
-
}
|
|
29
|
-
|
|
30
|
-
let _CliCodegen = null;
|
|
31
|
-
function getCliCodegen() {
|
|
32
|
-
if (!_CliCodegen) _CliCodegen = _require('./cli-codegen.js').CliCodegen;
|
|
33
|
-
return _CliCodegen;
|
|
34
|
-
}
|
|
35
|
-
|
|
36
|
-
let _EdgeCodegen = null;
|
|
37
|
-
function getEdgeCodegen() {
|
|
38
|
-
if (!_EdgeCodegen) _EdgeCodegen = _require('./edge-codegen.js').EdgeCodegen;
|
|
39
|
-
return _EdgeCodegen;
|
|
40
|
-
}
|
|
41
|
-
|
|
42
|
-
let _DeployCodegen = null;
|
|
43
|
-
function getDeployCodegen() {
|
|
44
|
-
if (!_DeployCodegen) _DeployCodegen = _require('./deploy-codegen.js').DeployCodegen;
|
|
45
|
-
return _DeployCodegen;
|
|
46
|
-
}
|
|
47
|
-
|
|
48
|
-
let _ThemeCodegen = null;
|
|
49
|
-
function getThemeCodegen() {
|
|
50
|
-
if (!_ThemeCodegen) _ThemeCodegen = _require('./theme-codegen.js').ThemeCodegen;
|
|
51
|
-
return _ThemeCodegen;
|
|
52
|
-
}
|
|
53
|
-
|
|
54
|
-
let _AuthCodegen = null;
|
|
55
|
-
function getAuthCodegen() {
|
|
56
|
-
if (!_AuthCodegen) _AuthCodegen = _require('./auth-codegen.js').AuthCodegen;
|
|
57
|
-
return _AuthCodegen;
|
|
58
|
-
}
|
|
8
|
+
import { ServerCodegen } from './server-codegen.js';
|
|
9
|
+
import { BrowserCodegen } from './browser-codegen.js';
|
|
10
|
+
import { SecurityCodegen } from './security-codegen.js';
|
|
11
|
+
import { CliCodegen } from './cli-codegen.js';
|
|
12
|
+
import { EdgeCodegen } from './edge-codegen.js';
|
|
13
|
+
import { DeployCodegen } from './deploy-codegen.js';
|
|
14
|
+
import { ThemeCodegen } from './theme-codegen.js';
|
|
15
|
+
import { AuthCodegen } from './auth-codegen.js';
|
|
16
|
+
|
|
17
|
+
function getServerCodegen() { return ServerCodegen; }
|
|
18
|
+
function getBrowserCodegen() { return BrowserCodegen; }
|
|
19
|
+
function getSecurityCodegen() { return SecurityCodegen; }
|
|
20
|
+
function getCliCodegen() { return CliCodegen; }
|
|
21
|
+
function getEdgeCodegen() { return EdgeCodegen; }
|
|
22
|
+
function getDeployCodegen() { return DeployCodegen; }
|
|
23
|
+
function getThemeCodegen() { return ThemeCodegen; }
|
|
24
|
+
function getAuthCodegen() { return AuthCodegen; }
|
|
59
25
|
|
|
60
26
|
export class CodeGenerator {
|
|
61
27
|
constructor(ast, filename = '<stdin>', options = {}) {
|
package/src/deploy/deploy.js
CHANGED
|
@@ -134,9 +134,11 @@ export function printPlan(infra) {
|
|
|
134
134
|
* @param {string} projectDir - Absolute path to the project directory
|
|
135
135
|
* @returns {Object} result with plan, infra, and status
|
|
136
136
|
*/
|
|
137
|
-
export async function deploy(ast, buildResult, deployArgs, projectDir) {
|
|
138
|
-
// Infer full infrastructure manifest from AST
|
|
139
|
-
|
|
137
|
+
export async function deploy(ast, buildResult, deployArgs, projectDir, runner = null) {
|
|
138
|
+
// Infer full infrastructure manifest from AST. When an env name is given,
|
|
139
|
+
// filter to that deploy block so env vars and databases do not leak across
|
|
140
|
+
// environments in multi-environment programs.
|
|
141
|
+
const infra = inferInfrastructure(ast, deployArgs.envName);
|
|
140
142
|
|
|
141
143
|
// Override environment name from CLI args
|
|
142
144
|
if (deployArgs.envName) {
|
|
@@ -153,65 +155,65 @@ export async function deploy(ast, buildResult, deployArgs, projectDir) {
|
|
|
153
155
|
if (envConfig.branch) infra.branch = envConfig.branch;
|
|
154
156
|
}
|
|
155
157
|
|
|
156
|
-
// Plan mode — just show what would be deployed
|
|
158
|
+
// Plan mode — just show what would be deployed (no SSH execution)
|
|
157
159
|
if (deployArgs.plan) {
|
|
158
160
|
printPlan(infra);
|
|
159
161
|
return { action: 'plan', infra };
|
|
160
162
|
}
|
|
161
163
|
|
|
162
|
-
// Rollback
|
|
164
|
+
// Rollback
|
|
163
165
|
if (deployArgs.rollback) {
|
|
164
166
|
console.log(` Rolling back ${deployArgs.envName}...`);
|
|
165
|
-
|
|
167
|
+
if (runner) await runner.rollback(infra);
|
|
166
168
|
return { action: 'rollback', infra };
|
|
167
169
|
}
|
|
168
170
|
|
|
169
|
-
// Logs
|
|
171
|
+
// Logs
|
|
170
172
|
if (deployArgs.logs) {
|
|
171
173
|
const since = deployArgs.since || '1 hour ago';
|
|
172
174
|
const instance = deployArgs.instance !== null ? ` (instance ${deployArgs.instance})` : '';
|
|
173
175
|
console.log(` Fetching logs for ${deployArgs.envName}${instance} since ${since}...`);
|
|
174
|
-
|
|
176
|
+
if (runner) await runner.logs(infra, { since, instance: deployArgs.instance });
|
|
175
177
|
return { action: 'logs', infra };
|
|
176
178
|
}
|
|
177
179
|
|
|
178
|
-
// Status
|
|
180
|
+
// Status
|
|
179
181
|
if (deployArgs.status) {
|
|
180
182
|
console.log(` Checking status of ${deployArgs.envName}...`);
|
|
181
|
-
|
|
183
|
+
if (runner) await runner.status(infra);
|
|
182
184
|
return { action: 'status', infra };
|
|
183
185
|
}
|
|
184
186
|
|
|
185
|
-
// SSH
|
|
187
|
+
// Interactive SSH
|
|
186
188
|
if (deployArgs.ssh) {
|
|
187
189
|
console.log(` Opening SSH session to ${deployArgs.envName}...`);
|
|
188
|
-
|
|
190
|
+
if (runner) await runner.ssh(infra);
|
|
189
191
|
return { action: 'ssh', infra };
|
|
190
192
|
}
|
|
191
193
|
|
|
192
|
-
//
|
|
194
|
+
// Git push-to-deploy
|
|
193
195
|
if (deployArgs.setupGit) {
|
|
194
196
|
console.log(` Setting up git push-to-deploy for ${deployArgs.envName}...`);
|
|
195
|
-
|
|
197
|
+
if (runner) await runner.setupGit(infra);
|
|
196
198
|
return { action: 'setup-git', infra };
|
|
197
199
|
}
|
|
198
200
|
|
|
199
|
-
// Remove
|
|
201
|
+
// Remove
|
|
200
202
|
if (deployArgs.remove) {
|
|
201
203
|
console.log(` Removing deployment ${deployArgs.envName}...`);
|
|
202
|
-
|
|
204
|
+
if (runner) await runner.remove(infra, { confirm: deployArgs.confirm });
|
|
203
205
|
return { action: 'remove', infra };
|
|
204
206
|
}
|
|
205
207
|
|
|
206
|
-
// List
|
|
208
|
+
// List
|
|
207
209
|
if (deployArgs.list) {
|
|
208
210
|
console.log(' Listing deployments...');
|
|
209
|
-
|
|
211
|
+
if (runner) await runner.list(infra, { server: deployArgs.server });
|
|
210
212
|
return { action: 'list', infra };
|
|
211
213
|
}
|
|
212
214
|
|
|
213
|
-
// Default
|
|
215
|
+
// Default — full deploy
|
|
214
216
|
console.log(` Deploying to ${deployArgs.envName}...`);
|
|
215
|
-
|
|
217
|
+
if (runner) await runner.deploy(infra, { projectDir, buildProject: deployArgs.buildProject });
|
|
216
218
|
return { action: 'deploy', infra };
|
|
217
219
|
}
|
package/src/deploy/infer.js
CHANGED
|
@@ -75,9 +75,13 @@ function collectEnvCalls(node) {
|
|
|
75
75
|
* Infer infrastructure requirements from the full program AST.
|
|
76
76
|
*
|
|
77
77
|
* @param {Object} ast - Program AST with ast.body array of top-level blocks
|
|
78
|
+
* @param {string} [envName] - Optional environment name. When provided, only
|
|
79
|
+
* the matching deploy block contributes env, databases, and config fields,
|
|
80
|
+
* so multi-environment programs do not leak env vars or databases across
|
|
81
|
+
* environments. When omitted, all deploy blocks are merged (legacy behavior).
|
|
78
82
|
* @returns {Object} Complete infrastructure manifest
|
|
79
83
|
*/
|
|
80
|
-
export function inferInfrastructure(ast) {
|
|
84
|
+
export function inferInfrastructure(ast, envName) {
|
|
81
85
|
const manifest = JSON.parse(JSON.stringify(MANIFEST_DEFAULTS));
|
|
82
86
|
const blockTypes = new Set();
|
|
83
87
|
const deployBlocks = [];
|
|
@@ -171,9 +175,15 @@ export function inferInfrastructure(ast) {
|
|
|
171
175
|
}
|
|
172
176
|
}
|
|
173
177
|
|
|
178
|
+
// Filter to the named environment if provided, so multi-env programs do
|
|
179
|
+
// not leak env vars or databases across environments.
|
|
180
|
+
const blocksToMerge = envName
|
|
181
|
+
? deployBlocks.filter(b => b.name === envName)
|
|
182
|
+
: deployBlocks;
|
|
183
|
+
|
|
174
184
|
// Merge explicit deploy config via DeployCodegen
|
|
175
|
-
if (
|
|
176
|
-
const deployConfig = DeployCodegen.mergeDeployBlocks(
|
|
185
|
+
if (blocksToMerge.length > 0) {
|
|
186
|
+
const deployConfig = DeployCodegen.mergeDeployBlocks(blocksToMerge);
|
|
177
187
|
// Apply deploy config fields to manifest
|
|
178
188
|
if (deployConfig.name) manifest.name = deployConfig.name;
|
|
179
189
|
if (deployConfig.server) manifest.server = deployConfig.server;
|
package/src/deploy/provision.js
CHANGED
|
@@ -1,12 +1,18 @@
|
|
|
1
1
|
// Provisioning Script Generator for the Tova language
|
|
2
2
|
// Generates idempotent bash scripts from an infrastructure manifest.
|
|
3
|
+
//
|
|
4
|
+
// The script auto-detects whether it is running as root or as a non-root user
|
|
5
|
+
// with `sudo`, so it works on both styles of VPS access (root@host on a
|
|
6
|
+
// freshly-imaged DigitalOcean droplet, or ubuntu@host on EC2). All privileged
|
|
7
|
+
// commands route through the `$SUDO` shell variable.
|
|
3
8
|
|
|
4
9
|
/**
|
|
5
10
|
* Generate a complete provisioning shell script from an infrastructure manifest.
|
|
6
11
|
*
|
|
7
12
|
* The script is idempotent — it checks before installing (command -v, dpkg, etc.)
|
|
8
13
|
* and is organized in layers:
|
|
9
|
-
* Layer
|
|
14
|
+
* Layer 0: Privilege detection
|
|
15
|
+
* Layer 1: System (tova user, Bun, Caddy, UFW)
|
|
10
16
|
* Layer 2: Databases (PostgreSQL, Redis — conditional)
|
|
11
17
|
* Layer 3: App directories
|
|
12
18
|
* Layer 5: Caddy config
|
|
@@ -26,18 +32,42 @@ export function generateProvisionScript(manifest) {
|
|
|
26
32
|
lines.push(`# Generated by Tova deploy — idempotent, safe to re-run`);
|
|
27
33
|
lines.push('');
|
|
28
34
|
|
|
35
|
+
// ── Layer 0: Privilege detection ──────────────────────────
|
|
36
|
+
lines.push('# Detect privilege model: root → no prefix, otherwise route through sudo');
|
|
37
|
+
lines.push('if [ "$(id -u)" -eq 0 ]; then');
|
|
38
|
+
lines.push(' SUDO=""');
|
|
39
|
+
lines.push('elif command -v sudo >/dev/null 2>&1; then');
|
|
40
|
+
lines.push(' SUDO="sudo"');
|
|
41
|
+
lines.push('else');
|
|
42
|
+
lines.push(' echo "Error: this script requires either root or sudo" >&2');
|
|
43
|
+
lines.push(' exit 1');
|
|
44
|
+
lines.push('fi');
|
|
45
|
+
lines.push('');
|
|
46
|
+
|
|
29
47
|
// ── Layer 1: System ────────────────────────────────────────
|
|
30
48
|
lines.push('# ═══════════════════════════════════════════════════════════');
|
|
31
49
|
lines.push('# Layer 1: System dependencies');
|
|
32
50
|
lines.push('# ═══════════════════════════════════════════════════════════');
|
|
33
51
|
lines.push('');
|
|
34
52
|
|
|
53
|
+
// Create the tova user FIRST so Bun lands at /home/tova/.bun where the
|
|
54
|
+
// generated systemd unit expects it.
|
|
55
|
+
lines.push('# Create tova system user (created early so Bun installs into its $HOME)');
|
|
56
|
+
lines.push('if ! id "tova" &>/dev/null; then');
|
|
57
|
+
lines.push(' $SUDO useradd --system --create-home --shell /bin/bash tova');
|
|
58
|
+
lines.push('fi');
|
|
59
|
+
lines.push('');
|
|
60
|
+
|
|
35
61
|
if (manifest.requires && manifest.requires.bun) {
|
|
36
|
-
lines.push('# Install Bun runtime');
|
|
37
|
-
lines.push('if !
|
|
38
|
-
lines.push(' echo "Installing Bun..."');
|
|
39
|
-
|
|
40
|
-
lines.push('
|
|
62
|
+
lines.push('# Install Bun runtime as the tova user (required by curl|bash installer)');
|
|
63
|
+
lines.push('if [ ! -x /home/tova/.bun/bin/bun ]; then');
|
|
64
|
+
lines.push(' echo "Installing Bun for the tova user..."');
|
|
65
|
+
// unzip is required by the official Bun installer.
|
|
66
|
+
lines.push(' if ! command -v unzip >/dev/null 2>&1; then');
|
|
67
|
+
lines.push(' $SUDO apt-get update -qq');
|
|
68
|
+
lines.push(' $SUDO apt-get install -y -qq unzip ca-certificates curl');
|
|
69
|
+
lines.push(' fi');
|
|
70
|
+
lines.push(" $SUDO -u tova bash -c 'curl -fsSL https://bun.sh/install | bash'");
|
|
41
71
|
lines.push('fi');
|
|
42
72
|
lines.push('');
|
|
43
73
|
}
|
|
@@ -46,12 +76,12 @@ export function generateProvisionScript(manifest) {
|
|
|
46
76
|
lines.push('# Install Caddy web server');
|
|
47
77
|
lines.push('if ! command -v caddy &>/dev/null; then');
|
|
48
78
|
lines.push(' echo "Installing Caddy..."');
|
|
49
|
-
lines.push(' apt-get update -qq');
|
|
50
|
-
lines.push(' apt-get install -y -qq debian-keyring debian-archive-keyring apt-transport-https curl');
|
|
51
|
-
lines.push(' curl -1sLf "https://dl.cloudsmith.io/public/caddy/stable/gpg.key" | gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg');
|
|
52
|
-
lines.push(' curl -1sLf "https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt" | tee /etc/apt/sources.list.d/caddy-stable.list');
|
|
53
|
-
lines.push(' apt-get update -qq');
|
|
54
|
-
lines.push(' apt-get install -y -qq caddy');
|
|
79
|
+
lines.push(' $SUDO apt-get update -qq');
|
|
80
|
+
lines.push(' $SUDO apt-get install -y -qq debian-keyring debian-archive-keyring apt-transport-https curl');
|
|
81
|
+
lines.push(' curl -1sLf "https://dl.cloudsmith.io/public/caddy/stable/gpg.key" | $SUDO gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg');
|
|
82
|
+
lines.push(' curl -1sLf "https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt" | $SUDO tee /etc/apt/sources.list.d/caddy-stable.list >/dev/null');
|
|
83
|
+
lines.push(' $SUDO apt-get update -qq');
|
|
84
|
+
lines.push(' $SUDO apt-get install -y -qq caddy');
|
|
55
85
|
lines.push('fi');
|
|
56
86
|
lines.push('');
|
|
57
87
|
}
|
|
@@ -59,21 +89,14 @@ export function generateProvisionScript(manifest) {
|
|
|
59
89
|
if (manifest.requires && manifest.requires.ufw) {
|
|
60
90
|
lines.push('# Configure UFW firewall');
|
|
61
91
|
lines.push('if command -v ufw &>/dev/null; then');
|
|
62
|
-
lines.push(' ufw allow 22/tcp');
|
|
63
|
-
lines.push(' ufw allow 80/tcp');
|
|
64
|
-
lines.push(' ufw allow 443/tcp');
|
|
65
|
-
lines.push(' echo "y" | ufw enable || true');
|
|
92
|
+
lines.push(' $SUDO ufw allow 22/tcp');
|
|
93
|
+
lines.push(' $SUDO ufw allow 80/tcp');
|
|
94
|
+
lines.push(' $SUDO ufw allow 443/tcp');
|
|
95
|
+
lines.push(' echo "y" | $SUDO ufw enable || true');
|
|
66
96
|
lines.push('fi');
|
|
67
97
|
lines.push('');
|
|
68
98
|
}
|
|
69
99
|
|
|
70
|
-
// Create tova system user
|
|
71
|
-
lines.push('# Create tova system user');
|
|
72
|
-
lines.push('if ! id "tova" &>/dev/null; then');
|
|
73
|
-
lines.push(' useradd --system --create-home --shell /bin/bash tova');
|
|
74
|
-
lines.push('fi');
|
|
75
|
-
lines.push('');
|
|
76
|
-
|
|
77
100
|
// ── Layer 2: Databases ─────────────────────────────────────
|
|
78
101
|
const databases = manifest.databases || [];
|
|
79
102
|
const hasPostgres = databases.some(d => d.engine === 'postgres');
|
|
@@ -92,14 +115,14 @@ export function generateProvisionScript(manifest) {
|
|
|
92
115
|
lines.push('# Install PostgreSQL');
|
|
93
116
|
lines.push('if ! command -v psql &>/dev/null; then');
|
|
94
117
|
lines.push(' echo "Installing PostgreSQL..."');
|
|
95
|
-
lines.push(' apt-get update -qq');
|
|
96
|
-
lines.push(' apt-get install -y -qq postgresql postgresql-contrib');
|
|
97
|
-
lines.push(' systemctl enable postgresql');
|
|
98
|
-
lines.push(' systemctl start postgresql');
|
|
118
|
+
lines.push(' $SUDO apt-get update -qq');
|
|
119
|
+
lines.push(' $SUDO apt-get install -y -qq postgresql postgresql-contrib');
|
|
120
|
+
lines.push(' $SUDO systemctl enable postgresql');
|
|
121
|
+
lines.push(' $SUDO systemctl start postgresql');
|
|
99
122
|
lines.push('fi');
|
|
100
123
|
lines.push('');
|
|
101
124
|
lines.push(`# Create database: ${dbName}`);
|
|
102
|
-
lines.push(
|
|
125
|
+
lines.push(`$SUDO -u postgres psql -tc "SELECT 1 FROM pg_database WHERE datname = '${dbName}'" | grep -q 1 || $SUDO -u postgres createdb "${dbName}"`);
|
|
103
126
|
lines.push('');
|
|
104
127
|
}
|
|
105
128
|
|
|
@@ -107,10 +130,10 @@ export function generateProvisionScript(manifest) {
|
|
|
107
130
|
lines.push('# Install Redis');
|
|
108
131
|
lines.push('if ! command -v redis-server &>/dev/null; then');
|
|
109
132
|
lines.push(' echo "Installing Redis..."');
|
|
110
|
-
lines.push(' apt-get update -qq');
|
|
111
|
-
lines.push(' apt-get install -y -qq redis-server');
|
|
112
|
-
lines.push(' systemctl enable redis-server');
|
|
113
|
-
lines.push(' systemctl start redis-server');
|
|
133
|
+
lines.push(' $SUDO apt-get update -qq');
|
|
134
|
+
lines.push(' $SUDO apt-get install -y -qq redis-server');
|
|
135
|
+
lines.push(' $SUDO systemctl enable redis-server');
|
|
136
|
+
lines.push(' $SUDO systemctl start redis-server');
|
|
114
137
|
lines.push('fi');
|
|
115
138
|
lines.push('');
|
|
116
139
|
}
|
|
@@ -120,12 +143,12 @@ export function generateProvisionScript(manifest) {
|
|
|
120
143
|
lines.push('# Layer 3: App directories');
|
|
121
144
|
lines.push('# ═══════════════════════════════════════════════════════════');
|
|
122
145
|
lines.push('');
|
|
123
|
-
lines.push('mkdir -p /opt/tova/apps');
|
|
146
|
+
lines.push('$SUDO mkdir -p /opt/tova/apps');
|
|
124
147
|
lines.push(`APP_DIR="/opt/tova/apps/${appName}"`);
|
|
125
|
-
lines.push('mkdir -p "$APP_DIR/releases"');
|
|
126
|
-
lines.push('mkdir -p "$APP_DIR/shared/logs"');
|
|
127
|
-
lines.push('mkdir -p "$APP_DIR/shared/data"');
|
|
128
|
-
lines.push('chown -R tova:tova /opt/tova');
|
|
148
|
+
lines.push('$SUDO mkdir -p "$APP_DIR/releases"');
|
|
149
|
+
lines.push('$SUDO mkdir -p "$APP_DIR/shared/logs"');
|
|
150
|
+
lines.push('$SUDO mkdir -p "$APP_DIR/shared/data"');
|
|
151
|
+
lines.push('$SUDO chown -R tova:tova /opt/tova');
|
|
129
152
|
lines.push('');
|
|
130
153
|
|
|
131
154
|
// ── Layer 5: Caddy config ──────────────────────────────────
|
|
@@ -142,11 +165,12 @@ export function generateProvisionScript(manifest) {
|
|
|
142
165
|
health_timeout: manifest.health_timeout,
|
|
143
166
|
hasWebSocket: manifest.hasWebSocket,
|
|
144
167
|
});
|
|
145
|
-
|
|
168
|
+
// Use `tee` rather than shell redirection so the write itself is privileged.
|
|
169
|
+
lines.push(`$SUDO tee /etc/caddy/Caddyfile >/dev/null <<'CADDY_EOF'`);
|
|
146
170
|
lines.push(caddyConfig);
|
|
147
171
|
lines.push('CADDY_EOF');
|
|
148
172
|
lines.push('');
|
|
149
|
-
lines.push('systemctl reload caddy || systemctl restart caddy');
|
|
173
|
+
lines.push('$SUDO systemctl reload caddy || $SUDO systemctl restart caddy');
|
|
150
174
|
lines.push('');
|
|
151
175
|
}
|
|
152
176
|
|
|
@@ -160,17 +184,17 @@ export function generateProvisionScript(manifest) {
|
|
|
160
184
|
restart_on_failure: manifest.restart_on_failure !== false,
|
|
161
185
|
env: manifest.env || {},
|
|
162
186
|
});
|
|
163
|
-
lines.push(
|
|
187
|
+
lines.push(`$SUDO tee /etc/systemd/system/${appName}@.service >/dev/null <<'SYSTEMD_EOF'`);
|
|
164
188
|
lines.push(serviceUnit);
|
|
165
189
|
lines.push('SYSTEMD_EOF');
|
|
166
190
|
lines.push('');
|
|
167
|
-
lines.push('systemctl daemon-reload');
|
|
191
|
+
lines.push('$SUDO systemctl daemon-reload');
|
|
168
192
|
|
|
169
|
-
// Enable
|
|
193
|
+
// Enable instances
|
|
170
194
|
const instances = manifest.instances || 1;
|
|
171
195
|
for (let i = 0; i < instances; i++) {
|
|
172
196
|
const port = 3000 + i;
|
|
173
|
-
lines.push(
|
|
197
|
+
lines.push(`$SUDO systemctl enable ${appName}@${port}`);
|
|
174
198
|
}
|
|
175
199
|
lines.push('');
|
|
176
200
|
|
|
@@ -226,7 +250,6 @@ export function generateSystemdService(appName, config = {}) {
|
|
|
226
250
|
lines.push(`MemoryMax=${memLimit}`);
|
|
227
251
|
lines.push('');
|
|
228
252
|
|
|
229
|
-
// Environment — load secrets from .env.production, then set inline defaults
|
|
230
253
|
lines.push(`EnvironmentFile=-/opt/tova/apps/${appName}/.env.production`);
|
|
231
254
|
lines.push('Environment=NODE_ENV=production');
|
|
232
255
|
lines.push('Environment=PORT=%i');
|
|
@@ -237,7 +260,6 @@ export function generateSystemdService(appName, config = {}) {
|
|
|
237
260
|
}
|
|
238
261
|
lines.push('');
|
|
239
262
|
|
|
240
|
-
// Logging
|
|
241
263
|
lines.push('StandardOutput=journal');
|
|
242
264
|
lines.push('StandardError=journal');
|
|
243
265
|
lines.push(`SyslogIdentifier=${appName}-%i`);
|
|
@@ -266,11 +288,9 @@ export function generateCaddyConfig(appName, opts = {}) {
|
|
|
266
288
|
const lines = [];
|
|
267
289
|
lines.push(`${domain} {`);
|
|
268
290
|
|
|
269
|
-
// Upstream / reverse proxy
|
|
270
291
|
if (instances === 1) {
|
|
271
292
|
lines.push(' reverse_proxy localhost:3000 {');
|
|
272
293
|
} else {
|
|
273
|
-
// Multiple instances with round-robin load balancing
|
|
274
294
|
const upstreams = [];
|
|
275
295
|
for (let i = 0; i < instances; i++) {
|
|
276
296
|
upstreams.push(`localhost:${3000 + i}`);
|
|
@@ -279,14 +299,12 @@ export function generateCaddyConfig(appName, opts = {}) {
|
|
|
279
299
|
lines.push(' lb_policy round_robin');
|
|
280
300
|
}
|
|
281
301
|
|
|
282
|
-
// Health check
|
|
283
302
|
lines.push(` health_uri ${health}`);
|
|
284
303
|
lines.push(` health_interval ${healthInterval}s`);
|
|
285
304
|
lines.push(` health_timeout ${healthTimeout}s`);
|
|
286
305
|
|
|
287
306
|
lines.push(' }');
|
|
288
307
|
|
|
289
|
-
// WebSocket support
|
|
290
308
|
if (hasWebSocket) {
|
|
291
309
|
lines.push('');
|
|
292
310
|
lines.push(' @websocket {');
|
|
@@ -304,7 +322,6 @@ export function generateCaddyConfig(appName, opts = {}) {
|
|
|
304
322
|
}
|
|
305
323
|
}
|
|
306
324
|
|
|
307
|
-
// Logging
|
|
308
325
|
lines.push('');
|
|
309
326
|
lines.push(' log {');
|
|
310
327
|
lines.push(` output file /var/log/caddy/${appName}.log`);
|
|
@@ -0,0 +1,330 @@
|
|
|
1
|
+
// SSH/scp/rsync runner for the Tova deploy command.
|
|
2
|
+
//
|
|
3
|
+
// The runner is split from the orchestrator so tests can exercise the
|
|
4
|
+
// orchestrator with a no-op runner while the CLI plugs in a real one. A
|
|
5
|
+
// dry-run runner prints commands instead of executing them — useful for
|
|
6
|
+
// verifying without a reachable server.
|
|
7
|
+
//
|
|
8
|
+
// Before running any privileged action, the runner probes the SSH user once
|
|
9
|
+
// to decide whether commands need a `sudo ` prefix. The result is cached for
|
|
10
|
+
// the lifetime of the runner instance. Tests can pre-seed this via
|
|
11
|
+
// `makeRunner(executor, { env: { ... } })` to skip the probe.
|
|
12
|
+
|
|
13
|
+
import { spawn, spawnSync } from 'child_process';
|
|
14
|
+
import { writeFileSync, mkdtempSync, rmSync, existsSync } from 'fs';
|
|
15
|
+
import { join } from 'path';
|
|
16
|
+
import { tmpdir } from 'os';
|
|
17
|
+
import { generateProvisionScript } from './provision.js';
|
|
18
|
+
|
|
19
|
+
// ── Executors ───────────────────────────────────────────────
|
|
20
|
+
|
|
21
|
+
// Default executor backed by real child processes.
//
// `exec` runs a command synchronously, mirrors whatever it captured on
// stdout/stderr to our own streams, and returns the full spawnSync result.
// `inherit` attaches the child directly to the terminal and resolves with 0
// on a clean exit, rejecting otherwise.
export const realExecutor = {
  exec(cmd, args, opts = {}) {
    const res = spawnSync(cmd, args, { encoding: 'utf-8', ...opts });
    if (res.stdout) process.stdout.write(res.stdout);
    if (res.stderr) process.stderr.write(res.stderr);
    return res;
  },
  inherit(cmd, args) {
    return new Promise((resolve, reject) => {
      const proc = spawn(cmd, args, { stdio: 'inherit' });
      proc.on('close', (exitCode) => {
        if (exitCode === 0) {
          resolve(0);
        } else {
          reject(new Error(`${cmd} exited with code ${exitCode}`));
        }
      });
      proc.on('error', reject);
    });
  },
};
|
|
36
|
+
|
|
37
|
+
// Executor that prints commands instead of running them. Every formatted
// command line is also appended to `log` so tests can inspect what would
// have been executed.
export function makeDryRunExecutor() {
  const log = [];
  // Format, remember, and print a command without executing it.
  const record = (cmd, args) => {
    const line = `${cmd} ${args.map(quote).join(' ')}`;
    log.push(line);
    console.log(' [dry-run] ' + line);
  };
  return {
    log,
    exec(cmd, args) {
      record(cmd, args);
      // Empty stdout signals to probeRemote that this is a dry-run, so it
      // returns the typical "non-root + sudo" environment for the preview.
      return { status: 0, stdout: '', stderr: '' };
    },
    inherit(cmd, args) {
      record(cmd, args);
      return Promise.resolve(0);
    },
  };
}
|
|
56
|
+
|
|
57
|
+
// Single-quote a string for display as a shell argument when it is empty or
// contains whitespace/shell metacharacters; otherwise return it untouched.
// Embedded single quotes are escaped with the standard '\'' dance.
function quote(s) {
  const needsQuoting = s === '' || /[\s'"$`\\!*?(){}|<>;&]/.test(s);
  if (!needsQuoting) return s;
  const escaped = String(s).replace(/'/g, "'\\''");
  return "'" + escaped + "'";
}
|
|
63
|
+
|
|
64
|
+
// ── SSH primitives ──────────────────────────────────────────
|
|
65
|
+
|
|
66
|
+
// Run a single command on the remote host over non-interactive SSH.
// BatchMode prevents ssh from blocking on a password prompt.
function ssh(executor, server, command) {
  const sshArgs = ['-o', 'BatchMode=yes', server, command];
  return executor.exec('ssh', sshArgs);
}
|
|
69
|
+
|
|
70
|
+
// Copy one local file to a path on the remote host via scp (non-interactive).
function scp(executor, localPath, server, remotePath) {
  const destination = `${server}:${remotePath}`;
  return executor.exec('scp', ['-o', 'BatchMode=yes', localPath, destination]);
}
|
|
73
|
+
|
|
74
|
+
// Mirror a local directory to a remote path with rsync (archive mode,
// deleting remote files that no longer exist locally).
//
// When `useSudo` is set, rsync is run as root on the remote side so we can
// write under /opt/tova/apps even though the SSH user isn't root. This
// requires passwordless sudo for rsync on the server.
function rsyncDir(executor, localDir, server, remotePath, useSudo) {
  const sudoFlags = useSudo ? ['--rsync-path=sudo rsync'] : [];
  const rsyncArgs = [
    '-az',
    '--delete',
    '-e', 'ssh -o BatchMode=yes',
    ...sudoFlags,
    `${localDir}/`,
    `${server}:${remotePath}/`,
  ];
  return executor.exec('rsync', rsyncArgs);
}
|
|
83
|
+
|
|
84
|
+
// Current UTC time as a sortable release name: "YYYYMMDD-HHMMSS".
function timestamp() {
  const now = new Date();
  const pad2 = (value) => String(value).padStart(2, '0');
  const datePart = [
    now.getUTCFullYear(),
    pad2(now.getUTCMonth() + 1),
    pad2(now.getUTCDate()),
  ].join('');
  const timePart = [now.getUTCHours(), now.getUTCMinutes(), now.getUTCSeconds()]
    .map(pad2)
    .join('');
  return `${datePart}-${timePart}`;
}
|
|
89
|
+
|
|
90
|
+
// ── Privilege probe ─────────────────────────────────────────
|
|
91
|
+
|
|
92
|
+
// Returns { isRoot, hasSudo, sudoPrefix }. Throws on connection failure or
|
|
93
|
+
// when the remote is unprivileged and lacks sudo.
|
|
94
|
+
// Probe the remote SSH user's privileges once.
//
// Returns { isRoot, hasSudo, sudoPrefix }. Throws on connection failure or
// when the remote user is unprivileged and sudo isn't installed.
async function probeRemote(executor, server) {
  const probe = 'printf "uid:%s;sudo:%s\\n" "$(id -u)" "$(command -v sudo >/dev/null 2>&1 && echo yes || echo no)"';
  const result = executor.exec('ssh', ['-o', 'BatchMode=yes', server, probe]);
  const output = (result.stdout || '').trim();

  // A clean exit with no output means the dry-run executor answered: assume
  // the typical "non-root with sudo" environment so previews look realistic.
  // This check must come before the status check.
  if (output === '' && result.status === 0) {
    return { isRoot: false, hasSudo: true, sudoPrefix: 'sudo ' };
  }

  if (result.status !== 0) {
    const detail = (result.stderr || '').trim() || `ssh exited ${result.status}`;
    throw new Error(`Cannot reach ${server}: ${detail}`);
  }

  const uidText = /uid:(\d+)/.exec(output);
  const sudoText = /sudo:(yes|no)/.exec(output);
  const uid = uidText ? Number.parseInt(uidText[1], 10) : null;
  const hasSudo = sudoText !== null && sudoText[1] === 'yes';

  if (uid !== 0 && !hasSudo) {
    throw new Error(`${server}: SSH user is not root and sudo is not installed — cannot deploy`);
  }

  const isRoot = uid === 0;
  return { isRoot, hasSudo, sudoPrefix: isRoot ? '' : 'sudo ' };
}
|
|
124
|
+
|
|
125
|
+
// ── Runner ──────────────────────────────────────────────────
|
|
126
|
+
|
|
127
|
+
// Build the runner object used by the deploy CLI. All remote work goes
// through `executor` (real, or a dry-run/test double). Privilege info is
// probed lazily and cached; tests can pre-seed it via `options.env`.
export function makeRunner(executor = realExecutor, options = {}) {
  // Cached { isRoot, hasSudo, sudoPrefix } for this runner instance.
  let cachedEnv = options.env || null;

  async function ensureEnv(infra) {
    if (cachedEnv) return cachedEnv;
    cachedEnv = await probeRemote(executor, infra.server);
    return cachedEnv;
  }

  return {
    // Point `current` at the second-newest release and restart all instances.
    async rollback(infra) {
      requireServer(infra);
      const { sudoPrefix } = await ensureEnv(infra);
      const name = infra.name;
      const script = `set -e
cd /opt/tova/apps/${name}
PREV=$(ls -1t releases 2>/dev/null | sed -n '2p')
if [ -z "$PREV" ]; then
echo "No previous release available to roll back to" >&2
exit 1
fi
${sudoPrefix}ln -sfn "releases/$PREV" current
${sudoPrefix}systemctl restart '${name}@*.service'
echo "Rolled back to releases/$PREV"`;
      const result = ssh(executor, infra.server, script);
      if (result.status !== 0) throw new Error(`Rollback failed (exit ${result.status})`);
    },

    // Show systemctl status for every instance of the app.
    async status(infra) {
      requireServer(infra);
      const { sudoPrefix } = await ensureEnv(infra);
      const result = ssh(executor, infra.server, `${sudoPrefix}systemctl status '${infra.name}@*.service' --no-pager`);
      // systemctl exits 3 for inactive units — that's an answer, not a failure.
      if (result.status !== 0 && result.status !== 3) {
        throw new Error(`Status check failed (exit ${result.status})`);
      }
    },

    // Stream journald logs for one instance (by index) or all instances.
    async logs(infra, { since = '1 hour ago', instance = null } = {}) {
      requireServer(infra);
      const { sudoPrefix } = await ensureEnv(infra);
      const unit = instance != null
        ? `${infra.name}@${3000 + Number(instance)}.service`
        : `${infra.name}@*.service`;
      // journalctl typically requires root or membership in the systemd-journal
      // group to read other users' units, so prefix with sudo when needed.
      return executor.inherit('ssh', [
        '-o', 'BatchMode=yes',
        infra.server,
        `${sudoPrefix}journalctl -u '${unit}' --no-pager --since '${since}'`,
      ]);
    },

    // Open an interactive shell on the deploy target.
    async ssh(infra) {
      requireServer(infra);
      return executor.inherit('ssh', [infra.server]);
    },

    // Create a bare repo plus a post-receive hook for push-to-deploy.
    async setupGit(infra) {
      requireServer(infra);
      const { sudoPrefix } = await ensureEnv(infra);
      const name = infra.name;
      // The heredoc delimiter is quoted ('HOOK') so the remote shell expands
      // nothing inside the hook; ${name} is interpolated client-side by JS.
      const script = `set -e
REPO=/opt/tova/apps/${name}/repo.git
${sudoPrefix}mkdir -p "$(dirname "$REPO")"
if [ ! -d "$REPO" ]; then
${sudoPrefix}git init --bare "$REPO"
fi
${sudoPrefix}tee "$REPO/hooks/post-receive" >/dev/null <<'HOOK'
#!/bin/bash
set -e
TARGET=/opt/tova/apps/${name}/source
mkdir -p "$TARGET"
GIT_WORK_TREE="$TARGET" git checkout -f
cd "$TARGET"
if [ -f package.json ] || [ -f tova.toml ]; then
/home/tova/.bun/bin/bun install --silent || true
/home/tova/.bun/bin/bun /home/tova/.bun/bin/tova build --production --quiet || true
fi
systemctl restart '${name}@*.service' || true
HOOK
${sudoPrefix}chmod +x "$REPO/hooks/post-receive"
${sudoPrefix}chown -R tova:tova "$REPO"
echo "Configured. Add the remote with:"
echo " git remote add ${name} ${infra.server}:$REPO"
echo "Then deploy with:"
echo " git push ${name} ${infra.branch || 'main'}"`;
      const result = ssh(executor, infra.server, script);
      if (result.status !== 0) throw new Error(`setup-git failed (exit ${result.status})`);
    },

    // Stop, disable, and delete the app from the server. Destructive; an
    // optional async `confirm(message)` callback may veto the operation.
    async remove(infra, { confirm = null } = {}) {
      requireServer(infra);
      if (confirm !== null) {
        const ok = await confirm(`This will permanently delete /opt/tova/apps/${infra.name} on ${infra.server}.`);
        if (!ok) {
          console.log(' Aborted.');
          return;
        }
      }
      const { sudoPrefix } = await ensureEnv(infra);
      const name = infra.name;
      const script = `set -e
${sudoPrefix}systemctl stop '${name}@*.service' 2>/dev/null || true
${sudoPrefix}systemctl disable '${name}@*.service' 2>/dev/null || true
${sudoPrefix}rm -f /etc/systemd/system/${name}@.service
${sudoPrefix}systemctl daemon-reload
${sudoPrefix}rm -rf /opt/tova/apps/${name}
echo "Removed deployment ${name}"`;
      const result = ssh(executor, infra.server, script);
      if (result.status !== 0) throw new Error(`Remove failed (exit ${result.status})`);
    },

    // List deployed apps on the target server (or an explicit --server).
    async list(infra, { server = null } = {}) {
      const target = server || infra.server;
      if (!target) throw new Error('--list requires --server <user@host> when no deploy block is loaded');
      // /opt/tova/apps is created world-readable by the provisioner, so no
      // sudo is needed for `ls`.
      const result = ssh(executor, target, 'if [ -d /opt/tova/apps ]; then ls -1 /opt/tova/apps; else echo "(none)"; fi');
      if (result.status !== 0) throw new Error(`List failed (exit ${result.status})`);
    },

    // Full deploy: build, provision, upload a timestamped release, chown,
    // flip the `current` symlink, restart, and prune old releases.
    async deploy(infra, { projectDir, buildOutDir = null, buildProject = null } = {}) {
      requireServer(infra);
      const { sudoPrefix, isRoot } = await ensureEnv(infra);
      const name = infra.name;

      // Step 1 — produce a build to ship
      const outDir = buildOutDir || join(projectDir, '.tova-out');
      if (buildProject) {
        console.log(' Building project...');
        const previousCwd = process.cwd();
        process.chdir(projectDir);
        try {
          await buildProject(['--production', '--quiet']);
        } finally {
          process.chdir(previousCwd);
        }
      }
      if (!existsSync(outDir)) {
        throw new Error(`Build output directory not found: ${outDir}`);
      }

      // Step 2 — write provisioning script to a temp dir
      const tmp = mkdtempSync(join(tmpdir(), 'tova-deploy-'));
      const scriptPath = join(tmp, 'provision.sh');
      writeFileSync(scriptPath, generateProvisionScript(infra));

      try {
        // Step 3 — provision (idempotent)
        console.log(` Provisioning ${infra.server}...`);
        const upload = scp(executor, scriptPath, infra.server, '/tmp/tova-provision.sh');
        if (upload.status !== 0) throw new Error('Failed to upload provision script');
        // bash explicitly so the script runs on shells without /bin/bash as login shell
        const provisioned = ssh(executor, infra.server, 'bash /tmp/tova-provision.sh');
        if (provisioned.status !== 0) throw new Error(`Provisioning failed (exit ${provisioned.status})`);

        // Step 4 — upload the new release
        const ts = timestamp();
        const releasePath = `/opt/tova/apps/${name}/releases/${ts}`;
        console.log(` Uploading release ${ts}...`);
        const made = ssh(
          executor,
          infra.server,
          `${sudoPrefix}mkdir -p '${releasePath}'`,
        );
        if (made.status !== 0) throw new Error('Failed to create release directory');
        const synced = rsyncDir(executor, outDir, infra.server, releasePath, !isRoot);
        if (synced.status !== 0) throw new Error('rsync failed');

        // Step 5 — re-chown after rsync. rsync writes files as the SSH user;
        // the systemd unit runs as `User=tova` and needs write access to the
        // working directory for things like SQLite databases or log rotation.
        const owned = ssh(
          executor,
          infra.server,
          `${sudoPrefix}chown -R tova:tova /opt/tova/apps/${name}`,
        );
        if (owned.status !== 0) throw new Error('Failed to chown release directory');

        // Step 6 — flip the symlink, restart, prune old releases
        const keep = infra.keep_releases || 5;
        const activate = `set -e
${sudoPrefix}ln -sfn '${releasePath}' /opt/tova/apps/${name}/current
${sudoPrefix}systemctl restart '${name}@*.service'
cd /opt/tova/apps/${name}/releases
ls -1t | tail -n +$((${keep} + 1)) | xargs -r ${sudoPrefix}rm -rf
echo "Activated release ${ts}"`;
        const activated = ssh(executor, infra.server, activate);
        if (activated.status !== 0) throw new Error('Activation failed');

        console.log(` Deployed ${name} (release ${ts})`);
      } finally {
        try { rmSync(tmp, { recursive: true, force: true }); } catch {}
      }
    },
  };
}
|
|
325
|
+
|
|
326
|
+
// Every remote action needs a target host; fail fast with a clear message
// when the deploy block doesn't name one.
function requireServer(infra) {
  if (!infra?.server) {
    throw new Error('Deploy block has no `server` field');
  }
}
|
package/src/parser/parser.js
CHANGED
|
@@ -560,7 +560,13 @@ export class Parser {
|
|
|
560
560
|
if (this.check(TokenType.TYPE)) return this.parseTypeDeclaration();
|
|
561
561
|
if (this.check(TokenType.MUT)) this.error("'mut' is not supported in Tova. Use 'var' for mutable variables");
|
|
562
562
|
if (this.check(TokenType.VAR)) return this.parseVarDeclaration();
|
|
563
|
-
if (this.check(TokenType.LET))
|
|
563
|
+
if (this.check(TokenType.LET)) {
|
|
564
|
+
const next = this.peek(1);
|
|
565
|
+
if (next && (next.type === TokenType.LBRACE || next.type === TokenType.LBRACKET || next.type === TokenType.LPAREN)) {
|
|
566
|
+
this.error("'let' is not needed in Tova. Destructure directly: '{a, b} = obj', '[a, b] = list', or '(a, b) = pair'");
|
|
567
|
+
}
|
|
568
|
+
this.error("'let' is not needed in Tova. Use 'name = value' for binding or 'var name = value' for mutable");
|
|
569
|
+
}
|
|
564
570
|
if (this.check(TokenType.IF)) return this.parseIfStatement();
|
|
565
571
|
if (this.check(TokenType.FOR)) return this.parseForStatement();
|
|
566
572
|
if (this.check(TokenType.WHILE)) return this.parseWhileStatement();
|
|
@@ -1303,31 +1309,6 @@ export class Parser {
|
|
|
1303
1309
|
return new AST.VarDeclaration(targets, values, l);
|
|
1304
1310
|
}
|
|
1305
1311
|
|
|
1306
|
-
parseLetDestructure() {
|
|
1307
|
-
const l = this.loc();
|
|
1308
|
-
this.expect(TokenType.LET);
|
|
1309
|
-
|
|
1310
|
-
let pattern;
|
|
1311
|
-
if (this.check(TokenType.LBRACE)) {
|
|
1312
|
-
pattern = this.parseObjectPattern();
|
|
1313
|
-
} else if (this.check(TokenType.LBRACKET)) {
|
|
1314
|
-
pattern = this.parseArrayPattern();
|
|
1315
|
-
} else if (this.check(TokenType.LPAREN)) {
|
|
1316
|
-
// Tuple destructuring: let (a, b) = expr
|
|
1317
|
-
pattern = this.parseTuplePattern();
|
|
1318
|
-
} else if (this.check(TokenType.IDENTIFIER)) {
|
|
1319
|
-
const name = this.current().value;
|
|
1320
|
-
this.error(`Use '${name} = value' for binding or 'var ${name} = value' for mutable. 'let' is only for destructuring: let {a, b} = obj`);
|
|
1321
|
-
} else {
|
|
1322
|
-
this.error("Expected '{', '[', or '(' after 'let' for destructuring");
|
|
1323
|
-
}
|
|
1324
|
-
|
|
1325
|
-
this.expect(TokenType.ASSIGN, "Expected '=' in destructuring");
|
|
1326
|
-
const value = this.parseExpression();
|
|
1327
|
-
|
|
1328
|
-
return new AST.LetDestructure(pattern, value, l);
|
|
1329
|
-
}
|
|
1330
|
-
|
|
1331
1312
|
parseObjectPattern() {
|
|
1332
1313
|
const l = this.loc();
|
|
1333
1314
|
this.expect(TokenType.LBRACE);
|
package/src/version.js
CHANGED
|
@@ -1,2 +1,2 @@
|
|
|
1
1
|
// Auto-generated by scripts/embed-runtime.js — do not edit
|
|
2
|
-
export const VERSION = "0.
|
|
2
|
+
export const VERSION = "0.13.0";
|