kadi-deploy 0.19.0 → 0.19.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +331 -15
- package/agent.json +2 -2
- package/package.json +1 -1
- package/src/commands/deploy-local.ts +2 -1
- package/src/commands/down.ts +49 -2
- package/src/commands/list.ts +241 -0
- package/src/index.ts +50 -0
- package/src/infrastructure/registry.ts +7 -4
- package/src/types.ts +8 -0
- package/src/utils/engine.ts +88 -0
package/README.md
CHANGED
|
@@ -21,8 +21,13 @@ kadi install kadi-deploy
|
|
|
21
21
|
| `kadi deploy` | Deploy using first available profile |
|
|
22
22
|
| `kadi deploy --profile production` | Deploy using a specific profile |
|
|
23
23
|
| `kadi deploy --autonomous` | Fully autonomous deployment (no human interaction) |
|
|
24
|
-
| `kadi deploy --dry-run` | Preview deployment without executing
|
|
24
|
+
| `kadi deploy --dry-run` | Preview deployment without executing |
|
|
25
|
+
| `kadi deploy list` | List all active deployments |
|
|
26
|
+
| `kadi deploy list --json` | List deployments as JSON |
|
|
27
|
+
| `kadi deploy down` | Tear down an active deployment |
|
|
25
28
|
| `kadi deploy down --profile <name>` | Tear down a specific profile's deployment |
|
|
29
|
+
| `kadi deploy down --instance <id>` | Tear down a specific instance by ID |
|
|
30
|
+
| `kadi deploy down --label <label>` | Tear down a deployment by its label |
|
|
26
31
|
| `kadi deploy down --autonomous` | Tear down Akash deployment without human interaction |
|
|
27
32
|
| `kadi deploy down --yes` | Tear down without confirmation prompt |
|
|
28
33
|
---
|
|
@@ -60,6 +65,103 @@ That's it. For Akash Network deployment, see [Deploying to Akash](#deploying-to-
|
|
|
60
65
|
|
|
61
66
|
---
|
|
62
67
|
|
|
68
|
+
## Configuration & Secrets
|
|
69
|
+
|
|
70
|
+
`kadi deploy` uses three configuration sources — **agent.json** for deploy profiles, **config.yml** for infrastructure settings, and **encrypted vaults** for secrets. No `.env` files needed.
|
|
71
|
+
|
|
72
|
+
### Where Things Live
|
|
73
|
+
|
|
74
|
+
| What | Where | Purpose |
|
|
75
|
+
|------|-------|---------|
|
|
76
|
+
| Deploy profiles | `agent.json` (project root) | Service definitions, target, network, secrets delivery |
|
|
77
|
+
| Infrastructure config | `config.yml` (project or ancestor) | Tunnel server, registry port, container engine |
|
|
78
|
+
| Global infrastructure config | `~/.kadi/config.yml` | Machine-wide defaults (shared across all projects) |
|
|
79
|
+
| Project secrets | `secrets.toml` (project or ancestor) | Agent-specific API keys, DB passwords — encrypted |
|
|
80
|
+
| Infrastructure secrets | `~/.kadi/secrets/config.toml` | Tunnel tokens, Akash wallet — encrypted, machine-scoped |
|
|
81
|
+
|
|
82
|
+
Configuration resolves with walk-up discovery — `kadi deploy` searches from your CWD upward through parent directories until it finds `config.yml` or `secrets.toml`. Global `~/.kadi/` is the final fallback.
|
|
83
|
+
|
|
84
|
+
### Setting Up Secrets
|
|
85
|
+
|
|
86
|
+
**Tunnel token** (required for deploying local images to Akash):
|
|
87
|
+
|
|
88
|
+
```bash
|
|
89
|
+
# Create the global tunnel vault (one-time)
|
|
90
|
+
kadi secret create tunnel -g
|
|
91
|
+
|
|
92
|
+
# Store the token
|
|
93
|
+
kadi secret set KADI_TUNNEL_TOKEN "your-token" -v tunnel
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
The tunnel vault lives at `~/.kadi/secrets/config.toml` (user-level) so it works from any project directory.
|
|
97
|
+
|
|
98
|
+
**Akash wallet** (required for autonomous deployment):
|
|
99
|
+
|
|
100
|
+
```bash
|
|
101
|
+
# Store in the global vault (already exists by default)
|
|
102
|
+
kadi secret set AKASH_WALLET "your twelve or twenty four word mnemonic" -v global
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
**Deployment secrets** (shared with deployed containers):
|
|
106
|
+
|
|
107
|
+
```bash
|
|
108
|
+
# Create a project-level vault for your agent
|
|
109
|
+
kadi secret create my-agent
|
|
110
|
+
|
|
111
|
+
# Store secrets the deployed container will receive
|
|
112
|
+
kadi secret set API_KEY "sk-..." -v my-agent
|
|
113
|
+
kadi secret set DB_URL "postgres://..." -v my-agent
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
Then reference it in your deploy profile's `secrets` block (see [Sharing Secrets with Deployments](#sharing-secrets-with-deployments)).
|
|
117
|
+
|
|
118
|
+
### Setting Up config.yml
|
|
119
|
+
|
|
120
|
+
Create a `config.yml` in your project root (or any ancestor directory):
|
|
121
|
+
|
|
122
|
+
```yaml
|
|
123
|
+
tunnel:
|
|
124
|
+
server_addr: broker.kadi.build
|
|
125
|
+
tunnel_domain: tunnel.kadi.build
|
|
126
|
+
server_port: 7000
|
|
127
|
+
ssh_port: 2200
|
|
128
|
+
mode: frpc
|
|
129
|
+
transport: wss
|
|
130
|
+
wss_control_host: tunnel-control.kadi.build
|
|
131
|
+
|
|
132
|
+
deploy:
|
|
133
|
+
registry_port: 3000
|
|
134
|
+
container_engine: docker # docker | podman
|
|
135
|
+
auto_shutdown: true
|
|
136
|
+
registry_duration: 600000 # 10 minutes
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
All values have sensible defaults — you only need `config.yml` if you want to override something. See `config.sample.yml` in [tunnel-services](../abilities/tunnel-services/config.sample.yml) and [deploy-ability](../abilities/deploy-ability/config.sample.yml) for full reference.
|
|
140
|
+
|
|
141
|
+
### Resolution Priority
|
|
142
|
+
|
|
143
|
+
Each setting resolves independently from highest to lowest priority:
|
|
144
|
+
|
|
145
|
+
1. **CLI flags** — `--engine podman`, `--network testnet`
|
|
146
|
+
2. **Environment variables** — `KADI_TUNNEL_SERVER`, `KADI_TUNNEL_TOKEN`
|
|
147
|
+
3. **Encrypted vault** — `secrets.toml` via `kadi secret`
|
|
148
|
+
4. **Project config.yml** — walk-up from CWD
|
|
149
|
+
5. **Global `~/.kadi/config.yml`** — machine-wide defaults
|
|
150
|
+
6. **Built-in defaults** — e.g. `docker`, `broker.kadi.build`
|
|
151
|
+
|
|
152
|
+
### Project-Level vs Global (User-Level)
|
|
153
|
+
|
|
154
|
+
| Setting | Scope | Location |
|
|
155
|
+
|---------|-------|----------|
|
|
156
|
+
| `KADI_TUNNEL_TOKEN` | **Global** — same token for all projects | `~/.kadi/secrets/config.toml` → `tunnel` vault |
|
|
157
|
+
| `AKASH_WALLET` | **Global** — your wallet, not project-specific | `~/.kadi/secrets/config.toml` → `global` vault |
|
|
158
|
+
| `tunnel:` config | **Global or Project** — usually same infrastructure | `~/.kadi/config.yml` or project `config.yml` |
|
|
159
|
+
| `deploy:` config | **Project** — may differ per project | Project `config.yml` |
|
|
160
|
+
| Agent secrets (API keys, DB creds) | **Project** — specific to one agent | Project `secrets.toml` → custom vault |
|
|
161
|
+
| Deploy profiles | **Project** — specific to one agent | `agent.json` |
|
|
162
|
+
|
|
163
|
+
---
|
|
164
|
+
|
|
63
165
|
## Deploying Locally
|
|
64
166
|
|
|
65
167
|
Local deployment uses Docker or Podman to run your services via docker-compose.
|
|
@@ -94,6 +196,19 @@ kadi deploy --profile local
|
|
|
94
196
|
| `network` | Docker network name (optional) |
|
|
95
197
|
| `services` | Service definitions (see below) |
|
|
96
198
|
|
|
199
|
+
### Container Engine Resolution
|
|
200
|
+
|
|
201
|
+
The container engine is resolved using the following priority (highest wins):
|
|
202
|
+
|
|
203
|
+
1. **CLI `--engine` flag** — explicit per-command override
|
|
204
|
+
2. **Profile `engine`** — set in the deploy profile in `agent.json`
|
|
205
|
+
3. **Global config `preferences.containerEngine`** — user default set via `kadi config set preferences.containerEngine podman`
|
|
206
|
+
4. **Default** — `docker`
|
|
207
|
+
|
|
208
|
+
This matches the resolution pattern used by `kadi broker`.
|
|
209
|
+
|
|
210
|
+
When using Podman, compose commands automatically prefer `podman-compose` (the standalone Python tool) over `podman compose`, which avoids issues on systems where `podman compose` delegates to a broken Docker Desktop shim.
|
|
211
|
+
|
|
97
212
|
---
|
|
98
213
|
|
|
99
214
|
## Deploying to Akash
|
|
@@ -228,28 +343,92 @@ The entire flow completes without any human interaction.
|
|
|
228
343
|
|
|
229
344
|
---
|
|
230
345
|
|
|
346
|
+
## Listing Active Deployments
|
|
347
|
+
|
|
348
|
+
The `kadi deploy list` command (alias `kadi deploy ls`) shows all active deployments from the `.kadi-deploy.lock` file without needing to open it manually.
|
|
349
|
+
|
|
350
|
+
```bash
|
|
351
|
+
$ kadi deploy list
|
|
352
|
+
|
|
353
|
+
Active Deployments (2)
|
|
354
|
+
──────────────────────────────────────────────────────────────────────
|
|
355
|
+
|
|
356
|
+
INSTANCE PROFILE TARGET LABEL DETAILS DEPLOYED
|
|
357
|
+
02f5 production akash broker-east dseq=19234567 Mar 10, 2026, 02:15 PM
|
|
358
|
+
a3f7 dev local — engine=docker Mar 11, 2026, 09:30 AM
|
|
359
|
+
|
|
360
|
+
Use `kadi deploy down --instance <id>` or `kadi deploy down --label <label>` to tear down a deployment.
|
|
361
|
+
```
|
|
362
|
+
|
|
363
|
+
### List Flags
|
|
364
|
+
|
|
365
|
+
| Flag | Description | Default |
|
|
366
|
+
|------|-------------|---------|
|
|
367
|
+
| `-p, --project <path>` | Path to project with `.kadi-deploy.lock` | Current directory |
|
|
368
|
+
| `--profile <profile>` | Filter by profile name | Show all |
|
|
369
|
+
| `--json` | Output as JSON (for scripting / automation) | `false` |
|
|
370
|
+
| `--verbose` | Show additional details (provider address, services, network) | `false` |
|
|
371
|
+
|
|
372
|
+
### JSON Output
|
|
373
|
+
|
|
374
|
+
Use `--json` for machine-readable output, useful in scripts and CI pipelines:
|
|
375
|
+
|
|
376
|
+
```bash
|
|
377
|
+
$ kadi deploy list --json
|
|
378
|
+
[
|
|
379
|
+
{
|
|
380
|
+
"instanceId": "02f5",
|
|
381
|
+
"profile": "production",
|
|
382
|
+
"target": "akash",
|
|
383
|
+
"label": "broker-east",
|
|
384
|
+
"deployedAt": "2026-03-10T14:15:00.000Z",
|
|
385
|
+
"dseq": 19234567,
|
|
386
|
+
"owner": "akash1abc...",
|
|
387
|
+
"provider": "akash1xyz...",
|
|
388
|
+
"providerUri": "https://provider.example.com",
|
|
389
|
+
"network": "mainnet"
|
|
390
|
+
}
|
|
391
|
+
]
|
|
392
|
+
```
|
|
393
|
+
|
|
394
|
+
---
|
|
395
|
+
|
|
231
396
|
## Tearing Down Deployments
|
|
232
397
|
|
|
233
398
|
The `kadi deploy down` command tears down an active deployment launched by `kadi deploy`. It works for both local (Docker/Podman) and Akash deployments.
|
|
234
399
|
|
|
235
400
|
After a successful deployment, kadi-deploy writes a `.kadi-deploy.lock` file to the project root that records everything needed to tear it down. The lock file supports **multiple simultaneous deployments** — you can have a local dev deployment and an Akash production deployment active at the same time.
|
|
236
401
|
|
|
237
|
-
### Multi-
|
|
402
|
+
### Multi-Instance Support
|
|
403
|
+
|
|
404
|
+
Each deployment gets a unique 4-character instance ID (e.g. `a3f7`) and an optional human-readable label. Use `kadi deploy list` to see all active deployments.
|
|
238
405
|
|
|
239
|
-
When multiple deployments are active
|
|
406
|
+
When multiple deployments are active, you can identify which one to tear down using `--profile`, `--instance`, or `--label`:
|
|
240
407
|
|
|
241
408
|
```bash
|
|
242
409
|
# Specify which profile to tear down
|
|
243
410
|
kadi deploy down --profile dev
|
|
244
411
|
kadi deploy down --profile production
|
|
245
412
|
|
|
413
|
+
# Tear down by instance ID (4-char hex shown in deploy output and `deploy list`)
|
|
414
|
+
kadi deploy down --instance a3f7
|
|
415
|
+
|
|
416
|
+
# Tear down by label (set during deploy with --label)
|
|
417
|
+
kadi deploy down --label my-broker
|
|
418
|
+
|
|
246
419
|
# If only one deployment is active, it's auto-selected
|
|
247
420
|
kadi deploy down
|
|
248
421
|
|
|
249
|
-
# If multiple are active and no
|
|
422
|
+
# If multiple are active and no selector given, you'll be prompted to choose
|
|
250
423
|
kadi deploy down --yes
|
|
251
424
|
```
|
|
252
425
|
|
|
426
|
+
> **Tip:** When deploying, use `--label` to give your deployment a memorable name:
|
|
427
|
+
> ```bash
|
|
428
|
+
> kadi deploy --profile production --label broker-east
|
|
429
|
+
> ```
|
|
430
|
+
> Then tear it down easily: `kadi deploy down --label broker-east`
|
|
431
|
+
|
|
253
432
|
### Local Teardown
|
|
254
433
|
|
|
255
434
|
```bash
|
|
@@ -263,7 +442,7 @@ kadi deploy down --yes
|
|
|
263
442
|
kadi deploy down --engine podman
|
|
264
443
|
```
|
|
265
444
|
|
|
266
|
-
This runs `docker compose down --remove-orphans` (or `podman
|
|
445
|
+
This runs `docker compose down --remove-orphans` (or `podman-compose` for Podman) against the compose file recorded in the lock. Engine resolution follows the same priority as deploy (see [Container Engine Resolution](#container-engine-resolution)).
|
|
267
446
|
|
|
268
447
|
### Akash Teardown (Interactive)
|
|
269
448
|
|
|
@@ -297,7 +476,7 @@ kadi deploy down --autonomous --profile production
|
|
|
297
476
|
|
|
298
477
|
The autonomous path skips all interactive prompts (confirmation and profile selection), reads the wallet mnemonic from the secrets vault (same as `kadi deploy --autonomous`), signs the close transaction directly, and refunds the remaining escrow to your wallet.
|
|
299
478
|
|
|
300
|
-
> **Note:** If multiple deployments are active, `--autonomous` requires `--profile` to specify which one to tear down (it cannot prompt for selection).
|
|
479
|
+
> **Note:** If multiple deployments are active, `--autonomous` requires `--profile`, `--instance`, or `--label` to specify which one to tear down (it cannot prompt for selection).
|
|
301
480
|
|
|
302
481
|
### Down Flags
|
|
303
482
|
|
|
@@ -305,6 +484,9 @@ The autonomous path skips all interactive prompts (confirmation and profile sele
|
|
|
305
484
|
|------|-------------|----------|
|
|
306
485
|
| `-p, --project <path>` | Path to project with `.kadi-deploy.lock` | Current directory |
|
|
307
486
|
| `--profile <profile>` | Profile name to tear down (prompts if multiple; **required** in autonomous mode with multiple deployments) | Auto-select |
|
|
487
|
+
| `--instance <id>` | Instance ID to tear down (4-char hex from deploy output or `deploy list`) | — |
|
|
488
|
+
| `--label <label>` | Tear down the deployment matching this label | — |
|
|
489
|
+
| `--all` | Tear down all active deployments | `false` |
|
|
308
490
|
| `--engine <engine>` | Override container engine (`docker`/`podman`) | From lock file |
|
|
309
491
|
| `--network <network>` | Override Akash network (`mainnet`/`testnet`) | From lock file |
|
|
310
492
|
| `--autonomous` | No human interaction — skips confirmation, uses vault mnemonic for Akash | `false` |
|
|
@@ -312,25 +494,30 @@ The autonomous path skips all interactive prompts (confirmation and profile sele
|
|
|
312
494
|
| `-y, --yes` | Skip confirmation prompt (implied by `--autonomous`) | `false` |
|
|
313
495
|
| `--verbose` | Detailed output | `false` |
|
|
314
496
|
|
|
497
|
+
**Resolution priority:** `--instance` > `--label` > `--all` > `--profile` > auto-select (single deployment) > interactive prompt.
|
|
498
|
+
|
|
315
499
|
### The Lock File
|
|
316
500
|
|
|
317
|
-
The `.kadi-deploy.lock` file is written to the project root after every successful deployment. It uses a
|
|
501
|
+
The `.kadi-deploy.lock` file is written to the project root after every successful deployment. It uses a v3 format that supports multiple simultaneous deployments — including multiple instances of the same profile — keyed by `{profile}:{instanceId}`:
|
|
318
502
|
|
|
319
503
|
```json
|
|
320
504
|
{
|
|
321
|
-
"version":
|
|
505
|
+
"version": 3,
|
|
322
506
|
"deployments": {
|
|
323
|
-
"
|
|
507
|
+
"local:a3f7": {
|
|
508
|
+
"instanceId": "a3f7",
|
|
324
509
|
"target": "local",
|
|
325
|
-
"profile": "
|
|
510
|
+
"profile": "local",
|
|
326
511
|
"deployedAt": "2026-02-25T12:00:00.000Z",
|
|
327
|
-
"local": { "composePath": "...", "engine": "docker", ... }
|
|
512
|
+
"local": { "composePath": "...", "engine": "docker", "...": "..." }
|
|
328
513
|
},
|
|
329
|
-
"
|
|
514
|
+
"akash:b9e2": {
|
|
515
|
+
"instanceId": "b9e2",
|
|
330
516
|
"target": "akash",
|
|
331
|
-
"profile": "
|
|
517
|
+
"profile": "akash",
|
|
518
|
+
"label": "broker-east",
|
|
332
519
|
"deployedAt": "2026-02-25T14:00:00.000Z",
|
|
333
|
-
"akash": { "dseq": 12345678, "owner": "akash1...", ... }
|
|
520
|
+
"akash": { "dseq": 12345678, "owner": "akash1...", "...": "..." }
|
|
334
521
|
}
|
|
335
522
|
}
|
|
336
523
|
}
|
|
@@ -338,10 +525,14 @@ The `.kadi-deploy.lock` file is written to the project root after every successf
|
|
|
338
525
|
|
|
339
526
|
Each entry contains:
|
|
340
527
|
|
|
528
|
+
- **instanceId**: Unique 4-character hex identifier (e.g. `a3f7`)
|
|
529
|
+
- **label**: Optional human-readable label set at deploy time with `--label`
|
|
341
530
|
- **Local deployments**: compose file path, engine, network, service names, container IDs
|
|
342
531
|
- **Akash deployments**: DSEQ, owner address, provider, network, gseq/oseq
|
|
343
532
|
|
|
344
|
-
|
|
533
|
+
Use `kadi deploy list` to view active deployments without opening the lock file.
|
|
534
|
+
|
|
535
|
+
When you tear down a deployment, only that entry is removed. The file is deleted entirely when the last deployment is removed. Existing v1/v2 lock files are transparently migrated to v3 on read.
|
|
345
536
|
|
|
346
537
|
It is safe to add `.kadi-deploy.lock` to `.gitignore`.
|
|
347
538
|
|
|
@@ -538,12 +729,26 @@ kadi deploy --auto-approve-secrets # Works in interactive mode too
|
|
|
538
729
|
kadi deploy --secret-timeout 120000 # 2 min timeout for secret handshake
|
|
539
730
|
```
|
|
540
731
|
|
|
732
|
+
**List active deployments:**
|
|
733
|
+
|
|
734
|
+
```bash
|
|
735
|
+
kadi deploy list # Table of all active deployments
|
|
736
|
+
kadi deploy ls # Alias for list
|
|
737
|
+
kadi deploy list --json # Machine-readable JSON output
|
|
738
|
+
kadi deploy list --verbose # Show provider, services, network details
|
|
739
|
+
kadi deploy list --profile production # Filter by profile
|
|
740
|
+
```
|
|
741
|
+
|
|
541
742
|
**Tear down deployments:**
|
|
542
743
|
|
|
543
744
|
```bash
|
|
544
745
|
kadi deploy down # Tear down active deployment
|
|
746
|
+
kadi deploy down --label my-broker # Tear down by label
|
|
747
|
+
kadi deploy down --instance a3f7 # Tear down by instance ID
|
|
748
|
+
kadi deploy down --all # Tear down all deployments
|
|
545
749
|
kadi deploy down --yes # Skip confirmation (interactive)
|
|
546
750
|
kadi deploy down --autonomous # Fully non-interactive (skips confirmation + QR)
|
|
751
|
+
kadi deploy down --autonomous --label prod-east # Autonomous teardown by label
|
|
547
752
|
kadi deploy down --autonomous --profile prod # Required when multiple deployments active
|
|
548
753
|
kadi deploy down --autonomous --secrets-vault v # Akash: custom vault
|
|
549
754
|
kadi deploy down --engine podman # Override container engine
|
|
@@ -587,3 +792,114 @@ kadi deploy --profile production --dry-run
|
|
|
587
792
|
```bash
|
|
588
793
|
kadi deploy --profile production --verbose
|
|
589
794
|
```
|
|
795
|
+
|
|
796
|
+
---
|
|
797
|
+
|
|
798
|
+
## Migrating from .env to Vaults
|
|
799
|
+
|
|
800
|
+
**As of kadi-deploy v0.19.0**, secrets are stored in encrypted vaults (`secrets.toml`) and config in `config.yml` — replacing the old `.env` file approach entirely. The `.env` fallback still works but is deprecated and will be removed in a future release.
|
|
801
|
+
|
|
802
|
+
### Step 1: Update kadi-deploy
|
|
803
|
+
|
|
804
|
+
```bash
|
|
805
|
+
kadi install kadi-deploy
|
|
806
|
+
```
|
|
807
|
+
|
|
808
|
+
This pulls `kadi-deploy@0.19.3` (with `kadi-secret@0.11.1` and `secret-ability@0.9.2`).
|
|
809
|
+
|
|
810
|
+
### Step 2: Move tunnel token to a global vault
|
|
811
|
+
|
|
812
|
+
Previously you had a `.env` file (often inside `abilities/` or your project root) containing:
|
|
813
|
+
|
|
814
|
+
```
|
|
815
|
+
KADI_TUNNEL_TOKEN=your-token-here
|
|
816
|
+
NGROK_AUTH_TOKEN=your-ngrok-token
|
|
817
|
+
```
|
|
818
|
+
|
|
819
|
+
Move these to an encrypted global vault:
|
|
820
|
+
|
|
821
|
+
```bash
|
|
822
|
+
# Create the tunnel vault at user level (~/.kadi/secrets/config.toml)
|
|
823
|
+
kadi secret create tunnel -g
|
|
824
|
+
|
|
825
|
+
# Copy your token value from the old .env file, then:
|
|
826
|
+
kadi secret set KADI_TUNNEL_TOKEN "your-token-here" -v tunnel
|
|
827
|
+
|
|
828
|
+
# If you used ngrok:
|
|
829
|
+
kadi secret set NGROK_AUTH_TOKEN "your-ngrok-token" -v tunnel
|
|
830
|
+
```
|
|
831
|
+
|
|
832
|
+
The global (`-g`) vault lives at `~/.kadi/secrets/config.toml`, accessible from any project directory. You no longer need to copy `.env` files into ability folders.
|
|
833
|
+
|
|
834
|
+
### Step 3: Move Akash wallet (autonomous deployments only)
|
|
835
|
+
|
|
836
|
+
If you had `AKASH_WALLET` in a `.env` or environment variable:
|
|
837
|
+
|
|
838
|
+
```bash
|
|
839
|
+
# The 'global' vault likely already exists — if not:
|
|
840
|
+
kadi secret create global -g
|
|
841
|
+
|
|
842
|
+
# Store the mnemonic
|
|
843
|
+
kadi secret set AKASH_WALLET "your twelve or twenty four word mnemonic" -v global
|
|
844
|
+
```
|
|
845
|
+
|
|
846
|
+
### Step 4: Create config.yml (optional)
|
|
847
|
+
|
|
848
|
+
If you had non-secret settings in `.env` (tunnel server, ports, etc.), move them to `config.yml` in your project root:
|
|
849
|
+
|
|
850
|
+
```yaml
|
|
851
|
+
tunnel:
|
|
852
|
+
server_addr: broker.kadi.build
|
|
853
|
+
tunnel_domain: tunnel.kadi.build
|
|
854
|
+
transport: wss
|
|
855
|
+
|
|
856
|
+
deploy:
|
|
857
|
+
container_engine: docker
|
|
858
|
+
```
|
|
859
|
+
|
|
860
|
+
All values have sensible defaults — you only need `config.yml` if you customized something.
|
|
861
|
+
|
|
862
|
+
### Step 5: Clean up
|
|
863
|
+
|
|
864
|
+
```bash
|
|
865
|
+
# Delete the old .env file from your kadi abilities folder
|
|
866
|
+
rm ~/.kadi/../abilities/.env # or wherever your .env lived
|
|
867
|
+
|
|
868
|
+
# Also remove from your project if you had one there
|
|
869
|
+
rm /path/to/project/.env # only if it only contained kadi tunnel/deploy secrets
|
|
870
|
+
```
|
|
871
|
+
|
|
872
|
+
### Step 6: Verify
|
|
873
|
+
|
|
874
|
+
```bash
|
|
875
|
+
# From any directory — should return your token
|
|
876
|
+
kadi secret get KADI_TUNNEL_TOKEN -v tunnel
|
|
877
|
+
|
|
878
|
+
# From an agent subdirectory — multi-level discovery finds the global vault
|
|
879
|
+
cd agents/my-agent
|
|
880
|
+
kadi secret get KADI_TUNNEL_TOKEN -v tunnel
|
|
881
|
+
|
|
882
|
+
# Test a deploy
|
|
883
|
+
kadi deploy --dry-run
|
|
884
|
+
```
|
|
885
|
+
|
|
886
|
+
### What changed
|
|
887
|
+
|
|
888
|
+
| Before (.env) | After (vaults + config.yml) |
|
|
889
|
+
|---------------|----------------------------|
|
|
890
|
+
| Plaintext `.env` file in ability folder | Encrypted `secrets.toml` in `~/.kadi/` (age/ChaCha20-Poly1305) |
|
|
891
|
+
| Copy `.env` into each ability that needs it | Global vault — one location, accessible everywhere |
|
|
892
|
+
| Secrets and config mixed in one flat file | Secrets in vault, config in `config.yml` — separated |
|
|
893
|
+
| No encryption, easy to leak | Encrypted at rest, master key in OS keychain |
|
|
894
|
+
| Per-project only | Global (`~/.kadi/`) or per-project — your choice |
|
|
895
|
+
|
|
896
|
+
### Backwards compatibility
|
|
897
|
+
|
|
898
|
+
The `.env` fallback is still supported as a **tier-3 fallback** in `configResolver.js`:
|
|
899
|
+
|
|
900
|
+
1. `process.env` (always wins)
|
|
901
|
+
2. Encrypted vault (`secrets.toml`)
|
|
902
|
+
3. `.env` file walk-up (deprecated — still works)
|
|
903
|
+
4. `config.yml` (for non-secret settings)
|
|
904
|
+
|
|
905
|
+
If you have an existing `.env` file, it will continue to work. But new setups should use vaults.
|
package/agent.json
CHANGED
|
@@ -1,13 +1,13 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "kadi-deploy",
|
|
3
|
-
"version": "0.19.
|
|
3
|
+
"version": "0.19.3",
|
|
4
4
|
"license": "MIT",
|
|
5
5
|
"type": "plugin",
|
|
6
6
|
"entrypoint": "dist/index.js",
|
|
7
7
|
"commands": ["deploy"],
|
|
8
8
|
"description": "CLI plugin that converts an agent's deploy spec into platform-specific artifacts (Akash SDL, Docker Compose, etc.) and orchestrates the deployment process.",
|
|
9
9
|
"repo": "https://gitlab.com/humin-game-lab/kadi/kadi-deploy.git",
|
|
10
|
-
"lib": "https://gitlab.com/humin-game-lab/kadi/kadi-deploy/-/archive/v0.19.
|
|
10
|
+
"lib": "https://gitlab.com/humin-game-lab/kadi/kadi-deploy/-/archive/v0.19.3/kadi-deploy-v0.19.3.zip",
|
|
11
11
|
"api": "http://127.0.0.1:8000",
|
|
12
12
|
"brokers": {
|
|
13
13
|
"local": "ws://127.0.0.1:8080",
|
package/package.json
CHANGED
|
package/src/commands/deploy-local.ts
CHANGED
|
@@ -31,6 +31,7 @@ import { error as errorColor, warning, success as successColor, formatKeyValue,
|
|
|
31
31
|
import type { DeploymentContext } from '../types.js';
|
|
32
32
|
import type { Ora } from 'ora';
|
|
33
33
|
import { buildLocalLock, writeLockFile } from './lock.js';
|
|
34
|
+
import { resolveEngine } from '../utils/engine.js';
|
|
34
35
|
|
|
35
36
|
/**
|
|
36
37
|
* Executes local Docker/Podman deployment using deploy-ability
|
|
@@ -134,7 +135,7 @@ export async function executeLocalDeployment(ctx: DeploymentContext): Promise<vo
|
|
|
134
135
|
agent: agentConfig,
|
|
135
136
|
projectRoot,
|
|
136
137
|
},
|
|
137
|
-
engine:
|
|
138
|
+
engine: resolveEngine(flags.engine, profile.engine, ctx),
|
|
138
139
|
dryRun: flags.dryRun || false,
|
|
139
140
|
verbose: flags.verbose || false,
|
|
140
141
|
|
package/src/commands/down.ts
CHANGED
|
@@ -15,6 +15,7 @@
|
|
|
15
15
|
|
|
16
16
|
import path from 'node:path';
|
|
17
17
|
import { execSync } from 'node:child_process';
|
|
18
|
+
import { resolveEngine, composeCmd } from '../utils/engine.js';
|
|
18
19
|
|
|
19
20
|
import {
|
|
20
21
|
AkashClient,
|
|
@@ -55,6 +56,7 @@ export interface DownOptions {
|
|
|
55
56
|
project?: string;
|
|
56
57
|
profile?: string;
|
|
57
58
|
instance?: string;
|
|
59
|
+
label?: string;
|
|
58
60
|
all?: boolean;
|
|
59
61
|
engine?: string;
|
|
60
62
|
yes?: boolean;
|
|
@@ -125,6 +127,50 @@ export async function executeDown(
|
|
|
125
127
|
return;
|
|
126
128
|
}
|
|
127
129
|
lock = entry;
|
|
130
|
+
} else if (options.label) {
|
|
131
|
+
// Explicit --label flag: find deployments matching the label
|
|
132
|
+
const labelMatches = entries.filter(([_, d]) => d.label === options.label);
|
|
133
|
+
if (labelMatches.length === 0) {
|
|
134
|
+
logger.error(
|
|
135
|
+
errorColor(`No active deployment found with label "${options.label}".`)
|
|
136
|
+
);
|
|
137
|
+
logger.log(
|
|
138
|
+
dim('Active deployments: ' + entries.map(([k, d]) => {
|
|
139
|
+
const l = d.label ? ` (${d.label})` : '';
|
|
140
|
+
return `${d.instanceId}${l}`;
|
|
141
|
+
}).join(', '))
|
|
142
|
+
);
|
|
143
|
+
return;
|
|
144
|
+
} else if (labelMatches.length === 1) {
|
|
145
|
+
lock = labelMatches[0][1];
|
|
146
|
+
} else if (options.autonomous) {
|
|
147
|
+
logger.error(
|
|
148
|
+
errorColor(
|
|
149
|
+
`Multiple deployments found with label "${options.label}". In autonomous mode, specify --instance to disambiguate.`
|
|
150
|
+
)
|
|
151
|
+
);
|
|
152
|
+
logger.log(
|
|
153
|
+
dim('Instances: ' + labelMatches.map(([_, d]) => `${d.instanceId} (${d.profile})`).join(', '))
|
|
154
|
+
);
|
|
155
|
+
return;
|
|
156
|
+
} else {
|
|
157
|
+
// Multiple deployments with same label — prompt
|
|
158
|
+
logger.log('');
|
|
159
|
+
logger.log(bold(`Multiple deployments with label "${options.label}" found:`));
|
|
160
|
+
logger.log('');
|
|
161
|
+
for (const [key, dep] of labelMatches) {
|
|
162
|
+
logger.log(` ${highlight(dep.instanceId)} ${dim(`(${dep.profile}, ${dep.target}, deployed ${dep.deployedAt})`)}`);
|
|
163
|
+
}
|
|
164
|
+
logger.log('');
|
|
165
|
+
|
|
166
|
+
const selected = await selectPrompt(
|
|
167
|
+
'Which deployment do you want to tear down?',
|
|
168
|
+
labelMatches.map(([_, dep]) => `${dep.instanceId} (${dep.profile}, ${dep.target})`)
|
|
169
|
+
);
|
|
170
|
+
|
|
171
|
+
const selectedInstanceId = selected.replace(/ \(.*\)$/, '');
|
|
172
|
+
lock = labelMatches.find(([_, d]) => d.instanceId === selectedInstanceId)![1];
|
|
173
|
+
}
|
|
128
174
|
} else if (options.all) {
|
|
129
175
|
// --all flag: tear down everything (handled separately below)
|
|
130
176
|
if (!options.yes && !options.autonomous) {
|
|
@@ -314,7 +360,8 @@ async function teardownLocal(
|
|
|
314
360
|
): Promise<void> {
|
|
315
361
|
const { logger } = ctx;
|
|
316
362
|
const local = lock.local!;
|
|
317
|
-
const engine = options.engine
|
|
363
|
+
const engine = resolveEngine(options.engine, local.engine, ctx);
|
|
364
|
+
const compose = composeCmd(engine);
|
|
318
365
|
|
|
319
366
|
const spinner = startSpinner(
|
|
320
367
|
`Stopping ${local.services.length} service(s) with ${engine}...`
|
|
@@ -322,7 +369,7 @@ async function teardownLocal(
|
|
|
322
369
|
|
|
323
370
|
try {
|
|
324
371
|
const composeFile = local.composePath;
|
|
325
|
-
const cmd = `${
|
|
372
|
+
const cmd = `${compose} -f "${composeFile}" down --remove-orphans`;
|
|
326
373
|
|
|
327
374
|
if (options.verbose) {
|
|
328
375
|
logger.log(dim(`Running: ${cmd}`));
|
|
package/src/commands/list.ts
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Deploy List Command — Show active deployments
|
|
3
|
+
*
|
|
4
|
+
* Reads the `.kadi-deploy.lock` file and displays a table of all active
|
|
5
|
+
* deployment instances with their key identifiers (instance ID, profile,
|
|
6
|
+
* target, label, Akash DSEQ / provider, local engine / services, etc.).
|
|
7
|
+
*
|
|
8
|
+
* @module commands/list
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import path from 'node:path';
|
|
12
|
+
import {
|
|
13
|
+
readLockFile,
|
|
14
|
+
type DeploymentLock,
|
|
15
|
+
type LockFile,
|
|
16
|
+
} from './lock.js';
|
|
17
|
+
import {
|
|
18
|
+
error as errorColor,
|
|
19
|
+
dim,
|
|
20
|
+
bold,
|
|
21
|
+
highlight,
|
|
22
|
+
formatKeyValue,
|
|
23
|
+
} from '../cli/colors.js';
|
|
24
|
+
import type { IKadiContext } from '../types.js';
|
|
25
|
+
|
|
26
|
+
// ─────────────────────────────────────────────────────────
|
|
27
|
+
// Types
|
|
28
|
+
// ─────────────────────────────────────────────────────────
|
|
29
|
+
|
|
30
|
+
export interface ListOptions {
|
|
31
|
+
project?: string;
|
|
32
|
+
profile?: string;
|
|
33
|
+
json?: boolean;
|
|
34
|
+
verbose?: boolean;
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
// ─────────────────────────────────────────────────────────
|
|
38
|
+
// Main entry point
|
|
39
|
+
// ─────────────────────────────────────────────────────────
|
|
40
|
+
|
|
41
|
+
/**
|
|
42
|
+
* Execute the `kadi deploy list` command.
|
|
43
|
+
*
|
|
44
|
+
* Reads the lock file and prints all active deployments.
|
|
45
|
+
*/
|
|
46
|
+
export async function executeList(
|
|
47
|
+
ctx: IKadiContext,
|
|
48
|
+
options: ListOptions
|
|
49
|
+
): Promise<void> {
|
|
50
|
+
const { logger } = ctx;
|
|
51
|
+
const projectRoot = path.resolve(options.project || process.cwd());
|
|
52
|
+
|
|
53
|
+
// ----------------------------------------------------------------
|
|
54
|
+
// 1. Read lock file
|
|
55
|
+
// ----------------------------------------------------------------
|
|
56
|
+
let lockFile: LockFile | null;
|
|
57
|
+
|
|
58
|
+
try {
|
|
59
|
+
lockFile = await readLockFile(projectRoot);
|
|
60
|
+
} catch (err) {
|
|
61
|
+
logger.error(
|
|
62
|
+
errorColor(`Failed to read lock file: ${(err as Error).message}`)
|
|
63
|
+
);
|
|
64
|
+
return;
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
if (!lockFile || Object.keys(lockFile.deployments).length === 0) {
|
|
68
|
+
if (options.json) {
|
|
69
|
+
logger.log(JSON.stringify([], null, 2));
|
|
70
|
+
} else {
|
|
71
|
+
logger.log(dim('No active deployments found.'));
|
|
72
|
+
logger.log(
|
|
73
|
+
dim(
|
|
74
|
+
'Deploy with `kadi deploy` to create a deployment, or specify --project <path>.'
|
|
75
|
+
)
|
|
76
|
+
);
|
|
77
|
+
}
|
|
78
|
+
return;
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
// ----------------------------------------------------------------
|
|
82
|
+
// 2. Filter by profile if specified
|
|
83
|
+
// ----------------------------------------------------------------
|
|
84
|
+
let entries = Object.entries(lockFile.deployments);
|
|
85
|
+
|
|
86
|
+
if (options.profile) {
|
|
87
|
+
entries = entries.filter(([_, d]) => d.profile === options.profile);
|
|
88
|
+
if (entries.length === 0) {
|
|
89
|
+
if (options.json) {
|
|
90
|
+
logger.log(JSON.stringify([], null, 2));
|
|
91
|
+
} else {
|
|
92
|
+
logger.log(
|
|
93
|
+
dim(`No active deployments found for profile "${options.profile}".`)
|
|
94
|
+
);
|
|
95
|
+
}
|
|
96
|
+
return;
|
|
97
|
+
}
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
// ----------------------------------------------------------------
|
|
101
|
+
// 3. JSON output
|
|
102
|
+
// ----------------------------------------------------------------
|
|
103
|
+
if (options.json) {
|
|
104
|
+
const output = entries.map(([key, dep]) => ({
|
|
105
|
+
instanceId: dep.instanceId,
|
|
106
|
+
profile: dep.profile,
|
|
107
|
+
target: dep.target,
|
|
108
|
+
label: dep.label || null,
|
|
109
|
+
deployedAt: dep.deployedAt,
|
|
110
|
+
...(dep.akash
|
|
111
|
+
? {
|
|
112
|
+
dseq: dep.akash.dseq,
|
|
113
|
+
owner: dep.akash.owner,
|
|
114
|
+
provider: dep.akash.provider,
|
|
115
|
+
providerUri: dep.akash.providerUri,
|
|
116
|
+
network: dep.akash.network,
|
|
117
|
+
}
|
|
118
|
+
: {}),
|
|
119
|
+
...(dep.local
|
|
120
|
+
? {
|
|
121
|
+
engine: dep.local.engine,
|
|
122
|
+
services: dep.local.services,
|
|
123
|
+
composePath: dep.local.composePath,
|
|
124
|
+
}
|
|
125
|
+
: {}),
|
|
126
|
+
}));
|
|
127
|
+
logger.log(JSON.stringify(output, null, 2));
|
|
128
|
+
return;
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
// ----------------------------------------------------------------
|
|
132
|
+
// 4. Table output
|
|
133
|
+
// ----------------------------------------------------------------
|
|
134
|
+
logger.log('');
|
|
135
|
+
logger.log(bold(`Active Deployments (${entries.length})`));
|
|
136
|
+
logger.log(dim('─'.repeat(70)));
|
|
137
|
+
logger.log('');
|
|
138
|
+
|
|
139
|
+
// Build column data
|
|
140
|
+
const rows: string[][] = [];
|
|
141
|
+
const headers = ['INSTANCE', 'PROFILE', 'TARGET', 'LABEL', 'DETAILS', 'DEPLOYED'];
|
|
142
|
+
|
|
143
|
+
for (const [key, dep] of entries) {
|
|
144
|
+
const label = dep.label || dim('—');
|
|
145
|
+
const deployed = formatDate(dep.deployedAt);
|
|
146
|
+
let details: string;
|
|
147
|
+
|
|
148
|
+
if (dep.target === 'akash' && dep.akash) {
|
|
149
|
+
details = `dseq=${dep.akash.dseq}`;
|
|
150
|
+
if (options.verbose) {
|
|
151
|
+
details += ` provider=${shortenProvider(dep.akash.provider)}`;
|
|
152
|
+
details += ` network=${dep.akash.network}`;
|
|
153
|
+
}
|
|
154
|
+
} else if (dep.target === 'local' && dep.local) {
|
|
155
|
+
details = `engine=${dep.local.engine}`;
|
|
156
|
+
if (options.verbose) {
|
|
157
|
+
details += ` services=${dep.local.services.join(',')}`;
|
|
158
|
+
}
|
|
159
|
+
} else {
|
|
160
|
+
details = dim('—');
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
rows.push([
|
|
164
|
+
dep.instanceId,
|
|
165
|
+
dep.profile,
|
|
166
|
+
dep.target,
|
|
167
|
+
label,
|
|
168
|
+
details,
|
|
169
|
+
deployed,
|
|
170
|
+
]);
|
|
171
|
+
}
|
|
172
|
+
|
|
173
|
+
// Calculate column widths
|
|
174
|
+
const colWidths = headers.map((h, i) =>
|
|
175
|
+
Math.max(h.length, ...rows.map((r) => stripAnsi(r[i]).length))
|
|
176
|
+
);
|
|
177
|
+
|
|
178
|
+
// Print header
|
|
179
|
+
logger.log(
|
|
180
|
+
headers
|
|
181
|
+
.map((h, i) => dim(h.padEnd(colWidths[i] + 2)))
|
|
182
|
+
.join('')
|
|
183
|
+
);
|
|
184
|
+
|
|
185
|
+
// Print rows
|
|
186
|
+
for (const row of rows) {
|
|
187
|
+
const formatted = row.map((cell, i) => {
|
|
188
|
+
const padded = padWithAnsi(cell, colWidths[i] + 2);
|
|
189
|
+
if (i === 0) return highlight(padded); // Instance ID in cyan
|
|
190
|
+
return padded;
|
|
191
|
+
});
|
|
192
|
+
logger.log(formatted.join(''));
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
logger.log('');
|
|
196
|
+
logger.log(
|
|
197
|
+
dim(
|
|
198
|
+
'Use `kadi deploy down --instance <id>` or `kadi deploy down --label <label>` to tear down a deployment.'
|
|
199
|
+
)
|
|
200
|
+
);
|
|
201
|
+
logger.log('');
|
|
202
|
+
}
|
|
203
|
+
|
|
204
|
+
// ─────────────────────────────────────────────────────────
|
|
205
|
+
// Helpers
|
|
206
|
+
// ─────────────────────────────────────────────────────────
|
|
207
|
+
|
|
208
|
+
/** Strip ANSI escape codes for width calculation */
|
|
209
|
+
function stripAnsi(str: string): string {
|
|
210
|
+
// eslint-disable-next-line no-control-regex
|
|
211
|
+
return str.replace(/\u001B\[[0-9;]*m/g, '');
|
|
212
|
+
}
|
|
213
|
+
|
|
214
|
+
/** Pad a string that may contain ANSI codes to a visible width */
|
|
215
|
+
function padWithAnsi(str: string, width: number): string {
|
|
216
|
+
const visible = stripAnsi(str).length;
|
|
217
|
+
const padding = Math.max(0, width - visible);
|
|
218
|
+
return str + ' '.repeat(padding);
|
|
219
|
+
}
|
|
220
|
+
|
|
221
|
+
/** Format ISO date to a shorter display format */
|
|
222
|
+
function formatDate(iso: string): string {
|
|
223
|
+
try {
|
|
224
|
+
const d = new Date(iso);
|
|
225
|
+
return d.toLocaleDateString('en-US', {
|
|
226
|
+
year: 'numeric',
|
|
227
|
+
month: 'short',
|
|
228
|
+
day: 'numeric',
|
|
229
|
+
hour: '2-digit',
|
|
230
|
+
minute: '2-digit',
|
|
231
|
+
});
|
|
232
|
+
} catch {
|
|
233
|
+
return iso;
|
|
234
|
+
}
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
/** Shorten a provider address for display */
|
|
238
|
+
function shortenProvider(provider: string): string {
|
|
239
|
+
if (provider.length <= 20) return provider;
|
|
240
|
+
return provider.slice(0, 10) + '...' + provider.slice(-8);
|
|
241
|
+
}
|
package/src/index.ts
CHANGED
|
@@ -15,6 +15,7 @@ import { loadAgentConfig, getFirstProfile, hasProfile, getProfile } from './conf
|
|
|
15
15
|
import { executeAkashDeployment } from './commands/deploy.js';
|
|
16
16
|
import { executeLocalDeployment } from './commands/deploy-local.js';
|
|
17
17
|
import { executeDown } from './commands/down.js';
|
|
18
|
+
import { executeList } from './commands/list.js';
|
|
18
19
|
import { validateSecretsOrFail } from './secrets/index.js';
|
|
19
20
|
|
|
20
21
|
/**
|
|
@@ -23,6 +24,7 @@ import { validateSecretsOrFail } from './secrets/index.js';
|
|
|
23
24
|
* Uses Commander subcommands:
|
|
24
25
|
* kadi deploy [up] — Deploy agents (default, backward compatible)
|
|
25
26
|
* kadi deploy down — Tear down an active deployment
|
|
27
|
+
* kadi deploy list — List active deployments
|
|
26
28
|
*
|
|
27
29
|
* @param ctx - Plugin context injected by the Kadi CLI
|
|
28
30
|
*/
|
|
@@ -100,8 +102,14 @@ Teardown:
|
|
|
100
102
|
kadi deploy down # Tear down the active deployment
|
|
101
103
|
kadi deploy down --autonomous # Fully non-interactive (skips confirmation + QR)
|
|
102
104
|
kadi deploy down --autonomous --profile prod # Required when multiple deployments active
|
|
105
|
+
kadi deploy down --label my-label # Tear down deployment by label
|
|
103
106
|
kadi deploy down --yes # Skip confirmation (interactive mode)
|
|
104
107
|
|
|
108
|
+
Listing:
|
|
109
|
+
kadi deploy list # Show all active deployments
|
|
110
|
+
kadi deploy list --json # Output as JSON
|
|
111
|
+
kadi deploy list --verbose # Show additional details
|
|
112
|
+
|
|
105
113
|
Configuration:
|
|
106
114
|
Profiles are defined in agent.json under the "deploy" field.
|
|
107
115
|
CLI flags override profile settings.
|
|
@@ -157,6 +165,7 @@ Supported Targets:
|
|
|
157
165
|
)
|
|
158
166
|
.option('--profile <profile>', 'Profile name to tear down (prompts if multiple active)')
|
|
159
167
|
.option('--instance <id>', 'Instance ID to tear down (4-char hex from deploy output)')
|
|
168
|
+
.option('--label <label>', 'Tear down deployment matching this label')
|
|
160
169
|
.option('--all', 'Tear down all active deployments')
|
|
161
170
|
.option('--engine <engine>', 'Override container engine (docker|podman)')
|
|
162
171
|
.option('-y, --yes', 'Skip confirmation prompt')
|
|
@@ -172,6 +181,7 @@ Supported Targets:
|
|
|
172
181
|
project: (flags.project as string) || process.cwd(),
|
|
173
182
|
profile: flags.profile as string | undefined,
|
|
174
183
|
instance: flags.instance as string | undefined,
|
|
184
|
+
label: flags.label as string | undefined,
|
|
175
185
|
all: flags.all as boolean | undefined,
|
|
176
186
|
engine: flags.engine as string | undefined,
|
|
177
187
|
yes: flags.yes as boolean | undefined,
|
|
@@ -190,6 +200,46 @@ Supported Targets:
|
|
|
190
200
|
console.error(error);
|
|
191
201
|
}
|
|
192
202
|
|
|
203
|
+
process.exitCode = 1;
|
|
204
|
+
}
|
|
205
|
+
});
|
|
206
|
+
|
|
207
|
+
// ─────────────────────────────────────────────────────────────────
|
|
208
|
+
// Subcommand: deploy list — show active deployments
|
|
209
|
+
// ─────────────────────────────────────────────────────────────────
|
|
210
|
+
deploy
|
|
211
|
+
.command('list')
|
|
212
|
+
.alias('ls')
|
|
213
|
+
.description(
|
|
214
|
+
'List active deployments from .kadi-deploy.lock'
|
|
215
|
+
)
|
|
216
|
+
.option(
|
|
217
|
+
'-p, --project <path>',
|
|
218
|
+
'Path to project with agent.json / .kadi-deploy.lock'
|
|
219
|
+
)
|
|
220
|
+
.option('--profile <profile>', 'Filter by profile name')
|
|
221
|
+
.option('--json', 'Output as JSON')
|
|
222
|
+
.option('--verbose, -v', 'Show additional details (provider, services, etc.)')
|
|
223
|
+
.action(async (...args: unknown[]) => {
|
|
224
|
+
const flags = args[0] as Record<string, unknown>;
|
|
225
|
+
|
|
226
|
+
try {
|
|
227
|
+
await executeList(ctx, {
|
|
228
|
+
project: (flags.project as string) || process.cwd(),
|
|
229
|
+
profile: flags.profile as string | undefined,
|
|
230
|
+
json: flags.json as boolean | undefined,
|
|
231
|
+
verbose: flags.verbose as boolean | undefined,
|
|
232
|
+
});
|
|
233
|
+
} catch (error) {
|
|
234
|
+
const errorMessage =
|
|
235
|
+
error instanceof Error ? error.message : String(error);
|
|
236
|
+
logger.error(`List failed: ${errorMessage}`);
|
|
237
|
+
|
|
238
|
+
if (flags.verbose) {
|
|
239
|
+
logger.error('Full error details:');
|
|
240
|
+
console.error(error);
|
|
241
|
+
}
|
|
242
|
+
|
|
193
243
|
process.exitCode = 1;
|
|
194
244
|
}
|
|
195
245
|
});
|
|
@@ -88,8 +88,12 @@ export async function setupRegistryIfNeeded(
|
|
|
88
88
|
};
|
|
89
89
|
}
|
|
90
90
|
|
|
91
|
+
// Resolve engine early — needed for hasLocalImages + registry
|
|
92
|
+
const resolvedEngine = options.containerEngine || 'docker';
|
|
93
|
+
const resolvedTunnel = options.tunnelService || 'kadi';
|
|
94
|
+
|
|
91
95
|
// Check if any services use local images
|
|
92
|
-
if (!hasLocalImages(profile)) {
|
|
96
|
+
if (!hasLocalImages(profile, resolvedEngine)) {
|
|
93
97
|
// All images are remote (docker.io/nginx, ghcr.io/owner/repo, etc.)
|
|
94
98
|
// No registry needed - providers can pull directly
|
|
95
99
|
return {
|
|
@@ -99,8 +103,6 @@ export async function setupRegistryIfNeeded(
|
|
|
99
103
|
}
|
|
100
104
|
|
|
101
105
|
// Profile has local images - start registry infrastructure
|
|
102
|
-
const resolvedTunnel = options.tunnelService || 'kadi';
|
|
103
|
-
const resolvedEngine = options.containerEngine || 'docker';
|
|
104
106
|
logger.log(`🔧 Local images detected — setting up temporary registry (${resolvedTunnel} tunnel)...`);
|
|
105
107
|
|
|
106
108
|
const manager = new TemporaryContainerRegistryManager(logger);
|
|
@@ -179,7 +181,8 @@ export function hasLocalImages(profile: AkashProfile, engine: 'docker' | 'podman
|
|
|
179
181
|
// Check if image exists locally
|
|
180
182
|
const result = execSync(`${engine} images -q ${image}`, {
|
|
181
183
|
encoding: 'utf8',
|
|
182
|
-
stdio: ['pipe', 'pipe', 'pipe'] // Suppress stderr
|
|
184
|
+
stdio: ['pipe', 'pipe', 'pipe'], // Suppress stderr
|
|
185
|
+
timeout: 10000 // 10s — don't hang if daemon is unresponsive
|
|
183
186
|
});
|
|
184
187
|
return result.trim().length > 0;
|
|
185
188
|
} catch (error) {
|
package/src/types.ts
CHANGED
|
@@ -183,6 +183,14 @@ export interface IKadiContext {
|
|
|
183
183
|
commander: ICommander; // Commander.js instance for CLI registration
|
|
184
184
|
logger: IKadiLogger; // KADI logging utilities
|
|
185
185
|
core: IKadiCore; // Core KADI functionality and utilities
|
|
186
|
+
|
|
187
|
+
/**
|
|
188
|
+
* Global KADI configuration from ~/.kadi/config.json
|
|
189
|
+
*
|
|
190
|
+
* Injected by the main kadi CLI. Provides access to user preferences
|
|
191
|
+
* including preferences.containerEngine for default engine selection.
|
|
192
|
+
*/
|
|
193
|
+
config?: Record<string, any>;
|
|
186
194
|
}
|
|
187
195
|
|
|
188
196
|
/**
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Container Engine Utilities
|
|
3
|
+
*
|
|
4
|
+
* Centralized engine resolution and compose command detection for kadi-deploy.
|
|
5
|
+
* Mirrors the resolution pattern used in kadi-broker for consistency:
|
|
6
|
+
*
|
|
7
|
+
* CLI --engine flag > profile.engine > global preferences.containerEngine > 'docker'
|
|
8
|
+
*
|
|
9
|
+
* @module utils/engine
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
import { execSync } from 'node:child_process';
|
|
13
|
+
|
|
14
|
+
/** Container engines supported by kadi-deploy for local deployments. */
export type ContainerEngine = 'docker' | 'podman';

/** Fallback engine when no CLI flag, profile setting, or global preference is set. */
const DEFAULT_ENGINE: ContainerEngine = 'docker';
|
|
17
|
+
|
|
18
|
+
/**
|
|
19
|
+
* Resolve which container engine to use.
|
|
20
|
+
*
|
|
21
|
+
* Resolution order (highest priority wins):
|
|
22
|
+
* 1. CLI --engine flag (explicit per-command override)
|
|
23
|
+
* 2. Deploy profile engine (agent.json profile setting)
|
|
24
|
+
* 3. Global config preferences.containerEngine (user default via `kadi config set`)
|
|
25
|
+
* 4. DEFAULT_ENGINE ('docker')
|
|
26
|
+
*
|
|
27
|
+
* @param flagEngine - Value of the --engine CLI flag (may be undefined)
|
|
28
|
+
* @param profileEngine - Engine from the deploy profile in agent.json (may be undefined)
|
|
29
|
+
* @param ctx - KADI context (used to read global config preferences)
|
|
30
|
+
* @returns The resolved container engine
|
|
31
|
+
*/
|
|
32
|
+
export function resolveEngine(
|
|
33
|
+
flagEngine: string | undefined,
|
|
34
|
+
profileEngine: string | undefined,
|
|
35
|
+
ctx: any
|
|
36
|
+
): ContainerEngine {
|
|
37
|
+
// 1. CLI flag takes highest priority
|
|
38
|
+
if (flagEngine && isValidEngine(flagEngine)) {
|
|
39
|
+
return flagEngine as ContainerEngine;
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
// 2. Deploy profile engine
|
|
43
|
+
if (profileEngine && isValidEngine(profileEngine)) {
|
|
44
|
+
return profileEngine as ContainerEngine;
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
// 3. Global config preferences.containerEngine
|
|
48
|
+
const configEngine = ctx?.config?.preferences?.containerEngine;
|
|
49
|
+
if (configEngine && isValidEngine(configEngine)) {
|
|
50
|
+
return configEngine as ContainerEngine;
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
// 4. Fall back to default
|
|
54
|
+
return DEFAULT_ENGINE;
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
/**
|
|
58
|
+
* Get the compose command for the given engine.
|
|
59
|
+
*
|
|
60
|
+
* - docker -> "docker compose"
|
|
61
|
+
* - podman -> "podman-compose" if available, else "podman compose"
|
|
62
|
+
*
|
|
63
|
+
* Podman doesn't have a built-in compose — it delegates to an external
|
|
64
|
+
* compose provider. On systems with Docker Desktop installed (even if not
|
|
65
|
+
* running), `podman compose` may resolve to Docker Desktop's broken shim.
|
|
66
|
+
* We prefer `podman-compose` (the standalone Python tool) when available.
|
|
67
|
+
*
|
|
68
|
+
* @param engine - The container engine to get the compose command for
|
|
69
|
+
* @returns The compose command string
|
|
70
|
+
*/
|
|
71
|
+
export function composeCmd(engine: ContainerEngine): string {
|
|
72
|
+
if (engine !== 'podman') return `${engine} compose`;
|
|
73
|
+
|
|
74
|
+
try {
|
|
75
|
+
execSync('podman-compose --version', { stdio: 'ignore' });
|
|
76
|
+
return 'podman-compose';
|
|
77
|
+
} catch {
|
|
78
|
+
return 'podman compose';
|
|
79
|
+
}
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
/**
|
|
83
|
+
* Validate that a string is a supported container engine.
|
|
84
|
+
*/
|
|
85
|
+
function isValidEngine(value: string): value is ContainerEngine {
|
|
86
|
+
const normalized = value.toLowerCase();
|
|
87
|
+
return normalized === 'docker' || normalized === 'podman';
|
|
88
|
+
}
|