nvoi 0.1.5 → 0.1.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.claude/todo/refactor/00-overview.md +171 -0
- data/.claude/todo/refactor/01-objects.md +96 -0
- data/.claude/todo/refactor/02-utils.md +143 -0
- data/.claude/todo/refactor/03-external-cloud.md +164 -0
- data/.claude/todo/refactor/04-external-dns.md +104 -0
- data/.claude/todo/refactor/05-external.md +133 -0
- data/.claude/todo/refactor/06-cli.md +123 -0
- data/.claude/todo/refactor/07-cli-deploy-command.md +177 -0
- data/.claude/todo/refactor/08-cli-deploy-steps.md +201 -0
- data/.claude/todo/refactor/09-cli-delete-command.md +169 -0
- data/.claude/todo/refactor/10-cli-exec-command.md +157 -0
- data/.claude/todo/refactor/11-cli-credentials-command.md +190 -0
- data/.claude/todo/refactor/_target.md +79 -0
- data/.claude/todo/scaleway.impl.md +644 -0
- data/.claude/todo/scaleway.reference.md +520 -0
- data/Gemfile +1 -0
- data/Gemfile.lock +12 -2
- data/doc/config-schema.yaml +44 -11
- data/examples/golang/deploy.enc +0 -0
- data/examples/golang/main.go +18 -0
- data/exe/nvoi +3 -1
- data/lib/nvoi/cli/credentials/edit/command.rb +384 -0
- data/lib/nvoi/cli/credentials/show/command.rb +35 -0
- data/lib/nvoi/cli/db/command.rb +308 -0
- data/lib/nvoi/cli/delete/command.rb +75 -0
- data/lib/nvoi/cli/delete/steps/detach_volumes.rb +98 -0
- data/lib/nvoi/cli/delete/steps/teardown_dns.rb +49 -0
- data/lib/nvoi/cli/delete/steps/teardown_firewall.rb +46 -0
- data/lib/nvoi/cli/delete/steps/teardown_network.rb +30 -0
- data/lib/nvoi/cli/delete/steps/teardown_server.rb +50 -0
- data/lib/nvoi/cli/delete/steps/teardown_tunnel.rb +44 -0
- data/lib/nvoi/cli/delete/steps/teardown_volume.rb +61 -0
- data/lib/nvoi/cli/deploy/command.rb +184 -0
- data/lib/nvoi/cli/deploy/steps/build_image.rb +27 -0
- data/lib/nvoi/cli/deploy/steps/cleanup_images.rb +42 -0
- data/lib/nvoi/cli/deploy/steps/configure_tunnel.rb +100 -0
- data/lib/nvoi/cli/deploy/steps/deploy_service.rb +396 -0
- data/lib/nvoi/cli/deploy/steps/provision_network.rb +44 -0
- data/lib/nvoi/cli/deploy/steps/provision_server.rb +143 -0
- data/lib/nvoi/cli/deploy/steps/provision_volume.rb +171 -0
- data/lib/nvoi/cli/deploy/steps/setup_k3s.rb +481 -0
- data/lib/nvoi/cli/exec/command.rb +173 -0
- data/lib/nvoi/cli.rb +83 -142
- data/lib/nvoi/config_api/actions/app.rb +53 -0
- data/lib/nvoi/config_api/actions/compute_provider.rb +55 -0
- data/lib/nvoi/config_api/actions/database.rb +70 -0
- data/lib/nvoi/config_api/actions/env.rb +32 -0
- data/lib/nvoi/config_api/actions/secret.rb +32 -0
- data/lib/nvoi/config_api/actions/server.rb +66 -0
- data/lib/nvoi/config_api/actions/volume.rb +40 -0
- data/lib/nvoi/config_api/base.rb +44 -0
- data/lib/nvoi/config_api/result.rb +26 -0
- data/lib/nvoi/config_api.rb +70 -0
- data/lib/nvoi/errors.rb +68 -50
- data/lib/nvoi/external/cloud/aws.rb +425 -0
- data/lib/nvoi/external/cloud/base.rb +99 -0
- data/lib/nvoi/external/cloud/factory.rb +48 -0
- data/lib/nvoi/external/cloud/hetzner.rb +376 -0
- data/lib/nvoi/external/cloud/scaleway.rb +533 -0
- data/lib/nvoi/external/cloud.rb +15 -0
- data/lib/nvoi/external/containerd.rb +82 -0
- data/lib/nvoi/external/database/mysql.rb +84 -0
- data/lib/nvoi/external/database/postgres.rb +82 -0
- data/lib/nvoi/external/database/provider.rb +65 -0
- data/lib/nvoi/external/database/sqlite.rb +72 -0
- data/lib/nvoi/external/database.rb +22 -0
- data/lib/nvoi/external/dns/cloudflare.rb +292 -0
- data/lib/nvoi/external/kubectl.rb +65 -0
- data/lib/nvoi/external/ssh.rb +106 -0
- data/lib/nvoi/objects/config_override.rb +60 -0
- data/lib/nvoi/objects/configuration.rb +463 -0
- data/lib/nvoi/objects/database.rb +56 -0
- data/lib/nvoi/objects/dns.rb +14 -0
- data/lib/nvoi/objects/firewall.rb +11 -0
- data/lib/nvoi/objects/network.rb +11 -0
- data/lib/nvoi/objects/server.rb +14 -0
- data/lib/nvoi/objects/service_spec.rb +26 -0
- data/lib/nvoi/objects/tunnel.rb +14 -0
- data/lib/nvoi/objects/volume.rb +17 -0
- data/lib/nvoi/utils/config_loader.rb +172 -0
- data/lib/nvoi/utils/constants.rb +61 -0
- data/lib/nvoi/{credentials/manager.rb → utils/credential_store.rb} +16 -16
- data/lib/nvoi/{credentials → utils}/crypto.rb +8 -5
- data/lib/nvoi/{config → utils}/env_resolver.rb +10 -2
- data/lib/nvoi/utils/logger.rb +84 -0
- data/lib/nvoi/{config/naming.rb → utils/namer.rb} +28 -25
- data/lib/nvoi/{deployer → utils}/retry.rb +23 -3
- data/lib/nvoi/utils/templates.rb +62 -0
- data/lib/nvoi/version.rb +1 -1
- data/lib/nvoi.rb +10 -54
- data/templates/error-backend.yaml.erb +134 -0
- metadata +97 -44
- data/examples/golang/deploy.yml +0 -54
- data/lib/nvoi/cloudflare/client.rb +0 -287
- data/lib/nvoi/config/config.rb +0 -248
- data/lib/nvoi/config/loader.rb +0 -102
- data/lib/nvoi/config/ssh_keys.rb +0 -82
- data/lib/nvoi/config/types.rb +0 -274
- data/lib/nvoi/constants.rb +0 -59
- data/lib/nvoi/credentials/editor.rb +0 -272
- data/lib/nvoi/deployer/cleaner.rb +0 -36
- data/lib/nvoi/deployer/image_builder.rb +0 -23
- data/lib/nvoi/deployer/infrastructure.rb +0 -126
- data/lib/nvoi/deployer/orchestrator.rb +0 -146
- data/lib/nvoi/deployer/service_deployer.rb +0 -311
- data/lib/nvoi/deployer/tunnel_manager.rb +0 -57
- data/lib/nvoi/deployer/types.rb +0 -8
- data/lib/nvoi/k8s/renderer.rb +0 -44
- data/lib/nvoi/k8s/templates.rb +0 -29
- data/lib/nvoi/logger.rb +0 -72
- data/lib/nvoi/providers/aws.rb +0 -403
- data/lib/nvoi/providers/base.rb +0 -111
- data/lib/nvoi/providers/hetzner.rb +0 -288
- data/lib/nvoi/providers/hetzner_client.rb +0 -170
- data/lib/nvoi/remote/docker_manager.rb +0 -203
- data/lib/nvoi/remote/ssh_executor.rb +0 -72
- data/lib/nvoi/remote/volume_manager.rb +0 -103
- data/lib/nvoi/service/delete.rb +0 -234
- data/lib/nvoi/service/deploy.rb +0 -80
- data/lib/nvoi/service/exec.rb +0 -144
- data/lib/nvoi/service/provider.rb +0 -36
- data/lib/nvoi/steps/application_deployer.rb +0 -26
- data/lib/nvoi/steps/database_provisioner.rb +0 -60
- data/lib/nvoi/steps/k3s_cluster_setup.rb +0 -105
- data/lib/nvoi/steps/k3s_provisioner.rb +0 -351
- data/lib/nvoi/steps/server_provisioner.rb +0 -43
- data/lib/nvoi/steps/services_provisioner.rb +0 -29
- data/lib/nvoi/steps/tunnel_configurator.rb +0 -66
- data/lib/nvoi/steps/volume_provisioner.rb +0 -154
|
# frozen_string_literal: true

module Nvoi
  module Deployer
    # Cleaner removes stale container images after a deployment, retaining
    # the newest N tags plus the just-deployed tag and "latest".
    class Cleaner
      # config         - exposes keep_count_value and container_prefix
      # docker_manager - remote Docker/containerd client
      # log            - printf-style logger
      def initialize(config, docker_manager, log)
        @config = config
        @docker_manager = docker_manager
        @log = log
      end

      # Prune old images for the configured container prefix.
      #
      # current_tag - the tag of the image deployed in this run; it is always
      #               retained regardless of its sort position.
      def cleanup_old_images(current_tag)
        keep_count = @config.keep_count_value
        prefix = @config.container_prefix

        @log.info "Cleaning up old images (keeping %d)", keep_count

        # Tags encode timestamps, so a reverse lexical sort is newest-first.
        candidates = @docker_manager.list_images("reference=#{prefix}:*").sort.reverse
        retained = candidates.take(keep_count)

        # Never delete the tag we just deployed, nor "latest".
        retained << current_tag unless retained.include?(current_tag)
        retained << "latest"

        @docker_manager.cleanup_old_images(prefix, retained.uniq)

        @log.success "Old images cleaned up"
      end
    end
  end
end
# frozen_string_literal: true

module Nvoi
  module Deployer
    # ImageBuilder handles Docker image building and pushing.
    class ImageBuilder
      # config         - exposes namer (for the "latest" tag)
      # docker_manager - remote Docker/containerd client
      # log            - printf-style logger
      def initialize(config, docker_manager, log)
        @config = config
        @docker_manager = docker_manager
        @log = log
      end

      # Build the image locally, transfer it to the remote host, and load it
      # with containerd. Tags the build both as image_tag and as "latest".
      def build_and_push(working_dir, image_tag)
        @log.info "Building Docker image: %s", image_tag

        latest_tag = @config.namer.latest_image_tag
        @docker_manager.build_image(working_dir, image_tag, latest_tag)

        @log.success "Image built and pushed: %s", image_tag
      end
    end
  end
end
# frozen_string_literal: true

module Nvoi
  module Deployer
    # Infrastructure handles cloud resource provisioning (network, firewall,
    # and servers) through a pluggable provider client.
    class Infrastructure
      # Base OS image for every provisioned server.
      # BUGFIX: previously `image` was only assigned inside the hetzner/aws
      # case branches, so an unrecognized provider passed nil to the
      # ServerCreateOptions. Both branches used this same literal; hoist it.
      DEFAULT_IMAGE = "ubuntu-22.04"

      # config   - deployment configuration (names, provider settings, SSH keys)
      # provider - cloud provider client
      # log      - printf-style logger
      def initialize(config, provider, log)
        @config = config
        @provider = provider
        @log = log
      end

      # Find or create the project network. Returns the provider's network object.
      def provision_network
        @log.info "Provisioning network: %s", @config.network_name
        network = @provider.find_or_create_network(@config.network_name)
        @log.success "Network ready: %s", network.id
        network
      end

      # Find or create the project firewall. Returns the provider's firewall object.
      def provision_firewall
        @log.info "Provisioning firewall: %s", @config.firewall_name
        firewall = @provider.find_or_create_firewall(@config.firewall_name)
        @log.success "Firewall ready: %s", firewall.id
        firewall
      end

      # Provision a server. Idempotent: an existing server with the same name
      # is returned as-is. Otherwise creates the server, waits for it to run,
      # and blocks until SSH is reachable.
      #
      # name          - server name
      # network_id    - network to attach
      # firewall_id   - firewall to attach
      # server_config - optional per-server overrides (type/location); may be nil
      #
      # Returns the provider's server object.
      def provision_server(name, network_id, firewall_id, server_config)
        @log.info "Provisioning server: %s", name

        existing = @provider.find_server(name)
        if existing
          @log.info "Server already exists: %s (%s)", name, existing.public_ipv4
          return existing
        end

        # Per-server config wins; fall back to provider-level defaults.
        server_type = server_config&.type
        location = server_config&.location
        image = DEFAULT_IMAGE

        case @config.provider_name
        when "hetzner"
          h = @config.hetzner
          server_type ||= h.server_type
          location ||= h.server_location
        when "aws"
          a = @config.aws
          server_type ||= a.instance_type
          location ||= a.region
        end

        opts = Providers::ServerCreateOptions.new(
          name:,
          type: server_type,
          image:,
          location:,
          user_data: generate_user_data,
          network_id:,
          firewall_id:
        )

        server = @provider.create_server(opts)
        @log.info "Server created: %s (waiting for ready...)", server.id

        # Wait for the instance to report running, then for SSH to answer.
        server = @provider.wait_for_server(server.id, Constants::SERVER_READY_MAX_ATTEMPTS)
        @log.success "Server ready: %s (%s)", name, server.public_ipv4

        wait_for_ssh(server.public_ipv4)

        server
      end

      private

      # Cloud-init user data: creates a passwordless-sudo "deploy" user with
      # the configured SSH public key and installs base packages.
      def generate_user_data
        ssh_key = @config.ssh_public_key

        <<~CLOUD_INIT
          #cloud-config
          users:
            - name: deploy
              groups: sudo, docker
              shell: /bin/bash
              sudo: ALL=(ALL) NOPASSWD:ALL
              ssh_authorized_keys:
                - #{ssh_key}
          package_update: true
          package_upgrade: true
          packages:
            - curl
            - git
            - jq
            - rsync
        CLOUD_INIT
      end

      # Poll until SSH answers on the given IP; raises SSHConnectionError
      # after Constants::SSH_READY_MAX_ATTEMPTS failed attempts.
      def wait_for_ssh(ip)
        @log.info "Waiting for SSH on %s...", ip
        ssh = Remote::SSHExecutor.new(ip, @config.ssh_key_path)

        Constants::SSH_READY_MAX_ATTEMPTS.times do
          begin
            if ssh.execute("echo 'ready'").strip == "ready"
              @log.success "SSH ready"
              return
            end
          rescue SSHCommandError
            # SSH not up yet; fall through to sleep and retry.
          end

          sleep(Constants::SSH_READY_INTERVAL)
        end

        raise SSHConnectionError, "SSH connection failed after #{Constants::SSH_READY_MAX_ATTEMPTS} attempts"
      end
    end
  end
end
# frozen_string_literal: true

module Nvoi
  module Deployer
    # Orchestrator coordinates the deployment pipeline.
    class Orchestrator
      # config   - deployment configuration
      # provider - cloud provider client
      # log      - printf-style logger
      def initialize(config, provider, log)
        @config = config
        @provider = provider
        @log = log
      end

      # Run the full deployment against the main server. A remote lock file
      # is held for the duration so concurrent deploys cannot interleave.
      def run(server_ip, tunnels, working_dir)
        @log.info "Starting deployment orchestration"

        @ssh = Remote::SSHExecutor.new(server_ip, @config.ssh_key_path)

        acquire_lock
        begin
          run_deployment(tunnels, working_dir)
        ensure
          release_lock
        end
      end

      private

      def run_deployment(tunnels, working_dir)
        ssh = @ssh
        docker = Remote::DockerManager.new(ssh)

        # A timestamped tag makes each deploy's image unique and sortable.
        timestamp = Time.now.strftime("%Y%m%d%H%M%S")
        image_tag = @config.namer.image_tag(timestamp)

        ImageBuilder.new(@config, docker, @log).build_and_push(working_dir, image_tag)

        # The image is already in containerd from build_image; re-tag it for
        # the in-cluster registry so K8s can pull it.
        registry_tag = "localhost:#{Constants::REGISTRY_PORT}/#{@config.container_prefix}:#{timestamp}"
        push_to_registry(ssh, image_tag, registry_tag)

        service_deployer = ServiceDeployer.new(@config, ssh, @log)

        # Env vars come from EnvResolver (single source of truth); the first
        # app service yields the full set (database vars, deploy_env, ...).
        first_service = @config.deploy.application.app.keys.first
        all_env = @config.env_for_service(first_service)
        service_deployer.deploy_app_secret(all_env)

        # SQLite is handled via app volumes, so only managed DBs deploy here.
        db_config = @config.deploy.application.database
        if db_config && db_config.adapter != "sqlite3"
          service_deployer.deploy_database(db_config.to_service_spec(@config.namer))
        end

        # Additional services (redis, etc.)
        @config.deploy.application.services.each do |service_name, service_config|
          spec = service_config.to_service_spec(@config.deploy.application.name, service_name)
          service_deployer.deploy_service(service_name, spec)
        end

        # App services, each optionally fronted by a cloudflared sidecar.
        @config.deploy.application.app.each do |service_name, service_config|
          service_env = @config.env_for_service(service_name)
          service_deployer.deploy_app_service(service_name, service_config, registry_tag, service_env)

          tunnel = tunnels.find { |t| t.service_name == service_name }
          next unless tunnel

          service_deployer.deploy_cloudflared(service_name, tunnel.tunnel_token)
          # Verify traffic is routing correctly through the tunnel.
          service_deployer.verify_traffic_switchover(service_config)
        end

        Cleaner.new(@config, docker, @log).cleanup_old_images(timestamp)

        @log.success "Deployment orchestration complete"
      end

      # Take the remote deployment lock, overwriting a stale one. Raises
      # DeploymentError when a fresh lock is already held by another deploy.
      def acquire_lock
        lock_file = @config.namer.deployment_lock_file_path

        output = @ssh.execute("test -f #{lock_file} && cat #{lock_file} || echo ''").strip

        unless output.empty?
          timestamp = output.to_i
          if timestamp > 0
            age = Time.now - Time.at(timestamp)

            if age < Constants::STALE_DEPLOYMENT_LOCK_AGE
              raise DeploymentError.new(
                "lock",
                "deployment already in progress (started #{age.round}s ago). Wait or remove lock file: #{lock_file}"
              )
            end

            @log.warning "Removing stale deployment lock (age: #{age.round}s)"
          end
        end

        @ssh.execute("echo #{Time.now.to_i} > #{lock_file}")
        @log.info "Deployment lock acquired: %s", lock_file
      end

      def release_lock
        lock_file = @config.namer.deployment_lock_file_path
        @log.info "Releasing deployment lock"
        @ssh.execute("rm -f #{lock_file}")
      rescue StandardError
        # Best-effort: a failed unlock must not mask the deploy result.
      end

      # Re-tag the locally built image and push it to the in-cluster registry
      # over plain HTTP (localhost registry).
      def push_to_registry(ssh, local_tag, registry_tag)
        @log.info "Pushing to in-cluster registry: %s", registry_tag

        ssh.execute("sudo ctr -n k8s.io images tag #{local_tag} #{registry_tag}")
        ssh.execute("sudo ctr -n k8s.io images push --plain-http #{registry_tag}")

        @log.success "Image pushed to registry"
      end
    end
  end
end
# frozen_string_literal: true

module Nvoi
  module Deployer
    # ServiceDeployer handles K8s deployment of application services.
    class ServiceDeployer
      # Resource requests/limits applied to every app container.
      DEFAULT_RESOURCES = {
        request_memory: "128Mi",
        request_cpu: "100m",
        limit_memory: "512Mi",
        limit_cpu: "500m"
      }.freeze

      # config - deployment configuration (exposes namer)
      # ssh    - remote SSH executor for the main server
      # log    - printf-style logger
      def initialize(config, ssh, log)
        @config = config
        @ssh = ssh
        @log = log
        @namer = config.namer
      end

      # Deploy the app secret containing all environment variables.
      def deploy_app_secret(env_vars)
        secret_name = @namer.app_secret_name

        @log.info "Deploying app secret: %s", secret_name

        K8s::Renderer.apply_manifest(@ssh, "app-secret.yaml", {
          name: secret_name,
          env_vars:
        })

        @log.success "App secret deployed"
      end

      # Deploy an app service (web, worker, etc.). Ported services get a
      # Deployment + Service (+ Ingress when a domain is set); portless
      # services get a worker Deployment.
      def deploy_app_service(service_name, service_config, image_tag, env)
        deployment_name = @namer.app_deployment_name(service_name)
        @log.info "Deploying app service: %s", deployment_name

        has_port = service_config.port && service_config.port.positive?
        template = has_port ? "app-deployment.yaml" : "worker-deployment.yaml"

        readiness_probe, liveness_probe = build_probes(service_config, has_port)

        data = {
          name: deployment_name,
          image: image_tag,
          replicas: has_port ? 2 : 1,
          port: service_config.port,
          command: service_config.command&.split || [],
          secret_name: @namer.app_secret_name,
          env_keys: env.keys.sort,
          affinity_server_names: service_config.servers,
          resources: DEFAULT_RESOURCES,
          readiness_probe:,
          liveness_probe:,
          volume_mounts: [],
          host_path_volumes: [],
          volumes: []
        }

        # hostPath volumes for any configured mounts.
        service_config.volumes&.each do |vol_key, mount_path|
          host_path = "/opt/nvoi/volumes/#{@namer.app_volume_name(service_name, vol_key)}"
          data[:volume_mounts] << { name: vol_key, mount_path: }
          data[:host_path_volumes] << { name: vol_key, host_path: }
        end

        K8s::Renderer.apply_manifest(@ssh, template, data)

        if has_port
          K8s::Renderer.apply_manifest(@ssh, "app-service.yaml", {
            name: deployment_name,
            port: service_config.port
          })
        end

        if service_config.domain && !service_config.domain.empty?
          K8s::Renderer.apply_manifest(@ssh, "app-ingress.yaml", {
            name: deployment_name,
            domain: public_hostname(service_config),
            port: service_config.port
          })
        end

        @log.info "Waiting for deployment to be ready..."
        K8s::Renderer.wait_for_deployment(@ssh, deployment_name)

        # Run pre-run command if specified (e.g. rails db:migrate).
        if service_config.pre_run_command && !service_config.pre_run_command.empty?
          run_pre_run_command(service_name, service_config.pre_run_command)
        end

        @log.success "App service deployed: %s", deployment_name
      end

      # Deploy the database as a StatefulSet, creating its secret first and
      # waiting for the rollout to complete.
      def deploy_database(db_spec)
        @log.info "Deploying database: %s", db_spec.name

        data = {
          service_name: db_spec.name,
          adapter: @config.deploy.application.database.adapter,
          image: db_spec.image,
          port: db_spec.port,
          secret_name: @namer.database_secret_name,
          secret_keys: db_spec.secrets.keys.sort,
          data_path: "/var/lib/postgresql/data",
          storage_size: "10Gi",
          affinity_server_names: db_spec.servers,
          host_path: nil
        }

        # Use hostPath for the database volume when configured.
        if @config.deploy.application.database.volume
          data[:host_path] = "/opt/nvoi/volumes/#{@namer.database_volume_name}"
        end

        K8s::Renderer.apply_manifest(@ssh, "app-secret.yaml", {
          name: @namer.database_secret_name,
          env_vars: db_spec.secrets
        })

        K8s::Renderer.apply_manifest(@ssh, "db-statefulset.yaml", data)

        @log.info "Waiting for database to be ready..."
        wait_for_statefulset(db_spec.name)

        @log.success "Database deployed: %s", db_spec.name
      end

      # Deploy an additional service (redis, etc.).
      def deploy_service(service_name, service_spec)
        @log.info "Deploying service: %s", service_spec.name

        host_path = nil
        if service_spec.volumes["data"]
          host_path = "/opt/nvoi/volumes/#{@namer.service_volume_name(service_name, 'data')}"
        end

        data = {
          name: service_spec.name,
          image: service_spec.image,
          port: service_spec.port,
          command: service_spec.command,
          env_vars: service_spec.env,
          env_keys: service_spec.env.keys.sort,
          volume_path: service_spec.volumes["data"],
          host_path:,
          affinity_server_names: service_spec.servers
        }

        K8s::Renderer.apply_manifest(@ssh, "service-deployment.yaml", data)

        @log.success "Service deployed: %s", service_spec.name
      end

      # Deploy a cloudflared connector Deployment for the given service,
      # authenticated with the supplied tunnel token.
      def deploy_cloudflared(service_name, tunnel_token)
        deployment_name = @namer.cloudflared_deployment_name(service_name)
        @log.info "Deploying cloudflared: %s", deployment_name

        manifest = <<~YAML
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: #{deployment_name}
            namespace: default
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: #{deployment_name}
            template:
              metadata:
                labels:
                  app: #{deployment_name}
              spec:
                containers:
                  - name: cloudflared
                    image: cloudflare/cloudflared:latest
                    args:
                      - tunnel
                      - run
                      - --token
                      - #{tunnel_token}
        YAML

        @ssh.execute("cat <<'EOF' | kubectl apply -f -\n#{manifest}\nEOF")

        @log.success "Cloudflared deployed: %s", deployment_name
      end

      # Verify traffic is routing to the new deployment via its public URL.
      # Requires Constants::TRAFFIC_VERIFY_CONSECUTIVE successful probes in a
      # row; raises DeploymentError when verification does not succeed within
      # Constants::TRAFFIC_VERIFY_ATTEMPTS attempts.
      def verify_traffic_switchover(service_config)
        return unless service_config.domain && !service_config.domain.empty?

        health_path = service_config.healthcheck&.path || "/"
        public_url = "https://#{public_hostname(service_config)}#{health_path}"

        @log.info "Verifying public traffic routing"
        @log.info "Testing: %s", public_url

        consecutive_success = 0
        required_consecutive = Constants::TRAFFIC_VERIFY_CONSECUTIVE
        max_attempts = Constants::TRAFFIC_VERIFY_ATTEMPTS

        max_attempts.times do |attempt|
          curl_cmd = "curl -s -o /dev/null -w '%{http_code}' -m 10 '#{public_url}' 2>/dev/null"

          begin
            http_code = @ssh.execute(curl_cmd).strip

            if http_code == "200"
              consecutive_success += 1
              @log.success "[%d/%d] Public URL responding: %s", consecutive_success, required_consecutive, http_code

              if consecutive_success >= required_consecutive
                @log.success "Traffic switchover verified: public URL accessible"
                return
              end
            else
              if consecutive_success > 0
                @log.warning "Success streak broken at %d, restarting count", consecutive_success
              end
              consecutive_success = 0
              @log.info "[%d/%d] Public URL check: %s (expected: 200)", attempt + 1, max_attempts, http_code
            end
          rescue SSHCommandError
            consecutive_success = 0
            @log.info "[%d/%d] Public URL check failed", attempt + 1, max_attempts
          end

          sleep(Constants::TRAFFIC_VERIFY_INTERVAL)
        end

        raise DeploymentError.new(
          "traffic_verification",
          "public URL verification failed after #{max_attempts} attempts. Cloudflare tunnel may not be routing correctly."
        )
      end

      private

      # Build [readiness, liveness] probe hashes, or [nil, nil] when no
      # healthcheck is configured or the service has no port. The liveness
      # probe is identical except for a longer initial delay.
      def build_probes(service_config, has_port)
        return [nil, nil] unless service_config.healthcheck && has_port

        hc = service_config.healthcheck
        readiness = {
          path: hc.path || "/health",
          port: hc.port || service_config.port,
          initial_delay: 10,
          period: 10,
          timeout: 5,
          failure_threshold: 3
        }
        [readiness, readiness.merge(initial_delay: 30)]
      end

      # Public hostname for a service: "sub.domain" unless the subdomain is
      # blank or the apex marker "@", in which case the bare domain is used.
      def public_hostname(service_config)
        sub = service_config.subdomain
        if sub && !sub.empty? && sub != "@"
          "#{sub}.#{service_config.domain}"
        else
          service_config.domain
        end
      end

      # Run the configured pre-run command inside the service's first pod;
      # on failure, dump pod logs and abort the deployment.
      def run_pre_run_command(service_name, command)
        @log.info "Running pre-run command: %s", command

        pod_label = @namer.app_pod_label(service_name)
        pod_name = @ssh.execute("kubectl get pod -l #{pod_label} -o jsonpath='{.items[0].metadata.name}'")
        pod_name = pod_name.strip.delete("'")

        escaped_command = command.gsub("'", "'\"'\"'")
        exec_cmd = "kubectl exec #{pod_name} -- sh -c '#{escaped_command}'"

        begin
          output = @ssh.execute(exec_cmd)
          @log.info "Pre-run command output:\n%s", output unless output.empty?
        rescue SSHCommandError => e
          @log.error "Pre-run command failed: %s", e.message

          logs = @ssh.execute("kubectl logs #{pod_name} --tail=50")
          @log.error "Pod logs:\n%s", logs

          raise DeploymentError.new("pre_run_command", "deployment aborted: pre-run command failed: #{e.message}")
        end
      end

      def wait_for_statefulset(name, namespace: "default", timeout: 300)
        @ssh.execute("kubectl rollout status statefulset/#{name} -n #{namespace} --timeout=#{timeout}s")
      end
    end
  end
end
# frozen_string_literal: true

module Nvoi
  module Deployer
    # TunnelManager handles Cloudflare tunnel operations.
    class TunnelManager
      # cf_client - Cloudflare API client
      # log       - printf-style logger
      def initialize(cf_client, log)
        @cf_client = cf_client
        @log = log
      end

      # Create or reuse a tunnel, configure its ingress, verify propagation,
      # and point a proxied CNAME at it. Returns a TunnelInfo with the tunnel
      # id and connector token.
      #
      # Raises CloudflareError when the DNS zone for `domain` is not found.
      def setup_tunnel(tunnel_name, hostname, service_url, domain)
        @log.info "Setting up tunnel: %s -> %s", tunnel_name, hostname

        tunnel = @cf_client.find_tunnel(tunnel_name)
        if tunnel
          @log.info "Using existing tunnel: %s", tunnel_name
        else
          @log.info "Creating new tunnel: %s", tunnel_name
          tunnel = @cf_client.create_tunnel(tunnel_name)
        end

        # List responses may omit the token for existing tunnels; fetch it.
        token = tunnel.token
        token = @cf_client.get_tunnel_token(tunnel.id) if token.nil? || token.empty?

        @log.info "Configuring tunnel ingress: %s -> %s", hostname, service_url
        @cf_client.update_tunnel_configuration(tunnel.id, hostname, service_url)

        @log.info "Verifying tunnel configuration..."
        @cf_client.verify_tunnel_configuration(tunnel.id, hostname, service_url, Constants::TUNNEL_CONFIG_VERIFY_ATTEMPTS)

        @log.info "Creating DNS CNAME record: %s", hostname
        zone = @cf_client.find_zone(domain)
        raise CloudflareError, "zone not found: #{domain}" unless zone

        tunnel_cname = "#{tunnel.id}.cfargotunnel.com"
        @cf_client.create_or_update_dns_record(zone.id, hostname, "CNAME", tunnel_cname, proxied: true)

        @log.success "Tunnel configured: %s", tunnel_name

        TunnelInfo.new(
          tunnel_id: tunnel.id,
          tunnel_token: token
        )
      end
    end
  end
end