nvoi 0.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.rubocop.yml +19 -0
- data/Gemfile +9 -0
- data/Gemfile.lock +151 -0
- data/Makefile +26 -0
- data/Rakefile +16 -0
- data/doc/config-schema.yaml +357 -0
- data/examples/apex-wildcard/deploy.yml +68 -0
- data/examples/golang/.gitignore +19 -0
- data/examples/golang/Dockerfile +43 -0
- data/examples/golang/README.md +59 -0
- data/examples/golang/deploy.enc +0 -0
- data/examples/golang/deploy.yml +54 -0
- data/examples/golang/go.mod +39 -0
- data/examples/golang/go.sum +96 -0
- data/examples/golang/main.go +177 -0
- data/examples/golang/models/user.go +17 -0
- data/examples/golang-postgres-multi/.gitignore +18 -0
- data/examples/golang-postgres-multi/Dockerfile +39 -0
- data/examples/golang-postgres-multi/README.md +211 -0
- data/examples/golang-postgres-multi/deploy.yml +67 -0
- data/examples/golang-postgres-multi/go.mod +45 -0
- data/examples/golang-postgres-multi/go.sum +108 -0
- data/examples/golang-postgres-multi/main.go +197 -0
- data/examples/golang-postgres-multi/models/user.go +17 -0
- data/examples/postgres-multi/.env.production.example +11 -0
- data/examples/postgres-multi/README.md +112 -0
- data/examples/postgres-multi/deploy.yml +74 -0
- data/examples/postgres-single/.env.production.example +11 -0
- data/examples/postgres-single/.gitignore +15 -0
- data/examples/postgres-single/Dockerfile +35 -0
- data/examples/postgres-single/README.md +76 -0
- data/examples/postgres-single/deploy.yml +56 -0
- data/examples/postgres-single/go.mod +45 -0
- data/examples/postgres-single/go.sum +108 -0
- data/examples/postgres-single/main.go +184 -0
- data/examples/rails-single/.dockerignore +51 -0
- data/examples/rails-single/.env.production.example +11 -0
- data/examples/rails-single/.github/dependabot.yml +12 -0
- data/examples/rails-single/.github/workflows/ci.yml +39 -0
- data/examples/rails-single/.gitignore +20 -0
- data/examples/rails-single/.node-version +1 -0
- data/examples/rails-single/.rubocop.yml +8 -0
- data/examples/rails-single/.ruby-version +1 -0
- data/examples/rails-single/Dockerfile +86 -0
- data/examples/rails-single/Gemfile +56 -0
- data/examples/rails-single/Gemfile.lock +350 -0
- data/examples/rails-single/Procfile.dev +3 -0
- data/examples/rails-single/README.md +17 -0
- data/examples/rails-single/Rakefile +6 -0
- data/examples/rails-single/app/assets/builds/.keep +0 -0
- data/examples/rails-single/app/assets/images/.keep +0 -0
- data/examples/rails-single/app/assets/stylesheets/application.tailwind.css +1 -0
- data/examples/rails-single/app/controllers/application_controller.rb +4 -0
- data/examples/rails-single/app/controllers/concerns/.keep +0 -0
- data/examples/rails-single/app/controllers/users_controller.rb +19 -0
- data/examples/rails-single/app/helpers/application_helper.rb +2 -0
- data/examples/rails-single/app/javascript/application.js +3 -0
- data/examples/rails-single/app/javascript/controllers/application.js +9 -0
- data/examples/rails-single/app/javascript/controllers/hello_controller.js +7 -0
- data/examples/rails-single/app/javascript/controllers/index.js +8 -0
- data/examples/rails-single/app/jobs/application_job.rb +7 -0
- data/examples/rails-single/app/mailers/application_mailer.rb +4 -0
- data/examples/rails-single/app/models/application_record.rb +3 -0
- data/examples/rails-single/app/models/concerns/.keep +0 -0
- data/examples/rails-single/app/models/user.rb +2 -0
- data/examples/rails-single/app/views/layouts/application.html.erb +28 -0
- data/examples/rails-single/app/views/layouts/mailer.html.erb +13 -0
- data/examples/rails-single/app/views/layouts/mailer.text.erb +1 -0
- data/examples/rails-single/app/views/pwa/manifest.json.erb +22 -0
- data/examples/rails-single/app/views/pwa/service-worker.js +26 -0
- data/examples/rails-single/app/views/users/index.html.erb +38 -0
- data/examples/rails-single/bin/brakeman +7 -0
- data/examples/rails-single/bin/bundle +109 -0
- data/examples/rails-single/bin/dev +11 -0
- data/examples/rails-single/bin/docker-entrypoint +14 -0
- data/examples/rails-single/bin/jobs +6 -0
- data/examples/rails-single/bin/kamal +27 -0
- data/examples/rails-single/bin/rails +4 -0
- data/examples/rails-single/bin/rake +4 -0
- data/examples/rails-single/bin/rubocop +8 -0
- data/examples/rails-single/bin/setup +37 -0
- data/examples/rails-single/bin/thrust +5 -0
- data/examples/rails-single/bun.lock +224 -0
- data/examples/rails-single/config/application.rb +42 -0
- data/examples/rails-single/config/boot.rb +4 -0
- data/examples/rails-single/config/cable.yml +17 -0
- data/examples/rails-single/config/cache.yml +16 -0
- data/examples/rails-single/config/credentials.yml.enc +1 -0
- data/examples/rails-single/config/database.yml +100 -0
- data/examples/rails-single/config/environment.rb +5 -0
- data/examples/rails-single/config/environments/development.rb +69 -0
- data/examples/rails-single/config/environments/production.rb +87 -0
- data/examples/rails-single/config/environments/test.rb +50 -0
- data/examples/rails-single/config/initializers/assets.rb +7 -0
- data/examples/rails-single/config/initializers/content_security_policy.rb +25 -0
- data/examples/rails-single/config/initializers/filter_parameter_logging.rb +8 -0
- data/examples/rails-single/config/initializers/inflections.rb +16 -0
- data/examples/rails-single/config/locales/en.yml +31 -0
- data/examples/rails-single/config/puma.rb +41 -0
- data/examples/rails-single/config/queue.yml +18 -0
- data/examples/rails-single/config/recurring.yml +15 -0
- data/examples/rails-single/config/routes.rb +4 -0
- data/examples/rails-single/config.ru +6 -0
- data/examples/rails-single/db/cable_schema.rb +11 -0
- data/examples/rails-single/db/cache_schema.rb +12 -0
- data/examples/rails-single/db/migrate/20251123095526_create_users.rb +10 -0
- data/examples/rails-single/db/queue_schema.rb +129 -0
- data/examples/rails-single/db/seeds.rb +9 -0
- data/examples/rails-single/deploy.yml +57 -0
- data/examples/rails-single/lib/tasks/.keep +0 -0
- data/examples/rails-single/log/.keep +0 -0
- data/examples/rails-single/package.json +17 -0
- data/examples/rails-single/public/400.html +114 -0
- data/examples/rails-single/public/404.html +114 -0
- data/examples/rails-single/public/406-unsupported-browser.html +114 -0
- data/examples/rails-single/public/422.html +114 -0
- data/examples/rails-single/public/500.html +114 -0
- data/examples/rails-single/public/icon.png +0 -0
- data/examples/rails-single/public/icon.svg +3 -0
- data/examples/rails-single/public/robots.txt +1 -0
- data/examples/rails-single/script/.keep +0 -0
- data/examples/rails-single/vendor/.keep +0 -0
- data/examples/rails-single/yarn.lock +188 -0
- data/exe/nvoi +6 -0
- data/lib/nvoi/cli.rb +190 -0
- data/lib/nvoi/cloudflare/client.rb +287 -0
- data/lib/nvoi/config/config.rb +248 -0
- data/lib/nvoi/config/env_resolver.rb +63 -0
- data/lib/nvoi/config/loader.rb +102 -0
- data/lib/nvoi/config/naming.rb +196 -0
- data/lib/nvoi/config/ssh_keys.rb +82 -0
- data/lib/nvoi/config/types.rb +274 -0
- data/lib/nvoi/constants.rb +59 -0
- data/lib/nvoi/credentials/crypto.rb +88 -0
- data/lib/nvoi/credentials/editor.rb +272 -0
- data/lib/nvoi/credentials/manager.rb +173 -0
- data/lib/nvoi/deployer/cleaner.rb +36 -0
- data/lib/nvoi/deployer/image_builder.rb +23 -0
- data/lib/nvoi/deployer/infrastructure.rb +126 -0
- data/lib/nvoi/deployer/orchestrator.rb +146 -0
- data/lib/nvoi/deployer/retry.rb +67 -0
- data/lib/nvoi/deployer/service_deployer.rb +311 -0
- data/lib/nvoi/deployer/tunnel_manager.rb +57 -0
- data/lib/nvoi/deployer/types.rb +8 -0
- data/lib/nvoi/errors.rb +67 -0
- data/lib/nvoi/k8s/renderer.rb +44 -0
- data/lib/nvoi/k8s/templates.rb +29 -0
- data/lib/nvoi/logger.rb +72 -0
- data/lib/nvoi/providers/aws.rb +403 -0
- data/lib/nvoi/providers/base.rb +111 -0
- data/lib/nvoi/providers/hetzner.rb +288 -0
- data/lib/nvoi/providers/hetzner_client.rb +170 -0
- data/lib/nvoi/remote/docker_manager.rb +203 -0
- data/lib/nvoi/remote/ssh_executor.rb +72 -0
- data/lib/nvoi/remote/volume_manager.rb +103 -0
- data/lib/nvoi/service/delete.rb +234 -0
- data/lib/nvoi/service/deploy.rb +80 -0
- data/lib/nvoi/service/exec.rb +144 -0
- data/lib/nvoi/service/provider.rb +36 -0
- data/lib/nvoi/steps/application_deployer.rb +26 -0
- data/lib/nvoi/steps/database_provisioner.rb +60 -0
- data/lib/nvoi/steps/k3s_cluster_setup.rb +105 -0
- data/lib/nvoi/steps/k3s_provisioner.rb +351 -0
- data/lib/nvoi/steps/server_provisioner.rb +43 -0
- data/lib/nvoi/steps/services_provisioner.rb +29 -0
- data/lib/nvoi/steps/tunnel_configurator.rb +66 -0
- data/lib/nvoi/steps/volume_provisioner.rb +154 -0
- data/lib/nvoi/version.rb +5 -0
- data/lib/nvoi.rb +79 -0
- data/templates/app-deployment.yaml.erb +102 -0
- data/templates/app-ingress.yaml.erb +20 -0
- data/templates/app-secret.yaml.erb +10 -0
- data/templates/app-service.yaml.erb +12 -0
- data/templates/db-statefulset.yaml.erb +76 -0
- data/templates/service-deployment.yaml.erb +91 -0
- data/templates/worker-deployment.yaml.erb +50 -0
- metadata +361 -0
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Nvoi
  module Steps
    # DatabaseProvisioner handles database deployment.
    #
    # SQLite databases are skipped here (they ride along with the app
    # deployment via PVC volumes); anything else is deployed as a K8s
    # workload through Deployer::ServiceDeployer and polled until running.
    class DatabaseProvisioner
      # config - deployment configuration (provides application.database and namer)
      # ssh    - remote executor used to run kubectl on the cluster node
      # log    - logger responding to info/success (printf-style args)
      def initialize(config, ssh, log)
        @config = config
        @ssh = ssh
        @log = log
        @service_deployer = Deployer::ServiceDeployer.new(config, ssh, log)
      end

      # Deploys the configured database, if any, and blocks until its pod
      # reports the Running phase. No-op when no database is configured.
      def run
        database = @config.deploy.application.database
        return unless database

        # SQLite is handled by app deployment with PVC volumes
        if database.adapter == "sqlite3"
          @log.info "SQLite database will be provisioned with app deployment"
          return
        end

        @log.info "Provisioning %s database via K8s", database.adapter

        spec = database.to_service_spec(@config.namer)
        @service_deployer.deploy_database(spec)

        # Block until the pod is actually schedulable and running
        wait_for_database(spec.name)

        @log.success "Database provisioned"
      end

      private

      # Polls the pod labeled app=<name> until its phase is Running.
      # Raises K8sError when the pod is not running within +timeout+ seconds.
      def wait_for_database(name, timeout: 120)
        @log.info "Waiting for database to be ready..."

        deadline = Time.now + timeout
        loop do
          phase =
            begin
              @ssh.execute("kubectl get pods -l app=#{name} -o jsonpath='{.items[0].status.phase}'").strip
            rescue SSHCommandError
              # Pod not created yet; keep polling
              nil
            end

          if phase == "Running"
            @log.success "Database is running"
            return
          end

          raise K8sError, "database failed to start within #{timeout}s" if Time.now > deadline

          sleep(5)
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Nvoi
  module Steps
    # K3sClusterSetup coordinates K3s installation across master and worker nodes.
    #
    # It locates the master server group, provisions K3s there, reads back
    # the join token and private IP, then provisions every remaining server
    # as a worker and labels it from the master.
    class K3sClusterSetup
      # config         - deployment configuration (servers, namer, ssh_key_path)
      # provider       - cloud provider client responding to #find_server
      # log            - logger responding to info/success/warning
      # main_server_ip - public IP of the main server (kept for callers)
      def initialize(config, provider, log, main_server_ip)
        @config = config
        @provider = provider
        @log = log
        @main_server_ip = main_server_ip
      end

      # Sets up the whole cluster.
      # Raises K8sError when no master group is configured or the master
      # server cannot be found at the provider.
      def run
        @log.info "Setting up K3s cluster"

        # Only the group name is needed; the config half of the pair was
        # previously captured into an unused local.
        master_group, = find_master_group
        raise K8sError, "no master server group found" unless master_group

        # Setup K3s on master
        master_name = @config.namer.server_name(master_group, 1)
        master = @provider.find_server(master_name)
        raise K8sError, "master server not found: #{master_name}" unless master

        master_ssh = Remote::SSHExecutor.new(master.public_ipv4, @config.ssh_key_path)
        master_provisioner = K3sProvisioner.new(master_ssh, @log, server_role: master_group, server_name: master_name)
        master_provisioner.provision

        # Workers join the cluster using the master's token and private address
        cluster_token = master_provisioner.get_cluster_token
        master_private_ip = master_provisioner.get_private_ip

        # Setup K3s on worker nodes
        @config.deploy.application.servers.each do |group_name, group_config|
          next if group_name == master_group
          next unless group_config

          count = group_config.count.positive? ? group_config.count : 1

          (1..count).each do |i|
            worker_name = @config.namer.server_name(group_name, i)
            setup_worker(worker_name, group_name, cluster_token, master_private_ip, master_ssh)
          end
        end

        @log.success "K3s cluster setup complete"
      end

      private

      # Returns the [name, config] pair of the group flagged as master.
      # Falls back to the only group when exactly one exists; nil otherwise.
      def find_master_group
        servers = @config.deploy.application.servers

        explicit = servers.find { |_name, cfg| cfg&.master }
        return explicit if explicit

        # If only one group, use it as master
        servers.size == 1 ? servers.first : nil
      end

      # Provisions a single worker node and labels it from the master.
      # Logs a warning and returns when the server is missing at the provider.
      def setup_worker(worker_name, group_name, cluster_token, master_private_ip, master_ssh)
        @log.info "Setting up K3s worker: %s", worker_name

        worker = @provider.find_server(worker_name)
        unless worker
          @log.warning "Worker server not found: %s", worker_name
          return
        end

        worker_ssh = Remote::SSHExecutor.new(worker.public_ipv4, @config.ssh_key_path)
        worker_provisioner = K3sProvisioner.new(worker_ssh, @log, server_role: group_name, server_name: worker_name)
        worker_provisioner.cluster_token = cluster_token
        worker_provisioner.main_server_private_ip = master_private_ip
        worker_provisioner.provision

        # Label worker node from master
        @log.info "Labeling worker node: %s", worker_name
        label_worker_from_master(master_ssh, worker_name, group_name)
      end

      # Waits (up to ~150s) for the worker to appear in `kubectl get nodes`,
      # then applies the nvoi.io/server-name label. Warns on timeout instead
      # of raising so one slow worker does not abort the whole setup.
      def label_worker_from_master(master_ssh, worker_name, group_name)
        30.times do
          begin
            output = master_ssh.execute("kubectl get nodes -o name")
            if output.include?(worker_name)
              master_ssh.execute("kubectl label node #{worker_name} nvoi.io/server-name=#{group_name} --overwrite")
              @log.success "Worker labeled: %s", worker_name
              return
            end
          rescue SSHCommandError
            # Node has not joined yet; keep polling
          end
          sleep(5)
        end

        @log.warning "Worker node did not join cluster in time: %s", worker_name
      end
    end
  end
end
|
|
@@ -0,0 +1,351 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Nvoi
  module Steps
    # K3sProvisioner handles K3s installation and server setup.
    #
    # One instance manages a single node over SSH. A node is provisioned as
    # the cluster master when no cluster_token has been assigned; otherwise
    # it joins the existing cluster as an agent (see #provision).
    class K3sProvisioner
      # cluster_token and main_server_private_ip must be assigned by the
      # caller before provisioning a worker node.
      # NOTE(review): main_server_ip is exposed but never read in this class —
      # confirm callers rely on it.
      attr_accessor :main_server_ip, :main_server_private_ip, :cluster_token

      # ssh         - remote executor connected to the target node
      # log         - logger responding to info/success (printf-style args)
      # k3s_version - defaults to Constants::DEFAULT_K3S_VERSION.
      #               NOTE(review): stored but not used by the install
      #               commands below, which always fetch via get.k3s.io —
      #               confirm intended.
      # enable_k3s  - when false, only cloud-init completion is awaited
      # server_role - group name applied as the nvoi.io/server-name label
      # server_name - node name passed to the K3s agent
      def initialize(ssh, log, k3s_version: nil, enable_k3s: true, server_role: nil, server_name: nil)
        @ssh = ssh
        @log = log
        @k3s_version = k3s_version || Constants::DEFAULT_K3S_VERSION
        @enable_k3s = enable_k3s
        @server_role = server_role
        @server_name = server_name
      end

      # Provisions the node: waits for cloud-init, then installs K3s either
      # as a server (master) or an agent (worker). The node acts as master
      # exactly when no cluster token has been assigned.
      def provision
        @log.info "Starting K3s provisioning"

        wait_for_cloud_init

        if @enable_k3s
          # No token means there is no cluster to join yet -> this is the master
          is_master = @cluster_token.nil? || @cluster_token.empty?

          if is_master
            install_k3s_server
            label_node(@server_name, { "nvoi.io/server-name" => @server_role })
            setup_registry
            setup_ingress_controller
          else
            install_k3s_agent
          end
        end

        @log.success "K3s provisioning complete"
      end

      # Reads the K3s join token from the master node's token file.
      # Raises K8sError when the token comes back empty.
      def get_cluster_token
        @log.info "Retrieving K3s cluster token"
        output = @ssh.execute("sudo cat /var/lib/rancher/k3s/server/node-token")
        token = output.strip
        raise K8sError, "cluster token is empty" if token.empty?

        @log.success "Cluster token retrieved"
        token
      end

      # Returns the node's first 10.x.x.x address (assumes the private
      # network uses 10.0.0.0/8 addressing — TODO confirm for all providers).
      # Raises SSHError when no such address exists.
      def get_private_ip
        output = @ssh.execute("ip addr show | grep 'inet 10\\.' | awk '{print $2}' | cut -d/ -f1 | head -1")
        private_ip = output.strip
        raise SSHError, "private IP not found" if private_ip.empty?

        private_ip
      end

      private

      # Polls (up to ~5 minutes) for cloud-init's boot-finished marker so
      # later package installs do not race first-boot configuration.
      # Raises K8sError on timeout.
      def wait_for_cloud_init
        @log.info "Waiting for cloud-init to complete"

        60.times do
          begin
            output = @ssh.execute("test -f /var/lib/cloud/instance/boot-finished && echo 'ready'")
            if output.include?("ready")
              @log.success "Cloud-init complete"
              return
            end
          rescue SSHCommandError
            # Not ready yet
          end
          sleep(5)
        end

        raise K8sError, "cloud-init timeout"
      end

      # Installs K3s in server (control-plane) mode, bound to the private
      # interface, with traefik disabled (an NGINX ingress is installed
      # separately). Idempotent: skips install when the k3s unit is active.
      def install_k3s_server
        # Check if K3s is already running
        begin
          @ssh.execute("systemctl is-active k3s")
          @log.info "K3s already running, skipping installation"
          setup_kubeconfig
          return
        rescue SSHCommandError
          # Not running, continue
        end

        @log.info "Installing K3s server"

        # Detect private IP and interface
        private_ip = get_private_ip
        private_iface = @ssh.execute("ip addr show | grep 'inet 10\\.' | awk '{print $NF}' | head -1").strip

        @log.info "Installing k3s on private IP: %s, interface: %s", private_ip, private_iface

        # Install Docker for image building
        install_docker(private_ip)

        # Configure k3s registries
        configure_registries

        # Install K3s with full configuration
        install_cmd = <<~CMD
          curl -sfL https://get.k3s.io | sudo sh -s - server \
            --bind-address=#{private_ip} \
            --advertise-address=#{private_ip} \
            --node-ip=#{private_ip} \
            --tls-san=#{private_ip} \
            --flannel-iface=#{private_iface} \
            --flannel-backend=wireguard-native \
            --disable=traefik \
            --write-kubeconfig-mode=644 \
            --cluster-cidr=10.42.0.0/16 \
            --service-cidr=10.43.0.0/16
        CMD

        @ssh.execute(install_cmd, stream: true)
        @log.success "K3s server installed"

        setup_kubeconfig(private_ip)
        wait_for_k3s_ready
      end

      # Joins this node to an existing cluster as a K3s agent, using the
      # master's private IP and join token assigned via the accessors.
      # Idempotent: skips install when the k3s-agent unit is active.
      def install_k3s_agent
        # Check if K3s agent is already running
        begin
          @ssh.execute("systemctl is-active k3s-agent")
          @log.info "K3s agent already running, skipping installation"
          return
        rescue SSHCommandError
          # Not running, continue
        end

        @log.info "Installing K3s agent"

        private_ip = get_private_ip
        private_iface = @ssh.execute("ip addr show | grep 'inet 10\\.' | awk '{print $NF}' | head -1").strip

        @log.info "Worker private IP: %s, interface: %s", private_ip, private_iface

        cmd = <<~CMD
          curl -sfL https://get.k3s.io | K3S_URL="https://#{@main_server_private_ip}:6443" K3S_TOKEN="#{@cluster_token}" sh -s - agent \
            --node-ip=#{private_ip} \
            --flannel-iface=#{private_iface} \
            --node-name=#{@server_name}
        CMD

        @ssh.execute(cmd, stream: true)
        @log.success "K3s agent installed"
      end

      # Installs docker.io (used for building images on the node) if absent,
      # then marks the in-cluster registry endpoints as insecure and maps the
      # registry's cluster DNS name to localhost in /etc/hosts.
      # NOTE(review): daemon.json registers "#{private_ip}:5001" while the
      # registry NodePort elsewhere is 30500 — confirm the 5001 port.
      def install_docker(private_ip)
        # Check if Docker is already installed and running
        begin
          @ssh.execute("systemctl is-active docker")
          @log.info "Docker already running, skipping installation"
        rescue SSHCommandError
          # Not running, install it
          docker_install = <<~CMD
            sudo apt-get update && sudo apt-get install -y docker.io
            sudo systemctl start docker
            sudo systemctl enable docker
            sudo usermod -aG docker deploy
          CMD

          @ssh.execute(docker_install, stream: true)
        end

        # Configure Docker for insecure registry
        docker_config = <<~CMD
          sudo mkdir -p /etc/docker
          sudo tee /etc/docker/daemon.json > /dev/null <<EOF
          {"insecure-registries": ["#{private_ip}:5001", "localhost:30500"]}
          EOF
          sudo systemctl restart docker
        CMD

        @ssh.execute(docker_config)

        # Add registry domain to /etc/hosts
        @ssh.execute('grep -q "nvoi-registry.default.svc.cluster.local" /etc/hosts || echo "127.0.0.1 nvoi-registry.default.svc.cluster.local" | sudo tee -a /etc/hosts')
      end

      # Writes /etc/rancher/k3s/registries.yaml so containerd resolves the
      # in-cluster registry name (and localhost:30500) to the local NodePort
      # endpoint over plain HTTP with TLS verification disabled.
      def configure_registries
        config = <<~CMD
          sudo mkdir -p /etc/rancher/k3s
          sudo tee /etc/rancher/k3s/registries.yaml > /dev/null <<'REGEOF'
          mirrors:
            "nvoi-registry.default.svc.cluster.local:5000":
              endpoint:
                - "http://localhost:30500"
            "localhost:30500":
              endpoint:
                - "http://localhost:30500"
          configs:
            "nvoi-registry.default.svc.cluster.local:5000":
              tls:
                insecure_skip_verify: true
            "localhost:30500":
              tls:
                insecure_skip_verify: true
          REGEOF
        CMD

        @ssh.execute(config)
      end

      # Copies the K3s kubeconfig to the deploy user's ~/.kube/config and
      # rewrites the API endpoint from 127.0.0.1 to the node's private IP.
      def setup_kubeconfig(private_ip = nil)
        private_ip ||= get_private_ip

        cmd = <<~CMD
          sudo mkdir -p /home/deploy/.kube
          sudo cp /etc/rancher/k3s/k3s.yaml /home/deploy/.kube/config
          sudo sed -i "s/127.0.0.1/#{private_ip}/g" /home/deploy/.kube/config
          sudo chown -R deploy:deploy /home/deploy/.kube
        CMD

        @ssh.execute(cmd)
      end

      # Polls (up to ~5 minutes) until `kubectl get nodes` reports a Ready
      # node. Raises K8sError on timeout.
      def wait_for_k3s_ready
        @log.info "Waiting for K3s to be ready"

        60.times do
          begin
            output = @ssh.execute("kubectl get nodes")
            if output.include?("Ready")
              @log.success "K3s is ready"
              return
            end
          rescue SSHCommandError
            # Not ready yet
          end
          sleep(5)
        end

        raise K8sError, "K3s failed to become ready"
      end

      # Applies the given labels to the cluster's node.
      # NOTE(review): node_name is ignored — the name is looked up from the
      # cluster instead (items[0] — valid only while the cluster has a single
      # node, which holds at master-install time).
      def label_node(node_name, labels)
        # Get actual node name from K3s
        actual_node = @ssh.execute("kubectl get nodes -o jsonpath='{.items[0].metadata.name}'").strip

        labels.each do |key, value|
          @ssh.execute("kubectl label node #{actual_node} #{key}=#{value} --overwrite")
        end
      end

      # Deploys an ephemeral (emptyDir-backed) Docker registry into the
      # cluster, exposed on NodePort 30500, and waits up to ~2 minutes for it.
      # Raises K8sError on timeout.
      # NOTE(review): the manifest creates an nvoi-system namespace but the
      # registry itself deploys to "default" — confirm intended.
      def setup_registry
        @log.info "Setting up in-cluster registry"

        manifest = <<~YAML
          apiVersion: v1
          kind: Namespace
          metadata:
            name: nvoi-system
          ---
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: nvoi-registry
            namespace: default
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: nvoi-registry
            template:
              metadata:
                labels:
                  app: nvoi-registry
              spec:
                containers:
                - name: registry
                  image: registry:2
                  ports:
                  - containerPort: 5000
                    protocol: TCP
                  env:
                  - name: REGISTRY_HTTP_ADDR
                    value: "0.0.0.0:5000"
                  volumeMounts:
                  - name: registry-storage
                    mountPath: /var/lib/registry
                volumes:
                - name: registry-storage
                  emptyDir: {}
          ---
          apiVersion: v1
          kind: Service
          metadata:
            name: nvoi-registry
            namespace: default
          spec:
            type: NodePort
            ports:
            - port: 5000
              targetPort: 5000
              nodePort: 30500
            selector:
              app: nvoi-registry
        YAML

        @ssh.execute("cat <<'EOF' | kubectl apply -f -\n#{manifest}\nEOF")

        # Wait for registry to be ready
        @log.info "Waiting for registry to be ready"
        24.times do
          begin
            output = @ssh.execute("kubectl get deployment nvoi-registry -n default -o jsonpath='{.status.readyReplicas}'")
            if output.strip == "1"
              @log.success "In-cluster registry running on :30500"
              return
            end
          rescue SSHCommandError
            # Not ready
          end
          sleep(5)
        end

        raise K8sError, "registry failed to become ready"
      end

      # Installs the NGINX Ingress Controller (baremetal manifest, pinned to
      # controller-v1.10.0) and waits up to ~10 minutes until its deployment
      # reports all replicas ready. Raises K8sError on timeout.
      def setup_ingress_controller
        @log.info "Setting up NGINX Ingress Controller"

        @ssh.execute("kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.10.0/deploy/static/provider/baremetal/deploy.yaml", stream: true)

        # Wait for ingress controller
        @log.info "Waiting for NGINX Ingress Controller to be ready"
        60.times do
          begin
            ready = @ssh.execute("kubectl get deployment ingress-nginx-controller -n ingress-nginx -o jsonpath='{.status.readyReplicas}'").strip
            desired = @ssh.execute("kubectl get deployment ingress-nginx-controller -n ingress-nginx -o jsonpath='{.spec.replicas}'").strip

            if !ready.empty? && !desired.empty? && ready == desired
              @log.success "NGINX Ingress Controller is ready"
              return
            end
          rescue SSHCommandError
            # Not ready
          end
          sleep(10)
        end

        raise K8sError, "NGINX Ingress Controller failed to become ready"
      end
    end
  end
end
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Nvoi
  module Steps
    # ServerProvisioner handles provisioning of compute servers.
    #
    # Provisions the network and firewall once, then every server in each
    # configured group, and returns the public IP chosen as the main server.
    class ServerProvisioner
      # config   - deployment configuration (servers, namer)
      # provider - cloud provider client passed through to Infrastructure
      # log      - logger responding to info/success
      def initialize(config, provider, log)
        @config = config
        @provider = provider
        @log = log
        @infrastructure = Deployer::Infrastructure.new(config, provider, log)
      end

      # Provisions all servers and returns the main server's public IPv4.
      def run
        @log.info "Provisioning servers"

        # Shared infrastructure first: one network and one firewall
        net = @infrastructure.provision_network
        fw = @infrastructure.provision_firewall

        primary_ip = nil

        @config.deploy.application.servers.each do |group, cfg|
          replicas = cfg&.count&.positive? ? cfg.count : 1

          1.upto(replicas) do |index|
            name = @config.namer.server_name(group, index)
            server = @infrastructure.provision_server(name, net.id, fw.id, cfg)

            # First qualifying server wins (master flag or first-in-group).
            # NOTE(review): because the first assignment sticks, a master in
            # a later group will NOT override an earlier group's first
            # server — confirm this ordering is intended.
            primary_ip = server.public_ipv4 if primary_ip.nil? && (cfg&.master || index == 1)
          end
        end

        @log.success "All servers provisioned"
        primary_ip
      end
    end
  end
end
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Nvoi
  module Steps
    # ServicesProvisioner handles deployment of additional services (redis, etc.).
    #
    # Each configured service is converted to a service spec and handed to
    # Deployer::ServiceDeployer. No-op when no services are configured.
    class ServicesProvisioner
      # config - deployment configuration (provides application.services and .name)
      # ssh    - remote executor forwarded to the service deployer
      # log    - logger responding to info/success (printf-style args)
      def initialize(config, ssh, log)
        @config = config
        @ssh = ssh
        @log = log
        @service_deployer = Deployer::ServiceDeployer.new(config, ssh, log)
      end

      # Deploys every configured additional service.
      def run
        services = @config.deploy.application.services
        return if services.empty?

        @log.info "Provisioning %d additional service(s)", services.size

        app_name = @config.deploy.application.name
        services.each do |name, svc_config|
          spec = svc_config.to_service_spec(app_name, name)
          @service_deployer.deploy_service(name, spec)
        end

        @log.success "Additional services provisioned"
      end
    end
  end
end
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Nvoi
  module Steps
    # TunnelConfigurator handles Cloudflare tunnel setup for services.
    #
    # For every app entry with a domain, a positive port, and a non-nil
    # subdomain, a Cloudflare tunnel is created pointing at the service's
    # in-cluster K8s address. Returns the collected TunnelInfo records.
    class TunnelConfigurator
      # config - deployment configuration (cloudflare credentials, app entries, namer)
      # log    - logger responding to info/success (printf-style args)
      def initialize(config, log)
        @config = config
        @log = log

        cf = config.cloudflare
        @cf_client = Cloudflare::Client.new(cf.api_token, cf.account_id)
        @tunnel_manager = Deployer::TunnelManager.new(@cf_client, log)
      end

      # Configures a tunnel for each eligible service; returns an array of
      # Deployer::TunnelInfo.
      def run
        @log.info "Configuring Cloudflare tunnels"

        tunnels = @config.deploy.application.app.filter_map do |service_name, svc|
          next unless svc.domain && !svc.domain.empty?
          next unless svc.port&.positive?
          # Allow empty subdomain or "@" for apex domain; only nil skips
          next if svc.subdomain.nil?

          configure_service_tunnel(service_name, svc)
        end

        @log.success "All tunnels configured (%d)", tunnels.size
        tunnels
      end

      private

      # Creates (or updates) the tunnel for one service and returns its
      # TunnelInfo record.
      def configure_service_tunnel(service_name, service_config)
        tunnel_name = @config.namer.tunnel_name(service_name)
        hostname = build_hostname(service_config.subdomain, service_config.domain)

        # Tunnel traffic is routed to the cluster-internal K8s service
        backend = "http://#{@config.namer.app_service_name(service_name)}:#{service_config.port}"

        tunnel = @tunnel_manager.setup_tunnel(tunnel_name, hostname, backend, service_config.domain)

        Deployer::TunnelInfo.new(
          service_name: service_name,
          hostname: hostname,
          tunnel_id: tunnel.tunnel_id,
          tunnel_token: tunnel.tunnel_token
        )
      end

      # Build hostname from subdomain and domain.
      # Supports: "app" -> "app.example.com", "" or "@" -> "example.com", "*" -> "*.example.com"
      def build_hostname(subdomain, domain)
        apex = subdomain.nil? || subdomain.empty? || subdomain == "@"
        apex ? domain : "#{subdomain}.#{domain}"
      end
    end
  end
end
|