k8s-harness 1.0.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA256:
3
+ metadata.gz: ace0cae7e7c7b384c5420e91e29bc2ddac484160f23772b9fc0b58e49a73e16e
4
+ data.tar.gz: d595a4aff27c2b8a14acf89d40b9d3a18d9482debded7894328e8a91350ec511
5
+ SHA512:
6
+ metadata.gz: 7754badb5e231b29e20c81c466feb865d52e9444dc904ab7be7fa285dc2ec4a8476f91c5862971a07071e696eaff8b58734beacd41d9bc59f299e6177b1b75f4
7
+ data.tar.gz: a946ffb779de5a8a433f1844d3d1b95d038ea81a6f580e1c5e4d7f2840708d2875393d348742dbed7803735f8311fbcc23d0f26894ab24c46b30136fc32a5a44
@@ -0,0 +1,7 @@
1
#!/usr/bin/env ruby
# frozen_string_literal: true

# k8s-harness CLI entry point.
# Put this gem's lib/ directory first on the load path so the executable
# works from a source checkout as well as from an installed gem.
$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
require 'k8s_harness'

# Hand every command-line argument to the CLI parser.
KubernetesHarness::CLI.parse(ARGV)
@@ -0,0 +1,5 @@
1
+ ---
2
+ - name: vagrant
3
+ version_check: vagrant --version
4
+ - name: ansible-playbook
5
+ version_check: ansible-playbook --version
@@ -0,0 +1,39 @@
1
# vim: set ft=ruby:

# Cluster size and per-node memory are tunable via the environment.
# ENV values are Strings, so coerce with #to_i: `num_nodes.times` needs an
# Integer, and so does VirtualBox's --memory argument. (Previously, setting
# either env var produced a String and broke both uses.)
num_nodes = (ENV["K3S_NUMBER_OF_NODES"] || 2).to_i
# NOTE(review): the name says GB but the default (1024) is megabytes, which
# is what `modifyvm --memory` expects — confirm intent before renaming.
memory_per_node = (ENV["K3S_NODE_MEMORY_GB"] || 1024).to_i
# Public half of the cluster SSH key, flattened to a single line so it can
# be appended verbatim to authorized_keys.
ssh_pub_key = File.read("#{ENV['VAGRANT_CWD']}/ssh_key.pub").gsub("\n","")
install_ansible_command = <<-COMMAND
apk update
if ! apk add ansible
then
  echo "ERROR: Failed to install Ansible on this machine."
  exit 1
fi
COMMAND

Vagrant.configure("2") do |config|
  config.vm.box = "maier/alpine-3.6-x86_64"
  config.vm.provider "virtualbox" do |vb|
    vb.customize [ "modifyvm", :id, "--memory", memory_per_node ]
  end

  # Private Docker registry VM; port 5000 is forwarded so the host can
  # push images into the cluster's registry.
  config.vm.define "k3s-registry" do |node|
    node.vm.hostname = "k3s-registry"
    node.vm.network "private_network", ip: "192.168.50.200"
    node.vm.network "forwarded_port", guest: 5000, host: 5000
    node.vm.provision "shell",
      inline: "echo '#{ssh_pub_key}' >> /home/vagrant/.ssh/authorized_keys"
    node.vm.provision "shell", inline: install_ansible_command
  end

  # k3s nodes; node 0 is the master, so only it forwards the API port.
  num_nodes.times do |node_id|
    config.vm.define "k3s-node-#{node_id}" do |node|
      node.vm.hostname = "k3s-node-#{node_id}"
      # Static addresses starting at .2; must agree with the Ansible inventory.
      node.vm.network "private_network", ip: "192.168.50.#{node_id + 2}"
      node.vm.network "forwarded_port", guest: 6443, host: 6443 if node_id == 0
      node.vm.provision "shell",
        inline: "echo '#{ssh_pub_key}' >> /home/vagrant/.ssh/authorized_keys"
      node.vm.provision "shell", inline: install_ansible_command
    end
  end
end
@@ -0,0 +1,10 @@
1
+ # These IP addresses are statically allocated.
2
+ # See scripts/deploy_k3s for more info.
3
+ [master]
4
+ 192.168.50.2
5
+
6
+ [worker]
7
+ 192.168.50.3
8
+
9
+ [registry]
10
+ 192.168.50.200
@@ -0,0 +1,207 @@
1
+ ---
2
+ - hosts: master
3
+ become: true
4
+ gather_facts: no
5
+ tasks:
6
+ - name: Get this host's IP address
7
+ shell: "echo $(ip -4 -o addr show eth1 | awk '{print $4}' | cut -f1 -d '/')"
8
+ register: result
9
+
10
+ - set_fact:
11
+ ip_address: "{{ result.stdout }}"
12
+
13
+ - name: Create directories
14
+ file:
15
+ path: "{{ item }}"
16
+ state: directory
17
+ with_items:
18
+ - /etc/rancher/k3s
19
+ - /etc/docker
20
+
21
+ - name: Create registry files
22
+ file:
23
+ path: "{{ item }}"
24
+ state: touch
25
+ with_items:
26
+ - /etc/rancher/k3s/registries.yaml
27
+ - /etc/docker/daemon.json
28
+
29
+ - name: Configure insecure registries for k3s
30
+ blockinfile:
31
+ path: /etc/rancher/k3s/registries.yaml
32
+ block: |
33
+ mirrors:
34
+ "10.0.2.2:5000":
35
+ endpoint:
36
+ - "http://10.0.2.2:5000"
37
+
38
+ - name: Configure insecure registries for containerd
39
+ block:
40
+ - name: Create the daemon file
41
+ blockinfile:
42
+ path: /etc/docker/daemon.json
43
+ marker: ""
44
+ block: |
45
+ { "insecure-registries": [ "10.0.2.2:5000" ] }
46
+
47
+ - name: Remove blank lines
48
+ lineinfile:
49
+ path: /etc/docker/daemon.json
50
+ state: absent
51
+ regexp: '^$'
52
+
53
+ - name: Install Rancher k3s
54
+ shell: curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--node-ip={{ ip_address }} --flannel-iface=eth1" K3S_TOKEN={{ k3s_token }} sh -
55
+
56
+ - name: Check if extlinux updated
57
+ shell: "grep -q cgroup_enable=cpuset /etc/update-extlinux.conf"
58
+ register: extlinux_enabled_result
59
+ ignore_errors: true
60
+
61
+ - name: Update extlinux per documentation
62
+ shell: "echo 'default_kernel_opts=\"... cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\"' >> /etc/update-extlinux.conf"
63
+ when: extlinux_enabled_result.rc != 0
64
+
65
+ - name: Apply extlinux updates
66
+ shell: update-extlinux
67
+ when: extlinux_enabled_result.rc != 0
68
+
69
+ - name: Reboot
70
+ shell: /sbin/reboot
71
+ when: extlinux_enabled_result.rc != 0
72
+
73
+ - name: "Wait for machine"
74
+ become: false
75
+ register: wait_result
76
+ local_action: wait_for host={{ ip_address }} port=22 timeout=300 connect_timeout=300
77
+
78
+ - hosts: worker
79
+ become: true
80
+ tasks:
81
+ - name: Get this host's IP address
82
+ shell: "echo $(ip -4 -o addr show eth1 | awk '{print $4}' | cut -f1 -d '/')"
83
+ register: result
84
+
85
+ - set_fact:
86
+ ip_address: "{{ result.stdout }}"
87
+
88
+ - name: Wait for master to become available
89
+ register: wait_result
90
+ wait_for:
91
+ timeout: 300
92
+ connect_timeout: 300
93
+ host: 192.168.50.2
94
+ port: 6443
95
+
96
+
97
+ - name: Create directories
98
+ file:
99
+ path: "{{ item }}"
100
+ state: directory
101
+ with_items:
102
+ - /etc/rancher/k3s
103
+ - /etc/docker
104
+
105
+ - name: Create registry files
106
+ file:
107
+ path: "{{ item }}"
108
+ state: touch
109
+ with_items:
110
+ - /etc/rancher/k3s/registries.yaml
111
+ - /etc/docker/daemon.json
112
+
113
+ - name: Configure insecure registries for k3s
114
+ blockinfile:
115
+ path: /etc/rancher/k3s/registries.yaml
116
+ block: |
117
+ mirrors:
118
+ "10.0.2.2:5000":
119
+ endpoint:
120
+ - "http://10.0.2.2:5000"
121
+
122
+ - name: Configure insecure registries for containerd
123
+ block:
124
+ - name: Create the daemon file
125
+ blockinfile:
126
+ path: /etc/docker/daemon.json
127
+ marker: ""
128
+ block: |
129
+ { "insecure-registries": [ "10.0.2.2:5000" ] }
130
+
131
+ - name: Remove blank lines
132
+ lineinfile:
133
+ path: /etc/docker/daemon.json
134
+ state: absent
135
+ regexp: '^$'
136
+
137
+ - name: Install k3s as worker
138
+ shell: curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--node-ip={{ ip_address }} --flannel-iface=eth1" K3S_URL=https://192.168.50.2:6443 K3S_TOKEN={{ k3s_token }} sh -
139
+
140
+ - name: Check if extlinux updated
141
+ shell: "grep -q cgroup_enable=cpuset /etc/update-extlinux.conf"
142
+ register: extlinux_enabled_result
143
+ ignore_errors: true
144
+
145
+ - name: Update extlinux per documentation
146
+ shell: "echo 'default_kernel_opts=\"... cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\"' >> /etc/update-extlinux.conf"
147
+ when: extlinux_enabled_result.rc != 0
148
+
149
+ - name: Apply extlinux updates
150
+ shell: update-extlinux
151
+ when: extlinux_enabled_result.rc != 0
152
+
153
+ - name: Reboot
154
+ shell: /sbin/reboot
155
+ when: extlinux_enabled_result.rc != 0
156
+
157
+ - name: "Wait for machine"
158
+ become: false
159
+ register: wait_result
160
+ local_action: wait_for host={{ ip_address }} port=22 timeout=300 connect_timeout=300
161
+
162
+
163
+ - hosts: registry
164
+ become: true
165
+ tasks:
166
+ - name: Get this host's IP address
167
+ shell: "echo $(ip -4 -o addr show eth1 | awk '{print $4}' | cut -f1 -d '/')"
168
+ register: result
169
+
170
+ - set_fact:
171
+ ip_address: "{{ result.stdout }}"
172
+
173
+
174
+ - name: Install Docker
175
+ apk:
176
+ name:
177
+ - docker
178
+
179
+ - name: Add docker as service
180
+ shell: "rc-update add docker boot"
181
+
182
+ - name: Reboot
183
+ shell: /sbin/reboot
184
+
185
+ - name: "Wait for machine"
186
+ become: false
187
+ register: wait_result
188
+ local_action: wait_for host={{ ip_address }} port=22 timeout=300 connect_timeout=300
189
+
190
+ - name: Start docker daemon
191
+ shell: "service docker start"
192
+ retries: 5
193
+ delay: 2
194
+
195
+ - name: Confirm Docker available
196
+ shell: "docker run --rm hello-world"
197
+ retries: 5
198
+ delay: 2
199
+
200
+ - name: Check for instances of registry
201
+ shell: "sudo docker ps | grep -q registry"
202
+ register: result
203
+ ignore_errors: true
204
+
205
+ - name: Start Docker Registry
206
+ shell: "sudo docker run -d --restart=always -p 5000:5000 --name registry registry:2"
207
+ when: result.rc != 0
@@ -0,0 +1,5 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'k8s_harness/cli'
4
+ require 'k8s_harness/clusters'
5
+ require 'k8s_harness/logging'
@@ -0,0 +1,116 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'optparse'
4
+ require 'k8s_harness/subcommand'
5
+
6
+ # KubernetesHarness
7
# KubernetesHarness
module KubernetesHarness
  # This module contains everything CLI-related.
  # We're using it as the entry-point for k8s-harness.
  module CLI
    # Parsed CLI options. :base holds global flags; each subcommand gets
    # its own key as options for it are added.
    @options = {
      base: {}
    }
    # Subcommand registry: a description (shown in top-level help) and an
    # OptionParser for each subcommand's own flags.
    @subcommands = {
      run: {
        description: 'Runs tests',
        option_parser: OptionParser.new do |opts|
          opts.banner = 'Usage: k8s-harness run [options]'
          opts.separator 'Runs tests'
          opts.separator ''
          opts.separator 'Commands:'
          opts.on('-h', '--help', 'Displays this help message') do
            add_option(options: { show_usage: true }, subcommand: :run)
            puts opts
          end
          opts.on('--disable-teardown', 'Keeps the cluster up for local testing') do
            add_option(options: { disable_teardown: true }, subcommand: :run)
          end
        end
      },
      validate: {
        description: 'Validates .k8sharness files',
        option_parser: OptionParser.new do |opts|
          opts.banner = 'Usage: k8s-harness validate [options]'
          opts.separator 'Validates that a .k8sharness file is correct'
          opts.separator ''
          opts.separator 'Commands:'
          opts.on('-h', '--help', 'Displays this help message') do
            add_option(options: { show_usage: true }, subcommand: :validate)
            puts opts
          end
        end
      },
      destroy: {
        description: 'Deletes a live cluster provisioned by k8s-harness WITHOUT WARNING.',
        option_parser: OptionParser.new do |opts|
          opts.banner = 'Usage: k8s-harness destroy [options]'
          opts.separator 'Deletes live clusters provisioned by k8s-harness WITHOUT WARNING'
          opts.separator ''
          opts.separator 'Commands:'
          opts.on('-h', '--help', 'Displays this help message') do
            add_option(options: { show_usage: true }, subcommand: :destroy)
            puts opts
          end
        end
      }
    }

    # Top-level parser: global flags plus a generated list of subcommands.
    @base_command = OptionParser.new do |opts|
      opts.banner = 'Usage: k8s-harness [subcommand] [options]'
      opts.separator 'Test your apps in disposable Kubernetes clusters'
      opts.separator ''
      opts.separator 'Sub-commands:'
      opts.separator ''
      @subcommands.each_key do |subcommand|
        opts.separator "    #{subcommand.to_s.ljust(20)} #{@subcommands[subcommand][:description]}"
      end
      opts.separator ''
      opts.separator 'See k8s-harness [subcommand] --help for more specific options.'
      opts.separator ''
      opts.separator 'Global options:'
      opts.on('-d', '--debug', 'Show debug output') do
        add_option(options: { enable_debug_logging: true })
      end
      opts.on('-h', '--help', 'Displays this help message') do
        add_option(options: { help: opts.help })
      end
    end

    # Parses global flags, then dispatches to the subcommand's parser and
    # entry point. Shows help when no subcommand is given.
    def self.parse(args)
      args.push('-h') if args.empty? || subcommands_missing?(args)
      @base_command.order!(args)
      subcommand = args.shift
      if subcommand.nil?
        puts @options[:base][:help]
      else
        enable_debug_logging_if_present
        @subcommands[subcommand.to_sym][:option_parser].order!(args)
        call_entrypoint(subcommand)
      end
    end

    # Turns on debug logging when the global -d/--debug flag was seen.
    def self.enable_debug_logging_if_present
      KubernetesHarness::Logging.enable_debug_logging if @options[:base][:enable_debug_logging]
    end

    # True when args contain no bare word (i.e. only flags were given).
    def self.subcommands_missing?(args)
      args.select { |arg| arg.match?(/^[a-z]/) }.empty?
    end

    # Records parsed options under :base or the given subcommand's key.
    def self.add_option(options:, subcommand: nil)
      if subcommand.nil?
        @options[:base].merge!(options)
      else
        # BUG FIX: this used Hash#key (lookup of a key BY VALUE, which was
        # almost always nil here), so the subcommand's option hash was
        # re-created — and previously stored flags lost — on every call.
        # Hash#key? is the membership test that was intended.
        @options[subcommand] = {} unless @options.key?(subcommand)
        @options[subcommand].merge!(options)
      end
    end

    # Invokes KubernetesHarness::Subcommand.<subcommand> with its options.
    def self.call_entrypoint(subcommand)
      KubernetesHarness::Subcommand.method(subcommand.to_sym).call(@options[subcommand.to_sym])
    end

    private_class_method :call_entrypoint, :add_option, :subcommands_missing?
  end
end
@@ -0,0 +1,211 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'k8s_harness/clusters/ansible'
4
+ require 'k8s_harness/clusters/constants'
5
+ require 'k8s_harness/clusters/cluster_info'
6
+ require 'k8s_harness/clusters/metadata'
7
+ require 'k8s_harness/clusters/required_software'
8
+ require 'k8s_harness/clusters/vagrant'
9
+ require 'k8s_harness/shell_command'
10
+
11
module KubernetesHarness
  # Handles bring up and deletion of disposable clusters.
  module Clusters
    # Creates a disposable cluster end-to-end: verifies prerequisites,
    # seeds the metadata directory, generates an SSH key, brings the VMs
    # up with Vagrant, and persists the resulting ClusterInfo to
    # cluster.yaml. Returns the ClusterInfo.
    def self.create!
      RequiredSoftware.ensure_installed_or_exit!
      Metadata.initialize!
      create_ssh_key!
      vagrant_up_disposable_cluster_or_exit!
      cluster = ClusterInfo.new(master_ip_address_command: master_ip_address_command,
                                worker_ip_addresses_command: worker_ip_addresses_command,
                                docker_registry_command: docker_registry_command,
                                kubeconfig_path: 'not_yet',
                                ssh_key_path: cluster_ssh_key)
      Metadata.write!('cluster.yaml', cluster.to_yaml)
      cluster
    end

    # Generates a passphrase-less RSA key in the metadata directory,
    # unless one already exists there.
    def self.create_ssh_key!
      ssh_key_fp = File.join(Metadata.default_dir, 'ssh_key')
      return if File.exist? ssh_key_fp

      KubernetesHarness.nice_logger.info 'Creating a new SSH key for the cluster.'
      ssh_key_command = ShellCommand.new(
        "ssh-keygen -t rsa -f '#{ssh_key_fp}' -q -N ''"
      )
      raise 'Unable to create a SSH key for the cluster' unless ssh_key_command.execute!
    end

    # Provisions every node with Ansible in parallel, then records the
    # cluster kubeconfig path. Raises when any node fails.
    def self.provision!(cluster_info)
      all_results = provision_nodes_in_parallel!(cluster_info)
      failures = all_results.reject(&:success?)
      raise failed_cluster_error(failures) unless failures.empty?

      cluster_info.kubeconfig_path = cluster_kubeconfig
      true
    end

    # TODO: tests missing
    def self.teardown!
      destroy_nodes_in_parallel!

      true
    end

    # TODO: tests missing
    def self.destroy_existing!
      destroy_nodes_in_parallel!
    end

    # Destroys every node with `vagrant destroy -f`, one thread per node,
    # then removes the cluster's metadata files. No-op (with a notice)
    # when no cluster is running.
    def self.destroy_nodes_in_parallel!
      if cluster_running?
        KubernetesHarness.logger.debug('🚨 Deleting all nodes! 🚨')
        vagrant_threads = []
        Constants::ALL_NODES.each do |node|
          KubernetesHarness.logger.debug("Starting thread for node #{node}")
          vagrant_threads << Thread.new do
            vagrant_command = Vagrant.new_command('destroy', ['-f', node])
            vagrant_command.execute!
            vagrant_command
          end
        end
        results = vagrant_threads.each(&:join).map(&:value)
        failures = results.reject(&:success?)
        raise failed_cluster_destroy_error(failures) unless failures.empty?

        delete_cluster_yaml_and_ssh_key!
      else
        KubernetesHarness.nice_logger.info('No clusters found to destroy. Stopping.')
      end
    end

    # Runs the site playbook against each node address in its own thread;
    # returns the finished Ansible commands.
    def self.provision_nodes_in_parallel!(cluster_info)
      ansible_threads = []
      ssh_key_path = cluster_info.ssh_key_path
      [cluster_info.master_ip_address,
       cluster_info.worker_ip_addresses,
       cluster_info.docker_registry_address].flatten.each do |addr|
        ansible_threads << Thread.new do
          command = Ansible::Playbook.create_run_against_single_host(
            playbook_fp: playbook_path,
            ssh_key_path: ssh_key_path,
            inventory_fp: inventory_path,
            ip_address: addr,
            extra_vars: ["k3s_token=#{cluster_info.kubernetes_cluster_token}"]
          )
          command.execute!
          command
        end
      end
      ansible_threads.each(&:join).map(&:value)
    end

    # Executed `vagrant ssh` commands that print each worker's eth1 address.
    def self.worker_ip_addresses_command
      Constants::WORKER_NODE_NAMES.map do |node|
        Vagrant.create_and_execute_new_ssh_command(node, Constants::IP_ETH1_COMMAND)
      end
    end

    # Executed `vagrant ssh` command that prints the master's eth1 address.
    def self.master_ip_address_command
      Vagrant.create_and_execute_new_ssh_command(Constants::MASTER_NODE_NAME,
                                                 Constants::IP_ETH1_COMMAND)
    end

    # Executed `vagrant ssh` command that prints the registry's eth1 address.
    def self.docker_registry_command
      Vagrant.create_and_execute_new_ssh_command(Constants::DOCKER_REGISTRY_NAME,
                                                 Constants::IP_ETH1_COMMAND)
    end

    # Copies /etc/rancher/k3s/k3s.yaml off the master into the metadata
    # directory and returns its path, or nil (with a warning) when the
    # remote file came back empty.
    def self.cluster_kubeconfig
      args = [
        '-c',
        '"sudo cat /etc/rancher/k3s/k3s.yaml"',
        Constants::MASTER_NODE_NAME.to_s
      ]
      command = Vagrant.new_command('ssh', args)
      command.execute!
      if command.stdout.empty?
        KubernetesHarness.logger.warn('No kubeconfig created!')
        return
      end
      Metadata.write!('kubeconfig', command.stdout)
      File.join Metadata.default_dir, 'kubeconfig'
    end

    # Path to the cluster's private SSH key inside the metadata directory.
    def self.cluster_ssh_key
      File.join KubernetesHarness::Clusters::Metadata.default_dir, '/ssh_key'
    end

    # Path to the bundled Ansible playbook copied into the metadata dir.
    def self.playbook_path
      File.join Metadata.default_dir, 'site.yml'
    end

    # Path to the bundled Ansible inventory copied into the metadata dir.
    def self.inventory_path
      File.join Metadata.default_dir, 'inventory'
    end

    # Brings every node up with `vagrant up`, one thread per node, and
    # raises with the collected output when any node fails.
    def self.vagrant_up_disposable_cluster_or_exit!
      KubernetesHarness.logger.debug('🚀 Creating node new disposable cluster 🚀')
      vagrant_threads = []
      Constants::ALL_NODES.each do |node|
        KubernetesHarness.logger.debug("Starting thread for node #{node}")
        vagrant_threads << Thread.new do
          vagrant_command = Vagrant.new_command('up', [node])
          vagrant_command.execute!
          vagrant_command
        end
      end
      results = vagrant_threads.each(&:join).map(&:value)
      failures = results.reject(&:success?)
      raise failed_cluster_error(failures) unless failures.empty?
    end

    # Formats one command's stdout/stderr into a report section.
    def self.generate_err_msg(cmd)
      header = "From command '#{cmd.command}':"
      separator = '-' * (header.length + 4)

      <<~MESSAGE
        #{header}
        #{separator}

        Output:
        #{cmd.stdout}

        Errors:
        #{cmd.stderr}
      MESSAGE
    end

    # Builds (and returns — does NOT raise) the "failed to start" message
    # for one failed command or an Array of them.
    # BUG FIX: this previously raised on its own, which made the
    # `raise failed_cluster_error(...)` at every call site dead code.
    def self.failed_cluster_error(command)
      stderr = if command.is_a? Array
                 command.map { |cmd| generate_err_msg(cmd) }.join("\n\n")
               else
                 generate_err_msg(command)
               end
      "Failed to start Kubernetes cluster. Here's why:\n\n#{stderr}"
    end

    # Builds (and returns) the "failed to delete" message.
    # BUG FIX: Array#uniq! returns nil when no elements are removed, so the
    # old `.uniq!.join` chain crashed whenever all stderr strings differed
    # (or there was only one). Array#uniq is safe.
    def self.failed_cluster_destroy_error(command)
      stderr = if command.is_a? Array
                 command.map(&:stderr).uniq.join("\n")
               else
                 command.stderr
               end
      "Failed to delete Kubernetes cluster. Here's why:\n\n#{stderr}"
    end

    # Removes the key pair and cluster manifest from the metadata dir.
    def self.delete_cluster_yaml_and_ssh_key!
      ['ssh_key', 'ssh_key.pub', 'cluster.yaml'].each do |file|
        Metadata.delete!(file)
      end
    end

    # True when `vagrant global-status` mentions any k3s- machine.
    def self.cluster_running?
      vagrant_status_command = Vagrant.new_command('global-status')
      vagrant_status_command.execute!
      vagrant_status_command.stdout.match?(/k3s-/)
    end
  end
end
@@ -0,0 +1,57 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'yaml'
4
+ require 'k8s_harness/paths'
5
+
6
module KubernetesHarness
  module Clusters
    # Simple module for interacting with Vagrant.
    module Ansible
      # for ansible-playbook
      module Playbook
        # Builds (without running) a ShellCommand that executes the given
        # playbook against exactly one host, with host-key checking off
        # and the cluster key forced via IdentitiesOnly.
        def self.create_run_against_single_host(playbook_fp:,
                                                inventory_fp:,
                                                ssh_key_path:,
                                                ip_address:,
                                                extra_vars:)
          log_new_run(playbook_fp, inventory_fp, ssh_key_path, ip_address, extra_vars)
          env = {
            ANSIBLE_HOST_KEY_CHECKING: 'no',
            ANSIBLE_SSH_ARGS: '-o IdentitiesOnly=true',
            ANSIBLE_COMMAND_WARNINGS: 'False',
            ANSIBLE_PYTHON_INTERPRETER: '/usr/bin/python'
          }
          cli = command(playbook_fp, inventory_fp, ssh_key_path, ip_address, extra_vars)
          KubernetesHarness::ShellCommand.new(cli, environment: env)
        end

        # Assembles the full ansible-playbook invocation as a single string.
        def self.command(playbook_fp, inventory_fp, ssh_key_path, ip_address, extra_vars)
          ssh_user = ENV['ANSIBLE_SSH_USER'] || 'vagrant'
          parts = ['ansible-playbook', "-i #{inventory_fp}"]
          parts << "-e \"ansible_ssh_user=\\\"#{ssh_user}\\\"\""
          extra_vars.each { |var| parts << "-e \"#{var}\"" }
          parts << "-l #{ip_address}"
          parts << "--private-key #{ssh_key_path}"
          parts << playbook_fp
          parts.join(' ')
        end

        # Logs the parameters of the run about to be created.
        def self.log_new_run(playbook_fp, inventory_fp, ssh_key_path, ip_address = '', extra_vars)
          KubernetesHarness.logger.info(
            <<~MESSAGE.strip
              Creating a new single-host Ansible Playbook run! \
              playbook: #{playbook_fp}, \
              inventory: #{inventory_fp}, \
              ssh_key: #{ssh_key_path}, \
              ip_address: #{ip_address}, \
              extra_vars: #{extra_vars}
            MESSAGE
          )
        end
      end
    end
  end
end
@@ -0,0 +1,69 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'yaml'
4
+ require 'digest/md5'
5
+
6
module KubernetesHarness
  module Clusters
    # This class provides a handy set of information that might be useful for k8s-harness
    # users after creating their clusters.
    class ClusterInfo
      attr_reader :master_ip_address,
                  :worker_ip_addresses,
                  :docker_registry_address,
                  :ssh_key_path,
                  :kubernetes_cluster_token
      attr_accessor :kubeconfig_path

      # Dotted-quad IPv4 address with each octet constrained to 0-255.
      IP_ADDRESS_REGEX = /
        \b(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.
        (25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.
        (25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.
        (25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b
      /x.freeze

      # Each *_command argument is an executable command object (responds
      # to #execute! and #stdout) whose output contains node IP addresses.
      def initialize(master_ip_address_command:,
                     worker_ip_addresses_command:,
                     docker_registry_command:,
                     kubeconfig_path:,
                     ssh_key_path:)
        @kubeconfig_path = kubeconfig_path
        @ssh_key_path = ssh_key_path
        @master_ip_address = get_ip_addresses_from_command(master_ip_address_command).first
        @docker_registry_address = get_ip_addresses_from_command(docker_registry_command).first
        @worker_ip_addresses = worker_ip_addresses_command.flat_map do |cmd|
          get_ip_addresses_from_command(cmd)
        end
        @kubernetes_cluster_token = generate_k8s_token(@master_ip_address,
                                                       @worker_ip_addresses,
                                                       @docker_registry_address)
      end

      # Serializes the cluster facts for persistence in cluster.yaml.
      def to_yaml
        facts = {
          master_ip_address: @master_ip_address,
          worker_ip_addresses: @worker_ip_addresses,
          docker_registry_address: @docker_registry_address,
          kubeconfig_path: @kubeconfig_path,
          ssh_key_path: @ssh_key_path,
          kubernetes_cluster_token: @kubernetes_cluster_token
        }
        YAML.dump(facts)
      end

      private

      # Deterministic token derived from the cluster's addresses.
      def generate_k8s_token(master_ip, worker_ip, docker_registry)
        Digest::MD5.hexdigest([master_ip, worker_ip, docker_registry].flatten.join(''))
      end

      # Runs the command and keeps only stdout lines containing an IPv4 address.
      def get_ip_addresses_from_command(command)
        command.execute!
        command.stdout.split("\n").grep(IP_ADDRESS_REGEX)
      end
    end
  end
end
@@ -0,0 +1,15 @@
1
+ # frozen_string_literal: true
2
+
3
module KubernetesHarness
  module Clusters
    # Just constants.
    module Constants
      # Hostname of the node running the k3s control plane.
      MASTER_NODE_NAME = 'k3s-node-0'
      # Hostnames of the k3s agent (worker) nodes.
      WORKER_NODE_NAMES = ['k3s-node-1'].freeze
      # Hostname of the local Docker registry VM.
      DOCKER_REGISTRY_NAME = 'k3s-registry'
      # Shell pipeline (pre-quoted for use with `vagrant ssh -c`) that
      # prints the VM's eth1 IPv4 address.
      IP_ETH1_COMMAND =
        "\"ip addr show dev eth1 | grep \'\\<inet\\>\' | awk \'{print \\$2}\' | cut -f1 -d \'/\'\""
      # Every VM in the disposable cluster. Frozen for consistency with
      # WORKER_NODE_NAMES — previously this Array was left mutable.
      ALL_NODES = [MASTER_NODE_NAME, WORKER_NODE_NAMES, DOCKER_REGISTRY_NAME].flatten.freeze
    end
  end
end
@@ -0,0 +1,39 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'fileutils'
4
+ require 'k8s_harness/paths'
5
+
6
module KubernetesHarness
  # This module is for everything around CRUD'ing disposable clusters.
  module Clusters
    # k8s-harness relies on storing things like Ansible playbooks for our
    # disposable cluster and extra files that users might use.
    # This module handles all of that.
    module Metadata
      # Directory holding generated cluster metadata (keys, kubeconfig,
      # copied playbooks). ENV['PWD'] is not guaranteed to be set (for
      # example when the process is not started from a shell), so fall
      # back to Dir.pwd.
      def self.default_dir
        "#{ENV['PWD'] || Dir.pwd}/.k8sharness_data"
      end

      # Idempotently creates the metadata directory.
      def self.create_dir!
        ::FileUtils.mkdir_p default_dir unless Dir.exist? default_dir
      end

      # Creates the metadata directory and seeds it with the gem's
      # bundled include files.
      def self.initialize!
        create_dir!
        FileUtils.cp_r("#{KubernetesHarness::Paths.include_dir}/.", default_dir)
      end

      # Writes `content` to `file_name` inside the metadata directory.
      def self.write!(file_name, content)
        KubernetesHarness.logger.debug "Creating new metadata: #{file_name}"
        fp = File.join default_dir, file_name
        File.write(fp, content)
      end

      # Removes `file_name` from the metadata directory.
      def self.delete!(file_name)
        KubernetesHarness.logger.debug "Deleting from metadata: #{file_name}"
        fp = File.join default_dir, file_name
        FileUtils.rm(fp)
      end
    end
  end
end
@@ -0,0 +1,47 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'yaml'
4
+ require 'k8s_harness/paths'
5
+ require 'k8s_harness/shell_command'
6
+
7
module KubernetesHarness
  module Clusters
    # This module ensures that we have the software we need to run k8s-harness
    # on the user's machine.
    module RequiredSoftware
      # Loads the list of required tools (name + version-check command)
      # from conf/required_software.yaml.
      def self.software
        conf_file = File.join(KubernetesHarness::Paths.conf_dir, 'required_software.yaml')
        YAML.safe_load(File.read(conf_file), symbolize_names: true)
      end

      # Runs every tool's version-check command; raises with a summary of
      # everything that appears to be missing.
      def self.ensure_installed_or_exit!
        missing = software.reject do |app|
          KubernetesHarness.logger.debug("Checking that this is installed: #{app[:name]}")
          check = KubernetesHarness::ShellCommand.new("sh -c '#{app[:version_check]}; exit $?'")
          check.execute!
          check.success?
        end.map { |app| app[:name] }

        raise show_missing_software_message(missing) unless missing.empty?
      end

      # Formats the list of missing tools for the raised error.
      def self.show_missing_software_message(apps)
        <<~MESSAGE.strip
          You are missing the following software:

          #{apps.map { |app| "- #{app}" }.join("\n")}

          Please consult the README to learn what you'll need to install before using k8s-harness.
        MESSAGE
      end

      private_class_method :show_missing_software_message
    end
  end
end
@@ -0,0 +1,24 @@
1
+ # frozen_string_literal: true
2
+
3
module KubernetesHarness
  module Clusters
    # Simple module for interacting with Vagrant.
    module Vagrant
      # Builds (but does not run) a `vagrant <command>` ShellCommand whose
      # VAGRANT_CWD points at the metadata directory.
      def self.new_command(command, args = nil)
        cli = if args.nil?
                "vagrant #{command}"
              else
                "vagrant #{command} #{[args].flatten.join(' ')}"
              end
        KubernetesHarness::ShellCommand.new(
          cli,
          environment: { VAGRANT_CWD: Metadata.default_dir }
        )
      end

      # Runs `vagrant ssh <node> -c <command>` and returns the finished
      # command object so callers can inspect its output.
      def self.create_and_execute_new_ssh_command(node_name, command)
        ssh_command = Vagrant.new_command('ssh', ['-c', command, node_name])
        ssh_command.execute!
        ssh_command
      end
    end
  end
end
@@ -0,0 +1,91 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'shellwords'
4
+ require 'yaml'
5
+ require 'k8s_harness/shell_command'
6
+
7
module KubernetesHarness
  # This module handles reading and validating .k8sharness files.
  module HarnessFile
    # Runs the :setup step from the .k8sharness file, if any.
    def self.execute_setup!(options)
      exec_command!(options, :setup, 'Setting up your tests.')
    end

    # TODO: Tests missing (but execute_setup! has a test and implementation is the same.)
    def self.execute_tests!(options)
      exec_command!(options, :test, 'Running your tests.')
    end

    # TODO: Tests missing (but execute_setup! has a test and implementation is the same.)
    def self.execute_teardown!(options)
      exec_command!(options, :teardown, 'Tearing down your test bench.')
    end

    # Renders the harness file and, when `key` is present, runs its
    # command, logging stderr and printing stdout.
    def self.exec_command!(options, key, message)
      rendered = render(options)
      raise 'No tests found' if (key == :test) && !rendered.key?(:test)

      KubernetesHarness.logger.debug "Checking for: #{key}"
      return nil unless rendered.key? key

      KubernetesHarness.nice_logger.info message
      command = KubernetesHarness::ShellCommand.new(rendered[key])
      command.execute!
      KubernetesHarness.logger.error command.stderr unless command.stderr.empty?
      puts command.stdout
    end

    # True when the harness file defines a :test step.
    def self.test_present?(options)
      harness_file(options).key? :test
    end

    # Converts each harness-file step into a runnable shell command.
    def self.convert_to_commands(options)
      # TODO: Currently, we are assuming that the steps provided in the .k8sharness
      # will always be invoked in a shell.
      # First, we shouldn't assume that the user will want to use `sh` for these commands.
      # Second, we should allow users to invoke code in the language of their preference to
      # maximize codebase homogeneity.
      rendered = harness_file(options)
      rendered.each_key do |key|
        # BUG FIX: the old pattern /.(sh|bash|zsh)$/ had an unescaped dot,
        # so ANY command merely ending in "sh" (e.g. "make fresh") was
        # treated as a script path. Escape the dot to match extensions only.
        if rendered[key].match?(/\.(sh|bash|zsh)$/)
          rendered[key] = "sh #{rendered[key]}"
        else
          rendered[key] = "sh -c '#{Shellwords.escape(rendered[key])}'" \
            unless rendered[key].match?(/^(sh|bash|zsh) -c/)
        end
      end
    end

    # Loads, validates, and converts the harness file; raises when no
    # :test step is defined.
    def self.render(options = {})
      fp = harness_file_path(options)
      raise "k8s-harness file not found at: #{fp}" unless File.exist? fp
      return convert_to_commands(options) if test_present?(options)

      raise KeyError, <<~MESSAGE.strip
        It appears that your test isn't defined in #{fp}. Ensure that \
        a key called 'test' is in #{fp}. See .k8sharness.example for \
        an example of what a valid .k8sharness looks like.
      MESSAGE
    end

    # Prints the rendered harness file as YAML (used by `validate`).
    def self.validate(options)
      puts YAML.dump(render(options.to_h))
    end

    # Default harness-file location: .k8sharness in the working directory.
    def self.default_harness_file_path
      "#{Dir.pwd}/.k8sharness"
    end

    # Resolves the harness-file path, honoring :alternate_harnessfile.
    def self.harness_file_path(options)
      if !options.nil? && options.key?(:alternate_harnessfile)
        options[:alternate_harnessfile]
      else
        default_harness_file_path
      end
    end

    # Parses the harness file as YAML with symbol keys.
    def self.harness_file(options)
      YAML.safe_load(File.read(harness_file_path(options)), symbolize_names: true)
    end
  end
end
@@ -0,0 +1,31 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'logger'
4
+
5
# KubernetesHarness.
module KubernetesHarness
  # Verbose logger; level controlled by LOG_LEVEL, defaulting to WARN.
  @logger = Logger.new($stdout)
  @logger.level = ENV['LOG_LEVEL'] || Logger::WARN

  # User-facing logger with a compact "-->" prefix; timestamps are only
  # shown while debug logging is active.
  @nice_logger = Logger.new($stdout)
  @nice_logger.formatter = proc do |_severity, timestamp, _progname, msg|
    debugging = @logger.level == Logger::DEBUG
    debugging ? "--> [#{timestamp.strftime('%F %T %z')}] #{msg}\n" : "--> #{msg}\n"
  end

  class << self
    attr_reader :logger, :nice_logger
  end

  # Functions to manipulate log control.
  module Logging
    def self.enable_debug_logging
      KubernetesHarness.logger.level = Logger::DEBUG
    end
  end
end
@@ -0,0 +1,18 @@
1
+ # frozen_string_literal: true
2
+
3
module KubernetesHarness
  # The canonical source of all toplevel paths
  module Paths
    # Absolute path of the gem root (two directories above this file's dir).
    def self.root_dir
      File.expand_path(File.join(__dir__, '..', '..'))
    end

    # The gem's bundled include files.
    def self.include_dir
      File.join(root_dir, 'include')
    end

    # The gem's configuration files.
    def self.conf_dir
      File.join(root_dir, 'conf')
    end
  end
end
@@ -0,0 +1,88 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'English'
4
+ require 'open3'
5
+
6
module KubernetesHarness
  # Handles all interactions with shells.
  #
  # A ShellCommand wraps a single command line; #execute! runs it via
  # Open3, capturing stdout, stderr, and the exit code.
  class ShellCommand
    attr_accessor :command, :stdout, :stderr

    # Ruby 2.7 deprecated keyword arguments in favor of passing in Hashes.
    # TODO: Refactor to account for this.
    #
    # command     - command line to run (passed to the shell via Open3).
    # environment - extra environment variables for the child process
    #               (keys may be symbols or strings).
    def initialize(command, environment: {})
      KubernetesHarness.logger.debug("Creating new command #{command} with env #{environment}")
      @command = command
      @environment = environment
      @exitcode = nil
      @stdout = nil
      @stderr = nil
    end

    # Runs the command, draining its output incrementally, then logs a
    # debug summary. Populates @stdout, @stderr and @exitcode.
    def execute!
      @stdout, @stderr, @exitcode = read_output_in_chunks(@environment)

      show_debug_command_output
    end

    # True when the command exited with +exit_code+ (0 by default).
    def success?(exit_code: 0)
      @exitcode == exit_code
    end

    private

    # True when every stream in +files+ has hit EOF.
    #
    # FIX: this was previously a `def` nested inside read_output_in_chunks,
    # which re-defined a *public* instance method on the class every time
    # the command ran and leaked an internal helper into the public API.
    def all_eof?(files)
      files.find { |f| !f.eof }.nil?
    end

    # Runs @command via Open3, reading stdout and stderr in 1 KiB chunks
    # so neither pipe can fill and deadlock the child.
    # Returns [stdout_string, stderr_string, exit_code].
    # Courtesy of: https://gist.github.com/chrisn/7450808
    def read_output_in_chunks(environment = {})
      block_size = 1024
      final_stdout = ''
      final_stderr = ''
      final_exitcode = nil
      KubernetesHarness.logger.debug("Running #{@command} with env #{environment}")
      # popen3 requires string env keys; symbols are accepted at our API edge.
      Open3.popen3(environment.transform_keys(&:to_s), @command) do |stdin, stdout, stderr, thread|
        stdin.close_write

        begin
          files = [stdout, stderr]
          until all_eof?(files)
            ready = IO.select(files)
            next unless ready

            readable = ready[0]
            readable.each do |f|
              data = f.read_nonblock(block_size)
              stdout_chunk = f == stdout ? data : ''
              stderr_chunk = f == stderr ? data : ''
              final_stdout += stdout_chunk
              final_stderr += stderr_chunk
              KubernetesHarness.logger.debug("command: #{@command}, stdout_chunk: #{stdout_chunk}")
              KubernetesHarness.logger.debug("command: #{@command}, stderr_chunk: #{stderr_chunk}")
            rescue EOFError
              # A stream can EOF between select and read_nonblock; benign.
              KubernetesHarness.logger.debug("command: #{@command}, stream has EOF'ed")
            end
          end
        rescue IOError => e
          puts "IOError: #{e}"
        end
        # thread.value joins the child and yields its Process::Status.
        final_exitcode = thread.value.exitstatus
      end

      [final_stdout, final_stderr, final_exitcode]
    end

    # Logs the command, exit code and captured output at debug level.
    def show_debug_command_output
      message = <<~MESSAGE.strip
        Running #{@command} done, \
        rc = #{@exitcode}, \
        stdout = '#{@stdout}', \
        stderr = '#{@stderr}'
      MESSAGE
      KubernetesHarness.logger.debug message
    end
  end
end
@@ -0,0 +1,100 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'k8s_harness/clusters'
4
+ require 'k8s_harness/clusters/cluster_info'
5
+ require 'k8s_harness/clusters/metadata'
6
+ require 'k8s_harness/harness_file'
7
+
8
module KubernetesHarness
  # All entrypoints for our subcommands live here.
  module Subcommand
    # Entrypoint for `k8s-harness run`. Validates the harness file first
    # (render raises on a missing file or missing 'test' key), then
    # creates, provisions, tests against, and — unless disabled — tears
    # down a disposable cluster. The steps below are order-dependent.
    def self.run(options = {})
      fail_if_validate_fails!(options)
      disable_teardown = !options.nil? && options[:disable_teardown]
      # Bail out early when the caller only wants usage text printed.
      return true if !options.nil? && options[:show_usage]

      print_warning_if_teardown_disabled(disable_teardown)
      cluster_info = create!
      provision!(cluster_info)
      print_post_create_message(cluster_info)
      setup!(options)
      run_tests!(options)
      teardown!(options)
      destroy_cluster!(disable_teardown)
    end

    # Entrypoint for `k8s-harness validate`: delegates to HarnessFile,
    # which prints the rendered harness file as YAML.
    def self.validate(options = {})
      return true if options.to_h[:show_usage]

      KubernetesHarness::HarnessFile.validate(options)
    end

    # Entrypoint for `k8s-harness destroy`: removes any existing cluster.
    def self.destroy(options = {})
      return true if options.to_h[:show_usage]

      KubernetesHarness.nice_logger.info('Destroying your cluster (if any found).')
      KubernetesHarness::Clusters.destroy_existing!
    end

    # Warns that the user is responsible for destroying the cluster when
    # teardown was disabled. No-op when +teardown_flag+ is falsy.
    def self.print_warning_if_teardown_disabled(teardown_flag)
      return unless teardown_flag

      KubernetesHarness.nice_logger.warn(
        <<~MESSAGE.strip
          Teardown is disabled. Your cluster will stay up until you run \
          'k8s-harness destroy'.
        MESSAGE
      )
    end

    # Creates the cluster and returns its cluster-info object
    # (whatever Clusters.create! yields; consumed by provision! below).
    def self.create!
      KubernetesHarness.nice_logger.info(
        'Creating your cluster now. Provisioning will occur in a few minutes.'
      )
      KubernetesHarness::Clusters.create!
    end

    # Provisions a freshly-created cluster described by +cluster_info+.
    def self.provision!(cluster_info)
      KubernetesHarness.nice_logger.info('Provisioning the cluster. This will take a few minutes.')
      KubernetesHarness::Clusters.provision!(cluster_info)
    end

    # Runs the user's optional setup commands from the harness file.
    def self.setup!(options)
      KubernetesHarness::HarnessFile.execute_setup!(options)
    end

    # Runs the user's test commands from the harness file.
    def self.run_tests!(options)
      KubernetesHarness.nice_logger.info('Running your tests.')
      KubernetesHarness::HarnessFile.execute_tests!(options)
    end

    # Runs the user's optional teardown commands from the harness file.
    def self.teardown!(options)
      KubernetesHarness::HarnessFile.execute_teardown!(options)
    end

    # Tears the cluster down unless teardown was disabled.
    # NOTE(review): the 'Tearing down' message is printed even when
    # teardown is skipped — confirm that is intended.
    def self.destroy_cluster!(disable_teardown)
      KubernetesHarness.nice_logger.info('Done. Tearing down the cluster.')
      KubernetesHarness::Clusters.teardown! unless disable_teardown
    end

    # Renders the harness file purely for its side effect: render raises
    # when the file is absent or lacks a 'test' key. The result is unused.
    def self.fail_if_validate_fails!(options)
      _ = KubernetesHarness::HarnessFile.render(options)
    end

    # Prints connection details (addresses, kubeconfig, SSH key) for a
    # newly-created cluster, plus where the same info lives as YAML.
    def self.print_post_create_message(cluster_info)
      # TODO: Make this not hardcoded.
      cluster_info_yaml_path = File.join Clusters::Metadata.default_dir, 'cluster.yaml'
      KubernetesHarness.nice_logger.info(
        <<~MESSAGE.strip
          Cluster has been created. Details are below and in YAML at #{cluster_info_yaml_path}:

          * Master address: '#{cluster_info.master_ip_address}'
          * Worker addresses: #{cluster_info.worker_ip_addresses}
          * Docker registry address: '#{cluster_info.docker_registry_address}'
          * Kubeconfig path: #{cluster_info.kubeconfig_path}
          * SSH key path: #{cluster_info.ssh_key_path}
        MESSAGE
      )
    end
  end
end
metadata ADDED
@@ -0,0 +1,63 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: k8s-harness
3
+ version: !ruby/object:Gem::Version
4
+ version: 1.0.0
5
+ platform: ruby
6
+ authors:
7
+ - Carlos Nunez
8
+ autorequire:
9
+ bindir: bin
10
+ cert_chain: []
11
+ date: 2020-10-28 00:00:00.000000000 Z
12
+ dependencies: []
13
+ description: Please visit the README in the Github repo linked to this gem for more
14
+ info.
15
+ email: dev@carlosnunez.me
16
+ executables:
17
+ - k8s-harness
18
+ extensions: []
19
+ extra_rdoc_files: []
20
+ files:
21
+ - "./conf/required_software.yaml"
22
+ - "./include/Vagrantfile"
23
+ - "./include/inventory"
24
+ - "./include/site.yml"
25
+ - "./lib/k8s_harness.rb"
26
+ - "./lib/k8s_harness/cli.rb"
27
+ - "./lib/k8s_harness/clusters.rb"
28
+ - "./lib/k8s_harness/clusters/ansible.rb"
29
+ - "./lib/k8s_harness/clusters/cluster_info.rb"
30
+ - "./lib/k8s_harness/clusters/constants.rb"
31
+ - "./lib/k8s_harness/clusters/metadata.rb"
32
+ - "./lib/k8s_harness/clusters/required_software.rb"
33
+ - "./lib/k8s_harness/clusters/vagrant.rb"
34
+ - "./lib/k8s_harness/harness_file.rb"
35
+ - "./lib/k8s_harness/logging.rb"
36
+ - "./lib/k8s_harness/paths.rb"
37
+ - "./lib/k8s_harness/shell_command.rb"
38
+ - "./lib/k8s_harness/subcommand.rb"
39
+ - bin/k8s-harness
40
+ homepage: https://github.com/carlosonunez/k8s-harness
41
+ licenses:
42
+ - MIT
43
+ metadata: {}
44
+ post_install_message:
45
+ rdoc_options: []
46
+ require_paths:
47
+ - lib
48
+ required_ruby_version: !ruby/object:Gem::Requirement
49
+ requirements:
50
+ - - "~>"
51
+ - !ruby/object:Gem::Version
52
+ version: 2.7.0
53
+ required_rubygems_version: !ruby/object:Gem::Requirement
54
+ requirements:
55
+ - - ">="
56
+ - !ruby/object:Gem::Version
57
+ version: '0'
58
+ requirements: []
59
+ rubygems_version: 3.1.4
60
+ signing_key:
61
+ specification_version: 4
62
+ summary: Test your apps in disposable, prod-like Kubernetes clusters
63
+ test_files: []