prepd 0.1.1 → 0.3.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (51) hide show
  1. checksums.yaml +5 -5
  2. data/bin/console +2 -0
  3. data/files/cluster/Vagrantfile +118 -0
  4. data/files/cluster/vagrant.yml +52 -0
  5. data/files/developer/cluster/provision.yml +2 -0
  6. data/files/machine/build.json +52 -0
  7. data/files/machine/debian/stretch/iso.json +101 -0
  8. data/files/machine/debian/stretch/preseed.cfg +404 -0
  9. data/files/machine/json.rb +26 -0
  10. data/files/machine/push.json +56 -0
  11. data/files/machine/rebuild.json +60 -0
  12. data/files/project/provision.yml +20 -0
  13. data/files/project/vars.yml +5 -0
  14. data/files/setup.yml +16 -0
  15. data/files/setup/README.md +21 -0
  16. data/files/setup/ansible.cfg +4 -0
  17. data/files/setup/hosts +1 -0
  18. data/files/setup/setup.yml +19 -0
  19. data/files/setup/vars.yml +20 -0
  20. data/files/workspace/.gitignore +12 -0
  21. data/files/workspace/README.md +11 -0
  22. data/files/workspace/clusters/prepd.yml +17 -0
  23. data/files/workspace/clusters/provision.yml +73 -0
  24. data/files/workspace/clusters/vagrant.rb +106 -0
  25. data/files/workspace/clusters/vagrant.yml +18 -0
  26. data/files/workspace/data/.keep +0 -0
  27. data/files/workspace/developer/ansible.cfg +4 -0
  28. data/files/workspace/developer/credentials/.keep +0 -0
  29. data/files/workspace/developer/hosts +7 -0
  30. data/files/workspace/developer/machines/provision.yml +9 -0
  31. data/files/workspace/developer/provision.yml +15 -0
  32. data/files/workspace/machines/build.yml +34 -0
  33. data/files/workspace/machines/provision.yml +36 -0
  34. data/lib/prepd.rb +95 -34
  35. data/lib/prepd/cli.rb +27 -4
  36. data/lib/prepd/cli/commands.rb +64 -25
  37. data/lib/prepd/cli/options_parser.rb +42 -16
  38. data/lib/prepd/models.rb +7 -261
  39. data/lib/prepd/models/base.rb +124 -0
  40. data/lib/prepd/models/cluster.rb +255 -0
  41. data/lib/prepd/models/data.rb +5 -0
  42. data/lib/prepd/models/developer.rb +129 -0
  43. data/lib/prepd/models/machine.rb +146 -0
  44. data/lib/prepd/models/project.rb +94 -0
  45. data/lib/prepd/models/setup.rb +48 -0
  46. data/lib/prepd/models/workspace.rb +51 -0
  47. data/lib/prepd/version.rb +1 -1
  48. data/prepd.gemspec +4 -6
  49. metadata +47 -37
  50. data/TODO.md +0 -17
  51. data/lib/prepd/schema.rb +0 -23
@@ -0,0 +1,26 @@
1
+ #!/usr/bin/env ruby
2
+ require 'json'
3
+
4
+ # path, prefix, name, version = ARGV
5
+ prefix, name, version = ARGV
6
+ path = "#{Dir.pwd}/boxes"
7
+ # prefix = 'prepd'
8
+ # name = 'debian-stretch-amd64-developer'
9
+ # version = '9.9.9'
10
+
11
+ json = {
12
+ name: "#{prefix}/#{name}",
13
+ versions: [
14
+ {
15
+ version: version,
16
+ providers: [
17
+ {
18
+ name: 'virtualbox',
19
+ url: "file://#{path}/#{name}.box"
20
+ }
21
+ ]
22
+ }
23
+ ]
24
+ }
25
+
26
+ File.open("#{path}/#{name}.json", 'w') { |f| f.write json.to_json }
@@ -0,0 +1,56 @@
1
+ {
2
+ "variables": {
3
+ "aws_profile": "{{env `AWS_PROFILE`}}",
4
+ "vm_base_name": "{{env `VM_BASE_NAME`}}",
5
+ "vm_name": "{{env `VM_BASE_NAME`}}-{{env `VM_OUTPUT`}}",
6
+ "vm_input": "{{env `VM_INPUT`}}",
7
+ "vm_output": "{{env `VM_OUTPUT`}}",
8
+ "box_version": "{{env `BOX_VERSION`}}",
9
+ "s3_bucket": "{{env `S3_BUCKET`}}",
10
+ "s3_region": "{{env `S3_REGION`}}",
11
+ "s3_box_dir": "{{ env `S3_BOX_DIR`}}"
12
+ },
13
+
14
+ "builders": [
15
+ {
16
+ "guest_additions_mode": "disable",
17
+ "headless": true,
18
+ "output_directory": "images_cache/{{user `vm_output`}}",
19
+ "shutdown_command": "echo 'halt -p' > shutdown.sh; echo 'vagrant'|sudo -S sh 'shutdown.sh'",
20
+ "source_path": "images/{{user `vm_input`}}/{{user `vm_base_name`}}-{{user `vm_input`}}.ovf",
21
+ "ssh_username": "vagrant",
22
+ "ssh_password": "vagrant",
23
+ "ssh_wait_timeout": "30s",
24
+ "type": "virtualbox-ovf",
25
+ "vm_name": "{{user `vm_name`}}"
26
+ }
27
+ ],
28
+
29
+ "post-processors": [
30
+ [
31
+ {
32
+ "keep_input_artifact": true,
33
+ "output": "{{user `s3_box_dir`}}/{{user `vm_name`}}.box",
34
+ "type": "vagrant"
35
+ },
36
+ {
37
+ "type": "vagrant-s3",
38
+ "box_dir": "{{user `s3_box_dir`}}",
39
+ "box_name": "{{user `vm_name`}}",
40
+ "bucket": "{{user `s3_bucket`}}",
41
+ "manifest": "{{user `vm_name`}}.json",
42
+ "profile": "{{user `aws_profile`}}",
43
+ "region": "{{user `s3_region`}}",
44
+ "version": "{{ user `box_version` }}"
45
+ },
46
+ {
47
+ "type": "shell-local",
48
+ "inline": [
49
+ "rm -rf images/{{user `vm_input`}}",
50
+ "mv images_cache/{{user `vm_input`}} images",
51
+ "rmdir images_cache"
52
+ ]
53
+ }
54
+ ]
55
+ ]
56
+ }
@@ -0,0 +1,60 @@
1
+ {
2
+ "variables": {
3
+ "box_namespace": "{{env `BOX_NAMESPACE`}}",
4
+ "box_version": "{{env `BOX_VERSION`}}",
5
+ "playbook_file": "{{env `PLAYBOOK_FILE`}}",
6
+ "vm_base_name": "{{env `VM_BASE_NAME`}}",
7
+ "vm_name": "{{env `VM_BASE_NAME`}}-{{env `VM_OUTPUT`}}",
8
+ "vm_input": "{{env `VM_INPUT`}}",
9
+ "vm_output": "{{env `VM_OUTPUT`}}",
10
+ "json_rb_file": "{{env `JSON_RB_FILE`}}"
11
+ },
12
+
13
+ "builders": [
14
+ {
15
+ "type": "virtualbox-ovf",
16
+ "guest_additions_mode": "disable",
17
+ "headless": true,
18
+ "output_directory": "images_cache/{{user `vm_output`}}",
19
+ "shutdown_command": "echo 'halt -p' > shutdown.sh; echo 'vagrant'|sudo -S sh 'shutdown.sh'",
20
+ "source_path": "images/{{user `vm_input`}}/{{user `vm_base_name`}}-{{user `vm_input`}}.ovf",
21
+ "ssh_username": "vagrant",
22
+ "ssh_password": "vagrant",
23
+ "ssh_wait_timeout": "30s",
24
+ "vm_name": "{{user `vm_name`}}"
25
+ }
26
+ ],
27
+
28
+ "provisioners": [
29
+ {
30
+ "type": "ansible",
31
+ "groups": [ "{{user `vm_output`}}" ],
32
+ "playbook_file": "{{user `playbook_file`}}",
33
+ "user" : "vagrant"
34
+ }
35
+ ],
36
+
37
+ "post-processors": [
38
+ [
39
+ {
40
+ "type": "vagrant",
41
+ "keep_input_artifact": true,
42
+ "output": "boxes/{{user `vm_name`}}.box"
43
+ },
44
+ {
45
+ "type": "shell-local",
46
+ "inline": [
47
+ "rm -rf images/{{user `vm_input`}}",
48
+ "mv images_cache/{{user `vm_input`}} images",
49
+ "rmdir images_cache"
50
+ ]
51
+ },
52
+ {
53
+ "type": "shell-local",
54
+ "inline": [
55
+ "{{user `json_rb_file`}} {{user `box_namespace`}} {{user `vm_name`}} {{user `box_version`}}"
56
+ ]
57
+ }
58
+ ]
59
+ ]
60
+ }
@@ -0,0 +1,20 @@
1
+ #!/usr/bin/env ansible-playbook
2
+ ---
3
+ - hosts: development
4
+ tasks:
5
+ - include_role:
6
+ name: prepd/machine
7
+
8
+ - name: Include vars
9
+ include_vars:
10
+ file: vars.yml
11
+ name: projects
12
+
13
+ - include_role:
14
+ name: prepd/project/setup
15
+ tasks_from: clone
16
+ vars:
17
+ projects_dir: '{{ prepd_machine.dirs.projects }}'
18
+ with_dict: '{{ projects }}'
19
+ loop_control:
20
+ loop_var: project
@@ -0,0 +1,5 @@
1
+ ---
2
+ example:
3
+ repo:
4
+ name: devops
5
+ url: git@github.com:rjayroach/prepd-project.git
data/files/setup.yml ADDED
@@ -0,0 +1,16 @@
1
+ # NOTE: Custom values for the developer's machines
2
+ # TODO:
3
+ # move the machine hash to the machine's vars/setup.yml
4
+ # shell and aliases_file are properties of the developer so put them under a 'developer' hash key
5
+ ---
6
+ machine_time_zone: 'Asia/Singapore'
7
+ machine_shell: zsh
8
+ machine_aliases_file: '{{ ansible_env.HOME }}/.zsh.after/aliases.zsh'
9
+ machine_projects_dir: '{{ ansible_env.HOME }}/dev'
10
+
11
+ machine:
12
+ time_zone: 'Asia/Singapore'
13
+ shell: zsh
14
+ aliases_file: '{{ ansible_env.HOME }}/.zsh.after/aliases.zsh'
15
+
16
+ machine_env: '{{ machine }}'
@@ -0,0 +1,21 @@
1
+ # prepd
2
+
3
+ ## Getting Started with a new Host
4
+
5
+ First, modify vars.yml to
6
+
7
+ - include your git name and email
8
+ - the list of extra packages to install
9
+
10
+ Next, run the setup.yml playbook. It will fail on the first attempt to install VirtualBox
11
+
12
+ After installation fails, go to Security settings and enable kernel extensions from Oracle America
13
+
14
+ Then rerun setup.yml
15
+
16
+
17
+ ```bash
18
+ prepd setup
19
+ ```
20
+
21
+ Full documentation is available [here](https://github.com/rjayroach/prepd/tree/master/docs)
@@ -0,0 +1,4 @@
1
+ [defaults]
2
+ inventory = hosts
3
+ retry_files_enabled = no
4
+ # vault_password_file = ~/prepd/config/developer/vault-keys/password.txt
data/files/setup/hosts ADDED
@@ -0,0 +1 @@
1
+ localhost ansible_connection=local
@@ -0,0 +1,19 @@
1
+ #!/usr/bin/env ansible-playbook
2
+ # NOTE: run setup.yml with -K to prompt for sudo password
3
+ # NOTE: the order of the roles is important: virtualbox is first b/c it will fail, yadr is next b/c it sets the shell
4
+ # roles installed after that often depend on the shell type which is zsh in the case of yadr
5
+ ---
6
+ - hosts: localhost
7
+ vars_files:
8
+ - vars.yml
9
+ roles:
10
+ - prepd/virtualbox
11
+ - prepd/yadr
12
+ - prepd/vagrant
13
+ - prepd/packer
14
+ - prepd/extras
15
+
16
+ # TODO: the extras role still has some legacy code; refactor it when installing on a Linux laptop
17
+ # TODO: cordova needs some work
18
+ # TODO: developer/setup role contains the list of packages to install, but is not invoked here yet
19
+ # TODO: when the installation is 'standardized' then put all the roles into a summary role in prepd
@@ -0,0 +1,20 @@
1
+ ---
2
+ git_user:
3
+ name: John Doe Developer
4
+ email: email@example.com
5
+
6
+ extra_items:
7
+ - google-chrome
8
+ - dropbox
9
+ - evernote
10
+ - iterm2
11
+ - firefox
12
+ - kindle
13
+ - postman
14
+ - skype
15
+ - slack
16
+ - spotify
17
+ - transmission
18
+
19
+ extra_items_not_installing:
20
+ - insomniax
@@ -0,0 +1,12 @@
1
+ # See https://help.github.com/articles/ignoring-files for more about ignoring files.
2
+
3
+ /developer/vault-password.txt
4
+
5
+ /machines/*
6
+ !/machines/*.yml
7
+
8
+ /projects/*
9
+ !/projects/*.yml
10
+
11
+ /data/*
12
+ !/data/.keep
@@ -0,0 +1,11 @@
1
+ # prepd
2
+
3
+ ## Getting Started
4
+
5
+ Create a VirtualBox cluster of machines managed by Vagrant:
6
+
7
+ ```bash
8
+ prepd new cluster my-cluster
9
+ ```
10
+
11
+ Full documentation is available [here](https://github.com/rjayroach/prepd/tree/master/docs)
@@ -0,0 +1,17 @@
1
+ # included as prepd_machine by prepd/machine role
2
+ ---
3
+ dirs:
4
+ data: '{{ ansible_env.HOME }}/data'
5
+ developer: '{{ ansible_env.HOME }}/developer'
6
+ machine: '{{ ansible_env.HOME }}/machine'
7
+ projects: '{{ ansible_env.HOME }}/projects'
8
+ share: '{{ ansible_env.HOME }}/share'
9
+
10
+ dns:
11
+ name: "{{ hostname.stdout.split('.')[1] }}"
12
+ domainname: "{{ hostname.stdout.split('.')[-2:] | join('.') }}"
13
+
14
+ cluster:
15
+ ssh: n1
16
+ host: "node1.{{ hostname.stdout.split('.')[-2:] | join('.') }}"
17
+ registry_port: 30005
@@ -0,0 +1,73 @@
1
+ #!/usr/bin/env ansible-playbook
2
+ # vagrant machine provisioner (based on host's ansible.groups values set in Vagrantfile)
3
+ ---
4
+ - hosts: all
5
+ tasks:
6
+ - file:
7
+ path: '{{ ansible_env.HOME }}/{{ item }}'
8
+ state: absent
9
+ with_items: [shutdown.sh, yankring_history_v2.txt]
10
+
11
+ # TODO: terraform/terragrunt binary installation can be moved to packer.
12
+ - hosts: kubectl:!minikube
13
+ # NOTE: These vars need to be accessible to the project, e.g. utils.yml -e@utils/build.yml needs the cluster_host and local_registry_port in order to tag the image to push it
14
+ # This should go into the machine's prepd.yml config file which also has directory locations. Anything that is a machine wide configuration goes here
15
+ # This would include machine's timezone, docker daemon config, perhaps PG installation and so on. The project could still configure this stuff, but doesn't have to
16
+ # vars:
17
+ # cluster_host: node1
18
+ # cluster_ssh: n1
19
+ # local_registry_port: 30005
20
+
21
+ tasks:
22
+ - include_role:
23
+ name: prepd/machine
24
+
25
+ - name: Insert aliases
26
+ include_role:
27
+ name: prepd/aliases
28
+ vars:
29
+ aliases_marker: host-ssh
30
+ aliases_block: |
31
+ {{ prepd_machine.cluster.ssh }}() { ssh {{ prepd_machine.cluster.host }} $@ }
32
+
33
+ - name: Use scp to copy minikube credentials from cluster
34
+ include_role:
35
+ name: prepd/k8s/kubectl
36
+ tasks_from: remote
37
+ vars:
38
+ k8s_kubectl_cluster_host: '{{ prepd_machine.cluster.host }}'
39
+
40
+ - name: Initialize helm
41
+ command: helm init
42
+
43
+ - name: Wait for tiller to be available
44
+ pause:
45
+ seconds: 30
46
+
47
+ - name: Install local registry in local cluster
48
+ command: 'helm install stable/docker-registry --name local-registry --set service.type=NodePort,service.nodePort={{ prepd_machine.cluster.registry_port }}'
49
+
50
+ - name: Configure docker to allow push to insecure registry running in local cluster
51
+ include_role:
52
+ name: prepd/docker
53
+ tasks_from: insecure-registry
54
+ vars:
55
+ registry: '{{ prepd_machine.cluster.host }}:{{ prepd_machine.cluster.registry_port }}'
56
+
57
+ - hosts: minikube
58
+ tasks:
59
+ - command: 'minikube status |grep minikube'
60
+ register: mks
61
+ ignore_errors: yes
62
+ changed_when: false
63
+
64
+ - set_fact:
65
+ minikube_running: "{{ mks.stdout_lines[0].split(' ')[-1] == 'Running' }}"
66
+
67
+ - include_role:
68
+ name: prepd/k8s/minikube
69
+ tasks_from: start
70
+ vars:
71
+ # The physical interface enp0s8 is specific to this hardware setup
72
+ minikube_spoof_intf: ansible_enp0s8
73
+ when: not minikube_running
@@ -0,0 +1,106 @@
1
+ # Load configuration yaml
2
+ require 'yaml'
3
+ require 'erb'
4
+
5
+ # Apply deep_merge method to Hash class
6
+ class ::Hash
7
+ def deep_merge(second)
8
+ merger = proc { |key, v1, v2| Hash === v1 && Hash === v2 ? v1.merge(v2, &merger) : v2 }
9
+ self.merge(second, &merger)
10
+ end
11
+ end
12
+
13
+ module Prepd
14
+ module Vagrant
15
+ class Config
16
+ # NOTE: paths are relative to the location of the Vagrantfile from which this file is included
17
+ WORKSPACE_CONFIG_FILE = '../../prepd-workspace.yml'
18
+ BASE_CLUSTER_CONFIG_FILE = '../vagrant.yml'
19
+ CLUSTER_CONFIG_FILE = 'vagrant.yml'
20
+
21
+ attr_accessor :workspace, :base, :cluster
22
+
23
+ def workspace
24
+ @workspace ||= YAML.load(ERB.new(File.read(WORKSPACE_CONFIG_FILE)).result(binding))
25
+ end
26
+
27
+ def base
28
+ return @base if @base
29
+ settings = {}
30
+ config = YAML.load(ERB.new(File.read(BASE_CLUSTER_CONFIG_FILE)).result(binding))
31
+ settings = config['settings']
32
+ @base = YAML.load(ERB.new(File.read(BASE_CLUSTER_CONFIG_FILE)).result(binding))
33
+ end
34
+
35
+ def cluster
36
+ return @cluster if @cluster
37
+ settings = base['settings']
38
+ boxes = base['boxes']
39
+ config = YAML.load(ERB.new(File.read(CLUSTER_CONFIG_FILE)).result(binding))
40
+ config['machines'].each do |k, v|
41
+ config['machines'][k] = config['defaults'].deep_merge(v)
42
+ end
43
+ @cluster = config
44
+ end
45
+
46
+ def machines
47
+ cluster['machines'].keys
48
+ end
49
+
50
+ def machine_config(key)
51
+ cluster['machines'][key]
52
+ end
53
+ end
54
+
55
+ class Machine
56
+ attr_accessor :name, :config
57
+
58
+ def initialize(name = 'node1')
59
+ @name = name
60
+ end
61
+
62
+ def config
63
+ @config ||= Config.new.machine_config(name)
64
+ end
65
+
66
+ def autostart
67
+ config['vm']['autostart'] || false
68
+ end
69
+
70
+ def host_name
71
+ "#{config['name'] || name}.#{config['domain']}"
72
+ end
73
+
74
+ def vm_box
75
+ config['vm']['box']
76
+ end
77
+
78
+ def vm_box_url
79
+ config['vm']['box_url']
80
+ end
81
+
82
+ def mounts
83
+ config['mounts'] || {}
84
+ end
85
+
86
+ def port_forwards
87
+ config['vm']['port_forwards'] || {}
88
+ end
89
+
90
+ def ssh_interface
91
+ config['ssh']['interface']
92
+ end
93
+
94
+ def ansible_groups
95
+ config['ansible_groups'] || {}
96
+ end
97
+ end
98
+ end
99
+ end
100
+
101
+ # Testing
102
+ unless defined? PREPD_VAGRANT
103
+ m = Prepd::Vagrant::Machine.new('node0')
104
+ require 'pry'
105
+ binding.pry
106
+ end