kite 0.1.0 → 0.2.0

Files changed (93)
  1. checksums.yaml +4 -4
  2. data/.gitignore +2 -0
  3. data/CHANGELOG.md +18 -1
  4. data/Dockerfile +30 -9
  5. data/Makefile +21 -9
  6. data/README.md +9 -0
  7. data/bin/concourse/check +12 -0
  8. data/bin/concourse/in +12 -0
  9. data/bin/concourse/out +46 -0
  10. data/config/pipelines/review.yml +158 -0
  11. data/config/pipelines/tasks/create-pull-requests-tag.yml +13 -0
  12. data/config/pipelines/tasks/create-repository-tag.yml +13 -0
  13. data/config/pipelines/tasks/run-master-tests.yml +12 -0
  14. data/config/pipelines/tasks/run-pr-tests.yml +12 -0
  15. data/kite.gemspec +1 -1
  16. data/lib/kite/generate.rb +39 -0
  17. data/lib/kite/helpers/concourse.rb +36 -0
  18. data/lib/kite/render.rb +68 -2
  19. data/lib/kite/version.rb +1 -1
  20. data/tpl/aws/README.md +20 -0
  21. data/tpl/aws/bin/concourse-deploy.sh.tt +4 -1
  22. data/tpl/aws/bin/ingress-deploy.sh.tt +7 -0
  23. data/tpl/aws/bin/ingress-update.sh.tt +7 -0
  24. data/tpl/aws/bin/kops-delete.sh.erb +5 -0
  25. data/tpl/aws/bin/kops-deploy.sh.erb +11 -0
  26. data/tpl/aws/bin/oauth-deploy.sh.tt +17 -0
  27. data/tpl/aws/bin/prometheus-deploy.sh.tt +23 -0
  28. data/tpl/aws/bosh-vars.yml.erb +1 -0
  29. data/tpl/aws/config/oauth.yml +59 -0
  30. data/tpl/aws/deployments/bosh/cloud-config.yml.tt +17 -1
  31. data/tpl/aws/deployments/concourse/concourse.yml.tt +6 -0
  32. data/tpl/aws/deployments/ingress/ingress.yml.erb +78 -0
  33. data/tpl/aws/deployments/oauth/oauth.yml.tt +95 -0
  34. data/tpl/aws/deployments/prometheus/monitor-bosh.yml +518 -0
  35. data/tpl/aws/deployments/prometheus/monitor-kubernetes.yml +30 -0
  36. data/tpl/aws/deployments/prometheus/prometheus.yml.tt +184 -0
  37. data/tpl/aws/docs/concourse.md +2 -2
  38. data/tpl/aws/docs/ingress.md +14 -0
  39. data/tpl/aws/docs/kops.md +5 -8
  40. data/tpl/aws/docs/oauth.md +24 -0
  41. data/tpl/aws/docs/prometheus.md +31 -0
  42. data/tpl/aws/terraform/kite_bucket.tf +8 -0
  43. data/tpl/aws/terraform/network.tf.tt +27 -0
  44. data/tpl/aws/terraform/outputs.tf +4 -0
  45. data/tpl/aws/terraform/terraform.tfvars.tt +1 -0
  46. data/tpl/aws/terraform/variables.tf +4 -0
  47. data/tpl/gcp/README.md +19 -3
  48. data/tpl/gcp/bin/base/setup-tunnel.sh.tt +8 -3
  49. data/tpl/gcp/bin/bosh-install.sh.tt +4 -0
  50. data/tpl/gcp/bin/concourse-deploy.sh.tt +4 -1
  51. data/tpl/gcp/bin/ingress-deploy.sh.tt +7 -0
  52. data/tpl/gcp/bin/ingress-update.sh.tt +7 -0
  53. data/tpl/gcp/bin/oauth-deploy.sh.tt +19 -0
  54. data/tpl/gcp/bin/prometheus-deploy.sh.tt +23 -0
  55. data/tpl/gcp/bin/vault-deploy.sh.tt +1 -1
  56. data/tpl/gcp/bosh-vars.yml.erb +1 -0
  57. data/tpl/gcp/config/oauth.yml +59 -0
  58. data/tpl/gcp/deployments/bosh/cloud-config.yml.tt +17 -3
  59. data/tpl/gcp/deployments/concourse/concourse.yml.tt +15 -5
  60. data/tpl/gcp/deployments/ingress/ingress.yml.erb +111 -0
  61. data/tpl/gcp/deployments/oauth/oauth.yml.tt +95 -0
  62. data/tpl/gcp/deployments/prometheus/monitor-bosh.yml +518 -0
  63. data/tpl/gcp/deployments/prometheus/monitor-kubernetes.yml +30 -0
  64. data/tpl/gcp/deployments/prometheus/prometheus.yml +183 -0
  65. data/tpl/gcp/docs/bosh.md +5 -0
  66. data/tpl/gcp/docs/concourse.md +3 -3
  67. data/tpl/gcp/docs/ingress.md +12 -0
  68. data/tpl/gcp/docs/oauth.md +24 -0
  69. data/tpl/gcp/docs/prometheus.md +27 -0
  70. data/tpl/gcp/docs/vault.md +2 -1
  71. data/tpl/gcp/terraform/main.tf +6 -1
  72. data/tpl/gcp/terraform/outputs.tf +4 -0
  73. data/tpl/service/%output_path%/charts/%app_name%/Chart.yaml.tt +4 -0
  74. data/tpl/service/%output_path%/charts/%app_name%/templates/NOTES.txt.tt +19 -0
  75. data/tpl/service/%output_path%/charts/%app_name%/templates/_helpers.tpl +16 -0
  76. data/tpl/service/%output_path%/charts/%app_name%/templates/deployment.yaml +37 -0
  77. data/tpl/service/%output_path%/charts/%app_name%/templates/ingress.yaml +32 -0
  78. data/tpl/service/%output_path%/charts/%app_name%/templates/service.yaml +19 -0
  79. data/tpl/service/%output_path%/charts/%app_name%/values.yaml.tt +37 -0
  80. data/tpl/service/%output_path%/environments/.keep +0 -0
  81. data/tpl/service/%output_path%/pipelines/review.yml.tt +189 -0
  82. data/tpl/service/%output_path%/pipelines/tasks/create-pull-requests-tag.yml.tt +13 -0
  83. data/tpl/service/%output_path%/pipelines/tasks/create-repository-tag.yml.tt +13 -0
  84. data/tpl/service/%output_path%/pipelines/tasks/helm-deploy.yml.tt +22 -0
  85. data/tpl/service/%output_path%/pipelines/tasks/run-master-tests.yml.tt +12 -0
  86. data/tpl/service/%output_path%/pipelines/tasks/run-pr-tests.yml.tt +12 -0
  87. data/tpl/service/Dockerfile.tt +4 -0
  88. data/tpl/service/Makefile.tt +28 -0
  89. data/tpl/service/VERSION.tt +1 -0
  90. data/tpl/service/docs/pipeline.md.tt +58 -0
  91. data/tpl/skel/config/cloud.yml +30 -5
  92. metadata +58 -5
  93. data/tpl/gcp/deployments/nginx/nginx.yml.erb +0 -62
@@ -0,0 +1,30 @@
+ # This file assumes bosh_exporter based Service Discovery is being used: ./monitor-bosh.yml
+
+ # Exporter jobs
+ - type: replace
+   path: /instance_groups/name=prometheus/jobs/-
+   value:
+     name: kube_state_metrics_exporter
+     release: prometheus
+     properties:
+       kube_state_metrics_exporter:
+         apiserver: "((kubernetes_apiserver))"
+         kubeconfig: ((kubernetes_kubeconfig))
+
+ # Prometheus Alerts
+ - type: replace
+   path: /instance_groups/name=prometheus/jobs/name=kubernetes_alerts?/release
+   value: prometheus
+
+ - type: replace
+   path: /instance_groups/name=prometheus/jobs/name=prometheus/properties/prometheus/rule_files/-
+   value: /var/vcap/jobs/kubernetes_alerts/*.alerts
+
+ # Grafana Dashboards
+ - type: replace
+   path: /instance_groups/name=grafana/jobs/name=kubernetes_dashboards?/release
+   value: prometheus
+
+ - type: replace
+   path: /instance_groups/name=grafana/jobs/name=grafana/properties/grafana/prometheus/dashboard_files/-
+   value: /var/vcap/jobs/kubernetes_dashboards/*.json
@@ -0,0 +1,184 @@
+ name: prometheus
+
+ instance_groups:
+ - name: alertmanager
+   azs:
+   - z1
+   instances: 1
+   vm_type: default
+   persistent_disk_type: default
+   stemcell: default
+   networks:
+   - name: platform_net
+     static_ips: [<%= @private_subnet[15] %>]
+   jobs:
+   - name: alertmanager
+     release: prometheus
+     properties:
+       alertmanager:
+         mesh:
+           password: ((alertmanager_mesh_password))
+         route:
+           receiver: default
+         receivers:
+         - name: default
+         test_alert:
+           daily: true
+
+ - name: prometheus
+   azs:
+   - z1
+   instances: 1
+   vm_type: default
+   persistent_disk_type: default
+   stemcell: default
+   networks:
+   - name: platform_net
+     static_ips: [<%= @private_subnet[16] %>]
+   jobs:
+   - name: prometheus
+     release: prometheus
+     properties:
+       prometheus:
+         rule_files:
+         - /var/vcap/jobs/postgres_alerts/*.alerts
+         - /var/vcap/jobs/prometheus_alerts/*.alerts
+         scrape_configs:
+         - job_name: prometheus
+           static_configs:
+           - targets:
+             - localhost:9090
+   - name: postgres_alerts
+     release: prometheus
+   - name: prometheus_alerts
+     release: prometheus
+
+ - name: database
+   azs:
+   - z1
+   instances: 1
+   vm_type: default
+   persistent_disk_type: default
+   stemcell: default
+   networks:
+   - name: platform_net
+   jobs:
+   - name: postgres
+     release: postgres
+     properties:
+       databases:
+         port: 5432
+         databases:
+         - name: grafana
+           citext: true
+         roles:
+         - name: grafana
+           password: ((postgres_grafana_password))
+   - name: postgres_exporter
+     release: prometheus
+     properties:
+       postgres_exporter:
+         datasource_name: postgresql://grafana:((postgres_grafana_password))@127.0.0.1:5432/?sslmode=disable
+
+ - name: grafana
+   azs:
+   - z1
+   instances: 1
+   vm_type: default
+   persistent_disk_type: default
+   stemcell: default
+   networks:
+   - name: platform_net
+     static_ips: [<%= @private_subnet[17] %>]
+   jobs:
+   - name: grafana
+     release: prometheus
+     properties:
+       grafana:
+         database:
+           type: postgres
+           port: 5432
+           name: grafana
+           user: grafana
+           password: ((postgres_grafana_password))
+         session:
+           provider: postgres
+           provider_port: 5432
+           provider_name: grafana
+           provider_user: grafana
+           provider_password: ((postgres_grafana_password))
+         security:
+           admin_user: admin
+           admin_password: ((grafana_password))
+           secret_key: ((grafana_secret_key))
+         dashboards:
+           json:
+             enabled: true
+         prometheus:
+           dashboard_files:
+           - /var/vcap/jobs/grafana_dashboards/*.json
+           - /var/vcap/jobs/postgres_dashboards/*.json
+           - /var/vcap/jobs/prometheus_dashboards/*.json
+   - name: grafana_dashboards
+     release: prometheus
+   - name: postgres_dashboards
+     release: prometheus
+   - name: prometheus_dashboards
+     release: prometheus
+
+ - name: nginx
+   azs:
+   - z1
+   instances: 1
+   vm_type: default
+   stemcell: default
+   networks:
+   - name: platform_net
+     static_ips: [<%= @private_subnet[18] %>]
+   jobs:
+   - name: nginx
+     release: prometheus
+     properties:
+       nginx:
+         alertmanager:
+           auth_username: admin
+           auth_password: ((alertmanager_password))
+         prometheus:
+           auth_username: admin
+           auth_password: ((prometheus_password))
+
+ variables:
+ - name: alertmanager_password
+   type: password
+ - name: alertmanager_mesh_password
+   type: password
+ - name: prometheus_password
+   type: password
+ - name: postgres_grafana_password
+   type: password
+ - name: grafana_password
+   type: password
+ - name: grafana_secret_key
+   type: password
+
+ update:
+   canaries: 1
+   max_in_flight: 32
+   canary_watch_time: 1000-100000
+   update_watch_time: 1000-100000
+   serial: false
+
+ stemcells:
+ - alias: default
+   os: ubuntu-trusty
+   version: latest
+
+ releases:
+ - name: postgres
+   version: "20"
+   url: https://bosh.io/d/github.com/cloudfoundry/postgres-release?v=20
+   sha1: 3f378bcab294e20316171d4e656636df88763664
+ - name: prometheus
+   version: 18.6.2
+   url: https://github.com/cloudfoundry-community/prometheus-boshrelease/releases/download/v18.6.2/prometheus-18.6.2.tgz
+   sha1: f6b7ed381a28ce8fef99017a89e1122b718d5556
@@ -10,9 +10,9 @@
  
  Fill out the "token" field in `deployments/concourse/concourse.yml` with root token received from `vault init`.
  
- Deploy Concourse by running the script with the required arguments
+ Deploy Concourse by running the script with the Vault token as argument (strong passwords for Concourse auth and DB will be generated automatically)
  ```
- ./bin/concourse-deploy.sh *concourse_auth_password* *concourse_db_password* *vault_token*
+ ./bin/concourse-deploy.sh *vault_token*
  ```
  
  ### Connect GitHub oAuth
@@ -0,0 +1,14 @@
+ #### [Back](../README.md)
+
+ ## Ingress
+
+ ### Prerequisites
+
+ - BOSH environment [ready](bosh.md)
+ - All hostnames resolve to the VIP configured in cloud.yml (this is mandatory to issue SSL certificates)
+
+ ### Deployment
+
+ To deploy Ingress, use `./bin/ingress-deploy.sh`
+
+ After each new component is deployed, run `./bin/ingress-update`
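To illustrate the second prerequisite above: every hostname served through the Ingress needs a DNS A record pointing at the VIP set in `cloud.yml`. A rough sketch of that relationship follows; the key names and hostnames are hypothetical placeholders, since the full `config/cloud.yml` skeleton is not shown in this diff.

```
# Hypothetical cloud.yml excerpt; key names are illustrative only
ingress:
  vip: 203.0.113.10   # reserved public IP the Ingress listens on
# Every hostname served through the Ingress (e.g. grafana.example.com,
# prometheus.example.com) needs a DNS A record resolving to the VIP above,
# otherwise SSL certificates cannot be issued.
```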
data/tpl/aws/docs/kops.md CHANGED
@@ -22,17 +22,14 @@ export AWS_ACCESS_KEY_ID=<access key>
  export AWS_SECRET_ACCESS_KEY=<secret key>
  ```
  
- Create cluster configuration
+ Deploy the `kops` cluster
  ```
- kops create cluster --name *kops.example.com* --state "s3://kops-example-state-store" --zones *eu-central-1b* --ssh-public-key *path to SSH key*
+ ./bin/kops-deploy.sh
  ```
  
- Review and edit cluster configuration if needed
- ```
- kops edit cluster --name *kops.example.com* --state "s3://kops-example-state-store"
- ```
+ ### Teardown
  
- Build the cluster
+ To tear down the kops cluster you've created, just run
  ```
- kops update cluster --name *kops.example.com* --state "s3://kops-example-state-store" --yes
+ ./bin/kops-delete.sh
  ```
@@ -0,0 +1,24 @@
+ #### [Back](../README.md)
+
+ ## OAuth (UAA)
+
+ ### Configuration
+
+ If you want to add initial groups and users, change the OAuth look and feel,
+ configure mail, etc., you should edit `config/oauth.yml`.
+
+ Here are links to the UAA configuration documentation:
+
+ * __users:__ [uaa.scim.users](https://bosh.io/jobs/uaa?source=github.com/cloudfoundry/uaa-release&version=52#p=uaa.scim.users)
+ * __groups:__ [uaa.scim.groups](https://bosh.io/jobs/uaa?source=github.com/cloudfoundry/uaa-release&version=52#p=uaa.scim.groups)
+ * __oauth clients:__ [uaa.clients](https://bosh.io/jobs/uaa?source=github.com/cloudfoundry/uaa-release&version=52#p=uaa.clients)
+ * __theming:__ [login.branding](https://bosh.io/jobs/uaa?source=github.com/cloudfoundry/uaa-release&version=52#p=login.branding)
+ * __email notifications:__ [login.smtp](https://bosh.io/jobs/uaa?source=github.com/cloudfoundry/uaa-release&version=52#p=login.smtp)
+
+ ### Deployment
+
+ After editing the config, run `./bin/oauth-deploy.sh`
+
+ ### Usage
+
+ To check if OAuth works, visit [<%= @values['oauth']['hostname'] %>](<%= @values['oauth']['url'] %>).
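For orientation, here is a minimal sketch of the kind of UAA properties those five links document. The user, group, and client id below are placeholders, and the exact structure kite expects inside `config/oauth.yml` may differ from the raw uaa-release properties shown here.

```
# Hedged sketch of uaa-release style properties; all names are placeholders
uaa:
  scim:
    users:
    - name: jane@example.com            # placeholder initial user
      password: ((jane_password))
      groups:
      - scim.read
    groups:
      team.admins: "Placeholder admin group"
  clients:
    my-dashboard:                        # placeholder OAuth client id
      authorized-grant-types: authorization_code
      scope: openid
      secret: ((my_dashboard_secret))
      redirect-uri: https://dashboard.example.com/callback
```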
@@ -0,0 +1,31 @@
+ #### [Back](../README.md)
+
+ ## Prometheus
+
+ ### Prerequisites
+
+ - BOSH environment [ready](bosh.md)
+ - Kops cluster [deployed](kops.md)
+
+ ### Setup
+
+ Enter the path to your Kubernetes config in `config/cloud.yml` and add the Kubernetes API server address to `config/bosh_vars.yml`.
+
+ Afterwards, deploy Prometheus
+ ```
+ ./bin/prometheus-deploy.sh
+ ```
+
+ ### Access
+
+ Once the deployment is done, you can reach each Prometheus stack component's web UI as follows.
+
+ If you have [Ingress](ingress.md) deployed and DNS records created, each Prometheus stack component should be accessible at its respective address.
+
+ Without Ingress:
+
+ - AlertManager: http://10.0.0.18:9093
+ - Grafana: http://10.0.0.18:3000
+ - Prometheus: http://10.0.0.18:9090
+
+ You can find the related credentials in `config/creds.yml`
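As a concrete sketch of the Setup step above: the `((kubernetes_apiserver))` and `((kubernetes_kubeconfig))` variables referenced by the `monitor-kubernetes.yml` ops file earlier in this diff have to be supplied from those two config files. The key names below are assumptions for illustration, not taken from this diff.

```
# config/bosh_vars.yml -- assumed key name, matching the ((kubernetes_apiserver))
# variable used by the monitor-kubernetes.yml ops file
kubernetes_apiserver: https://api.kops.example.com

# config/cloud.yml -- hypothetical key for the kubeconfig location; the real
# key in kite's cloud.yml skeleton may differ
kubernetes:
  kubeconfig: ~/.kube/config
```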
@@ -0,0 +1,8 @@
+ resource "aws_s3_bucket" "kite_bucket" {
+   bucket = "${var.bucket_name}"
+
+   tags {
+     Name      = "${var.bucket_name}"
+     Component = "kite-platform"
+   }
+ }
@@ -182,6 +182,33 @@ resource "aws_security_group" "bosh_sg" {
182
182
  }
183
183
  }
184
184
 
185
+ # Create an Ingress security group
186
+ resource "aws_security_group" "ingress_sg" {
187
+ name = "ingress-sg"
188
+ description = "Ingress security group"
189
+ vpc_id = <%= "\"#{conditional_vpc_id(@values)}\"" %>
190
+ tags {
191
+ Name = "ingress-sg"
192
+ Component = "ingress"
193
+ }
194
+
195
+ # outbound internet access
196
+ egress {
197
+ from_port = 0
198
+ to_port = 0
199
+ protocol = "-1"
200
+ cidr_blocks = ["0.0.0.0/0"]
201
+ }
202
+
203
+ # inbound HTTP access
204
+ ingress {
205
+ from_port = 80
206
+ to_port = 80
207
+ protocol = "tcp"
208
+ cidr_blocks = ["0.0.0.0/0"]
209
+ }
210
+ }
211
+
185
212
  # Create a Concourse security group
186
213
  resource "aws_security_group" "concourse_sg" {
187
214
  name = "concourse-sg"
@@ -6,6 +6,10 @@ output "platform_subnet_id" {
6
6
  value = "${aws_subnet.platform_net.id}"
7
7
  }
8
8
 
9
+ output "dmz_subnet_id" {
10
+ value = "${aws_subnet.platform_dmz.id}"
11
+ }
12
+
9
13
  output "bastion_ip" {
10
14
  value = "${aws_instance.bastion.public_ip}"
11
15
  }
@@ -16,5 +16,6 @@ private_subnet_cidr = "<%= @values['aws']['private_subnet']['network'] %>"
  
  # Kite config
  keypair_name = "<%= @values['kite']['keypair_name'] %>"
+ bucket_name = "<%= @values['kite']['bucket_name'] %>"
  public_key = "<%= @values['kite']['public_key_path'] %>"
  private_key = "<%= @values['kite']['private_key_path'] %>"
@@ -18,6 +18,10 @@ variable "keypair_name" {
18
18
  type = "string"
19
19
  }
20
20
 
21
+ variable "bucket_name" {
22
+ type = "string"
23
+ }
24
+
21
25
  variable "region" {
22
26
  type = "string"
23
27
  default = "eu-central-1"
data/tpl/gcp/README.md CHANGED
@@ -1,13 +1,14 @@
- ## GCP Cloud
+ # GCP Cloud
  
- ### Setup
+ ## Setup
  
+ ### Prerequisites
  Set path to your service account credentials:
  ```
  export GOOGLE_CREDENTIALS=*~/credentials/service-account.json*
  ```
  
-
+ ### Setup the basic infrastructure and bastion
  Apply terraform code
  ```
  pushd terraform && terraform init && terraform apply && popd
@@ -16,6 +17,7 @@ pushd terraform && terraform init && terraform apply && popd
  [Note]
  To destroy Bastion later, use `terraform destroy -target google_compute_instance.bastion`
  
+ ### Setup BOSH
  Render BOSH manifest and related files
  ```
  kite render manifest bosh --cloud gcp
@@ -23,6 +25,16 @@ kite render manifest bosh --cloud gcp
  
  Prepare BOSH environment using instructions from [docs/bosh.md](docs/bosh.md)
  
+ ### Setup INGRESS
+ Render Ingress manifest and related files
+ ```
+ kite render manifest ingress --cloud gcp
+ ```
+
+ Follow instructions from [docs/ingress.md](docs/ingress.md) to deploy Ingress
+
+
+ ### Setup VAULT
  Render Vault deployment
  ```
  kite render manifest vault --cloud gcp
@@ -30,6 +42,10 @@ kite render manifest vault --cloud gcp
  
  Follow instructions from [docs/vault.md](docs/vault.md) to deploy Vault
  
+ ### Setup CONCOURSE
+ [Note]
+ To expose Concourse publicly, you must first manually create a virtual IP in GCP and create a DNS A record for the Concourse hostname pointing at this IP. Then set the IP in `config/cloud.yml` (`concourse.vip`); a sketch of that excerpt follows below.
+
  Render Concourse manifest
  ```
  kite render manifest concourse --cloud gcp
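To make that Concourse note concrete: reserve the static IP up front (for example with `gcloud compute addresses create`), point a DNS A record for the Concourse hostname at it, and record the address in `config/cloud.yml`. Only the `concourse.vip` path is named by the README; the surrounding structure below is an assumption.

```
# config/cloud.yml excerpt; only the concourse.vip key path is confirmed above
concourse:
  vip: 203.0.113.10   # manually reserved GCP static IP, target of the DNS A record
```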