kite 0.1.0 → 0.2.0
- checksums.yaml +4 -4
- data/.gitignore +2 -0
- data/CHANGELOG.md +18 -1
- data/Dockerfile +30 -9
- data/Makefile +21 -9
- data/README.md +9 -0
- data/bin/concourse/check +12 -0
- data/bin/concourse/in +12 -0
- data/bin/concourse/out +46 -0
- data/config/pipelines/review.yml +158 -0
- data/config/pipelines/tasks/create-pull-requests-tag.yml +13 -0
- data/config/pipelines/tasks/create-repository-tag.yml +13 -0
- data/config/pipelines/tasks/run-master-tests.yml +12 -0
- data/config/pipelines/tasks/run-pr-tests.yml +12 -0
- data/kite.gemspec +1 -1
- data/lib/kite/generate.rb +39 -0
- data/lib/kite/helpers/concourse.rb +36 -0
- data/lib/kite/render.rb +68 -2
- data/lib/kite/version.rb +1 -1
- data/tpl/aws/README.md +20 -0
- data/tpl/aws/bin/concourse-deploy.sh.tt +4 -1
- data/tpl/aws/bin/ingress-deploy.sh.tt +7 -0
- data/tpl/aws/bin/ingress-update.sh.tt +7 -0
- data/tpl/aws/bin/kops-delete.sh.erb +5 -0
- data/tpl/aws/bin/kops-deploy.sh.erb +11 -0
- data/tpl/aws/bin/oauth-deploy.sh.tt +17 -0
- data/tpl/aws/bin/prometheus-deploy.sh.tt +23 -0
- data/tpl/aws/bosh-vars.yml.erb +1 -0
- data/tpl/aws/config/oauth.yml +59 -0
- data/tpl/aws/deployments/bosh/cloud-config.yml.tt +17 -1
- data/tpl/aws/deployments/concourse/concourse.yml.tt +6 -0
- data/tpl/aws/deployments/ingress/ingress.yml.erb +78 -0
- data/tpl/aws/deployments/oauth/oauth.yml.tt +95 -0
- data/tpl/aws/deployments/prometheus/monitor-bosh.yml +518 -0
- data/tpl/aws/deployments/prometheus/monitor-kubernetes.yml +30 -0
- data/tpl/aws/deployments/prometheus/prometheus.yml.tt +184 -0
- data/tpl/aws/docs/concourse.md +2 -2
- data/tpl/aws/docs/ingress.md +14 -0
- data/tpl/aws/docs/kops.md +5 -8
- data/tpl/aws/docs/oauth.md +24 -0
- data/tpl/aws/docs/prometheus.md +31 -0
- data/tpl/aws/terraform/kite_bucket.tf +8 -0
- data/tpl/aws/terraform/network.tf.tt +27 -0
- data/tpl/aws/terraform/outputs.tf +4 -0
- data/tpl/aws/terraform/terraform.tfvars.tt +1 -0
- data/tpl/aws/terraform/variables.tf +4 -0
- data/tpl/gcp/README.md +19 -3
- data/tpl/gcp/bin/base/setup-tunnel.sh.tt +8 -3
- data/tpl/gcp/bin/bosh-install.sh.tt +4 -0
- data/tpl/gcp/bin/concourse-deploy.sh.tt +4 -1
- data/tpl/gcp/bin/ingress-deploy.sh.tt +7 -0
- data/tpl/gcp/bin/ingress-update.sh.tt +7 -0
- data/tpl/gcp/bin/oauth-deploy.sh.tt +19 -0
- data/tpl/gcp/bin/prometheus-deploy.sh.tt +23 -0
- data/tpl/gcp/bin/vault-deploy.sh.tt +1 -1
- data/tpl/gcp/bosh-vars.yml.erb +1 -0
- data/tpl/gcp/config/oauth.yml +59 -0
- data/tpl/gcp/deployments/bosh/cloud-config.yml.tt +17 -3
- data/tpl/gcp/deployments/concourse/concourse.yml.tt +15 -5
- data/tpl/gcp/deployments/ingress/ingress.yml.erb +111 -0
- data/tpl/gcp/deployments/oauth/oauth.yml.tt +95 -0
- data/tpl/gcp/deployments/prometheus/monitor-bosh.yml +518 -0
- data/tpl/gcp/deployments/prometheus/monitor-kubernetes.yml +30 -0
- data/tpl/gcp/deployments/prometheus/prometheus.yml +183 -0
- data/tpl/gcp/docs/bosh.md +5 -0
- data/tpl/gcp/docs/concourse.md +3 -3
- data/tpl/gcp/docs/ingress.md +12 -0
- data/tpl/gcp/docs/oauth.md +24 -0
- data/tpl/gcp/docs/prometheus.md +27 -0
- data/tpl/gcp/docs/vault.md +2 -1
- data/tpl/gcp/terraform/main.tf +6 -1
- data/tpl/gcp/terraform/outputs.tf +4 -0
- data/tpl/service/%output_path%/charts/%app_name%/Chart.yaml.tt +4 -0
- data/tpl/service/%output_path%/charts/%app_name%/templates/NOTES.txt.tt +19 -0
- data/tpl/service/%output_path%/charts/%app_name%/templates/_helpers.tpl +16 -0
- data/tpl/service/%output_path%/charts/%app_name%/templates/deployment.yaml +37 -0
- data/tpl/service/%output_path%/charts/%app_name%/templates/ingress.yaml +32 -0
- data/tpl/service/%output_path%/charts/%app_name%/templates/service.yaml +19 -0
- data/tpl/service/%output_path%/charts/%app_name%/values.yaml.tt +37 -0
- data/tpl/service/%output_path%/environments/.keep +0 -0
- data/tpl/service/%output_path%/pipelines/review.yml.tt +189 -0
- data/tpl/service/%output_path%/pipelines/tasks/create-pull-requests-tag.yml.tt +13 -0
- data/tpl/service/%output_path%/pipelines/tasks/create-repository-tag.yml.tt +13 -0
- data/tpl/service/%output_path%/pipelines/tasks/helm-deploy.yml.tt +22 -0
- data/tpl/service/%output_path%/pipelines/tasks/run-master-tests.yml.tt +12 -0
- data/tpl/service/%output_path%/pipelines/tasks/run-pr-tests.yml.tt +12 -0
- data/tpl/service/Dockerfile.tt +4 -0
- data/tpl/service/Makefile.tt +28 -0
- data/tpl/service/VERSION.tt +1 -0
- data/tpl/service/docs/pipeline.md.tt +58 -0
- data/tpl/skel/config/cloud.yml +30 -5
- metadata +58 -5
- data/tpl/gcp/deployments/nginx/nginx.yml.erb +0 -62
data/tpl/gcp/deployments/prometheus/monitor-kubernetes.yml ADDED
@@ -0,0 +1,30 @@
+# This file assumes bosh_exporter based Service Discovery is being used: ./monitor-bosh.yml
+
+# Exporter jobs
+- type: replace
+  path: /instance_groups/name=prometheus/jobs/-
+  value:
+    name: kube_state_metrics_exporter
+    release: prometheus
+    properties:
+      kube_state_metrics_exporter:
+        apiserver: "((kubernetes_apiserver))"
+        kubeconfig: ((kubernetes_kubeconfig))
+
+# Prometheus Alerts
+- type: replace
+  path: /instance_groups/name=prometheus/jobs/name=kubernetes_alerts?/release
+  value: prometheus
+
+- type: replace
+  path: /instance_groups/name=prometheus/jobs/name=prometheus/properties/prometheus/rule_files/-
+  value: /var/vcap/jobs/kubernetes_alerts/*.alerts
+
+# Grafana Dashboards
+- type: replace
+  path: /instance_groups/name=grafana/jobs/name=kubernetes_dashboards?/release
+  value: prometheus
+
+- type: replace
+  path: /instance_groups/name=grafana/jobs/name=grafana/properties/grafana/prometheus/dashboard_files/-
+  value: /var/vcap/jobs/kubernetes_dashboards/*.json
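This ops file only patches jobs into an existing Prometheus manifest (its first line also requires `monitor-bosh.yml` for service discovery), so it is easiest to sanity-check by interpolating it locally before deploying. A minimal sketch with the bosh v2 CLI; the file paths follow the template layout in the file list above, and the two Kubernetes values are placeholders:

```
bosh interpolate deployments/prometheus/prometheus.yml \
  -o deployments/prometheus/monitor-bosh.yml \
  -o deployments/prometheus/monitor-kubernetes.yml \
  -v kubernetes_apiserver=https://<apiserver-host> \
  --var-file kubernetes_kubeconfig=<path-to-kubeconfig>
```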
data/tpl/gcp/deployments/prometheus/prometheus.yml ADDED
@@ -0,0 +1,183 @@
+name: prometheus
+
+instance_groups:
+- name: alertmanager
+  azs:
+  - z1
+  instances: 1
+  vm_type: common
+  persistent_disk_type: common
+  stemcell: default
+  networks:
+  - name: public
+    static_ips: [<%= @private_subnet[14] %>]
+  jobs:
+  - name: alertmanager
+    release: prometheus
+    properties:
+      alertmanager:
+        mesh:
+          password: ((alertmanager_mesh_password))
+        route:
+          receiver: default
+        receivers:
+        - name: default
+        test_alert:
+          daily: true
+
+- name: prometheus
+  azs:
+  - z1
+  instances: 1
+  vm_type: common
+  persistent_disk_type: database
+  stemcell: default
+  networks:
+  - name: public
+    static_ips: [<%= @private_subnet[16] %>]
+  jobs:
+  - name: prometheus
+    release: prometheus
+    properties:
+      prometheus:
+        rule_files:
+        - /var/vcap/jobs/postgres_alerts/*.alerts
+        - /var/vcap/jobs/prometheus_alerts/*.alerts
+        scrape_configs:
+        - job_name: prometheus
+          static_configs:
+          - targets:
+            - localhost:9090
+  - name: postgres_alerts
+    release: prometheus
+  - name: prometheus_alerts
+    release: prometheus
+
+- name: database
+  azs:
+  - z1
+  instances: 1
+  vm_type: common
+  persistent_disk_type: database
+  stemcell: default
+  networks:
+  - name: public
+  jobs:
+  - name: postgres
+    release: postgres
+    properties:
+      databases:
+        port: 5432
+        databases:
+        - name: grafana
+          citext: true
+        roles:
+        - name: grafana
+          password: ((postgres_grafana_password))
+  - name: postgres_exporter
+    release: prometheus
+    properties:
+      postgres_exporter:
+        datasource_name: postgresql://grafana:((postgres_grafana_password))@127.0.0.1:5432/?sslmode=disable
+
+- name: grafana
+  azs:
+  - z1
+  instances: 1
+  vm_type: common
+  persistent_disk_type: database
+  stemcell: default
+  networks:
+  - name: public
+    static_ips: [<%= @private_subnet[15] %>]
+  jobs:
+  - name: grafana
+    release: prometheus
+    properties:
+      grafana:
+        database:
+          type: postgres
+          port: 5432
+          name: grafana
+          user: grafana
+          password: ((postgres_grafana_password))
+        session:
+          provider: postgres
+          provider_port: 5432
+          provider_name: grafana
+          provider_user: grafana
+          provider_password: ((postgres_grafana_password))
+        security:
+          admin_user: admin
+          admin_password: ((grafana_password))
+          secret_key: ((grafana_secret_key))
+        dashboards:
+          json:
+            enabled: true
+        prometheus:
+          dashboard_files:
+          - /var/vcap/jobs/grafana_dashboards/*.json
+          - /var/vcap/jobs/postgres_dashboards/*.json
+          - /var/vcap/jobs/prometheus_dashboards/*.json
+  - name: grafana_dashboards
+    release: prometheus
+  - name: postgres_dashboards
+    release: prometheus
+  - name: prometheus_dashboards
+    release: prometheus
+
+- name: nginx
+  azs:
+  - z1
+  instances: 1
+  vm_type: common
+  stemcell: default
+  networks:
+  - name: public
+  jobs:
+  - name: nginx
+    release: prometheus
+    properties:
+      nginx:
+        alertmanager:
+          auth_username: admin
+          auth_password: ((alertmanager_password))
+        prometheus:
+          auth_username: admin
+          auth_password: ((prometheus_password))
+
+variables:
+- name: alertmanager_password
+  type: password
+- name: alertmanager_mesh_password
+  type: password
+- name: prometheus_password
+  type: password
+- name: postgres_grafana_password
+  type: password
+- name: grafana_password
+  type: password
+- name: grafana_secret_key
+  type: password
+
+update:
+  canaries: 1
+  max_in_flight: 32
+  canary_watch_time: 1000-100000
+  update_watch_time: 1000-100000
+  serial: false
+
+stemcells:
+- alias: default
+  os: ubuntu-trusty
+  version: latest
+
+releases:
+- name: postgres
+  version: "20"
+  url: https://bosh.io/d/github.com/cloudfoundry/postgres-release?v=20
+  sha1: 3f378bcab294e20316171d4e656636df88763664
+- name: prometheus
+  version: 18.6.2
+  url: https://github.com/cloudfoundry-community/prometheus-boshrelease/releases/download/v18.6.2/prometheus-18.6.2.tgz
+  sha1: f6b7ed381a28ce8fef99017a89e1122b718d5556
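The release ships a `bin/prometheus-deploy.sh` wrapper for this manifest (see the file list above). The script itself is not shown in this diff, but a plain bosh CLI invocation along these lines should be roughly equivalent; the environment alias and paths are placeholders, and `--vars-store` would generate the passwords declared under `variables:`, matching the docs' mention of `config/creds.yml`:

```
bosh -e kite -d prometheus deploy deployments/prometheus/prometheus.yml \
  -o deployments/prometheus/monitor-bosh.yml \
  -o deployments/prometheus/monitor-kubernetes.yml \
  --vars-file config/bosh_vars.yml \
  --vars-store config/creds.yml
```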
data/tpl/gcp/docs/bosh.md CHANGED
data/tpl/gcp/docs/concourse.md CHANGED
@@ -10,9 +10,9 @@
 
 Fill out the "token" field in `deployments/concourse/concourse.yml` with root token received from `vault init`.
 
-Deploy Concourse by running the script with the
+Deploy Concourse by running the script with the Vault token as argument(strong passwords for Concourse auth and db will be generated automatically)
 ```
-./bin/concourse-deploy.sh *
+./bin/concourse-deploy.sh *vault_token*
 ```
 
 ### Connect GitHub oAuth
@@ -34,7 +34,7 @@ To run a test Concourse job:
 - Fill out `test-credentials.yml`
 - Add necessary secrets to your Vault(see [docs/vault.md](docs/vault.md))
 - Download the `fly` client from Concourse web panel and add it to your PATH: `mv *path_to_fly* /usr/local/bin`
-- Login to Concourse using the `fly` client: `fly -t ci --concourse-url *concourse-url*`
+- Login to Concourse using the `fly` client: `fly login -t ci --concourse-url *concourse-url*`
 - Create a test pipeline with `fly set-pipeline -t ci -c test-pipeline.yml -p test --load-vars-from test-credentials.yml -n`
 - Unpause pipeline: `fly unpause-pipeline -t ci -p test`
 - Trigger and unpause the test job: `fly trigger-job -t ci -j test/test-publish`
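For reference, the test-job steps above chain together as follows; the `ci` target and `test` pipeline names are taken from the doc itself, and the Concourse URL is a placeholder:

```
fly login -t ci --concourse-url https://<concourse-url>
fly set-pipeline -t ci -c test-pipeline.yml -p test --load-vars-from test-credentials.yml -n
fly unpause-pipeline -t ci -p test
fly trigger-job -t ci -j test/test-publish
```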
data/tpl/gcp/docs/ingress.md ADDED
@@ -0,0 +1,12 @@
+#### [Back](../README.md)
+
+## Ingress
+
+### Prerequisites
+
+- BOSH environment [ready](bosh.md)
+- All hostnames resolve to the VIP configured in cloud.yml (this is mandatory to issue SSL certificates)
+
+### Deployment
+
+To deploy Ingress, use `./bin/ingress-deploy.sh`
data/tpl/gcp/docs/oauth.md ADDED
@@ -0,0 +1,24 @@
+#### [Back](../README.md)
+
+## OAuth (UAA)
+
+### Configuration
+
+If you want to add initial groups and users, change oauth look,
+configure mail, etc. - you should edit `config/oauth.yml`.
+
+Here are links to uaa config documentation:
+
+* __users:__ [uaa.scim.users](https://bosh.io/jobs/uaa?source=github.com/cloudfoundry/uaa-release&version=52#p=uaa.scim.users)
+* __groups:__ [uaa.scim.groups](https://bosh.io/jobs/uaa?source=github.com/cloudfoundry/uaa-release&version=52#p=uaa.scim.groups)
+* __oauth clients:__ [uaa.clients](https://bosh.io/jobs/uaa?source=github.com/cloudfoundry/uaa-release&version=52#p=uaa.clients)
+* __theming:__ [login.branding](https://bosh.io/jobs/uaa?source=github.com/cloudfoundry/uaa-release&version=52#p=login.branding)
+* __email notifications:__ [login.smtp](https://bosh.io/jobs/uaa?source=github.com/cloudfoundry/uaa-release&version=52#p=login.smtp)
+
+### Deployment
+
+After editing config, run `./bin/oauth-deploy.sh`
+
+### Usage
+
+To check if OAuth works, visit [<%= @values['oauth']['hostname'] %>](<%= @values['oauth']['url'] %>).
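Besides opening the login page in a browser, the UAA endpoint can be smoke-tested from a shell; `/info` is a standard UAA route, and the hostname below is a placeholder:

```
curl -k -H "Accept: application/json" https://<oauth-hostname>/info
```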
data/tpl/gcp/docs/prometheus.md ADDED
@@ -0,0 +1,27 @@
+#### [Back](../README.md)
+
+## Prometheus
+
+### Prerequisites
+
+- BOSH environment [ready](bosh.md)
+- Kops cluster [deployed](kops.md)
+
+### Setup
+
+Enter path to your Kubernetes config in `config/cloud.yml` and add the Kubernetes API server address to `config/bosh_vars.yml`.
+
+Afterwards, deploy Prometheus
+```
+./bin/prometheus-deploy.sh
+```
+
+### Access
+
+After the deployment process is done, you can reach each Prometheus' component's web UI at:
+
+- AlertManager: http://10.0.0.14:9093
+- Grafana: http://10.0.0.15:3000
+- Prometheus: http://10.0.0.16:9090
+
+You can find related credentials in `config/creds.yml`
data/tpl/gcp/docs/vault.md CHANGED
@@ -16,7 +16,8 @@ To deploy Vault, use `./bin/vault-deploy.sh`
 
 ### Connection
 
--
+- You can now deploy the ingress to access vault
+- Export your Vault's address using `export VAULT_ADDR=https://*vault_host*`
 - Run `vault init` to initialize the vault
 - Store the keys displayed after init
 - Unseal the vault by running `vault unseal` three times using three keys from the previous step
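Put together, the connection steps above amount to the following; the Vault host is a placeholder, and `vault init`/`vault unseal` are the pre-0.9 command names used throughout these docs:

```
export VAULT_ADDR=https://<vault_host>
vault init     # prints the unseal keys and root token; store them safely
vault unseal   # run three times, supplying a different unseal key each time
vault unseal
vault unseal
```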
data/tpl/gcp/terraform/main.tf CHANGED
@@ -29,7 +29,7 @@ resource "google_compute_address" "bastion" {
 
 resource "google_compute_instance" "bastion" {
   name         = "bastion"
-  machine_type = "
+  machine_type = "g1-small"
   zone         = "${var.zone}"
 
   tags = ["bastion", "platform-internal"]
@@ -63,3 +63,8 @@ EOT
     scopes = ["userinfo-email", "compute-ro", "storage-ro"]
   }
 }
+
+# Ingress
+resource "google_compute_address" "ingress" {
+  name = "ingress-ip"
+}
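The new `google_compute_address.ingress` resource reserves the static IP that the ingress deployment points at. After `terraform apply` it can be inspected directly; whether `outputs.tf` (which also gains four lines in this release) exposes it as an output is not shown in this diff:

```
terraform state show google_compute_address.ingress
```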
data/tpl/service/%output_path%/charts/%app_name%/templates/NOTES.txt.tt ADDED
@@ -0,0 +1,19 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+  http://{{ . }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch the status of by running 'kubectl get svc -w {{ template "fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
+{{- end }}
data/tpl/service/%output_path%/charts/%app_name%/templates/_helpers.tpl ADDED
@@ -0,0 +1,16 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
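The `name` and `fullname` templates defined here are what the deployment, service and ingress manifests below reference. Once a service has been generated from these templates, their expansion can be checked by rendering the chart locally with Helm 2; the release name and chart path are placeholders:

```
helm template --name my-release ./charts/<app_name>
```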
data/tpl/service/%output_path%/charts/%app_name%/templates/deployment.yaml ADDED
@@ -0,0 +1,37 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: {{ template "fullname" . }}
+  labels:
+    app: {{ template "name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  template:
+    metadata:
+      labels:
+        app: {{ template "name" . }}
+        release: {{ .Release.Name }}
+    spec:
+      containers:
+        - name: {{ .Chart.Name }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          ports:
+            - containerPort: {{ .Values.service.internalPort }}
+          livenessProbe:
+            httpGet:
+              path: /
+              port: {{ .Values.service.internalPort }}
+          readinessProbe:
+            httpGet:
+              path: /
+              port: {{ .Values.service.internalPort }}
+          resources:
+{{ toYaml .Values.resources | indent 12 }}
+    {{- if .Values.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+    {{- end }}
data/tpl/service/%output_path%/charts/%app_name%/templates/ingress.yaml ADDED
@@ -0,0 +1,32 @@
+{{- if .Values.ingress.enabled -}}
+{{- $serviceName := include "fullname" . -}}
+{{- $servicePort := .Values.service.externalPort -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ template "fullname" . }}
+  labels:
+    app: {{ template "name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+    {{- range $key, $value := .Values.ingress.annotations }}
+      {{ $key }}: {{ $value | quote }}
+    {{- end }}
+spec:
+  rules:
+    {{- range $host := .Values.ingress.hosts }}
+    - host: {{ $host }}
+      http:
+        paths:
+          - path: /
+            backend:
+              serviceName: {{ $serviceName }}
+              servicePort: {{ $servicePort }}
+    {{- end -}}
+  {{- if .Values.ingress.tls }}
+  tls:
+{{ toYaml .Values.ingress.tls | indent 4 }}
+  {{- end -}}
+{{- end -}}
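This ingress template is gated on `.Values.ingress.enabled` and iterates over `.Values.ingress.hosts`, with defaults coming from the chart's `values.yaml` (added in this release but not shown in this diff). A minimal install that switches the ingress on might look like this; the release name, chart path and hostname are placeholders:

```
helm upgrade --install my-app ./charts/<app_name> \
  --set ingress.enabled=true \
  --set ingress.hosts[0]=app.example.com
```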
|