@colisweb/rescript-toolkit 4.14.14 → 4.14.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,2384 +0,0 @@
1
- #!/usr/bin/env bash
2
-
3
- #VARIABLES
4
- export SCRIPT_FULL_PATH=$(dirname "$0")
5
-
6
- ##FUNCTIONS
7
- # https://stackoverflow.com/questions/1527049/how-can-i-join-elements-of-an-array-in-bash
8
- join_by() {
9
- local d=${1-} f=${2-}
10
- if shift 2; then
11
- printf %s "$f" "${@/#/$d}"
12
- fi
13
- }
14
-
15
- mkstring() {
16
- local start=$1
17
- local separator=$2
18
- local end=$3
19
- shift 3
20
-
21
- if [ $# -gt 0 ]; then
22
- printf $start
23
- join_by $separator $*
24
- printf $end
25
- fi
26
- }
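A minimal usage sketch for the two helpers above (argument values are only illustrative); note the separator is expanded unquoted, so whitespace inside it is lost to word splitting:
  join_by , a b c              # prints: a,b,c
  mkstring "[" "," "]" a b c   # prints: [a,b,c]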
27
-
28
- md5all() {
29
- all_hash=$(mktemp)
30
- for name in $*; do
31
- find $name -type f -exec cat {} \; | md5sum | cut -f1 -d ' ' >> $all_hash
32
- done;
33
- cat $all_hash | md5sum | cut -f1 -d ' '
34
- }
35
-
36
- log() {
37
- echo "$*" >&2
38
- }
39
- #!/usr/bin/env bash
40
-
41
- check_args() {
42
- if [ -z "$2" ] || [ "$1" != "$2" ]; then
43
- echo >&2 "missing argument $1"
44
- return 1
45
- fi
46
- }
47
-
48
- check_env_vars() {
49
- ArgsCount=$1 && shift
50
- for ((i = 0; i < $ArgsCount; i++)); do
51
- if [[ -z "${!1}" ]]; then
52
- echo >&2 "missing ENV $1"
53
- return 1
54
- fi
55
- shift
56
- done
57
- }
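A usage sketch for check_env_vars; the variable names and values below are placeholders for whatever a caller actually requires:
  export ENVIRONMENT=testing APPLICATION=my-service   # example values
  check_env_vars 2 ENVIRONMENT APPLICATION || return 1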
58
-
59
- extract_arg() {
60
- name=$1
61
- passed=$2
62
- value=$3
63
- if [ "--$name" != "$passed" ]; then
64
- echo "missing argument $name"
65
- exit 1
66
- fi
67
- eval $name='$value'
68
- }
69
-
70
- extract_args() {
71
- declare -a Array_Args
72
- ArgsCount=$1 && shift
73
- for ((i = 0; i < $ArgsCount; i++)); do
74
- Array_Args[i]=$1 && shift
75
- done
76
- for ArgName in "${Array_Args[@]}"; do
77
- extract_arg "$ArgName" $* && shift 2
78
- done
79
- }
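A usage sketch for extract_args: the names must be listed in the same order as the --flags that follow, because extract_arg compares them positionally (values here are placeholders):
  extract_args 2 namespace service --namespace testing --service my-service
  echo "$namespace $service"   # prints: testing my-service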
80
-
81
- #!/usr/bin/env bash
82
-
83
- aws_ecr_login() {
84
- PATH=/root/.local/bin:$PATH
85
-
86
- aws ecr get-login-password \
87
- | docker login --username AWS --password-stdin 949316342391.dkr.ecr.eu-west-1.amazonaws.com \
88
- || (echo "you should update to AWS CLI version 2 https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-mac.html " $(aws ecr get-login --region=eu-west-1 --no-include-email) )
89
- }
90
-
91
- aws_ecr_token() {
92
- aws ecr get-authorization-token --region=eu-west-1 --output text --query 'authorizationData[].authorizationToken'
93
- }
94
-
95
- # you will need jq to use these commands. You can install it using "brew install jq"
96
- # delete_images colisweb_api 8
97
- # will delete images older than 8 weeks
98
- delete_images() {
99
-
100
- aws --version | grep -q "^aws-cli/2" || { echo "You must have aws-cli v2 installed to use this script"; return 1; }
101
-
102
- REPO=$1
103
- WEEKS=${2:-16}
104
-
105
- WEEKS_AGO=$(date -j -v-${WEEKS}w +%s)
106
-
107
- #Get all ecr images
108
- IMAGES=$(aws ecr describe-images --repository-name $REPO --output json)
109
-
110
- #Filter unnecessary values and map `imagePushedAt` to EPOCH
111
- TIMED_IMAGES=$(echo $IMAGES | jq .'[]' | jq "map({imagePushedAt: (.imagePushedAt[0:19]+\"Z\" | fromdateiso8601), imageDigest: .imageDigest}) | sort_by(.imagePushedAt) | .[:-1]")
112
-
113
- #Filter on EPOCH
114
- OLD_IMAGES=$(echo $TIMED_IMAGES | jq "map(select (.imagePushedAt < $WEEKS_AGO)) | .[] " | jq -r '.imageDigest')
115
-
116
- while IFS= read -r IMAGE; do
117
- if [ "$IMAGE" != "" ]; then
118
- echo "Deleting $IMAGE from $REPO"
119
- AWS_PAGER="" aws ecr batch-delete-image --repository-name $REPO --image-ids imageDigest=$IMAGE
120
- fi
121
- done <<< "$OLD_IMAGES"
122
- }
123
-
124
- # delete_images_all_repos 12
125
- # will delete images in all repositories older than 12 weeks
126
- delete_images_all_repos() {
127
- REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[]|.[].repositoryName')
128
-
129
- while IFS= read -r REPO; do
130
- echo "processing ECR repository $REPO"
131
- delete_images $REPO $1
132
- done <<< "$REPOSITORIES"
133
- }
134
-
135
- #!/usr/bin/env bash
136
-
137
- image_exists() {
138
- set -e
139
-
140
- REGISTRY=$1
141
- REPOSITORY=$2
142
- IMAGE=$3
143
-
144
- TAGGED_IMAGE="$REGISTRY/$REPOSITORY:$IMAGE"
145
-
146
- aws ecr describe-images --registry-id $REGISTRY --repository-name $REPOSITORY --image-ids "imageTag=$IMAGE"
147
-
148
- if [ $? -eq 0 ]
149
- then
150
- echo "Image $TAGGED_IMAGE already present in distant repo"
151
- return 0
152
- else
153
- echo "Image $TAGGED_IMAGE NOT present in distant repo"
154
- return 1
155
- fi
156
- }
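A usage sketch, assuming the registry id from aws_ecr_login above; the repository and tag are only illustrative:
  image_exists 949316342391 colisweb-api "$CI_COMMIT_SHORT_SHA" && echo "image already pushed, skipping build"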
157
- #!/usr/bin/env bash
158
-
159
- gmm() {
160
- git checkout $1
161
- git pull
162
- git checkout $2
163
- git pull
164
- git merge $1
165
- git push
166
- }
167
-
168
- git_damn_merge() {
169
- git checkout $1
170
- git pull
171
- git checkout $2
172
- git dammit
173
- git merge $1
174
- git push
175
- }
176
-
177
- git_prune_local_branches() {
178
- git branch -r |
179
- awk '{print $1}' |
180
- egrep -v -f /dev/fd/0 <(git branch -vv | grep origin) |
181
- awk '{print $1}' |
182
- xargs git branch -d
183
- }
184
-
185
- gum_checkout() {
186
- git branch -a | cut -f3- -d "/" | gum filter | xargs git checkout
187
- }
188
-
189
- # useful option :
190
- # export GIT_SUBLINE_MERGE_NON_INTERACTIVE_MODE=TRUE
191
- # see https://github.com/paulaltin/git-subline-merge
192
- setup_subline_merge() {
193
- location=${1:-"--local"}
194
-
195
- case $location in
196
- --local)
197
- if [ -d ".git" ]; then
198
- echo "* merge=subline" >>.git/info/attributes
199
- else
200
- echo "Cannot use local option, not in a git repository"
201
- return 1
202
- fi
203
- ;;
204
- --global)
205
- echo "* merge=subline" >>~/.gitattributes
206
- ;;
207
- *)
208
- echo "unknown argument $location"
209
- return 2
210
- ;;
211
- esac
212
-
213
- git config $location merge.conflictStyle diff3
214
- git config $location merge.subline.driver "$colisweb_scripts/shell-session/shell/dev/git-subline-merge %O %A %B %L %P"
215
- git config $location merge.subline.recursive binary
216
- }
217
-
218
- rebase_from_ancestor() {
219
- set -x
220
- branch=$1
221
- tip=$(git rev-parse HEAD)
222
- ancestor=$(git merge-base $branch $tip)
223
- commits=$(git log $ancestor..$tip)
224
- git reset --hard $ancestor
225
- git merge --squash $tip
226
- git commit -m "squashed commmits $commits" || echo "nothing committed"
227
- git rebase $branch -Xtheirs
228
- }
229
-
230
- #!/usr/bin/env bash
231
-
232
- import_all_pgp_keys() {
233
- echo "importing all PGP keys"
234
- gpg --import $SCRIPT_FULL_PATH/pgp_keys/*.key
235
- }
236
-
237
- remove_all_persons_from_secrets() {
238
- echo "cleanup git secret"
239
- WHO_KNOWS=($(git secret whoknows))
240
- git secret removeperson $WHO_KNOWS
241
- echo "Removed secrets access for $WHO_KNOWS"
242
- }
243
-
244
- all_pgp_emails() {
245
- gpg --show-key $SCRIPT_FULL_PATH/pgp_keys/*.key | sed -rn "s/.*<(.*)>/\1/p"
246
- }
247
-
248
- set_all_secret_keys() {
249
-
250
- import_all_pgp_keys
251
-
252
- git secret reveal -f
253
-
254
- remove_all_persons_from_secrets
255
-
256
- if [ $# -eq 0 ]; then
257
- echo "No emails supplied, using dev-tools pgp keys as source"
258
- IN_THE_KNOW=($(gum choose --no-limit $(all_pgp_emails)))
259
- else
260
- IN_THE_KNOW=($*)
261
- fi
262
-
263
- git secret tell $IN_THE_KNOW
264
- git secret hide
265
- git secret whoknows
266
-
267
- echo "all secrets updated, you'll need to commit the changes"
268
- }
269
-
270
- #!/usr/bin/env bash
271
-
272
- start_ssh_bastion() {
273
- ENV=$1
274
- SSH_LOCAL_PORT=$2
275
- POD_NAME=ssh-bastion-$USERNAME
276
- CONFIG_MAP_NAME=ssh-bastion-$USERNAME
277
- configure_kubectl_for $ENV
278
- kubectl get pods -o name | grep pod/$POD_NAME
279
- if [ $? -eq 0 ]; then
280
- echo "$POD_NAME is already running"
281
- else
282
- #configmap
283
- kubectl get configmap $CONFIG_MAP_NAME && kubectl delete configmap $CONFIG_MAP_NAME
284
- tempdir=$(mktemp -d)
285
- cat <<EOF > $tempdir/sshd_config
286
- AllowTcpForwarding yes
287
- Port 2222
288
- PermitRootLogin yes
289
- AuthorizedKeysFile /etc/ssh/authorized_keys
290
- EOF
291
- cp ~/.ssh/id_rsa.pub $tempdir/authorized_keys
292
- kubectl create configmap $CONFIG_MAP_NAME --from-file=$tempdir
293
-
294
- #pod
295
- kubectl get pod $POD_NAME && kubectl delete pod $POD_NAME
296
- cat <<EOF | kubectl create -f -
297
-
298
- apiVersion: v1
299
- kind: Pod
300
- metadata:
301
- name: $POD_NAME
302
- spec:
303
- containers:
304
- - name: $POD_NAME
305
- image: sickp/alpine-sshd:7.4
306
- ports:
307
- - containerPort: 2222
308
- volumeMounts:
309
- - mountPath: /etc/ssh/sshd_config
310
- name: ssh-config
311
- subPath: sshd_config
312
- - mountPath: /etc/ssh/authorized_keys
313
- name: ssh-config
314
- subPath: authorized_keys
315
- volumes:
316
- - name: ssh-config
317
- configMap:
318
- name: $CONFIG_MAP_NAME
319
- EOF
320
-
321
- fi
322
-
323
- # You need a recent kubectl for wait to work (1.15 works), install or upgrade
324
- # with brew :
325
- # brew install kubernetes-cli
326
- # brew upgrade kubernetes-cli
327
- kubectl wait --for=condition=Ready pod/$POD_NAME
328
-
329
- # kube port-forward
330
- lsof -ti tcp:$SSH_LOCAL_PORT | xargs kill
331
- kubectl port-forward $POD_NAME $SSH_LOCAL_PORT:2222 &
332
- while ! nc -z 127.0.0.1 $SSH_LOCAL_PORT; do
333
- sleep 1
334
- done
335
- echo "forwarding ssh via local port $SSH_LOCAL_PORT"
336
- echo "remember to terminate the bastion with 'stop_ssh_bastion'"
337
- }
338
-
339
- stop_ssh_bastion() {
340
- POD_NAME=ssh-bastion-$USERNAME
341
- kubectl delete pod $POD_NAME
342
- }
343
-
344
- #!/usr/bin/env bash
345
-
346
- configure_kubectl_for() {
347
- local infra_env="$1"
348
- local valid_envs="[testing][staging][production][performance][tests][recette]"
349
- echo "$valid_envs" | grep -q "\[$infra_env\]"
350
-
351
- if [ $? -ne 0 ]; then
352
- echo "Cannot configure kubectl for invalid env : $infra_env"
353
- echo "choose one of $valid_envs"
354
- return 1
355
- fi
356
-
357
- aws eks update-kubeconfig --name "toutatis-$infra_env-eks" >&2
358
- }
359
-
360
- #!/usr/bin/env bash
361
-
362
- # WARNING : never try to do a dump directly from the database_production_ca
363
- # this could cause a lot of database lock issues.
364
- # always use database_production_read_replica_ca instead
365
- database_k8s() {
366
- MODE=$1
367
- case $MODE in
368
- "tests") SSH_LOCAL_PORT=2224;PG_LOCAL_PORT=24440;CA_LOCAL_PORT=25430;ENV="tests";;
369
- "testing") SSH_LOCAL_PORT=2225;PG_LOCAL_PORT=24441;CA_LOCAL_PORT=25431;ENV="testing";;
370
- "staging") SSH_LOCAL_PORT=2226;PG_LOCAL_PORT=24442;CA_LOCAL_PORT=25432;ENV="staging";;
371
- "production") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24443;CA_LOCAL_PORT=25433;ENV="production";;
372
- "production_rw") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24444;CA_LOCAL_PORT=25434;ENV="production";;
373
- "recette") SSH_LOCAL_PORT=2228;PG_LOCAL_PORT=24446;CA_LOCAL_PORT=25436;ENV="recette";;
374
- *) echo "Unsupported ENV : $MODE"; return 1 ;;
375
- esac
376
-
377
- start_ssh_bastion $ENV $SSH_LOCAL_PORT
378
-
379
- lsof -ti tcp:$PG_LOCAL_PORT | xargs kill
380
-
381
- bastion_config=$(mktemp)
382
- cat > "$bastion_config" <<EOF
383
- UserKnownHostsFile /dev/null
384
- StrictHostKeyChecking no
385
- User root
386
- Host bastion_tests
387
- HostName 127.0.0.1
388
- Port 2224
389
- LocalForward 24440 toutatis-tests-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
390
- Host bastion_testing
391
- HostName 127.0.0.1
392
- Port 2225
393
- LocalForward 24441 toutatis-testing-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
394
- LocalForward 25431 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
395
- Host bastion_staging
396
- HostName 127.0.0.1
397
- Port 2226
398
- LocalForward 24442 toutatis-staging-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
399
- LocalForward 25432 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
400
- Host bastion_recette
401
- HostName 127.0.0.1
402
- Port 2228
403
- LocalForward 24446 toutatis-recette-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
404
- LocalForward 25436 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
405
- Host bastion_production
406
- HostName 127.0.0.1
407
- Port 2227
408
- LocalForward 24443 toutatis-production-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
409
- LocalForward 25433 api-production-rds-read-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
410
- LocalForward 25435 archive-ca.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
411
- EOF
412
- if [ "$MODE" = "production_rw" ] ; then
413
- cat >> "$bastion_config" <<EOF
414
- LocalForward 24444 toutatis-production-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
415
- LocalForward 25434 api-production-rds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
416
- EOF
417
- fi
418
-
419
- ssh -f -N \
420
- -F "$bastion_config" \
421
- "bastion_$ENV"
422
-
423
- echo "sample command : 'psql postgres://postgres@127.0.0.1:$PG_LOCAL_PORT'"
424
- echo "sample command : 'mysql -u colisweb -h 127.0.0.1 -P $CA_LOCAL_PORT -p db_name'"
425
-
426
- echo "run 'kubectl delete pod $POD_NAME' when you have finished"
427
- }
428
-
429
- psql_on_k8() {
430
- NAMESPACE=$1
431
- SERVICE=$2
432
- CONNECTION=$3
433
- shift 3
434
-
435
- kubectl -n $NAMESPACE run ${SERVICE}-database-init \
436
- --image jbergknoff/postgresql-client \
437
- --restart=Never \
438
- --attach --rm \
439
- -- \
440
- postgresql://${CONNECTION} \
441
- "$*"
442
- }
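A usage sketch mirroring how kube_init_database_once calls psql_on_k8 below; the host, credentials and query are placeholders:
  psql_on_k8 testing my-service "postgres:secret@db-host:5432" -c 'SELECT 1;'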
443
-
444
- mysql_on_k8() {
445
- local namespace=$1
446
- local db_host=$2
447
- local db_port=$3
448
- local db_init_username=$4
449
- local db_init_password=$5
450
- local query=$6
451
-
452
- kubectl -n ${namespace} run datadog-database-init \
453
- --image widdpim/mysql-client \
454
- --restart=Never \
455
- --attach --rm \
456
- -- \
457
- mysql --host=$db_host --user=$db_init_username --password=$db_init_password --port=$db_port --execute="$query"
458
- }
459
- #!/usr/bin/env bash
460
-
461
- kube_init_database_once() {
462
-
463
- extract_args 8 namespace db_host db_port db_init_username db_init_password db_database db_username db_password $*
464
-
465
- echo "======================="
466
- echo " Initializing Database '$db_database' for namespace $namespace"
467
- echo "======================="
468
-
469
- set -x
470
-
471
- echo "Checking if Database '$db_database' exists"
472
- set +e
473
- psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -lqtA | cut -d\| -f1 | grep "^$db_database$"
474
- return_code=$?
475
- set -e
476
-
477
- if [ ${return_code} -eq 0 ]; then
478
- echo "Database $db_database already exists - nothing to do"
479
- else
480
- echo "Database $db_database does not exist - initializing"
481
-
482
- psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'CREATE DATABASE '"$db_database"';'
483
- echo "DB created $db_database"
484
-
485
- psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'CREATE USER '"$db_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
486
- echo "USER created $db_username"
487
-
488
- psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_username"';'
489
- echo "Granted all privileges for $db_username on $db_database"
490
- fi
491
-
492
- echo "======================="
493
- echo " Database '$db_database' Initialization complete for namespace $namespace"
494
- echo "======================="
495
- }
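A usage sketch with placeholder values; the flags must appear in exactly this order because of how extract_args matches them:
  kube_init_database_once \
    --namespace testing \
    --db_host db-host --db_port 5432 \
    --db_init_username postgres --db_init_password 'init-secret' \
    --db_database my_service --db_username my_service_user --db_password 'app-secret'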
496
-
497
- kube_init_database_readonly_account() {
498
-
499
- extract_args 6 namespace service db_connection db_database db_readonly_username db_readonly_password $*
500
-
501
- echo "======================="
502
- echo " Initializing Readonly Account '$db_readonly_username' for '$db_database' for namespace $namespace"
503
- echo "======================="
504
-
505
- # Print commands before execution, except echo
506
- trap '[[ $BASH_COMMAND != echo* ]] && echo $BASH_COMMAND' DEBUG
507
-
508
- echo "Checking if Readonly account '$db_readonly_username' for '$db_database' exists"
509
- set +e
510
- psql_on_k8 $namespace $service $db_connection -qtAc 'SELECT rolname FROM pg_roles;' | grep "^$db_readonly_username$"
511
- return_code=$?
512
- set -e
513
-
514
- if [ ${return_code} -eq 0 ]; then
515
- echo "Account $db_readonly_username already exists - nothing to do"
516
- else
517
- echo "Account $db_readonly_username does not exist - creating"
518
-
519
- psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_readonly_username"' WITH ENCRYPTED PASSWORD '"'$db_readonly_password'"';'
520
- psql_on_k8 $namespace $service $db_connection -c 'GRANT CONNECT ON DATABASE '"$db_database"' TO '"$db_readonly_username"';'
521
- psql_on_k8 $namespace $service $db_connection -c 'GRANT USAGE ON SCHEMA public TO '"$db_readonly_username"';'
522
- psql_on_k8 $namespace $service $db_connection -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO '"$db_readonly_username"';'
523
- psql_on_k8 $namespace $service $db_connection -c 'ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO '"$db_readonly_username"';'
524
-
525
- echo "Created user with read-only permissions for $db_readonly_username on $db_database (schema public)"
526
- fi
527
- }
528
-
529
- kube_init_datadog_in_database() {
530
- extract_args 8 namespace db_host db_port db_init_username db_init_password db_datadog_username db_datadog_password db_datadog_schema $*
531
-
532
- echo "======================="
533
- echo " Initializing Datadog Agent Requiement for namespace $namespace"
534
- echo "======================="
535
-
536
- set -x
537
-
538
- echo "Checking if Database '$db_datadog_username' exists"
539
- set +e
540
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'SELECT user FROM mysql.user;' | grep "^$db_datadog_username$"
541
- return_code=$?
542
- set -e
543
-
544
- if [ ${return_code} -eq 0 ]; then
545
- echo "User $db_datadog_username already exists - nothing to do"
546
- else
547
- echo "User $db_datadog_username does not exist - initializing"
548
-
549
- # All the queries come from this doc: https://docs.datadoghq.com/fr/database_monitoring/setup_mysql/selfhosted/?tab=mysql56
550
-
551
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'CREATE USER '"$db_datadog_username"'@"%" IDENTIFIED BY '"'$db_datadog_password'"';'
552
- echo "USER created $db_datadog_username"
553
-
554
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT REPLICATION CLIENT ON *.* TO datadog@"%" WITH MAX_USER_CONNECTIONS 5;'
555
- echo "ALTER USER $db_datadog_username"
556
-
557
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT PROCESS ON *.* TO '"$db_datadog_username"'@"%";'
558
- echo "Granted PROCESS for $db_datadog_username"
559
-
560
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT SELECT ON performance_schema.* TO '"$db_datadog_username"'@"%";'
561
- echo "Granted SELECT on performance_schema for $db_datadog_username"
562
-
563
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'CREATE SCHEMA IF NOT EXISTS datadog;'
564
- echo "CREATE SCHEMA datadog"
565
-
566
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT EXECUTE ON datadog.* to '"$db_datadog_username"'@"%";'
567
- echo "Granted 'GRANT EXECUTE for $db_datadog_username on datadog"
568
-
569
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT CREATE TEMPORARY TABLES ON datadog.* TO '"$db_datadog_username"'@"%";'
570
- echo "Granted CREATE TEMPORARY TABLES for $db_datadog_username"
571
-
572
-
573
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.explain_statement;
574
- DELIMITER $$
575
- CREATE PROCEDURE datadog.explain_statement(IN query TEXT)
576
- SQL SECURITY DEFINER
577
- BEGIN
578
- SET @explain := CONCAT("EXPLAIN FORMAT=json ", query);
579
- PREPARE stmt FROM @explain;
580
- EXECUTE stmt;
581
- DEALLOCATE PREPARE stmt;
582
- END $$
583
- DELIMITER ;'
584
- echo "CREATE PROCEDURE PROCEDURE datadog.explain_statement"
585
-
586
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS '"$db_datadog_username"'.explain_statement;
587
- DELIMITER $$
588
- CREATE PROCEDURE '"$db_datadog_username"'.explain_statement(IN query TEXT)
589
- SQL SECURITY DEFINER
590
- BEGIN
591
- SET @explain := CONCAT("EXPLAIN FORMAT=json ", query);
592
- PREPARE stmt FROM @explain;
593
- EXECUTE stmt;
594
- DEALLOCATE PREPARE stmt;
595
- END $$
596
- DELIMITER ;
597
- GRANT EXECUTE ON PROCEDURE '"$db_datadog_username"'.explain_statement TO datadog@"%";'
598
- echo "CREATE PROCEDURE on SCHEMA $db_datadog_schema for $db_datadog_username"
599
-
600
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.enable_events_statements_consumers;
601
- DELIMITER $$
602
- CREATE PROCEDURE datadog.enable_events_statements_consumers()
603
- SQL SECURITY DEFINER
604
- BEGIN
605
- UPDATE performance_schema.setup_consumers SET enabled="YES" WHERE name LIKE "events_statements_%";
606
- END $$
607
- DELIMITER ;
608
- GRANT EXECUTE ON PROCEDURE datadog.enable_events_statements_consumers TO datadog@"%";'
609
-
610
- echo "CREATE PROCEDURE on datadog.enable_events_statements_consumers"
611
- fi
612
-
613
- echo "======================="
614
- echo " Database '$db_datadog_schema' Initialization complete for namespace $namespace"
615
- echo "======================="
616
- }
617
-
618
- kube_init_service_database() {
619
-
620
- extract_args 9 namespace service db_host db_port db_init_username db_init_password db_database db_username db_password $*
621
-
622
- local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"
623
-
624
- set -x
625
-
626
- echo "Checking if Database '$db_database' exists"
627
- set +e
628
- psql_on_k8 $namespace $service $db_connection -lqtA | cut -d\| -f1 | grep "^$db_database$"
629
- return_code=$?
630
- set -e
631
-
632
- if [ ${return_code} -eq 0 ]; then
633
- echo "Database $db_database already exists - nothing to do"
634
- else
635
- echo "Database $db_database does not exist - initializing"
636
-
637
- psql_on_k8 $namespace $service $db_connection -c 'CREATE DATABASE '"$db_database"';'
638
- echo "DB created $db_database"
639
-
640
- psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
641
- echo "USER created $db_username"
642
-
643
- psql_on_k8 $namespace $service $db_connection -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_username"';'
644
- echo "Granted all privileges for $db_username on $db_database"
645
- fi
646
-
647
- echo "======================="
648
- echo " Database '$db_database' Initialization complete for namespace $namespace"
649
- echo "======================="
650
- }
651
- #!/usr/bin/env bash
652
-
653
- # Port forward on the first matching pod
654
- # Ex :
655
- # pod_forward testing notification-http
656
- # pod_forward testing colisweb-api-web 3333 3000
657
- pod_forward() {
658
- ENV=$1
659
- POD_FILTER=$2
660
- LOCAL_PORT=${3:-8080}
661
- POD_PORT=${4:-8080}
662
-
663
- if PID=$(lsof -ti tcp:$LOCAL_PORT); then
664
- echo "killing process $PID which uses port $LOCAL_PORT"
665
- kill $PID
666
- fi
667
-
668
- configure_kubectl_for $ENV
669
-
670
- POD=`pick_pod $ENV $POD_FILTER`
671
-
672
- echo "setting up forwarding to $POD"
673
- kubectl -n $ENV port-forward $POD $LOCAL_PORT:$POD_PORT &
674
- PID=$!
675
-
676
- while ! echo exit | nc localhost $LOCAL_PORT > /dev/null; do
677
- sleep 1
678
- echo "waiting for port $LOCAL_PORT to be open locally"
679
- done
680
- echo "port $LOCAL_PORT is now available on localhost, forwarding to $ENV $POD:$POD_PORT"
681
- echo 'you can terminate it with "kill '$PID'" or "kill $(lsof -ti tcp:'$LOCAL_PORT')"'
682
- }
683
-
684
- # prompts to pick a pod and run a command like bash inside
685
- # pod_exec testing
686
- # pod_exec testing bash
687
- # pod_exec testing bash colisweb-api
688
- pod_exec() {
689
- ENV=$1
690
- COMMAND=${2:-bash}
691
- configure_kubectl_for $ENV
692
- POD_FILTER=$3
693
- POD=`pick_pod $ENV $POD_FILTER`
694
- echo "running $COMMAND inside $POD"
695
- kubectl -n $ENV exec -ti $POD -- $COMMAND
696
- }
697
-
698
- # prompts to pick a pod and copy from a local file to the pod
699
- # pod_copy_to testing localfile remotefile
700
- # pod_copy_to testing localfile remotefile colisweb-api
701
- pod_copy_to() {
702
- ENV=$1
703
- LOCAL_FILE=$2
704
- REMOTE_FILE=$3
705
- configure_kubectl_for $ENV
706
- POD_FILTER=$4
707
- POD=`pick_pod $ENV $POD_FILTER`
708
- kubectl cp $LOCAL_FILE $ENV/$POD:$REMOTE_FILE
709
- }
710
-
711
-
712
- pick_pod() {
713
- ENV=$1
714
- POD_FILTER="pod/$2"
715
- configure_kubectl_for $ENV
716
-
717
- if [ -z "$2" ] ; then
718
- kubectl -n $ENV get pods | gum filter | cut -f1 -d" "
719
- else
720
- if PODS=$(kubectl -n $ENV get pods -o=name | grep "$POD_FILTER"); then
721
- echo $PODS | head -1 | sed -e 's/pod\///'
722
- else
723
- echo "no pods found on $ENV matching $POD_FILTER" >&2
724
- fi
725
- fi
726
- }
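A usage sketch for pick_pod; the pod filter and the follow-up command are only examples:
  POD=$(pick_pod testing colisweb-api-web)
  kubectl -n testing logs "$POD"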
727
-
728
- #!/usr/bin/env bash
729
-
730
- redis_k8s() {
731
- MODE=$1
732
- case $MODE in
733
- "testing") SSH_LOCAL_PORT=2225;REDIS_LOCAL_PORT=63791;ENV="testing";;
734
- "staging") SSH_LOCAL_PORT=2226;REDIS_LOCAL_PORT=63792;ENV="staging";;
735
- "production") SSH_LOCAL_PORT=2227;REDIS_LOCAL_PORT=63793;ENV="production";;
736
- *) echo "Unsupported ENV : $MODE"; return 1 ;;
737
- esac
738
-
739
- start_ssh_bastion $ENV $SSH_LOCAL_PORT
740
-
741
- lsof -ti tcp:$REDIS_LOCAL_PORT | xargs kill
742
-
743
- bastion_config=$(mktemp)
744
- cat > "$bastion_config" <<EOF
745
- UserKnownHostsFile /dev/null
746
- StrictHostKeyChecking no
747
- User root
748
- Host bastion_testing
749
- HostName 127.0.0.1
750
- Port 2225
751
- LocalForward 63791 redis-testing.xufte6.0001.euw1.cache.amazonaws.com:6379
752
- Host bastion_staging
753
- HostName 127.0.0.1
754
- Port 2226
755
- LocalForward 63792 redis-sandbox.xufte6.0001.euw1.cache.amazonaws.com:6379
756
- Host bastion_production
757
- HostName 127.0.0.1
758
- Port 2227
759
- LocalForward 63793 redis-prod.xufte6.0001.euw1.cache.amazonaws.com:6379
760
- EOF
761
-
762
- ssh -f -N \
763
- -F "$bastion_config" \
764
- "bastion_$ENV"
765
-
766
- echo "sample command : 'redis-cli -p $REDIS_LOCAL_PORT'"
767
- echo "run 'kubectl delete pod $POD_NAME' when you have finished"
768
-
769
- redis-cli -p $REDIS_LOCAL_PORT
770
- }
771
-
772
- #!/usr/bin/env bash
773
-
774
- #Create a k8s cron jobs that will be run regularly
775
- #See run_cron_job_k8s -h for more details
776
-
777
- run_cron_job_k8s() {
778
-
779
- #default values
780
- local namespace="testing"
781
- local name="$USERNAME"
782
- local SCHEDULE="00 05 * * *"
783
- local secret=""
784
- local amm_folder=""
785
- local amm_script=""
786
-
787
- while getopts ":e:c:p:f:s:t:h" opt; do
788
- case $opt in
789
- e)
790
- namespace="$OPTARG" >&2
791
- ;;
792
- t)
793
- SCHEDULE="$OPTARG" >&2
794
- ;;
795
- p)
796
- name="$OPTARG" >&2
797
- ;;
798
- c)
799
- secret="$OPTARG" >&2
800
- ;;
801
- f)
802
- amm_folder="$OPTARG" >&2
803
- ;;
804
- s)
805
- amm_script="$OPTARG" >&2
806
- ;;
807
- h)
808
- show_help_cron_job
809
- return 0
810
- ;;
811
- :)
812
- echo "Option -$OPTARG requires an argument. Run run_cron_job_k8s -h for help" >&2
813
- return 0
814
- ;;
815
- \?)
816
- echo "Invalid option: -$OPTARG. Run run_cron_job_k8s -h for help" >&2
817
- return 0
818
- ;;
819
- esac
820
- done
821
-
822
- if [ -z "$amm_script" ]; then
823
- echo 'Missing -s. Run run_cron_job_k8s -h for help' >&2
824
- return 0
825
- fi
826
-
827
- shift "$((OPTIND-1))"
828
-
829
- local script_args=$(
830
- if [ "$#" -gt 0 ] ; then
831
- printf '"'
832
- join_by '", "' $*
833
- printf '"'
834
- fi
835
- )
836
-
837
- local IMAGE="lolhens/ammonite:2.5.4"
838
- local CRONJOB_NAME="cronjob-ammonite-$name"
839
-
840
-
841
- configure_kubectl_for $namespace
842
-
843
- if [[ ! -r "$amm_script" ]]; then
844
- echo "ammonite script not found $amm_script"
845
- return 2
846
- else
847
- local CONFIG_MAP="config-$CRONJOB_NAME"
848
- local SECRET_MAP="secret-$CRONJOB_NAME"
849
- local CONFIG_MAP_DIR="$(mktemp -d)"
850
-
851
- if [[ ! -z $amm_folder && -d $amm_folder ]] ; then
852
- cp -r "$amm_folder/" "$CONFIG_MAP_DIR"
853
- fi
854
- cp "$amm_script" "$CONFIG_MAP_DIR/script.sc"
855
-
856
- kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
857
- kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"
858
-
859
- kubectl -n $namespace get secret $SECRET_MAP && kubectl -n $namespace delete secret $SECRET_MAP
860
- kubectl -n $namespace create secret generic $SECRET_MAP --from-file="$secret"
861
-
862
- kubectl -n $namespace get cronjob $CRONJOB_NAME && kubectl -n $namespace delete cronjob $CRONJOB_NAME
863
-
864
- echo "starting $CRONJOB_NAME with $IMAGE"
865
-
866
- JOB_DEFINITION='
867
- apiVersion: batch/v1
868
- kind: CronJob
869
- metadata:
870
- name: '$CRONJOB_NAME'
871
- namespace: '$namespace'
872
- spec:
873
- schedule: "'$SCHEDULE'"
874
- concurrencyPolicy: Forbid
875
- jobTemplate:
876
- spec:
877
- backoffLimit: 0
878
- template:
879
- spec:
880
- nodeSelector:
881
- workType: "workers"
882
- restartPolicy: Never
883
- volumes:
884
- - name: config
885
- configMap:
886
- name: '$CONFIG_MAP'
887
- - name: secret
888
- secret:
889
- secretName: '$SECRET_MAP'
890
- containers:
891
- - name: '$CRONJOB_NAME'
892
- command: ["amm", "/code/script.sc"]
893
- image: '$IMAGE'
894
- imagePullPolicy: IfNotPresent
895
- args: ['$script_args']
896
- env:
897
- - name: POD_NAME
898
- valueFrom:
899
- fieldRef:
900
- apiVersion: v1
901
- fieldPath: metadata.name
902
- - name: POD_NAMESPACE
903
- valueFrom:
904
- fieldRef:
905
- apiVersion: v1
906
- fieldPath: metadata.namespace
907
- - name: HOST_IP
908
- valueFrom:
909
- fieldRef:
910
- apiVersion: v1
911
- fieldPath: status.hostIP
912
- volumeMounts:
913
- - name: config
914
- mountPath: /code
915
- - name: secret
916
- mountPath: /conf
917
- readOnly: true
918
- resources:
919
- requests:
920
- cpu: 500m
921
- memory: 256Mi
922
- limits:
923
- cpu: 4000m
924
- memory: 512Mi
925
- envFrom:
926
- - configMapRef:
927
- name: '$CONFIG_MAP'
928
- - secretRef:
929
- name: '$SECRET_MAP'
930
- '
931
-
932
- echo $JOB_DEFINITION > /tmp/job.yaml
933
-
934
- kubectl -n $namespace apply -f /tmp/job.yaml
935
-
936
- fi
937
- }
938
-
939
- # Usage info
940
- show_help_cron_job() {
941
- #p:f:s
942
- local help="""Usage: run_cron_job_k8s -s SCRIPT [-t TIME] [-e ENV] [-c CONFIG] [-p POD] [-f FOLDER] [ARGS]
943
- Create a k8s cron job that will run a script regularly
944
-
945
- -h display this help and exit
946
- -s SCRIPT run script SCRIPT on a pod (SCRIPT must be a .sc file)
947
- -t TIME opt. time when the job will be launched. TIME should be in CRON syntax (defaults to 00 05 * * *, i.e. 5AM UTC)
948
- -e ENV opt. set execution environment (defaults to testing)
949
- -c CONFIG opt. secret file needed for the script (must be a .sc file, not a .secret file)
950
- -p POD opt. name of the pod to create (defaults to $USERNAME)
951
- -f FOLDER opt. name of the folder containing the scripts to execute (if SCRIPT needs other files)
952
- ARGS opt. additional arguments for SCRIPT
953
- """
954
- echo "$help"
955
- }
956
-
957
- #!/usr/bin/env bash
958
-
959
- # Usage info
960
- show_help_job() {
961
- local help="""Usage: run_job_k8s -s SCRIPT [-e ENV] [-c CONFIG] [-p POD] [-f FOLDER] [ARGS]
962
- Create a k8s job executing a script
963
-
964
- -h display this help and exit
965
- -s SCRIPT run script SCRIPT on a pod (SCRIPT must be a .sc file)
966
- -e ENV opt. set execution environment (defaults to testing)
967
- -c CONFIG opt. secret file needed for the script (must be a .sc file, not a .secret file)
968
- -p POD opt. name of the pod to create (defaults to $USERNAME)
969
- -f FOLDER opt. name of the folder containing the scripts to execute (if SCRIPT needs other files)
970
- ARGS opt. additional arguments for SCRIPT
971
-
972
- The organisation of the files must be the same locally as on the pod :
973
- - /code containing the script to execute (arg -s) and the other needed files (if the arg -f is used, it must reference this directory)
974
- - /conf containing the secret file (arg -c if used)
975
- E.g. in the script \"/code/script.sc\", to use a secret file \"/conf/secret.sc\", the import should look like \"import \$file.^.conf.secret.sc\"
976
- """
977
- echo "$help"
978
- }
979
-
980
- run_job_k8s() {
981
-
982
- #default values
983
- local namespace="testing"
984
- local name="$USERNAME"
985
- local secret=""
986
- local amm_folder=""
987
- local amm_script=""
988
-
989
- while getopts ":e:c:p:f:s:h" opt; do
990
- case $opt in
991
- e)
992
- namespace="$OPTARG" >&2
993
- ;;
994
- p)
995
- name="$OPTARG" >&2
996
- ;;
997
- c)
998
- secret="$OPTARG" >&2
999
- ;;
1000
- f)
1001
- amm_folder="$OPTARG" >&2
1002
- ;;
1003
- s)
1004
- amm_script="$OPTARG" >&2
1005
- ;;
1006
- h)
1007
- show_help_job
1008
- return 0
1009
- ;;
1010
- :)
1011
- echo "Option -$OPTARG requires an argument. Run run_cron_job_k8s -h for help" >&2
1012
- return 0
1013
- ;;
1014
- \?)
1015
- echo "Invalid option: -$OPTARG. Run run_cron_job_k8s -h for help" >&2
1016
- return 0
1017
- ;;
1018
- esac
1019
- done
1020
-
1021
- if [ -z "$amm_script" ]; then
1022
- echo 'Missing -s. Run run_job_k8s -h for help' >&2
1023
- return 0
1024
- fi
1025
-
1026
- shift "$((OPTIND-1))"
1027
-
1028
- local script_args=$(
1029
- if [ "$#" -gt 0 ] ; then
1030
- printf '"'
1031
- join_by '", "' $*
1032
- printf '"'
1033
- fi
1034
- )
1035
-
1036
- local IMAGE="lolhens/ammonite:2.5.4"
1037
- local JOB_NAME="job-ammonite-$name"
1038
-
1039
- if [[ ! -r "$amm_script" ]]; then
1040
- echo "ammonite script not found $amm_script"
1041
- return 2
1042
- else
1043
- local CONFIG_MAP="config-$JOB_NAME"
1044
- local CONFIG_MAP_DIR="$(mktemp -d)"
1045
- local SECRET_MAP="secret-$JOB_NAME"
1046
-
1047
- configure_kubectl_for $namespace
1048
-
1049
- if [[ ! -z $amm_folder && -d $amm_folder ]] ; then
1050
- cp -r "$amm_folder/" "$CONFIG_MAP_DIR"
1051
- fi
1052
- cp "$amm_script" "$CONFIG_MAP_DIR/script.sc"
1053
-
1054
- kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
1055
- kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"
1056
-
1057
- kubectl -n $namespace get secret $SECRET_MAP && kubectl -n $namespace delete secret $SECRET_MAP
1058
- kubectl -n $namespace create secret generic $SECRET_MAP --from-file="$secret"
1059
-
1060
- kubectl -n $namespace get job $JOB_NAME && kubectl -n $namespace delete job $JOB_NAME
1061
-
1062
- echo "starting $JOB_NAME with $IMAGE"
1063
- fi
1064
-
1065
- JOB_DEFINITION='
1066
- apiVersion: batch/v1
1067
- kind: Job
1068
- metadata:
1069
- name: '$JOB_NAME'
1070
- namespace: '$namespace'
1071
- spec:
1072
- template:
1073
- spec:
1074
- containers:
1075
- - name: '$JOB_NAME'
1076
- command: ["amm", "/code/script.sc"]
1077
- image: '$IMAGE'
1078
- args: ['$script_args']
1079
- env:
1080
- - name: POD_NAME
1081
- valueFrom:
1082
- fieldRef:
1083
- apiVersion: v1
1084
- fieldPath: metadata.name
1085
- - name: POD_NAMESPACE
1086
- valueFrom:
1087
- fieldRef:
1088
- apiVersion: v1
1089
- fieldPath: metadata.namespace
1090
- - name: HOST_IP
1091
- valueFrom:
1092
- fieldRef:
1093
- apiVersion: v1
1094
- fieldPath: status.hostIP
1095
- volumeMounts:
1096
- - name: config
1097
- mountPath: /code
1098
- - name: secret
1099
- mountPath: /conf
1100
- readOnly: true
1101
- resources:
1102
- requests:
1103
- cpu: 500m
1104
- memory: 256Mi
1105
- limits:
1106
- cpu: 4000m
1107
- memory: 1Gi
1108
- nodeSelector:
1109
- workType: workers
1110
- restartPolicy: Never
1111
- volumes:
1112
- - name: config
1113
- configMap:
1114
- name: '$CONFIG_MAP'
1115
- - name: secret
1116
- secret:
1117
- secretName: '$SECRET_MAP'
1118
- '
1119
-
1120
-
1121
- echo $JOB_DEFINITION > /tmp/job.yaml
1122
-
1123
- kubectl -n $namespace apply -f /tmp/job.yaml
1124
-
1125
- }
1126
-
1127
-
1128
- #!/usr/bin/env bash
1129
-
1130
- run_task() {
1131
- set -e
1132
-
1133
- check_args "--namespace" $1
1134
- shift
1135
- NAMESPACE=$1
1136
- shift
1137
- check_args "--image" $1
1138
- shift
1139
- IMAGE=$1
1140
- shift
1141
- check_args "--name" $1
1142
- shift
1143
- NAME=$1
1144
- shift
1145
-
1146
- set -x
1147
-
1148
- kubectl -n ${NAMESPACE} run ${NAME} \
1149
- --image ${IMAGE} \
1150
- --restart=Never \
1151
- --attach --rm \
1152
- $*
1153
- }
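A usage sketch for run_task with a placeholder image and command; everything after the --name pair is passed straight through to kubectl run:
  run_task --namespace testing --image alpine:3.18 --name one-off-task -- echo hello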
1154
- geocode_address() {
1155
- ADDRESS=$(sed -e 's: :%20:g' <(echo "$*"))
1156
- URL="https://maps.googleapis.com/maps/api/geocode/json?address=${ADDRESS}&key=${GOOGLE_API_KEY}"
1157
- curl $URL
1158
- }
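A usage sketch; GOOGLE_API_KEY must already be set and the address is only an example:
  export GOOGLE_API_KEY=xxxx   # placeholder
  geocode_address 10 rue de Rivoli 75001 Paris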
1159
-
1160
- search_business() {
1161
- SIREN=$1
1162
- shift
1163
- QUERY=$(sed -e 's: :+:g' <(echo "$*"))
1164
- URL="https://data.opendatasoft.com/api/records/1.0/search/?dataset=sirene_v3%40public&q=${QUERY}&sort=datederniertraitementetablissement&facet=trancheeffectifsetablissement&facet=libellecommuneetablissement&facet=departementetablissementi&refine.siren=${SIREN}"
1165
- curl $URL
1166
- }
1167
-
1168
- #!/usr/bin/env bash
1169
-
1170
- # possible syntax:
1171
- # login
1172
- # login testing
1173
- # login testing userid
1174
- login() {
1175
- ENV=${1:-`gum choose testing staging production recette`} && \
1176
- USER=${2:-`gum input --placeholder username`} && \
1177
- PASSWORD=`gum input --password --placeholder password` && \
1178
- TOKEN=`$SCRIPT_FULL_PATH/scala/auth.sc login --env $ENV --user $USER --password $PASSWORD` && \
1179
- export TOKEN_$ENV=$TOKEN && \
1180
- echo "login success for $USER on $ENV" >&2
1181
- }
1182
-
1183
- # you need to call login first (see above)
1184
- # possible syntax:
1185
- # recompute_tour
1186
- # recompute_tour testing
1187
- # recompute_tour testing draft
1188
- # recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09
1189
- # recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09 TODAY
1190
- # recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09 FRIDAY
1191
- recompute_tour() {
1192
- ENV=${1:-`gum choose testing staging production recette`}
1193
- MODE=${2:-`gum choose draft definitive`}
1194
- PROJECT_ID=${3:-`pick_project $ENV`}
1195
- DAY=${4:-`gum choose TODAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY SUNDAY`}
1196
- jwt_token $ENV
1197
- scala/tour_config.sc $MODE -t $TOKEN -p $PROJECT_ID -d $DAY
1198
- }
1199
-
1200
- pick_project() {
1201
- ENV=${1:-`gum choose testing staging production recette`}
1202
- jwt_token $ENV
1203
- scala/tour_config.sc list -t $TOKEN -e $ENV | gum filter | cut -f1
1204
- }
1205
-
1206
- jwt_token() {
1207
- ENV=${1:-`gum choose testing staging production recette`}
1208
- eval 'TOKEN=$TOKEN_'$ENV
1209
- if ! $SCRIPT_FULL_PATH/scala/auth.sc check -t $TOKEN -e $ENV ; then
1210
- login $ENV
1211
- fi
1212
- }
1213
-
1214
- #!/usr/bin/env bash
1215
-
1216
- ftp_ikea_k8s() {
1217
- SSH_LOCAL_PORT=2230
1218
- FTP_LOCAL_PORT=25500
1219
- start_ssh_bastion testing $SSH_LOCAL_PORT
1220
-
1221
- lsof -ti tcp:$FTP_LOCAL_PORT | xargs kill
1222
-
1223
- bastion_config=$(mktemp)
1224
- cat > "$bastion_config" <<EOF
1225
- UserKnownHostsFile /dev/null
1226
- StrictHostKeyChecking no
1227
- User root
1228
- Host bastion_ftp
1229
- HostName 127.0.0.1
1230
- Port 2230
1231
- LocalForward 25500 ft.centiro.ikea.com:22
1232
- EOF
1233
-
1234
- ssh -f -N \
1235
- -F "$bastion_config" \
1236
- "bastion_ftp"
1237
-
1238
- sftp -P $FTP_LOCAL_PORT colisweb.fr@127.0.0.1
1239
- }
1240
-
1241
- #!/usr/bin/env bash
1242
-
1243
- # usage:
1244
- # jconsole_k8s testing colisweb-api-web
1245
-
1246
- jconsole_k8s() {
1247
- ENV=$1
1248
- NAME=$2
1249
-
1250
- start_ssh_bastion $ENV 2242
1251
- POD_IP=$( \
1252
- kubectl -n $ENV get pods -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIP}{"\n"}{end}' \
1253
- | grep "$NAME" | cut -d' ' -f2 | head -1 \
1254
- )
1255
- echo "selected POD with ip $POD_IP"
1256
- echo "use 'root' as password"
1257
- ssh -f -N -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -D 7777 root@127.0.0.1 -p 2242
1258
-
1259
- jconsole \
1260
- -J-DsocksProxyHost=localhost \
1261
- -J-DsocksProxyPort=7777 \
1262
- -J-DsocksNonProxyHosts= \
1263
- service:jmx:rmi:///jndi/rmi://$POD_IP:7199/jmxrmi \
1264
- &
1265
-
1266
- echo "remember to stop with 'stop_ssh_bastion'"
1267
-
1268
- }
1269
-
1270
- #!/usr/bin/env bash
1271
-
1272
- # Interactive console on an existing pod. See also run_ruby_k8s
1273
- # Ex :
1274
- # railsc_k8s production
1275
- # railsc_k8s production "User.where(email:'toni@colisweb.com')"
1276
- railsc_k8s() {
1277
- ENV=$1
1278
- COMMAND=$2
1279
- configure_kubectl_for $ENV
1280
- POD=$(kubectl -n $ENV get pods -o=name | grep colisweb-api-web | head -1 | sed -e 's/pod\///')
1281
- KUBERAILS="kubectl -n $ENV exec -ti $POD -- /usr/src/app/bin/rails c"
1282
- [ -z "$COMMAND" ] && eval $KUBERAILS || echo $COMMAND | eval $KUBERAILS
1283
- }
1284
-
1285
- # Ex :
1286
- # create_user testing claire.lien@colisweb.com super_admin clairemdp
1287
- create_user() {
1288
- ENV=$1
1289
- EMAIL=$2
1290
- ROLE=$3
1291
- PASSWORD=$4
1292
- railsc_k8s $ENV "User.where(email:'$EMAIL', role:'$ROLE').first_or_create.update_attributes!(password: '$PASSWORD')"
1293
- }
1294
-
1295
- # Ex :
1296
- # delete_user testing claire.lien@colisweb.com
1297
- delete_user() {
1298
- ENV=$1
1299
- EMAIL=$2
1300
- railsc_k8s $ENV "User.find_by(email:'$EMAIL').destroy"
1301
- }
1302
-
1303
- # NON Interactive console on a new pod, for long-running tasks (a few minutes)
1304
- # See also railsc_k8s
1305
- # file.txt will be available from /conf/data.txt in the ruby code
1306
- # examples :
1307
- # run_ruby_k8s testing demo <(echo "pp JSON.parse(File.read('/conf/data.txt'))") <(echo '{ "content": 123 }')
1308
- # run_ruby_k8s testing demo ~/.oh-my-zsh/custom/dev-tools/shell-session/ruby/demo.rb <(echo '{ "content": 123 }')
1309
- run_ruby_k8s() {
1310
- if [ $# -lt 4 ]; then
1311
- echo "usage : run_ruby_k8s production name-for-pod script.rb file.txt"
1312
- return 1
1313
- fi
1314
- local namespace=$1
1315
- local name=$2
1316
- local ruby_script=$3
1317
- local input_data=$4
1318
- [[ $namespace = "production" ]] && default_tag="master-latest" || default_tag="${namespace}-latest"
1319
- local image_tag=${5:-$default_tag}
1320
-
1321
- if [ ! -r "$ruby_script" ]; then
1322
- echo "ruby script not found $ruby_script"
1323
- return 2
1324
- fi
1325
-
1326
- if [ ! -r "$input_data" ]; then
1327
- echo "data not found $input_data"
1328
- return 3
1329
- fi
1330
-
1331
-
1332
- local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/colisweb-api:$image_tag"
1333
- local POD_NAME="colisweb-api-script-$name"
1334
- local CONFIG_MAP="config-$POD_NAME"
1335
- local CONFIG_MAP_DIR="$(mktemp -d)"
1336
-
1337
-
1338
- configure_kubectl_for $namespace
1339
-
1340
-
1341
- cp "$ruby_script" "$CONFIG_MAP_DIR/script.rb"
1342
- cp "$input_data" "$CONFIG_MAP_DIR/data.txt"
1343
-
1344
- kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
1345
- kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"
1346
-
1347
- kubectl -n $namespace get pod $POD_NAME && kubectl -n $namespace delete pod $POD_NAME
1348
-
1349
- echo "starting with $IMAGE"
1350
- kubectl -n $namespace run $POD_NAME \
1351
- --image $IMAGE \
1352
- -ti \
1353
- --restart=Never \
1354
- --attach \
1355
- --rm \
1356
- --overrides='{
1357
- "spec":{
1358
- "nodeSelector":{
1359
- "workType": "workers"
1360
- },
1361
- "containers":[
1362
- {
1363
- "name":"'$POD_NAME'",
1364
- "image":"'$IMAGE'",
1365
- "imagePullPolicy":"Always",
1366
- "command":[
1367
- "/usr/src/app/bin/rails",
1368
- "r",
1369
- "/conf/script.rb"
1370
- ],
1371
- "resources":{
1372
- "limits":{
1373
- "memory": "4096Mi"
1374
- }
1375
- },
1376
- "volumeMounts":[
1377
- {
1378
- "name":"conf",
1379
- "mountPath":"/conf"
1380
- }
1381
- ],
1382
- "envFrom": [ {
1383
- "configMapRef": {
1384
- "name": "colisweb-api"
1385
- }
1386
- }, {
1387
- "secretRef": {
1388
- "name": "colisweb-api"
1389
- }
1390
- }
1391
- ]
1392
- }
1393
- ],
1394
- "volumes":[
1395
- {
1396
- "name":"conf",
1397
- "configMap":{ "name":"'$CONFIG_MAP'" }
1398
- }
1399
- ]
1400
- }
1401
- }
1402
- '
1403
-
1404
- kubectl -n $namespace delete configmap $CONFIG_MAP
1405
- }
1406
-
1407
- # example:
1408
- # update_pickup_cp testing <( echo '{"wrong_cp": "59123", "corrected_cp": "59223", "delivery_ids": ["4192421", "4192425"]}' )
1409
- update_pickup_cp() {
1410
- run_ruby_k8s $1 update-pickup-cp "$SCRIPT_FULL_PATH/ruby/update_pickup_cp.rb" $2
1411
- }
1412
-
1413
-
1414
-
1415
- update_all_prices() {
1416
- local namespace=$1
1417
- local json_prices=$2
1418
-
1419
- local json_size=$(wc -c < "$json_prices")
1420
-
1421
- if ((json_size > 940000)); then
1422
- command -v jq > /dev/null || { echo "jq not found (use brew install jq)"; return 1; }
1423
- local max_lines=3000
1424
- local total_lines=$(jq '. | length' $json_prices)
1425
- local iterations=$((total_lines / max_lines + 1))
1426
- echo "$json_prices is too big, I'll split it for you in blocks of $max_lines lines. It will take $iterations runs"
1427
- for (( i = 0 ; i < iterations ; i++ )) ; do
1428
- local start=$((i * max_lines))
1429
- local end=$(( (i + 1) * max_lines))
1430
- local split_file=$(mktemp)
1431
- jq -c ".[$start:$end]" $json_prices > $split_file
1432
- local split_lines=$(jq '. | length' $split_file)
1433
- echo "starting iteration $i from $start to $end with $split_file command -v has $split_lines lines"
1434
- run_ruby_k8s $namespace "update-prices-$i" "$SCRIPT_FULL_PATH/ruby/update_prices.rb" $split_file
1435
- done
1436
- else
1437
- run_ruby_k8s $namespace "update-prices" "$SCRIPT_FULL_PATH/ruby/update_prices.rb" $json_prices
1438
- fi
1439
- }
1440
-
1441
-
1442
- update_surveys() {
1443
- local namespace=$1
1444
- local csv_surveys=$2
1445
-
1446
- local csv_size=$(wc -c < "$csv_surveys")
1447
-
1448
-
1449
- if ((csv_size > 940000)); then
1450
- local max_lines=400
1451
- local total_lines=$(wc -l < $csv_surveys)
1452
- local iterations=$((total_lines / max_lines + 1))
1453
- echo "$csv_surveys is too big, I'll split it for you in blocks of $max_lines lines. It will take $iterations runs"
1454
- for (( i = 0 ; i < iterations ; i++ )) ; do
1455
- local start=$((i * max_lines + 2))
1456
- local end=$(( (i + 1) * max_lines + 1))
1457
- local split_file=$(mktemp)
1458
- head -1 $csv_surveys > $split_file
1459
- sed -n ''"$start,${end}p" $csv_surveys >> $split_file
1460
-
1461
-
1462
- local split_lines=$(wc -l < $split_file)
1463
- echo "starting iteration $i from $start to $end with $split_file command -v has $split_lines lines"
1464
- run_ruby_k8s $namespace "reimport-surveys-$i" "$SCRIPT_FULL_PATH/ruby/feedback_kpi_reuploader.rb" $split_file
1465
- done
1466
- else
1467
- run_ruby_k8s $namespace "reimport-surveys" "$SCRIPT_FULL_PATH/ruby/feedback_kpi_reuploader.rb" $csv_surveys
1468
- fi
1469
- }
1470
-
1471
- #!/usr/bin/env bash
1472
-
1473
- configure_gitlab_ssh() {
1474
- tmp_dir=$(mktemp -d)
1475
- ssh-keyscan gitlab.com > $tmp_dir/known_hosts
1476
- echo "$SSH_PRIVATE_KEY" > $tmp_dir/id_rsa
1477
- chmod 600 $tmp_dir/id_rsa
1478
- ssh -i $tmp_dir/id_rsa -T git@gitlab.com
1479
- rm -Rf $tmp_dir
1480
- }
1481
-
1482
-
1483
- configure_gitlab_ssh_home() {
1484
- mkdir ~/.ssh
1485
- ssh-keyscan gitlab.com >> ~/.ssh/known_hosts
1486
- echo "$SSH_PRIVATE_KEY" > ~/.ssh/id_rsa
1487
- chmod 600 ~/.ssh/id_rsa
1488
- ssh -T git@gitlab.com
1489
- }
1490
- #!/usr/bin/env bash
1491
-
1492
- datadog_schedule_downtime() {
1493
- SERVICES=$1
1494
- DOWNTIME_MINUTES=${2:-30}
1495
-
1496
- if [[ "$ENVIRONMENT" == "production" ]] ; then
1497
- log "scheduling downtime for $SERVICES in $ENVIRONMENT"
1498
- else
1499
- return 0
1500
- fi
1501
-
1502
- for SERVICE in $SERVICES ; do
1503
- datadog_schedule_downtime_single $SERVICE $DOWNTIME_MINUTES
1504
- done
1505
- }
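A usage sketch; the function only acts when ENVIRONMENT=production, and DD_API_KEY / DD_APP_KEY must already be set (service names and duration are placeholders):
  ENVIRONMENT=production datadog_schedule_downtime "my-service other-service" 45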
1506
-
1507
- datadog_schedule_downtime_single() {
1508
- local SERVICE=$1
1509
- local DOWNTIME_MINUTES=$2
1510
-
1511
- START=$(date +%s)
1512
- END=$((START + 60 * DOWNTIME_MINUTES))
1513
-
1514
- log "scheduling a downtime on datadog for $SERVICE ($DOWNTIME_MINUTES minutes)"
1515
- curl -X POST "https://api.datadoghq.com/api/v1/downtime" \
1516
- -H "Content-Type: application/json" \
1517
- -H "DD-API-KEY: ${DD_API_KEY}" \
1518
- -H "DD-APPLICATION-KEY: ${DD_APP_KEY}" \
1519
- -d '
1520
- {
1521
- "active": true,
1522
- "downtime_type": 0,
1523
- "start": $START,
1524
- "end": $END,
1525
- "message": "CA Deployment - performance for $SERVICE may be lower for next $DOWNTIME_MINUTES min",
1526
- "monitor_tags": [
1527
- "service:$SERVICE",
1528
- "performance"
1529
- ],
1530
- "scope": [
1531
- "env:production"
1532
- ],
1533
- "timezone": "Europe/Paris"
1534
- }
1535
- '
1536
- }
1537
- #!/usr/bin/env bash
1538
-
1539
- docker_build_push() {
1540
- read -r -a BUILD_ARGS <<< "$1"
1541
- DOCKER_BUILD_ARGS="--build-arg VCS_REF=$(git rev-parse --short HEAD)"
1542
- for ARG_NAME in "${BUILD_ARGS[@]}"
1543
- do
1544
- DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS --build-arg $ARG_NAME=${!ARG_NAME}"
1545
- done
1546
-
1547
- if ! image_exists $DOCKER_REGISTRY_ID $APPLICATION $CI_COMMIT_SHORT_SHA ; then
1548
- docker pull $DOCKER_IMAGE || true
1549
- docker build $DOCKER_BUILD_ARGS -t $DOCKER_IMAGE_SHA --cache-from $DOCKER_IMAGE $DOCKER_STAGE_PATH
1550
- docker push $DOCKER_IMAGE_SHA
1551
- fi
1552
- }
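A usage sketch, assuming the usual CI variables (DOCKER_REGISTRY_ID, APPLICATION, CI_COMMIT_SHORT_SHA, DOCKER_IMAGE, DOCKER_IMAGE_SHA, DOCKER_STAGE_PATH) are already exported; the build-arg name and value are placeholders, and each named arg is read from the environment:
  export SBT_VERSION=1.9.0            # placeholder build arg
  docker_build_push "SBT_VERSION"     # adds --build-arg SBT_VERSION=1.9.0 on top of VCS_REF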
1553
-
1554
- docker_promote() {
1555
- # inspired by https://dille.name/blog/2018/09/20/how-to-tag-docker-images-without-pulling-them/
1556
- OLD_TAG=${1//[^0-9a-zA-Z-.]/_}
1557
- NEW_TAG=${2//[^0-9a-zA-Z-.]/_}
1558
- echo "promoting from $OLD_TAG to $NEW_TAG"
1559
- TOKEN=$(aws_ecr_token)
1560
- CONTENT_TYPE="application/vnd.docker.distribution.manifest.v2+json"
1561
- MANIFESTS_API="https://${DOCKER_REGISTRY}/v2/${APPLICATION}/manifests"
1562
-
1563
- if MANIFEST=$(curl --fail -H "Authorization: Basic $TOKEN" -H "Accept: ${CONTENT_TYPE}" "$MANIFESTS_API/${OLD_TAG}"); then
1564
- echo "authenticated on $MANIFESTS_API"
1565
- else
1566
- return 1
1567
- fi
1568
- if curl --fail -H "Authorization: Basic $TOKEN" -X PUT -H "Content-Type: ${CONTENT_TYPE}" -d "${MANIFEST}" "$MANIFESTS_API/$NEW_TAG" ; then
1569
- echo "promoted ${APPLICATION} from $OLD_TAG to $NEW_TAG"
1570
- else
1571
- return 2
1572
- fi
1573
- }
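A usage sketch for docker_promote; DOCKER_REGISTRY and APPLICATION must be set, and the tags follow the branch-latest convention used elsewhere in these scripts (values are illustrative):
  export DOCKER_REGISTRY=949316342391.dkr.ecr.eu-west-1.amazonaws.com APPLICATION=colisweb-api
  docker_promote testing-latest master-latest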
1574
-
1575
- ensure_images_exists() {
1576
- for IMAGE_TO_CHECK in $(echo $1 | tr "," "\n"); do
1577
- image_exists ${DOCKER_REGISTRY_ID} ${IMAGE_TO_CHECK} ${VERSION} || return 1
1578
- done
1579
- }
1580
- #!/usr/bin/env bash
1581
-
1582
- extract_yaml_config_variable() {
1583
- set +e
1584
- set +x
1585
-
1586
- check_args "--environment" $1
1587
- shift
1588
- ENVIRONMENT=$1
1589
- shift
1590
-
1591
- check_args "--configs-path" $1
1592
- shift
1593
- CONFIGS_PATH=$1
1594
- shift
1595
-
1596
- check_args "--variable" $1
1597
- shift
1598
- VARIABLE=$1
1599
- shift
1600
-
1601
- [[ "$1" == "--optional" ]] && OPTIONAL=true || OPTIONAL=false
1602
-
1603
- if [ ! -f ${CONFIGS_PATH}/common.yaml ]; then
1604
- echo >&2 "Missing $CONFIGS_PATH/common.yaml configuration file"
1605
- exit 1
1606
- fi
1607
- if [ ! -f ${CONFIGS_PATH}/${ENVIRONMENT}.yaml ]; then
1608
- echo >&2 "Missing $CONFIGS_PATH/$ENVIRONMENT.yaml configuration file"
1609
- exit 1
1610
- fi
1611
- if [ ! -f ${CONFIGS_PATH}/${ENVIRONMENT}-secrets.yaml ]; then
1612
- echo >&2 "Missing $CONFIGS_PATH/$ENVIRONMENT-secrets.yaml configuration file"
1613
- exit 1
1614
- fi
1615
-
1616
- result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/$ENVIRONMENT-secrets.yaml")
1617
- if [ $? -ne 0 ] || [ "$result" = "null" ]; then
1618
- result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/$ENVIRONMENT.yaml")
1619
- if [ $? -ne 0 ] || [ "$result" = "null" ]; then
1620
- result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/common.yaml")
1621
- if [ $? -ne 0 ] || [ "$result" = "null" ]; then
1622
- if [ $OPTIONAL = true ]; then
1623
- echo ""
1624
- exit 0
1625
- else
1626
- echo >&2 "Missing path $VARIABLE in $CONFIGS_PATH/$ENVIRONMENT-secrets.yaml, $CONFIGS_PATH/$ENVIRONMENT.yaml or $CONFIGS_PATH/common.yaml"
1627
- exit 1
1628
- fi
1629
- fi
1630
- fi
1631
- fi
1632
- echo ${result}
1633
- }
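A usage sketch; because the function uses exit, it is meant to be run inside a command substitution, as get_yaml_variable does below (the yq path is a placeholder):
  DB_HOST=$(extract_yaml_config_variable \
    --environment testing \
    --configs-path "$(pwd)/deploy" \
    --variable ".myservice.postgres.host")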
1634
- #!/usr/bin/env bash
1635
-
1636
- flyway_clean() {
1637
- HOST="$1"
1638
- PORT="$2"
1639
- DATABASE="$3"
1640
- USER="$4"
1641
- PASSWORD="$5"
1642
-
1643
- kubectl run -it --rm flywayclean \
1644
- --image=flyway/flyway \
1645
- --restart=Never \
1646
- -- \
1647
- -cleanDisabled=false \
1648
- -url="jdbc:postgresql://$HOST:$PORT/$DATABASE" \
1649
- -user="$USER" \
1650
- -password="$PASSWORD" \
1651
- clean
1652
- }
1653
-
1654
- #!/usr/bin/env bash
1655
-
1656
- FLYWAY_VERSION="5.2.4"
1657
-
1658
-
1659
- get_yaml_variable() {
1660
- extract_yaml_config_variable --environment ${ENVIRONMENT} --configs-path $(pwd)/deploy --variable $@
1661
- }
1662
-
1663
- init_migrate_db() {
1664
- set -e
1665
-
1666
- check_env_vars 4 "APPLICATION" "ENVIRONMENT" "FLYWAY_VERSION" "MIGRATION_SQL_PATH"
1667
-
1668
- PG_YAML_PATH=".${APPLICATION}config.postgres"
1669
-
1670
- DB_PORT="5432"
1671
- DB_HOST=$(get_yaml_variable "${PG_YAML_PATH}.host")
1672
- DB_INIT_USERNAME=$(get_yaml_variable "${PG_YAML_PATH}.initUsername")
1673
- DB_INIT_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.initPassword")
1674
- DB_DATABASE=$(get_yaml_variable "${PG_YAML_PATH}.database")
1675
- DB_USER=$(get_yaml_variable "${PG_YAML_PATH}.user")
1676
- DB_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.password")
1677
- DB_URL="jdbc:postgresql://${DB_HOST}:${DB_PORT}/${DB_DATABASE}"
1678
-
1679
- DB_RO_USER=$(get_yaml_variable "${PG_YAML_PATH}.readOnlyUser" --optional)
1680
- DB_RO_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.readOnlyPassword" --optional)
1681
-
1682
- unset KUBECONFIG
1683
-
1684
- configure_kubectl_for_ci ${ENVIRONMENT}
1685
-
1686
- kube_init_service_database \
1687
- --namespace ${ENVIRONMENT} \
1688
- --service ${APPLICATION} \
1689
- --db_host ${DB_HOST} \
1690
- --db_port ${DB_PORT} \
1691
- --db_init_username ${DB_INIT_USERNAME} \
1692
- --db_init_password ${DB_INIT_PASSWORD} \
1693
- --db_database ${DB_DATABASE} \
1694
- --db_username ${DB_USER} \
1695
- --db_password ${DB_PASSWORD}
1696
-
1697
- if [[ ! -z "$DB_RO_USER" ]] && [[ ! -z "$DB_RO_USER" ]]; then
1698
- kube_init_database_readonly_account \
1699
- --namespace ${ENVIRONMENT} \
1700
- --service ${APPLICATION} \
1701
- --db_connection "$DB_INIT_USERNAME:$DB_INIT_PASSWORD@$DB_HOST:$DB_PORT" \
1702
- --db_database ${DB_DATABASE} \
1703
- --db_readonly_username ${DB_RO_USER} \
1704
- --db_readonly_password ${DB_RO_PASSWORD}
1705
- fi
1706
-
1707
- flyway_migrate \
1708
- --environment ${ENVIRONMENT} \
1709
- --namespace ${ENVIRONMENT} \
1710
- --service ${APPLICATION} \
1711
- --db_url ${DB_URL} \
1712
- --db_user ${DB_USER} \
1713
- --db_password ${DB_PASSWORD} \
1714
- --flyway_version ${FLYWAY_VERSION} \
1715
- --flyway_sql_folder $(pwd)/${MIGRATION_SQL_PATH}
1716
- }
1717
-
1718
- flyway_migrate() {
1719
- set -e
1720
-
1721
- extract_args 8 \
1722
- environment namespace service db_url db_user db_password flyway_version flyway_sql_folder $*
1723
-
1724
- echo "running flyway migrations for service $service in environment $environment namespace $namespace for db_url $db_url with user $db_user"
1725
- echo "migration files expected in $flyway_sql_folder"
1726
-
1727
- CONFIGMAP_NAME="$service-flyway-migration-sql"
1728
- POD_NAME="$service-flyway-migration"
1729
-
1730
- configure_kubectl_for_ci $environment
1731
-
1732
- kubectl -n $namespace delete configmap $CONFIGMAP_NAME --ignore-not-found
1733
- kubectl -n $namespace delete pod $POD_NAME --ignore-not-found
1734
- kubectl -n $namespace create configmap $CONFIGMAP_NAME --from-file=$flyway_sql_folder
1735
-
1736
- kubectl -n $namespace run $POD_NAME --image ignored -ti --restart=Never --attach --rm --overrides='
1737
- {
1738
- "spec":{
1739
- "containers":[
1740
- {
1741
- "name":"'$POD_NAME'",
1742
- "image":"boxfuse/flyway:'$flyway_version'",
1743
- "command":["flyway", "-url='$db_url'", "-user='$db_user'", "-password='$db_password'", "migrate"],
1744
- "volumeMounts":[
1745
- {
1746
- "name":"sql",
1747
- "mountPath":"/flyway/sql"
1748
- }
1749
- ]
1750
- }
1751
- ],
1752
- "volumes":[
1753
- {
1754
- "name":"sql",
1755
- "configMap":{
1756
- "name":"'$CONFIGMAP_NAME'"
1757
- }
1758
- }
1759
- ]
1760
- }
1761
- }
1762
- '
1763
-
1764
- kubectl -n $namespace delete configmap $CONFIGMAP_NAME
1765
- }
1766
-
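# Direct usage sketch (all values illustrative; normally called via init_migrate_db):
#   flyway_migrate \
#     --environment staging --namespace staging --service order \
#     --db_url jdbc:postgresql://db-host:5432/order --db_user order --db_password secret \
#     --flyway_version 6.5.7 --flyway_sql_folder $(pwd)/migrations/sql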
1767
- #!/usr/bin/env bash
1768
-
1769
- record_git_commit() {
1770
- for file in $GIT_COMMIT_FILES; do
1771
- sed -i 's&GIT_COMMIT&'"${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHORT_SHA}&" "$file"
1772
- done
1773
- }
1774
-
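# Sketch of the substitution (file list and values are assumptions): every literal
# GIT_COMMIT token in the listed files is replaced by "<ref-slug>-<short-sha>", e.g.:
#   export GIT_COMMIT_FILES="deploy/common.yaml" CI_COMMIT_REF_SLUG=master CI_COMMIT_SHORT_SHA=ab12cd3
#   record_git_commit   # deploy/common.yaml now contains "master-ab12cd3" where GIT_COMMIT stood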
1775
- gitlab_import_pgp_key() {
1776
- if [ "$GITLAB_PGP_PRIVATE_KEY" != "" ]
1777
- then
1778
- # feed the key to gpg via process substitution (assigning <(...) to a variable leaves a stale /dev/fd path)
1779
- gpg --import <(echo "$GITLAB_PGP_PRIVATE_KEY") > /dev/null
1780
- else
1781
- echo '$GITLAB_PGP_PRIVATE_KEY is not set'
1782
- return 1
1783
- fi
1784
- }
1785
-
1786
- git_reveal() {
1787
- gitlab_import_pgp_key
1788
- gpg --decrypt $1
1789
- }
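# Usage sketch (path is illustrative): decrypts one file with the key taken from the
# GITLAB_PGP_PRIVATE_KEY CI variable and writes the plaintext to stdout.
#   git_reveal deploy/staging-secrets.yaml.gpg > deploy/staging-secrets.yaml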
1790
- #!/usr/bin/env bash
1791
-
1792
- helm_deploy() {
1793
- APPLICATION=$1
1794
- ENVIRONMENT=$2
1795
- VERSION=$3
1796
- deploy_chart \
1797
- --path_configs deploy \
1798
- --path_chart deploy/$APPLICATION \
1799
- --application $APPLICATION \
1800
- --environment $ENVIRONMENT \
1801
- --namespace $ENVIRONMENT \
1802
- --helm_extra_args --set global.version=$VERSION
1803
- }
1804
-
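# Usage sketch (arguments are illustrative): positional application, environment, version.
#   helm_deploy order staging "$CI_COMMIT_REF_SLUG-$CI_COMMIT_SHORT_SHA"
# which expands to deploy_chart with --path_chart deploy/order and --set global.version=<version>.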
1805
- deploy_chart() {
1806
- set -e
1807
- set -x
1808
-
1809
- # Rigid parsing, but all args are mandatory (except the last) and flexible ordering is unnecessary
1810
- check_args "--path_configs" $1; shift
1811
- path_configs=$1; shift
1812
-
1813
- check_args "--path_chart" $1; shift
1814
- path_chart=$1; shift
1815
-
1816
- check_args "--application" $1; shift
1817
- application=$1; shift
1818
-
1819
- check_args "--environment" $1; shift
1820
- environment=$1; shift
1821
-
1822
- check_args "--namespace" $1; shift
1823
- namespace=$1; shift
1824
-
1825
- if [ $# -ne 0 ]; then
1826
- check_args "--helm_extra_args" $1; shift
1827
- helm_extra_args=$*
1828
- fi
1829
-
1830
- echo "================================"
1831
- echo " Deploying $application"
1832
- echo " - Environment: $environment"
1833
- echo " - Namespace: $namespace"
1834
- echo "================================"
1835
-
1836
- root_path=$(pwd)
1837
-
1838
- # Check the config files exist
1839
-
1840
- check_config_file ${root_path}/${path_configs}/common.yaml
1841
- check_config_file ${root_path}/${path_configs}/${namespace}.yaml
1842
- check_config_file ${root_path}/${path_configs}/${namespace}-secrets.yaml
1843
-
1844
- # Check the chart exists
1845
- if [ ! -d ${root_path}/${path_chart} ] || [ ! -f ${root_path}/${path_chart}/Chart.yaml ]; then
1846
- echo "Bad Chart $root_path/$path_chart : does not exists or missing Chart.yaml"
1847
- print_usage
1848
- exit 1
1849
- fi
1850
-
1851
- # Unset Kubectl configuration made via the KUBECONFIG env variable
1852
- # it would override the config made by configure_kubectl_for
1853
- # for example, using Gitlab runners in Kubernetes sets this variable and causes conflict
1854
- unset KUBECONFIG
1855
-
1856
- # Configure Kubectl
1857
- configure_kubectl_for_ci ${environment}
1858
-
1859
- # Configure Helm
1860
- helm version --tiller-namespace ${namespace} || true
1861
- # The Helm stable repo URL has changed and must be set explicitly for Helm versions < v2.17.0
1862
- helm init --tiller-namespace ${namespace} --client-only --stable-repo-url https://charts.helm.sh/stable
1863
- helm repo add colisweb s3://colisweb-helm-charts/colisweb
1864
- helm repo update --strict
1865
- helm dependency update --tiller-namespace ${namespace} ${root_path}/${path_chart}
1866
-
1867
- # Gather values/*.yaml files
1868
- values_path="${root_path}/${path_chart}/values"
1869
- values_files=''
1870
- [ -d $values_path ] && values_files=$(find $values_path -maxdepth 1 -type f -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')
1871
-
1872
- # Deploy
1873
- helm upgrade -i \
1874
- --namespace ${namespace} \
1875
- --tiller-namespace ${namespace} \
1876
- ${values_files} \
1877
- -f ${root_path}/${path_configs}/common.yaml \
1878
- -f ${root_path}/${path_configs}/${namespace}.yaml \
1879
- -f ${root_path}/${path_configs}/${namespace}-secrets.yaml \
1880
- ${helm_extra_args} \
1881
- ${application} ${root_path}/${path_chart}
1882
-
1883
- #send event to dd
1884
- PUBLISHED_VERSION="$CI_COMMIT_REF_NAME-$CI_COMMIT_SHA"
1885
- emit_datadog_deploy_event --environment $environment --service $application --version $PUBLISHED_VERSION
1886
-
1887
- echo "================================"
1888
- echo " Deployed $application"
1889
- echo " - Environment: $environment"
1890
- echo " - Namespace: $namespace"
1891
- echo "================================"
1892
-
1893
- set +x
1894
- }
1895
-
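# Full deploy_chart invocation sketch (paths and names are assumptions):
#   deploy_chart \
#     --path_configs deploy --path_chart deploy/order \
#     --application order --environment staging --namespace staging \
#     --helm_extra_args --set global.version=1.2.3
# Expects deploy/common.yaml, deploy/staging.yaml and deploy/staging-secrets.yaml to exist.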
1896
- verify_deployments() {
1897
- set -e
1898
-
1899
- # usage :
1900
- # verify_deployments staging price
1901
- # verify_deployments -t 15m testing price
1902
-
1903
- if [ "$1" == "-t" ]; then
1904
- TIMEOUT=$2
1905
- shift
1906
- shift
1907
- else
1908
- TIMEOUT=5m
1909
- fi
1910
-
1911
- NAMESPACE=$1
1912
- RELEASE=$2
1913
-
1914
- # Get all Deployments names from the deployed chart
1915
- DEPLOYMENTS=(
1916
- $(helm get manifest --tiller-namespace $NAMESPACE $RELEASE | yq -rs '.[] | select(.kind=="Deployment") | .metadata.name')
1917
- )
1918
-
1919
- echo "verifying on $NAMESPACE deployments ${DEPLOYMENTS[@]} with a timeout of $TIMEOUT"
1920
-
1921
- PIDS=()
1922
- for D in "${DEPLOYMENTS[@]}"; do
1923
- kubectl -n ${NAMESPACE} rollout status deployment ${D} --timeout=${TIMEOUT} &
1924
- PIDS+=($!)
1925
- done
1926
-
1927
- for P in ${PIDS[@]}; do
1928
- wait $P || ROLLOUT_FAILED=1
1929
-
1930
- if [ "${ROLLOUT_FAILED:-0}" -ne 0 ]; then
1931
- echo "at least one deployment failed or timed out (after $TIMEOUT)"
1932
- exit 1
1933
- fi
1934
- done
1935
-
1936
- }
1937
-
1938
- helm_deploy_v3() {
1939
- APPLICATION=$1
1940
- ENVIRONMENT=$2
1941
- VERSION=$3
1942
- deploy_chart_v3 \
1943
- --path_configs deploy \
1944
- --path_chart deploy/$APPLICATION \
1945
- --application $APPLICATION \
1946
- --environment $ENVIRONMENT \
1947
- --namespace $ENVIRONMENT \
1948
- --helm_extra_args --set global.version=$VERSION
1949
- }
1950
-
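# Same calling convention as helm_deploy, but deploys with the helm3 binary (example values):
#   helm_deploy_v3 order staging "$CI_COMMIT_REF_SLUG-$CI_COMMIT_SHORT_SHA"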
1951
- deploy_chart_v3() {
1952
- set -e
1953
- set -x
1954
-
1955
- # Rigid parsing, but all args are mandatory (except the last) and flexible ordering is unnecessary
1956
- check_args "--path_configs" $1; shift
1957
- path_configs=$1; shift
1958
- check_args "--path_chart" $1; shift
1959
- path_chart=$1; shift
1960
- check_args "--application" $1; shift
1961
- application=$1; shift
1962
- check_args "--environment" $1; shift
1963
- environment=$1; shift
1964
- check_args "--namespace" $1; shift
1965
- namespace=$1; shift
1966
- if [ $# -ne 0 ]; then
1967
- check_args "--helm_extra_args" $1; shift
1968
- helm_extra_args=$*
1969
- fi
1970
-
1971
- echo "================================"
1972
- echo " Deploying $application"
1973
- echo " - Environment: $environment"
1974
- echo " - Namespace: $namespace"
1975
- echo "================================"
1976
-
1977
- root_path=$(pwd)
1978
-
1979
- # Check the config files exist
1980
-
1981
- check_config_file ${root_path}/${path_configs}/common.yaml
1982
- check_config_file ${root_path}/${path_configs}/${namespace}.yaml
1983
- check_config_file ${root_path}/${path_configs}/${namespace}-secrets.yaml
1984
-
1985
- # Check the chart exists
1986
- if [ ! -d ${root_path}/${path_chart} ] || [ ! -f ${root_path}/${path_chart}/Chart.yaml ]; then
1987
- echo "Bad Chart $root_path/$path_chart : does not exists or missing Chart.yaml"
1988
- print_usage
1989
- exit 1
1990
- fi
1991
-
1992
- # Unset Kubectl configuration made via the KUBECONFIG env variable
1993
- # it would override the config made by configure_kubectl_for
1994
- # for example, using Gitlab runners in Kubernetes sets this variable and causes conflict
1995
- unset KUBECONFIG
1996
-
1997
- # Configure Kubectl
1998
- configure_kubectl_for_ci ${environment}
1999
-
2000
- # Configure helm3
2001
- helm3 version --namespace ${namespace} || true
2002
- # Helm 3 does not configure a stable repo by default, so add the (relocated) stable repo explicitly
2003
- helm3 repo add colisweb s3://colisweb-helm-charts/colisweb
2004
- helm3 repo add stable https://charts.helm.sh/stable
2005
- helm3 repo update
2006
- helm3 dependency update ${root_path}/${path_chart}
2007
-
2008
- # Gather values/*.yaml files
2009
- values_path="${root_path}/${path_chart}/values"
2010
- values_files=''
2011
- [ -d $values_path ] && values_files=$(find $values_path -maxdepth 1 -type f -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')
2012
-
2013
- # Deploy
2014
- helm3 upgrade --install \
2015
- --namespace ${namespace} \
2016
- ${values_files} \
2017
- -f ${root_path}/${path_configs}/common.yaml \
2018
- -f ${root_path}/${path_configs}/${namespace}.yaml \
2019
- -f ${root_path}/${path_configs}/${namespace}-secrets.yaml \
2020
- ${helm_extra_args} \
2021
- ${application} ${root_path}/${path_chart}
2022
-
2023
- #send event to dd
2024
- PUBLISHED_VERSION="$CI_COMMIT_REF_NAME-$CI_COMMIT_SHA"
2025
- emit_datadog_deploy_event --environment $environment --service $application --version $PUBLISHED_VERSION
2026
-
2027
- echo "================================"
2028
- echo " Deployed $application"
2029
- echo " - Environment: $environment"
2030
- echo " - Namespace: $namespace"
2031
- echo "================================"
2032
-
2033
- set +x
2034
- }
2035
-
2036
- verify_deployments_v3() {
2037
- set -e
2038
-
2039
- # usage :
2040
- # verify_deployments_v3 staging price
2041
- # verify_deployments_v3 -t 15m testing price
2042
-
2043
- if [ "$1" == "-t" ]; then
2044
- TIMEOUT=$2
2045
- shift
2046
- shift
2047
- else
2048
- TIMEOUT=5m
2049
- fi
2050
-
2051
- NAMESPACE=$1
2052
- RELEASE=$2
2053
-
2054
- # Get all Deployments names from the deployed chart
2055
- DEPLOYMENTS=(
2056
- $(helm3 get manifest --namespace $NAMESPACE $RELEASE | yq -rs '.[] | select(.kind=="Deployment") | .metadata.name')
2057
- )
2058
-
2059
- echo "verifying on $NAMESPACE deployments ${DEPLOYMENTS[@]} with a timeout of $TIMEOUT"
2060
-
2061
- PIDS=()
2062
- for D in "${DEPLOYMENTS[@]}"; do
2063
- kubectl -n ${NAMESPACE} rollout status deployment ${D} --timeout=${TIMEOUT} &
2064
- PIDS+=($!)
2065
- done
2066
-
2067
- for P in ${PIDS[@]}; do
2068
- wait $P || ROLLOUT_FAILED=1
2069
-
2070
- if [ "${ROLLOUT_FAILED:-0}" -ne 0 ]; then
2071
- echo "at least one deployment failed or timed out (after $TIMEOUT)"
2072
- exit 1
2073
- fi
2074
- done
2075
-
2076
- }
2077
-
2078
- print_usage() {
2079
- echo "Usage:"
2080
- echo "deploy_chart \\"
2081
- echo " --path_configs <path to .yaml namespaces and secret config files>"
2082
- echo " --path_chart <path to Helm Chart>"
2083
- echo " --application <application name used by Helm>"
2084
- echo " --environment <infrastructure environment>"
2085
- echo " --namespace <namespace>"
2086
- echo " --helm-extra-args <extra args to pass to helm, ex: --set my.value=42 --set your.setting=on>"
2087
- echo ""
2088
- }
2089
-
2090
- check_config_file() {
2091
- local filename=$1
2092
- if [ ! -f ${filename} ]; then
2093
- echo "Missing $filename configuration file"
2094
- print_usage
2095
- exit 1
2096
- fi
2097
- }
2098
-
2099
- #!/usr/bin/env bash
2100
-
2101
- configure_kubectl_for_ci() {
2102
- if [ -z ${GITLAB_PAT} ]; then
2103
- echo "Cannot configure kubectl: no GITLAB_PAT configured"
2104
- exit 1
2105
- fi
2106
-
2107
- infra_env="$1"
2108
- valid_envs="[testing][staging][production][performance][tests][recette]"
2109
- echo "$valid_envs" | grep -q "\[$infra_env\]"
2110
-
2111
- if [ $? -ne 0 ]; then
2112
- echo "Cannot configure kubectl for invalid env : $infra_env"
2113
- echo "choose one of $valid_envs"
2114
- exit 1
2115
- fi
2116
-
2117
- mkdir -p ~/.kube
2118
- curl -fsS \
2119
- --header "PRIVATE-TOKEN: $GITLAB_PAT" \
2120
- "https://gitlab.com/api/v4/projects/8141053/jobs/artifacts/$infra_env/raw/$infra_env.kubeconfig?job=4_kubernetes_config_output" \
2121
- > ~/.kube/$infra_env.kubeconfig
2122
-
2123
- curl_return_code=$?
2124
- if [ ${curl_return_code} -ne 0 ]; then
2125
- echo "Cannot configure kubectl for $infra_env, get configuration failed with code $curl_return_code"
2126
- exit ${curl_return_code}
2127
- fi
2128
-
2129
- rm -f ~/.kube/config
2130
- ln -s ~/.kube/$infra_env.kubeconfig ~/.kube/config
2131
- echo "Configured kubectl for env : $infra_env"
2132
- }
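# Usage sketch: requires a GITLAB_PAT with access to the infrastructure project; the function
# then points ~/.kube/config at the kubeconfig fetched for that environment.
#   export GITLAB_PAT=<token>   # assumption: provided as a CI secret
#   configure_kubectl_for_ci staging
#   kubectl get pods -n staging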
2133
- notify_new_deployment() {
2134
- jq --version || (apt update && apt install -y jq)
2135
-
2136
- CHAT_URL=${1:-$DEFAULT_CHAT_URL}
2137
-
2138
- STATUS=$(echo $CI_JOB_STATUS | tr '[:lower:]' '[:upper:]' )
2139
- ENV_NAME=$(echo $ENVIRONMENT | tr '[:lower:]' '[:upper:]' )
2140
-
2141
- JOB_LINK="<$CI_JOB_URL| $CI_JOB_NAME $CI_JOB_ID>"
2142
-
2143
- DESCRIPTION="
2144
- $STATUS : Deployment for $CI_PROJECT_NAME on $ENV_NAME
2145
- $JOB_LINK
2146
- $CI_COMMIT_TITLE
2147
- "
2148
-
2149
- JSON_MESSAGE=$(jq -n --arg text "$DESCRIPTION" '{text: $text }')
2150
- curl -X POST $CHAT_URL \
2151
- --header "Content-Type: application/json" \
2152
- --data "$JSON_MESSAGE"
2153
- }
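# Usage sketch (webhook URL is an assumption): posts the job status and commit title to a
# chat webhook; falls back to DEFAULT_CHAT_URL when no argument is given.
#   notify_new_deployment "https://chat.example.com/hooks/deployments"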
2154
- notify_new_version() {
2155
-
2156
- ! test -z $CI_COMMIT_TAG || exit 0
2157
-
2158
- jq --version || (apt update && apt install -y jq)
2159
-
2160
- KIND=$1
2161
- CHAT_URL=${2:-$DEFAULT_CHAT_URL}
2162
-
2163
- STATUS=$(echo $CI_JOB_STATUS | tr '[:lower:]' '[:upper:]' )
2164
- ENV_NAME=$(echo $ENVIRONMENT | tr '[:lower:]' '[:upper:]' )
2165
- TITLE="$ENV_NAME *$STATUS* $KIND for version *$CI_COMMIT_TAG* of *$CI_PROJECT_NAME* "
2166
-
2167
- RELEASE_URL="https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/releases/$CI_COMMIT_TAG"
2168
-
2169
- NOTES=$(curl --header "PRIVATE-TOKEN: $GITLAB_TOKEN" $RELEASE_URL |
2170
- jq .description |
2171
- sed -e 's/^"//' -e 's/"$//' |
2172
- sed -E 's/\[([^]]+)\]\(([^)]+)\)/<\2|\1>/g' |
2173
- sed -E 's/\\n/\'$'\n/g')
2174
-
2175
- JOB_LINK="<$CI_JOB_URL| $CI_JOB_NAME $CI_JOB_ID>"
2176
-
2177
- DESCRIPTION="
2178
- $TITLE
2179
- $JOB_LINK
2180
- $NOTES
2181
- "
2182
-
2183
- JSON_MESSAGE=$(jq -n --arg text "$DESCRIPTION" '{text: $text }')
2184
- curl -X POST $CHAT_URL \
2185
- --header "Content-Type: application/json" \
2186
- --data "$JSON_MESSAGE"
2187
- }
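# Usage sketch (values illustrative): only runs on tagged pipelines, fetches the GitLab release
# notes for $CI_COMMIT_TAG and posts them to the chat webhook.
#   notify_new_version "backend release" "https://chat.example.com/hooks/releases"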
2188
- #!/usr/bin/env bash
2189
-
2190
- skip_sbt_compile_cache() {
2191
- COMPARED_BRANCH="${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-$CI_DEFAULT_BRANCH}"
2192
- echo "branch to compare to: $COMPARED_BRANCH"
2193
- git fetch origin $COMPARED_BRANCH
2194
- echo "fetched $COMPARED_BRANCH"
2195
- [[ "$CI_COMMIT_REF_NAME" =~ ^(master|develop)$ || $(git diff origin/$COMPARED_BRANCH --exit-code -- project) ]]
2196
- }
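# Intended CI usage (a sketch of my reading of the function): it succeeds on master/develop
# or when the project/ directory differs from the target branch, i.e. when the cache should be skipped.
#   if skip_sbt_compile_cache; then echo "recompiling without the sbt cache"; fi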
2197
- #!/usr/bin/env bash
2198
-
2199
- # in case of trouble with functions for update history during import
2200
- # https://stackoverflow.com/questions/56729192/pg-restore-fails-when-trying-to-create-function-referencing-table-that-does-not
2201
-
2202
- # example: clone_databases --source_env testing --destination_env recette --services "order,notification,parcel,ikea"
2203
- clone_databases() {
2204
- export USERNAME="database-cloner"
2205
-
2206
- set -e
2207
-
2208
- extract_args 3 source_env destination_env services $*
2209
-
2210
- dump_databases "$source_env" "$services"
2211
- import_databases "$destination_env" "$services"
2212
- }
2213
-
2214
- dump_databases() {
2215
- local env="$1"
2216
- local services=$(echo -n "$2" | tr ',' '\n')
2217
-
2218
- database_k8s_output_dump_path="/tmp/database_k8s_output_dump"
2219
-
2220
- configure_kubectl_for "$env"
2221
- set +e
2222
- database_k8s "$env" > "$database_k8s_output_dump_path"
2223
- set -e
2224
-
2225
- source_pg_local_port=$(extract_pg_local_port "$database_k8s_output_dump_path")
2226
-
2227
- for service in $services
2228
- do
2229
- service_path="/tmp/$service"
2230
-
2231
- set +e
2232
- git clone "git@gitlab.com:colisweb/back/$service.git" "$service_path"
2233
- set -e
2234
-
2235
- if cd "$service_path"; then
2236
- echo "dump the database for service $service.."
2237
-
2238
- git secret reveal -f
2239
-
2240
- PG_YAML_PATH=".${service}config.postgres"
2241
-
2242
- SOURCE_DB_DATABASE=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.database")
2243
- SOURCE_DB_USER=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.user")
2244
- SOURCE_DB_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.password")
2245
-
2246
- export PGPASSWORD="$SOURCE_DB_PASSWORD"
2247
-
2248
- DUMP_PATH="/tmp/db_dump_${service}.sql"
2249
- pg_dump --no-owner -h localhost -p "$source_pg_local_port" -U "$SOURCE_DB_USER" "$SOURCE_DB_DATABASE" > "$DUMP_PATH"
2250
-
2251
- cd ..
2252
- rm -rf "$service_path"
2253
- else
2254
- echo "WARN: failed to clone $service - skipping"
2255
- fi
2256
- done
2257
- }
2258
-
2259
- import_databases() {
2260
- local env="$1"
2261
- local services=$(echo -n "$2" | tr ',' '\n')
2262
-
2263
- database_k8s_output_import_path="/tmp/database_k8s_output_import"
2264
-
2265
- configure_kubectl_for "$env"
2266
- set +e
2267
- database_k8s "$env" > "$database_k8s_output_import_path"
2268
- set -e
2269
-
2270
- destination_pg_local_port=$(extract_pg_local_port "$database_k8s_output_import_path")
2271
-
2272
- for service in $services
2273
- do
2274
- service_path="/tmp/$service"
2275
-
2276
- set +e
2277
- git clone "git@gitlab.com:colisweb/back/$service.git" "$service_path"
2278
- set -e
2279
-
2280
- if cd "$service_path"; then
2281
- echo "create and import database for $service.."
2282
-
2283
- git secret reveal -f
2284
-
2285
- PG_YAML_PATH=".${service}config.postgres"
2286
-
2287
- DB_PORT="5432"
2288
- DB_HOST=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.host")
2289
- DB_INIT_USERNAME=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.initUsername")
2290
- DB_INIT_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.initPassword")
2291
- DB_DATABASE=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.database")
2292
- DB_USER=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.user")
2293
- DB_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.password")
2294
-
2295
- kube_init_service_database \
2296
- --namespace ${env} \
2297
- --service ${service} \
2298
- --db_host ${DB_HOST} \
2299
- --db_port ${DB_PORT} \
2300
- --db_init_username ${DB_INIT_USERNAME} \
2301
- --db_init_password ${DB_INIT_PASSWORD} \
2302
- --db_database ${DB_DATABASE} \
2303
- --db_username ${DB_USER} \
2304
- --db_password ${DB_PASSWORD}
2305
-
2306
- echo "WARN: A complete clean of $DB_DATABASE on $DB_HOST will be operated"
2307
- read -rsn1 -p"Press any key to continue";echo
2308
- flyway_clean "$DB_HOST" "$DB_PORT" "$DB_DATABASE" "$DB_USER" "$DB_PASSWORD"
2309
-
2310
- DUMP_PATH="/tmp/db_dump_${service}.sql"
2311
- export PGPASSWORD="$DB_PASSWORD"
2312
- set +e
2313
- psql "postgres://$DB_USER@127.0.0.1:$destination_pg_local_port" -p "$DB_DATABASE" -f "$DUMP_PATH"
2314
- set -e
2315
-
2316
- cd ..
2317
- rm -rf "$service_path"
2318
- else
2319
- echo "WARN: failed to clone $service - skipping"
2320
- fi
2321
- done
2322
- }
2323
-
2324
- extract_pg_local_port() {
2325
- cat "$1" | grep 'postgres@127.0.0.1:' | sed 's/.*postgres@127.0.0.1:\(.*[0-9]\).*/\1/g'
2326
- }
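# Sketch of the expected input (the exact line format is an assumption based on the sed pattern):
# the database_k8s output contains a forwarded connection string such as "psql postgres@127.0.0.1:54321",
# and extract_pg_local_port prints the trailing port.
#   echo "psql postgres@127.0.0.1:54321" > /tmp/out && extract_pg_local_port /tmp/out   # -> 54321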
2327
- #!/usr/bin/env bash
2328
-
2329
- emit_datadog_deploy_event() {
2330
- extract_args 3 environment service version $*
2331
- check_env_vars 1 "DD_API_KEY"
2332
-
2333
- response=$(
2334
- curl -X POST -H "Content-type: application/json" \
2335
- -d '{
2336
- "title": "deploying '"$service"' to '"$environment"'",
2337
- "text": "deploying '"$service"' version '"$version"' to '"$environment"'",
2338
- "priority": "normal",
2339
- "tags": ["service:'"$service"' ", "env:'"$environment"'" ,"action:'"deployment"'"] ,
2340
-
2341
- "alert_type": "Info"
2342
- }' \
2343
- "https://api.datadoghq.com/api/v1/events?api_key=$DD_API_KEY"
2344
- )
2345
-
2346
- #echo $response
2347
- EventID=$(echo $response | jq ".event.id")
2348
- url=$(echo $response | jq ".event.url")
2349
-
2350
- if [[ $EventID -ne 0 ]]; then
2351
- echo "event successfully created check in datadog UI : $url"
2352
- else
2353
- echo " failed to create event "
2354
- exit 1
2355
- fi
2356
- }
2357
-
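# Usage sketch (values illustrative): requires DD_API_KEY and posts a deployment event to the
# Datadog events API.
#   export DD_API_KEY=<key>
#   emit_datadog_deploy_event --environment staging --service order --version master-ab12cd3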
2358
- #!/usr/bin/env bash
2359
-
2360
- # DEPRECATED
2361
- emit_datadog_error_events() {
2362
- set -e
2363
- extract_args 4 title text priority environment $*
2364
- check_env_vars 1 "DD_API_KEY"
2365
-
2366
- curl -X POST -H "Content-type: application/json" \
2367
- -d '{
2368
- "title": "'"$title"'",
2369
- "text": "'"$text"'",
2370
- "priority": "'"$priority"'",
2371
- "tags": ["environment:'"$environment"'"],
2372
- "alert_type": "Error"
2373
- }' \
2374
- "https://api.datadoghq.com/api/v1/events?api_key=$DD_API_KEY"
2375
- }
2376
-
2377
- #!/usr/bin/env bash
2378
- terraform_init() {
2379
- SECTION=$1
2380
- ENV=$2
2381
- cd $SECTION
2382
- terraform init -input=false
2383
- terraform workspace select $ENV || terraform workspace new $ENV
2384
- }
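# Usage sketch (section and environment names are assumptions): cd into a Terraform section,
# initialise it, and select (or create) the matching workspace.
#   terraform_init network staging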