@colisweb/rescript-toolkit 4.22.0 → 4.22.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,2422 +0,0 @@
- #!/usr/bin/env bash
-
- #VARIABLES
- export SCRIPT_FULL_PATH=$(dirname "$0")
-
- ##FUNCTIONS
- # https://stackoverflow.com/questions/1527049/how-can-i-join-elements-of-an-array-in-bash
- join_by() {
-   local d=${1-} f=${2-}
-   if shift 2; then
-     printf %s "$f" "${@/#/$d}"
-   fi
- }
-
- mkstring() {
-   local start=$1
-   local separator=$2
-   local end=$3
-   shift 3
-
-   if [ $# -gt 0 ]; then
-     printf $start
-     join_by $separator $*
-     printf $end
-   fi
- }
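For illustration, a minimal usage sketch of the two helpers above (the values are made up):
    join_by ',' a b c             # -> a,b,c
    mkstring '[' ',' ']' a b c    # -> [a,b,c]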
-
- md5all() {
-   all_hash=$(mktemp)
-   for name in $*; do
-     find $name -type f -exec cat {} \; | md5sum | cut -f1 -d ' ' >> $all_hash
-   done;
-   cat $all_hash | md5sum | cut -f1 -d ' '
- }
-
- log() {
-   echo "$*" >&2
- }
- #!/usr/bin/env bash
-
- check_args() {
-   if [ -z $2 ] || [ "$1" != "$2" ]; then
-     echo >&2 "missing argument $1"
-     return 1
-   fi
- }
-
- check_env_vars() {
-   ArgsCount=$1 && shift
-   for ((i = 0; i < $ArgsCount; i++)); do
-     if [[ -z "${!1}" ]]; then
-       echo >&2 "missing ENV $1"
-       return 1
-     fi
-     shift
-   done
- }
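A sketch of how check_env_vars is meant to be called: the count of names first, then the names, failing on the first one that is unset (these two variables are the ones used later in these scripts):
    check_env_vars 2 "DD_API_KEY" "DD_APP_KEY" || return 1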
-
- extract_arg() {
-   name=$1
-   passed=$2
-   value=$3
-   if [ "--$name" != "$passed" ]; then
-     echo "missing argument $name"
-     exit 1
-   fi
-   eval $name='$value'
- }
-
- extract_args() {
-   declare -a Array_Args
-   ArgsCount=$1 && shift
-   for ((i = 0; i < $ArgsCount; i++)); do
-     Array_Args[i]=$1 && shift
-   done
-   for ArgName in "${Array_Args[@]}"; do
-     extract_arg "$ArgName" $* && shift 2
-   done
- }
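A sketch of the calling convention used by the kube_init_* helpers further down: the expected names come first, then the --name value pairs in the same order (values here are placeholders):
    extract_args 2 db_host db_port --db_host 127.0.0.1 --db_port 5432
    # $db_host and $db_port are now set in the caller's shell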
-
- #!/usr/bin/env bash
-
- aws_ecr_login() {
-   PATH=/root/.local/bin:$PATH
-
-   aws ecr get-login-password \
-     | docker login --username AWS --password-stdin 949316342391.dkr.ecr.eu-west-1.amazonaws.com \
-     || (echo "you should update to AWS CLI version 2 https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-mac.html " $(aws ecr get-login --region=eu-west-1 --no-include-email) )
- }
-
- aws_ecr_token() {
-   aws ecr get-authorization-token --region=eu-west-1 --output text --query 'authorizationData[].authorizationToken'
- }
-
- # you will need jq to use these commands. You can install it using "brew install jq"
- # delete_images colisweb_api 8
- # will delete images older than 8 weeks
- delete_images() {
-
-   REPO=$1
-   WEEKS=${2:-16}
-
-   WEEKS_AGO=$(date -j -v-${WEEKS}w +%s)
-
-   #Get all ecr images
-   IMAGES=$(aws ecr describe-images --repository-name $REPO --output json)
-
-   #Filter unnecessary values and map `imagePushedAt` to EPOCH
-   TIMED_IMAGES=$(echo $IMAGES | jq .'[]' | jq "map({imagePushedAt: (.imagePushedAt[0:19]+\"Z\" | fromdateiso8601), imageDigest: .imageDigest}) | sort_by(.imagePushedAt) | .[:-1]")
-
-   #Filter on EPOCH
-   OLD_IMAGES=$(echo $TIMED_IMAGES | jq "map(select (.imagePushedAt < $WEEKS_AGO)) | .[] " | jq -r '.imageDigest')
-
-   while IFS= read -r IMAGE; do
-     if [ "$IMAGE" != "" ]; then
-       echo "Deleting $IMAGE from $REPO"
-       AWS_PAGER="" aws ecr batch-delete-image --repository-name $REPO --image-ids imageDigest=$IMAGE
-     fi
-   done <<< "$OLD_IMAGES"
- }
-
- # delete_images_all_repos 12
- # will delete images in all repositories older than 12 weeks
- delete_images_all_repos() {
-   REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[]|.[].repositoryName')
-
-   while IFS= read -r REPO; do
-     echo "processing ECR repository $REPO"
-     delete_images $REPO $1
-   done <<< "$REPOSITORIES"
- }
-
- #!/usr/bin/env bash
-
- # If gitlab is down or pipelines are stuck, hotfixes still need to be available
- # This script will publish docker images to ECR using your current git HEAD, then deploy them to a given environment.
- # Some local files (git-commit.conf and sentry.properties) will be updated, take caution.
- # No trace of this will appear on Gitlab (no releases, no pipelines, no tags).
- # create_hotfix_scala $ENVIRONMENT $CHART_NAME [ $MODULE_NAME $MODULE_PATH $DEPLOYMENT ]
- # create_hotfix_scala testing crm main modules/3-executables/main crm
- # create_hotfix_scala testing notification \
- #   main-http modules/3-executables/main-http notification-http \
- #   main-consumer modules/3-executables/main-consumer notification-consumer
-
- create_hotfix_scala() {
-
-   ENVIRONMENT=$1
-   CHART_NAME=$2
-   shift 2
-
-   SHORT_SHA=$(git rev-parse --short HEAD)
-   HOTFIX_TAG="hotfix-$SHORT_SHA"
-
-   gum confirm "Preparing $HOTFIX_TAG for $CHART_NAME ?" || exit
-   prepare_hotfix_scala $HOTFIX_TAG
-
-   gum confirm "Building $HOTFIX_TAG for $CHART_NAME ?" || exit
-   while [[ $# -gt 2 ]] ; do
-     build_hotfix_scala $HOTFIX_TAG "$1" "$2" "$3"
-     shift 3
-   done
-
-   gum confirm "Deploying $HOTFIX_TAG for $CHART_NAME ?" || exit
-   deploy_hotfix $CHART_NAME $ENVIRONMENT $HOTFIX_TAG
- }
-
- # Update local git-commit.conf and sentry.properties files using git short sha
- prepare_hotfix_scala() {
-   HOTFIX_TAG=$1
-
-   git secret reveal -f
-   aws_ecr_login
-
-   COMMIT_CONF_FILES=$(find . -name "git-commit.conf")
-   SENTRY_PROPERTIES_FILES=$(find . -name "sentry.properties")
-
-   for file in $(echo "$COMMIT_CONF_FILES\n$SENTRY_PROPERTIES_FILES"); do
-     sed -i '' -e 's&GIT_COMMIT&'"$HOTFIX_TAG&" $file
-   done
-
- }
-
- # Build docker images locally and publish them to AWS ECR.
- build_hotfix_scala() {
-
-   HOTFIX_TAG=$1
-   SBT_MODULE=$2
-   DOCKER_PATH=$3
-   DEPLOYMENT=$4
-
-   DOCKER_REGISTRY_ID="949316342391"
-   DOCKER_REGISTRY="$DOCKER_REGISTRY_ID.dkr.ecr.eu-west-1.amazonaws.com"
-   DOCKER_IMAGE=$DOCKER_REGISTRY/$DEPLOYMENT
-   HOTFIX_IMAGE=$DOCKER_IMAGE:$HOTFIX_TAG
-
-   #Build
-   sbt "project $SBT_MODULE" "Docker / stage"
-
-   #Publish
-   docker build --platform "linux/amd64" -t $HOTFIX_IMAGE --cache-from $DOCKER_IMAGE "$DOCKER_PATH/target/docker/stage"
-   docker push $HOTFIX_IMAGE
-
-   echo "Created hotfix $HOTFIX_IMAGE"
- }
-
- # Deploy the project in the given environment
- deploy_hotfix() {
-   source $colisweb_scripts/ci/helm.sh
-
-   CHART_NAME=$1
-   ENVIRONMENT=$2
-   HOTFIX_TAG=$3
-
-   CONFIG_PATH=deploy
-   CHART_PATH=$CONFIG_PATH/$CHART_NAME
-   ROOT_PATH=$(pwd)
-
-   # Unset Kubectl configuration made via the KUBECONFIG env variable
-   # it would override the config made by configure_kubectl_for
-   # for example, using Gitlab runners in Kubernetes sets this variable and causes conflicts
-   unset KUBECONFIG
-
-   # Configure Kubectl
-   configure_kubectl_for $ENVIRONMENT
-
-   # Avoiding "no local-index.yaml" or "empty local-index.yaml" error
-   cat > $HOME/Library/Caches/helm/repository/local-index.yaml <<EOT
- apiVersion: v1
- entries:
-   cronjob:
- EOT
-
-   # the helm3 stable repo has changed and must be updated manually in versions < v2.17.0
-   helm3 repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
-   helm3 repo add stable https://charts.helm.sh/stable --force-update
-   helm3 repo update
-   helm3 dependency update ${ROOT_PATH}/${CHART_PATH}
-
-   # Gather values/*.yaml files
-   VALUES_PATH="${ROOT_PATH}/${CHART_NAME}/values"
-   VALUES_FILES=''
-   [ -d $VALUES_PATH ] && VALUES_FILES=$(find $VALUES_PATH -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')
-
-   # Deploy
-   helm3 upgrade --install \
-     --namespace ${ENVIRONMENT} \
-     ${VALUES_FILES} \
-     -f ${ROOT_PATH}/${CONFIG_PATH}/common.yaml \
-     -f ${ROOT_PATH}/${CONFIG_PATH}/${ENVIRONMENT}.yaml \
-     -f ${ROOT_PATH}/${CONFIG_PATH}/${ENVIRONMENT}-secrets.yaml \
-     --set global.version=$HOTFIX_TAG \
-     ${CHART_NAME} ${ROOT_PATH}/${CHART_PATH}
-
-
-   verify_deployments_v3 -t 10m $ENVIRONMENT $CHART_NAME
-
- }
-
- #!/usr/bin/env bash
-
- image_exists() {
-   set -e
-
-   REGISTRY=$1
-   REPOSITORY=$2
-   IMAGE=$3
-
-   TAGGED_IMAGE="$REGISTRY/$REPOSITORY:$IMAGE"
-
-   aws ecr describe-images --registry-id $REGISTRY --repository-name $REPOSITORY --image-ids "imageTag=$IMAGE"
-
-   if [ $? -eq 0 ]
-   then
-     echo "Image $TAGGED_IMAGE already present in remote repo"
-     return 0
-   else
-     echo "Image $TAGGED_IMAGE NOT present in remote repo"
-     return 1
-   fi
- }
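A usage sketch (the registry id is the one hard-coded in these scripts; the repository name and tag are illustrative):
    image_exists 949316342391 colisweb-api master-latest && echo "image already published"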
- #!/usr/bin/env bash
-
- gmm() {
-   git checkout $1
-   git pull
-   git checkout $2
-   git pull
-   git merge $1
-   git push
- }
-
- git_damn_merge() {
-   git checkout $1
-   git pull
-   git checkout $2
-   git dammit
-   git merge $1
-   git push
- }
-
- git_prune_local_branches() {
-   git branch -r |
-     awk '{print $1}' |
-     egrep -v -f /dev/fd/0 <(git branch -vv | grep origin) |
-     awk '{print $1}' |
-     xargs git branch -d
- }
-
- gum_checkout() {
-   git branch -a | cut -f3- -d "/" | gum filter | xargs git checkout
- }
-
- # useful option:
- # export GIT_SUBLINE_MERGE_NON_INTERACTIVE_MODE=TRUE
- # see https://github.com/paulaltin/git-subline-merge
- setup_subline_merge() {
-   location=${1:-"--local"}
-
-   case $location in
-     --local)
-       if [ -d ".git" ]; then
-         echo "* merge=subline" >>.git/info/attributes
-       else
-         echo "Cannot use local option, not in a git repository"
-         return 1
-       fi
-       ;;
-     --global)
-       echo "* merge=subline" >>~/.gitattributes
-       ;;
-     *)
-       echo "unknown argument $location"
-       return 2
-       ;;
-   esac
-
-   git config $location merge.conflictStyle diff3
-   git config $location merge.subline.driver "$colisweb_scripts/shell-session/shell/dev/git-subline-merge %O %A %B %L %P"
-   git config $location merge.subline.recursive binary
- }
-
- rebase_from_ancestor() {
-   set -x
-   branch=$1
-   tip=$(git rev-parse HEAD)
-   ancestor=$(git merge-base $branch $tip)
-   commits=$(git log $ancestor..$tip)
-   git reset --hard $ancestor
-   git merge --squash $tip
-   git commit -m "squashed commits $commits" || echo "nothing committed"
-   git rebase $branch -Xtheirs
- }
-
- #!/usr/bin/env bash
-
- import_all_pgp_keys() {
-   echo "importing all PGP keys"
-   gpg --import $SCRIPT_FULL_PATH/pgp_keys/*.key
- }
-
- remove_all_persons_from_secrets() {
-   echo "cleanup git secret"
-   WHO_KNOWS=($(git secret whoknows))
-   git secret removeperson $WHO_KNOWS
-   echo "Removed secrets access for $WHO_KNOWS"
- }
-
- all_pgp_emails() {
-   gpg --show-key $SCRIPT_FULL_PATH/pgp_keys/*.key | sed -rn "s/.*<(.*)>/\1/p"
- }
-
- set_all_secret_keys() {
-
-   import_all_pgp_keys
-
-   git secret reveal -f
-
-   remove_all_persons_from_secrets
-
-   if [ $# -eq 0 ]; then
-     echo "No emails supplied, using dev-tools pgp keys as source"
-     IN_THE_KNOW=($(gum choose --no-limit $(all_pgp_emails)))
-   else
-     IN_THE_KNOW=($*)
-   fi
-
-   git secret tell $IN_THE_KNOW
-   git secret hide
-   git secret whoknows
-
-   echo "all secrets updated, you'll need to commit the changes"
- }
-
- #!/usr/bin/env bash
-
- start_ssh_bastion() {
-   ENV=$1
-   SSH_LOCAL_PORT=$2
-   POD_NAME=ssh-bastion-$USERNAME
-   CONFIG_MAP_NAME=ssh-bastion-$USERNAME
-   configure_kubectl_for $ENV
-   kubectl get pods -o name | grep pod/$POD_NAME
-   if [ $? -eq 0 ]; then
-     echo "$POD_NAME is already running"
-   else
-     #configmap
-     kubectl get configmap $CONFIG_MAP_NAME && kubectl delete configmap $CONFIG_MAP_NAME
-     tempdir=$(mktemp -d)
-     cat <<EOF > $tempdir/sshd_config
- AllowTcpForwarding yes
- Port 2222
- PermitRootLogin yes
- AuthorizedKeysFile /etc/ssh/authorized_keys
- EOF
-     cp ~/.ssh/id_rsa.pub $tempdir/authorized_keys
-     kubectl create configmap $CONFIG_MAP_NAME --from-file=$tempdir
-
-     #pod
-     kubectl get pod $POD_NAME && kubectl delete pod $POD_NAME
-     cat <<EOF | kubectl create -f -
-
- apiVersion: v1
- kind: Pod
- metadata:
-   name: $POD_NAME
- spec:
-   containers:
-     - name: $POD_NAME
-       image: sickp/alpine-sshd:7.4
-       ports:
-         - containerPort: 2222
-       volumeMounts:
-         - mountPath: /etc/ssh/sshd_config
-           name: ssh-config
-           subPath: sshd_config
-         - mountPath: /etc/ssh/authorized_keys
-           name: ssh-config
-           subPath: authorized_keys
-   volumes:
-     - name: ssh-config
-       configMap:
-         name: $CONFIG_MAP_NAME
- EOF
-
-   fi
-
-   # You need a recent kubectl for wait to work (1.15 works), install or upgrade
-   # with brew :
-   # brew install kubernetes-cli
-   # brew upgrade kubernetes-cli
-   kubectl wait --for=condition=Ready pod/$POD_NAME
-
-   # kube port-forward
-   lsof -ti tcp:$SSH_LOCAL_PORT | xargs kill
-   kubectl port-forward $POD_NAME $SSH_LOCAL_PORT:2222 &
-   while ! nc -z 127.0.0.1 $SSH_LOCAL_PORT; do
-     sleep 1
-   done
-   echo "forwarding ssh via local port $SSH_LOCAL_PORT"
-   echo "remember to terminate the bastion with 'stop_ssh_bastion'"
- }
-
- stop_ssh_bastion() {
-   POD_NAME=ssh-bastion-$USERNAME
-   kubectl delete pod $POD_NAME
- }
-
- #!/usr/bin/env bash
-
- configure_kubectl_for() {
-   local infra_env="$1"
-   local valid_envs="[testing][staging][production][performance][tests][recette]"
-   echo "$valid_envs" | grep -q "\[$infra_env\]"
-
-   if [ $? -ne 0 ]; then
-     echo "Cannot configure kubectl for invalid env : $infra_env"
-     echo "choose one of $valid_envs"
-     return 1
-   fi
-
-   aws eks update-kubeconfig --name "toutatis-$infra_env-eks" >&2
- }
-
- #!/usr/bin/env bash
-
- # WARNING : never try to do a dump directly from the database_production_ca
- # this could cause a lot of database lock issues.
- # always use database_production_read_replica_ca instead
- database_k8s() {
-   MODE=$1
-   case $MODE in
-     "tests") SSH_LOCAL_PORT=2224;PG_LOCAL_PORT=24440;CA_LOCAL_PORT=25430;ENV="tests";;
-     "testing") SSH_LOCAL_PORT=2225;PG_LOCAL_PORT=24441;CA_LOCAL_PORT=25431;ENV="testing";;
-     "staging") SSH_LOCAL_PORT=2226;PG_LOCAL_PORT=24442;CA_LOCAL_PORT=25432;ENV="staging";;
-     "production") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24443;CA_LOCAL_PORT=25433;ENV="production";;
-     "production_rw") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24444;CA_LOCAL_PORT=25434;ENV="production";;
-     "recette") SSH_LOCAL_PORT=2228;PG_LOCAL_PORT=24446;CA_LOCAL_PORT=25436;ENV="recette";;
-     *) echo "Unsupported ENV : $MODE"; return 1 ;;
-   esac
-
-   start_ssh_bastion $ENV $SSH_LOCAL_PORT
-
-   lsof -ti tcp:$PG_LOCAL_PORT | xargs kill
-
-   bastion_config=$(mktemp)
-   cat > "$bastion_config" <<EOF
- UserKnownHostsFile /dev/null
- StrictHostKeyChecking no
- User root
- Host bastion_tests
-   HostName 127.0.0.1
-   Port 2224
-   LocalForward 24440 toutatis-tests-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
- Host bastion_testing
-   HostName 127.0.0.1
-   Port 2225
-   LocalForward 24441 toutatis-testing-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
-   LocalForward 25431 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
- Host bastion_staging
-   HostName 127.0.0.1
-   Port 2226
-   LocalForward 24442 toutatis-staging-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
-   LocalForward 25432 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
- Host bastion_recette
-   HostName 127.0.0.1
-   Port 2228
-   LocalForward 24446 toutatis-recette-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
-   LocalForward 25436 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
- Host bastion_production
-   HostName 127.0.0.1
-   Port 2227
-   LocalForward 24443 toutatis-production-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
-   LocalForward 25433 api-production-rds-read-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
-   LocalForward 25435 archive-ca.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
- EOF
-   if [ "$MODE" = "production_rw" ] ; then
-     cat >> "$bastion_config" <<EOF
-   LocalForward 24444 toutatis-production-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
-   LocalForward 25434 api-production-rds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
- EOF
-   fi
-
-   ssh -f -N \
-     -F "$bastion_config" \
-     "bastion_$ENV"
-
-   echo "sample command : 'psql postgres://postgres@127.0.0.1:$PG_LOCAL_PORT'"
-   echo "sample command : 'mysql -u colisweb -h 127.0.0.1 -P $CA_LOCAL_PORT -p db_name'"
-
-   echo "run 'kubectl delete pod $POD_NAME' when you have finished"
- }
-
- psql_on_k8() {
-   NAMESPACE=$1
-   SERVICE=$2
-   CONNECTION=$3
-   shift 3
-
-   kubectl -n $NAMESPACE run ${SERVICE}-database-init \
-     --image jbergknoff/postgresql-client \
-     --restart=Never \
-     --attach --rm \
-     -- \
-     postgresql://${CONNECTION} \
-     "$*"
- }
-
- mysql_on_k8() {
-   local namespace=$1
-   local db_host=$2
-   local db_port=$3
-   local db_init_username=$4
-   local db_init_password=$5
-   local query=$6
-
-   kubectl -n ${namespace} run datadog-database-init \
-     --image widdpim/mysql-client \
-     --restart=Never \
-     --attach --rm \
-     -- \
-     mysql --host=$db_host --user=$db_init_username --password=$db_init_password --port=$db_port --execute="$query"
- }
- #!/usr/bin/env bash
-
- kube_init_database_once() {
-
-   extract_args 8 namespace db_host db_port db_init_username db_init_password db_database db_username db_password $*
-
-   echo "======================="
-   echo " Initializing Database '$db_database' for namespace $namespace"
-   echo "======================="
-
-   set -x
-
-   echo "Checking if Database '$db_database' exists"
-   set +e
-   psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -lqtA | cut -d\| -f1 | grep "^$db_database$"
-   return_code=$?
-   set -e
-
-   if [ ${return_code} -eq 0 ]; then
-     echo "Database $db_database already exists - nothing to do"
-   else
-     echo "Database $db_database does not exist - initializing"
-
-     psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'CREATE DATABASE '"$db_database"';'
-     echo "DB created $db_database"
-
-     psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'CREATE USER '"$db_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
-     echo "USER created $db_username"
-
-     psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_username"';'
-     echo "Granted all privileges for $db_username on $db_database"
-   fi
-
-   echo "======================="
-   echo " Database '$db_database' Initialization complete for namespace $namespace"
-   echo "======================="
- }
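A sketch of a full invocation, following the extract_args convention above (every value below is a placeholder):
    kube_init_database_once \
      --namespace testing --db_host mydb.internal --db_port 5432 \
      --db_init_username postgres --db_init_password initpass \
      --db_database mydb --db_username myuser --db_password mypass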
-
- kube_init_database_readonly_account() {
-
-   extract_args 6 namespace service db_connection db_database db_readonly_username db_readonly_password $*
-
-   echo "======================="
-   echo " Initializing Readonly Account '$db_readonly_username' for '$db_database' for namespace $namespace"
-   echo "======================="
-
-   # Print commands before execution, except echo
-   trap '[[ $BASH_COMMAND != echo* ]] && echo $BASH_COMMAND' DEBUG
-
-   echo "Checking if Readonly account '$db_readonly_username' for '$db_database' exists"
-   set +e
-   psql_on_k8 $namespace $service $db_connection -qtAc 'SELECT rolname FROM pg_roles;' | grep "^$db_readonly_username$"
-   return_code=$?
-   set -e
-
-   if [ ${return_code} -eq 0 ]; then
-     echo "Account $db_readonly_username already exists - nothing to do"
-   else
-     echo "Account $db_readonly_username does not exist - creating"
-
-     psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_readonly_username"' WITH ENCRYPTED PASSWORD '"'$db_readonly_password'"';'
-     psql_on_k8 $namespace $service $db_connection -c 'GRANT CONNECT ON DATABASE '"$db_database"' TO '"$db_readonly_username"';'
-     psql_on_k8 $namespace $service $db_connection -c 'GRANT USAGE ON SCHEMA public TO '"$db_readonly_username"';'
-     psql_on_k8 $namespace $service $db_connection -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO '"$db_readonly_username"';'
-     psql_on_k8 $namespace $service $db_connection -c 'ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO '"$db_readonly_username"';'
-
-     echo "Created user with read-only permissions for $db_readonly_username on $db_database (schema public)"
-   fi
- }
-
- kube_init_datadog_in_database() {
-   extract_args 8 namespace db_host db_port db_init_username db_init_password db_datadog_username db_datadog_password db_datadog_schema $*
-
-   echo "======================="
-   echo " Initializing Datadog Agent Requirement for namespace $namespace"
-   echo "======================="
-
-   set -x
-
-   echo "Checking if User '$db_datadog_username' exists"
-   set +e
-   mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'SELECT user FROM mysql.user;' | grep "^$db_datadog_username$"
-   return_code=$?
-   set -e
-
-   if [ ${return_code} -eq 0 ]; then
-     echo "User $db_datadog_username already exists - nothing to do"
-   else
-     echo "User $db_datadog_username does not exist - initializing"
-
-     # All the queries come from these docs: https://docs.datadoghq.com/fr/database_monitoring/setup_mysql/selfhosted/?tab=mysql56
-
-     mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'CREATE USER '"$db_datadog_username"'@"%" IDENTIFIED BY '"'$db_datadog_password'"';'
-     echo "USER created $db_datadog_username"
-
-     mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT REPLICATION CLIENT ON *.* TO datadog@"%" WITH MAX_USER_CONNECTIONS 5;'
-     echo "Granted REPLICATION CLIENT for $db_datadog_username"
-
-     mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT PROCESS ON *.* TO '"$db_datadog_username"'@"%";'
-     echo "Granted PROCESS for $db_datadog_username"
-
-     mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT SELECT ON performance_schema.* TO '"$db_datadog_username"'@"%";'
-     echo "Granted SELECT on performance_schema for $db_datadog_username"
-
-     mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'CREATE SCHEMA IF NOT EXISTS datadog;'
-     echo "CREATE SCHEMA datadog"
-
-     mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT EXECUTE ON datadog.* to '"$db_datadog_username"'@"%";'
-     echo "Granted EXECUTE for $db_datadog_username on datadog"
-
-     mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT CREATE TEMPORARY TABLES ON datadog.* TO '"$db_datadog_username"'@"%";'
-     echo "Granted CREATE TEMPORARY TABLES for $db_datadog_username"
-
-
-     mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.explain_statement;
-       DELIMITER $$
-       CREATE PROCEDURE datadog.explain_statement(IN query TEXT)
-       SQL SECURITY DEFINER
-       BEGIN
-         SET @explain := CONCAT("EXPLAIN FORMAT=json ", query);
-         PREPARE stmt FROM @explain;
-         EXECUTE stmt;
-         DEALLOCATE PREPARE stmt;
-       END $$
-       DELIMITER ;'
-     echo "CREATE PROCEDURE datadog.explain_statement"
-
-     mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS '"$db_datadog_username"'.explain_statement;
-       DELIMITER $$
-       CREATE PROCEDURE '"$db_datadog_username"'.explain_statement(IN query TEXT)
-       SQL SECURITY DEFINER
-       BEGIN
-         SET @explain := CONCAT("EXPLAIN FORMAT=json ", query);
-         PREPARE stmt FROM @explain;
-         EXECUTE stmt;
-         DEALLOCATE PREPARE stmt;
-       END $$
-       DELIMITER ;
-       GRANT EXECUTE ON PROCEDURE '"$db_datadog_username"'.explain_statement TO datadog@"%";'
-     echo "CREATE PROCEDURE on SCHEMA $db_datadog_schema for $db_datadog_username"
-
-     mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.enable_events_statements_consumers;
-       DELIMITER $$
-       CREATE PROCEDURE datadog.enable_events_statements_consumers()
-       SQL SECURITY DEFINER
-       BEGIN
-         UPDATE performance_schema.setup_consumers SET enabled="YES" WHERE name LIKE "events_statements_%";
-       END $$
-       DELIMITER ;
-       GRANT EXECUTE ON PROCEDURE datadog.enable_events_statements_consumers TO datadog@"%";'
-
-     echo "CREATE PROCEDURE on datadog.enable_events_statements_consumers"
-   fi
-
-   echo "======================="
-   echo " Database '$db_datadog_schema' Initialization complete for namespace $namespace"
-   echo "======================="
- }
-
- kube_init_service_database() {
-
-   extract_args 9 namespace service db_host db_port db_init_username db_init_password db_database db_username db_password $*
-
-   local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"
-
-   set -x
-
-   echo "Checking if Database '$db_database' exists"
-   set +e
-   psql_on_k8 $namespace $service $db_connection -lqtA | cut -d\| -f1 | grep "^$db_database$"
-   return_code=$?
-   set -e
-
-   if [ ${return_code} -eq 0 ]; then
-     echo "Database $db_database already exists - nothing to do"
-   else
-     echo "Database $db_database does not exist - initializing"
-
-     psql_on_k8 $namespace $service $db_connection -c 'CREATE DATABASE '"$db_database"';'
-     echo "DB created $db_database"
-
-     psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
-     echo "USER created $db_username"
-
-     psql_on_k8 $namespace $service $db_connection -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_username"';'
-     echo "Granted all privileges for $db_username on $db_database"
-   fi
-
-   echo "======================="
-   echo " Database '$db_database' Initialization complete for namespace $namespace"
-   echo "======================="
- }
- #!/usr/bin/env bash
-
- # Port forward on the first matching pod
- # Ex :
- # pod_forward testing notification-http
- # pod_forward testing colisweb-api-web 3333 3000
- pod_forward() {
-   ENV=$1
-   POD_FILTER=$2
-   LOCAL_PORT=${3:-8080}
-   POD_PORT=${4:-8080}
-
-   if PID=$(lsof -ti tcp:$LOCAL_PORT); then
-     echo "killing process $PID which uses port $LOCAL_PORT"
-     kill $PID
-   fi
-
-   configure_kubectl_for $ENV
-
-   POD=`pick_pod $ENV $POD_FILTER`
-
-   echo "setting up forwarding to $POD"
-   kubectl -n $ENV port-forward $POD $LOCAL_PORT:$POD_PORT &
-   PID=$!
-
-   while ! echo exit | nc localhost $LOCAL_PORT > /dev/null; do
-     sleep 1
-     echo "waiting for port $LOCAL_PORT to be open locally"
-   done
-   echo "port $LOCAL_PORT is now available on localhost, forwarding to $ENV $POD:$POD_PORT"
-   echo 'you can terminate it with "kill '$PID'" or "kill $(lsof -ti tcp:'$LOCAL_PORT')"'
- }
-
- # prompts to pick a pod and run a command like bash inside
- # pod_exec testing
- # pod_exec testing bash
- # pod_exec testing bash colisweb-api
- pod_exec() {
-   ENV=$1
-   COMMAND=${2:-bash}
-   configure_kubectl_for $ENV
-   POD_FILTER=$3
-   POD=`pick_pod $ENV $POD_FILTER`
-   echo "running $COMMAND inside $POD"
-   kubectl -n $ENV exec -ti $POD -- $COMMAND
- }
-
- # prompts to pick a pod and copy from a local file to the pod
- # pod_copy_to testing localfile remotefile
- # pod_copy_to testing localfile remotefile colisweb-api
- pod_copy_to() {
-   ENV=$1
-   LOCAL_FILE=$2
-   REMOTE_FILE=$3
-   configure_kubectl_for $ENV
-   POD_FILTER=$4
-   POD=`pick_pod $ENV $POD_FILTER`
-   kubectl cp $LOCAL_FILE $ENV/$POD:$REMOTE_FILE
- }
-
-
- pick_pod() {
-   ENV=$1
-   POD_FILTER="pod/$2"
-   configure_kubectl_for $ENV
-
-   if [ -z "$2" ] ; then
-     kubectl -n $ENV get pods | gum filter | cut -f1 -d" "
-   else
-     if PODS=$(kubectl -n $ENV get pods -o=name | grep "$POD_FILTER"); then
-       echo $PODS | head -1 | sed -e 's/pod\///'
-     else
-       echo "no pods found on $ENV matching $POD_FILTER" >&2
-     fi
-   fi
- }
-
- #!/usr/bin/env bash
-
- redis_k8s() {
-   MODE=$1
-   case $MODE in
-     "testing") SSH_LOCAL_PORT=2225;REDIS_LOCAL_PORT=63791;ENV="testing";;
-     "staging") SSH_LOCAL_PORT=2226;REDIS_LOCAL_PORT=63792;ENV="staging";;
-     "production") SSH_LOCAL_PORT=2227;REDIS_LOCAL_PORT=63793;ENV="production";;
-     *) echo "Unsupported ENV : $MODE"; return 1 ;;
-   esac
-
-   start_ssh_bastion $ENV $SSH_LOCAL_PORT
-
-   lsof -ti tcp:$REDIS_LOCAL_PORT | xargs kill
-
-   bastion_config=$(mktemp)
-   cat > "$bastion_config" <<EOF
- UserKnownHostsFile /dev/null
- StrictHostKeyChecking no
- User root
- Host bastion_testing
-   HostName 127.0.0.1
-   Port 2225
-   LocalForward 63791 redis-testing.xufte6.0001.euw1.cache.amazonaws.com:6379
- Host bastion_staging
-   HostName 127.0.0.1
-   Port 2226
-   LocalForward 63792 redis-sandbox.xufte6.0001.euw1.cache.amazonaws.com:6379
- Host bastion_production
-   HostName 127.0.0.1
-   Port 2227
-   LocalForward 63793 redis-prod.xufte6.0001.euw1.cache.amazonaws.com:6379
- EOF
-
-   ssh -f -N \
-     -F "$bastion_config" \
-     "bastion_$ENV"
-
-   echo "sample command : 'redis-cli -p $REDIS_LOCAL_PORT'"
-   echo "run 'kubectl delete pod $POD_NAME' when you have finished"
-
-   redis-cli -p $REDIS_LOCAL_PORT
- }
-
- #!/usr/bin/env bash
-
- # Create a k8s cron job that runs a script regularly
- # See run_cron_job_k8s -h for more details
-
- run_cron_job_k8s() {
-
-   # default values
-   local namespace="testing"
-   local name="$USERNAME"
-   local SCHEDULE="00 05 * * *"
-   local secret=""
-   local amm_folder=""
-   local amm_script=""
-
-   while getopts ":e:c:p:f:s:t:h" opt; do
-     case $opt in
-       e)
-         namespace="$OPTARG" >&2
-         ;;
-       t)
-         SCHEDULE="$OPTARG" >&2
-         ;;
-       p)
-         name="$OPTARG" >&2
-         ;;
-       c)
-         secret="$OPTARG" >&2
-         ;;
-       f)
-         amm_folder="$OPTARG" >&2
-         ;;
-       s)
-         amm_script="$OPTARG" >&2
-         ;;
-       h)
-         show_help_cron_job
-         return 0
-         ;;
-       :)
-         echo "Option -$OPTARG requires an argument. Run run_cron_job_k8s -h for help" >&2
-         return 0
-         ;;
-       \?)
-         echo "Invalid option: -$OPTARG. Run run_cron_job_k8s -h for help" >&2
-         return 0
-         ;;
-     esac
-   done
-
-   if [ -z "$amm_script" ]; then
-     echo 'Missing -s. Run run_cron_job_k8s -h for help' >&2
-     return 0
-   fi
-
-   shift "$((OPTIND-1))"
-
-   local script_args=$(
-     if [ "$#" -gt 0 ] ; then
-       printf '"'
-       join_by '", "' $*
-       printf '"'
-     fi
-   )
-
-   local IMAGE="lolhens/ammonite:2.5.4"
-   local CRONJOB_NAME="cronjob-ammonite-$name"
-
-
-   configure_kubectl_for $namespace
-
-   if [[ ! -r "$amm_script" ]]; then
-     echo "ammonite script not found $amm_script"
-     return 2
-   else
-     local CONFIG_MAP="config-$CRONJOB_NAME"
-     local SECRET_MAP="secret-$CRONJOB_NAME"
-     local CONFIG_MAP_DIR="$(mktemp -d)"
-
-     if [[ ! -z $amm_folder && -d $amm_folder ]] ; then
-       cp -r "$amm_folder/" "$CONFIG_MAP_DIR"
-     fi
-     cp "$amm_script" "$CONFIG_MAP_DIR/script.sc"
-
-     kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
-     kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"
-
-     kubectl -n $namespace get secret $SECRET_MAP && kubectl -n $namespace delete secret $SECRET_MAP
-     kubectl -n $namespace create secret generic $SECRET_MAP --from-file="$secret"
-
-     kubectl -n $namespace get cronjob $CRONJOB_NAME && kubectl -n $namespace delete cronjob $CRONJOB_NAME
-
-     echo "starting $CRONJOB_NAME with $IMAGE"
-
-     JOB_DEFINITION='
- apiVersion: batch/v1
- kind: CronJob
- metadata:
-   name: '$CRONJOB_NAME'
-   namespace: '$namespace'
- spec:
-   schedule: "'$SCHEDULE'"
-   concurrencyPolicy: Forbid
-   jobTemplate:
-     spec:
-       backoffLimit: 0
-       template:
-         spec:
-           nodeSelector:
-             workType: "workers"
-           restartPolicy: Never
-           volumes:
-             - name: config
-               configMap:
-                 name: '$CONFIG_MAP'
-             - name: secret
-               secret:
-                 secretName: '$SECRET_MAP'
-           containers:
-             - name: '$CRONJOB_NAME'
-               command: ["amm", "/code/script.sc"]
-               image: '$IMAGE'
-               imagePullPolicy: IfNotPresent
-               args: ['$script_args']
-               env:
-                 - name: POD_NAME
-                   valueFrom:
-                     fieldRef:
-                       apiVersion: v1
-                       fieldPath: metadata.name
-                 - name: POD_NAMESPACE
-                   valueFrom:
-                     fieldRef:
-                       apiVersion: v1
-                       fieldPath: metadata.namespace
-                 - name: HOST_IP
-                   valueFrom:
-                     fieldRef:
-                       apiVersion: v1
-                       fieldPath: status.hostIP
-               volumeMounts:
-                 - name: config
-                   mountPath: /code
-                 - name: secret
-                   mountPath: /conf
-                   readOnly: true
-               resources:
-                 requests:
-                   cpu: 500m
-                   memory: 256Mi
-                 limits:
-                   cpu: 4000m
-                   memory: 512Mi
-               envFrom:
-                 - configMapRef:
-                     name: '$CONFIG_MAP'
-                 - secretRef:
-                     name: '$SECRET_MAP'
- '
-
-     echo $JOB_DEFINITION > /tmp/job.yaml
-
-     kubectl -n $namespace apply -f /tmp/job.yaml
-
-   fi
- }
-
- # Usage info
- show_help_cron_job() {
-   #p:f:s
-   local help="""Usage: run_cron_job_k8s -s SCRIPT [-t TIME] [-e ENV] [-c CONFIG] [-p POD] [-f FOLDER] [ARGS]
- Create a k8s cron job that runs a script regularly
-
-   -h          display this help and exit
-   -s SCRIPT   run script SCRIPT on a pod (SCRIPT must be a .sc file)
-   -t TIME     opt. time when the job will be launched. TIME should be in CRON syntax (default to 00 05 * * *, ie 5AM UTC)
-   -e ENV      opt. set execution environment (default to testing)
-   -c CONFIG   opt. secret file needed for the script (must be a .sc file, not a .secret file)
-   -p POD      opt. name of the pod to create (default to $USERNAME)
-   -f FOLDER   opt. name of the folder containing the scripts to execute (if SCRIPT needs other files)
-   ARGS        opt. additional arguments for SCRIPT
- """
-   echo "$help"
- }
-
- #!/usr/bin/env bash
-
- # Usage info
- show_help_job() {
-   local help="""Usage: run_job_k8s -s SCRIPT [-e ENV] [-c CONFIG] [-p POD] [-f FOLDER] [ARGS]
- Create a k8s job executing a script
-
-   -h          display this help and exit
-   -s SCRIPT   run script SCRIPT on a pod (SCRIPT must be a .sc file)
-   -e ENV      opt. set execution environment (default to testing)
-   -c CONFIG   opt. secret file needed for the script (must be a .sc file, not a .secret file)
-   -p POD      opt. name of the pod to create (default to $USERNAME)
-   -f FOLDER   opt. name of the folder containing the scripts to execute (if SCRIPT needs other files)
-   ARGS        opt. additional arguments for SCRIPT
-
- The organisation of the files must be the same locally as on the pod:
-   - /code containing the script to execute (arg -s) and the other needed files (if the arg -f is used, it must reference this directory)
-   - /conf containing the secret file (arg -c if used)
- E.g. in the script \"/code/script.sc\", to use a secret file \"/conf/secret.sc\", the import should look like \"import \$file.^.conf.secret.sc\"
- """
-   echo "$help"
- }
-
- run_job_k8s() {
-
-   # default values
-   local namespace="testing"
-   local name="$USERNAME"
-   local secret=""
-   local amm_folder=""
-   local amm_script=""
-
-   while getopts ":e:c:p:f:s:h" opt; do
-     case $opt in
-       e)
-         namespace="$OPTARG" >&2
-         ;;
-       p)
-         name="$OPTARG" >&2
-         ;;
-       c)
-         secret="$OPTARG" >&2
-         ;;
-       f)
-         amm_folder="$OPTARG" >&2
-         ;;
-       s)
-         amm_script="$OPTARG" >&2
-         ;;
-       h)
-         show_help_job
-         return 0
-         ;;
-       :)
-         echo "Option -$OPTARG requires an argument. Run run_job_k8s -h for help" >&2
-         return 0
-         ;;
-       \?)
-         echo "Invalid option: -$OPTARG. Run run_job_k8s -h for help" >&2
-         return 0
-         ;;
-     esac
-   done
-
-   if [ -z "$amm_script" ]; then
-     echo 'Missing -s. Run run_job_k8s -h for help' >&2
-     return 0
-   fi
-
-   shift "$((OPTIND-1))"
-
-   local script_args=$(
-     if [ "$#" -gt 0 ] ; then
-       printf '"'
-       join_by '", "' $*
-       printf '"'
-     fi
-   )
-
-   local IMAGE="lolhens/ammonite:2.5.4"
-   local JOB_NAME="job-ammonite-$name"
-
-   if [[ ! -r "$amm_script" ]]; then
-     echo "ammonite script not found $amm_script"
-     return 2
-   else
-     local CONFIG_MAP="config-$JOB_NAME"
-     local CONFIG_MAP_DIR="$(mktemp -d)"
-     local SECRET_MAP="secret-$JOB_NAME"
-
-     configure_kubectl_for $namespace
-
-     if [[ ! -z $amm_folder && -d $amm_folder ]] ; then
-       cp -r "$amm_folder/" "$CONFIG_MAP_DIR"
-     fi
-     cp "$amm_script" "$CONFIG_MAP_DIR/script.sc"
-
-     kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
-     kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"
-
-     kubectl -n $namespace get secret $SECRET_MAP && kubectl -n $namespace delete secret $SECRET_MAP
-     kubectl -n $namespace create secret generic $SECRET_MAP --from-file="$secret"
-
-     kubectl -n $namespace get job $JOB_NAME && kubectl -n $namespace delete job $JOB_NAME
-
-     echo "starting $JOB_NAME with $IMAGE"
-   fi
-
-   JOB_DEFINITION='
- apiVersion: batch/v1
- kind: Job
- metadata:
-   name: '$JOB_NAME'
-   namespace: '$namespace'
- spec:
-   template:
-     spec:
-       containers:
-         - name: '$JOB_NAME'
-           command: ["amm", "/code/script.sc"]
-           image: '$IMAGE'
-           args: ['$script_args']
-           env:
-             - name: POD_NAME
-               valueFrom:
-                 fieldRef:
-                   apiVersion: v1
-                   fieldPath: metadata.name
-             - name: POD_NAMESPACE
-               valueFrom:
-                 fieldRef:
-                   apiVersion: v1
-                   fieldPath: metadata.namespace
-             - name: HOST_IP
-               valueFrom:
-                 fieldRef:
-                   apiVersion: v1
-                   fieldPath: status.hostIP
-           volumeMounts:
-             - name: config
-               mountPath: /code
-             - name: secret
-               mountPath: /conf
-               readOnly: true
-           resources:
-             requests:
-               cpu: 500m
-               memory: 256Mi
-             limits:
-               cpu: 4000m
-               memory: 1Gi
-       nodeSelector:
-         workType: workers
-       restartPolicy: Never
-       volumes:
-         - name: config
-           configMap:
-             name: '$CONFIG_MAP'
-         - name: secret
-           secret:
-             secretName: '$SECRET_MAP'
- '
-
-
-   echo $JOB_DEFINITION > /tmp/job.yaml
-
-   kubectl -n $namespace apply -f /tmp/job.yaml
-
- }
-
-
- #!/usr/bin/env bash
-
- run_task() {
-   set -e
-
-   check_args "--namespace" $1
-   shift
-   NAMESPACE=$1
-   shift
-   check_args "--image" $1
-   shift
-   IMAGE=$1
-   shift
-   check_args "--name" $1
-   shift
-   NAME=$1
-   shift
-
-   set -x
-
-   kubectl -n ${NAMESPACE} run ${NAME} \
-     --image ${IMAGE} \
-     --restart=Never \
-     --attach --rm \
-     $*
- }
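A usage sketch (the image and trailing command are illustrative; the remaining arguments are passed straight to kubectl run):
    run_task --namespace testing --image busybox --name one-off-task -- echo hello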
- geocode_address() {
-   ADDRESS=$(sed -e 's: :%20:g' <(echo "$*"))
-   URL="https://maps.googleapis.com/maps/api/geocode/json?address=${ADDRESS}&key=${GOOGLE_API_KEY}"
-   curl $URL
- }
-
- search_business() {
-   SIREN=$1
-   shift
-   QUERY=$(sed -e 's: :+:g' <(echo "$*"))
-   URL="https://data.opendatasoft.com/api/records/1.0/search/?dataset=sirene_v3%40public&q=${QUERY}&sort=datederniertraitementetablissement&facet=trancheeffectifsetablissement&facet=libellecommuneetablissement&facet=departementetablissementi&refine.siren=${SIREN}"
-   curl $URL
- }
-
- #!/usr/bin/env bash
-
- # possible syntax:
- # login
- # login testing
- # login testing userid
- login() {
-   ENV=${1:-`gum choose testing staging production recette`} && \
-   USER=${2:-`gum input --placeholder username`} && \
-   PASSWORD=`gum input --password --placeholder password` && \
-   TOKEN=`$SCRIPT_FULL_PATH/scala/auth.sc login --env $ENV --user $USER --password $PASSWORD` && \
-   export TOKEN_$ENV=$TOKEN && \
-   echo "login success for $USER on $ENV" >&2
- }
-
- # you need to call login first (see above)
- # possible syntax:
- # recompute_tour
- # recompute_tour testing
- # recompute_tour testing draft
- # recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09
- # recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09 TODAY
- # recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09 FRIDAY
- recompute_tour() {
-   ENV=${1:-`gum choose testing staging production recette`}
-   MODE=${2:-`gum choose draft definitive`}
-   PROJECT_ID=${3:-`pick_project $ENV`}
-   DAY=${4:-`gum choose TODAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY SUNDAY`}
-   jwt_token $ENV
-   scala/tour_config.sc $MODE -t $TOKEN -p $PROJECT_ID -d $DAY
- }
-
- pick_project() {
-   ENV=${1:-`gum choose testing staging production recette`}
-   jwt_token $ENV
-   scala/tour_config.sc list -t $TOKEN -e $ENV | gum filter | cut -f1
- }
-
- jwt_token() {
-   ENV=${1:-`gum choose testing staging production recette`}
-   eval 'TOKEN=$TOKEN_'$ENV
-   if ! $SCRIPT_FULL_PATH/scala/auth.sc check -t $TOKEN -e $ENV ; then
-     login $ENV
-   fi
- }
-
- #!/usr/bin/env bash
-
- ftp_ikea_k8s() {
-   SSH_LOCAL_PORT=2230
-   FTP_LOCAL_PORT=25500
-   start_ssh_bastion testing $SSH_LOCAL_PORT
-
-   lsof -ti tcp:$FTP_LOCAL_PORT | xargs kill
-
-   bastion_config=$(mktemp)
-   cat > "$bastion_config" <<EOF
- UserKnownHostsFile /dev/null
- StrictHostKeyChecking no
- User root
- Host bastion_ftp
-   HostName 127.0.0.1
-   Port 2230
-   LocalForward 25500 ft.centiro.ikea.com:22
- EOF
-
-   ssh -f -N \
-     -F "$bastion_config" \
-     "bastion_ftp"
-
-   sftp -P $FTP_LOCAL_PORT colisweb.fr@127.0.0.1
- }
-
- #!/usr/bin/env bash
-
- # usage:
- # jconsole_k8s testing colisweb-api-web
-
- jconsole_k8s() {
-   ENV=$1
-   NAME=$2
-
-   start_ssh_bastion $ENV 2242
-   POD_IP=$( \
-     kubectl -n $ENV get pods -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIP}{"\n"}{end}' \
-     | grep "$NAME" | cut -d' ' -f2 | head -1 \
-   )
-   echo "selected POD with ip $POD_IP"
-   echo "use 'root' as password"
-   ssh -f -N -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -D 7777 root@127.0.0.1 -p 2242
-
-   jconsole \
-     -J-DsocksProxyHost=localhost \
-     -J-DsocksProxyPort=7777 \
-     -J-DsocksNonProxyHosts= \
-     service:jmx:rmi:///jndi/rmi://$POD_IP:7199/jmxrmi \
-     &
-
-   echo "remember to stop with 'stop_ssh_bastion'"
-
- }
-
- #!/usr/bin/env bash
-
- # Interactive console on an existing pod. See also run_ruby_k8s
- # Ex :
- # railsc_k8s_old production
- # railsc_k8s_old production "User.where(email:'toni@colisweb.com')"
- railsc_k8s_old() {
-   ENV=$1
-   COMMAND=$2
-   configure_kubectl_for $ENV
-   POD=$(kubectl -n $ENV get pods -o=name | grep colisweb-api-web | head -1 | sed -e 's/pod\///')
-   KUBERAILS="kubectl -n $ENV exec -ti $POD -- /usr/src/app/bin/rails c"
-   [ -z "$COMMAND" ] && eval $KUBERAILS || echo $COMMAND | eval $KUBERAILS
- }
-
- # Interactive console on a new pod. See also run_ruby_k8s
- # Ex :
- # railsc_k8s production
- railsc_k8s() {
-   ENV=$1
-   [[ $ENV = "production" || $ENV = "staging" ]] && default_tag="master-latest" || default_tag="${ENV}-latest"
-   local image_tag=${5:-$default_tag}
-   local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/colisweb-api:$image_tag"
-   local POD_NAME="colisweb-api-rails-console-$image_tag-$USERNAME"
-
-   kubectl -n $ENV get pod $POD_NAME && kubectl -n $ENV delete pod $POD_NAME
-
-   configure_kubectl_for $ENV
-   echo "starting with $IMAGE"
-
-   kubectl -n $ENV run $POD_NAME \
-     --image $IMAGE \
-     --restart=Never \
-     --overrides='{
-       "spec":{
-         "nodeSelector":{
-           "workType": "workers"
-         },
-         "containers":[
-           {
-             "name":"'$POD_NAME'",
-             "image":"'$IMAGE'",
-             "imagePullPolicy":"Always",
-             "command":[
-               "sleep",
-               "infinity"
-             ],
-             "resources":{
-               "limits":{
-                 "memory": "2048Mi"
-               }
-             },
-             "envFrom": [ {
-               "configMapRef": {
-                 "name": "colisweb-api"
-               }
-             }, {
-               "secretRef": {
-                 "name": "colisweb-api"
-               }
-             }
-             ]
-           }
-         ]
-       }
-     }
-     '
-
-   sleep 5
-   kubectl -n $ENV exec -it $POD_NAME -- /usr/src/app/bin/rails c
-
-   print "End of $POD_NAME "
-   kubectl -n $ENV delete pods $POD_NAME
- }
-
- # Ex :
- # create_user testing claire.lien@colisweb.com super_admin clairemdp
- create_user() {
-   ENV=$1
-   EMAIL=$2
-   ROLE=$3
-   PASSWORD=$4
-   railsc_k8s $ENV "User.where(email:'$EMAIL', role:'$ROLE').first_or_create.update_attributes!(password: '$PASSWORD')"
- }
-
- # Ex :
- # delete_user testing claire.lien@colisweb.com
- delete_user() {
-   ENV=$1
-   EMAIL=$2
-   railsc_k8s $ENV "User.find_by(email:'$EMAIL').destroy"
- }
-
- # NON Interactive console on a new pod, for long-running tasks (a few minutes)
- # See also railsc_k8s
- # file.txt will be available from /conf/data.txt in the ruby code
- # examples :
- # run_ruby_k8s testing demo <(echo "pp JSON.parse(File.read('/conf/data.txt'))") <(echo '{ "content": 123 }')
- # run_ruby_k8s testing demo ~/.oh-my-zsh/custom/dev-tools/shell-session/ruby/demo.rb <(echo '{ "content": 123 }')
- run_ruby_k8s() {
-   if [ $# -lt 4 ]; then
-     echo "usage : run_ruby_k8s production name-for-pod script.rb file.txt"
-     return 1
-   fi
-   local namespace=$1
-   local name=$2
-   local ruby_script=$3
-   local input_data=$4
-   [[ $namespace = "production" || $namespace = "staging" ]] && default_tag="master-latest" || default_tag="${namespace}-latest"
-   local image_tag=${5:-$default_tag}
-
-   if [ ! -r "$ruby_script" ]; then
-     echo "ruby script not found $ruby_script"
-     return 2
-   fi
-
-   if [ ! -r "$input_data" ]; then
-     echo "data not found $input_data"
-     return 3
-   fi
-
-
-   local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/colisweb-api:$image_tag"
-   local POD_NAME="colisweb-api-script-$name"
-   local CONFIG_MAP="config-$POD_NAME"
-   local CONFIG_MAP_DIR="$(mktemp -d)"
-
-
-   configure_kubectl_for $namespace
-
-
-   cp "$ruby_script" "$CONFIG_MAP_DIR/script.rb"
-   cp "$input_data" "$CONFIG_MAP_DIR/data.txt"
-
-   kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
-   kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"
-
-   kubectl -n $namespace get pod $POD_NAME && kubectl -n $namespace delete pod $POD_NAME
-
-   echo "starting with $IMAGE"
-   kubectl -n $namespace run $POD_NAME \
-     --image $IMAGE \
-     -ti \
-     --restart=Never \
-     --attach \
-     --rm \
-     --overrides='{
-       "spec":{
-         "nodeSelector":{
-           "workType": "workers"
-         },
-         "containers":[
-           {
-             "name":"'$POD_NAME'",
-             "image":"'$IMAGE'",
-             "imagePullPolicy":"Always",
-             "command":[
-               "/usr/src/app/bin/rails",
-               "r",
-               "/conf/script.rb"
-             ],
-             "resources":{
-               "limits":{
-                 "memory": "4096Mi"
-               }
-             },
-             "volumeMounts":[
-               {
-                 "name":"conf",
-                 "mountPath":"/conf"
-               }
-             ],
-             "envFrom": [ {
-               "configMapRef": {
-                 "name": "colisweb-api"
-               }
-             }, {
-               "secretRef": {
-                 "name": "colisweb-api"
-               }
-             }
-             ]
-           }
-         ],
-         "volumes":[
-           {
-             "name":"conf",
-             "configMap":{ "name":"'$CONFIG_MAP'" }
-           }
-         ]
-       }
-     }
-     '
-
-   kubectl -n $namespace delete configmap $CONFIG_MAP
- }
-
- # example:
- # update_pickup_cp testing <( echo '{"wrong_cp": "59123", "corrected_cp": "59223", "delivery_ids": ["4192421", "4192425"]}' )
- update_pickup_cp() {
-   run_ruby_k8s $1 update-pickup-cp "$SCRIPT_FULL_PATH/ruby/update_pickup_cp.rb" $2
- }
-
-
-
- update_all_prices() {
-   local namespace=$1
-   local json_prices=$2
-
-   local json_size=$(wc -c < "$json_prices")
-
-   if ((json_size > 940000)); then
-     command -v jq || (echo "jq not found (use brew install jq)" && return 1)
-     local max_lines=3000
-     local total_lines=$(jq '. | length' $json_prices)
-     local iterations=$((total_lines / max_lines + 1))
-     echo "$json_prices is too big, I'll split it for you in blocks of $max_lines lines. It will take $iterations runs"
-     for (( i = 0 ; i < iterations ; i++ )) ; do
-       local start=$((i * max_lines))
-       local end=$(( (i + 1) * max_lines))
-       local split_file=$(mktemp)
-       jq -c ".[$start:$end]" $json_prices > $split_file
-       local split_lines=$(jq '. | length' $split_file)
-       echo "starting iteration $i from $start to $end with $split_file which has $split_lines lines"
-       run_ruby_k8s $namespace "update-prices-$i" "$SCRIPT_FULL_PATH/ruby/update_prices.rb" $split_file
-     done
-   else
-     run_ruby_k8s $namespace "update-prices" "$SCRIPT_FULL_PATH/ruby/update_prices.rb" $json_prices
-   fi
- }
-
-
- update_surveys() {
-   local namespace=$1
-   local csv_surveys=$2
-
-   local csv_size=$(wc -c < "$csv_surveys")
-
-
-   if ((csv_size > 940000)); then
-     local max_lines=400
-     local total_lines=$(wc -l < $csv_surveys)
-     local iterations=$((total_lines / max_lines + 1))
-     echo "$csv_surveys is too big, I'll split it for you in blocks of $max_lines lines. It will take $iterations runs"
-     for (( i = 0 ; i < iterations ; i++ )) ; do
-       local start=$((i * max_lines + 2))
-       local end=$(( (i + 1) * max_lines + 1))
-       local split_file=$(mktemp)
-       head -1 $csv_surveys > $split_file
-       sed -n ''"$start,${end}p" $csv_surveys >> $split_file
-
-
-       local split_lines=$(wc -l < $split_file)
-       echo "starting iteration $i from $start to $end with $split_file which has $split_lines lines"
-       run_ruby_k8s $namespace "reimport-surveys-$i" "$SCRIPT_FULL_PATH/ruby/feedback_kpi_reuploader.rb" $split_file
-     done
-   else
-     run_ruby_k8s $namespace "reimport-surveys" "$SCRIPT_FULL_PATH/ruby/feedback_kpi_reuploader.rb" $csv_surveys
-   fi
- }
-
- #!/usr/bin/env bash
-
- configure_gitlab_ssh() {
-   tmp_dir=$(mktemp -d)
-   ssh-keyscan gitlab.com > $tmp_dir/known_hosts
-   echo "$SSH_PRIVATE_KEY" > $tmp_dir/id_rsa
-   chmod 600 $tmp_dir/id_rsa
-   ssh -i $tmp_dir/id_rsa -T git@gitlab.com
-   rm -Rf $tmp_dir
- }
-
-
- configure_gitlab_ssh_home() {
-   mkdir ~/.ssh
-   ssh-keyscan gitlab.com >> ~/.ssh/known_hosts
-   echo "$SSH_PRIVATE_KEY" > ~/.ssh/id_rsa
-   chmod 600 ~/.ssh/id_rsa
-   ssh -T git@gitlab.com
- }
-
- #!/usr/bin/env bash
-
- datadog_schedule_downtime() {
-   SERVICES=$1
-   DOWNTIME_MINUTES=${2:-30}
-
-   if [[ "$ENVIRONMENT" == "production" ]] ; then
-     log "scheduling downtime for $SERVICES in $ENVIRONMENT"
-   else
-     return 0
-   fi
-
-   for SERVICE in $SERVICES ; do
-     datadog_schedule_downtime_single $SERVICE $DOWNTIME_MINUTES
-   done
- }
-
- datadog_schedule_downtime_single() {
-   local SERVICE=$1
-   local DOWNTIME_MINUTES=$2
-
-   START=$(date +%s)
-   END=$((START + 60 * DOWNTIME_MINUTES))
-
-   log "scheduling a downtime on datadog for $SERVICE ($DOWNTIME_MINUTES minutes)"
-   curl -X POST "https://api.datadoghq.com/api/v1/downtime" \
-     -H "Content-Type: application/json" \
-     -H "DD-API-KEY: ${DD_API_KEY}" \
-     -H "DD-APPLICATION-KEY: ${DD_APP_KEY}" \
-     -d @- <<EOF
- {
-   "active": true,
-   "downtime_type": 0,
-   "start": $START,
-   "end": $END,
-   "message": "CA Deployment - performance for $SERVICE may be lower for next $DOWNTIME_MINUTES min",
-   "monitor_tags": [
-     "service:$SERVICE",
-     "performance"
-   ],
-   "scope": [
-     "env:production"
-   ],
-   "timezone": "Europe/Paris"
- }
- EOF
- }
1722
-
1723
- docker_build_push() {
1724
- read -r -a BUILD_ARGS <<< "$1"
1725
- DOCKER_BUILD_ARGS="--build-arg VCS_REF=$(git rev-parse --short HEAD)"
1726
- for ARG_NAME in "${BUILD_ARGS[@]}"
1727
- do
1728
- DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS --build-arg $ARG_NAME=${!ARG_NAME}"
1729
- done
1730
-
1731
- if ! image_exists $DOCKER_REGISTRY_ID $APPLICATION $CI_COMMIT_SHORT_SHA ; then
1732
- docker pull $DOCKER_IMAGE || true
1733
- docker build $DOCKER_BUILD_ARGS -t $DOCKER_IMAGE_SHA --cache-from $DOCKER_IMAGE $DOCKER_STAGE_PATH
1734
- docker push $DOCKER_IMAGE_SHA
1735
- fi
1736
- }
1737
-
1738
- docker_promote() {
1739
- # inspired by https://dille.name/blog/2018/09/20/how-to-tag-docker-images-without-pulling-them/
1740
- OLD_TAG=${1//[^0-9a-zA-Z-.]/_}
1741
- NEW_TAG=${2//[^0-9a-zA-Z-.]/_}
1742
- echo "promoting from $OLD_TAG to $NEW_TAG"
1743
- TOKEN=$(aws_ecr_token)
1744
- CONTENT_TYPE="application/vnd.docker.distribution.manifest.v2+json"
1745
- MANIFESTS_API="https://${DOCKER_REGISTRY}/v2/${APPLICATION}/manifests"
1746
-
1747
- if MANIFEST=$(curl --fail -H "Authorization: Basic $TOKEN" -H "Accept: ${CONTENT_TYPE}" "$MANIFESTS_API/${OLD_TAG}"); then
1748
- echo "authenticated on $MANIFESTS_API"
1749
- else
1750
- return 1
1751
- fi
1752
- if curl --fail -H "Authorization: Basic $TOKEN" -X PUT -H "Content-Type: ${CONTENT_TYPE}" -d "${MANIFEST}" "$MANIFESTS_API/$NEW_TAG" ; then
1753
- echo "promoted ${APPLICATION} from $OLD_TAG to $NEW_TAG"
1754
- else
1755
- return 2
1756
- fi
1757
- }
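A hedged example of promoting an image by retagging its manifest; APPLICATION, DOCKER_REGISTRY and both tags are assumptions, not values from this repository:

export DOCKER_REGISTRY="123456789012.dkr.ecr.eu-west-1.amazonaws.com"   # hypothetical ECR host
export APPLICATION="orders-api"                                         # hypothetical repository name
docker_promote "develop-abc1234" "production" || exit 1                 # copies the manifest to the new tag without pulling layers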
1758
-
1759
- ensure_images_exists() {
1760
- for IMAGE_TO_CHECK in $(echo $1 | tr "," "\n"); do
1761
- image_exists ${DOCKER_REGISTRY_ID} ${IMAGE_TO_CHECK} ${VERSION} || return 1
1762
- done
1763
- }
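Sketch of a pre-deploy guard built on ensure_images_exists; the registry id, image list and VERSION are illustrative:

export DOCKER_REGISTRY_ID="123456789012"
export VERSION="1.4.2"
ensure_images_exists "orders-api,orders-worker" || { echo "missing image for $VERSION"; exit 1; }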
1764
- #!/usr/bin/env bash
1765
-
1766
- extract_yaml_config_variable() {
1767
- set +e
1768
- set +x
1769
-
1770
- check_args "--environment" $1
1771
- shift
1772
- ENVIRONMENT=$1
1773
- shift
1774
-
1775
- check_args "--configs-path" $1
1776
- shift
1777
- CONFIGS_PATH=$1
1778
- shift
1779
-
1780
- check_args "--variable" $1
1781
- shift
1782
- VARIABLE=$1
1783
- shift
1784
-
1785
- [[ "$1" == "--optional" ]] && OPTIONAL=true || OPTIONAL=false
1786
-
1787
- if [ ! -f ${CONFIGS_PATH}/common.yaml ]; then
1788
- echo >&2 "Missing $CONFIGS_PATH/common.yaml configuration file"
1789
- exit 1
1790
- fi
1791
- if [ ! -f ${CONFIGS_PATH}/${ENVIRONMENT}.yaml ]; then
1792
- echo >&2 "Missing $CONFIGS_PATH/$ENVIRONMENT.yaml configuration file"
1793
- exit 1
1794
- fi
1795
- if [ ! -f ${CONFIGS_PATH}/${ENVIRONMENT}-secrets.yaml ]; then
1796
- echo >&2 "Missing $CONFIGS_PATH/$ENVIRONMENT-secrets.yaml configuration file"
1797
- exit 1
1798
- fi
1799
-
1800
- result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/$ENVIRONMENT-secrets.yaml")
1801
- if [ $? -ne 0 ] || [ "$result" = "null" ]; then
1802
- result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/$ENVIRONMENT.yaml")
1803
- if [ $? -ne 0 ] || [ "$result" = "null" ]; then
1804
- result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/common.yaml")
1805
- if [ $? -ne 0 ] || [ "$result" = "null" ]; then
1806
- if [ $OPTIONAL = true ]; then
1807
- echo ""
1808
- exit 0
1809
- else
1810
- echo >&2 "Missing path $VARIABLE in $CONFIGS_PATH/$ENVIRONMENT-secrets.yaml, $CONFIGS_PATH/$ENVIRONMENT.yaml or $CONFIGS_PATH/common.yaml"
1811
- exit 1
1812
- fi
1813
- fi
1814
- fi
1815
- fi
1816
- echo ${result}
1817
- }
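Illustrative calls, assuming deploy/common.yaml, deploy/staging.yaml and deploy/staging-secrets.yaml exist; the yq paths are hypothetical:

DB_HOST=$(extract_yaml_config_variable --environment staging --configs-path ./deploy --variable ".ordersconfig.postgres.host")
SMTP_HOST=$(extract_yaml_config_variable --environment staging --configs-path ./deploy --variable ".ordersconfig.smtp.host" --optional)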
1818
- #!/usr/bin/env bash
1819
-
1820
- flyway_clean() {
1821
- HOST="$1"
1822
- PORT="$2"
1823
- DATABASE="$3"
1824
- USER="$4"
1825
- PASSWORD="$5"
1826
-
1827
- kubectl run -it --rm flywayclean \
1828
- --image=flyway/flyway \
1829
- --restart=Never \
1830
- -- \
1831
- -cleanDisabled=false \
1832
- -url="jdbc:postgresql://$HOST:$PORT/$DATABASE" \
1833
- -user="$USER" \
1834
- -password="$PASSWORD" \
1835
- clean
1836
- }
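A hedged example of wiping a database with flyway_clean; every connection value below is a placeholder:

# drops all objects in the target database - irreversible
flyway_clean "orders-db.internal.example" 5432 "orders" "orders_user" "s3cr3t"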
1837
-
1838
- #!/usr/bin/env bash
1839
-
1840
- FLYWAY_VERSION="5.2.4"
1841
-
1842
-
1843
- get_yaml_variable() {
1844
- extract_yaml_config_variable --environment ${ENVIRONMENT} --configs-path $(pwd)/deploy --variable $@
1845
- }
1846
-
1847
- init_migrate_db() {
1848
- set -e
1849
-
1850
- check_env_vars 4 "APPLICATION" "ENVIRONMENT" "FLYWAY_VERSION" "MIGRATION_SQL_PATH"
1851
-
1852
- PG_YAML_PATH=".${APPLICATION}config.postgres"
1853
-
1854
- DB_PORT="5432"
1855
- DB_HOST=$(get_yaml_variable "${PG_YAML_PATH}.host")
1856
- DB_INIT_USERNAME=$(get_yaml_variable "${PG_YAML_PATH}.initUsername")
1857
- DB_INIT_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.initPassword")
1858
- DB_DATABASE=$(get_yaml_variable "${PG_YAML_PATH}.database")
1859
- DB_USER=$(get_yaml_variable "${PG_YAML_PATH}.user")
1860
- DB_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.password")
1861
- DB_URL="jdbc:postgresql://${DB_HOST}:${DB_PORT}/${DB_DATABASE}"
1862
-
1863
- DB_RO_USER=$(get_yaml_variable "${PG_YAML_PATH}.readOnlyUser" --optional)
1864
- DB_RO_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.readOnlyPassword" --optional)
1865
-
1866
- unset KUBECONFIG
1867
-
1868
- configure_kubectl_for_ci ${ENVIRONMENT}
1869
-
1870
- kube_init_service_database \
1871
- --namespace ${ENVIRONMENT} \
1872
- --service ${APPLICATION} \
1873
- --db_host ${DB_HOST} \
1874
- --db_port ${DB_PORT} \
1875
- --db_init_username ${DB_INIT_USERNAME} \
1876
- --db_init_password ${DB_INIT_PASSWORD} \
1877
- --db_database ${DB_DATABASE} \
1878
- --db_username ${DB_USER} \
1879
- --db_password ${DB_PASSWORD}
1880
-
1881
- if [[ ! -z "$DB_RO_USER" ]] && [[ ! -z "$DB_RO_USER" ]]; then
1882
- kube_init_database_readonly_account \
1883
- --namespace ${ENVIRONMENT} \
1884
- --service ${APPLICATION} \
1885
- --db_connection "$DB_INIT_USERNAME:$DB_INIT_PASSWORD@$DB_HOST:$DB_PORT" \
1886
- --db_database ${DB_DATABASE} \
1887
- --db_readonly_username ${DB_RO_USER} \
1888
- --db_readonly_password ${DB_RO_PASSWORD}
1889
- fi
1890
-
1891
- flyway_migrate \
1892
- --environment ${ENVIRONMENT} \
1893
- --namespace ${ENVIRONMENT} \
1894
- --service ${APPLICATION} \
1895
- --db_url ${DB_URL} \
1896
- --db_user ${DB_USER} \
1897
- --db_password ${DB_PASSWORD} \
1898
- --flyway_version ${FLYWAY_VERSION} \
1899
- --flyway_sql_folder $(pwd)/${MIGRATION_SQL_PATH}
1900
- }
1901
-
1902
- flyway_migrate() {
1903
- set -e
1904
-
1905
- extract_args 8 \
1906
- environment namespace service db_url db_user db_password flyway_version flyway_sql_folder $*
1907
-
1908
- echo "running flyway migrations for service $service in environment $environment namespace $namespace for db_url $db_url with user $db_user"
1909
- echo "migration files expected in $flyway_sql_folder"
1910
-
1911
- CONFIGMAP_NAME="$service-flyway-migration-sql"
1912
- POD_NAME="$service-flyway-migration"
1913
-
1914
- configure_kubectl_for_ci $environment
1915
-
1916
- kubectl -n $namespace delete configmap $CONFIGMAP_NAME --ignore-not-found
1917
- kubectl -n $namespace delete pod $POD_NAME --ignore-not-found
1918
- kubectl -n $namespace create configmap $CONFIGMAP_NAME --from-file=$flyway_sql_folder
1919
-
1920
- kubectl -n $namespace run $POD_NAME --image ignored -ti --restart=Never --attach --rm --overrides='
1921
- {
1922
- "spec":{
1923
- "containers":[
1924
- {
1925
- "name":"'$POD_NAME'",
1926
- "image":"boxfuse/flyway:'$flyway_version'",
1927
- "command":["flyway", "-url='$db_url'", "-user='$db_user'", "-password='$db_password'", "migrate"],
1928
- "volumeMounts":[
1929
- {
1930
- "name":"sql",
1931
- "mountPath":"/flyway/sql"
1932
- }
1933
- ]
1934
- }
1935
- ],
1936
- "volumes":[
1937
- {
1938
- "name":"sql",
1939
- "configMap":{
1940
- "name":"'$CONFIGMAP_NAME'"
1941
- }
1942
- }
1943
- ]
1944
- }
1945
- }
1946
- '
1947
-
1948
- kubectl -n $namespace delete configmap $CONFIGMAP_NAME
1949
- }
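Sketch of calling flyway_migrate directly instead of going through init_migrate_db; all values, including the SQL folder, are illustrative:

flyway_migrate \
  --environment staging \
  --namespace staging \
  --service orders \
  --db_url "jdbc:postgresql://orders-db.internal.example:5432/orders" \
  --db_user orders_user \
  --db_password "s3cr3t" \
  --flyway_version "5.2.4" \
  --flyway_sql_folder "$(pwd)/migrations"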
1950
-
1951
- #!/usr/bin/env bash
1952
-
1953
- record_git_commit() {
1954
- for file in $GIT_COMMIT_FILES; do
1955
- sed -i 's&GIT_COMMIT&'"${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHORT_SHA}&" "$file"
1956
- done
1957
- }
1958
-
1959
- gitlab_import_pgp_key() {
1960
- if [ "$GITLAB_PGP_PRIVATE_KEY" != "" ]
1961
- then
1962
- # pipe the key straight to gpg: a process substitution assigned to a variable is not reliably readable afterwards
1963
- echo "$GITLAB_PGP_PRIVATE_KEY" | gpg --import > /dev/null
1964
- else
1965
- echo '$GITLAB_PGP_PRIVATE_KEY is not set'
1966
- return 1
1967
- fi
1968
- }
1969
-
1970
- git_reveal() {
1971
- gitlab_import_pgp_key
1972
- gpg --decrypt $1
1973
- }
1974
- #!/usr/bin/env bash
1975
-
1976
- helm_deploy_v3() {
1977
- APPLICATION=$1
1978
- ENVIRONMENT=$2
1979
- VERSION=$3
1980
- deploy_chart_v3 \
1981
- --path_configs deploy \
1982
- --path_chart deploy/$APPLICATION \
1983
- --application $APPLICATION \
1984
- --environment $ENVIRONMENT \
1985
- --namespace $ENVIRONMENT \
1986
- --helm_extra_args --set global.version=$VERSION
1987
- }
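Typical invocation sketch from a CI job, assuming the chart lives under deploy/<application>; the application name is hypothetical:

helm_deploy_v3 "orders-api" "staging" "$CI_COMMIT_REF_SLUG-$CI_COMMIT_SHORT_SHA"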
1988
-
1989
- deploy_chart_v3() {
1990
- set -e
1991
- set -x
1992
-
1993
- # Rigid parsing, but all args are mandatory (except the last) and flexible ordering is unnecessary
1994
- check_args "--path_configs" $1; shift
1995
- path_configs=$1; shift
1996
- check_args "--path_chart" $1; shift
1997
- path_chart=$1; shift
1998
- check_args "--application" $1; shift
1999
- application=$1; shift
2000
- check_args "--environment" $1; shift
2001
- environment=$1; shift
2002
- check_args "--namespace" $1; shift
2003
- namespace=$1; shift
2004
- if [ $# -ne 0 ]; then
2005
- check_args "--helm_extra_args" $1; shift
2006
- helm_extra_args=$*
2007
- fi
2008
-
2009
- echo "================================"
2010
- echo " Deploying $application"
2011
- echo " - Environment: $environment"
2012
- echo " - Namespace: $namespace"
2013
- echo "================================"
2014
-
2015
- root_path=$(pwd)
2016
-
2017
- # Check the config files exist
2018
-
2019
- check_config_file ${root_path}/${path_configs}/common.yaml
2020
- check_config_file ${root_path}/${path_configs}/${namespace}.yaml
2021
- check_config_file ${root_path}/${path_configs}/${namespace}-secrets.yaml
2022
-
2023
- # Check the chart exists
2024
- if [ ! -d ${root_path}/${path_chart} ] || [ ! -f ${root_path}/${path_chart}/Chart.yaml ]; then
2025
- echo "Bad Chart $root_path/$path_chart : does not exists or missing Chart.yaml"
2026
- print_usage
2027
- exit 1
2028
- fi
2029
-
2030
- # Unset Kubectl configuration made via the KUBECONFIG env variable
2031
- # it would override the config made by configure_kubectl_for
2032
- # for example, using GitLab runners in Kubernetes sets this variable and causes conflicts
2033
- unset KUBECONFIG
2034
-
2035
- # Configure Kubectl
2036
- configure_kubectl_for_ci ${environment}
2037
-
2038
- # Configure helm3
2039
- helm3 version --namespace ${namespace} || true
2040
- # the helm stable repo has changed and must be updated manually, in versions < v2.17.0
2041
- helm3 repo add colisweb s3://colisweb-helm-charts/colisweb
2042
- helm3 repo add stable https://charts.helm.sh/stable
2043
- helm3 repo update
2044
- helm3 dependency update ${root_path}/${path_chart}
2045
-
2046
- # Gather values/*.yaml files
2047
- values_path="${root_path}/${path_chart}/values"
2048
- values_files=''
2049
- [ -d $values_path ] && values_files=$(find $values_path -maxdepth 1 -type f -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')
2050
-
2051
- # Deploy
2052
- helm3 upgrade --install \
2053
- --namespace ${namespace} \
2054
- ${values_files} \
2055
- -f ${root_path}/${path_configs}/common.yaml \
2056
- -f ${root_path}/${path_configs}/${namespace}.yaml \
2057
- -f ${root_path}/${path_configs}/${namespace}-secrets.yaml \
2058
- ${helm_extra_args} \
2059
- ${application} ${root_path}/${path_chart}
2060
-
2061
- #send event to dd
2062
- PUBLISHED_VERSION="$CI_COMMIT_REF_NAME-$CI_COMMIT_SHA"
2063
- emit_datadog_deploy_event --environment $environment --service $application --version $PUBLISHED_VERSION
2064
-
2065
- echo "================================"
2066
- echo " Deployed $application"
2067
- echo " - Environment: $environment"
2068
- echo " - Namespace: $namespace"
2069
- echo "================================"
2070
-
2071
- set +x
2072
- }
2073
-
2074
- verify_deployments_v3() {
2075
- set -e
2076
-
2077
- # usage :
2078
- # verify_deployments staging price
2079
- # verify_deployments -t 15m testing price
2080
-
2081
- if [ "$1" = "-t" ] ; then
2082
- TIMEOUT=$2
2083
- shift
2084
- shift
2085
- else
2086
- TIMEOUT=5m
2087
- fi
2088
-
2089
- NAMESPACE=$1
2090
- RELEASE=$2
2091
-
2092
- # Get all Deployments names from the deployed chart
2093
- DEPLOYMENTS=(
2094
- $(helm3 get manifest --namespace $NAMESPACE $RELEASE | yq -rs '.[] | select(.kind=="Deployment") | .metadata.name')
2095
- )
2096
-
2097
- echo "verifying on $NAMESPACE deployments ${DEPLOYMENTS[@]} with a timeout of $TIMEOUT"
2098
-
2099
- PIDS=()
2100
- for D in "${DEPLOYMENTS[@]}"; do
2101
- kubectl -n ${NAMESPACE} rollout status deployment ${D} --timeout=${TIMEOUT} &
2102
- PIDS+=($!)
2103
- done
2104
-
2105
- for P in ${PIDS[@]}; do
2106
- wait $P
2107
-
2108
- if [ $? -ne 0 ]; then
2109
- echo "at least one deployment failed or timed out (after $TIMEOUT)"
2110
- exit 1
2111
- fi
2112
- done
2113
-
2114
- }
2115
-
2116
- print_usage() {
2117
- echo "Usage:"
2118
- echo "deploy_chart \\"
2119
- echo " --path_configs <path to .yaml namespaces and secret config files>"
2120
- echo " --path_chart <path to Helm Chart>"
2121
- echo " --application <application name used by Helm>"
2122
- echo " --environment <infrastructure environment>"
2123
- echo " --namespace <namespace>"
2124
- echo " --helm-extra-args <extra args to pass to helm, ex: --set my.value=42 --set your.setting=on>"
2125
- echo ""
2126
- }
2127
-
2128
- check_config_file() {
2129
- local filename=$1
2130
- if [ ! -f ${filename} ]; then
2131
- echo "Missing $filename configuration file"
2132
- print_usage
2133
- exit 1
2134
- fi
2135
- }
2136
-
2137
- #!/usr/bin/env bash
2138
-
2139
- configure_kubectl_for_ci() {
2140
- if [ -z ${GITLAB_PAT} ]; then
2141
- echo "Cannot configure kubectl: no GITLAB_PAT configured"
2142
- exit 1
2143
- fi
2144
-
2145
- infra_env="$1"
2146
- valid_envs="[testing][staging][production][performance][tests][recette]"
2147
- echo "$valid_envs" | grep -q "\[$infra_env\]"
2148
-
2149
- if [ $? -ne 0 ]; then
2150
- echo "Cannot configure kubectl for invalid env : $infra_env"
2151
- echo "choose one of $valid_envs"
2152
- exit 1
2153
- fi
2154
-
2155
- mkdir -p ~/.kube
2156
- curl -fsS \
2157
- --header "PRIVATE-TOKEN: $GITLAB_PAT" \
2158
- "https://gitlab.com/api/v4/projects/8141053/jobs/artifacts/$infra_env/raw/$infra_env.kubeconfig?job=4_kubernetes_config_output" \
2159
- > ~/.kube/$infra_env.kubeconfig
2160
-
2161
- curl_return_code=$?
2162
- if [ ${curl_return_code} -ne 0 ]; then
2163
- echo "Cannot configure kubectl for $infra_env, get configuration failed with code $curl_return_code"
2164
- exit ${curl_return_code}
2165
- fi
2166
-
2167
- rm -f ~/.kube/config
2168
- ln -s ~/.kube/$infra_env.kubeconfig ~/.kube/config
2169
- echo "Configured kubectl for env : $infra_env"
2170
- }
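Hedged example; it assumes a GITLAB_PAT with read access to the infrastructure project referenced above:

export GITLAB_PAT="glpat-xxxxxxxxxxxxxxxxxxxx"   # hypothetical personal access token
configure_kubectl_for_ci staging                 # downloads staging.kubeconfig and points ~/.kube/config at it
kubectl get pods -n staging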
2171
- notify_new_deployment() {
2172
- jq --version || (apt update && apt install -y jq)
2173
-
2174
- CHAT_URL=${1:-$DEFAULT_CHAT_URL}
2175
-
2176
- STATUS=$(echo $CI_JOB_STATUS | tr '[:lower:]' '[:upper:]' )
2177
- ENV_NAME=$(echo $ENVIRONMENT | tr '[:lower:]' '[:upper:]' )
2178
-
2179
- JOB_LINK="<$CI_JOB_URL| $CI_JOB_NAME $CI_JOB_ID>"
2180
-
2181
- DESCRIPTION="
2182
- $STATUS : Deployment for $CI_PROJECT_NAME on $ENV_NAME
2183
- $JOB_LINK
2184
- $CI_COMMIT_TITLE
2185
- "
2186
-
2187
- JSON_MESSAGE=$(jq -n --arg text "$DESCRIPTION" '{text: $text }')
2188
- curl -X POST $CHAT_URL \
2189
- --header "Content-Type: application/json" \
2190
- --data "$JSON_MESSAGE"
2191
- }
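Illustrative call; the webhook URL is a placeholder for a Slack-compatible endpoint:

notify_new_deployment "https://hooks.slack.com/services/T000/B000/XXXX"   # posts the current CI job status to the channel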
2192
- notify_new_version() {
2193
-
2194
- ! test -z $CI_COMMIT_TAG || exit 0
2195
-
2196
- jq --version || (apt update && apt install -y jq)
2197
-
2198
- KIND=$1
2199
- CHAT_URL=${2:-$DEFAULT_CHAT_URL}
2200
-
2201
- STATUS=$(echo $CI_JOB_STATUS | tr '[:lower:]' '[:upper:]' )
2202
- ENV_NAME=$(echo $ENVIRONMENT | tr '[:lower:]' '[:upper:]' )
2203
- TITLE="$ENV_NAME *$STATUS* $KIND for version *$CI_COMMIT_TAG* of *$CI_PROJECT_NAME* "
2204
-
2205
- RELEASE_URL="https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/releases/$CI_COMMIT_TAG"
2206
-
2207
- NOTES=$(curl --header "PRIVATE-TOKEN: $GITLAB_TOKEN" $RELEASE_URL |
2208
- jq .description |
2209
- sed -e 's/^"//' -e 's/"$//' |
2210
- sed -E 's/\[([^]]+)\]\(([^)]+)\)/<\2|\1>/g' |
2211
- sed -E 's/\\n/\'$'\n/g')
2212
-
2213
- JOB_LINK="<$CI_JOB_URL| $CI_JOB_NAME $CI_JOB_ID>"
2214
-
2215
- DESCRIPTION="
2216
- $TITLE
2217
- $JOB_LINK
2218
- $NOTES
2219
- "
2220
-
2221
- JSON_MESSAGE=$(jq -n --arg text "$DESCRIPTION" '{text: $text }')
2222
- curl -X POST $CHAT_URL \
2223
- --header "Content-Type: application/json" \
2224
- --data "$JSON_MESSAGE"
2225
- }
2226
- #!/usr/bin/env bash
2227
-
2228
- skip_sbt_compile_cache() {
2229
- COMPARED_BRANCH="${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-$CI_DEFAULT_BRANCH}"
2230
- echo "branch to compare to: $COMPARED_BRANCH"
2231
- git fetch origin $COMPARED_BRANCH
2232
- echo "fetched $COMPARED_BRANCH"
2233
- [[ "$CI_COMMIT_REF_NAME" =~ ^(master|develop)$ || $(git diff origin/$COMPARED_BRANCH --exit-code -- project) ]]
2234
- }
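A sketch of how this helper could gate cache reuse in a job script; the cache directory is an assumption:

if skip_sbt_compile_cache; then
  echo "on master/develop or project/ changed: ignoring the sbt compile cache"
  rm -rf .sbt-compile-cache   # hypothetical cache location
fi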
2235
- #!/usr/bin/env bash
2236
-
2237
- # in case of trouble with functions for update history during import
2238
- # https://stackoverflow.com/questions/56729192/pg-restore-fails-when-trying-to-create-function-referencing-table-that-does-not
2239
-
2240
- # example: clone_databases --source_env testing --destination_env recette --services "order,notification,parcel,ikea"
2241
- clone_databases() {
2242
- export USERNAME="database-cloner"
2243
-
2244
- set -e
2245
-
2246
- extract_args 3 source_env destination_env services $*
2247
-
2248
- dump_databases "$source_env" "$services"
2249
- import_databases "$destination_env" "$services"
2250
- }
2251
-
2252
- dump_databases() {
2253
- local env="$1"
2254
- local services=$(echo -n "$2" | tr ',' '\n')
2255
-
2256
- database_k8s_output_dump_path="/tmp/database_k8s_output_dump"
2257
-
2258
- configure_kubectl_for "$env"
2259
- set +e
2260
- database_k8s "$env" > "$database_k8s_output_dump_path"
2261
- set -e
2262
-
2263
- source_pg_local_port=$(extract_pg_local_port "$database_k8s_output_dump_path")
2264
-
2265
- for service in $services
2266
- do
2267
- service_path="/tmp/$service"
2268
-
2269
- set +e
2270
- git clone "git@gitlab.com:colisweb/back/$service.git" "$service_path"
2271
- set -e
2272
-
2273
- if cd "$service_path"; then
2274
- echo "dump the database for service $service.."
2275
-
2276
- git secret reveal -f
2277
-
2278
- PG_YAML_PATH=".${service}config.postgres"
2279
-
2280
- SOURCE_DB_DATABASE=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.database")
2281
- SOURCE_DB_USER=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.user")
2282
- SOURCE_DB_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.password")
2283
-
2284
- export PGPASSWORD="$SOURCE_DB_PASSWORD"
2285
-
2286
- DUMP_PATH="/tmp/db_dump_${service}.sql"
2287
- pg_dump --no-owner -h localhost -p "$source_pg_local_port" -U "$SOURCE_DB_USER" "$SOURCE_DB_DATABASE" > "$DUMP_PATH"
2288
-
2289
- cd ..
2290
- rm -rf "$service_path"
2291
- else
2292
- echo "WARN: failed to clone $service - skipping"
2293
- fi
2294
- done
2295
- }
2296
-
2297
- import_databases() {
2298
- local env="$1"
2299
- local services=$(echo -n "$2" | tr ',' '\n')
2300
-
2301
- database_k8s_output_import_path="/tmp/database_k8s_output_import"
2302
-
2303
- configure_kubectl_for "$env"
2304
- set +e
2305
- database_k8s "$env" > "$database_k8s_output_import_path"
2306
- set -e
2307
-
2308
- destination_pg_local_port=$(extract_pg_local_port "$database_k8s_output_import_path")
2309
-
2310
- for service in $services
2311
- do
2312
- service_path="/tmp/$service"
2313
-
2314
- set +e
2315
- git clone "git@gitlab.com:colisweb/back/$service.git" "$service_path"
2316
- set -e
2317
-
2318
- if cd "$service_path"; then
2319
- echo "create and import database for $service.."
2320
-
2321
- git secret reveal -f
2322
-
2323
- PG_YAML_PATH=".${service}config.postgres"
2324
-
2325
- DB_PORT="5432"
2326
- DB_HOST=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.host")
2327
- DB_INIT_USERNAME=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.initUsername")
2328
- DB_INIT_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.initPassword")
2329
- DB_DATABASE=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.database")
2330
- DB_USER=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.user")
2331
- DB_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.password")
2332
-
2333
- kube_init_service_database \
2334
- --namespace ${env} \
2335
- --service ${service} \
2336
- --db_host ${DB_HOST} \
2337
- --db_port ${DB_PORT} \
2338
- --db_init_username ${DB_INIT_USERNAME} \
2339
- --db_init_password ${DB_INIT_PASSWORD} \
2340
- --db_database ${DB_DATABASE} \
2341
- --db_username ${DB_USER} \
2342
- --db_password ${DB_PASSWORD}
2343
-
2344
- echo "WARN: A complete clean of $DB_DATABASE on $DB_HOST will be operated"
2345
- read -rsn1 -p"Press any key to continue";echo
2346
- flyway_clean "$DB_HOST" "$DB_PORT" "$DB_DATABASE" "$DB_USER" "$DB_PASSWORD"
2347
-
2348
- DUMP_PATH="/tmp/db_dump_${service}.sql"
2349
- export PGPASSWORD="$DB_PASSWORD"
2350
- set +e
2351
- psql "postgres://$DB_USER@127.0.0.1:$destination_pg_local_port" -p "$DB_DATABASE" -f "$DUMP_PATH"
2352
- set -e
2353
-
2354
- cd ..
2355
- rm -rf "$service_path"
2356
- else
2357
- echo "WARN: failed to clone $service - skipping"
2358
- fi
2359
- done
2360
- }
2361
-
2362
- extract_pg_local_port() {
2363
- cat "$1" | grep 'postgres@127.0.0.1:' | sed 's/.*postgres@127.0.0.1:\(.*[0-9]\).*/\1/g'
2364
- }
2365
- #!/usr/bin/env bash
2366
-
2367
- emit_datadog_deploy_event() {
2368
- extract_args 3 environment service version $*
2369
- check_env_vars 1 "DD_API_KEY"
2370
-
2371
- response=$(
2372
- curl -X POST -H "Content-type: application/json" \
2373
- -d '{
2374
- "title": "deploying '"$service"' to '"$environment"'",
2375
- "text": "deploying '"$service"' version '"$version"' to '"$environment"'",
2376
- "priority": "normal",
2377
- "tags": ["service:'"$service"' ", "env:'"$environment"'" ,"action:'"deployment"'"] ,
2378
-
2379
- "alert_type": "Info"
2380
- }' \
2381
- "https://api.datadoghq.com/api/v1/events?api_key=$DD_API_KEY"
2382
- )
2383
-
2384
- #echo $response
2385
- EventID=$(echo $response | jq ".event.id")
2386
- url=$(echo $response | jq ".event.url")
2387
-
2388
- if [[ $EventID -ne 0 ]]; then
2389
- echo "event successfully created check in datadog UI : $url"
2390
- else
2391
- echo " failed to create event "
2392
- exit 1
2393
- fi
2394
- }
2395
-
2396
- #!/usr/bin/env bash
2397
-
2398
- # DEPRECATED
2399
- emit_datadog_error_events() {
2400
- set -e
2401
- extract_args 4 title text priority environment $*
2402
- check_env_vars 1 "DD_API_KEY"
2403
-
2404
- curl -X POST -H "Content-type: application/json" \
2405
- -d '{
2406
- "title": "'"$title"'",
2407
- "text": "'"$text"'",
2408
- "priority": "'"$priority"'",
2409
- "tags": ["environment:'"$environment"'"],
2410
- "alert_type": "Error"
2411
- }' \
2412
- "https://api.datadoghq.com/api/v1/events?api_key=$DD_API_KEY"
2413
- }
2414
-
2415
- #!/usr/bin/env bash
2416
- terraform_init() {
2417
- SECTION=$1
2418
- ENV=$2
2419
- cd $SECTION
2420
- terraform init -input=false
2421
- terraform workspace select $ENV || terraform workspace new $ENV
2422
- }
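Illustrative call; the section directory and workspace name are placeholders:

terraform_init infrastructure/network staging   # runs terraform init and selects (or creates) the staging workspace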