@colisweb/rescript-toolkit 5.8.0 → 5.8.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2659 @@
1
+ #!/usr/bin/env bash
2
+
3
+ #VARIABLES
4
+ export SCRIPT_FULL_PATH=$(dirname "$0")
5
+
6
+ ##FUNCTIONS
7
+ # https://stackoverflow.com/questions/1527049/how-can-i-join-elements-of-an-array-in-bash
8
# Join the remaining arguments with a delimiter.
# $1 - delimiter, $2.. - elements. Prints nothing when fewer than two args.
# e.g. join_by , a b c  ->  a,b,c
join_by() {
  local delim=${1-}
  local first=${2-}
  shift 2 || return 0
  # Prefix every remaining element with the delimiter and print in one go.
  printf %s "$first" "${@/#/$delim}"
}
14
+
15
# Print the remaining arguments joined by $2, wrapped in $1 ... $3.
# $1 - opening string, $2 - separator, $3 - closing string, $4.. - elements.
# Prints nothing at all when there are no elements.
# e.g. mkstring "[" "," "]" a b  ->  [a,b]
mkstring() {
  local start=$1
  local separator=$2
  local end=$3
  shift 3

  if [ $# -gt 0 ]; then
    # Bug fix: the previous 'printf $start' used caller data as the printf
    # FORMAT string (a '%' in it would corrupt output); use an explicit format.
    printf '%s' "$start"
    # Bug fix: quote separator and pass "$@" so elements with spaces survive.
    join_by "$separator" "$@"
    printf '%s' "$end"
  fi
}
27
+
28
# Compute one md5 over the contents of every file under the given paths:
# md5 each path's concatenated files, collect the hex digests (one per line),
# then md5 that digest list. Prints the final 32-char hex digest on stdout.
md5all() {
  local all_hash
  all_hash=$(mktemp)
  local name
  # Quote "$@"/"$name" so paths with spaces survive (was unquoted $*).
  for name in "$@"; do
    find "$name" -type f -exec cat {} \; | md5sum | cut -f1 -d ' ' >> "$all_hash"
  done
  # Redirect instead of 'cat | md5sum' (useless use of cat).
  md5sum < "$all_hash" | cut -f1 -d ' '
  # Bug fix: the temp file was previously leaked on every call.
  rm -f -- "$all_hash"
}
35
+
36
# Log a message to stderr, keeping stdout free for data output.
log() {
  echo "$*" 1>&2
}
39
+ #!/usr/bin/env bash
40
+
41
# Check that an expected flag ($1) was actually passed ($2).
# Logs to stderr and returns 1 when $2 is empty or differs from $1.
check_args() {
  # Bug fix: '$2' was unquoted, so an empty or multi-word value made the
  # '[ -z ... ]' test misbehave (syntax error / wrong arity).
  if [ -z "$2" ] || [ "$1" != "$2" ]; then
    echo >&2 "missing argument $1"
    return 1
  fi
}
47
+
48
# Verify that $1 environment variables (named by the following args) are all
# set and non-empty. Logs the first missing one to stderr and returns 1.
# e.g. check_env_vars 2 HOME USER
check_env_vars() {
  ArgsCount=$1 && shift
  i=0
  while (( i < ArgsCount )); do
    # ${!1} is indirect expansion: the value of the variable named by $1.
    if [[ -z "${!1}" ]]; then
      echo >&2 "missing ENV $1"
      return 1
    fi
    shift
    i=$(( i + 1 ))
  done
}
58
+
59
# Assign $3 to a shell variable named $1, after checking that the flag $2 is
# literally "--$1". On mismatch this exits the whole shell (not just returns).
extract_arg() {
  name=$1
  passed=$2
  value=$3
  if [ "--$name" != "$passed" ]; then
    echo "missing argument $name"
    exit 1
  fi
  # eval is needed for the dynamic variable name; '$value' stays single-quoted
  # so it is expanded by eval itself, protecting embedded spaces.
  eval $name='$value'
}
69
+
70
# Parse "--name value" pairs into shell variables named after each flag.
# $1      - number of expected argument names
# next $1 - the expected names, in order
# rest    - the actual "--name value" pairs, in the SAME order (positional,
#           not free-form: extract_arg exits on any out-of-order flag).
# e.g. extract_args 2 foo bar --foo 1 --bar 2   =>  foo=1 bar=2
extract_args() {
  declare -a Array_Args
  # ArgsCount is intentionally global (shared helper convention in this file).
  ArgsCount=$1 && shift
  for ((i = 0; i < $ArgsCount; i++)); do
    Array_Args[i]=$1 && shift
  done
  # Consume one "--name value" pair per expected name.
  for ArgName in "${Array_Args[@]}"; do
    extract_arg "$ArgName" $* && shift 2
  done
}
80
+
81
+ #!/usr/bin/env bash
82
+
83
# Log docker in to the Colisweb AWS ECR registry (account 949316342391).
# Requires AWS CLI v2 (get-login-password); when that fails, prints an upgrade
# hint plus the legacy v1 'get-login' command output as a fallback.
aws_ecr_login() {
  # Make a locally installed AWS CLI visible (e.g. inside CI images).
  PATH=/root/.local/bin:$PATH

  aws ecr get-login-password \
    | docker login --username AWS --password-stdin 949316342391.dkr.ecr.eu-west-1.amazonaws.com \
    || (echo "you should update to AWS CLI version 2 https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-mac.html " $(aws ecr get-login --region=eu-west-1 --no-include-email) )
}

# Print a raw ECR authorization token (base64 "AWS:password") for eu-west-1.
aws_ecr_token() {
  aws ecr get-authorization-token --region=eu-west-1 --output text --query 'authorizationData[].authorizationToken'
}
94
+
95
+ # you will need jq to use these commands. You can install it using "brew install jq"
96
+ # delete_images colisweb_api 8
97
+ # will delete images older than 8 weeks
98
# you will need jq to use these commands. You can install it using "brew install jq"
# delete_images colisweb_api 8
# will delete images older than 8 weeks
# $1 - ECR repository name, $2 - retention in weeks (default 16).
# Skips any image tagged "latest".
delete_images() {

  REPO=$1
  WEEKS=${2:-16}

  # NOTE(review): 'date -v' is the BSD/macOS syntax; GNU date needs 'date -d'.
  WEEKS_AGO=$(date -v-${WEEKS}w +%F)

  #Get all ecr images
  IMAGES=$(aws ecr describe-images --repository-name $REPO --output json)

  #Filter unnecessary values and map `imagePushedAt` to EPOCH
  NON_LATEST_IMAGES=$(echo $IMAGES | jq '[.imageDetails[] | select(.imageTags | any(endswith("latest")) | not)]')

  #Filter on EPOCH
  # Lexicographic compare on the YYYY-MM-DD prefix of imagePushedAt.
  OLD_IMAGES=$(echo $NON_LATEST_IMAGES | jq --arg date $WEEKS_AGO '.[] | select(.imagePushedAt[0:10] < $date).imageDigest')
  while IFS= read -r IMAGE; do
    if [ "$IMAGE" != "" ]; then
      echo "Deleting $IMAGE from $REPO"
      # AWS_PAGER="" keeps the CLI from opening a pager for each deletion.
      AWS_PAGER="" aws ecr batch-delete-image --repository-name $REPO --image-ids imageDigest=$IMAGE
    fi
  done <<< "$OLD_IMAGES"
}

# delete_images_all_repos 12
# will delete images in all repositories older than 12 weeks
delete_images_all_repos() {
  REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[]|.[].repositoryName')

  while IFS= read -r REPO; do
    echo "processing ECR repository $REPO"
    delete_images $REPO $1
  done <<< "$REPOSITORIES"
}

# Delete gitlab distributed-cache objects in S3 older than a cutoff date.
# $1 - cutoff date YYYY-MM-DD (default: one month ago, BSD date syntax)
# $2 - cache bucket/prefix (default: the gitlab distributed cache)
delete_old_cache() {
  DATE=${1:-$(date -v-1m +%F)}
  CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}

  echo "deleting from cache $CACHE_BUCKET all older than $DATE"

  aws_ecr_login

  while read -r line; do
    # The first 10 chars of an 's3 ls' line are the YYYY-MM-DD date.
    datum=$(echo $line | cut -c1-10)
    if [[ "$datum" < "$DATE" ]] ; then
      # Shell Parameter Expansion: ${parameter##word}
      # Allow to return the result from "word" to the end of "parameters"
      # Here we need the end of the string after "project/" (corresponding to the S3 gitlab project id and filename)
      TO_DELETE="$CACHE_BUCKET${line##* project/}"
      echo $TO_DELETE
      aws s3 rm $TO_DELETE
    fi
  done < <(aws s3 ls $CACHE_BUCKET --recursive)
}
152
+
153
+ #!/usr/bin/env bash
154
+
155
+ # If gitlab is down or pipeline are stuck, hotfixes need to be available
156
+ # This script will publish docker images to ECR using your current git HEAD, then deploy them to a given environment.
157
+ # Some local files (git-commit.conf and sentry.properties) will be updated, take caution.
158
+ # No trace of this will appear on Gitlab (no releases, no pipelines, no tags).
159
+ # create_hotfix_scala $ENVIRONMENT $CHART_NAME [ $MODULE_NAME $MODULE_PATH $DEPLOYMENT ]
160
+ # create_hotfix_scala testing crm main modules/3-executables/main crm
161
+ # create_hotfix_scala testing notification \
162
+ # main-http modules/3-executables/main-http notification-http \
163
+ # main-consumer modules/3-executables/main-consumer notification-consumer
164
+
165
# Build and deploy a hotfix straight from the current git HEAD (see usage
# comments above): $1 - environment, $2 - helm chart name, then triples of
# (sbt module, docker stage path, deployment name).
create_hotfix_scala() {

  ENVIRONMENT=$1
  CHART_NAME=$2
  shift 2

  SHORT_SHA=$(git rev-parse --short HEAD)
  HOTFIX_TAG="hotfix-$SHORT_SHA"

  # Each phase is gated by an interactive gum confirmation; refusing exits.
  gum confirm "Preparing $HOTFIX_TAG for $CHART_NAME ?" || exit
  prepare_hotfix_scala $HOTFIX_TAG

  gum confirm "Building $HOTFIX_TAG for $CHART_NAME ?" || exit
  # Consume (module, path, deployment) triples.
  # NOTE(review): guard is '-gt 2', so a trailing incomplete triple is ignored.
  while [[ $# -gt 2 ]] ; do
    build_hotfix_scala $HOTFIX_TAG "$1" "$2" "$3"
    shift 3
  done

  gum confirm "Deploying $HOTFIX_TAG for $CHART_NAME ?" || exit
  deploy_hotfix $CHART_NAME $ENVIRONMENT $HOTFIX_TAG
}
186
+
187
+ # Update local git-commit.conf and sentry.properties files using git short sha
188
# Update local git-commit.conf and sentry.properties files, replacing the
# GIT_COMMIT placeholder with the hotfix tag.
# $1 - hotfix tag (e.g. "hotfix-abc1234")
# NOTE(review): "sed -i ''" is the BSD/macOS in-place form — GNU sed differs.
prepare_hotfix_scala() {
  HOTFIX_TAG=$1

  git secret reveal -f
  aws_ecr_login

  COMMIT_CONF_FILES=$(find . -name "git-commit.conf")
  SENTRY_PROPERTIES_FILES=$(find . -name "sentry.properties")

  # Bug fix: the previous 'echo "$A\nB"' printed a literal backslash-n (bash
  # echo does not expand \n without -e), fusing the last git-commit.conf with
  # the first sentry.properties into one bogus path. Let word splitting on the
  # embedded newlines separate the file names instead.
  for file in $COMMIT_CONF_FILES $SENTRY_PROPERTIES_FILES; do
    sed -i '' -e 's&GIT_COMMIT&'"$HOTFIX_TAG&" $file
  done

}
202
+
203
+ # Build docker images locally and publish them to AWS ECR.
204
# Build a docker image locally for one sbt module and publish it to AWS ECR.
# $1 - hotfix tag, $2 - sbt module, $3 - module path (containing
# target/docker/stage), $4 - deployment/repository name.
build_hotfix_scala() {

  HOTFIX_TAG=$1
  SBT_MODULE=$2
  DOCKER_PATH=$3
  DEPLOYMENT=$4

  DOCKER_REGISTRY_ID="949316342391"
  DOCKER_REGISTRY="$DOCKER_REGISTRY_ID.dkr.ecr.eu-west-1.amazonaws.com"
  DOCKER_IMAGE=$DOCKER_REGISTRY/$DEPLOYMENT
  HOTFIX_IMAGE=$DOCKER_IMAGE:$HOTFIX_TAG

  #Build
  # sbt-native-packager stages the Dockerfile under target/docker/stage.
  sbt "project $SBT_MODULE" "Docker / stage"

  #Publish
  # linux/amd64 is forced so images built on ARM macs run on the cluster.
  docker build --platform "linux/amd64" -t $HOTFIX_IMAGE --cache-from $DOCKER_IMAGE "$DOCKER_PATH/target/docker/stage"
  docker push $HOTFIX_IMAGE

  echo "Created hotfix $HOTFIX_IMAGE"
}
225
+
226
+ # Deploy the project in the given environment
227
# Deploy a chart to the given environment with helm3.
# $1 - chart name, $2 - environment/namespace, $3 - hotfix image tag.
# Relies on helpers from ci/helm.sh (configure_kubectl_for,
# verify_deployments_v3) and on $colisweb_scripts being set.
deploy_hotfix() {
  source $colisweb_scripts/ci/helm.sh

  CHART_NAME=$1
  ENVIRONMENT=$2
  HOTFIX_TAG=$3

  CONFIG_PATH=deploy
  CHART_PATH=$CONFIG_PATH/$CHART_NAME
  ROOT_PATH=$(pwd)

  # Unset Kubectl configuration made via the KUBECONFIG env variable
  # it would override the config made by configure_kubectl_for
  # for example, using Gitlab runners in Kubernetes sets this variable and causes conflict
  unset KUBECONFIG

  # Configure Kubectl
  configure_kubectl_for $ENVIRONMENT

  # Avoiding "no local-index.yaml" or "empty local-index.yaml" error
  # NOTE(review): macOS-specific helm cache path.
  cat > $HOME/Library/Caches/helm/repository/local-index.yaml <<EOT
apiVersion: v1
entries:
cronjob:
EOT

  # helm3 stable repo have changed and must be updated manually, in versions < v2.17.0
  helm3 repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
  helm3 repo add stable https://charts.helm.sh/stable --force-update
  helm3 repo update
  helm3 dependency update ${ROOT_PATH}/${CHART_PATH}

  # Gather values/*.yaml files
  # Each file becomes an extra " -f <file>" flag on the helm command line.
  VALUES_PATH="${ROOT_PATH}/${CHART_NAME}/values"
  VALUES_FILES=''
  [ -d $VALUES_PATH ] && VALUES_FILES=$(find $VALUES_PATH -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')

  # Deploy
  helm3 upgrade --install \
    --namespace ${ENVIRONMENT} \
    ${VALUES_FILES} \
    -f ${ROOT_PATH}/${CONFIG_PATH}/common.yaml \
    -f ${ROOT_PATH}/${CONFIG_PATH}/${ENVIRONMENT}.yaml \
    -f ${ROOT_PATH}/${CONFIG_PATH}/${ENVIRONMENT}-secrets.yaml \
    --set global.version=$HOTFIX_TAG \
    ${CHART_NAME} ${ROOT_PATH}/${CHART_PATH}

  # Wait (up to 10 minutes) for the rollout to become healthy.
  verify_deployments_v3 -t 10m $ENVIRONMENT $CHART_NAME

}
278
+
279
+ #!/usr/bin/env bash
280
+
281
# Check whether an image tag already exists in an ECR repository.
# $1 - registry id, $2 - repository name, $3 - image tag.
# Returns 0 (and logs) when present, 1 when absent.
image_exists() {
  REGISTRY=$1
  REPOSITORY=$2
  IMAGE=$3

  TAGGED_IMAGE="$REGISTRY/$REPOSITORY:$IMAGE"

  # Bug fix: the previous 'set -e' + '$?' check made the shell exit as soon as
  # describe-images failed, so the "NOT present" branch was unreachable (and
  # 'set -e' leaked into the calling shell). Test the command directly instead.
  if aws ecr describe-images --registry-id $REGISTRY --repository-name $REPOSITORY --image-ids "imageTag=$IMAGE"
  then
    echo "Image $TAGGED_IMAGE already present in distant repo"
    return 0
  else
    echo "Image $TAGGED_IMAGE NOT present in distant repo"
    return 1
  fi
}
301
+ #!/usr/bin/env bash
302
+
303
# "git merge merge": update branches $1 and $2, merge $1 into $2, push.
gmm() {
  git checkout $1
  git pull
  git checkout $2
  git pull
  git merge $1
  git push
}

# Same as gmm but runs the custom 'git dammit' alias on $2 instead of pulling.
# NOTE(review): 'git dammit' is a user-defined alias — not defined here.
git_damn_merge() {
  git checkout $1
  git pull
  git checkout $2
  git dammit
  git merge $1
  git push
}

# Delete local branches whose upstream on origin is gone.
# Uses the remote branch list as an inverted grep filter (via /dev/fd/0) over
# the local branches that track origin; -d refuses unmerged branches.
git_prune_local_branches() {
  git branch -r |
    awk '{print $1}' |
    egrep -v -f /dev/fd/0 <(git branch -vv | grep origin) |
    awk '{print $1}' |
    xargs git branch -d
}

# Interactively pick a branch with gum and check it out.
gum_checkout() {
  git branch -a | cut -f3- -d "/" | gum filter | xargs git checkout
}
332
+
333
+ # useful option :
334
+ # export GIT_SUBLINE_MERGE_NON_INTERACTIVE_MODE=TRUE
335
+ # see https://github.com/paulaltin/git-subline-merge
336
# Configure the git-subline-merge driver (see link above).
# $1 - "--local" (default, requires being inside a git repo) or "--global".
setup_subline_merge() {
  location=${1:-"--local"}

  case $location in
  --local)
    if [ -d ".git" ]; then
      # Per-repo attributes file (not committed, unlike .gitattributes).
      echo "* merge=subline" >>.git/info/attributes
    else
      echo "Cannot use local option, not in a git repository"
      return 1
    fi
    ;;
  --global)
    echo "* merge=subline" >>~/.gitattributes
    ;;
  *)
    echo "unknown argument $location"
    return 2
    ;;
  esac

  # diff3 conflict markers are required by the subline driver.
  git config $location merge.conflictStyle diff3
  git config $location merge.subline.driver "$colisweb_scripts/shell-session/shell/dev/git-subline-merge %O %A %B %L %P"
  git config $location merge.subline.recursive binary
}
361
+
362
# Squash everything since the merge-base with $1 into a single commit, then
# rebase that commit onto $1, resolving conflicts with -Xtheirs.
# NOTE(review): during a rebase, "theirs" is the side being replayed — confirm
# this resolves conflicts in the intended direction.
rebase_from_ancestor() {
  set -x   # trace commands; note -x stays enabled after the function returns
  branch=$1
  tip=$(git rev-parse HEAD)
  ancestor=$(git merge-base $branch $tip)
  commits=$(git log $ancestor..$tip)
  git reset --hard $ancestor
  git merge --squash $tip
  # '|| echo' keeps going when the squash produced no changes.
  git commit -m "squashed commmits $commits" || echo "nothing committed"
  git rebase $branch -Xtheirs
}
373
+
374
+ #!/usr/bin/env bash
375
+
376
# Import every *.key file shipped next to this script into the gpg keyring.
import_all_pgp_keys() {
  echo "importing all PGP keys"
  gpg --import $SCRIPT_FULL_PATH/pgp_keys/*.key
}
380
+
381
# Revoke git-secret access for every person currently listed by 'whoknows'.
remove_all_persons_from_secrets() {
  echo "cleanup git secret"
  WHO_KNOWS=($(git secret whoknows))
  # Bug fix: "$WHO_KNOWS" expands only the FIRST array element, so only one
  # person was ever removed; pass the whole array.
  git secret removeperson "${WHO_KNOWS[@]}"
  echo "Removed secrets access for ${WHO_KNOWS[*]}"
}
387
+
388
# List the email addresses embedded in the bundled PGP key files,
# one per line (extracted from the "<email>" part of each uid).
all_pgp_emails() {
  gpg --show-key $SCRIPT_FULL_PATH/pgp_keys/*.key | sed -rn "s/.*<(.*)>/\1/p"
}
391
+
392
# Rebuild the git-secret access list from scratch: import the bundled keys,
# reveal secrets, drop everyone, then re-tell the chosen emails and re-hide.
# $@ - emails to grant; when empty, pick interactively from the bundled keys.
set_all_secret_keys() {

  import_all_pgp_keys

  git secret reveal -f

  remove_all_persons_from_secrets

  if [ $# -eq 0 ]; then
    echo "No emails supplied, using dev-tools pgp keys as source"
    IN_THE_KNOW=($(gum choose --no-limit $(all_pgp_emails)))
  else
    IN_THE_KNOW=($*)
  fi

  # Bug fix: "$IN_THE_KNOW" expands only the FIRST array element, so only one
  # email was ever granted access; pass the whole array.
  git secret tell "${IN_THE_KNOW[@]}"
  git secret hide
  git secret whoknows

  echo "all secrets updated, you'll need to commit the changes"
}
413
+
414
+ #!/usr/bin/env bash
415
+
416
# Start (or reuse) a throwaway sshd pod in the cluster and port-forward a
# local port to it, to be used as an SSH bastion (see database_k8s, bind_jmx).
# $1 - environment, $2 - local port to forward to the pod's sshd.
# Requires ~/.ssh/id_rsa.pub; leaves a background kubectl port-forward running.
start_ssh_bastion() {
  ENV=$1
  SSH_LOCAL_PORT=$2
  POD_NAME=ssh-bastion-$USERNAME
  CONFIG_MAP_NAME=ssh-bastion-$USERNAME
  configure_kubectl_for $ENV
  kubectl get pods -o name | grep pod/$POD_NAME
  if [ $? -eq 0 ]; then
    echo "$POD_NAME is already running"
  else
    #configmap
    # Recreate the configmap holding sshd_config + authorized_keys.
    kubectl get configmap $CONFIG_MAP_NAME && kubectl delete configmap $CONFIG_MAP_NAME
    tempdir=$(mktemp -d)
    cat <<EOF > $tempdir/sshd_config
AllowTcpForwarding yes
Port 2222
PermitRootLogin yes
AuthorizedKeysFile /etc/ssh/authorized_keys
EOF
    cp ~/.ssh/id_rsa.pub $tempdir/authorized_keys
    kubectl create configmap $CONFIG_MAP_NAME --from-file=$tempdir

    #pod
    kubectl get pod $POD_NAME && kubectl delete pod $POD_NAME
    # NOTE(review): YAML indentation below was reconstructed — the extracted
    # source had lost leading whitespace; verify against the original file.
    cat <<EOF | kubectl create -f -

apiVersion: v1
kind: Pod
metadata:
  name: $POD_NAME
spec:
  containers:
    - name: $POD_NAME
      image: sickp/alpine-sshd:7.4
      ports:
        - containerPort: 2222
      volumeMounts:
        - mountPath: /etc/ssh/sshd_config
          name: ssh-config
          subPath: sshd_config
        - mountPath: /etc/ssh/authorized_keys
          name: ssh-config
          subPath: authorized_keys
  volumes:
    - name: ssh-config
      configMap:
        name: $CONFIG_MAP_NAME
EOF

  fi

  # You need a recent kubectl for wait to work (1.15 works), install or upgrade
  # with brew :
  # brew install kubernetes-cli
  # brew upgrade kubernetes-cli
  kubectl wait --for=condition=Ready pod/$POD_NAME

  # kube port-forward
  # Free the local port first, then forward it to the pod's sshd in background.
  lsof -ti tcp:$SSH_LOCAL_PORT | xargs kill
  kubectl port-forward $POD_NAME $SSH_LOCAL_PORT:2222 &
  while ! nc -z 127.0.0.1 $SSH_LOCAL_PORT; do
    sleep 1
  done
  echo "forwarding ssh via local port $SSH_LOCAL_PORT"
  echo "remember to terminate the bastion with 'stop_ssh_bastion'"
}

# Tear down the bastion pod created by start_ssh_bastion.
stop_ssh_bastion() {
  POD_NAME=ssh-bastion-$USERNAME
  kubectl delete pod $POD_NAME
}
487
+
488
+ #!/usr/bin/env bash
489
+
490
# Point kubectl at the EKS cluster for one of the known environments.
# $1 - environment name; rejected (return 1) unless it is in the whitelist.
configure_kubectl_for() {
  local infra_env="$1"
  local valid_envs="[testing][staging][production][performance][tests][recette]"

  # The bracketed list makes "\[$env\]" an exact-match test via grep.
  if ! echo "$valid_envs" | grep -q "\[$infra_env\]"; then
    echo "Cannot configure kubectl for invalid env : $infra_env"
    echo "choose one of $valid_envs"
    return 1
  fi

  aws eks update-kubeconfig --name "toutatis-$infra_env-eks" >&2
}
503
+
504
+ #!/usr/bin/env bash
505
+
506
+ # WARNING : never try to do a dump directly from the database_production_ca
507
+ # this could cause lot of lock database issues.
508
+ # always use database_production_read_replica_ca instead
509
# WARNING : never try to do a dump directly from the database_production_ca
# this could cause lot of lock database issues.
# always use database_production_read_replica_ca instead
#
# Open SSH tunnels (via an in-cluster bastion pod) to the RDS databases of an
# environment. $1 - one of tests/testing/staging/production/production_rw/recette.
# Postgres is reachable on 127.0.0.1:$PG_LOCAL_PORT, MySQL on $CA_LOCAL_PORT.
database_k8s() {
  MODE=$1
  # Each env gets fixed, non-overlapping local ports.
  case $MODE in
  "tests") SSH_LOCAL_PORT=2224;PG_LOCAL_PORT=24440;CA_LOCAL_PORT=25430;ENV="tests";;
  "testing") SSH_LOCAL_PORT=2225;PG_LOCAL_PORT=24441;CA_LOCAL_PORT=25431;ENV="testing";;
  "staging") SSH_LOCAL_PORT=2226;PG_LOCAL_PORT=24442;CA_LOCAL_PORT=25432;ENV="staging";;
  "production") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24443;CA_LOCAL_PORT=25433;ENV="production";;
  "production_rw") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24444;CA_LOCAL_PORT=25434;ENV="production";;
  "recette") SSH_LOCAL_PORT=2228;PG_LOCAL_PORT=24446;CA_LOCAL_PORT=25436;ENV="recette";;
  *) echo "Unsupported ENV : $MODE"; return 1 ;;
  esac

  start_ssh_bastion $ENV $SSH_LOCAL_PORT

  # Free the postgres local port before opening the tunnel.
  lsof -ti tcp:$PG_LOCAL_PORT | xargs kill

  # Generated ssh config: one Host stanza per env, with the RDS port forwards.
  bastion_config=$(mktemp)
  cat > "$bastion_config" <<EOF
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
User root
Host bastion_tests
HostName 127.0.0.1
Port 2224
LocalForward 24440 toutatis-tests-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
Host bastion_testing
HostName 127.0.0.1
Port 2225
LocalForward 24441 toutatis-testing-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
LocalForward 25431 toutatis-testing-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
LocalForward 25531 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
Host bastion_staging
HostName 127.0.0.1
Port 2226
LocalForward 24442 toutatis-staging-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
LocalForward 25432 toutatis-staging-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
Host bastion_recette
HostName 127.0.0.1
Port 2228
LocalForward 24446 toutatis-recette-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
LocalForward 25436 toutatis-recette-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
LocalForward 25536 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
Host bastion_production
HostName 127.0.0.1
Port 2227
LocalForward 24443 toutatis-production-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
LocalForward 25433 toutatis-production-mysql-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
EOF
  # production_rw additionally forwards to the writable primaries.
  if [ "$MODE" = "production_rw" ] ; then
    cat >> "$bastion_config" <<EOF
LocalForward 24444 toutatis-production-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
LocalForward 25434 toutatis-production-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
EOF
  fi

  # -f -N: background, no remote command — tunnel only.
  ssh -f -N \
    -F "$bastion_config" \
    "bastion_$ENV"

  echo "sample command : 'psql postgres://postgres@127.0.0.1:$PG_LOCAL_PORT'"
  echo "sample command : 'mysql -u colisweb -h 127.0.0.1 -P $CA_LOCAL_PORT -p db_name'"

  echo "run 'kubectl delete pod $POD_NAME' when you have finished"
}
573
+
574
# Run psql against a database from inside the cluster, via a throwaway pod.
# $1 - namespace, $2 - service (used to name the pod), $3 - connection string
# "user:password@host:port"; remaining args are passed to psql.
psql_on_k8() {
  NAMESPACE=$1
  SERVICE=$2
  CONNECTION=$3
  shift 3

  # --attach --rm: stream output and delete the pod when done.
  kubectl -n $NAMESPACE run ${SERVICE}-postgres-init \
    --image jbergknoff/postgresql-client \
    --restart=Never \
    --attach --rm \
    -- \
    postgresql://${CONNECTION} \
    "$*"
}

# Run a single mysql query from inside the cluster, via a throwaway pod.
# $1 - namespace, $2 - service (pod name prefix), $3 - host, $4 - port,
# $5 - user, $6 - password, $7 - SQL to execute.
mysql_on_k8() {
  local namespace=$1
  local service=$2
  local db_host=$3
  local db_port=$4
  local db_init_username=$5
  local db_init_password=$6
  local query=$7

  kubectl -n ${namespace} run ${service}-mysql-init \
    --image arey/mysql-client \
    --restart=Never \
    --attach --rm \
    -- \
    mysql --host=$db_host --user=$db_init_username --password=$db_init_password --port=$db_port --execute="$query"
}
605
+ #!/usr/bin/env bash
606
+
607
# Idempotently create a postgres database + owner user via an in-cluster pod.
# Named args (see extract_args): --namespace --db_host --db_port
# --db_init_username --db_init_password --db_database --db_username --db_password
kube_init_database_once() {

  extract_args 8 namespace db_host db_port db_init_username db_init_password db_database db_username db_password $*

  echo "======================="
  echo " Initializing Database '$db_database' for namespace $namespace"
  echo "======================="

  echo "Checking if Database '$db_database' exists"
  # Tolerate the grep failing (database absent) under an outer 'set -e'.
  set +e
  psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -lqtA | cut -d\| -f1 | grep "^$db_database$"
  return_code=$?
  set -e

  if [ ${return_code} -eq 0 ]; then
    echo "Database $db_database already exists - nothing to do"
  else
    echo "Database $db_database does not exist - initializing"

    psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'CREATE DATABASE '"$db_database"';'
    echo "DB created $db_database"

    psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'CREATE USER '"$db_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
    echo "USER created $db_username"

    psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_username"';'
    echo "Granted all privileges for $db_username on $db_database"
  fi

  echo "======================="
  echo " Database '$db_database' Initialization complete for namespace $namespace"
  echo "======================="
}
640
+
641
# Idempotently create a read-only postgres role for a database.
# Named args: --namespace --service --db_connection --db_database
# --db_readonly_username --db_readonly_password
kube_init_database_readonly_account() {

  extract_args 6 namespace service db_connection db_database db_readonly_username db_readonly_password $*

  echo "======================="
  echo " Initializing Readonly Account '$db_readonly_username' for '$db_database' for namespace $namespace"
  echo "======================="

  # Print commands before execution, except echo
  trap '[[ $BASH_COMMAND != echo* ]] && echo $BASH_COMMAND' DEBUG

  echo "Checking if Readonly account '$db_readonly_username' for '$db_database' exists"
  set +e
  psql_on_k8 $namespace $service $db_connection -qtAc 'SELECT rolname FROM pg_roles;' | grep "^$db_readonly_username$"
  return_code=$?
  set -e

  if [ ${return_code} -eq 0 ]; then
    echo "Account $db_readonly_username already exists - nothing to do"
  else
    echo "Account $db_readonly_username does not exist - creating"

    # Grant connect + read on everything in schema public, now and for
    # tables created later (ALTER DEFAULT PRIVILEGES).
    psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_readonly_username"' WITH ENCRYPTED PASSWORD '"'$db_readonly_password'"';'
    psql_on_k8 $namespace $service $db_connection -c 'GRANT CONNECT ON DATABASE '"$db_database"' TO '"$db_readonly_username"';'
    psql_on_k8 $namespace $service $db_connection -c 'GRANT USAGE ON SCHEMA public TO '"$db_readonly_username"';'
    psql_on_k8 $namespace $service $db_connection -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO '"$db_readonly_username"';'
    psql_on_k8 $namespace $service $db_connection -c 'ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO '"$db_readonly_username"';'

    echo "Created user with read-only permissions for $db_readonly_username on $db_database (schema public)"
  fi
}
672
+
673
# Idempotently provision the Datadog agent's MySQL user, schema and helper
# procedures, per the Datadog self-hosted MySQL monitoring setup.
# Named args: --namespace --db_host --db_port --db_init_username
# --db_init_password --db_datadog_username --db_datadog_password --db_datadog_schema
kube_init_datadog_in_database() {
  extract_args 8 namespace db_host db_port db_init_username db_init_password db_datadog_username db_datadog_password db_datadog_schema $*

  echo "======================="
  echo " Initializing Datadog Agent Requirement for namespace $namespace"
  echo "======================="

  echo "Checking if User '$db_datadog_username' exists"
  local service="datadog"
  found_db_users=$(mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'SELECT user FROM mysql.user;')
  set +e
  echo "$found_db_users" | grep "^$db_datadog_username$"
  return_code=$?
  set -e

  if [ ${return_code} -eq 0 ]; then
    echo "User $db_datadog_username already exists - nothing to do"
  else
    echo "User $db_datadog_username does not exist - initializing"

    # All the query come from this docs : https://docs.datadoghq.com/fr/database_monitoring/setup_mysql/selfhosted/?tab=mysql56

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'CREATE USER '"$db_datadog_username"'@"%" IDENTIFIED BY '"'$db_datadog_password'"';'
    echo "USER created $db_datadog_username"

    # NOTE(review): this statement hardcodes the 'datadog' user name while the
    # others use $db_datadog_username — confirm they are always the same.
    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT REPLICATION CLIENT ON *.* TO datadog@"%" WITH MAX_USER_CONNECTIONS 5;'
    echo "ALTER USER $db_datadog_username"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT PROCESS ON *.* TO '"$db_datadog_username"'@"%";'
    echo "Granted PROCESS for $db_datadog_username"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT SELECT ON performance_schema.* TO '"$db_datadog_username"'@"%";'
    echo "Granted SELECT on performance_schema for $db_datadog_username"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'CREATE SCHEMA IF NOT EXISTS datadog;'
    echo "CREATE SCHEMA datadog"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT EXECUTE ON datadog.* to '"$db_datadog_username"'@"%";'
    echo "Granted 'GRANT EXECUTE for $db_datadog_username on datadog"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT CREATE TEMPORARY TABLES ON datadog.* TO '"$db_datadog_username"'@"%";'
    echo "Granted CREATE TEMPORARY TABLES for $db_datadog_username"

    # Helper procedure used by the agent to EXPLAIN sampled queries.
    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.explain_statement;
DELIMITER $$
CREATE PROCEDURE datadog.explain_statement(IN query TEXT)
SQL SECURITY DEFINER
BEGIN
SET @explain := CONCAT("EXPLAIN FORMAT=json ", query);
PREPARE stmt FROM @explain;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
END $$
DELIMITER ;'
    echo "CREATE PROCEDURE PROCEDURE datadog.explain_statement"

    # Same procedure, installed in the agent user's own schema.
    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS '"$db_datadog_username"'.explain_statement;
DELIMITER $$
CREATE PROCEDURE '"$db_datadog_username"'.explain_statement(IN query TEXT)
SQL SECURITY DEFINER
BEGIN
SET @explain := CONCAT("EXPLAIN FORMAT=json ", query);
PREPARE stmt FROM @explain;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
END $$
DELIMITER ;
GRANT EXECUTE ON PROCEDURE '"$db_datadog_username"'.explain_statement TO datadog@"%";'
    echo "CREATE PROCEDURE on SCHEMA $db_datadog_schema for $db_datadog_username"

    # Lets the agent (re-)enable performance_schema statement consumers.
    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.enable_events_statements_consumers;
DELIMITER $$
CREATE PROCEDURE datadog.enable_events_statements_consumers()
SQL SECURITY DEFINER
BEGIN
UPDATE performance_schema.setup_consumers SET enabled="YES" WHERE name LIKE "events_statements_%";
END $$
DELIMITER ;
GRANT EXECUTE ON PROCEDURE datadog.enable_events_statements_consumers TO datadog@"%";'

    echo "CREATE PROCEDURE on datadog.enable_events_statements_consumers"
  fi

  echo "======================="
  echo " Database '$db_datadog_schema' Initialization complete for namespace $namespace"
  echo "======================="
}
761
+
762
# Idempotently provision the Datadog agent's PostgreSQL user, datadog schema,
# pg_monitor grant, pg_stat_statements extension and explain helper function.
# Named args: --namespace --db_host --db_port --db_init_username
# --db_init_password --db_datadog_username --db_datadog_password
kube_init_datadog_in_postgres_database() {
  extract_args 7 namespace db_host db_port db_init_username db_init_password db_datadog_username db_datadog_password $*

  local service="datadog"
  local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"

  echo "======================="
  echo " Initializing $service Agent On PostgresSQL Database Requirement for namespace $namespace"
  echo "======================="

  echo "Checking if User '$db_datadog_username' exists"

  set +e
  if psql_on_k8 $namespace $service $db_connection -qtAc 'SELECT usename FROM pg_catalog.pg_user;' | grep "^$db_datadog_username$";
  then
    echo "User $db_datadog_username already exists - nothing to do"
  else
    echo "User $db_datadog_username does not exist - initializing"

    set -e
    psql_on_k8 $namespace $service $db_connection -qc 'CREATE USER '"$db_datadog_username"' WITH password '"'$db_datadog_password'"';'
    echo "User created $db_datadog_username"

    psql_on_k8 $namespace $service $db_connection -qc 'CREATE SCHEMA datadog;'
    echo "Schema datadog created"

    psql_on_k8 $namespace $service $db_connection -qc 'GRANT USAGE ON SCHEMA datadog TO datadog;'
    echo "Granted usage for datadog schema to datadog"

    psql_on_k8 $namespace $service $db_connection -qc 'GRANT USAGE ON SCHEMA public TO datadog;'
    echo "Granted usage for public schema to datadog"

    psql_on_k8 $namespace $service $db_connection -qc 'GRANT pg_monitor TO datadog;'
    echo "Granted pg_monitor to datadog"

    psql_on_k8 $namespace $service $db_connection -qc 'CREATE EXTENSION IF NOT EXISTS pg_stat_statements schema public;'
    echo "Extension pg_stat_statements created"

    # The explain helper is shipped as a SQL file written inside the pod;
    # the \\$\\$ below survive two rounds of expansion to become plpgsql $$.
    local datadog_function_path="/tmp/datatog-explain-statement-function.sql"
    local datadog_function="CREATE OR REPLACE FUNCTION datadog.explain_statement(
l_query TEXT,
OUT explain JSON
)
RETURNS SETOF JSON AS
\\$\\$
DECLARE
curs REFCURSOR;
plan JSON;

BEGIN
OPEN curs FOR EXECUTE pg_catalog.concat('EXPLAIN (FORMAT JSON) ', l_query);
FETCH curs INTO plan;
CLOSE curs;
RETURN QUERY SELECT plan;
END;
\\$\\$
LANGUAGE 'plpgsql'
RETURNS NULL ON NULL INPUT
SECURITY DEFINER;"

    kubectl -n $namespace run $service-postgres-init \
      --image jbergknoff/postgresql-client \
      --restart=Never \
      --attach --rm \
      --command \
      -- \
      /bin/sh -c "echo -e \"$datadog_function\" > $datadog_function_path; psql postgresql://$db_connection -qf $datadog_function_path"

    echo "Function datadog.explain_statement created"
  fi

  echo "======================="
  echo " Database $service Initialization complete for namespace $namespace"
  echo "======================="
}
837
+
838
# Idempotently create a service's postgres database + owner user (same flow as
# kube_init_database_once, but with a per-service pod name).
# Named args: --namespace --service --db_host --db_port --db_init_username
# --db_init_password --db_database --db_username --db_password
kube_init_service_database() {

  extract_args 9 namespace service db_host db_port db_init_username db_init_password db_database db_username db_password $*

  local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"

  echo "Checking if Database '$db_database' exists"
  # Tolerate the grep failing (database absent) under an outer 'set -e'.
  set +e
  psql_on_k8 $namespace $service $db_connection -lqtA | cut -d\| -f1 | grep "^$db_database$"
  return_code=$?
  set -e

  if [ ${return_code} -eq 0 ]; then
    echo "Database $db_database already exists - nothing to do"
  else
    echo "Database $db_database does not exist - initializing"

    psql_on_k8 $namespace $service $db_connection -c 'CREATE DATABASE '"$db_database"';'
    echo "DB created $db_database"

    psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
    echo "USER created $db_username"

    psql_on_k8 $namespace $service $db_connection -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_username"';'
    echo "Granted all privileges for $db_username on $db_database"
  fi

  echo "======================="
  echo " Database '$db_database' Initialization complete for namespace $namespace"
  echo "======================="
}
869
+
870
+ #!/usr/bin/env bash
871
+
872
+ # Allow to use JMX connection to retrieve data and metrics from the pods within kubernetes
873
+ # You will need visualVM to use this tool https://visualvm.github.io/
874
+ # ex: bind_jmx testing notification
875
# Open a JMX connection to a pod through an SSH bastion + SOCKS proxy, then
# launch jconsole against it (see usage comments above; VisualVM steps printed).
# $1 - environment, $2 - service name to filter pods on.
bind_jmx() {

  local ENV=$1
  local SERVICE_NAME=$2
  local PORT=2242

  start_ssh_bastion $ENV $PORT

  # Dynamic SOCKS proxy on local port 7777 through the bastion.
  echo "root" | ssh -f -N -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -D 7777 root@127.0.0.1 -p 2242
  local PODS=$(kubectl -n $ENV get pods -o wide | grep $SERVICE_NAME | grep -Eo '^[^ ]+')

  echo "Choose one of the following pod to get metrics from..."
  local POD_NAME=$(gum choose $PODS)
  # Resolve the chosen pod name to its cluster IP.
  local POD_IP=$(
    kubectl -n $ENV get pods -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIP}{"\n"}{end}' |
      grep $POD_NAME |
      cut -d' ' -f2 |
      head -1
  )

  # Route jconsole's RMI traffic through the SOCKS proxy (JMX port 7199).
  jconsole -J-DsocksProxyHost=localhost \
    -J-DsocksProxyPort=7777 \
    service:jmx:rmi:///jndi/rmi://$POD_IP:7199/jmxrmi \
    -J-DsocksNonProxyHosts= &

  cat << EOF
Now start VisualVM
Preferences > Network > Manual Proxy Settings
SOCKS Proxy Line: Set 'localhost' and Port '7777'
File > Add JMX Connection
Set $POD_IP:7199, check 'do not require an SSL connection'
Remember to kill you bastion afterward using 'stop_ssh_bastion'
EOF
}
909
+ #!/usr/bin/env bash
910
+
911
# Summarize cluster nodes: for each node print its name, its workType
# label and the cpu/memory figures, colorized for quick scanning.
k8_nodes_stats() {
  kubectl get nodes -o name |
    xargs kubectl describe |
    grep "^Name\|workType\|cpu \|memory " |
    sed -r -e 's/[ :=]+/\t/g' -e 's/\tworkType\t//g' -e 's/^Name/---\nName/g' |
    grep --color "Name\|web\|workers\|cpu\|memory\|---"
}
920
+
921
+ #!/usr/bin/env bash
922
+
923
# Port forward on the first matching pod.
# Ex :
#   pod_forward testing notification-http
#   pod_forward testing colisweb-api-web 3333 3000
# Frees LOCAL_PORT first, forwards it in the background to POD_PORT on the
# first pod matching POD_FILTER, and waits until the port answers locally.
pod_forward() {
  ENV=$1
  POD_FILTER=$2
  LOCAL_PORT=${3:-8080}
  POD_PORT=${4:-8080}

  # lsof may return several PIDs (one per line) — keep $PID unquoted here
  # so kill receives them as separate arguments.
  if PID=$(lsof -ti tcp:"$LOCAL_PORT"); then
    echo "killing process $PID which uses port $LOCAL_PORT"
    kill $PID
  fi

  configure_kubectl_for "$ENV"

  POD=$(pick_pod "$ENV" "$POD_FILTER")

  echo "setting up forwarding to $POD"
  kubectl -n "$ENV" port-forward "$POD" "$LOCAL_PORT:$POD_PORT" &
  PID=$!

  # poll until the forwarded port actually accepts connections
  while ! echo exit | nc localhost "$LOCAL_PORT" > /dev/null; do
    sleep 1
    echo "waiting for port $LOCAL_PORT to be open locally"
  done
  echo "port $LOCAL_PORT is now available on localhost, forwarding to $ENV $POD:$POD_PORT"
  echo 'you can terminate it with "kill '$PID'" or "kill $(lsof -ti tcp:'$LOCAL_PORT')"'
}
953
+
954
# Prompts to pick a pod and run a command (default: bash) inside it.
#   pod_exec testing
#   pod_exec testing bash
#   pod_exec testing bash colisweb-api
pod_exec() {
  ENV=$1
  COMMAND=${2:-bash}
  configure_kubectl_for $ENV
  POD_FILTER=$3
  # resolve the target pod (interactive when no filter is given)
  POD=$(pick_pod $ENV $POD_FILTER)
  echo "running $COMMAND inside $POD"
  # COMMAND stays unquoted on purpose: it may carry its own arguments
  kubectl -n $ENV exec -ti $POD -- $COMMAND
}
967
+
968
# Prompts to pick a pod and copy a local file into it.
#   pod_copy_to testing localfile remotefile
#   pod_copy_to testing localfile remotefile colisweb-api
pod_copy_to() {
  ENV=$1
  LOCAL_FILE=$2
  REMOTE_FILE=$3
  configure_kubectl_for $ENV
  POD_FILTER=$4
  # resolve the target pod (interactive when no filter is given)
  POD=$(pick_pod $ENV $POD_FILTER)
  kubectl cp $LOCAL_FILE $ENV/$POD:$REMOTE_FILE
}
980
+
981
+
982
# Resolve a pod name on ENV: with no filter, pick one interactively via
# gum; otherwise print the first pod whose resource name matches the
# filter. Returns non-zero when a filter is given but nothing matches.
pick_pod() {
  ENV=$1
  POD_FILTER="pod/$2"
  configure_kubectl_for $ENV

  if [ -z "$2" ] ; then
    kubectl -n $ENV get pods | gum filter | cut -f1 -d" "
  else
    if PODS=$(kubectl -n $ENV get pods -o=name | grep "$POD_FILTER"); then
      # BUG FIX: $PODS must be quoted — unquoted it collapsed every match
      # onto one line, so 'head -1' returned ALL pods and the un-global sed
      # only stripped the first "pod/" prefix.
      echo "$PODS" | head -1 | sed -e 's/pod\///'
    else
      echo "no pods found on $ENV matching $POD_FILTER" >&2
      # propagate the failure instead of silently succeeding with no output
      return 1
    fi
  fi
}
997
+
998
+ #!/usr/bin/env bash
999
+
1000
# Write into the file $1 an ssh config covering the "ca" ElastiCache redis
# bastions (one Host block per environment, ssh ports from 2223, local
# redis forwards from 63789).
bastion_config_for_redis_ca() {
  ssh_config xufte6.0001.euw1.cache.amazonaws.com redis 2223 63789 tests testing recette-001 sandbox prod > "$1"
}
1003
+
1004
# Write into the file $1 an ssh config covering the "toutatis" redis
# bastions (same host/ports as the "ca" config, different instance names).
bastion_config_for_redis_toutatis() {
  ssh_config xufte6.0001.euw1.cache.amazonaws.com toutatis 2223 63789 tests testing recette staging production > "$1"
}
1007
+
1008
# Emit a complete ssh client config on stdout: the shared header followed
# by one bastion Host block per environment, with incrementing local ssh
# ports (from $3) and redis forwards (from $4).
#   $1 host suffix, $2 host prefix, $3 first ssh port, $4 first forward
#   port, remaining args: per-environment instance names.
ssh_config() {
  host=$1
  host_prefix=$2
  port0=$3
  forward0=$4
  shift 4
  instance_names=("$@") # /!\ indices start at 1 with zsh
  ssh_header

  environments=(tests testing recette staging production)

  length=${#environments[@]}
  # NOTE(review): this 1..length loop relies on zsh's 1-based arrays (this
  # toolkit is sourced into zsh). Under plain bash (the file's shebang) it
  # would skip environments[0] and read one past the end — confirm the
  # intended shell before "fixing" the indexing.
  for (( i=1; i<=${length}; i++ ));
  do
    bastion_block bastion_${environments[$i]} $(($port0 + $i)) $(($forward0 + $i)) ${host_prefix}-${instance_names[$i]}.$host
  done
}
1025
+
1026
# Print the ssh_config options shared by every bastion connection:
# disable host-key persistence/verification (bastion hosts are ephemeral)
# and log in as root.
ssh_header() {
  cat <<EOF
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
User root
EOF
}
1033
+
1034
# Print one ssh Host block: alias $1, reached via local port $2 (the
# bastion tunnel endpoint), forwarding local port $3 to $4:6379 (redis).
bastion_block() {
  cat <<EOF
Host $1
HostName 127.0.0.1
Port $2
LocalForward $3 $4:6379
EOF
}
1042
+
1043
# Tunnel to a managed redis through the environment's ssh bastion and
# open redis-cli on it.
#   $1: environment (tests|testing|recette|staging|production)
#   $2: redis instance, "ca" (default) or "toutatis"
redis_k8s() {
  MODE=$1
  REDIS_INSTANCE=${2:-ca}
  # one dedicated local ssh-port / redis-port pair per environment
  case $MODE in
    "tests") SSH_LOCAL_PORT=2224;REDIS_LOCAL_PORT=63790;ENV="tests";;
    "testing") SSH_LOCAL_PORT=2225;REDIS_LOCAL_PORT=63791;ENV="testing";;
    "recette") SSH_LOCAL_PORT=2226;REDIS_LOCAL_PORT=63792;ENV="recette";;
    "staging") SSH_LOCAL_PORT=2227;REDIS_LOCAL_PORT=63793;ENV="staging";;
    "production") SSH_LOCAL_PORT=2228;REDIS_LOCAL_PORT=63794;ENV="production";;
    *) echo "Unsupported ENV : $MODE"; return 1 ;;
  esac

  start_ssh_bastion $ENV $SSH_LOCAL_PORT

  # free the local redis port if something is already bound to it
  lsof -ti tcp:$REDIS_LOCAL_PORT | xargs kill

  bastion_config=$(mktemp)
  case $REDIS_INSTANCE in
    "ca") bastion_config_for_redis_ca "$bastion_config";;
    "toutatis") bastion_config_for_redis_toutatis "$bastion_config";;
    *) echo "Unsupported redis instance (ca or toutatis available) : $REDIS_INSTANCE"; return 1;;
  esac

  # background tunnel through the bastion Host block matching this env
  ssh -f -N \
    -F "$bastion_config" \
    "bastion_$ENV"

  echo "sample command : 'redis-cli -p $REDIS_LOCAL_PORT'"
  # BUG FIX: the old hint referenced $POD_NAME, which is never set in this
  # function (leftover from a pod-based approach); the tunnel is torn down
  # with stop_ssh_bastion.
  echo "run 'stop_ssh_bastion' when you have finished"

  redis-cli -p $REDIS_LOCAL_PORT
}
1075
+
1076
+ #!/usr/bin/env bash
1077
+
1078
+ #Create a k8s cron jobs that will be run regularly
1079
+ #See run_cron_job_k8s -h for more details
1080
+
1081
+ run_cron_job_k8s() {
1082
+
1083
+ #default values
1084
+ local namespace="testing"
1085
+ local name="$USERNAME"
1086
+ local SCHEDULE="00 05 * * *"
1087
+ local secret=""
1088
+ local amm_folder=""
1089
+ local amm_script=""
1090
+
1091
+ while getopts ":e:c:p:f:s:t:h" opt; do
1092
+ case $opt in
1093
+ e)
1094
+ namespace="$OPTARG" >&2
1095
+ ;;
1096
+ t)
1097
+ SCHEDULE="$OPTARG" >&2
1098
+ ;;
1099
+ p)
1100
+ name="$OPTARG" >&2
1101
+ ;;
1102
+ c)
1103
+ secret="$OPTARG" >&2
1104
+ ;;
1105
+ f)
1106
+ amm_folder="$OPTARG" >&2
1107
+ ;;
1108
+ s)
1109
+ amm_script="$OPTARG" >&2
1110
+ ;;
1111
+ h)
1112
+ show_help_cron_job
1113
+ return 0
1114
+ ;;
1115
+ :)
1116
+ echo "Option -$OPTARG requires an argument. Run run_cron_job_k8s -h for help" >&2
1117
+ return 0
1118
+ ;;
1119
+ \?)
1120
+ echo "Invalid option: -$OPTARG. Run run_cron_job_k8s -h for help" >&2
1121
+ return 0
1122
+ ;;
1123
+ esac
1124
+ done
1125
+
1126
+ if [ -z "$amm_script" ]; then
1127
+ echo 'Missing -s. Run run_cron_job_k8s -h for help' >&2
1128
+ return 0
1129
+ fi
1130
+
1131
+ shift "$((OPTIND-1))"
1132
+
1133
+ local script_args=$(
1134
+ if [ "$#" -gt 0 ] ; then
1135
+ printf '"'
1136
+ join_by '", "' $*
1137
+ printf '"'
1138
+ fi
1139
+ )
1140
+
1141
+ local IMAGE="lolhens/ammonite:2.5.4"
1142
+ local CRONJOB_NAME="cronjob-ammonite-$name"
1143
+
1144
+
1145
+ configure_kubectl_for $namespace
1146
+
1147
+ if [[ ! -r "$amm_script" ]]; then
1148
+ echo "ammonite script not found $amm_script"
1149
+ return 2
1150
+ else
1151
+ local CONFIG_MAP="config-$CRONJOB_NAME"
1152
+ local SECRET_MAP="secret-$CRONJOB_NAME"
1153
+ local CONFIG_MAP_DIR="$(mktemp -d)"
1154
+
1155
+ if [[ ! -z $amm_folder && -d $amm_folder ]] ; then
1156
+ cp -r "$amm_folder/" "$CONFIG_MAP_DIR"
1157
+ fi
1158
+ cp "$amm_script" "$CONFIG_MAP_DIR/script.sc"
1159
+
1160
+ kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
1161
+ kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"
1162
+
1163
+ kubectl -n $namespace get secret $SECRET_MAP && kubectl -n $namespace delete secret $SECRET_MAP
1164
+ kubectl -n $namespace create secret generic $SECRET_MAP --from-file="$secret"
1165
+
1166
+ kubectl -n $namespace get cronjob $CRONJOB_NAME && kubectl -n $namespace delete cronjob $CRONJOB_NAME
1167
+
1168
+ echo "starting $CRONJOB_NAME with $IMAGE"
1169
+
1170
+ JOB_DEFINITION='
1171
+ apiVersion: batch/v1
1172
+ kind: CronJob
1173
+ metadata:
1174
+ name: '$CRONJOB_NAME'
1175
+ namespace: '$namespace'
1176
+ spec:
1177
+ schedule: "'$SCHEDULE'"
1178
+ concurrencyPolicy: Forbid
1179
+ jobTemplate:
1180
+ spec:
1181
+ backoffLimit: 0
1182
+ template:
1183
+ spec:
1184
+ nodeSelector:
1185
+ workType: "workers"
1186
+ restartPolicy: Never
1187
+ volumes:
1188
+ - name: config
1189
+ configMap:
1190
+ name: '$CONFIG_MAP'
1191
+ - name: secret
1192
+ secret:
1193
+ secretName: '$SECRET_MAP'
1194
+ containers:
1195
+ - name: '$CRONJOB_NAME'
1196
+ command: ["amm", "/code/script.sc"]
1197
+ image: '$IMAGE'
1198
+ imagePullPolicy: IfNotPresent
1199
+ args: ['$script_args']
1200
+ env:
1201
+ - name: POD_NAME
1202
+ valueFrom:
1203
+ fieldRef:
1204
+ apiVersion: v1
1205
+ fieldPath: metadata.name
1206
+ - name: POD_NAMESPACE
1207
+ valueFrom:
1208
+ fieldRef:
1209
+ apiVersion: v1
1210
+ fieldPath: metadata.namespace
1211
+ - name: HOST_IP
1212
+ valueFrom:
1213
+ fieldRef:
1214
+ apiVersion: v1
1215
+ fieldPath: status.hostIP
1216
+ volumeMounts:
1217
+ - name: config
1218
+ mountPath: /code
1219
+ - name: secret
1220
+ mountPath: /conf
1221
+ readOnly: true
1222
+ resources:
1223
+ requests:
1224
+ cpu: 500m
1225
+ memory: 256Mi
1226
+ limits:
1227
+ cpu: 4000m
1228
+ memory: 512Mi
1229
+ envFrom:
1230
+ - configMapRef:
1231
+ name: '$CONFIG_MAP'
1232
+ - secretRef:
1233
+ name: '$SECRET_MAP'
1234
+ '
1235
+
1236
+ echo $JOB_DEFINITION > /tmp/job.yaml
1237
+
1238
+ kubectl -n $namespace apply -f /tmp/job.yaml
1239
+
1240
+ fi
1241
+ }
1242
+
1243
# Usage info for run_cron_job_k8s.
show_help_cron_job() {
  # unquoted heredoc so $USERNAME expands, as the original string did
  cat <<EOF
Usage: run_cron_job_k8s -s SCRIPT [-t TIME] [-e ENV] [-c CONFIG] [-p POD] [-f FOLDER] [ARGS]
Create a k8s cron job that will be run a script regularly

-h display this help and exit
-s SCRIPT run script SCRIPT on a pod (SCRIPT must be a .sc file)
-t TIME opt. time when the job will be launched. TIME should be in CRON syntax (default to 00 05 * * *, ie 5AM UTC)
-e ENV opt. set execution environment (default to testing)
-c CONFIG opt. secret file needed for the script (must be a .sc file, not a .secret file)
-p POD opt. name of the pod to create (default to $USERNAME)
-f FOLDER opt. name of the folder containing the scripts to execute (if SCRIPT needs other files)
ARGS opt. additional arguments for SCRIPT

EOF
}
1260
+
1261
+ #!/usr/bin/env bash
1262
+
1263
# Usage info for run_job_k8s.
show_help_job() {
  # unquoted heredoc so $USERNAME expands; \$file stays literal
  cat <<EOF
Usage: run_job_k8s -s SCRIPT [-e ENV] [-c CONFIG] [-p POD] [-f FOLDER] [ARGS]
Create a k8s job executing a script

-h display this help and exit
-s SCRIPT run script SCRIPT on a pod (SCRIPT must be a .sc file)
-e ENV opt. set execution environment (default to testing)
-c CONFIG opt. secret file needed for the script (must be a .sc file, not a .secret file)
-p POD opt. name of the pod to create (default to $USERNAME)
-f FOLDER opt. name of the folder containing the scripts to execute (if SCRIPT needs other files)
ARGS opt. additional arguments for SCRIPT

The organisation of the files must be the same locally as on the pod :
- /code containing the script to execute (arg -s) and the other needed files (if the arg -f is used, it must reference this directory)
- /conf containing the secret file (arg -c if used)
E.g. in the script "/code/script.sc", to use a secret file "/conf/secret.sc", the import should look like "import \$file.^.conf.secret.sc"

EOF
}
1283
+
1284
+ run_job_k8s() {
1285
+
1286
+ #default values
1287
+ local namespace="testing"
1288
+ local name="$USERNAME"
1289
+ local secret=""
1290
+ local amm_folder=""
1291
+ local amm_script=""
1292
+
1293
+ while getopts ":e:c:p:f:s:h" opt; do
1294
+ case $opt in
1295
+ e)
1296
+ namespace="$OPTARG" >&2
1297
+ ;;
1298
+ p)
1299
+ name="$OPTARG" >&2
1300
+ ;;
1301
+ c)
1302
+ secret="$OPTARG" >&2
1303
+ ;;
1304
+ f)
1305
+ amm_folder="$OPTARG" >&2
1306
+ ;;
1307
+ s)
1308
+ amm_script="$OPTARG" >&2
1309
+ ;;
1310
+ h)
1311
+ show_help_job
1312
+ return 0
1313
+ ;;
1314
+ :)
1315
+ echo "Option -$OPTARG requires an argument. Run run_cron_job_k8s -h for help" >&2
1316
+ return 0
1317
+ ;;
1318
+ \?)
1319
+ echo "Invalid option: -$OPTARG. Run run_cron_job_k8s -h for help" >&2
1320
+ return 0
1321
+ ;;
1322
+ esac
1323
+ done
1324
+
1325
+ if [ -z "$amm_script" ]; then
1326
+ echo 'Missing -s. Run run_job_k8s -h for help' >&2
1327
+ return 0
1328
+ fi
1329
+
1330
+ shift "$((OPTIND-1))"
1331
+
1332
+ local script_args=$(
1333
+ if [ "$#" -gt 0 ] ; then
1334
+ printf '"'
1335
+ join_by '", "' $*
1336
+ printf '"'
1337
+ fi
1338
+ )
1339
+
1340
+ local IMAGE="lolhens/ammonite:2.5.4"
1341
+ local JOB_NAME="job-ammonite-$name"
1342
+
1343
+ if [[ ! -r "$amm_script" ]]; then
1344
+ echo "ammonite script not found $amm_script"
1345
+ return 2
1346
+ else
1347
+ local CONFIG_MAP="config-$JOB_NAME"
1348
+ local CONFIG_MAP_DIR="$(mktemp -d)"
1349
+ local SECRET_MAP="secret-$JOB_NAME"
1350
+
1351
+ configure_kubectl_for $namespace
1352
+
1353
+ if [[ ! -z $amm_folder && -d $amm_folder ]] ; then
1354
+ cp -r "$amm_folder/" "$CONFIG_MAP_DIR"
1355
+ fi
1356
+ cp "$amm_script" "$CONFIG_MAP_DIR/script.sc"
1357
+
1358
+ kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
1359
+ kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"
1360
+
1361
+ kubectl -n $namespace get secret $SECRET_MAP && kubectl -n $namespace delete secret $SECRET_MAP
1362
+ kubectl -n $namespace create secret generic $SECRET_MAP --from-file="$secret"
1363
+
1364
+ kubectl -n $namespace get job $JOB_NAME && kubectl -n $namespace delete job $JOB_NAME
1365
+
1366
+ echo "starting $JOB_NAME with $IMAGE"
1367
+ fi
1368
+
1369
+ JOB_DEFINITION='
1370
+ apiVersion: batch/v1
1371
+ kind: Job
1372
+ metadata:
1373
+ name: '$JOB_NAME'
1374
+ namespace: '$namespace'
1375
+ spec:
1376
+ template:
1377
+ spec:
1378
+ containers:
1379
+ - name: '$JOB_NAME'
1380
+ command: ["amm", "/code/script.sc"]
1381
+ image: '$IMAGE'
1382
+ args: ['$script_args']
1383
+ env:
1384
+ - name: POD_NAME
1385
+ valueFrom:
1386
+ fieldRef:
1387
+ apiVersion: v1
1388
+ fieldPath: metadata.name
1389
+ - name: POD_NAMESPACE
1390
+ valueFrom:
1391
+ fieldRef:
1392
+ apiVersion: v1
1393
+ fieldPath: metadata.namespace
1394
+ - name: HOST_IP
1395
+ valueFrom:
1396
+ fieldRef:
1397
+ apiVersion: v1
1398
+ fieldPath: status.hostIP
1399
+ volumeMounts:
1400
+ - name: config
1401
+ mountPath: /code
1402
+ - name: secret
1403
+ mountPath: /conf
1404
+ readOnly: true
1405
+ resources:
1406
+ requests:
1407
+ cpu: 500m
1408
+ memory: 256Mi
1409
+ limits:
1410
+ cpu: 4000m
1411
+ memory: 1Gi
1412
+ nodeSelector:
1413
+ workType: workers
1414
+ restartPolicy: Never
1415
+ volumes:
1416
+ - name: config
1417
+ configMap:
1418
+ name: '$CONFIG_MAP'
1419
+ - name: secret
1420
+ secret:
1421
+ secretName: '$SECRET_MAP'
1422
+ '
1423
+
1424
+
1425
+ echo $JOB_DEFINITION > /tmp/job.yaml
1426
+
1427
+ kubectl -n $namespace apply -f /tmp/job.yaml
1428
+
1429
+ }
1430
+
1431
+
1432
+ #!/usr/bin/env bash
1433
+
1434
# Run a one-off attached pod and remove it when it exits.
#   run_task --namespace NS --image IMG --name NAME [extra kubectl run args]
# Note: enables set -e / set -x in the calling shell as a side effect
# (kept from the original behavior).
run_task() {
  set -e

  check_args "--namespace" $1
  shift
  NAMESPACE=$1
  shift
  check_args "--image" $1
  shift
  IMAGE=$1
  shift
  check_args "--name" $1
  shift
  NAME=$1
  shift

  set -x

  # BUG FIX: use "$@" (not $*) so extra arguments containing spaces are
  # passed through to kubectl unsplit.
  kubectl -n ${NAMESPACE} run ${NAME} \
    --image ${IMAGE} \
    --restart=Never \
    --attach --rm \
    "$@"
}
1458
# Geocode a postal address (all arguments, joined) through the Google
# Maps Geocoding API. Requires GOOGLE_API_KEY in the environment.
# NOTE: only spaces are %-encoded; other reserved characters in the
# address are passed through as-is.
geocode_address() {
  ADDRESS=$(printf '%s' "$*" | sed -e 's: :%20:g')
  URL="https://maps.googleapis.com/maps/api/geocode/json?address=${ADDRESS}&key=${GOOGLE_API_KEY}"
  # BUG FIX: quote the URL — unquoted, its '?' made it a glob pattern that
  # could match a file in the current directory.
  curl "$URL"
}
1463
+
1464
# Search the SIRENE open-data registry: $1 is the SIREN to refine on, the
# remaining arguments form the free-text query ('+'-joined).
search_business() {
  SIREN=$1
  shift
  QUERY=$(printf '%s' "$*" | sed -e 's: :+:g')
  URL="https://data.opendatasoft.com/api/records/1.0/search/?dataset=sirene_v3%40public&q=${QUERY}&sort=datederniertraitementetablissement&facet=trancheeffectifsetablissement&facet=libellecommuneetablissement&facet=departementetablissementi&refine.siren=${SIREN}"
  # BUG FIX: quote the URL — unquoted, its '?' made it a glob pattern that
  # could match a file in the current directory.
  curl "$URL"
}
1471
+
1472
+ #!/bin/bash
1473
+
1474
# source tolls.sh ; tolls antoine.thomas@colisweb.com
# Interactive toll-cost extraction: download production tours between two
# dates, pick projects and tours with gum, then fetch per-tour details
# from the HERE API into a CSV.
# Requires: gum, curl, ./tour_details.sc in the cwd, $HERE_API_KEY.
function tolls() {
  USER=${1:-first.last@colisweb.com}
  FROM_DATE=${2:-"2023-02-01"}
  TO_DATE=${3:-"2023-02-28"}

  USER=$(gum input --prompt "username : " --value $USER)
  TOKEN=$(./tour_details.sc login --user $USER --password $(gum input --password --placeholder password))
  [ "$TOKEN" != "" ] && echo "connected" || return 1

  FROM_DATE=$(gum input --prompt "Date start : " --value $FROM_DATE)
  TO_DATE=$(gum input --prompt "Date end : " --value $TO_DATE)
  FILENAME="tours-${FROM_DATE}-TO-${TO_DATE}.json"
  # dump all route plans for the period into ~/Downloads
  curl --cookie "session=$TOKEN" "https://api.production.colisweb.com/api/v6/routes-plans/external?from=${FROM_DATE}&to=${TO_DATE}" > ~/Downloads/$FILENAME
  echo "Tournées téléchargées"

  # pick projects (2nd CSV column = project id), then their tours
  projectIds=$(./tour_details.sc allProjects --file ~/Downloads/$FILENAME | gum choose --no-limit | cut -d "," -f 2)
  echo "projets sélectionnés : $projectIds"
  tourIds=$(./tour_details.sc allTours --file ~/Downloads/$FILENAME --projectIds "$projectIds")
  echo "tournées sélectionnées : $tourIds"

  TARGET="${FROM_DATE}-TO-${TO_DATE}.csv"
  echo "appels à HERE, écriture dans $TARGET"
  ./tour_details.sc allToursDetails --token $TOKEN --hereApiKey $HERE_API_KEY --routeIds "$tourIds" > "$TARGET"

  echo "terminé"
}
1501
+
1502
+ #!/usr/bin/env bash
1503
+
1504
# possible syntax:
# login
# login testing
# login testing userid
# Prompts (via gum) for any missing environment/user/password, then
# authenticates through auth.sc and caches the JWT in the exported
# variable TOKEN_<env>. The && chain aborts at the first failed step.
login() {
  ENV=${1:-`gum choose testing staging production recette`} && \
  USER=${2:-`gum input --placeholder username`} && \
  PASSWORD=`gum input --password --placeholder password` && \
  TOKEN=`$SCRIPT_FULL_PATH/scala/auth.sc login --env $ENV --user $USER --password $PASSWORD` && \
  export TOKEN_$ENV=$TOKEN && \
  echo "login success for $USER on $ENV" >&2
}
1516
+
1517
# you need to call login first (see above)
# possible syntax:
# recompute_tour
# recompute_tour testing
# recompute_tour testing draft
# recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09
# recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09 TODAY
# recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09 FRIDAY
# Recompute a project's tour configuration (draft or definitive) for a
# given day, prompting with gum for any argument not supplied.
recompute_tour() {
  ENV=${1:-`gum choose testing staging production recette`}
  MODE=${2:-`gum choose draft definitive`}
  PROJECT_ID=${3:-`pick_project $ENV`}
  DAY=${4:-`gum choose TODAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY SUNDAY`}
  # refreshes the global $TOKEN for $ENV (re-logs in if expired)
  jwt_token $ENV
  scala/tour_config.sc $MODE -t $TOKEN -p $PROJECT_ID -d $DAY
}
1533
+
1534
# List the projects on ENV (via tour_config.sc) and let the user pick one
# with gum; prints the selected row's first column (the project id).
# Refreshes the global $TOKEN through jwt_token first.
pick_project() {
  ENV=${1:-`gum choose testing staging production recette`}
  jwt_token $ENV
  scala/tour_config.sc list -t $TOKEN -e $ENV | gum filter | cut -f1
}
1539
+
1540
# Ensure the global $TOKEN holds a valid token for ENV: load the cached
# TOKEN_<env> and re-login when the auth.sc check fails (login also
# refreshes the global TOKEN).
# NOTE(review): eval is used for the indirect variable read because it
# works in both bash and zsh (bash's ${!var} does not exist in zsh).
jwt_token() {
  ENV=${1:-`gum choose testing staging production recette`}
  eval 'TOKEN=$TOKEN_'$ENV
  if ! $SCRIPT_FULL_PATH/scala/auth.sc check -t $TOKEN -e $ENV ; then
    login $ENV
  fi
}
1547
+
1548
+ #!/usr/bin/env bash
1549
+
1550
# Open an sftp session to IKEA's Centiro FTP through the testing ssh
# bastion: local port 25500 is forwarded to ft.centiro.ikea.com:22.
ftp_ikea_k8s() {
  SSH_LOCAL_PORT=2230
  FTP_LOCAL_PORT=25500
  start_ssh_bastion testing $SSH_LOCAL_PORT

  # free the local ftp port if something is already bound to it
  lsof -ti tcp:$FTP_LOCAL_PORT | xargs kill

  # throwaway ssh config describing the tunnel through the bastion
  bastion_config=$(mktemp)
  cat > "$bastion_config" <<EOF
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
User root
Host bastion_ftp
HostName 127.0.0.1
Port 2230
LocalForward 25500 ft.centiro.ikea.com:22
EOF

  # background (-f -N) tunnel; the sftp below goes through it
  ssh -f -N \
    -F "$bastion_config" \
    "bastion_ftp"

  sftp -P $FTP_LOCAL_PORT colisweb.fr@127.0.0.1
}
1574
+
1575
+ #!/usr/bin/env bash
1576
+
1577
# usage:
# jconsole_k8s testing colisweb-api-web
# Open jconsole on the JMX endpoint (port 7199) of the first pod whose
# name matches NAME, tunnelling through the env's ssh bastion via a
# SOCKS proxy on local port 7777.
jconsole_k8s() {
  ENV=$1
  NAME=$2

  start_ssh_bastion $ENV 2242
  # resolve the first matching pod's cluster IP ("name ip" pairs)
  POD_IP=$( \
    kubectl -n $ENV get pods -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIP}{"\n"}{end}' \
    | grep "$NAME" | cut -d' ' -f2 | head -1 \
  )
  echo "selected POD with ip $POD_IP"
  echo "use 'root' as password"
  # background SOCKS proxy through the bastion (prompts for the password)
  ssh -f -N -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -D 7777 root@127.0.0.1 -p 2242

  # empty socksNonProxyHosts forces all traffic through the proxy
  jconsole \
    -J-DsocksProxyHost=localhost \
    -J-DsocksProxyPort=7777 \
    -J-DsocksNonProxyHosts= \
    service:jmx:rmi:///jndi/rmi://$POD_IP:7199/jmxrmi \
    &

  echo "remember to stop with 'stop_ssh_bastion'"

}
1603
+
1604
+ #!/usr/bin/env bash
1605
+
1606
+ # Interactive console on an new pod. See also run_ruby_k8s
1607
+ # Ex :
1608
+ # railsc_k8s production
1609
+ # railsc_k8s production "User.where(email:'toni@colisweb.com')"
1610
+ railsc_k8s() {
1611
+ ENV=$1
1612
+ COMMAND=$2
1613
+ [[ $ENV = "production" || $ENV = "staging" ]] && default_tag="master-latest" || default_tag="${ENV}-latest"
1614
+ local image_tag=${5:-$default_tag}
1615
+ local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/colisweb-api:$image_tag"
1616
+ local POD_NAME="colisweb-api-rails-console-$image_tag-$USERNAME"
1617
+
1618
+ kubectl -n $ENV get pod $POD_NAME && kubectl -n $ENV delete pod $POD_NAME
1619
+
1620
+ configure_kubectl_for $ENV
1621
+ echo "starting with $IMAGE"
1622
+
1623
+ kubectl -n $ENV run $POD_NAME \
1624
+ --image $IMAGE \
1625
+ --restart=Never \
1626
+ --overrides='{
1627
+ "spec":{
1628
+ "nodeSelector":{
1629
+ "workType": "workers"
1630
+ },
1631
+ "containers":[
1632
+ {
1633
+ "name":"'$POD_NAME'",
1634
+ "image":"'$IMAGE'",
1635
+ "imagePullPolicy":"Always",
1636
+ "command":[
1637
+ "sleep",
1638
+ "infinity"
1639
+ ],
1640
+ "resources":{
1641
+ "limits":{
1642
+ "memory": "2048Mi"
1643
+ }
1644
+ },
1645
+ "envFrom": [ {
1646
+ "configMapRef": {
1647
+ "name": "colisweb-api"
1648
+ }
1649
+ }, {
1650
+ "secretRef": {
1651
+ "name": "colisweb-api"
1652
+ }
1653
+ }
1654
+ ]
1655
+ }
1656
+ ]
1657
+ }
1658
+ }
1659
+ '
1660
+
1661
+ sleep 5
1662
+ KUBERAILS="kubectl -n $ENV exec -ti $POD_NAME -- /usr/src/app/bin/rails c"
1663
+ [ -z "$COMMAND" ] && eval $KUBERAILS || echo $COMMAND | eval $KUBERAILS
1664
+
1665
+ print "End of $POD_NAME "
1666
+ kubectl -n $ENV delete pods $POD_NAME
1667
+ }
1668
+
1669
# Ex :
# create_user testing claire.lien@colisweb.com super_admin clairemdp
# Create (or find) a colisweb-api user by email/role and set its password,
# through a rails console pod (railsc_k8s).
create_user() {
  ENV=$1
  EMAIL=$2
  ROLE=$3
  PASSWORD=$4
  railsc_k8s $ENV "User.where(email:'$EMAIL', role:'$ROLE').first_or_create.update_attributes!(password: '$PASSWORD')"
}
1678
+
1679
# Ex :
# delete_user testing claire.lien@colisweb.com
# Destroy the colisweb-api user matching the email, through a rails
# console pod (railsc_k8s).
delete_user() {
  ENV=$1
  EMAIL=$2
  railsc_k8s $ENV "User.find_by(email:'$EMAIL').destroy"
}
1686
+
1687
+ # NON Interactive console on an new pod, for long-running tasks (a few minutes)
1688
+ # See also railsc_k8s
1689
+ # file.txt will be available from /conf/data.txt in the ruby code
1690
+ # examples :
1691
+ # run_ruby_k8s testing demo <(echo "pp JSON.parse(File.read('/conf/data.txt'))") <(echo '{ "content": 123 }')
1692
+ # run_ruby_k8s testing demo ~/.oh-my-zsh/custom/dev-tools/shell-session/ruby/demo.rb <(echo '{ "content": 123 }')
1693
+ run_ruby_k8s() {
1694
+ if [ $# -lt 4 ]; then
1695
+ echo "usage : run_ruby_k8s production name-for-pod script.rb file.txt"
1696
+ return 1
1697
+ fi
1698
+ local namespace=$1
1699
+ local name=$2
1700
+ local ruby_script=$3
1701
+ local input_data=$4
1702
+ [[ $namespace = "production" || $namespace = "staging" ]] && default_tag="master-latest" || default_tag="${namespace}-latest"
1703
+ local image_tag=${5:-$default_tag}
1704
+
1705
+ if [ ! -r "$ruby_script" ]; then
1706
+ echo "ruby script not found $ruby_script"
1707
+ return 2
1708
+ fi
1709
+
1710
+ if [ ! -r "$input_data" ]; then
1711
+ echo "data not found $input_data"
1712
+ return 3
1713
+ fi
1714
+
1715
+
1716
+ local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/colisweb-api:$image_tag"
1717
+ local POD_NAME="colisweb-api-script-$name"
1718
+ local CONFIG_MAP="config-$POD_NAME"
1719
+ local CONFIG_MAP_DIR="$(mktemp -d)"
1720
+
1721
+
1722
+ configure_kubectl_for $namespace
1723
+
1724
+
1725
+ cp "$ruby_script" "$CONFIG_MAP_DIR/script.rb"
1726
+ cp "$input_data" "$CONFIG_MAP_DIR/data.txt"
1727
+
1728
+ kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
1729
+ kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"
1730
+
1731
+ kubectl -n $namespace get pod $POD_NAME && kubectl -n $namespace delete pod $POD_NAME
1732
+
1733
+ echo "starting with $IMAGE"
1734
+ kubectl -n $namespace run $POD_NAME \
1735
+ --image $IMAGE \
1736
+ -ti \
1737
+ --restart=Never \
1738
+ --attach \
1739
+ --rm \
1740
+ --overrides='{
1741
+ "spec":{
1742
+ "nodeSelector":{
1743
+ "workType": "workers"
1744
+ },
1745
+ "containers":[
1746
+ {
1747
+ "name":"'$POD_NAME'",
1748
+ "image":"'$IMAGE'",
1749
+ "imagePullPolicy":"Always",
1750
+ "command":[
1751
+ "/usr/src/app/bin/rails",
1752
+ "r",
1753
+ "/conf/script.rb"
1754
+ ],
1755
+ "resources":{
1756
+ "limits":{
1757
+ "memory": "4096Mi"
1758
+ }
1759
+ },
1760
+ "volumeMounts":[
1761
+ {
1762
+ "name":"conf",
1763
+ "mountPath":"/conf"
1764
+ }
1765
+ ],
1766
+ "envFrom": [ {
1767
+ "configMapRef": {
1768
+ "name": "colisweb-api"
1769
+ }
1770
+ }, {
1771
+ "secretRef": {
1772
+ "name": "colisweb-api"
1773
+ }
1774
+ }
1775
+ ]
1776
+ }
1777
+ ],
1778
+ "volumes":[
1779
+ {
1780
+ "name":"conf",
1781
+ "configMap":{ "name":"'$CONFIG_MAP'" }
1782
+ }
1783
+ ]
1784
+ }
1785
+ }
1786
+ '
1787
+
1788
+ kubectl -n $namespace delete configmap $CONFIG_MAP
1789
+ }
1790
+
1791
# example:
# update_pickup_cp testing <( echo '{"wrong_cp": "59123", "corrected_cp": "59223", "delivery_ids": ["4192421", "4192425"]}' )
# Thin wrapper: fix wrong pickup postal codes on the listed deliveries by
# running ruby/update_pickup_cp.rb on a pod in namespace $1, fed with the
# JSON file $2.
update_pickup_cp() {
  run_ruby_k8s $1 update-pickup-cp "$SCRIPT_FULL_PATH/ruby/update_pickup_cp.rb" $2
}
1796
+
1797
+
1798
+
1799
# Push a JSON price list to colisweb-api via run_ruby_k8s, splitting the
# file into blocks when it exceeds the ~1MB configmap payload limit.
#   $1: namespace, $2: path to a JSON array of prices
update_all_prices() {
  local namespace=$1
  local json_prices=$2

  local json_size=$(wc -c < "$json_prices")

  if ((json_size > 940000)); then
    # BUG FIX: the original tested jq inside a (...) subshell, so its
    # "return 1" only exited the subshell and the function carried on
    # without jq installed.
    if ! command -v jq >/dev/null; then
      echo "jq not found (use brew install jq)"
      return 1
    fi
    local max_lines=3000
    local total_lines=$(jq '. | length' $json_prices)
    local iterations=$((total_lines / max_lines + 1))
    echo "$json_prices is too big, I'll split it for you in blocks of $max_lines lines. It will take $iterations runs"
    for (( i = 0 ; i < iterations ; i++ )) ; do
      local start=$((i * max_lines))
      local end=$(( (i + 1) * max_lines))
      local split_file=$(mktemp)
      # array slice [start:end) of the original price list
      jq -c ".[$start:$end]" $json_prices > $split_file
      local split_lines=$(jq '. | length' $split_file)
      # (message fixed: a stray "command -v" had been pasted into it)
      echo "starting iteration $i from $start to $end with $split_file which has $split_lines lines"
      run_ruby_k8s $namespace "update-prices-$i" "$SCRIPT_FULL_PATH/ruby/update_prices.rb" $split_file
    done
  else
    run_ruby_k8s $namespace "update-prices" "$SCRIPT_FULL_PATH/ruby/update_prices.rb" $json_prices
  fi
}
1824
+
1825
+
1826
# Re-upload survey KPIs from a CSV via run_ruby_k8s, splitting the file
# (and copying the header line into every block) when it exceeds the
# ~1MB configmap payload limit.
#   $1: namespace, $2: path to the CSV
update_surveys() {
  local namespace=$1
  local csv_surveys=$2

  local csv_size=$(wc -c < "$csv_surveys")

  if ((csv_size > 940000)); then
    local max_lines=400
    local total_lines=$(wc -l < $csv_surveys)
    local iterations=$((total_lines / max_lines + 1))
    echo "$csv_surveys is too big, I'll split it for you in blocks of $max_lines lines. It will take $iterations runs"
    for (( i = 0 ; i < iterations ; i++ )) ; do
      # +2 / +1 offsets: line 1 is the CSV header, re-emitted per block
      local start=$((i * max_lines + 2))
      local end=$(( (i + 1) * max_lines + 1))
      local split_file=$(mktemp)
      head -1 $csv_surveys > $split_file
      # (quoting normalized from the original ''"$start,${end}p")
      sed -n "${start},${end}p" $csv_surveys >> $split_file

      local split_lines=$(wc -l < $split_file)
      # (message fixed: a stray "command -v" had been pasted into it)
      echo "starting iteration $i from $start to $end with $split_file which has $split_lines lines"
      run_ruby_k8s $namespace "reimport-surveys-$i" "$SCRIPT_FULL_PATH/ruby/feedback_kpi_reuploader.rb" $split_file
    done
  else
    run_ruby_k8s $namespace "reimport-surveys" "$SCRIPT_FULL_PATH/ruby/feedback_kpi_reuploader.rb" $csv_surveys
  fi
}
1854
+
1855
+ #!/usr/bin/env bash
1856
+
1857
# Verify gitlab ssh connectivity using $SSH_PRIVATE_KEY, without touching
# the user's ~/.ssh: the key lives in a throwaway temp dir that is
# removed afterwards.
configure_gitlab_ssh() {
  # BUG FIX: abort if mktemp fails instead of writing to "/known_hosts"
  tmp_dir=$(mktemp -d) || return 1
  ssh-keyscan gitlab.com > "$tmp_dir/known_hosts"
  echo "$SSH_PRIVATE_KEY" > "$tmp_dir/id_rsa"
  # ssh refuses keys readable by others
  chmod 600 "$tmp_dir/id_rsa"
  ssh -i "$tmp_dir/id_rsa" -T git@gitlab.com
  rm -Rf -- "$tmp_dir"
}
1865
+
1866
+
1867
# Install $SSH_PRIVATE_KEY into ~/.ssh and verify gitlab ssh connectivity.
configure_gitlab_ssh_home() {
  # BUG FIX: -p so an already-existing ~/.ssh doesn't make mkdir fail
  mkdir -p ~/.ssh
  ssh-keyscan gitlab.com >> ~/.ssh/known_hosts
  echo "$SSH_PRIVATE_KEY" > ~/.ssh/id_rsa
  # ssh refuses keys readable by others
  chmod 600 ~/.ssh/id_rsa
  ssh -T git@gitlab.com
}
1874
+ #!/usr/bin/env bash
1875
+
1876
# Schedule a Datadog downtime for each service in $1 (space-separated),
# but only when $ENVIRONMENT is production; silent no-op elsewhere.
datadog_schedule_downtime() {
  SERVICES=$1
  DOWNTIME_MINUTES=${2:-30}

  # guard clause: nothing to do outside production
  if [[ "$ENVIRONMENT" != "production" ]] ; then
    return 0
  fi
  log "scheduling downtime for $SERVICES in $ENVIRONMENT"

  for SERVICE in $SERVICES ; do
    datadog_schedule_downtime_single $SERVICE $DOWNTIME_MINUTES
  done
}
1890
+
1891
# Schedule one Datadog downtime window, starting now, for monitors tagged
# service:$1 + performance, scoped to env:production.
#   $1: service name, $2: duration in minutes
# Requires DD_API_KEY and DD_APP_KEY in the environment.
datadog_schedule_downtime_single() {
  local SERVICE=$1
  local DOWNTIME_MINUTES=$2

  # epoch seconds: window is [now, now + minutes]
  START=$(date +%s)
  END=$((START + 60 * DOWNTIME_MINUTES))

  log "scheduling a downtime on datadog for $SERVICE ($DOWNTIME_MINUTES minutes)"
  curl -X POST "https://api.datadoghq.com/api/v1/downtime" \
    -H "Content-Type: application/json" \
    -H "DD-API-KEY: ${DD_API_KEY}" \
    -H "DD-APPLICATION-KEY: ${DD_APP_KEY}" \
    -d '
  {
    "active": true,
    "downtime_type": 0,
    "start": '$START',
    "end": '$END',
    "message": "CA Deployment - performance for '$SERVICE' may be lower for next '$DOWNTIME_MINUTES' min",
    "monitor_tags": [
      "service:'$SERVICE'",
      "performance"
    ],
    "scope": [
      "env:production"
    ],
    "timezone": "Europe/Paris"
  }
  '
}
1921
+
1922
+ #!/usr/bin/env bash
1923
+
1924
# Build the service's docker image for arm64+amd64 and push it, unless an
# image for the current commit already exists in the registry.
#   $1: space-separated list of build-arg NAMES; each value is taken from
#       the identically-named environment variable.
# Reads env: DOCKER_REGISTRY_ID, APPLICATION, CI_COMMIT_SHORT_SHA,
# DOCKER_IMAGE, DOCKER_IMAGE_SHA, DOCKER_STAGE_PATH, CI_PROJECT_URL.
docker_build_push() {
  read -r -a BUILD_ARGS <<< "$1"
  DOCKER_BUILD_ARGS="--build-arg VCS_REF=$(git rev-parse --short HEAD)"

  # forward each requested variable as --build-arg NAME=value
  # (${!ARG_NAME} is an indirect read of the env var named by ARG_NAME)
  for ARG_NAME in "${BUILD_ARGS[@]}"
  do
    DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS --build-arg $ARG_NAME=${!ARG_NAME}"
  done

  if ! image_exists $DOCKER_REGISTRY_ID $APPLICATION $CI_COMMIT_SHORT_SHA ; then
    # best-effort pull to warm the build cache
    docker pull $DOCKER_IMAGE || true
    SOURCE_URL=${CI_PROJECT_URL:8} # without "https://" protocol, like gitlab.com/colisweb-idl/colisweb/back/packing

    # buildx builder needed for multi-platform builds
    docker buildx create --use

    docker buildx build $DOCKER_BUILD_ARGS \
      -t $DOCKER_IMAGE_SHA \
      --platform "linux/arm64,linux/amd64" \
      --label org.opencontainers.image.revision=$(git rev-parse HEAD) \
      --label org.opencontainers.image.source=$SOURCE_URL \
      --provenance=false \
      --push \
      $DOCKER_STAGE_PATH
  fi
}
1949
+
1950
+
1951
# Re-tag an image in the registry WITHOUT pulling it: copy the manifest
# from OLD_TAG to NEW_TAG through the Docker Registry v2 HTTP API.
# inspired by https://dille.name/blog/2018/09/20/how-to-tag-docker-images-without-pulling-them/
# $1 - old tag, $2 - new tag (characters outside [0-9a-zA-Z.-] become '_')
# Reads: DOCKER_REGISTRY, APPLICATION.
# Returns: 1 if the old manifest cannot be fetched, 2 if the push fails.
docker_promote() {
  OLD_TAG=${1//[^0-9a-zA-Z-.]/_}
  NEW_TAG=${2//[^0-9a-zA-Z-.]/_}
  echo "promoting from $OLD_TAG to $NEW_TAG"
  # Basic-auth token for the ECR registry (helper defined elsewhere).
  TOKEN=$(aws_ecr_token)
  CONTENT_TYPE="application/vnd.docker.distribution.manifest.v2+json"
  MANIFESTS_API="https://${DOCKER_REGISTRY}/v2/${APPLICATION}/manifests"

  # Fetch the v2 manifest of the old tag.
  if MANIFEST=$(curl --fail -H "Authorization: Basic $TOKEN" -H "Accept: ${CONTENT_TYPE}" "$MANIFESTS_API/${OLD_TAG}"); then
    echo "authenticated on $MANIFESTS_API"
  else
    return 1
  fi
  # PUT the same manifest under the new tag.
  if curl --fail -H "Authorization: Basic $TOKEN" -X PUT -H "Content-Type: ${CONTENT_TYPE}" -d "${MANIFEST}" "$MANIFESTS_API/$NEW_TAG" ; then
    echo "promoted ${APPLICATION} from $OLD_TAG to $NEW_TAG"
  else
    return 2
  fi
}
1971
+
1972
# Verify that every image named in $1 (comma-separated list) exists in
# registry $DOCKER_REGISTRY_ID at tag $VERSION; fail on the first miss.
ensure_images_exists() {
  # Parameter expansion turns "a,b,c" into words, replacing echo|tr.
  for IMAGE_TO_CHECK in ${1//,/ }; do
    image_exists ${DOCKER_REGISTRY_ID} ${IMAGE_TO_CHECK} ${VERSION} || return 1
  done
}
1977
+
1978
+ #!/usr/bin/env bash
1979
+
1980
# Look up a configuration value across the layered YAML config files,
# first match wins: <env>-secrets.yaml, then <env>.yaml, then common.yaml.
# Args (fixed order): --environment E --configs-path P --variable V [--optional]
# Prints the value on stdout. With --optional, prints "" and exits 0 when
# the path is absent everywhere; otherwise exits 1 with a message on stderr.
# NOTE(review): uses `exit`, so it is meant to run inside a $( ) subshell —
# calling it directly would terminate the caller.
extract_yaml_config_variable() {
  # Failures are handled explicitly below; neutralize inherited -e/-x.
  set +e
  set +x

  check_args "--environment" $1
  shift
  ENVIRONMENT=$1
  shift

  check_args "--configs-path" $1
  shift
  CONFIGS_PATH=$1
  shift

  check_args "--variable" $1
  shift
  VARIABLE=$1
  shift

  [[ "$1" == "--optional" ]] && OPTIONAL=true || OPTIONAL=false

  # All three layers must exist even if the variable lives in only one.
  if [ ! -f ${CONFIGS_PATH}/common.yaml ]; then
    echo >&2 "Missing $CONFIGS_PATH/common.yaml configuration file"
    exit 1
  fi
  if [ ! -f ${CONFIGS_PATH}/${ENVIRONMENT}.yaml ]; then
    echo >&2 "Missing $CONFIGS_PATH/$ENVIRONMENT.yaml configuration file"
    exit 1
  fi
  if [ ! -f ${CONFIGS_PATH}/${ENVIRONMENT}-secrets.yaml ]; then
    echo >&2 "Missing $CONFIGS_PATH/$ENVIRONMENT-secrets.yaml configuration file"
    exit 1
  fi

  # "null" is what yq -r prints for an absent path; treat it as not-found.
  result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/$ENVIRONMENT-secrets.yaml")
  if [ $? -ne 0 ] || [ "$result" = "null" ]; then
    result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/$ENVIRONMENT.yaml")
    if [ $? -ne 0 ] || [ "$result" = "null" ]; then
      result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/common.yaml")
      if [ $? -ne 0 ] || [ "$result" = "null" ]; then
        if [ $OPTIONAL = true ]; then
          echo ""
          exit 0
        else
          echo >&2 "Missing path $VARIABLE in $CONFIGS_PATH/$ENVIRONMENT-secrets.yaml, $CONFIGS_PATH/$ENVIRONMENT.yaml or $CONFIGS_PATH/common.yaml"
          exit 1
        fi
      fi
    fi
  fi
  echo ${result}
}
2032
+ #!/usr/bin/env bash
2033
+
2034
# Run `flyway clean` (drops ALL objects in the schema!) against a Postgres
# database from a throw-away pod inside the currently configured cluster.
# $1 host, $2 port, $3 database, $4 user, $5 password.
flyway_clean() {
  HOST="$1"
  PORT="$2"
  DATABASE="$3"
  USER="$4"
  PASSWORD="$5"

  # One-shot interactive pod, removed on exit; everything after `--` is
  # passed as arguments to the flyway image's entrypoint.
  kubectl run -it --rm flywayclean \
    --image=flyway/flyway \
    --restart=Never \
    -- \
    -cleanDisabled=false \
    -url="jdbc:postgresql://$HOST:$PORT/$DATABASE" \
    -user="$USER" \
    -password="$PASSWORD" \
    clean
}
2051
+
2052
+ #!/usr/bin/env bash
2053
+
2054
+ FLYWAY_VERSION="7.4.0"
2055
+
2056
+
2057
# Convenience wrapper: look up a value under ./deploy config files for the
# current $ENVIRONMENT. Extra flags (e.g. --optional) pass through via $@.
get_yaml_variable() {
  extract_yaml_config_variable --environment ${ENVIRONMENT} --configs-path $(pwd)/deploy --variable $@
}
2060
+
2061
# Initialise the service's Postgres database (accounts, DB, grants) and run
# the Flyway migrations for $APPLICATION in $ENVIRONMENT.
# Requires env: APPLICATION, ENVIRONMENT, FLYWAY_VERSION, MIGRATION_SQL_PATH.
# All connection settings come from the layered deploy/*.yaml config files.
init_migrate_db() {
  set -e

  check_env_vars 4 "APPLICATION" "ENVIRONMENT" "FLYWAY_VERSION" "MIGRATION_SQL_PATH"

  PG_YAML_PATH=".${APPLICATION}config.postgres"

  DB_PORT="5432"
  DB_HOST=$(get_yaml_variable "${PG_YAML_PATH}.host")
  DB_INIT_USERNAME=$(get_yaml_variable "${PG_YAML_PATH}.initUsername")
  DB_INIT_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.initPassword")
  DB_DATABASE=$(get_yaml_variable "${PG_YAML_PATH}.database")
  DB_USER=$(get_yaml_variable "${PG_YAML_PATH}.user")
  DB_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.password")
  DB_URL="jdbc:postgresql://${DB_HOST}:${DB_PORT}/${DB_DATABASE}"

  # The read-only account is optional; empty string when not configured.
  DB_RO_USER=$(get_yaml_variable "${PG_YAML_PATH}.readOnlyUser" --optional)
  DB_RO_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.readOnlyPassword" --optional)

  # KUBECONFIG (set e.g. by Kubernetes GitLab runners) would override the
  # configuration made by configure_kubectl_for.
  unset KUBECONFIG

  configure_kubectl_for ${ENVIRONMENT}

  kube_init_service_database \
    --namespace ${ENVIRONMENT} \
    --service ${APPLICATION} \
    --db_host ${DB_HOST} \
    --db_port ${DB_PORT} \
    --db_init_username ${DB_INIT_USERNAME} \
    --db_init_password ${DB_INIT_PASSWORD} \
    --db_database ${DB_DATABASE} \
    --db_username ${DB_USER} \
    --db_password ${DB_PASSWORD}

  # Fix: the second condition previously re-tested $DB_RO_USER, so a
  # configured RO user with a missing RO password slipped through.
  if [[ ! -z "$DB_RO_USER" ]] && [[ ! -z "$DB_RO_PASSWORD" ]]; then
    kube_init_database_readonly_account \
      --namespace ${ENVIRONMENT} \
      --service ${APPLICATION} \
      --db_connection "$DB_INIT_USERNAME:$DB_INIT_PASSWORD@$DB_HOST:$DB_PORT" \
      --db_database ${DB_DATABASE} \
      --db_readonly_username ${DB_RO_USER} \
      --db_readonly_password ${DB_RO_PASSWORD}
  fi

  flyway_migrate \
    --environment ${ENVIRONMENT} \
    --namespace ${ENVIRONMENT} \
    --service ${APPLICATION} \
    --db_url ${DB_URL} \
    --db_user ${DB_USER} \
    --db_password ${DB_PASSWORD} \
    --flyway_version ${FLYWAY_VERSION} \
    --flyway_sql_folder $(pwd)/${MIGRATION_SQL_PATH}
}
2115
+
2116
# Run Flyway "migrate" against a database from inside the cluster: ship the
# SQL files up as a ConfigMap, mount it at /flyway/sql in a one-shot Flyway
# pod, attach, and clean up afterwards.
# Named args (parsed by extract_args): environment namespace service db_url
#   db_user db_password flyway_version flyway_sql_folder
flyway_migrate() {
  set -e

  extract_args 8 \
    environment namespace service db_url db_user db_password flyway_version flyway_sql_folder $*

  echo "running flyway migrations for service $service in environment $environment namespace $namespace for db_url $db_url with user $db_user"
  echo "migration files expected in $flyway_sql_folder"

  CONFIGMAP_NAME="$service-flyway-migration-sql"
  POD_NAME="$service-flyway-migration"

  configure_kubectl_for $environment

  # Make the run idempotent: drop leftovers from any previous attempt.
  kubectl -n $namespace delete configmap $CONFIGMAP_NAME --ignore-not-found
  kubectl -n $namespace delete pod $POD_NAME --ignore-not-found
  kubectl -n $namespace create configmap $CONFIGMAP_NAME --from-file=$flyway_sql_folder

  # `--image ignored` only satisfies kubectl's CLI; the real container spec
  # (image, command, volume mounts) comes from --overrides. The pod runs
  # `flyway migrate`, streams output (--attach), and is removed (--rm).
  kubectl -n $namespace run $POD_NAME --image ignored -ti --restart=Never --attach --rm --overrides='
  {
    "spec":{
      "containers":[
        {
          "name":"'$POD_NAME'",
          "image":"flyway/flyway:'$flyway_version'",
          "command":["flyway", "-url='$db_url'", "-user='$db_user'", "-password='$db_password'", "migrate"],
          "volumeMounts":[
            {
              "name":"sql",
              "mountPath":"/flyway/sql"
            }
          ]
        }
      ],
      "volumes":[
        {
          "name":"sql",
          "configMap":{
            "name":"'$CONFIGMAP_NAME'"
          }
        }
      ]
    }
  }
  '

  kubectl -n $namespace delete configmap $CONFIGMAP_NAME
}
2164
+
2165
+ #!/usr/bin/env bash
2166
# Run Flyway "repair" (fix the schema history table) for $APPLICATION in
# $ENVIRONMENT, using the same ConfigMap + one-shot pod mechanism as
# flyway_migrate.
# Requires env: APPLICATION, ENVIRONMENT, FLYWAY_VERSION, MIGRATION_SQL_PATH.
flyway_repair() {
  set -e
  check_env_vars 4 "APPLICATION" "ENVIRONMENT" "FLYWAY_VERSION" "MIGRATION_SQL_PATH"

  PG_YAML_PATH=".${APPLICATION}config.postgres"

  DB_PORT="5432"
  DB_HOST=$(get_yaml_variable "${PG_YAML_PATH}.host")
  DB_DATABASE=$(get_yaml_variable "${PG_YAML_PATH}.database")
  DB_USER=$(get_yaml_variable "${PG_YAML_PATH}.user")
  DB_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.password")
  DB_URL="jdbc:postgresql://${DB_HOST}:${DB_PORT}/${DB_DATABASE}"

  flyway_sql_folder=$(pwd)/${MIGRATION_SQL_PATH}

  configure_kubectl_for "${ENVIRONMENT}"
  POD_NAME="${APPLICATION}-flyway-repair"
  CONFIGMAP_NAME="${APPLICATION}-flyway-repair-sql"

  # Idempotent setup: remove leftovers, then publish the SQL as a ConfigMap.
  kubectl -n "${ENVIRONMENT}" delete configmap $CONFIGMAP_NAME --ignore-not-found
  kubectl -n "${ENVIRONMENT}" delete pod $POD_NAME --ignore-not-found
  kubectl -n "${ENVIRONMENT}" create configmap $CONFIGMAP_NAME --from-file="${flyway_sql_folder}"

  # The container spec in --overrides carries the pinned image tag and the
  # `flyway repair` command; the --image flag only satisfies kubectl's CLI.
  kubectl -n "${ENVIRONMENT}" run --rm -it "${POD_NAME}" \
    --image=flyway/flyway \
    --restart=Never \
    --overrides='
  {
    "spec":{
      "containers":[
        {
          "name":"'$POD_NAME'",
          "image":"flyway/flyway:'${FLYWAY_VERSION}'",
          "command":["flyway", "-url='$DB_URL'", "-user='$DB_USER'", "-password='$DB_PASSWORD'", "repair"],
          "volumeMounts":[
            {
              "name":"sql",
              "mountPath":"/flyway/sql"
            }
          ]
        }
      ],
      "volumes":[
        {
          "name":"sql",
          "configMap":{
            "name":"'$CONFIGMAP_NAME'"
          }
        }
      ]
    }
  }
  '
  kubectl -n "${ENVIRONMENT}" delete configmap $CONFIGMAP_NAME
}
2221
+
2222
+ #!/usr/bin/env bash
2223
+
2224
# Replace the literal placeholder GIT_COMMIT with "<ref-slug>-<short-sha>"
# in every file listed (whitespace-separated) in $GIT_COMMIT_FILES.
record_git_commit() {
  local replacement="${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHORT_SHA}"
  local target
  for target in $GIT_COMMIT_FILES; do
    # '&' is used as the s/// delimiter; first occurrence per line only.
    sed -i "s&GIT_COMMIT&${replacement}&" "$target"
  done
}
2229
+
2230
# Import the PGP private key held in $GITLAB_PGP_PRIVATE_KEY into the local
# gpg keyring. Returns 1 (with a message on stdout) when the variable is
# unset or empty.
gitlab_import_pgp_key() {
  if [ "$GITLAB_PGP_PRIVATE_KEY" != "" ]
  then
    # Feed the key via stdin. The previous `VAR=<(echo …)` assignment left
    # a /dev/fd path that is not guaranteed to still be open when gpg
    # eventually reads it.
    printf '%s\n' "$GITLAB_PGP_PRIVATE_KEY" | gpg --import > /dev/null
  else
    echo '$GITLAB_PGP_PRIVATE_KEY is not set'
    return 1
  fi
}
2240
+
2241
# Decrypt the gpg-encrypted file $1 to stdout, importing the CI PGP
# private key into the keyring first.
git_reveal() {
  local encrypted_file=$1
  gitlab_import_pgp_key
  gpg --decrypt $encrypted_file
}
2245
+ #!/usr/bin/env bash
2246
+
2247
# Deploy application $1 to environment/namespace $2 at version $3, using
# the chart under deploy/<app> and the layered config files under deploy/.
# NOTE: assigns the globals APPLICATION/ENVIRONMENT/VERSION as a side effect.
helm_deploy_v3() {
  APPLICATION=$1
  ENVIRONMENT=$2
  VERSION=$3
  deploy_chart_v3 \
    --path_configs deploy \
    --path_chart deploy/$APPLICATION \
    --application $APPLICATION \
    --environment $ENVIRONMENT \
    --namespace $ENVIRONMENT \
    --helm_extra_args --set global.version=$VERSION
}
2259
+
2260
# Deploy a Helm v3 chart with the layered config files
# (common.yaml / <namespace>.yaml / <namespace>-secrets.yaml), then emit a
# Datadog deployment event.
# Args (fixed order): --path_configs P --path_chart C --application A
#   --environment E --namespace N [--helm_extra_args ...rest]
deploy_chart_v3() {
  set -e
  set -x

  # Rigid parsing, but all args are mandatory (expect last) and flexible order is unnecessary
  check_args "--path_configs" $1; shift
  path_configs=$1; shift
  check_args "--path_chart" $1; shift
  path_chart=$1; shift
  check_args "--application" $1; shift
  application=$1; shift
  check_args "--environment" $1; shift
  environment=$1; shift
  check_args "--namespace" $1; shift
  namespace=$1; shift
  # Everything after --helm_extra_args is forwarded verbatim to helm.
  if [ $# -ne 0 ]; then
    check_args "--helm_extra_args" $1; shift
    helm_extra_args=$*
  fi

  echo "================================"
  echo " Deploying $application"
  echo " - Environment: $environment"
  echo " - Namespace: $namespace"
  echo "================================"

  root_path=$(pwd)

  # Check the configs exists

  check_config_file ${root_path}/${path_configs}/common.yaml
  check_config_file ${root_path}/${path_configs}/${namespace}.yaml
  check_config_file ${root_path}/${path_configs}/${namespace}-secrets.yaml

  # Check the chart exists
  if [ ! -d ${root_path}/${path_chart} ] || [ ! -f ${root_path}/${path_chart}/Chart.yaml ]; then
    echo "Bad Chart $root_path/$path_chart : does not exists or missing Chart.yaml"
    print_usage
    exit 1
  fi

  # Unset Kubectl configuration made via the KUBECONFIG env variable
  # it would override the config made by configure_kubectl_for
  # for example, using Gitlab runners in Kubernetes sets this variable and causes conflict
  unset KUBECONFIG

  # Configure Kubectl
  configure_kubectl_for ${environment}

  # Configure helm3 (version call only probes connectivity; failure tolerated)
  helm3 version --namespace ${namespace} || true
  # helm3 stable repo have changed and must be updated manually, in versions < v2.17.0
  helm3 repo add colisweb s3://colisweb-helm-charts/colisweb
  helm3 repo add stable https://charts.helm.sh/stable
  helm3 repo update
  helm3 dependency update ${root_path}/${path_chart}

  # Gather values/*.yaml files into a " -f file" argument string
  values_path="${root_path}/${path_chart}/values"
  values_files=''
  [ -d $values_path ] && values_files=$(find $values_path -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')

  # Deploy
  helm3 upgrade --install \
    --namespace ${namespace} \
    ${values_files} \
    -f ${root_path}/${path_configs}/common.yaml \
    -f ${root_path}/${path_configs}/${namespace}.yaml \
    -f ${root_path}/${path_configs}/${namespace}-secrets.yaml \
    ${helm_extra_args} \
    ${application} ${root_path}/${path_chart}

  #send event to dd
  PUBLISHED_VERSION="$CI_COMMIT_REF_NAME-$CI_COMMIT_SHA"
  emit_datadog_deploy_event --environment $environment --service $application --version $PUBLISHED_VERSION

  echo "================================"
  echo " Deployed $application"
  echo " - Environment: $environment"
  echo " - Namespace: $namespace"
  echo "================================"

  set +x
}
2344
+
2345
# Wait (in parallel) for every Deployment of a Helm release to finish
# rolling out; exit 1 if any rollout fails or times out.
# usage :
# verify_deployments staging price
# verify_deployments -t 15m testing price
verify_deployments_v3() {
  set -e

  if [ "$1" = "-t" ] ; then
    TIMEOUT=$2
    shift
    shift
  else
    TIMEOUT=5m
  fi

  NAMESPACE=$1
  RELEASE=$2

  # Get all Deployments names from the deployed chart
  DEPLOYMENTS=(
    $(helm3 get manifest --namespace $NAMESPACE $RELEASE | yq -rs '.[] | select(.kind=="Deployment") | .metadata.name')
  )

  echo "verifying on $NAMESPACE deployments ${DEPLOYMENTS[@]} with a timeout of $TIMEOUT"

  # Fan out one rollout-status watcher per deployment, then join them.
  PIDS=()
  for D in "${DEPLOYMENTS[@]}"; do
    kubectl -n ${NAMESPACE} rollout status deployment ${D} --timeout=${TIMEOUT} &
    PIDS+=($!)
  done

  for P in ${PIDS[@]}; do
    # Fix: a bare `wait $P; if [ $? -ne 0 ]` never reached the error
    # message — under `set -e` the failing `wait` aborted the function
    # first. Testing the status inside `if` keeps -e from firing.
    if ! wait "$P"; then
      echo "at least one deployment failed or timed out (after $TIMEOUT)"
      exit 1
    fi
  done

}
2386
+
2387
# Print the deploy_chart CLI usage text to stdout.
print_usage() {
  # Quoted heredoc delimiter: the text is emitted literally, no expansion.
  cat <<'USAGE'
Usage:
deploy_chart \
 --path_configs <path to .yaml namespaces and secret config files>
 --path_chart <path to Helm Chart>
 --application <application name used by Helm>
 --environment <infrastructure environment>
 --namespace <namespace>
 --helm-extra-args <extra args to pass to helm, ex: --set my.value=42 --set your.setting=on>

USAGE
}
2398
+
2399
# Abort the deployment when a required config file is missing.
# $1 - path to the expected YAML configuration file.
# Prints usage and exits 1 if the file does not exist.
check_config_file() {
  local filename=$1
  # Fix: the test previously read `[ ! -f $(unknown) ]` — `unknown` is not
  # a command, the substitution expanded empty, and the check always
  # passed, so missing config files were never reported.
  if [ ! -f "$filename" ]; then
    echo "Missing $filename configuration file"
    print_usage
    exit 1
  fi
}
2407
+
2408
# Post a deployment status message to a chat webhook.
# $1 - webhook URL (defaults to $DEFAULT_CHAT_URL)
# Reads GitLab CI vars: CI_JOB_STATUS, CI_JOB_URL, CI_JOB_NAME, CI_JOB_ID,
#   CI_PROJECT_NAME, CI_COMMIT_TITLE — plus ENVIRONMENT.
notify_new_deployment() {
  # Ensure jq is present (assumes an apt-based CI image).
  jq --version || (apt update && apt install -y jq)

  CHAT_URL=${1:-$DEFAULT_CHAT_URL}

  STATUS=$(echo $CI_JOB_STATUS | tr '[:lower:]' '[:upper:]' )
  ENV_NAME=$(echo $ENVIRONMENT | tr '[:lower:]' '[:upper:]' )

  # <url| label> link markup — presumably Slack-style; verify with the webhook
  JOB_LINK="<$CI_JOB_URL| $CI_JOB_NAME $CI_JOB_ID>"

  DESCRIPTION="
  $STATUS : Deployment for $CI_PROJECT_NAME on $ENV_NAME
  $JOB_LINK
  $CI_COMMIT_TITLE
  "

  # jq builds the payload so the free-form text is safely JSON-escaped.
  JSON_MESSAGE=$(jq -n --arg text "$DESCRIPTION" '{text: $text }')
  curl -X POST $CHAT_URL \
    --header "Content-Type: application/json" \
    --data "$JSON_MESSAGE"
}
2429
# Post the release notes of the tagged version to a chat webhook.
# Exits 0 immediately when not running on a tag ($CI_COMMIT_TAG empty).
# $1 - kind label shown in the title; $2 - webhook URL (default $DEFAULT_CHAT_URL)
# Reads: GITLAB_TOKEN, CI_PROJECT_ID, CI_COMMIT_TAG, CI_PROJECT_NAME,
#   CI_JOB_STATUS/URL/NAME/ID, ENVIRONMENT.
# NOTE(review): `exit 0` terminates the calling shell, not just this function.
notify_new_version() {

  ! test -z $CI_COMMIT_TAG || exit 0

  # Ensure jq is present (assumes an apt-based CI image).
  jq --version || (apt update && apt install -y jq)

  KIND=$1
  CHAT_URL=${2:-$DEFAULT_CHAT_URL}

  STATUS=$(echo $CI_JOB_STATUS | tr '[:lower:]' '[:upper:]' )
  ENV_NAME=$(echo $ENVIRONMENT | tr '[:lower:]' '[:upper:]' )
  TITLE="$ENV_NAME *$STATUS* $KIND for version *$CI_COMMIT_TAG* of *$CI_PROJECT_NAME* "

  RELEASE_URL="https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/releases/$CI_COMMIT_TAG"

  # Fetch the release description, then: strip the surrounding JSON quotes,
  # rewrite markdown links [label](url) as <url|label>, and turn literal
  # "\n" sequences into real newlines.
  NOTES=$(curl --header "PRIVATE-TOKEN: $GITLAB_TOKEN" $RELEASE_URL |
    jq .description |
    sed -e 's/^"//' -e 's/"$//' |
    sed -E 's/\[([^]]+)\]\(([^)]+)\)/<\2|\1>/g' |
    sed -E 's/\\n/\'$'\n/g')

  JOB_LINK="<$CI_JOB_URL| $CI_JOB_NAME $CI_JOB_ID>"

  DESCRIPTION="
  $TITLE
  $JOB_LINK
  $NOTES
  "

  # jq builds the payload so the free-form text is safely JSON-escaped.
  JSON_MESSAGE=$(jq -n --arg text "$DESCRIPTION" '{text: $text }')
  curl -X POST $CHAT_URL \
    --header "Content-Type: application/json" \
    --data "$JSON_MESSAGE"
}
2463
+ #!/usr/bin/env bash
2464
+
2465
# Decide whether the sbt compile cache should be skipped.
# Succeeds (status 0) when the current ref is master/develop, or when the
# `project` directory differs from the comparison branch — the $(git diff)
# substitution is used as a string test: non-empty diff output is truthy.
skip_sbt_compile_cache() {
  COMPARED_BRANCH="${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-$CI_DEFAULT_BRANCH}"
  echo "branch to compare to: $COMPARED_BRANCH"
  git fetch origin $COMPARED_BRANCH
  echo "fetched $COMPARED_BRANCH"
  [[ "$CI_COMMIT_REF_NAME" =~ ^(master|develop)$ || $(git diff origin/$COMPARED_BRANCH --exit-code -- project) ]]
}
2472
+ #!/usr/bin/env bash
2473
+
2474
+ # in case of trouble with functions for update history during import
2475
+ # https://stackoverflow.com/questions/56729192/pg-restore-fails-when-trying-to-create-function-referencing-table-that-does-not
2476
+
2477
+ # example: clone_databases --source_env testing --destination_env recette --services "order,notification,parcel,ikea"
2478
# Copy service databases between environments.
# example: clone_databases --source_env testing --destination_env recette --services "order,notification,parcel,ikea"
clone_databases() {
  set -e
  export USERNAME="database-cloner"

  # Populates $source_env, $destination_env and $services from the flags.
  extract_args 3 source_env destination_env services $*

  dump_databases "$source_env" "$services"
  import_databases "$destination_env" "$services"
}
2488
+
2489
# Dump the Postgres database of every service (comma-separated in $2) from
# environment $1 into /tmp/db_dump_<service>.sql. Uses the port-forward
# opened by `database_k8s` and credentials revealed via git-secret from
# each service's repository.
dump_databases() {
  local env="$1"
  local services=$(echo -n "$2" | tr ',' '\n')

  database_k8s_output_dump_path="/tmp/database_k8s_output_dump"

  configure_kubectl_for "$env"
  # database_k8s keeps a port-forward alive; tolerate its exit status.
  set +e
  database_k8s "$env" > "$database_k8s_output_dump_path"
  set -e

  source_pg_local_port=$(extract_pg_local_port "$database_k8s_output_dump_path")

  for service in $services
  do
    service_path="/tmp/$service"

    set +e
    git clone "git@gitlab.com:colisweb/back/$service.git" "$service_path"
    set -e

    if cd "$service_path"; then
      echo "dump the database for service $service.."

      git secret reveal -f

      PG_YAML_PATH=".${service}config.postgres"

      # Fix: the flag is --configs-path (it was --configsPath, which made
      # check_args print "missing argument" on stderr for every lookup).
      SOURCE_DB_DATABASE=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.database")
      SOURCE_DB_USER=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.user")
      SOURCE_DB_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.password")

      export PGPASSWORD="$SOURCE_DB_PASSWORD"

      DUMP_PATH="/tmp/db_dump_${service}.sql"
      pg_dump --no-owner -h localhost -p "$source_pg_local_port" -U "$SOURCE_DB_USER" "$SOURCE_DB_DATABASE" > "$DUMP_PATH"

      cd ..
      rm -rf "$service_path"
    else
      echo "WARN: failed to clone $service - skipping"
    fi
  done
}
2533
+
2534
# Re-create and import each service's database (comma-separated in $2) into
# environment $1 from the dumps produced by dump_databases. Interactive:
# asks for confirmation before wiping each target database.
import_databases() {
  local env="$1"
  local services=$(echo -n "$2" | tr ',' '\n')

  database_k8s_output_import_path="/tmp/database_k8s_output_import"

  configure_kubectl_for "$env"
  # database_k8s keeps a port-forward alive; tolerate its exit status.
  set +e
  database_k8s "$env" > "$database_k8s_output_import_path"
  set -e

  destination_pg_local_port=$(extract_pg_local_port "$database_k8s_output_import_path")

  for service in $services
  do
    service_path="/tmp/$service"

    set +e
    git clone "git@gitlab.com:colisweb/back/$service.git" "$service_path"
    set -e

    if cd "$service_path"; then
      echo "create and import database for $service.."

      git secret reveal -f

      PG_YAML_PATH=".${service}config.postgres"

      # Fix: the flag is --configs-path (it was --configsPath, which made
      # check_args print "missing argument" on stderr for every lookup).
      DB_PORT="5432"
      DB_HOST=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.host")
      DB_INIT_USERNAME=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.initUsername")
      DB_INIT_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.initPassword")
      DB_DATABASE=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.database")
      DB_USER=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.user")
      DB_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.password")

      kube_init_service_database \
        --namespace ${env} \
        --service ${service} \
        --db_host ${DB_HOST} \
        --db_port ${DB_PORT} \
        --db_init_username ${DB_INIT_USERNAME} \
        --db_init_password ${DB_INIT_PASSWORD} \
        --db_database ${DB_DATABASE} \
        --db_username ${DB_USER} \
        --db_password ${DB_PASSWORD}

      echo "WARN: A complete clean of $DB_DATABASE on $DB_HOST will be operated"
      read -rsn1 -p"Press any key to continue";echo
      flyway_clean "$DB_HOST" "$DB_PORT" "$DB_DATABASE" "$DB_USER" "$DB_PASSWORD"

      DUMP_PATH="/tmp/db_dump_${service}.sql"
      export PGPASSWORD="$DB_PASSWORD"
      set +e
      # Fix: psql's -p flag is the PORT; the database name was previously
      # passed as `-p "$DB_DATABASE"`. The database now rides in the URI.
      psql "postgres://$DB_USER@127.0.0.1:$destination_pg_local_port/$DB_DATABASE" -f "$DUMP_PATH"
      set -e

      cd ..
      rm -rf "$service_path"
    else
      echo "WARN: failed to clone $service - skipping"
    fi
  done
}
2598
+
2599
# Print the local port that the database port-forward bound, parsed from
# the captured output file $1 (lines like "... postgres@127.0.0.1:5433 ...").
extract_pg_local_port() {
  # Single sed replaces the previous `cat | grep | sed` pipeline; -n + p
  # prints only lines where the substitution matched.
  sed -n 's/.*postgres@127.0.0.1:\(.*[0-9]\).*/\1/p' "$1"
}
2602
+ #!/usr/bin/env bash
2603
+
2604
# Send a "deployment" info event to the Datadog v1 events API.
# Named args (parsed by extract_args): environment service version.
# Requires env: DD_API_KEY. Exits 1 when no event id comes back.
emit_datadog_deploy_event() {
  extract_args 3 environment service version $*
  check_env_vars 1 "DD_API_KEY"

  response=$(
    curl -X POST -H "Content-type: application/json" \
      -d '{
        "title": "deploying '"$service"' to '"$environment"'",
        "text": "deploying '"$service"' version '"$version"' to '"$environment"'",
        "priority": "normal",
        "tags": ["service:'"$service"' ", "env:'"$environment"'" ,"action:'"deployment"'"] ,

        "alert_type": "Info"
      }' \
      "https://api.datadoghq.com/api/v1/events?api_key=$DD_API_KEY"
  )

  #echo $response
  EventID=$(echo $response | jq ".event.id")
  url=$(echo $response | jq ".event.url")

  # [[ … -ne 0 ]] evaluates non-numeric output (e.g. jq's "null") as 0,
  # which routes API failures to the error branch.
  if [[ $EventID -ne 0 ]]; then
    echo "event successfully created check in datadog UI : $url"
  else
    echo " failed to create event "
    exit 1
  fi
}
2632
+
2633
+ #!/usr/bin/env bash
2634
+
2635
+ # DEPRECATED
2636
# Send an "Error" event to the Datadog v1 events API.
# Named args (parsed by extract_args): title text priority environment.
# Requires env: DD_API_KEY.
emit_datadog_error_events() {
  set -e
  extract_args 4 title text priority environment $*
  check_env_vars 1 "DD_API_KEY"

  # The JSON splices variables via '"$var"'; values must not contain
  # quotes or other JSON-special characters.
  curl -X POST -H "Content-type: application/json" \
    -d '{
      "title": "'"$title"'",
      "text": "'"$text"'",
      "priority": "'"$priority"'",
      "tags": ["environment:'"$environment"'"],
      "alert_type": "Error"
    }' \
    "https://api.datadoghq.com/api/v1/events?api_key=$DD_API_KEY"
}
2651
+
2652
+ #!/usr/bin/env bash
2653
# Initialise terraform in directory $1 and select — or create — the
# workspace named $2.
# NB: changes the caller's working directory to $1 on success.
terraform_init() {
  SECTION=$1
  ENV=$2
  # Fail fast instead of running terraform in the wrong directory when the
  # section directory is missing (the cd was previously unchecked).
  cd "$SECTION" || return 1
  terraform init -input=false
  terraform workspace select "$ENV" || terraform workspace new "$ENV"
}