@colisweb/rescript-toolkit 5.46.4 → 5.47.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,3255 @@
1
+ #!/usr/bin/env bash
2
+
3
+ #VARIABLES
4
+ export SCRIPT_FULL_PATH=$(dirname "$0")
5
+
6
+ ##FUNCTIONS
7
+ # https://stackoverflow.com/questions/1527049/how-can-i-join-elements-of-an-array-in-bash
8
# Join all arguments after the first with the first as delimiter.
#   join_by , a b c  ->  a,b,c
# Prints nothing when fewer than two arguments are given.
join_by() {
  local delim=${1-}
  local first=${2-}
  # shift fails (without aborting) when there is nothing to join
  shift 2 || return 0
  # ${@/#/$delim} prefixes every remaining argument with the delimiter
  printf %s "$first" "${@/#/$delim}"
}
14
+
15
# mkstring <start> <separator> <end> [items...]
#   mkstring '[' ',' ']' a b c  ->  [a,b,c]
# Prints nothing at all when no items are given (Scala mkString semantics).
mkstring() {
  local start=$1
  local separator=$2
  local end=$3
  shift 3

  if [ $# -gt 0 ]; then
    # '%s' format: a bare `printf $start` would treat '%' in the
    # delimiter as a format directive and word-split on spaces
    printf '%s' "$start"
    # quote separator and items so spaces in either survive intact
    join_by "$separator" "$@"
    printf '%s' "$end"
  fi
}
27
+
28
# Print one md5 digest fingerprinting the contents of all files found
# under the given paths (directories are traversed recursively).
# Digest = md5 of the list of per-path digests.
md5all() {
  local all_hash name
  all_hash=$(mktemp)
  for name in "$@"; do
    # one digest line per argument, accumulated into the temp file
    find "$name" -type f -exec cat {} \; | md5sum | cut -f1 -d ' ' >> "$all_hash"
  done
  md5sum < "$all_hash" | cut -f1 -d ' '
  # the original leaked one temp file per call
  rm -f -- "$all_hash"
}
35
+
36
# Emit a message on stderr, keeping stdout free for command output.
log() {
  printf '%s\n' "$*" >&2
}
39
+ #!/usr/bin/env bash
40
+
41
# check_args <expected> <actual>
# Succeeds only when <actual> is non-empty and equal to <expected>;
# otherwise prints an error on stderr and returns 1.
check_args() {
  # quote $2: unquoted `[ -z $2 ]` breaks when the value contains
  # spaces or starts with a dash
  if [ -z "$2" ] || [ "$1" != "$2" ]; then
    echo >&2 "missing argument $1"
    return 1
  fi
}
47
+
48
# check_env_vars <count> <name>...
# Fails (return 1, message on stderr) if any of the <count> named
# environment variables is unset or empty.
check_env_vars() {
  ArgsCount=$1 && shift
  for ((i = 0; i < ArgsCount; i++)); do
    # ${!1}: indirect lookup of the variable whose name is in $1
    [[ -n "${!1}" ]] || { echo >&2 "missing ENV $1"; return 1; }
    shift
  done
}
58
+
59
# extract_arg <name> <passed_flag> <value>
# Verifies that <passed_flag> is exactly "--<name>", then assigns <value>
# to a (global) variable called <name>. Returns 1 with a message on
# stdout when the flag does not match.
extract_arg() {
  name=$1
  passed=$2
  value=$3
  if [ "--$name" != "$passed" ]; then
    echo "missing argument $name"
    return 1
  fi
  # dynamic assignment; the single quotes delay expansion of $value so
  # its content is not re-parsed as shell code by eval
  eval $name='$value'
}
69
+
70
# extract_args <count> <name1> ... <nameN> --<name1> <v1> ... --<nameN> <vN>
# Reads <count> option names, then consumes the matching "--name value"
# pairs (which must arrive in the same order) and declares each name as
# a global variable holding its value (via extract_arg).
extract_args() {
  declare -a Array_Args
  ArgsCount=$1 && shift
  # first pass: collect the expected option names
  for ((i = 0; i < $ArgsCount; i++)); do
    Array_Args[i]=$1 && shift
  done
  # second pass: bind one "--name value" pair per expected name
  for ArgName in "${Array_Args[@]}"; do
    extract_arg "$ArgName" $* && shift 2
  done
}
80
+
81
+ #!/usr/bin/env bash
82
+
83
# Log the local docker daemon into the Colisweb ECR registry using the
# AWS CLI v2 flow; on failure, prints an upgrade hint and falls back to
# the deprecated v1 `aws ecr get-login` output.
aws_ecr_login() {
  PATH=/root/.local/bin:$PATH

  aws ecr get-login-password \
  | docker login --username AWS --password-stdin 949316342391.dkr.ecr.eu-west-1.amazonaws.com \
  || (echo "you should update to AWS CLI version 2 https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-mac.html " $(aws ecr get-login --region=eu-west-1 --no-include-email) )
}
90
+
91
# Print a raw ECR authorization token (base64 user:password) for eu-west-1.
aws_ecr_token() {
  aws ecr get-authorization-token --region=eu-west-1 --output text --query 'authorizationData[].authorizationToken'
}
94
+
95
+
96
+ #!/usr/bin/env bash
97
+
98
+ # If gitlab is down or pipeline are stuck, hotfixes need to be available
99
+ # This script will publish docker images to ECR using your current git HEAD, then deploy them to a given environment.
100
+ # Some local files (git-commit.conf and sentry.properties) will be updated, take caution.
101
+ # No trace of this will appear on Gitlab (no releases, no pipelines, no tags).
102
+ # create_hotfix_scala $ENVIRONMENT $CHART_NAME [ $MODULE_NAME $MODULE_PATH $DEPLOYMENT ]
103
+ # create_hotfix_scala testing crm main modules/3-executables/main crm
104
+ # create_hotfix_scala testing notification \
105
+ # main-http modules/3-executables/main-http notification-http \
106
+ # main-consumer modules/3-executables/main-consumer notification-consumer
107
+
108
# create_hotfix_scala <environment> <chart_name> [<module> <path> <deployment>]...
# Builds and deploys a hotfix from the current git HEAD (see usage
# examples in the header comment above).
create_hotfix_scala() {

  ENVIRONMENT=$1
  CHART_NAME=$2
  shift 2

  SHORT_SHA=$(git rev-parse --short HEAD)
  HOTFIX_TAG="hotfix-$SHORT_SHA"

  # each gum confirm is an abort point before the next irreversible step
  gum confirm "Preparing $HOTFIX_TAG for $CHART_NAME ?" || exit
  prepare_hotfix_scala $HOTFIX_TAG

  gum confirm "Building $HOTFIX_TAG for $CHART_NAME ?" || exit
  # remaining arguments come in (module, docker path, deployment) triples
  while [[ $# -gt 2 ]] ; do
    build_hotfix_scala $HOTFIX_TAG "$1" "$2" "$3"
    shift 3
  done

  gum confirm "Deploying $HOTFIX_TAG for $CHART_NAME ?" || exit
  deploy_hotfix $CHART_NAME $ENVIRONMENT $HOTFIX_TAG
}
129
+
130
+ # Update local git-commit.conf and sentry.properties files using git short sha
131
# prepare_hotfix_scala <hotfix_tag>
# Reveals secrets, logs into ECR, then stamps every local
# git-commit.conf and sentry.properties file with the hotfix tag.
prepare_hotfix_scala() {
  HOTFIX_TAG=$1

  git secret reveal -f
  aws_ecr_login

  COMMIT_CONF_FILES=$(find . -name "git-commit.conf")
  SENTRY_PROPERTIES_FILES=$(find . -name "sentry.properties")

  # printf emits a real newline between the two lists; the original
  # `echo "$A\n$B"` printed a literal backslash-n, gluing the last conf
  # file to the first properties file into one broken path
  for file in $(printf '%s\n' "$COMMIT_CONF_FILES" "$SENTRY_PROPERTIES_FILES"); do
    # BSD/macOS sed: -i '' edits in place without a backup suffix
    sed -i '' -e 's&GIT_COMMIT&'"$HOTFIX_TAG&" "$file"
  done

}
145
+
146
+ # Build docker images locally and publish them to AWS ECR.
147
# build_hotfix_scala <hotfix_tag> <sbt_module> <docker_path> <deployment>
# Stages the sbt module's docker context, builds a linux/amd64 image
# tagged with the hotfix tag and pushes it to ECR.
build_hotfix_scala() {

  HOTFIX_TAG=$1
  SBT_MODULE=$2
  DOCKER_PATH=$3
  DEPLOYMENT=$4

  DOCKER_REGISTRY_ID="949316342391"
  DOCKER_REGISTRY="$DOCKER_REGISTRY_ID.dkr.ecr.eu-west-1.amazonaws.com"
  DOCKER_IMAGE=$DOCKER_REGISTRY/$DEPLOYMENT
  HOTFIX_IMAGE=$DOCKER_IMAGE:$HOTFIX_TAG

  #Build
  sbt "project $SBT_MODULE" "Docker / stage"

  #Publish
  # --cache-from reuses layers from the latest published image
  docker build --platform "linux/amd64" -t $HOTFIX_IMAGE --cache-from $DOCKER_IMAGE "$DOCKER_PATH/target/docker/stage"
  docker push $HOTFIX_IMAGE

  echo "Created hotfix $HOTFIX_IMAGE"
}
168
+
169
+ # Deploy the project in the given environment
170
# deploy_hotfix <chart_name> <environment> <hotfix_tag>
# Helm-upgrades the chart in the target namespace with
# global.version set to the hotfix tag, then waits for rollout.
deploy_hotfix() {
  source $colisweb_scripts/ci/helm.sh

  CHART_NAME=$1
  ENVIRONMENT=$2
  HOTFIX_TAG=$3

  CONFIG_PATH=deploy
  CHART_PATH=$CONFIG_PATH/$CHART_NAME
  ROOT_PATH=$(pwd)

  # Unset Kubectl configuration made via the KUBECONFIG env variable
  # it would override the config made by configure_kubectl_for
  # for example, using Gitlab runners in Kubernetes sets this variable and causes conflict
  unset KUBECONFIG

  # Configure Kubectl
  configure_kubectl_for $ENVIRONMENT

  # Avoiding "no local-index.yaml" or "empty local-index.yaml" error
  # NOTE(review): the ~/Library path assumes this runs on macOS
  cat > $HOME/Library/Caches/helm/repository/local-index.yaml <<EOT
apiVersion: v1
entries:
  cronjob:
EOT

  # helm stable repo have changed and must be updated manually, in versions < v2.17.0
  helm repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
  helm repo add stable https://charts.helm.sh/stable --force-update
  helm repo update
  helm dependency update ${ROOT_PATH}/${CHART_PATH}

  # Gather values/*.yaml files
  # builds a " -f file1 -f file2 ..." string consumed unquoted below
  VALUES_PATH="${ROOT_PATH}/${CHART_NAME}/values"
  VALUES_FILES=''
  [ -d $VALUES_PATH ] && VALUES_FILES=$(find $VALUES_PATH -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')

  # Deploy
  helm upgrade --install \
    --namespace ${ENVIRONMENT} \
    ${VALUES_FILES} \
    -f ${ROOT_PATH}/${CONFIG_PATH}/common.yaml \
    -f ${ROOT_PATH}/${CONFIG_PATH}/${ENVIRONMENT}.yaml \
    -f ${ROOT_PATH}/${CONFIG_PATH}/${ENVIRONMENT}-secrets.yaml \
    --set global.version=$HOTFIX_TAG \
    ${CHART_NAME} ${ROOT_PATH}/${CHART_PATH}

  # provided by the sourced helm.sh — verify rollout within 10 minutes
  verify_deployments -t 10m $ENVIRONMENT $CHART_NAME

}
221
+
222
+ #!/usr/bin/env bash
223
+
224
# image_exists <registry_id> <repository> <tag>
# Returns 0 (and a message) when the tagged image is present in ECR,
# 1 otherwise.
image_exists() {
  REGISTRY=$1
  REPOSITORY=$2
  IMAGE=$3

  TAGGED_IMAGE="$REGISTRY/$REPOSITORY:$IMAGE"

  # Test the aws call directly in the if: the original set `set -e` and
  # then checked `$?`, so a failing describe-images aborted the shell
  # before the check — the "NOT present" branch was unreachable.
  # Dropping `set -e` also stops it leaking into the sourcing shell.
  if aws ecr describe-images --registry-id "$REGISTRY" --repository-name "$REPOSITORY" --image-ids "imageTag=$IMAGE"
  then
    echo "Image $TAGGED_IMAGE already present in distant repo"
    return 0
  else
    echo "Image $TAGGED_IMAGE NOT present in distant repo"
    return 1
  fi
}
244
+ #!/usr/bin/env bash
245
+
246
# gmm <source_branch> <target_branch>
# "git merge merge": update both branches, merge source into target,
# then push the target.
gmm() {
  git checkout $1
  git pull
  git checkout $2
  git pull
  git merge $1
  git push
}
254
+
255
# git_damn_merge <source_branch> <target_branch>
# Like gmm, but runs `git dammit` on the target instead of `git pull`.
# NOTE(review): `git dammit` is a custom alias, not a builtin —
# presumably a forced sync (fetch + hard reset); verify in git config.
git_damn_merge() {
  git checkout $1
  git pull
  git checkout $2
  git dammit
  git merge $1
  git push
}
263
+
264
# Delete local branches that track origin but no longer have a matching
# remote branch. The remote list arrives on stdin and is used as the
# pattern file for egrep -v (via /dev/fd/0), filtering the tracking
# branches; `git branch -d` refuses unmerged branches, so this is safe.
git_prune_local_branches() {
  git branch -r |
  awk '{print $1}' |
  egrep -v -f /dev/fd/0 <(git branch -vv | grep origin) |
  awk '{print $1}' |
  xargs git branch -d
}
271
+
272
# Interactively pick any branch (local or remote, remote prefix
# stripped) with gum's fuzzy filter and check it out.
gum_checkout() {
  git branch -a | cut -f3- -d "/" | gum filter | xargs git checkout
}
275
+
276
+ # useful option :
277
+ # export GIT_SUBLINE_MERGE_NON_INTERACTIVE_MODE=TRUE
278
+ # see https://github.com/paulaltin/git-subline-merge
279
# setup_subline_merge [--local|--global]
# Registers the git-subline-merge driver for all files, either in the
# current repo (default) or globally. Returns 1 when --local is used
# outside a git repo, 2 on an unknown argument.
setup_subline_merge() {
  location=${1:-"--local"}

  case $location in
  --local)
    if [ -d ".git" ]; then
      # per-repo attributes file (not committed, unlike .gitattributes)
      echo "* merge=subline" >>.git/info/attributes
    else
      echo "Cannot use local option, not in a git repository"
      return 1
    fi
    ;;
  --global)
    echo "* merge=subline" >>~/.gitattributes
    ;;
  *)
    echo "unknown argument $location"
    return 2
    ;;
  esac

  # $location doubles as the scope flag for git config (--local/--global)
  git config $location merge.conflictStyle diff3
  git config $location merge.subline.driver "$colisweb_scripts/shell-session/shell/dev/git-subline-merge %O %A %B %L %P"
  git config $location merge.subline.recursive binary
}
304
+
305
# rebase_from_ancestor <branch>
# Squash every commit since the merge-base with <branch> into one
# commit, then rebase onto <branch> resolving conflicts in favor of
# the current work (-Xtheirs relative to the rebased commits).
# WARNING: uses `git reset --hard` — destructive to uncommitted work.
# NOTE(review): `set -x` is enabled and never turned back off, so
# tracing stays on in the calling shell after this returns.
rebase_from_ancestor() {
  set -x
  branch=$1
  tip=$(git rev-parse HEAD)
  ancestor=$(git merge-base $branch $tip)
  # keep the squashed-away history in the commit message
  commits=$(git log $ancestor..$tip)
  git reset --hard $ancestor
  git merge --squash $tip
  git commit -m "squashed commmits $commits" || echo "nothing committed"
  git rebase $branch -Xtheirs
}
316
+
317
+ #!/usr/bin/env bash
318
+
319
# Import every bundled public key from pgp_keys/ into the local gpg keyring.
import_all_pgp_keys() {
  echo "importing all PGP keys"
  gpg --import $SCRIPT_FULL_PATH/pgp_keys/*.key
}
323
+
324
# Revoke git-secret access for every person currently listed by
# `git secret whoknows`.
remove_all_persons_from_secrets() {
  echo "cleanup git secret"
  WHO_KNOWS=($(git secret whoknows))
  # expand the whole array: a bare $WHO_KNOWS is only its first element,
  # so only one person was actually being removed
  git secret removeperson "${WHO_KNOWS[@]}"
  echo "Removed secrets access for ${WHO_KNOWS[*]}"
}
330
+
331
# Print the e-mail address of every key bundled in pgp_keys/
# (extracted from the <...> part of each uid line).
all_pgp_emails() {
  gpg --show-key $SCRIPT_FULL_PATH/pgp_keys/*.key | sed -rn "s/.*<(.*)>/\1/p"
}
334
+
335
# set_all_secret_keys [email...]
# Re-key git-secret: reveal everything, wipe the current recipient
# list, then re-encrypt for the given e-mails (or an interactive gum
# selection among the bundled pgp keys when none are given).
set_all_secret_keys() {

  import_all_pgp_keys

  git secret reveal -f

  remove_all_persons_from_secrets

  if [ $# -eq 0 ]; then
    echo "No emails supplied, using dev-tools pgp keys as source"
    IN_THE_KNOW=($(gum choose --no-limit $(all_pgp_emails)))
  else
    IN_THE_KNOW=($*)
  fi

  # expand the whole array: a bare $IN_THE_KNOW passed only the first
  # selected e-mail to `git secret tell`
  git secret tell "${IN_THE_KNOW[@]}"
  git secret hide
  git secret whoknows

  echo "all secrets updated, you'll need to commit the changes"
}
356
+
357
+ #!/usr/bin/env bash
358
+
359
# start_ssh_bastion <environment> <local_port>
# Spins up (or reuses) a per-user sshd pod in the cluster and forwards
# <local_port> to its port 2222, authorized with ~/.ssh/id_rsa.pub.
# Leaves POD_NAME set globally (read by database_k8s and stop_ssh_bastion).
start_ssh_bastion() {
  ENV=$1
  SSH_LOCAL_PORT=$2
  POD_NAME=ssh-bastion-$USERNAME
  CONFIG_MAP_NAME=ssh-bastion-$USERNAME
  configure_kubectl_for $ENV
  kubectl get pods -o name | grep pod/$POD_NAME
  if [ $? -eq 0 ]; then
    echo "$POD_NAME is already running"
  else
    #configmap
    # recreate from scratch: sshd_config + the user's public key
    kubectl get configmap $CONFIG_MAP_NAME && kubectl delete configmap $CONFIG_MAP_NAME
    tempdir=$(mktemp -d)
    cat <<EOF > $tempdir/sshd_config
AllowTcpForwarding yes
Port 2222
PermitRootLogin yes
AuthorizedKeysFile /etc/ssh/authorized_keys
EOF
    cp ~/.ssh/id_rsa.pub $tempdir/authorized_keys
    kubectl create configmap $CONFIG_MAP_NAME --from-file=$tempdir

    #pod
    kubectl get pod $POD_NAME && kubectl delete pod $POD_NAME
    cat <<EOF | kubectl create -f -

apiVersion: v1
kind: Pod
metadata:
  name: $POD_NAME
spec:
  containers:
    - name: $POD_NAME
      image: sickp/alpine-sshd:7.4
      ports:
        - containerPort: 2222
      volumeMounts:
        - mountPath: /etc/ssh/sshd_config
          name: ssh-config
          subPath: sshd_config
        - mountPath: /etc/ssh/authorized_keys
          name: ssh-config
          subPath: authorized_keys
  volumes:
    - name: ssh-config
      configMap:
        name: $CONFIG_MAP_NAME
EOF

  fi

  # You need a recent kubectl for wait to work (1.15 works), install or upgrade
  # with brew :
  # brew install kubernetes-cli
  # brew upgrade kubernetes-cli
  kubectl wait --for=condition=Ready pod/$POD_NAME

  # kube port-forward
  # free the local port first, then poll until the tunnel is up
  lsof -ti tcp:$SSH_LOCAL_PORT | xargs kill
  kubectl port-forward $POD_NAME $SSH_LOCAL_PORT:2222 &
  while ! nc -z 127.0.0.1 $SSH_LOCAL_PORT; do
    sleep 1
  done
  echo "forwarding ssh via local port $SSH_LOCAL_PORT"
  echo "remember to terminate the bastion with 'stop_ssh_bastion'"
}
425
+
426
# Tear down the per-user bastion pod created by start_ssh_bastion.
stop_ssh_bastion() {
  POD_NAME=ssh-bastion-$USERNAME
  kubectl delete pod $POD_NAME
}
430
+
431
+ #!/usr/bin/env bash
432
+
433
# Point the local kubectl at the EKS cluster of the given environment.
# Rejects (return 1) any environment not in the bracketed whitelist.
configure_kubectl_for() {
  local infra_env="$1"
  local valid_envs="[testing][staging][production][performance][tests][recette]"

  # membership test: "[env]" must literally appear in the whitelist
  if [[ "$valid_envs" != *"[$infra_env]"* ]]; then
    echo "Cannot configure kubectl for invalid env : $infra_env"
    echo "choose one of $valid_envs"
    return 1
  fi

  aws eks update-kubeconfig --name "toutatis-$infra_env-eks" >&2
}
446
+
447
+ #!/usr/bin/env bash
448
+
449
+ # WARNING : never try to do a dump directly from the database_production_ca
450
+ # this could cause lot of lock database issues.
451
+ # always use database_production_read_replica_ca instead
452
# database_k8s <tests|testing|staging|production|production_rw|recette>
# Opens an ssh bastion in the cluster and forwards the environment's
# RDS databases to fixed local ports (postgres / composite / mysql).
database_k8s() {
  MODE=$1
  # per-environment local port assignments; ENV selects the bastion
  case $MODE in
  "tests") SSH_LOCAL_PORT=2224;COMP_LOCAL_PORT=25550;PG_LOCAL_PORT=24440;CA_LOCAL_PORT=25430;ENV="tests";;
  "testing") SSH_LOCAL_PORT=2225;COMP_LOCAL_PORT=25551;PG_LOCAL_PORT=24441;CA_LOCAL_PORT=25431;ENV="testing";;
  "staging") SSH_LOCAL_PORT=2226;COMP_LOCAL_PORT=25552;PG_LOCAL_PORT=24442;CA_LOCAL_PORT=25432;ENV="staging";;
  "production") SSH_LOCAL_PORT=2227;COMP_LOCAL_PORT=25553;PG_LOCAL_PORT=24443;CA_LOCAL_PORT=25433;ENV="production";;
  "production_rw") SSH_LOCAL_PORT=2227;COMP_LOCAL_PORT=25554;PG_LOCAL_PORT=24444;CA_LOCAL_PORT=25434;ENV="production";;
  "recette") SSH_LOCAL_PORT=2228;COMP_LOCAL_PORT=25556; PG_LOCAL_PORT=24446;CA_LOCAL_PORT=25436;ENV="recette";;
  *) echo "Unsupported ENV : $MODE"; return 1 ;;
  esac

  start_ssh_bastion $ENV $SSH_LOCAL_PORT

  # free the local postgres port before opening the tunnels
  lsof -ti tcp:$PG_LOCAL_PORT | xargs kill

  # throwaway ssh config: one Host stanza per environment bastion
  bastion_config=$(mktemp)
  cat > "$bastion_config" <<EOF
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
User root
Host bastion_tests
  HostName 127.0.0.1
  Port 2224
  LocalForward 24440 toutatis-tests-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
Host bastion_testing
  HostName 127.0.0.1
  Port 2225
  LocalForward 24441 toutatis-testing-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25551 toutatis-testing-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25431 toutatis-testing-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  LocalForward 25531 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  LocalForward 25561 toutatis-testing-oracle-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:1521
Host bastion_staging
  HostName 127.0.0.1
  Port 2226
  LocalForward 24442 toutatis-staging-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25552 toutatis-staging-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25432 toutatis-staging-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
Host bastion_recette
  HostName 127.0.0.1
  Port 2228
  LocalForward 24446 toutatis-recette-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25556 toutatis-recette-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25436 toutatis-recette-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  LocalForward 25536 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
Host bastion_production
  HostName 127.0.0.1
  Port 2227
  LocalForward 24443 toutatis-production-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25553 toutatis-production-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25433 toutatis-production-mysql-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
EOF
  # appended at the end of the file, so these extra (writable-primary)
  # forwards attach to the last stanza: Host bastion_production
  if [ "$MODE" = "production_rw" ] ; then
    cat >> "$bastion_config" <<EOF
  LocalForward 24444 toutatis-production-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25434 toutatis-production-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
EOF
  fi

  ssh -f -N \
    -F "$bastion_config" \
    "bastion_$ENV"

  echo "sample command (composite) : 'psql postgres://postgres@127.0.0.1:$COMP_LOCAL_PORT'"
  echo "sample command : 'psql postgres://postgres@127.0.0.1:$PG_LOCAL_PORT'"
  echo "sample command : 'mysql -u colisweb -h 127.0.0.1 -P $CA_LOCAL_PORT -p db_name'"

  # POD_NAME is exported (globally) by start_ssh_bastion
  echo "run 'kubectl delete pod $POD_NAME' when you have finished"
}
522
+
523
# psql_on_k8 <namespace> <service> <user:pass@host:port> [psql args...]
# Runs psql from inside the cluster via a throwaway attached pod
# (deleted on exit thanks to --rm).
psql_on_k8() {
  NAMESPACE=$1
  SERVICE=$2
  CONNECTION=$3
  shift 3

  # remaining args are joined into a single string and handed to psql
  kubectl -n $NAMESPACE run ${SERVICE}-postgres-init \
    --image jbergknoff/postgresql-client \
    --restart=Never \
    --attach --rm \
    -- \
    postgresql://${CONNECTION} \
    "$*"
}
537
+
538
# mysql_on_k8 <namespace> <service> <host> <port> <user> <password> <query>
# Runs a single mysql query from inside the cluster via a throwaway
# attached pod (deleted on exit thanks to --rm).
mysql_on_k8() {
  local namespace=$1
  local service=$2
  local db_host=$3
  local db_port=$4
  local db_init_username=$5
  local db_init_password=$6
  local query=$7

  kubectl -n ${namespace} run ${service}-mysql-init \
    --image arey/mysql-client \
    --restart=Never \
    --attach --rm \
    -- \
    mysql --host=$db_host --user=$db_init_username --password=$db_init_password --port=$db_port --execute="$query"
}
554
+ #!/usr/bin/env bash
555
+
556
# Idempotently create a database + owner user on a shared postgres
# instance, driven from inside the cluster.
# Named args (see extract_args): --namespace --db_host --db_port
# --db_init_username --db_init_password --db_database --db_username --db_password
kube_init_database_once() {

  extract_args 8 namespace db_host db_port db_init_username db_init_password db_database db_username db_password $*

  echo "======================="
  echo " Initializing Database '$db_database' for namespace $namespace"
  echo "======================="

  echo "Checking if Database '$db_database' exists"
  # tolerate a non-zero grep (database absent) without aborting
  set +e
  psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -lqtA | cut -d\| -f1 | grep "^$db_database$"
  return_code=$?
  set -e

  if [ ${return_code} -eq 0 ]; then
    echo "Database $db_database already exists - nothing to do"
  else
    echo "Database $db_database does not exist - initializing"

    psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'CREATE DATABASE '"$db_database"';'
    echo "DB created $db_database"

    psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'CREATE USER '"$db_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
    echo "USER created $db_username"

    psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_username"';'
    echo "Granted all privileges for $db_username on $db_database"
  fi

  echo "======================="
  echo " Database '$db_database' Initialization complete for namespace $namespace"
  echo "======================="
}
589
+
590
# Idempotently create a read-only postgres role on the public schema
# of a database, driven from inside the cluster.
# Named args: --namespace --service --db_connection --db_database
# --db_readonly_username --db_readonly_password
kube_init_database_readonly_account() {

  extract_args 6 namespace service db_connection db_database db_readonly_username db_readonly_password $*

  echo "======================="
  echo " Initializing Readonly Account '$db_readonly_username' for '$db_database' for namespace $namespace"
  echo "======================="

  # Print commands before execution, except echo
  # NOTE(review): this DEBUG trap is never cleared, so it stays active
  # in the calling shell after the function returns
  trap '[[ $BASH_COMMAND != echo* ]] && echo $BASH_COMMAND' DEBUG

  echo "Checking if Readonly account '$db_readonly_username' for '$db_database' exists"
  # tolerate a non-zero grep (role absent) without aborting
  set +e
  psql_on_k8 $namespace $service $db_connection -qtAc 'SELECT rolname FROM pg_roles;' | grep "^$db_readonly_username$"
  return_code=$?
  set -e

  if [ ${return_code} -eq 0 ]; then
    echo "Account $db_readonly_username already exists - nothing to do"
  else
    echo "Account $db_readonly_username does not exist - creating"

    psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_readonly_username"' WITH ENCRYPTED PASSWORD '"'$db_readonly_password'"';'
    psql_on_k8 $namespace $service $db_connection -c 'GRANT CONNECT ON DATABASE '"$db_database"' TO '"$db_readonly_username"';'
    psql_on_k8 $namespace $service $db_connection -c 'GRANT USAGE ON SCHEMA public TO '"$db_readonly_username"';'
    psql_on_k8 $namespace $service $db_connection -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO '"$db_readonly_username"';'
    psql_on_k8 $namespace $service $db_connection -c 'ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO '"$db_readonly_username"';'

    echo "Created user with read-only permissions for $db_readonly_username on $db_database (schema public)"
  fi
}
621
+
622
# Idempotently provision the Datadog database-monitoring agent user,
# schema and helper procedures on a MySQL instance (run from inside
# the cluster via mysql_on_k8).
# Named args: --namespace --db_host --db_port --db_init_username
# --db_init_password --db_datadog_username --db_datadog_password --db_datadog_schema
kube_init_datadog_in_database() {
  extract_args 8 namespace db_host db_port db_init_username db_init_password db_datadog_username db_datadog_password db_datadog_schema $*

  echo "======================="
  echo " Initializing Datadog Agent Requirement for namespace $namespace"
  echo "======================="

  echo "Checking if User '$db_datadog_username' exists"
  local service="datadog"
  found_db_users=$(mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'SELECT user FROM mysql.user;')
  # tolerate a non-zero grep (user absent) without aborting
  set +e
  echo "$found_db_users" | grep "^$db_datadog_username$"
  return_code=$?
  set -e

  if [ ${return_code} -eq 0 ]; then
    echo "User $db_datadog_username already exists - nothing to do"
  else
    echo "User $db_datadog_username does not exist - initializing"

    # All the query come from this docs : https://docs.datadoghq.com/fr/database_monitoring/setup_mysql/selfhosted/?tab=mysql56

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'CREATE USER '"$db_datadog_username"'@"%" IDENTIFIED BY '"'$db_datadog_password'"';'
    echo "USER created $db_datadog_username"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT REPLICATION CLIENT ON *.* TO datadog@"%" WITH MAX_USER_CONNECTIONS 5;'
    echo "ALTER USER $db_datadog_username"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT PROCESS ON *.* TO '"$db_datadog_username"'@"%";'
    echo "Granted PROCESS for $db_datadog_username"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT SELECT ON performance_schema.* TO '"$db_datadog_username"'@"%";'
    echo "Granted SELECT on performance_schema for $db_datadog_username"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'CREATE SCHEMA IF NOT EXISTS datadog;'
    echo "CREATE SCHEMA datadog"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT EXECUTE ON datadog.* to '"$db_datadog_username"'@"%";'
    echo "Granted 'GRANT EXECUTE for $db_datadog_username on datadog"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT CREATE TEMPORARY TABLES ON datadog.* TO '"$db_datadog_username"'@"%";'
    echo "Granted CREATE TEMPORARY TABLES for $db_datadog_username"

    # multi-line SQL passed as one single-quoted argument
    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.explain_statement;
DELIMITER $$
CREATE PROCEDURE datadog.explain_statement(IN query TEXT)
SQL SECURITY DEFINER
BEGIN
SET @explain := CONCAT("EXPLAIN FORMAT=json ", query);
PREPARE stmt FROM @explain;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
END $$
DELIMITER ;'
    echo "CREATE PROCEDURE PROCEDURE datadog.explain_statement"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS '"$db_datadog_username"'.explain_statement;
DELIMITER $$
CREATE PROCEDURE '"$db_datadog_username"'.explain_statement(IN query TEXT)
SQL SECURITY DEFINER
BEGIN
SET @explain := CONCAT("EXPLAIN FORMAT=json ", query);
PREPARE stmt FROM @explain;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
END $$
DELIMITER ;
GRANT EXECUTE ON PROCEDURE '"$db_datadog_username"'.explain_statement TO datadog@"%";'
    echo "CREATE PROCEDURE on SCHEMA $db_datadog_schema for $db_datadog_username"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.enable_events_statements_consumers;
DELIMITER $$
CREATE PROCEDURE datadog.enable_events_statements_consumers()
SQL SECURITY DEFINER
BEGIN
UPDATE performance_schema.setup_consumers SET enabled="YES" WHERE name LIKE "events_statements_%";
END $$
DELIMITER ;
GRANT EXECUTE ON PROCEDURE datadog.enable_events_statements_consumers TO datadog@"%";'

    echo "CREATE PROCEDURE on datadog.enable_events_statements_consumers"
  fi

  echo "======================="
  echo " Database '$db_datadog_schema' Initialization complete for namespace $namespace"
  echo "======================="
}
710
+
711
# Idempotently provision the Datadog database-monitoring agent user,
# schema, extension and explain_statement() helper on a PostgreSQL
# instance (run from inside the cluster via psql_on_k8).
# Named args: --namespace --db_host --db_port --db_init_username
# --db_init_password --db_datadog_username --db_datadog_password
kube_init_datadog_in_postgres_database() {
  extract_args 7 namespace db_host db_port db_init_username db_init_password db_datadog_username db_datadog_password $*

  local service="datadog"
  local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"

  echo "======================="
  echo " Initializing $service Agent On PostgresSQL Database Requirement for namespace $namespace"
  echo "======================="

  echo "Checking if User '$db_datadog_username' exists"

  # tolerate a non-zero grep (user absent) without aborting
  set +e
  if psql_on_k8 $namespace $service $db_connection -qtAc 'SELECT usename FROM pg_catalog.pg_user;' | grep "^$db_datadog_username$";
  then
    echo "User $db_datadog_username already exists - nothing to do"
  else
    echo "User $db_datadog_username does not exist - initializing"

    set -e
    psql_on_k8 $namespace $service $db_connection -qc 'CREATE USER '"$db_datadog_username"' WITH password '"'$db_datadog_password'"';'
    echo "User created $db_datadog_username"

    psql_on_k8 $namespace $service $db_connection -qc 'CREATE SCHEMA datadog;'
    echo "Schema datadog created"

    psql_on_k8 $namespace $service $db_connection -qc 'GRANT USAGE ON SCHEMA datadog TO datadog;'
    echo "Granted usage for datadog schema to datadog"

    psql_on_k8 $namespace $service $db_connection -qc 'GRANT USAGE ON SCHEMA public TO datadog;'
    echo "Granted usage for public schema to datadog"

    psql_on_k8 $namespace $service $db_connection -qc 'GRANT pg_monitor TO datadog;'
    echo "Granted pg_monitor to datadog"

    psql_on_k8 $namespace $service $db_connection -qc 'CREATE EXTENSION IF NOT EXISTS pg_stat_statements schema public;'
    echo "Extension pg_stat_statements created"

    # The function body is written to a file inside the throwaway pod
    # and applied with psql -f; \\$\\$ survives the double expansion
    # (shell here, then echo -e in the pod) to become plpgsql's $$.
    local datadog_function_path="/tmp/datatog-explain-statement-function.sql"
    local datadog_function="CREATE OR REPLACE FUNCTION datadog.explain_statement(
l_query TEXT,
OUT explain JSON
)
RETURNS SETOF JSON AS
\\$\\$
DECLARE
curs REFCURSOR;
plan JSON;

BEGIN
OPEN curs FOR EXECUTE pg_catalog.concat('EXPLAIN (FORMAT JSON) ', l_query);
FETCH curs INTO plan;
CLOSE curs;
RETURN QUERY SELECT plan;
END;
\\$\\$
LANGUAGE 'plpgsql'
RETURNS NULL ON NULL INPUT
SECURITY DEFINER;"

    kubectl -n $namespace run $service-postgres-init \
      --image jbergknoff/postgresql-client \
      --restart=Never \
      --attach --rm \
      --command \
      -- \
      /bin/sh -c "echo -e \"$datadog_function\" > $datadog_function_path; psql postgresql://$db_connection -qf $datadog_function_path"

    echo "Function datadog.explain_statement created"
  fi

  echo "======================="
  echo " Database $service Initialization complete for namespace $namespace"
  echo "======================="
}
786
+
787
# Idempotently create a service's database + owner user on a shared
# postgres instance (same flow as kube_init_database_once, but the
# throwaway pod is named after the service).
# Named args: --namespace --service --db_host --db_port
# --db_init_username --db_init_password --db_database --db_username --db_password
kube_init_service_database() {

  extract_args 9 namespace service db_host db_port db_init_username db_init_password db_database db_username db_password $*

  local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"

  echo "Checking if Database '$db_database' exists"
  # tolerate a non-zero grep (database absent) without aborting
  set +e
  psql_on_k8 $namespace $service $db_connection -lqtA | cut -d\| -f1 | grep "^$db_database$"
  return_code=$?
  set -e

  if [ ${return_code} -eq 0 ]; then
    echo "Database $db_database already exists - nothing to do"
  else
    echo "Database $db_database does not exist - initializing"

    psql_on_k8 $namespace $service $db_connection -c 'CREATE DATABASE '"$db_database"';'
    echo "DB created $db_database"

    psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
    echo "USER created $db_username"

    psql_on_k8 $namespace $service $db_connection -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_username"';'
    echo "Granted all privileges for $db_username on $db_database"
  fi

  echo "======================="
  echo " Database '$db_database' Initialization complete for namespace $namespace"
  echo "======================="
}
818
+
819
+ #!/usr/bin/env bash
820
+
821
+ # Allow to use JMX connection to retrieve data and metrics from the pods within kubernetes
822
+ # You will need visualVM to use this tool https://visualvm.github.io/
823
+ # ex: bind_jmx testing notification
824
# bind_jmx <environment> <service_name>
# Opens an ssh bastion with a SOCKS proxy on local port 7777, lets you
# pick a pod of the service, and launches jconsole against its JMX
# port (7199) through the proxy. Prints VisualVM setup instructions.
bind_jmx() {

  local ENV=$1
  local SERVICE_NAME=$2
  local PORT=2242

  start_ssh_bastion $ENV $PORT

  # -D 7777: dynamic SOCKS proxy through the bastion
  echo "root" | ssh -f -N -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -D 7777 root@127.0.0.1 -p 2242
  local PODS=$(kubectl -n $ENV get pods -o wide | grep $SERVICE_NAME | grep -Eo '^[^ ]+')

  echo "Choose one of the following pod to get metrics from..."
  local POD_NAME=$(gum choose $PODS)
  # resolve the chosen pod name to its cluster IP
  local POD_IP=$(
    kubectl -n $ENV get pods -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIP}{"\n"}{end}' |
    grep $POD_NAME |
    cut -d' ' -f2 |
    head -1
  )

  jconsole -J-DsocksProxyHost=localhost \
    -J-DsocksProxyPort=7777 \
    service:jmx:rmi:///jndi/rmi://$POD_IP:7199/jmxrmi \
    -J-DsocksNonProxyHosts= &

  cat << EOF
Now start VisualVM
Preferences > Network > Manual Proxy Settings
SOCKS Proxy Line: Set 'localhost' and Port '7777'
File > Add JMX Connection
Set $POD_IP:7199, check 'do not require an SSL connection'
Remember to kill you bastion afterward using 'stop_ssh_bastion'
EOF
}
858
+ #!/usr/bin/env bash
859
+
860
# Live view of kubernetes resources in a namespace.
#   kstatus <env> <resource> [grep_filter]
kstatus() {
  local cmd="kubectl -n $1 get $2"
  # optional third argument narrows the watched output
  if [ -n "$3" ]; then
    cmd="$cmd | grep $3"
  fi
  configure_kubectl_for $1 && watch -n 1 "$cmd"
}
868
+
869
+ #!/usr/bin/env bash
870
+
871
# k8_nodes_stats [environment]   (defaults to testing)
# Prints, per node: name, workType label value, and the cpu/memory
# lines from `kubectl describe`, colorized and separated by ---.
k8_nodes_stats() {
  ENV=${1:-testing}

  configure_kubectl_for "${ENV}"

  kubectl get nodes -o name |
  xargs kubectl describe |
  grep "^Name\|workType\|cpu \|memory " |
  sed -r 's/[ :=]+/\t/g' |
  sed 's/\tworkType\t//g' |
  sed -r 's/^Name/---\nName/g' |
  grep --color "Name\|web\|workers\|cpu\|memory\|---"
}
884
+
885
+ #!/usr/bin/env bash
886
+
887
# Port forward on the first matching pod
# Ex :
#   pod_forward testing notification-http
#   pod_forward testing colisweb-api-web 3333 3000
# $1 env, $2 pod filter, $3 local port (default 8080), $4 pod port (default 8080).
pod_forward() {
  ENV=$1
  POD_FILTER=$2
  LOCAL_PORT=${3:-8080}
  POD_PORT=${4:-8080}

  # Free the local port first if something is already listening on it.
  if PID=$(lsof -ti tcp:$LOCAL_PORT); then
    echo "killing process $PID which uses port $LOCAL_PORT"
    kill $PID
  fi

  configure_kubectl_for $ENV

  POD=`pick_pod $ENV $POD_FILTER`

  # Forward in the background and remember the kubectl PID for cleanup hints.
  echo "setting up forwarding to $POD"
  kubectl -n $ENV port-forward $POD $LOCAL_PORT:$POD_PORT &
  PID=$!

  # Poll with netcat until the forwarded port answers locally.
  while ! echo exit | nc localhost $LOCAL_PORT > /dev/null; do
    sleep 1
    echo "waiting for port $LOCAL_PORT to be open locally"
  done
  echo "port $LOCAL_PORT is now available on localhost, forwarding to $ENV $POD:$POD_PORT"
  echo 'you can terminate it with "kill '$PID'" or "kill $(lsof -ti tcp:'$LOCAL_PORT')"'
}
917
+
918
# prompts to pick a pod and run a command like bash inside
#   pod_exec testing
#   pod_exec testing bash
#   pod_exec testing bash colisweb-api
# $1 env, $2 command (default bash), $3 optional pod filter.
pod_exec() {
  ENV=$1
  COMMAND=${2:-bash}
  POD_FILTER=$3
  configure_kubectl_for $ENV
  # Select the target pod interactively, or by filter when one was given.
  POD=$(pick_pod $ENV $POD_FILTER)
  echo "running $COMMAND inside $POD"
  kubectl -n $ENV exec -ti $POD -- $COMMAND
}
931
+
932
# prompts to pick a pod and copy from a local file to the pod
#   pod_copy_to testing localfile remotefile
#   pod_copy_to testing localfile remotefile colisweb-api
# $1 env, $2 local path, $3 remote path, $4 optional pod filter.
pod_copy_to() {
  ENV=$1
  LOCAL_FILE=$2
  REMOTE_FILE=$3
  POD_FILTER=$4
  configure_kubectl_for $ENV
  # Select the destination pod, then copy with the namespace-qualified syntax.
  POD=$(pick_pod $ENV $POD_FILTER)
  kubectl cp $LOCAL_FILE $ENV/$POD:$REMOTE_FILE
}
944
+
945
+
946
# Select a pod name in the given namespace: interactively (gum filter) when
# no filter is given, otherwise the first pod whose listing matches $2.
# Outputs the pod name on stdout; warns on stderr when nothing matches.
pick_pod() {
  ENV=$1
  POD_FILTER=$2
  configure_kubectl_for $ENV

  if [ -z "$2" ] ; then
    kubectl -n $ENV get pods | gum filter | cut -f1 -d" "
  else
    # BUGFIX: the previous `if PODS=$(... | grep | cut); then` tested the
    # pipeline's exit status, which is always cut's (0), so the "no pods
    # found" branch was unreachable. Test the captured value instead.
    PODS=$(kubectl -n $ENV get pods | grep -m1 "$POD_FILTER" | cut -f1 -d" ")
    if [ -n "$PODS" ]; then
      echo $PODS
    else
      echo "no pods found on $ENV matching $POD_FILTER" >&2
    fi
  fi
}
961
+
962
# pods_resources $ENV
# Will output a CSV (;) of all deployments on this environment with cpu and memory request and limits
# Errors and null outputs are ignored and won't be in the output.
pods_resources() {
  ENV=$1
  configure_kubectl_for $ENV
  # Deployment names: first column of the listing, minus the header.
  DEPLOYMENTS=(
    $(kubectl -n $ENV get deployments | grep -Eo '^[^ ]+' | grep -v 'NAME')
  )
  echo "deployment; request_cpu; request_memory; limits_cpu;limits_memory"
  for D in "${DEPLOYMENTS[@]}"; do
    # BUGFIX: this used to end with `) 2&>/dev/null`, which actually runs a
    # command named "2" with the assignment as its environment — the errors
    # were never silenced. Redirect stderr inside the substitution instead.
    info=$(kubectl -n $ENV get deployment -o yaml $D 2>/dev/null |
      yq '.spec.template.spec.containers[].resources' |
      yq '.L = .requests.cpu + "; " + .requests.memory + "; " + .limits.cpu + "; " + .limits.memory' |
      yq ".L" 2>/dev/null)
    # Deployments without resource settings yield the literal "null" — skip them.
    if ! [ "$info" = "null" ]; then
      echo "$D; $info"
    fi
  done
}
982
+
983
# pods_strategies $ENV
# Output a CSV (;) of all deployments with their rolling-update strategy
# (maxSurge / maxUnavailable). Deployments without one ("null") are skipped.
pods_strategies() {
  ENV=$1
  configure_kubectl_for $ENV
  # Deployment names: first column of the listing, minus the header.
  DEPLOYMENTS=(
    $(kubectl -n $ENV get deployments | grep -Eo '^[^ ]+' | grep -v 'NAME')
  )
  echo "deployment; max_surge; max_unavailable"
  for D in "${DEPLOYMENTS[@]}"; do
    # BUGFIX: was `) 2&>/dev/null`, which executes a command named "2" —
    # redirect stderr inside the substitution instead.
    info=$(kubectl -n $ENV get deployment -o yaml $D 2>/dev/null |
      yq '.spec.strategy' |
      yq '.L = .rollingUpdate.maxSurge + "; " + .rollingUpdate.maxUnavailable' |
      yq ".L" 2>/dev/null)
    if ! [ "$info" = "null" ]; then
      echo "$D; $info"
    fi
  done
}
1000
+
1001
+ #!/usr/bin/env bash
1002
+
1003
# Write an ssh_config exposing the "ca" ElastiCache redis of each environment
# on a local port. $1 destination file path.
bastion_config_for_redis_ca() {
  # Quote the target so paths with spaces don't trigger "ambiguous redirect".
  ssh_config xufte6.0001.euw1.cache.amazonaws.com redis 2223 63789 tests testing recette-001 sandbox prod > "$1"
}
1006
+
1007
# Write an ssh_config exposing the "toutatis" ElastiCache redis of each
# environment on a local port. $1 destination file path.
bastion_config_for_redis_toutatis() {
  # Quote the target so paths with spaces don't trigger "ambiguous redirect".
  ssh_config xufte6.0001.euw1.cache.amazonaws.com toutatis 2223 63789 tests testing recette staging production > "$1"
}
1010
+
1011
# Generate an ssh_config: the common header plus one bastion Host block per
# environment, each forwarding an incremented local port to that
# environment's redis endpoint.
# $1 host suffix, $2 host prefix, $3 first local ssh port, $4 first local
# forwarded port; remaining args are the per-environment instance names.
ssh_config() {
  host=$1
  host_prefix=$2
  port0=$3
  forward0=$4
  shift 4
  instance_names=("$@") # /!\ indices start at 1 with zsh
  ssh_header

  environments=(tests testing recette staging production)

  # NOTE(review): the loop runs i=1..length, matching zsh's 1-based arrays
  # (this toolkit is sourced from oh-my-zsh). Under bash the same code would
  # skip the first environment and read past the end — do not "fix" without
  # confirming the target shell.
  length=${#environments[@]}
  for (( i=1; i<=${length}; i++ ));
  do
    bastion_block bastion_${environments[$i]} $(($port0 + $i)) $(($forward0 + $i)) ${host_prefix}-${instance_names[$i]}.$host
  done
}
1028
+
1029
# Emit the common ssh_config preamble shared by every bastion entry.
ssh_header() {
  printf '%s\n' \
    'UserKnownHostsFile /dev/null' \
    'StrictHostKeyChecking no' \
    'User root'
}
1036
+
1037
# Emit one ssh_config Host block pointing at a local bastion tunnel.
# $1 host alias, $2 local ssh port, $3 local forwarded port, $4 redis host.
bastion_block() {
  printf 'Host %s\nHostName 127.0.0.1\nPort %s\nLocalForward %s %s:6379\n' \
    "$1" "$2" "$3" "$4"
}
1045
+
1046
# Tunnel to an environment's ElastiCache redis through the SSH bastion, then
# open an interactive redis-cli on the forwarded local port.
# $1 environment (tests|testing|recette|staging|production)
# $2 redis instance name: "ca" (default) or "toutatis"
redis_k8s() {
  MODE=$1
  REDIS_INSTANCE=${2:-ca}
  # Each environment gets its own fixed pair of local ports so tunnels to
  # several environments can coexist.
  case $MODE in
    "tests") SSH_LOCAL_PORT=2224;REDIS_LOCAL_PORT=63790;ENV="tests";;
    "testing") SSH_LOCAL_PORT=2225;REDIS_LOCAL_PORT=63791;ENV="testing";;
    "recette") SSH_LOCAL_PORT=2226;REDIS_LOCAL_PORT=63792;ENV="recette";;
    "staging") SSH_LOCAL_PORT=2227;REDIS_LOCAL_PORT=63793;ENV="staging";;
    "production") SSH_LOCAL_PORT=2228;REDIS_LOCAL_PORT=63794;ENV="production";;
    *) echo "Unsupported ENV : $MODE"; return 1 ;;
  esac

  start_ssh_bastion $ENV $SSH_LOCAL_PORT

  # Free the local redis port if something is already bound to it.
  lsof -ti tcp:$REDIS_LOCAL_PORT | xargs kill

  # Generate a throwaway ssh_config describing the per-environment forwards.
  bastion_config=$(mktemp)
  case $REDIS_INSTANCE in
    "ca") bastion_config_for_redis_ca "$bastion_config";;
    "toutatis") bastion_config_for_redis_toutatis "$bastion_config";;
    *) echo "Unsupported redis instance (ca or toutatis available) : $REDIS_INSTANCE"; return 1;;
  esac

  # Background tunnel using the generated config's Host entry for this env.
  ssh -f -N \
    -F "$bastion_config" \
    "bastion_$ENV"

  echo "sample command : 'redis-cli -p $REDIS_LOCAL_PORT'"
  # NOTE(review): $POD_NAME is never set in this function — presumably left
  # over from a previous kubectl-based implementation; confirm before relying
  # on this hint.
  echo "run 'kubectl delete pod $POD_NAME' when you have finished"

  redis-cli -p $REDIS_LOCAL_PORT
}
1078
+
1079
+ #!/usr/bin/env bash
1080
+
1081
#Create a k8s cron jobs that will be run regularly
#See run_cron_job_k8s -h for more details

# Options: -e env (default testing), -t cron schedule (default 00 05 * * *),
# -p name suffix (default $USERNAME), -c secret file, -f companion folder,
# -s ammonite script (required), -h help. Remaining args go to the script.
run_cron_job_k8s() {

  #default values
  local namespace="testing"
  local name="$USERNAME"
  local SCHEDULE="00 05 * * *"
  local secret=""
  local amm_folder=""
  local amm_script=""

  while getopts ":e:c:p:f:s:t:h" opt; do
    case $opt in
      e)
        namespace="$OPTARG" >&2
        ;;
      t)
        SCHEDULE="$OPTARG" >&2
        ;;
      p)
        name="$OPTARG" >&2
        ;;
      c)
        secret="$OPTARG" >&2
        ;;
      f)
        amm_folder="$OPTARG" >&2
        ;;
      s)
        amm_script="$OPTARG" >&2
        ;;
      h)
        show_help_cron_job
        return 0
        ;;
      :)
        echo "Option -$OPTARG requires an argument. Run run_cron_job_k8s -h for help" >&2
        return 0
        ;;
      \?)
        echo "Invalid option: -$OPTARG. Run run_cron_job_k8s -h for help" >&2
        return 0
        ;;
    esac
  done

  # The ammonite script is the only mandatory option.
  if [ -z "$amm_script" ]; then
    echo 'Missing -s. Run run_cron_job_k8s -h for help' >&2
    return 0
  fi

  shift "$((OPTIND-1))"

  # Remaining args become a quoted, comma-separated YAML args list.
  local script_args=$(
    if [ "$#" -gt 0 ] ; then
      printf '"'
      join_by '", "' $*
      printf '"'
    fi
  )

  local IMAGE="lolhens/ammonite:2.5.4"
  local CRONJOB_NAME="cronjob-ammonite-$name"


  configure_kubectl_for $namespace

  if [[ ! -r "$amm_script" ]]; then
    echo "ammonite script not found $amm_script"
    return 2
  else
    local CONFIG_MAP="config-$CRONJOB_NAME"
    local SECRET_MAP="secret-$CRONJOB_NAME"
    local CONFIG_MAP_DIR="$(mktemp -d)"

    # Stage the script (and optional companion folder) into the configmap dir;
    # the script itself is always mounted as /code/script.sc.
    if [[ ! -z $amm_folder && -d $amm_folder ]] ; then
      cp -r "$amm_folder/" "$CONFIG_MAP_DIR"
    fi
    cp "$amm_script" "$CONFIG_MAP_DIR/script.sc"

    # Recreate configmap / secret / cronjob idempotently (delete if present).
    kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
    kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"

    kubectl -n $namespace get secret $SECRET_MAP && kubectl -n $namespace delete secret $SECRET_MAP
    kubectl -n $namespace create secret generic $SECRET_MAP --from-file="$secret"

    kubectl -n $namespace get cronjob $CRONJOB_NAME && kubectl -n $namespace delete cronjob $CRONJOB_NAME

    echo "starting $CRONJOB_NAME with $IMAGE"

    # NOTE(review): the manifest is later written with an unquoted
    # `echo $JOB_DEFINITION`, which preserves newlines under zsh but would
    # flatten them under bash — confirm the target shell before changing.
    JOB_DEFINITION='
apiVersion: batch/v1
kind: CronJob
metadata:
  name: '$CRONJOB_NAME'
  namespace: '$namespace'
spec:
  schedule: "'$SCHEDULE'"
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        spec:
          nodeSelector:
            workType: "workers"
          restartPolicy: Never
          volumes:
            - name: config
              configMap:
                name: '$CONFIG_MAP'
            - name: secret
              secret:
                secretName: '$SECRET_MAP'
          containers:
            - name: '$CRONJOB_NAME'
              command: ["amm", "/code/script.sc"]
              image: '$IMAGE'
              imagePullPolicy: IfNotPresent
              args: ['$script_args']
              env:
                - name: POD_NAME
                  valueFrom:
                    fieldRef:
                      apiVersion: v1
                      fieldPath: metadata.name
                - name: POD_NAMESPACE
                  valueFrom:
                    fieldRef:
                      apiVersion: v1
                      fieldPath: metadata.namespace
                - name: HOST_IP
                  valueFrom:
                    fieldRef:
                      apiVersion: v1
                      fieldPath: status.hostIP
              volumeMounts:
                - name: config
                  mountPath: /code
                - name: secret
                  mountPath: /conf
                  readOnly: true
              resources:
                requests:
                  cpu: 500m
                  memory: 256Mi
                limits:
                  cpu: 4000m
                  memory: 512Mi
              envFrom:
                - configMapRef:
                    name: '$CONFIG_MAP'
                - secretRef:
                    name: '$SECRET_MAP'
'

    echo $JOB_DEFINITION > /tmp/job.yaml

    kubectl -n $namespace apply -f /tmp/job.yaml

  fi
}
1245
+
1246
# Usage info for run_cron_job_k8s.
show_help_cron_job() {
  cat <<EOF
Usage: run_cron_job_k8s -s SCRIPT [-t TIME] [-e ENV] [-c CONFIG] [-p POD] [-f FOLDER] [ARGS]
Create a k8s cron job that will be run a script regularly

    -h          display this help and exit
    -s SCRIPT   run script SCRIPT on a pod (SCRIPT must be a .sc file)
    -t TIME     opt. time when the job will be launched. TIME should be in CRON syntax (default to 00 05 * * *, ie 5AM UTC)
    -e ENV      opt. set execution environment (default to testing)
    -c CONFIG   opt. secret file needed for the script (must be a .sc file, not a .secret file)
    -p POD      opt. name of the pod to create (default to $USERNAME)
    -f FOLDER   opt. name of the folder containing the scripts to execute (if SCRIPT needs other files)
    ARGS        opt. additional arguments for SCRIPT

EOF
}
1263
+
1264
+ #!/usr/bin/env bash
1265
+
1266
# Usage info for run_job_k8s.
show_help_job() {
  cat <<EOF
Usage: run_job_k8s -s SCRIPT [-e ENV] [-c CONFIG] [-p POD] [-f FOLDER] [ARGS]
Create a k8s job executing a script

    -h          display this help and exit
    -s SCRIPT   run script SCRIPT on a pod (SCRIPT must be a .sc file)
    -e ENV      opt. set execution environment (default to testing)
    -c CONFIG   opt. secret file needed for the script (must be a .sc file, not a .secret file)
    -p POD      opt. name of the pod to create (default to $USERNAME)
    -f FOLDER   opt. name of the folder containing the scripts to execute (if SCRIPT needs other files)
    ARGS        opt. additional arguments for SCRIPT

The organisation of the files must be the same locally as on the pod :
 - /code containing the script to execute (arg -s) and the other needed files (if the arg -f is used, it must reference this directory)
 - /conf containing the secret file (arg -c if used)
E.g. in the script "/code/script.sc", to use a secret file "/conf/secret.sc", the import should look like "import \$file.^.conf.secret.sc"

EOF
}
1286
+
1287
# Create (or recreate) a one-shot k8s Job running an ammonite script.
# Options: -e env (default testing), -p name suffix (default $USERNAME),
# -c secret file, -f companion folder, -s ammonite script (required), -h help.
# Remaining args are passed to the script. See show_help_job for details.
run_job_k8s() {

  #default values
  local namespace="testing"
  local name="$USERNAME"
  local secret=""
  local amm_folder=""
  local amm_script=""

  while getopts ":e:c:p:f:s:h" opt; do
    case $opt in
      e)
        namespace="$OPTARG" >&2
        ;;
      p)
        name="$OPTARG" >&2
        ;;
      c)
        secret="$OPTARG" >&2
        ;;
      f)
        amm_folder="$OPTARG" >&2
        ;;
      s)
        amm_script="$OPTARG" >&2
        ;;
      h)
        show_help_job
        return 0
        ;;
      :)
        echo "Option -$OPTARG requires an argument. Run run_cron_job_k8s -h for help" >&2
        return 0
        ;;
      \?)
        echo "Invalid option: -$OPTARG. Run run_cron_job_k8s -h for help" >&2
        return 0
        ;;
    esac
  done

  # The ammonite script is the only mandatory option.
  if [ -z "$amm_script" ]; then
    echo 'Missing -s. Run run_job_k8s -h for help' >&2
    return 0
  fi

  shift "$((OPTIND-1))"

  # Remaining args become a quoted, comma-separated YAML args list.
  local script_args=$(
    if [ "$#" -gt 0 ] ; then
      printf '"'
      join_by '", "' $*
      printf '"'
    fi
  )

  local IMAGE="lolhens/ammonite:2.5.4"
  local JOB_NAME="job-ammonite-$name"

  if [[ ! -r "$amm_script" ]]; then
    echo "ammonite script not found $amm_script"
    return 2
  else
    local CONFIG_MAP="config-$JOB_NAME"
    local CONFIG_MAP_DIR="$(mktemp -d)"
    local SECRET_MAP="secret-$JOB_NAME"

    configure_kubectl_for $namespace

    # Stage the script (and optional companion folder) into the configmap dir;
    # the script itself is always mounted as /code/script.sc.
    if [[ ! -z $amm_folder && -d $amm_folder ]] ; then
      cp -r "$amm_folder/" "$CONFIG_MAP_DIR"
    fi
    cp "$amm_script" "$CONFIG_MAP_DIR/script.sc"

    # Recreate configmap / secret / job idempotently (delete if present).
    kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
    kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"

    kubectl -n $namespace get secret $SECRET_MAP && kubectl -n $namespace delete secret $SECRET_MAP
    kubectl -n $namespace create secret generic $SECRET_MAP --from-file="$secret"

    kubectl -n $namespace get job $JOB_NAME && kubectl -n $namespace delete job $JOB_NAME

    echo "starting $JOB_NAME with $IMAGE"
  fi

  # NOTE(review): written with an unquoted `echo $JOB_DEFINITION` below, which
  # preserves newlines under zsh but would flatten them under bash — confirm
  # the target shell before changing.
  JOB_DEFINITION='
apiVersion: batch/v1
kind: Job
metadata:
  name: '$JOB_NAME'
  namespace: '$namespace'
spec:
  template:
    spec:
      containers:
        - name: '$JOB_NAME'
          command: ["amm", "/code/script.sc"]
          image: '$IMAGE'
          args: ['$script_args']
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.hostIP
          volumeMounts:
            - name: config
              mountPath: /code
            - name: secret
              mountPath: /conf
              readOnly: true
          resources:
            requests:
              cpu: 500m
              memory: 256Mi
            limits:
              cpu: 4000m
              memory: 1Gi
      nodeSelector:
        workType: workers
      restartPolicy: Never
      volumes:
        - name: config
          configMap:
            name: '$CONFIG_MAP'
        - name: secret
          secret:
            secretName: '$SECRET_MAP'
'


  echo $JOB_DEFINITION > /tmp/job.yaml

  kubectl -n $namespace apply -f /tmp/job.yaml

}
1433
+
1434
+
1435
+ #!/usr/bin/env bash
1436
+
1437
# Usage info for run_shell_k8s.
# BUGFIX: the text previously advertised "run_job_k8s" and .sc (ammonite)
# scripts — copy-pasted from show_help_job. run_shell_k8s runs a bash .sh
# script mounted as /code/script.sh.
show_help_shell() {
  cat <<EOF
Usage: run_shell_k8s -s SCRIPT [-e ENV] [-c CONFIG] [-p POD] [-f FOLDER] [ARGS]
Create a k8s job executing a shell script

    -h          display this help and exit
    -s SCRIPT   run script SCRIPT on a pod (SCRIPT must be a .sh file)
    -e ENV      opt. set execution environment (default to testing)
    -c CONFIG   opt. secret file needed for the script
    -p POD      opt. name of the pod to create (default to $USERNAME)
    -f FOLDER   opt. name of the folder containing the scripts to execute (if SCRIPT needs other files)
    ARGS        opt. additional arguments for SCRIPT

The organisation of the files must be the same locally as on the pod :
 - /code containing the script to execute (arg -s) and the other needed files (if the arg -f is used, it must reference this directory)
 - /conf containing the secret file (arg -c if used)

EOF
}
1457
+
1458
# Create (or recreate) a one-shot k8s Job that runs a bash script in the
# infra-builder image. Options: -e env (default testing), -p name suffix
# (default $USERNAME), -c secret file, -f companion folder, -s shell script
# (required), -h help. Remaining args are passed to the script.
run_shell_k8s() {

  #default values
  local namespace="testing"
  local name="$USERNAME"
  local secret=""
  local shell_folder=""
  # BUGFIX: was `local script_script=""` — the variable actually assigned and
  # read below is shell_script, which therefore leaked into the caller's shell.
  local shell_script=""

  while getopts ":e:c:p:f:s:h" opt; do
    case $opt in
      e)
        namespace="$OPTARG" >&2
        ;;
      p)
        name="$OPTARG" >&2
        ;;
      c)
        secret="$OPTARG" >&2
        ;;
      f)
        shell_folder="$OPTARG" >&2
        ;;
      s)
        shell_script="$OPTARG" >&2
        ;;
      h)
        # BUGFIX: was show_help_job (copy-paste from run_job_k8s).
        show_help_shell
        return 0
        ;;
      :)
        echo "Option -$OPTARG requires an argument. Run run_shell_k8s -h for help" >&2
        return 0
        ;;
      \?)
        echo "Invalid option: -$OPTARG. Run run_shell_k8s -h for help" >&2
        return 0
        ;;
    esac
  done

  # The shell script is the only mandatory option.
  if [ -z "$shell_script" ]; then
    echo 'Missing -s. Run run_shell_k8s -h for help' >&2
    return 0
  fi

  shift "$((OPTIND-1))"

  # Remaining args become a quoted, comma-separated YAML args list.
  local script_args=$(
    if [ "$#" -gt 0 ] ; then
      printf '"'
      join_by '", "' $*
      printf '"'
    fi
  )

  local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/docker-infra-builder:v3.1.0"
  local JOB_NAME="job-shell-$name"

  if [[ ! -r "$shell_script" ]]; then
    echo "shell script not found $shell_script"
    return 2
  else
    local CONFIG_MAP="config-$JOB_NAME"
    local CONFIG_MAP_DIR="$(mktemp -d)"
    local SECRET_MAP="secret-$JOB_NAME"

    configure_kubectl_for $namespace

    # Stage the script (and optional companion folder) into the configmap dir;
    # the script itself is always mounted as /code/script.sh.
    if [[ ! -z $shell_folder && -d $shell_folder ]] ; then
      cp -r "$shell_folder/" "$CONFIG_MAP_DIR"
    fi
    cp "$shell_script" "$CONFIG_MAP_DIR/script.sh"

    # Recreate configmap / secret / job idempotently (delete if present).
    kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
    kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"

    kubectl -n $namespace get secret $SECRET_MAP && kubectl -n $namespace delete secret $SECRET_MAP
    kubectl -n $namespace create secret generic $SECRET_MAP --from-file="$secret"

    kubectl -n $namespace get job $JOB_NAME && kubectl -n $namespace delete job $JOB_NAME

    echo "starting $JOB_NAME with $IMAGE"
  fi

  # BUGFIX: the manifest previously ended with a dangling `- name: stockage`
  # volume with no source, which made the Job spec invalid — removed.
  JOB_DEFINITION='
apiVersion: batch/v1
kind: Job
metadata:
  name: '$JOB_NAME'
  namespace: '$namespace'
spec:
  template:
    spec:
      containers:
        - name: '$JOB_NAME'
          command: ["bash", "/code/script.sh"]
          image: '$IMAGE'
          args: ['$script_args']
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.hostIP
          volumeMounts:
            - name: config
              mountPath: /code
            - name: secret
              mountPath: /conf
              readOnly: true
          resources:
            requests:
              cpu: 500m
              memory: 256Mi
            limits:
              cpu: 4000m
              memory: 1Gi
      nodeSelector:
        workType: workers
      restartPolicy: Never
      volumes:
        - name: config
          configMap:
            name: '$CONFIG_MAP'
        - name: secret
          secret:
            secretName: '$SECRET_MAP'
'


  echo $JOB_DEFINITION > /tmp/job.yaml

  kubectl -n $namespace apply -f /tmp/job.yaml

}
1609
+
1610
+
1611
+ #!/usr/bin/env bash
1612
+
1613
# Run a one-shot attached pod in a namespace and remove it when it exits.
# run_task --namespace NS --image IMG --name NAME [extra kubectl run args...]
# check_args validates that each flag literal is present before its value.
run_task() {
  # NOTE(review): set -e / set -x leak into the caller's shell since this
  # function is sourced interactively — confirm that is acceptable before
  # changing.
  set -e

  check_args "--namespace" $1
  shift
  NAMESPACE=$1
  shift
  check_args "--image" $1
  shift
  IMAGE=$1
  shift
  check_args "--name" $1
  shift
  NAME=$1
  shift

  set -x

  # Remaining arguments are forwarded verbatim to kubectl run.
  kubectl -n ${NAMESPACE} run ${NAME} \
    --image ${IMAGE} \
    --restart=Never \
    --attach --rm \
    $*
}
1637
# Geocode a free-form address (all args joined) via the Google geocoding API.
# Requires GOOGLE_API_KEY in the environment; prints the raw JSON response.
geocode_address() {
  # Percent-encode spaces only; the rest of the address is sent as-is.
  ADDRESS=$(echo "$*" | sed -e 's: :%20:g')
  URL="https://maps.googleapis.com/maps/api/geocode/json?address=${ADDRESS}&key=${GOOGLE_API_KEY}"
  curl "$URL"
}
1642
+
1643
# Look up a French business in the SIRENE open-data set.
# $1 SIREN number, remaining args form the free-text query.
search_business() {
  SIREN=$1
  shift
  # '+'-encode spaces in the query part of the URL.
  QUERY=$(echo "$*" | sed -e 's: :+:g')
  URL="https://data.opendatasoft.com/api/records/1.0/search/?dataset=sirene_v3%40public&q=${QUERY}&sort=datederniertraitementetablissement&facet=trancheeffectifsetablissement&facet=libellecommuneetablissement&facet=departementetablissementi&refine.siren=${SIREN}"
  curl "$URL"
}
1650
+
1651
+ #!/bin/bash
1652
+
1653
# source tolls.sh ; tolls antoine.thomas@colisweb.com
# Download route plans over a date range, let the user pick projects/tours
# interactively (gum), then compute toll details through the HERE API.
# $1 user email, $2 from date, $3 to date — all re-prompted with defaults.
# Requires gum, ./tour_details.sc and HERE_API_KEY in the environment.
function tolls() {
  USER=${1:-first.last@colisweb.com}
  FROM_DATE=${2:-"2023-02-01"}
  TO_DATE=${3:-"2023-02-28"}

  USER=$(gum input --prompt "username : " --value $USER)
  TOKEN=$(./tour_details.sc login --user $USER --password $(gum input --password --placeholder password))
  [ "$TOKEN" != "" ] && echo "connected" || return 1

  FROM_DATE=$(gum input --prompt "Date start : " --value $FROM_DATE)
  TO_DATE=$(gum input --prompt "Date end : " --value $TO_DATE)
  FILENAME="tours-${FROM_DATE}-TO-${TO_DATE}.json"
  # Download every route plan for the range into ~/Downloads.
  curl --cookie "session=$TOKEN" "https://api.production.colisweb.com/api/v6/routes-plans/external?from=${FROM_DATE}&to=${TO_DATE}" > ~/Downloads/$FILENAME
  echo "Tournées téléchargées"

  # Interactive selection of projects, then of the tours they contain.
  projectIds=$(./tour_details.sc allProjects --file ~/Downloads/$FILENAME | gum choose --no-limit | cut -d "," -f 2)
  echo "projets sélectionnés : $projectIds"
  tourIds=$(./tour_details.sc allTours --file ~/Downloads/$FILENAME --projectIds "$projectIds")
  echo "tournées sélectionnées : $tourIds"

  # Toll computation output is written as CSV next to the current directory.
  TARGET="${FROM_DATE}-TO-${TO_DATE}.csv"
  echo "appels à HERE, écriture dans $TARGET"
  ./tour_details.sc allToursDetails --token $TOKEN --hereApiKey $HERE_API_KEY --routeIds "$tourIds" > "$TARGET"

  echo "terminé"
}
1680
+
1681
+ #!/usr/bin/env bash
1682
+
1683
# Authenticate against an environment and cache the JWT in TOKEN_<ENV>
# (exported), prompting via gum for anything not supplied.
# possible syntax:
# login
# login testing
# login testing userid
login() {
  # Each step is chained with && so any failed prompt or auth aborts the rest.
  ENV=${1:-`gum choose testing staging production recette`} && \
  USER=${2:-`gum input --placeholder username`} && \
  PASSWORD=`gum input --password --placeholder password` && \
  TOKEN=`$SCRIPT_FULL_PATH/scala/auth.sc login --env $ENV --user $USER --password $PASSWORD` && \
  export TOKEN_$ENV=$TOKEN && \
  echo "login success for $USER on $ENV" >&2
}
1695
+
1696
# you need to call login first (see above)
# Recompute a project's tours (draft or definitive) for a given day,
# prompting via gum for any argument not supplied.
# possible syntax:
# recompute_tour
# recompute_tour testing
# recompute_tour testing draft
# recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09
# recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09 TODAY
# recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09 FRIDAY
recompute_tour() {
  ENV=${1:-$(gum choose testing staging production recette)}
  MODE=${2:-$(gum choose draft definitive)}
  PROJECT_ID=${3:-$(pick_project $ENV)}
  DAY=${4:-$(gum choose TODAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY SUNDAY)}
  # Refresh $TOKEN for the environment before calling the scala tool.
  jwt_token $ENV
  scala/tour_config.sc $MODE -t $TOKEN -p $PROJECT_ID -d $DAY
}
1712
+
1713
# Fuzzy-pick a project for the environment and print its id (first column).
pick_project() {
  ENV=${1:-$(gum choose testing staging production recette)}
  # Refresh $TOKEN for the environment before listing projects.
  jwt_token $ENV
  scala/tour_config.sc list -t $TOKEN -e $ENV | gum filter | cut -f1
}
1718
+
1719
# Ensure $TOKEN holds a valid JWT for the environment, re-running login()
# when the cached TOKEN_<ENV> is missing or no longer valid.
jwt_token() {
  ENV=${1:-`gum choose testing staging production recette`}
  # Indirect read of TOKEN_$ENV; eval keeps this portable across bash and zsh
  # (${!var} is bash-only, ${(P)var} is zsh-only).
  eval 'TOKEN=$TOKEN_'$ENV
  if ! $SCRIPT_FULL_PATH/scala/auth.sc check -t $TOKEN -e $ENV ; then
    login $ENV
  fi
}
1726
+
1727
+ #!/usr/bin/env bash
1728
+
1729
+ alias update_devtool="git -C ~/.oh-my-zsh/custom/dev-tools/ pull"
1730
+
1731
+ SCRIPT_PATH=$SCRIPT_FULL_PATH/shell/run
1732
+ PATH="$PATH:$SCRIPT_PATH/script"
1733
+
1734
# Log into the back-office API for an environment and print the session
# cookie value. Credentials are read from ~/scriptlogin, which is generated
# as an empty per-environment template on first use and must be filled by hand.
function get_token {
  local ENV=$1
  local LOGIN_FILE="$HOME/scriptlogin"

  if [ ! -f "$LOGIN_FILE" ]; then
    # Quoted 'EOF': the template is written verbatim — $ENV is only expanded
    # later, when the file is sourced inside this function (which is also why
    # the `local` declarations inside it are legal).
    cat > "$LOGIN_FILE" <<-'EOF'
#!/bin/bash
case $ENV in
"testing")
local BO_USERNAME=""
local BO_PASSWORD=""
;;
"recette")
local BO_USERNAME=""
local BO_PASSWORD=""
;;
"staging")
local BO_USERNAME=""
local BO_PASSWORD=""
;;
*)
local BO_USERNAME=""
local BO_PASSWORD=""
echo "ENV ${ENV} inconu"
return
;;
esac
EOF
  fi

  source "${LOGIN_FILE}"

  # Empty credentials mean the template was never filled in.
  if [ -z "$BO_PASSWORD" ] || [ -z "$BO_USERNAME" ]
  then
    echo éditer le ficher "$LOGIN_FILE"
    return 1
  fi

  # POST the credentials, then extract the session cookie value from the
  # response headers (-D - dumps headers to stdout, body is discarded).
  curl -o /dev/null -D - "https://api.$ENV.colisweb.com/api/v6/authent/external/session" \
    --data-raw '{"username":"'"${BO_USERNAME}"'","password":"'"${BO_PASSWORD/\"/\\\"}"'"}' \
    --compressed 2> /dev/null | grep set-cook | sed -e 's/.*session=//g;s/;.*//g'
}
1776
+
1777
+ function bash_array_to_json {
1778
+ function join {
1779
+ local IFS="$1"
1780
+ shift
1781
+ echo "$*"
1782
+ }
1783
+
1784
+ echo '["'"$(join , $*| sed -e 's/,/","/g' )"'"]' | jq
1785
+ }
1786
+
1787
# Pick a random street address within one of the given postal codes.
# $1 comma-separated list of 5-digit postal codes (default 59000).
# Outputs a random "number;street;postal_code;city" line on stdout.
function get_random_street {
  local CODE_POSTAUX_ARG=${1:-59000}
  # Split the comma-separated argument into an array of codes.
  IFS=',' read -r -a CODE_POSTAUX <<< "$CODE_POSTAUX_ARG"
  for CODE_POSTAL in "${CODE_POSTAUX[@]}"; do
    if [[ ! "$CODE_POSTAL" =~ ^[0-9]{5}$ ]]; then
      echo "Chaque CODE_POSTAL doit avoir une taille de 5 chiffre : $CODE_POSTAL"
      # NOTE(review): `exit` (not `return`) terminates the sourcing shell
      # when this function is used interactively — confirm this is intended.
      exit 1
    fi
  done
  # Pick one postal code at random among the provided ones.
  local CODE_POSTAL=$(echo "${CODE_POSTAUX[@]}" | tr " " "\n" | sort -u -R | head -n 1)

  get_random_street_in_cp $CODE_POSTAL
}
1800
+
1801
# Download (once, cached as rue-<CP>.lst in the current directory) the BAN
# national address extract for a postal code and print one random address
# line ("number;street;postal_code;city") from it.
function get_random_street_in_cp {
  local CODE_POSTAL=$1

  FILENAME="rue-$CODE_POSTAL.lst"
  if [ ! -f "$FILENAME" ]; then
    # The BAN dataset is published per department: first 2 digits of the CP.
    curl --output tmp1.gz https://adresse.data.gouv.fr/data/ban/adresses/latest/csv/adresses-"${CODE_POSTAL:0:2}".csv.gz
    gzip -d tmp1.gz
    # Keep columns number/street/CP/city, restricted to the exact postal code.
    cut -d\; -f3,5,6,8 tmp1 | sed "/;$CODE_POSTAL;/!d" > "$FILENAME"
    rm tmp1
  fi

  sort -R "$FILENAME" | head -n 1
}
1814
+
1815
# Print a JSON {"start","end"} delivery slot for the given date, picking a
# random slot among DELIVERY_SLOTS (optionally sourced from a scenario file)
# or a built-in default list.
# $1 delivery date (YYYY-MM-DD), $2 optional scenario file to source.
function rand_slot {

  local SCENARIO=$2
  if [ -f "$SCENARIO" ]; then
    source "$SCENARIO"
  fi
  local ORDER_DATE="$1"

  DEFAULT=(
    "06:00+01:00[Europe/Paris]-08:00+01:00[Europe/Paris]"
    "08:00+01:00[Europe/Paris]-10:00+01:00[Europe/Paris]"
    "10:00+01:00[Europe/Paris]-12:00+01:00[Europe/Paris]"
    "16:00+01:00[Europe/Paris]-18:00+01:00[Europe/Paris]"
    "18:00+01:00[Europe/Paris]-20:00+01:00[Europe/Paris]"
  )
  # NOTE(review): ${DELIVERY_SLOTS:-...} expands an array to all elements in
  # zsh but only to the first element in bash — this toolkit is zsh-targeted;
  # confirm before running under bash.
  USAGE=${DELIVERY_SLOTS:-${DEFAULT[@]}}

  # Shuffle the candidate slots and split the winner on its first "-".
  IFS="-" read -r start_time end_time < <(echo "${USAGE[@]}" | tr " " "\n" | sort -u -R | head -n 1 )

  echo '{"start":"'"${ORDER_DATE}T${start_time}"'", "end":"'"${ORDER_DATE}T${end_time}"'" }'
}
1836
+
1837
# Create a test order on the order API, picking a random dropoff address.
# $1 env, $2 session token, $3 scenario file (sourced: PRIMARY_REF,
# ACCOUNT_IDENTIFIER, DELIVERY_OPTIONS, ORDER_DATE, and PICKUP_STORE_CODE or
# PICKUP_WAREHOUSE_CODE), $4 suffix appended to the order reference,
# $5 JSON array of barcodes, $6 comma-separated postal codes.
# Prints the API response on stdout and the BO link on stderr.
function call_create_sfh_order {
  local ENV=$1
  local TOKEN=$2
  source "$3"
  local POS=$4
  local BARCODES="$5"
  local CODE_POSTAUX="$6"
  # One package entry per barcode, with fixed test dimensions.
  local PACKAGES=$(echo "$BARCODES" | jq '[{
  "barcode": .[],
  "length": 20.0,
  "height": 15.0,
  "width": 4.0,
  "weight": 1.5,
  "description": "test parcel",
  "options": [],
  "productTypology": "Classical",
  "packageType": "Parcel"
  }
  ]')

  # Render the DELIVERY_OPTIONS shell array as a JSON string array by hand.
  DELIVERY_OPTIONS_P='['
  for option in "${DELIVERY_OPTIONS[@]}"; do
    if [ "$DELIVERY_OPTIONS_P" != '[' ]; then
      DELIVERY_OPTIONS_P+=", "
    fi
    DELIVERY_OPTIONS_P+="\"$option\""
  done
  DELIVERY_OPTIONS_P+=']'

  # Random dropoff address: "number;street;postal_code;city".
  IFS=";" read -r nu rue code_postal ville < <(get_random_street "$CODE_POSTAUX")

  # Pickup is either a store or a warehouse, depending on the scenario file.
  if [ -n "$PICKUP_STORE_CODE" ]; then
    PICKUP_LOCATION='{
    "type": "store",
    "storeCode": "'"$PICKUP_STORE_CODE"'"
    }'
  elif [ -n "$PICKUP_WAREHOUSE_CODE" ]; then
    PICKUP_LOCATION='{
    "type": "Warehouse",
    "warehouseCode": "'"$PICKUP_WAREHOUSE_CODE"'"
    }'
  else
    echo PICKUP_WAREHOUSE_CODE ou PICKUP_STORE_CODE doit être définie dans la "$3"
    # NOTE(review): `exit` (not `return`) terminates the sourcing shell when
    # this function is used interactively — confirm this is intended.
    exit 1
  fi
  JSON='{
  "primaryOrderReference": "'"${PRIMARY_REF}${POS}"'",
  "secondaryOrderReference": null,
  "stages": [
    {
      "type": "Pickup",
      "packageBarcodes": '"$BARCODES"',
      "location": '"$PICKUP_LOCATION"'
    },
    {
      "type": "Dropoff",
      "packageBarcodes": '"$BARCODES"',
      "location": {
        "type": "Address",
        "address": {
          "address1": "'"$nu $rue"'",
          "postalCode": "'"$code_postal"'",
          "city": "'"$ville"'",
          "country": "France",
          "floor": 0,
          "lift": "with_lift"
        },
        "contact": {
          "name": "John Doe",
          "primaryPhone": "+33606060606"
        }
      }
    }
  ],
  "packages": '"$PACKAGES"',
  "owner": {
    "accountIdentifier": "'$ACCOUNT_IDENTIFIER'"
  },
  "deliveryOptions": '"$DELIVERY_OPTIONS_P"',
  "ecommerceValidationDate": "'"${ORDER_DATE}"'"
  }'

  RESULT=$(curl -s -X POST https://api.$ENV.colisweb.com/api/v6/order/external/warehouse/orders -H 'content-type: application/json' --cookie session="$TOKEN" --data-raw "$JSON")
  ORDER_ID=$(jq ".orderId" -r <<< "$RESULT")

  # Human-friendly BO link on stderr, raw API response on stdout.
  echo "nouvelle commande : https://bo.$ENV.colisweb.com/admin/orders/$ORDER_ID" >&2

  echo "$RESULT"
}
1926
+
1927
+
1928
# Bulk-scan the given barcodes in "shuttle" context against the parcel API.
# $1 env, $2 session token, $3 scenario file (sourced for its side effects),
# $4 JSON array of barcodes.
function call_scan {
  local ENV=$1
  local TOKEN=$2
  source "$3"
  local BARCODES="$4"
  # One scan entry per barcode, all in the "shuttle" context.
  local SCAN
  SCAN=$(jq '[{"barcode" :.[], "context": "shuttle"}]' <<< "$BARCODES")

  JSON='{"scans":'$SCAN'}'

  curl -X POST https://api.$ENV.colisweb.com/api/v6/parcel/external/units/scans/bulk -H 'content-type: application/json' --cookie session="$TOKEN" --data-raw "$JSON"
}
1939
+
1940
+
1941
+ function call_register_delivery {
1942
+ local ENV=$1
1943
+ local TOKEN=$2
1944
+
1945
+ SCENARIO=$3
1946
+ source "$SCENARIO"
1947
+
1948
+ local ORDER_ID=$4
1949
+ local BARCODES="$5"
1950
+
1951
+ DATA='{
1952
+ "slot": '"$(rand_slot "${DELIVERY_DATE}" "$SCENARIO")"',
1953
+ "storeIdOwner":"'"$STORE_ID_OWNER"'",
1954
+ "pickup":{"type":"hub","code":"'"$HUB"'"},
1955
+ "barcodes":'"$BARCODES"',
1956
+ "price":{"origin":"auto","amount":25.9},
1957
+ "allowCustomerSlotUpdate":false,
1958
+ "withForcedSlot": false
1959
+ }'
1960
+
1961
+ curl -X POST https://api.$ENV.colisweb.com/api/v6/order/external/warehouse/orders/"$ORDER_ID"/deliveries \
1962
+ --cookie session="$TOKEN" --data-raw "$DATA"
1963
+ }
1964
+
1965
+
1966
+
1967
# Write a commented scenario template to the file named by the global
# $SCENARIO when it does not exist yet, then return 1 so the caller stops
# and the user can edit the file before re-running.
# The heredoc delimiter is quoted ('EOF'), so $(...) and $VAR stay literal
# in the written template; the template's French comments are data, not code.
function _create_scenario_file_if_not_exist () {
  if [ ! -f "$SCENARIO" ]
  then
    cat > "$SCENARIO" <<-'EOF'
DELIVERY_DATE=$(date -v+7d '+%Y-%m-%d') # ici on demande de crée a date d'aujourd'hui + 7 jours
# peu remplacer -v+7d par -v+1d pour une livrasion programmer demain
# utiliser que par create_many_sfh_order_and_delivery
ENV="testing" # sur quelle enviromement lancer le scripts
# ENV="staging"
# ENV="recette"

ACCOUNT_IDENTIFIER="102" # pour la creation de order force utilies owner.accountIdentifier
# sur l'appel api/v6/order/external/warehouse/orders
# (creation de la commade)
HUB="duck" # pour sur l'appel api/v6/order/external/warehouse/orders
# parametre pickup.code (type est a "hub")
STORE_ID_OWNER="184" # parametre pickup.storeIdOwner
# sur l'appel api/v6/order/external/warehouse/orders
# PICKUP_STORE_CODE="2" # si non commenté alors départ du magasin
PICKUP_WAREHOUSE_CODE="422" # pour un départ d'entrepôt

BARCODES_COUNT=5 # nombres packages
PREF="aaaa" # doit faire 4 caractères utilies pour générer les barecode
# des packtages

CODE_POSTAUX=("59000", "75001") # liste code postale sur lequelle une addresse aléatoire seras choisi
# (creation de la commade)
DELIVERY_SLOTS=( # liste des horraires de créneau de livraison choisi aléatoirement
"06:00+01:00[Europe/Paris]-08:00+01:00[Europe/Paris]"
"08:00+01:00[Europe/Paris]-10:00+01:00[Europe/Paris]"
"10:00+01:00[Europe/Paris]-12:00+01:00[Europe/Paris]"
"16:00+01:00[Europe/Paris]-18:00+01:00[Europe/Paris]"
"18:00+01:00[Europe/Paris]-20:00+01:00[Europe/Paris]"
)

# DELIVERY_OPTIONS=("skill1" "skill2") # liste des nom skill - a décommanter

# normalement pas bessoin modifer
ORDER_DATE=$(date '+%Y-%m-%d') # date du jour
RAND=$(date +%y%m%d%H%M%S) # valueur peudo aleadoire (ici basé par date) doit faire 17 caractères
BARCODE_PART=0000$RAND # utiliser pour générer les bare code les barecode sont :
# {BARECODE_PART}{00000} a {BARECODE_PART}{BARECODES_COUNT}
PRIMARY_REF=$PREF$RAND # primaryOrderReference de la commande
EOF
    echo "éditer le fichier $SCENARIO"
    # Non-zero so callers abort until the template has been reviewed.
    return 1
  fi
}
2015
+
2016
+ #!/usr/bin/env bash
2017
+
2018
# Delete old merged merge requests in the Colisweb GitLab group.
# $1 (optional): cutoff date YYYY-MM-DD; defaults to two years ago.
# Reads: GITLAB_PAT (via cleanup_grouped_merged_mr).
# Fans out page deletions as background jobs; caller may 'wait' on them.
cleanup_merged_mr() {
  COLISWEB_IDL_GROUP=3054234

  # FIX: the original default was ${1:- $(date -I -v -2y)} — the leading
  # space inside the expansion leaked into the updated_before URL parameter.
  # Also 'date -v' is BSD-only; fall back to GNU 'date --date' on Linux.
  BEFORE=${1:-$(date -I -v -2y 2>/dev/null || date -I --date='-2 years')}

  # Pages 1,3,5,7,9,11 of 50 MRs each, deleted in parallel.
  for (( COUNTER=1; COUNTER<=12; COUNTER+=2 )); do
    cleanup_grouped_merged_mr $COLISWEB_IDL_GROUP $BEFORE $COUNTER &
  done

}
2028
+
2029
# Delete one page of merged MRs for a GitLab group.
# $1: group id, $2: cutoff date (YYYY-MM-DD), $3: page number.
# Reads: GITLAB_PAT (personal access token).
cleanup_grouped_merged_mr() {
  GROUP=$1
  BEFORE=$2
  PAGE_COUNT=$3
  # Each element is "<project_id>/merge_requests/<iid>", ready to append
  # to the projects API base URL below.
  MERGED_MRS=($(curl --header "PRIVATE-TOKEN: $GITLAB_PAT" \
    --url "https://gitlab.com/api/v4/groups/$GROUP/merge_requests?updated_before=${BEFORE}T08:00:00Z&status=merged&per_page=50&page=$PAGE_COUNT" |
    jq -r '.[] | {iid: .iid|tostring, pid:.project_id|tostring} | (.pid + "/merge_requests/" + .iid)'))

  for MR in ${MERGED_MRS[@]}; do
    echo "https://gitlab.com/api/v4/projects/$MR"
    curl --request DELETE \
      --header "PRIVATE-TOKEN: $GITLAB_PAT" \
      --url "https://gitlab.com/api/v4/projects/$MR"
  done
}
2044
+
2045
# you will need jq to use these commands. You can install it using "brew install jq"
# cleanup_all_ecr_images 12
#   will delete images in all repositories older than 12 weeks
# cleanup_single_ecr_repository 2024-01-01 colisweb-api
#   will delete images pushed before 2024-01-01 in the colisweb-api repository
2050
# Delete images older than $1 weeks in every ECR repository.
# Requires: aws CLI credentials, jq.
cleanup_all_ecr_images() {
  WEEKS=$1

  # BSD date (-v, macOS) first, GNU date (--date, Linux) as fallback,
  # so the cutoff computation works on both platforms.
  CLEAN_BEFORE=$(date -v-${WEEKS}w +%F || date --date="-${WEEKS} weeks" +'%Y-%m-%d')
  REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[] |.[].repositoryName')

  while read -r REPOSITORY; do
    echo "processing ECR repository $REPOSITORY before $CLEAN_BEFORE"
    cleanup_single_ecr_repository "$CLEAN_BEFORE" "$REPOSITORY"
  done <<< "$REPOSITORIES"
}
2062
+
2063
# Delete all tags (and their multi-arch manifest digests) pushed before a
# given date in one ECR repository.
# $1: cutoff date YYYY-MM-DD, $2: repository name.
# Tags ending in "latest" are always kept.
cleanup_single_ecr_repository() {
  BEFORE=$1
  REPOSITORY=$2

  echo "gettings tags for repository $REPOSITORY before $BEFORE"

  # One line per image: its tags joined by spaces, oldest first.
  ALL_TAGS=$(aws ecr describe-images --repository-name "$REPOSITORY" --output json |
    jq '.imageDetails' |
    jq '. |= sort_by(.imagePushedAt)' |
    jq --arg date $BEFORE '.[] | select(.imagePushedAt[0:10] < $date)' |
    jq 'select((.imageTags != null) or (.imageTags == []))' |
    jq 'select(.imageTags | any(endswith("latest")) | not)' |
    jq -r '.imageTags | join(" ")' |
    sort -u)

  if [ -z "${ALL_TAGS}" ]; then
    echo "no tag to delete for repository $REPOSITORY"
  else
    echo "deleting $(echo $ALL_TAGS | wc -l) tags for $REPOSITORY"

    while read image_tags; do
      # First tag of the line is enough to resolve the manifest list.
      SINGLE_TAG=$(echo $image_tags | grep -o '^\S*')

      # Collect per-architecture digests so multi-arch children are deleted
      # too; '|| echo ""' tolerates single-arch images / inspect failures.
      DIGESTS_TO_DELETE=$(docker buildx imagetools inspect \
        949316342391.dkr.ecr.eu-west-1.amazonaws.com/$REPOSITORY:$SINGLE_TAG --raw |
        jq -r '[.manifests | .[].digest] | join(" imageDigest=") | "imageDigest=" + .' ||
        echo "")

      # Prefix every tag with "imageTag=" for batch-delete-image.
      TAGS_TO_DELETE=$(echo "$image_tags" | sed 's/[^ ]* */imageTag=&/g')

      # Keep the aws CLI from paging output interactively.
      export AWS_PAGER=""

      aws ecr batch-delete-image --repository-name "$REPOSITORY" --image-ids $(echo $TAGS_TO_DELETE) > /dev/null 2>&1
      test -z $DIGESTS_TO_DELETE ||
        aws ecr batch-delete-image --repository-name "$REPOSITORY" --image-ids $(echo $DIGESTS_TO_DELETE)> /dev/null 2>&1
    done <<< $ALL_TAGS

    echo "deleted $(echo $ALL_TAGS | wc -l) tags"
  fi

}
2104
+
2105
+
2106
# Remove GitLab CI distributed-cache objects older than a date from S3.
# $1 (optional): cutoff date YYYY-MM-DD, default one month ago (BSD date).
# $2 (optional): S3 cache prefix.
cleanup_ci_cache() {
  DATE=${1:-$(date -v-1m +%F)}
  CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}

  echo "deleting from cache $CACHE_BUCKET all older than $DATE"

  aws_ecr_login

  # 'aws s3 ls --recursive' lines start with "YYYY-MM-DD HH:MM:SS size key".
  while read -r line; do
    # Lexicographic compare works because both sides are YYYY-MM-DD.
    datum=$(echo $line | cut -c1-10)
    if [[ "$datum" < "$DATE" ]] ; then
      # Shell Parameter Expansion: ${parameter##word}
      # Allow to return the result from "word" to the end of "parameters"
      # Here we need the end of the string after "project/" (corresponding to the S3 gitlab project id and filename)
      TO_DELETE="$CACHE_BUCKET${line##* project/}"
      echo $TO_DELETE
      aws s3 rm $TO_DELETE
    fi
  done < <(aws s3 ls $CACHE_BUCKET --recursive)
}
2126
+
2127
# Deregister every ACTIVE revision of an AWS Batch job definition except
# the most recent one.
# $1: job definition name.
cleanup_batch_definitions() {
  DEFINITION_NAME=$1
  # Sort newest-first, drop the first (latest) entry, keep the rest's ARNs.
  ARNs=($(
    aws batch describe-job-definitions \
      --status ACTIVE \
      --job-definition-name "$DEFINITION_NAME" |
      jq '.jobDefinitions | sort_by(-.revision)' |
      jq 'del( .[0])' |
      jq -r '.[] | .jobDefinitionArn'
    )
  )
  for A in ${ARNs[@]}; do
    echo "deregister $A"
    aws batch deregister-job-definition --job-definition $A
  done
  echo "cleaned up all definitions except latest"
}
2144
+ #!/usr/bin/env bash
2145
+
2146
# Open an interactive SFTP session to IKEA's Centiro FTP through the
# testing bastion: bastion SSH on local 2230, then a LocalForward of
# local 25500 to ft.centiro.ikea.com:22.
# Depends on start_ssh_bastion (defined elsewhere in this toolkit).
ftp_ikea_k8s() {
  SSH_LOCAL_PORT=2230
  FTP_LOCAL_PORT=25500
  start_ssh_bastion testing $SSH_LOCAL_PORT

  # Free the forward port if a previous tunnel is still bound to it.
  lsof -ti tcp:$FTP_LOCAL_PORT | xargs kill

  # NOTE(review): the Port/LocalForward values below are hard-coded and must
  # stay in sync with SSH_LOCAL_PORT/FTP_LOCAL_PORT above.
  bastion_config=$(mktemp)
  cat > "$bastion_config" <<EOF
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
User root
Host bastion_ftp
HostName 127.0.0.1
Port 2230
LocalForward 25500 ft.centiro.ikea.com:22
EOF

  # Background (-f) tunnel with no remote command (-N).
  ssh -f -N \
    -F "$bastion_config" \
    "bastion_ftp"

  sftp -P $FTP_LOCAL_PORT colisweb.fr@127.0.0.1
}
2170
+
2171
+ #!/usr/bin/env bash
2172
+
2173
+ # usage:
2174
+ # jconsole_k8s testing colisweb-api-web
2175
+
2176
# Attach a local jconsole to the JMX port (7199) of the first pod whose
# name matches $2 in namespace $1, through a SOCKS proxy (local 7777) over
# the environment's SSH bastion (local 2242).
# Depends on start_ssh_bastion / stop_ssh_bastion from this toolkit.
jconsole_k8s() {
  ENV=$1
  NAME=$2

  start_ssh_bastion $ENV 2242
  # "name podIP" per line; keep the IP of the first match.
  POD_IP=$( \
    kubectl -n $ENV get pods -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIP}{"\n"}{end}' \
    | grep "$NAME" | cut -d' ' -f2 | head -1 \
  )
  echo "selected POD with ip $POD_IP"
  echo "use 'root' as password"
  # Background SOCKS proxy (-D) through the bastion.
  ssh -f -N -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -D 7777 root@127.0.0.1 -p 2242

  jconsole \
    -J-DsocksProxyHost=localhost \
    -J-DsocksProxyPort=7777 \
    -J-DsocksNonProxyHosts= \
    service:jmx:rmi:///jndi/rmi://$POD_IP:7199/jmxrmi \
    &

  echo "remember to stop with 'stop_ssh_bastion'"

}
2199
+
2200
+ #!/usr/bin/env bash
2201
+
2202
+ # Interactive console on an new pod. See also run_ruby_k8s
2203
+ # Ex :
2204
+ # railsc_k8s production
2205
+ # railsc_k8s production "User.where(email:'toni@colisweb.com')"
2206
# Interactive (or one-shot) Rails console on a throwaway colisweb-api pod.
# $1: namespace/environment; $2 (optional): a Ruby command to run — when
# empty, the console stays interactive.
# NOTE(review): the image tag comes from $5 (matching run_ruby_k8s's
# argument layout), not $3 — confirm this is intentional.
railsc_k8s() {
  ENV=$1
  COMMAND=$2
  # production and staging run the master image; other envs their own tag.
  [[ $ENV = "production" || $ENV = "staging" ]] && default_tag="master-latest" || default_tag="${ENV}-latest"
  local image_tag=${5:-$default_tag}
  local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/colisweb-api:$image_tag"
  local POD_NAME="colisweb-api-rails-console-$image_tag-$USERNAME"

  # Drop a leftover pod with the same name, if any.
  kubectl -n $ENV get pod $POD_NAME && kubectl -n $ENV delete pod $POD_NAME

  configure_kubectl_for $ENV
  echo "starting with $IMAGE"

  # The pod just sleeps; the console is exec'd into it afterwards.
  # Env comes from the colisweb-api ConfigMap + Secret.
  kubectl -n $ENV run $POD_NAME \
    --image $IMAGE \
    --restart=Never \
    --overrides='{
      "spec":{
        "nodeSelector":{
          "workType": "workers"
        },
        "containers":[
          {
            "name":"'$POD_NAME'",
            "image":"'$IMAGE'",
            "imagePullPolicy":"Always",
            "command":[
              "sleep",
              "infinity"
            ],
            "resources":{
              "limits":{
                "memory": "2048Mi"
              }
            },
            "envFrom": [ {
              "configMapRef": {
                "name": "colisweb-api"
              }
            }, {
              "secretRef": {
                "name": "colisweb-api"
              }
            }
            ]
          }
        ]
      }
    }
    '

  # Crude wait for the container to be ready before exec'ing.
  sleep 5
  KUBERAILS="kubectl -n $ENV exec -ti $POD_NAME -- /usr/src/app/bin/rails c"
  # Interactive console when no command; otherwise pipe the command in.
  [ -z "$COMMAND" ] && eval $KUBERAILS || echo $COMMAND | eval $KUBERAILS

  # NOTE(review): 'print' is a zsh builtin (this file appears to be sourced
  # from oh-my-zsh); under plain bash this line would fail — confirm shell.
  print "End of $POD_NAME "
  kubectl -n $ENV delete pods $POD_NAME
}
2264
+
2265
+ # Ex :
2266
+ # create_user testing claire.lien@colisweb.com super_admin clairemdp
2267
# Create (or fetch) a Rails User and (re)set its password via a one-shot
# Rails console. $1: environment, $2: email, $3: role, $4: password.
# Ex: create_user testing claire.lien@colisweb.com super_admin clairemdp
create_user() {
  local env=$1
  local email=$2
  local role=$3
  local password=$4
  railsc_k8s $env "User.where(email:'$email', role:'$role').first_or_create.update_attributes!(password: '$password')"
}
2274
+
2275
+ # Ex :
2276
+ # delete_user testing claire.lien@colisweb.com
2277
# Destroy the Rails User matching the given email via a one-shot console.
# $1: environment, $2: email.
# Ex: delete_user testing claire.lien@colisweb.com
delete_user() {
  local env=$1
  local email=$2
  railsc_k8s $env "User.find_by(email:'$email').destroy"
}
2282
+
2283
+ # NON Interactive console on an new pod, for long-running tasks (a few minutes)
2284
+ # See also railsc_k8s
2285
+ # file.txt will be available from /conf/data.txt in the ruby code
2286
+ # examples :
2287
+ # run_ruby_k8s testing demo <(echo "pp JSON.parse(File.read('/conf/data.txt'))") <(echo '{ "content": 123 }')
2288
+ # run_ruby_k8s testing demo ~/.oh-my-zsh/custom/dev-tools/shell-session/ruby/demo.rb <(echo '{ "content": 123 }')
2289
# NON-interactive Ruby script run on a throwaway colisweb-api pod, for
# long-running tasks. The script is mounted at /conf/script.rb and the data
# file at /conf/data.txt via a ConfigMap.
# $1: namespace, $2: pod-name suffix, $3: ruby script path, $4: data file,
# $5 (optional): image tag (defaults per environment).
run_ruby_k8s() {
  if [ $# -lt 4 ]; then
    echo "usage : run_ruby_k8s production name-for-pod script.rb file.txt"
    return 1
  fi
  local namespace=$1
  local name=$2
  local ruby_script=$3
  local input_data=$4
  # production and staging run the master image; other envs their own tag.
  [[ $namespace = "production" || $namespace = "staging" ]] && default_tag="master-latest" || default_tag="${namespace}-latest"
  local image_tag=${5:-$default_tag}

  if [ ! -r "$ruby_script" ]; then
    echo "ruby script not found $ruby_script"
    return 2
  fi

  if [ ! -r "$input_data" ]; then
    echo "data not found $input_data"
    return 3
  fi


  local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/colisweb-api:$image_tag"
  local POD_NAME="colisweb-api-script-$name"
  local CONFIG_MAP="config-$POD_NAME"
  local CONFIG_MAP_DIR="$(mktemp -d)"


  configure_kubectl_for $namespace

  # Fixed file names inside the ConfigMap: the pod always reads
  # /conf/script.rb and the script reads /conf/data.txt.
  cp "$ruby_script" "$CONFIG_MAP_DIR/script.rb"
  cp "$input_data" "$CONFIG_MAP_DIR/data.txt"

  # Replace any leftover ConfigMap / pod from a previous run.
  kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
  kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"

  kubectl -n $namespace get pod $POD_NAME && kubectl -n $namespace delete pod $POD_NAME

  echo "starting with $IMAGE"
  # --attach --rm: stream output here and delete the pod when done.
  kubectl -n $namespace run $POD_NAME \
    --image $IMAGE \
    -ti \
    --restart=Never \
    --attach \
    --rm \
    --overrides='{
      "spec":{
        "nodeSelector":{
          "workType": "workers"
        },
        "containers":[
          {
            "name":"'$POD_NAME'",
            "image":"'$IMAGE'",
            "imagePullPolicy":"Always",
            "command":[
              "/usr/src/app/bin/rails",
              "r",
              "/conf/script.rb"
            ],
            "resources":{
              "limits":{
                "memory": "4096Mi"
              }
            },
            "volumeMounts":[
              {
                "name":"conf",
                "mountPath":"/conf"
              }
            ],
            "envFrom": [ {
              "configMapRef": {
                "name": "colisweb-api"
              }
            }, {
              "secretRef": {
                "name": "colisweb-api"
              }
            }
            ]
          }
        ],
        "volumes":[
          {
            "name":"conf",
            "configMap":{ "name":"'$CONFIG_MAP'" }
          }
        ]
      }
    }
    '

  kubectl -n $namespace delete configmap $CONFIG_MAP
}
2386
+
2387
+ # example:
2388
+ # update_pickup_cp testing <( echo '{"wrong_cp": "59123", "corrected_cp": "59223", "delivery_ids": ["4192421", "4192425"]}' )
2389
# Fix wrong pickup postal codes on deliveries by running
# ruby/update_pickup_cp.rb on a pod.
# $1: namespace; $2: JSON file, e.g.
#   {"wrong_cp": "59123", "corrected_cp": "59223", "delivery_ids": [...]}
update_pickup_cp() {
  local target_namespace=$1
  local correction_json=$2
  run_ruby_k8s $target_namespace update-pickup-cp "$SCRIPT_FULL_PATH/ruby/update_pickup_cp.rb" $correction_json
}
2392
+
2393
+
2394
+
2395
# Push price updates (a JSON array file) to a namespace by running
# ruby/update_prices.rb on a pod. Files above ~940KB (ConfigMap size limit)
# are split with jq into 3000-element slices, one pod run per slice.
# $1: namespace, $2: path to the JSON prices file.
update_all_prices() {
  local namespace=$1
  local json_prices=$2

  local json_size=$(wc -c < "$json_prices")

  if ((json_size > 940000)); then
    # BUG FIX: the original used 'command -v jq || (echo ... && return 1)';
    # that 'return' ran inside a subshell, so the function silently carried
    # on without jq. Test and return from the function itself instead.
    if ! command -v jq > /dev/null; then
      echo "jq not found (use brew install jq)"
      return 1
    fi
    local max_lines=3000
    local total_lines=$(jq '. | length' $json_prices)
    local iterations=$((total_lines / max_lines + 1))
    echo "$json_prices is too big, I'll split it for you in blocks of $max_lines lines. It will take $iterations runs"
    for (( i = 0 ; i < iterations ; i++ )) ; do
      local start=$((i * max_lines))
      local end=$(( (i + 1) * max_lines))
      local split_file=$(mktemp)
      # jq array slicing keeps each chunk valid JSON.
      jq -c ".[$start:$end]" $json_prices > $split_file
      local split_lines=$(jq '. | length' $split_file)
      echo "starting iteration $i from $start to $end with $split_file command -v has $split_lines lines"
      run_ruby_k8s $namespace "update-prices-$i" "$SCRIPT_FULL_PATH/ruby/update_prices.rb" $split_file
    done
  else
    run_ruby_k8s $namespace "update-prices" "$SCRIPT_FULL_PATH/ruby/update_prices.rb" $json_prices
  fi
}
2420
+
2421
+
2422
# Re-import survey/feedback KPIs from a CSV by running
# ruby/feedback_kpi_reuploader.rb on a pod. CSVs above ~940KB (ConfigMap
# size limit) are split into 400-data-line chunks; the header line (line 1)
# is prepended to every chunk.
# $1: namespace, $2: path to the CSV file.
update_surveys() {
  local namespace=$1
  local csv_surveys=$2

  local csv_size=$(wc -c < "$csv_surveys")


  if ((csv_size > 940000)); then
    local max_lines=400
    local total_lines=$(wc -l < $csv_surveys)
    local iterations=$((total_lines / max_lines + 1))
    echo "$csv_surveys is too big, I'll split it for you in blocks of $max_lines lines. It will take $iterations runs"
    for (( i = 0 ; i < iterations ; i++ )) ; do
      # +2 / +1: data rows start at line 2 (line 1 is the header).
      local start=$((i * max_lines + 2))
      local end=$(( (i + 1) * max_lines + 1))
      local split_file=$(mktemp)
      head -1 $csv_surveys > $split_file
      sed -n ''"$start,${end}p" $csv_surveys >> $split_file


      local split_lines=$(wc -l < $split_file)
      echo "starting iteration $i from $start to $end with $split_file command -v has $split_lines lines"
      run_ruby_k8s $namespace "reimport-surveys-$i" "$SCRIPT_FULL_PATH/ruby/feedback_kpi_reuploader.rb" $split_file
    done
  else
    run_ruby_k8s $namespace "reimport-surveys" "$SCRIPT_FULL_PATH/ruby/feedback_kpi_reuploader.rb" $csv_surveys
  fi
}
2450
+
2451
+ #!/usr/bin/env bash
2452
+
2453
# Verify SSH auth against gitlab.com using the CI-provided private key,
# without touching ~/.ssh (everything lives in a temp dir).
# Reads: SSH_PRIVATE_KEY.
configure_gitlab_ssh() {
  tmp_dir=$(mktemp -d)
  # NOTE(review): this known_hosts file is written but never passed to ssh
  # (no -oUserKnownHostsFile below) — confirm whether it is still needed.
  ssh-keyscan gitlab.com > $tmp_dir/known_hosts
  echo "$SSH_PRIVATE_KEY" > $tmp_dir/id_rsa
  # ssh refuses keys readable by others.
  chmod 600 $tmp_dir/id_rsa
  ssh -i $tmp_dir/id_rsa -T git@gitlab.com
  rm -Rf $tmp_dir
}
2461
+
2462
+
2463
# Install the CI SSH key into ~/.ssh and verify auth against gitlab.com.
# Reads: SSH_PRIVATE_KEY. Overwrites ~/.ssh/id_rsa; appends to known_hosts.
configure_gitlab_ssh_home() {
  # FIX: plain 'mkdir ~/.ssh' failed whenever the directory already
  # existed; -p makes the call idempotent.
  mkdir -p ~/.ssh
  ssh-keyscan gitlab.com >> ~/.ssh/known_hosts
  echo "$SSH_PRIVATE_KEY" > ~/.ssh/id_rsa
  # ssh refuses keys readable by others.
  chmod 600 ~/.ssh/id_rsa
  ssh -T git@gitlab.com
}
2470
+ #!/usr/bin/env bash
2471
+
2472
# Schedule a Datadog monitor downtime for each service in the
# space-separated list $1. No-op (status 0) outside production.
# $2 (optional): downtime length in minutes, default 30.
# Reads: ENVIRONMENT.
datadog_schedule_downtime() {
  SERVICES=$1
  DOWNTIME_MINUTES=${2:-30}

  # Only production deploys are worth silencing monitors for.
  [[ "$ENVIRONMENT" == "production" ]] || return 0
  log "scheduling downtime for $SERVICES in $ENVIRONMENT"

  for SERVICE in $SERVICES ; do
    datadog_schedule_downtime_single $SERVICE $DOWNTIME_MINUTES
  done
}
2486
+
2487
# POST one downtime to the Datadog API for a single service, starting now.
# $1: service name, $2: duration in minutes.
# Reads: DD_API_KEY, DD_APP_KEY. Scoped to env:production monitors tagged
# "service:<name>" and "performance".
datadog_schedule_downtime_single() {
  local SERVICE=$1
  local DOWNTIME_MINUTES=$2

  # Datadog expects epoch seconds for start/end.
  START=$(date +%s)
  END=$((START + 60 * DOWNTIME_MINUTES))

  log "scheduling a downtime on datadog for $SERVICE ($DOWNTIME_MINUTES minutes)"
  curl -X POST "https://api.datadoghq.com/api/v1/downtime" \
    -H "Content-Type: application/json" \
    -H "DD-API-KEY: ${DD_API_KEY}" \
    -H "DD-APPLICATION-KEY: ${DD_APP_KEY}" \
    -d '
  {
    "active": true,
    "downtime_type": 0,
    "start": '$START',
    "end": '$END',
    "message": "CA Deployment - performance for '$SERVICE' may be lower for next '$DOWNTIME_MINUTES' min",
    "monitor_tags": [
      "service:'$SERVICE'",
      "performance"
    ],
    "scope": [
      "env:production"
    ],
    "timezone": "Europe/Paris"
  }
  '
}
2517
+
2518
+ #!/usr/bin/env bash
2519
+
2520
# Build and push a multi-arch image (arm64+amd64) with buildx, unless an
# image for the current commit already exists in the registry.
# $1: space-separated env-var names forwarded as --build-arg NAME=value.
# Reads: DOCKER_REGISTRY_ID, APPLICATION, CI_COMMIT_SHORT_SHA, DOCKER_IMAGE,
# DOCKER_IMAGE_SHA, CI_PROJECT_URL, DOCKER_STAGE_PATH.
docker_build_push() {
  read -r -a BUILD_ARGS <<< "$1"
  DOCKER_BUILD_ARGS="--build-arg VCS_REF=$(git rev-parse --short HEAD)"

  # ${!ARG_NAME}: indirect expansion — each listed name's env value.
  for ARG_NAME in "${BUILD_ARGS[@]}"
  do
    DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS --build-arg $ARG_NAME=${!ARG_NAME}"
  done

  if ! image_exists $DOCKER_REGISTRY_ID $APPLICATION $CI_COMMIT_SHORT_SHA ; then
    # Warm the build cache; tolerate a missing previous image.
    docker pull $DOCKER_IMAGE || true
    SOURCE_URL=${CI_PROJECT_URL:8} # without "https://" protocol, like gitlab.com/colisweb-idl/colisweb/back/packing

    docker buildx create --use

    # --provenance=false keeps ECR from showing extra attestation manifests.
    docker buildx build $DOCKER_BUILD_ARGS \
      -t $DOCKER_IMAGE_SHA \
      --platform "linux/arm64,linux/amd64" \
      --label org.opencontainers.image.revision=$(git rev-parse HEAD) \
      --label org.opencontainers.image.source=$SOURCE_URL \
      --provenance=false \
      --push \
      $DOCKER_STAGE_PATH
  fi
}
2545
+
2546
+
2547
# Re-tag an image in the registry WITHOUT pulling it: fetch the manifest of
# $1 and PUT it back under tag $2 via the Docker Registry v2 API.
# Reads: DOCKER_REGISTRY, APPLICATION; auth via aws_ecr_token.
# Returns 1 when the manifest fetch fails, 2 when the PUT fails.
docker_promote() {
  # inspired by https://dille.name/blog/2018/09/20/how-to-tag-docker-images-without-pulling-them/
  # Sanitize tags: anything outside [0-9a-zA-Z-.] becomes '_'.
  OLD_TAG=${1//[^0-9a-zA-Z-.]/_}
  NEW_TAG=${2//[^0-9a-zA-Z-.]/_}
  echo "promoting from $OLD_TAG to $NEW_TAG"
  TOKEN=$(aws_ecr_token)
  CONTENT_TYPE="application/vnd.docker.distribution.manifest.v2+json"
  MANIFESTS_API="https://${DOCKER_REGISTRY}/v2/${APPLICATION}/manifests"

  if MANIFEST=$(curl --fail -H "Authorization: Basic $TOKEN" -H "Accept: ${CONTENT_TYPE}" "$MANIFESTS_API/${OLD_TAG}"); then
    echo "authenticated on $MANIFESTS_API"
  else
    return 1
  fi
  if curl --fail -H "Authorization: Basic $TOKEN" -X PUT -H "Content-Type: ${CONTENT_TYPE}" -d "${MANIFEST}" "$MANIFESTS_API/$NEW_TAG" ; then
    echo "promoted ${APPLICATION} from $OLD_TAG to $NEW_TAG"
  else
    return 2
  fi
}
2567
+
2568
# Check that every image in the comma-separated list $1 exists in the
# registry at ${VERSION}; fail (return 1) on the first missing one.
# Reads: DOCKER_REGISTRY_ID, VERSION.
ensure_images_exists() {
  local image_list
  image_list=$(echo $1 | tr "," "\n")
  local candidate
  for candidate in $image_list; do
    image_exists ${DOCKER_REGISTRY_ID} $candidate ${VERSION} || return 1
  done
}
2573
+
2574
+ #!/usr/bin/env bash
2575
+
2576
# Look up a yq path in the layered deployment configs, most specific first:
# <env>-secrets.yaml, then <env>.yaml, then common.yaml.
# Usage: extract_yaml_config_variable --environment E --configs-path P \
#          --variable .some.path [--optional]
# Prints the first non-null value found. With --optional, a missing value
# prints "" and returns 0; otherwise it is an error (return 1).
extract_yaml_config_variable() {
  # This runs inside CI helpers that may have -e/-x on; relax both so a
  # missed lookup can fall through the layers instead of aborting.
  set +e
  set +x

  check_args "--environment" $1
  shift
  ENVIRONMENT=$1
  shift

  check_args "--configs-path" $1
  shift
  CONFIGS_PATH=$1
  shift

  check_args "--variable" $1
  shift
  VARIABLE=$1
  shift

  [[ "$1" == "--optional" ]] && OPTIONAL=true || OPTIONAL=false

  # All three layers must exist, even if the value lives in only one.
  if [ ! -f ${CONFIGS_PATH}/common.yaml ]; then
    echo >&2 "Missing $CONFIGS_PATH/common.yaml configuration file"
    return 1
  fi
  if [ ! -f ${CONFIGS_PATH}/${ENVIRONMENT}.yaml ]; then
    echo >&2 "Missing $CONFIGS_PATH/$ENVIRONMENT.yaml configuration file"
    return 1
  fi
  if [ ! -f ${CONFIGS_PATH}/${ENVIRONMENT}-secrets.yaml ]; then
    echo >&2 "Missing $CONFIGS_PATH/$ENVIRONMENT-secrets.yaml configuration file"
    return 1
  fi

  # yq prints "null" for a missing path; treat that like a failure and
  # fall through to the next (more generic) layer.
  result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/$ENVIRONMENT-secrets.yaml")
  if [ $? -ne 0 ] || [ "$result" = "null" ]; then
    result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/$ENVIRONMENT.yaml")
    if [ $? -ne 0 ] || [ "$result" = "null" ]; then
      result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/common.yaml")
      if [ $? -ne 0 ] || [ "$result" = "null" ]; then
        if [ $OPTIONAL = true ]; then
          echo ""
          return 0
        else
          echo >&2 "Missing path $VARIABLE in $CONFIGS_PATH/$ENVIRONMENT-secrets.yaml, $CONFIGS_PATH/$ENVIRONMENT.yaml or $CONFIGS_PATH/common.yaml"
          return 1
        fi
      fi
    fi
  fi
  echo ${result}
}
2628
+ #!/usr/bin/env bash
2629
+
2630
# Run 'flyway clean' (DROPS all objects!) against a Postgres database from
# a throwaway kubectl pod.
# $1: host, $2: port, $3: database, $4: user, $5: password.
flyway_clean() {
  HOST="$1"
  PORT="$2"
  DATABASE="$3"
  USER="$4"
  PASSWORD="$5"

  local flyway_args=(
    -cleanDisabled=false
    -url="jdbc:postgresql://$HOST:$PORT/$DATABASE"
    -user="$USER"
    -password="$PASSWORD"
    clean
  )

  # --rm: the pod disappears after the one-shot clean finishes.
  kubectl run -it --rm flywayclean \
    --image=flyway/flyway \
    --restart=Never \
    -- \
    "${flyway_args[@]}"
}
2647
+
2648
+ #!/usr/bin/env bash
2649
+
2650
+ FLYWAY_VERSION="7.4.0"
2651
+
2652
+
2653
# Look up a yaml path (plus optional flags such as --optional) in the
# ./deploy config folder for the current $ENVIRONMENT.
get_yaml_variable() {
  local configs_dir=$(pwd)/deploy
  extract_yaml_config_variable --environment ${ENVIRONMENT} --configs-path $configs_dir --variable $@
}
2656
+
2657
# Provision a service database (users, schema) and run its Flyway
# migrations, reading connection settings from the layered yaml configs.
# Requires env vars: APPLICATION, ENVIRONMENT, FLYWAY_VERSION,
# MIGRATION_SQL_PATH. Depends on kube_init_service_database,
# kube_init_database_readonly_account and flyway_migrate.
init_migrate_db() {
  set -e

  check_env_vars 4 "APPLICATION" "ENVIRONMENT" "FLYWAY_VERSION" "MIGRATION_SQL_PATH"

  PG_YAML_PATH=".${APPLICATION}config.postgres"

  DB_PORT="5432"
  DB_HOST=$(get_yaml_variable "${PG_YAML_PATH}.host")
  DB_INIT_USERNAME=$(get_yaml_variable "${PG_YAML_PATH}.initUsername")
  DB_INIT_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.initPassword")
  DB_DATABASE=$(get_yaml_variable "${PG_YAML_PATH}.database")
  DB_USER=$(get_yaml_variable "${PG_YAML_PATH}.user")
  DB_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.password")
  DB_URL="jdbc:postgresql://${DB_HOST}:${DB_PORT}/${DB_DATABASE}"

  # Read-only account is optional; empty when not configured.
  DB_RO_USER=$(get_yaml_variable "${PG_YAML_PATH}.readOnlyUser" --optional)
  DB_RO_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.readOnlyPassword" --optional)

  # KUBECONFIG (e.g. set by Gitlab k8s runners) would override
  # configure_kubectl_for's configuration.
  unset KUBECONFIG

  configure_kubectl_for ${ENVIRONMENT}

  kube_init_service_database \
    --namespace ${ENVIRONMENT} \
    --service ${APPLICATION} \
    --db_host ${DB_HOST} \
    --db_port ${DB_PORT} \
    --db_init_username ${DB_INIT_USERNAME} \
    --db_init_password ${DB_INIT_PASSWORD} \
    --db_database ${DB_DATABASE} \
    --db_username ${DB_USER} \
    --db_password ${DB_PASSWORD}

  # BUG FIX: the original tested "$DB_RO_USER" twice; the second test is
  # meant to check the read-only password is configured as well.
  if [[ ! -z "$DB_RO_USER" ]] && [[ ! -z "$DB_RO_PASSWORD" ]]; then
    kube_init_database_readonly_account \
      --namespace ${ENVIRONMENT} \
      --service ${APPLICATION} \
      --db_connection "$DB_INIT_USERNAME:$DB_INIT_PASSWORD@$DB_HOST:$DB_PORT" \
      --db_database ${DB_DATABASE} \
      --db_readonly_username ${DB_RO_USER} \
      --db_readonly_password ${DB_RO_PASSWORD}
  fi

  flyway_migrate \
    --environment ${ENVIRONMENT} \
    --namespace ${ENVIRONMENT} \
    --service ${APPLICATION} \
    --db_url ${DB_URL} \
    --db_user ${DB_USER} \
    --db_password ${DB_PASSWORD} \
    --flyway_version ${FLYWAY_VERSION} \
    --flyway_sql_folder $(pwd)/${MIGRATION_SQL_PATH}
}
2711
+
2712
# Run Flyway 'migrate' in a one-shot pod, with the SQL migration files
# delivered through a ConfigMap mounted at /flyway/sql.
# Named args (all required, in order): --environment --namespace --service
# --db_url --db_user --db_password --flyway_version --flyway_sql_folder.
flyway_migrate() {
  set -e

  extract_args 8 \
    environment namespace service db_url db_user db_password flyway_version flyway_sql_folder $*

  echo "running flyway migrations for service $service in environment $environment namespace $namespace for db_url $db_url with user $db_user"
  echo "migration files expected in $flyway_sql_folder"

  CONFIGMAP_NAME="$service-flyway-migration-sql"
  POD_NAME="$service-flyway-migration"

  configure_kubectl_for $environment

  # Replace leftovers from a previous run, then ship the SQL files.
  kubectl -n $namespace delete configmap $CONFIGMAP_NAME --ignore-not-found
  kubectl -n $namespace delete pod $POD_NAME --ignore-not-found
  kubectl -n $namespace create configmap $CONFIGMAP_NAME --from-file=$flyway_sql_folder

  # --image ignored: the real image comes from the overrides JSON.
  kubectl -n $namespace run $POD_NAME --image ignored -ti --restart=Never --attach --rm --overrides='
  {
    "spec":{
      "containers":[
        {
          "name":"'$POD_NAME'",
          "image":"flyway/flyway:'$flyway_version'",
          "command":["flyway", "-url='$db_url'", "-user='$db_user'", "-password='$db_password'", "migrate"],
          "volumeMounts":[
            {
              "name":"sql",
              "mountPath":"/flyway/sql"
            }
          ]
        }
      ],
      "volumes":[
        {
          "name":"sql",
          "configMap":{
            "name":"'$CONFIGMAP_NAME'"
          }
        }
      ]
    }
  }
  '

  kubectl -n $namespace delete configmap $CONFIGMAP_NAME
}
2760
+
2761
+ #!/usr/bin/env bash
2762
# Run Flyway 'repair' (fix the schema history table) in a one-shot pod,
# mirroring init_migrate_db's config lookup.
# Requires env vars: APPLICATION, ENVIRONMENT, FLYWAY_VERSION,
# MIGRATION_SQL_PATH.
flyway_repair() {
  set -e
  check_env_vars 4 "APPLICATION" "ENVIRONMENT" "FLYWAY_VERSION" "MIGRATION_SQL_PATH"

  PG_YAML_PATH=".${APPLICATION}config.postgres"

  DB_PORT="5432"
  DB_HOST=$(get_yaml_variable "${PG_YAML_PATH}.host")
  DB_DATABASE=$(get_yaml_variable "${PG_YAML_PATH}.database")
  DB_USER=$(get_yaml_variable "${PG_YAML_PATH}.user")
  DB_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.password")
  DB_URL="jdbc:postgresql://${DB_HOST}:${DB_PORT}/${DB_DATABASE}"

  flyway_sql_folder=$(pwd)/${MIGRATION_SQL_PATH}

  configure_kubectl_for "${ENVIRONMENT}"
  POD_NAME="${APPLICATION}-flyway-repair"
  CONFIGMAP_NAME="${APPLICATION}-flyway-repair-sql"

  # Replace leftovers from a previous run, then ship the SQL files.
  kubectl -n "${ENVIRONMENT}" delete configmap $CONFIGMAP_NAME --ignore-not-found
  kubectl -n "${ENVIRONMENT}" delete pod $POD_NAME --ignore-not-found
  kubectl -n "${ENVIRONMENT}" create configmap $CONFIGMAP_NAME --from-file="${flyway_sql_folder}"

  kubectl -n "${ENVIRONMENT}" run --rm -it "${POD_NAME}" \
    --image=flyway/flyway \
    --restart=Never \
    --overrides='
  {
    "spec":{
      "containers":[
        {
          "name":"'$POD_NAME'",
          "image":"flyway/flyway:'${FLYWAY_VERSION}'",
          "command":["flyway", "-url='$DB_URL'", "-user='$DB_USER'", "-password='$DB_PASSWORD'", "repair"],
          "volumeMounts":[
            {
              "name":"sql",
              "mountPath":"/flyway/sql"
            }
          ]
        }
      ],
      "volumes":[
        {
          "name":"sql",
          "configMap":{
            "name":"'$CONFIGMAP_NAME'"
          }
        }
      ]
    }
  }
  '
  kubectl -n "${ENVIRONMENT}" delete configmap $CONFIGMAP_NAME
}
2817
+
2818
+ #!/usr/bin/env bash
2819
+
2820
# Stamp the current CI ref and short sha into every file listed
# (space-separated) in GIT_COMMIT_FILES, replacing the literal
# placeholder GIT_COMMIT in-place.
# Reads: GIT_COMMIT_FILES, CI_COMMIT_REF_SLUG, CI_COMMIT_SHORT_SHA.
record_git_commit() {
  local stamp="${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHORT_SHA}"
  local target
  for target in $GIT_COMMIT_FILES; do
    # '&' as the s/// delimiter: slugs and shas never contain it.
    sed -i "s&GIT_COMMIT&${stamp}&" "$target"
  done
}
2825
+
2826
# Import the CI PGP private key into the local gpg keyring.
# Reads: GITLAB_PGP_PRIVATE_KEY (ASCII-armored key from GitLab CI).
# Returns 1 when the variable is empty/unset.
gitlab_import_pgp_key() {
  if [ "$GITLAB_PGP_PRIVATE_KEY" != "" ]
  then
    # BUG FIX: the original did KEY_FOLDER=<(echo ...) on one line and
    # 'gpg --import $KEY_FOLDER' on the next; the process substitution's
    # /dev/fd path is only valid for the assignment itself, so gpg read a
    # dead descriptor. Feed the key to gpg on stdin instead.
    printf '%s\n' "$GITLAB_PGP_PRIVATE_KEY" | gpg --import > /dev/null
  else
    echo '$GITLAB_PGP_PRIVATE_KEY is not set'
    return 1
  fi
}
2836
+
2837
# Import the CI PGP key, then print the decrypted contents of file $1.
git_reveal() {
  local encrypted_file=$1
  gitlab_import_pgp_key
  gpg --decrypt $encrypted_file
}
2841
+ #!/usr/bin/env bash
2842
+
2843
# Deploy a helm chart using the repo's conventional layout: configs under
# deploy/, chart under deploy/<app>, namespace == environment.
# $1: application, $2: environment, $3: image version (global.version).
helm_deploy() {
  APPLICATION=$1
  ENVIRONMENT=$2
  VERSION=$3
  local chart_args=(
    --path_configs deploy
    --path_chart "deploy/$APPLICATION"
    --application "$APPLICATION"
    --environment "$ENVIRONMENT"
    --namespace "$ENVIRONMENT"
    --helm_extra_args --set "global.version=$VERSION"
  )
  deploy_chart "${chart_args[@]}"
}
2855
+
2856
# Deploy a helm chart with layered value files (values/*.yaml, then
# common.yaml, <namespace>.yaml, <namespace>-secrets.yaml) and emit a
# Datadog deploy event afterwards.
# Named args (fixed order): --path_configs --path_chart --application
# --environment --namespace [--helm_extra_args ...rest].
deploy_chart() {
  set -e
  set -x

  # Rigid parsing, but all args are mandatory (expect last) and flexible order is unnecessary
  check_args "--path_configs" $1; shift
  path_configs=$1; shift
  check_args "--path_chart" $1; shift
  path_chart=$1; shift
  check_args "--application" $1; shift
  application=$1; shift
  check_args "--environment" $1; shift
  environment=$1; shift
  check_args "--namespace" $1; shift
  namespace=$1; shift
  # Everything after --helm_extra_args is passed through to helm verbatim.
  if [ $# -ne 0 ]; then
    check_args "--helm_extra_args" $1; shift
    helm_extra_args=$*
  fi

  echo "================================"
  echo " Deploying $application"
  echo " - Environment: $environment"
  echo " - Namespace: $namespace"
  echo "================================"

  root_path=$(pwd)

  # Check the configs exists

  check_config_file ${root_path}/${path_configs}/common.yaml
  check_config_file ${root_path}/${path_configs}/${namespace}.yaml
  check_config_file ${root_path}/${path_configs}/${namespace}-secrets.yaml

  # Check the chart exists
  if [ ! -d ${root_path}/${path_chart} ] || [ ! -f ${root_path}/${path_chart}/Chart.yaml ]; then
    echo "Bad Chart $root_path/$path_chart : does not exists or missing Chart.yaml"
    print_usage
    return 1
  fi

  # Unset Kubectl configuration made via the KUBECONFIG env variable
  # it would override the config made by configure_kubectl_for
  # for example, using Gitlab runners in Kubernetes sets this variable and causes conflict
  unset KUBECONFIG

  # Configure Kubectl
  configure_kubectl_for ${environment}

  # Configure helm
  helm version --namespace ${namespace} || true
  # helm stable repo have changed and must be updated manually, in versions < v2.17.0
  helm repo add colisweb s3://colisweb-helm-charts/colisweb
  helm repo add stable https://charts.helm.sh/stable
  helm repo update
  helm dependency update ${root_path}/${path_chart}

  # Gather values/*.yaml files
  values_path="${root_path}/${path_chart}/values"
  values_files=''
  # Builds " -f <file>" for every chart-local values file.
  [ -d $values_path ] && values_files=$(find $values_path -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')

  # Deploy
  helm upgrade --install \
    --namespace ${namespace} \
    ${values_files} \
    -f ${root_path}/${path_configs}/common.yaml \
    -f ${root_path}/${path_configs}/${namespace}.yaml \
    -f ${root_path}/${path_configs}/${namespace}-secrets.yaml \
    ${helm_extra_args} \
    ${application} ${root_path}/${path_chart}

  #send event to dd
  PUBLISHED_VERSION="$CI_COMMIT_REF_NAME-$CI_COMMIT_SHA"
  emit_datadog_deploy_event --environment $environment --service $application --version $PUBLISHED_VERSION

  echo "================================"
  echo " Deployed $application"
  echo " - Environment: $environment"
  echo " - Namespace: $namespace"
  echo "================================"

  set +x
}
2940
+
2941
verify_deployments() {
  set -e

  # Wait for every Deployment of a Helm release to finish rolling out.
  # usage :
  #   verify_deployments staging price
  #   verify_deployments -t 15m testing price
  # $1/$2 (after optional -t <timeout>): namespace, release name.

  # Optional -t <timeout> flag; defaults to 5 minutes.
  if [ "$1" = "-t" ]; then
    TIMEOUT=$2
    shift 2
  else
    TIMEOUT=5m
  fi

  NAMESPACE=$1
  RELEASE=$2

  # Collect all Deployment names from the release's rendered manifest.
  DEPLOYMENTS=(
    $(helm get manifest --namespace "$NAMESPACE" "$RELEASE" | yq --no-doc -r 'select(.kind=="Deployment").metadata.name')
  )

  echo "verifying on $NAMESPACE deployments ${DEPLOYMENTS[@]} with a timeout of $TIMEOUT"

  # Watch every rollout in parallel, then join on each watcher.
  PIDS=()
  for D in "${DEPLOYMENTS[@]}"; do
    kubectl -n "$NAMESPACE" rollout status deployment "$D" --timeout="$TIMEOUT" &
    PIDS+=($!)
  done

  for P in "${PIDS[@]}"; do
    # BUG FIX: under `set -e`, a bare `wait $P` followed by `[ $? -ne 0 ]`
    # aborted the function before the check could run, so the error
    # message and `return 1` were unreachable. Check the status inline.
    if ! wait "$P"; then
      echo "at least one deployment failed or timed out (after $TIMEOUT)"
      return 1
    fi
  done

}
2982
+
2983
print_usage() {
  # Print the deploy_chart command-line usage on stdout.
  cat <<'USAGE'
Usage:
deploy_chart \
 --path_configs <path to .yaml namespaces and secret config files>
 --path_chart <path to Helm Chart>
 --application <application name used by Helm>
 --environment <infrastructure environment>
 --namespace <namespace>
 --helm-extra-args <extra args to pass to helm, ex: --set my.value=42 --set your.setting=on>

USAGE
}
2994
+
2995
check_config_file() {
  # Ensure a required configuration file exists; on failure print the
  # usage help and return non-zero.
  # $1 - path to the expected .yaml configuration file
  local filename=$1
  # BUG FIX: the test previously read `[ ! -f $(unknown) ]` — `unknown` is
  # not a command, the substitution was empty, and the degenerate test
  # always succeeded, so missing config files were never detected.
  if [ ! -f "$filename" ]; then
    echo "Missing $filename configuration file"
    print_usage
    return 1
  fi
}
3003
+
3004
notify_new_deployment() {
  # Post a deployment status message to a chat webhook.
  # $1 - webhook URL (optional, defaults to $DEFAULT_CHAT_URL)
  # Reads Gitlab CI env vars: CI_JOB_STATUS, ENVIRONMENT, CI_JOB_URL,
  # CI_JOB_NAME, CI_JOB_ID, CI_PROJECT_NAME, CI_COMMIT_TITLE.

  # Install jq on the fly when the CI image does not ship it.
  jq --version || (apt update && apt install -y jq)

  CHAT_URL=${1:-$DEFAULT_CHAT_URL}

  # FIX: quote expansions — unquoted $CI_JOB_STATUS/$ENVIRONMENT/$CHAT_URL
  # were subject to word-splitting and globbing.
  STATUS=$(echo "$CI_JOB_STATUS" | tr '[:lower:]' '[:upper:]')
  ENV_NAME=$(echo "$ENVIRONMENT" | tr '[:lower:]' '[:upper:]')

  JOB_LINK="<$CI_JOB_URL| $CI_JOB_NAME $CI_JOB_ID>"

  DESCRIPTION="
$STATUS : Deployment for $CI_PROJECT_NAME on $ENV_NAME
$JOB_LINK
$CI_COMMIT_TITLE
"

  # Build the payload with jq so the text is safely JSON-escaped.
  JSON_MESSAGE=$(jq -n --arg text "$DESCRIPTION" '{text: $text }')
  curl -X POST "$CHAT_URL" \
    --header "Content-Type: application/json" \
    --data "$JSON_MESSAGE"
}
3025
notify_new_version() {
  # Post the Gitlab release notes for a tagged version to a chat webhook.
  # $1 - kind of notification (free text included in the title)
  # $2 - webhook URL (optional, defaults to $DEFAULT_CHAT_URL)
  # Requires GITLAB_TOKEN plus the usual Gitlab CI env vars.

  # BUG FIX: was `! test -z $CI_COMMIT_TAG || exit 0`, which terminated
  # the whole calling shell when the pipeline was not running on a tag;
  # skip the notification with `return 0` instead.
  [ -n "$CI_COMMIT_TAG" ] || return 0

  # Install jq on the fly when the CI image does not ship it.
  jq --version || (apt update && apt install -y jq)

  KIND=$1
  CHAT_URL=${2:-$DEFAULT_CHAT_URL}

  STATUS=$(echo "$CI_JOB_STATUS" | tr '[:lower:]' '[:upper:]')
  ENV_NAME=$(echo "$ENVIRONMENT" | tr '[:lower:]' '[:upper:]')
  TITLE="$ENV_NAME *$STATUS* $KIND for version *$CI_COMMIT_TAG* of *$CI_PROJECT_NAME* "

  RELEASE_URL="https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/releases/$CI_COMMIT_TAG"

  # Fetch the release description, strip the surrounding JSON quotes,
  # rewrite Markdown links [label](url) into Slack <url|label> form and
  # turn literal \n escapes into real newlines.
  NOTES=$(curl --header "PRIVATE-TOKEN: $GITLAB_TOKEN" "$RELEASE_URL" |
    jq .description |
    sed -e 's/^"//' -e 's/"$//' |
    sed -E 's/\[([^]]+)\]\(([^)]+)\)/<\2|\1>/g' |
    sed -E 's/\\n/\'$'\n/g')

  JOB_LINK="<$CI_JOB_URL| $CI_JOB_NAME $CI_JOB_ID>"

  DESCRIPTION="
$TITLE
$JOB_LINK
$NOTES
"

  # Build the payload with jq so the text is safely JSON-escaped.
  JSON_MESSAGE=$(jq -n --arg text "$DESCRIPTION" '{text: $text }')
  curl -X POST "$CHAT_URL" \
    --header "Content-Type: application/json" \
    --data "$JSON_MESSAGE"
}
3059
+ #!/usr/bin/env bash
3060
+
3061
# Decide whether the sbt compile cache should be skipped for this pipeline.
# Succeeds (exit 0) when either:
#   - the current ref is master or develop, or
#   - `git diff` against the comparison branch prints output for the
#     `project` directory (i.e. the sbt build definition changed).
# NOTE(review): the $(git diff ... --exit-code -- project) substitution is
# used as a non-empty-STRING test inside [[ ]], not as an exit-code check;
# --exit-code suppresses patch output only with -q/--quiet semantics, so
# non-empty diff output is what triggers the skip — confirm intent.
skip_sbt_compile_cache() {
  # Merge requests compare against their target branch; otherwise fall
  # back to the repository default branch.
  COMPARED_BRANCH="${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-$CI_DEFAULT_BRANCH}"
  echo "branch to compare to: $COMPARED_BRANCH"
  git fetch origin $COMPARED_BRANCH
  echo "fetched $COMPARED_BRANCH"
  [[ "$CI_COMMIT_REF_NAME" =~ ^(master|develop)$ || $(git diff origin/$COMPARED_BRANCH --exit-code -- project) ]]
}
3068
+ #!/usr/bin/env bash
3069
+
3070
+ # in case of trouble with functions for update history during import
3071
+ # https://stackoverflow.com/questions/56729192/pg-restore-fails-when-trying-to-create-function-referencing-table-that-does-not
3072
+
3073
+ # example: clone_databases --source_env testing --destination_env recette --services "order,notification,parcel,ikea"
3074
# Clone the databases of the listed services from one environment into
# another: dump from source_env, then create/import into destination_env.
clone_databases() {
  # Identify ourselves to the database tooling invoked downstream.
  export USERNAME="database-cloner"

  set -e

  # extract_args consumes "--source_env X --destination_env Y --services Z"
  # from the raw argument list (order-sensitive) and sets the three
  # variables $source_env, $destination_env, $services.
  extract_args 3 source_env destination_env services $*

  dump_databases "$source_env" "$services"
  import_databases "$destination_env" "$services"
}
3084
+
3085
dump_databases() {
  # Dump the Postgres database of each service from the given environment
  # into /tmp/db_dump_<service>.sql, through a local k8s port-forward.
  # $1 - source environment (also the kubectl context selector)
  # $2 - comma-separated list of service names
  local env="$1"
  # Turn "a,b,c" into newline-separated words for the for-loop below.
  local services=$(echo -n "$2" | tr ',' '\n')

  database_k8s_output_dump_path="/tmp/database_k8s_output_dump"

  configure_kubectl_for "$env"
  # database_k8s establishes the port-forward; tolerate a non-zero exit
  # and scrape its captured output for the forwarded local port below.
  set +e
  database_k8s "$env" > "$database_k8s_output_dump_path"
  set -e

  source_pg_local_port=$(extract_pg_local_port "$database_k8s_output_dump_path")

  for service in $services
  do
    service_path="/tmp/$service"

    # The clone may fail (missing repo / permissions); the cd check
    # below turns that into a warn-and-skip.
    set +e
    git clone "git@gitlab.com:colisweb/back/$service.git" "$service_path"
    set -e

    if cd "$service_path"; then
      echo "dump the database for service $service.."

      # Decrypt the deploy configuration to read the DB credentials.
      git secret reveal -f

      # YAML path of the service's postgres block, e.g. ".orderconfig.postgres".
      PG_YAML_PATH=".${service}config.postgres"

      SOURCE_DB_DATABASE=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.database")
      SOURCE_DB_USER=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.user")
      SOURCE_DB_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.password")

      export PGPASSWORD="$SOURCE_DB_PASSWORD"

      DUMP_PATH="/tmp/db_dump_${service}.sql"
      # --no-owner so the dump can be restored under a different role.
      pg_dump --no-owner -h localhost -p "$source_pg_local_port" -U "$SOURCE_DB_USER" "$SOURCE_DB_DATABASE" > "$DUMP_PATH"

      cd ..
      rm -rf "$service_path"
    else
      echo "WARN: failed to clone $service - skipping"
    fi
  done
}
3129
+
3130
import_databases() {
  # Recreate and import the previously dumped databases into the target
  # environment, through a local k8s port-forward.
  # $1 - destination environment (also the k8s namespace)
  # $2 - comma-separated list of service names
  # Expects /tmp/db_dump_<service>.sql files produced by dump_databases.
  local env="$1"
  # Turn "a,b,c" into newline-separated words for the for-loop below.
  local services=$(echo -n "$2" | tr ',' '\n')

  database_k8s_output_import_path="/tmp/database_k8s_output_import"

  configure_kubectl_for "$env"
  # database_k8s establishes the port-forward; tolerate a non-zero exit
  # and scrape its captured output for the forwarded local port below.
  set +e
  database_k8s "$env" > "$database_k8s_output_import_path"
  set -e

  destination_pg_local_port=$(extract_pg_local_port "$database_k8s_output_import_path")

  for service in $services
  do
    service_path="/tmp/$service"

    # The clone may fail; the cd check below turns that into warn-and-skip.
    set +e
    git clone "git@gitlab.com:colisweb/back/$service.git" "$service_path"
    set -e

    if cd "$service_path"; then
      echo "create and import database for $service.."

      # Decrypt the deploy configuration to read the DB credentials.
      git secret reveal -f

      PG_YAML_PATH=".${service}config.postgres"

      DB_PORT="5432"
      DB_HOST=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.host")
      DB_INIT_USERNAME=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.initUsername")
      DB_INIT_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.initPassword")
      DB_DATABASE=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.database")
      DB_USER=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.user")
      DB_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configsPath ./deploy --variable "${PG_YAML_PATH}.password")

      # Create the database and its role if they do not exist yet.
      kube_init_service_database \
        --namespace ${env} \
        --service ${service} \
        --db_host ${DB_HOST} \
        --db_port ${DB_PORT} \
        --db_init_username ${DB_INIT_USERNAME} \
        --db_init_password ${DB_INIT_PASSWORD} \
        --db_database ${DB_DATABASE} \
        --db_username ${DB_USER} \
        --db_password ${DB_PASSWORD}

      # Interactive guard before wiping the destination database.
      echo "WARN: A complete clean of $DB_DATABASE on $DB_HOST will be operated"
      read -rsn1 -p"Press any key to continue";echo
      flyway_clean "$DB_HOST" "$DB_PORT" "$DB_DATABASE" "$DB_USER" "$DB_PASSWORD"

      DUMP_PATH="/tmp/db_dump_${service}.sql"
      export PGPASSWORD="$DB_PASSWORD"
      set +e
      # BUG FIX: previously `psql "postgres://..." -p "$DB_DATABASE"` — psql's
      # -p flag is the PORT, so the database NAME was passed as a port and no
      # database was selected. Put the database in the connection URI instead.
      psql "postgres://$DB_USER@127.0.0.1:$destination_pg_local_port/$DB_DATABASE" -f "$DUMP_PATH"
      set -e

      cd ..
      rm -rf "$service_path"
    else
      echo "WARN: failed to clone $service - skipping"
    fi
  done
}
3194
+
3195
extract_pg_local_port() {
  # Extract the forwarded local Postgres port from a captured
  # database_k8s output file.
  # $1 - path to the captured output file
  # FIX: read the file directly (no useless `cat |`) and quote the path
  # so filenames with spaces work.
  grep 'postgres@127.0.0.1:' "$1" | sed 's/.*postgres@127.0.0.1:\(.*[0-9]\).*/\1/g'
}
3198
+ #!/usr/bin/env bash
3199
+
3200
emit_datadog_deploy_event() {
  # Emit a Datadog "deployment" event so deploys show up on dashboards.
  # Usage: emit_datadog_deploy_event --environment <env> --service <svc> --version <ver>
  # Requires: DD_API_KEY environment variable.
  # Returns 1 when Datadog did not accept the event.
  extract_args 3 environment service version $*
  check_env_vars 1 "DD_API_KEY"

  response=$(
    curl -X POST -H "Content-type: application/json" \
      -d '{
      "title": "deploying '"$service"' to '"$environment"'",
      "text": "deploying '"$service"' version '"$version"' to '"$environment"'",
      "priority": "normal",
      "tags": ["service:'"$service"' ", "env:'"$environment"'" ,"action:'"deployment"'"] ,

      "alert_type": "Info"
      }' \
      "https://api.datadoghq.com/api/v1/events?api_key=$DD_API_KEY"
  )

  #echo $response
  EventID=$(echo $response | jq ".event.id")
  url=$(echo $response | jq ".event.url")

  # A non-zero event id means Datadog accepted the event.
  # NOTE(review): on API errors jq yields the string "null", which the
  # arithmetic -ne comparison coerces to 0 (empty variable lookup), so the
  # failure branch is taken — implicit behaviour worth confirming.
  if [[ $EventID -ne 0 ]]; then
    echo "event successfully created check in datadog UI : $url"
  else
    echo " failed to create event "
    return 1
  fi
}
3228
+
3229
+ #!/usr/bin/env bash
3230
+
3231
+ # DEPRECATED
3232
emit_datadog_error_events() {
  # DEPRECATED helper: emit a Datadog "Error" event.
  # Usage: emit_datadog_error_events --title <t> --text <x> --priority <p> --environment <e>
  # Requires: DD_API_KEY environment variable.
  set -e
  # extract_args sets $title, $text, $priority, $environment from the
  # raw (order-sensitive) argument list.
  extract_args 4 title text priority environment $*
  check_env_vars 1 "DD_API_KEY"

  curl -X POST -H "Content-type: application/json" \
    -d '{
      "title": "'"$title"'",
      "text": "'"$text"'",
      "priority": "'"$priority"'",
      "tags": ["environment:'"$environment"'"],
      "alert_type": "Error"
    }' \
    "https://api.datadoghq.com/api/v1/events?api_key=$DD_API_KEY"
}
3247
+
3248
+ #!/usr/bin/env bash
3249
terraform_init() {
  # Initialise terraform in a section directory and select (or create)
  # the workspace matching the environment.
  # $1 - directory containing the terraform configuration
  # $2 - environment name, used as the workspace name
  SECTION=$1
  ENV=$2
  # BUG FIX: the cd was unquoted and unguarded — if it failed, terraform
  # would run from the wrong directory against the wrong state.
  cd "$SECTION" || return 1
  terraform init -input=false
  terraform workspace select "$ENV" || terraform workspace new "$ENV"
}