@colisweb/rescript-toolkit 3.7.1 → 3.7.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. package/.gitlab-ci.yml +1 -1
  2. package/.secure_files/ci-functions-v16.5.0 +2384 -0
  3. package/.secure_files/ci-functions-v16.6.0 +2384 -0
  4. package/.secure_files/ci-functions-v16.7.0 +2422 -0
  5. package/.secure_files/ci-functions-v17.0.0 +2422 -0
  6. package/.secure_files/ci-functions-v17.0.1 +2422 -0
  7. package/.secure_files/ci-functions-v17.0.10 +2525 -0
  8. package/.secure_files/ci-functions-v17.0.2 +2422 -0
  9. package/.secure_files/ci-functions-v17.0.3 +2422 -0
  10. package/.secure_files/ci-functions-v17.0.4 +2422 -0
  11. package/.secure_files/ci-functions-v17.0.5 +2422 -0
  12. package/.secure_files/ci-functions-v17.0.6 +2422 -0
  13. package/.secure_files/ci-functions-v17.0.7 +2422 -0
  14. package/.secure_files/ci-functions-v17.0.8 +2422 -0
  15. package/.secure_files/ci-functions-v17.0.9 +2527 -0
  16. package/.secure_files/ci-functions-v17.1.0 +2525 -0
  17. package/.secure_files/ci-functions-v17.1.1 +2547 -0
  18. package/.secure_files/ci-functions-vTest +2384 -0
  19. package/.yarn/install-state.gz +0 -0
  20. package/locale/fr.json +5 -0
  21. package/package.json +1 -1
  22. package/playground/components/Playground_Checkbox.res +6 -1
  23. package/playground/components/Playground_MultiSelect.res +21 -0
  24. package/src/form/Toolkit__FormValidationFunctions.res +12 -0
  25. package/src/ui/Toolkit__Ui_Checkbox.res +20 -9
  26. package/src/ui/Toolkit__Ui_Checkbox.resi +1 -0
  27. package/src/ui/Toolkit__Ui_MultiSelect.res +26 -4
  28. package/src/ui/Toolkit__Ui_MultiSelect.resi +1 -0
  29. package/src/utils/Toolkit__Utils_UnitMeasure.res +3 -3
  30. package/src/vendors/ReactIcons.res +30 -0
@@ -0,0 +1,2525 @@
1
+ #!/usr/bin/env bash
2
+
3
+ #VARIABLES
4
+ export SCRIPT_FULL_PATH=$(dirname "$0")
5
+
6
+ ##FUNCTIONS
7
+ # https://stackoverflow.com/questions/1527049/how-can-i-join-elements-of-an-array-in-bash
8
+ join_by() {
9
+ local d=${1-} f=${2-}
10
+ if shift 2; then
11
+ printf %s "$f" "${@/#/$d}"
12
+ fi
13
+ }
14
+
15
+ mkstring() {
16
+ local start=$1
17
+ local separator=$2
18
+ local end=$3
19
+ shift 3
20
+
21
+ if [ $# -gt 0 ]; then
22
+ printf $start
23
+ join_by $separator $*
24
+ printf $end
25
+ fi
26
+ }
27
+
28
+ md5all() {
29
+ all_hash=$(mktemp)
30
+ for name in $*; do
31
+ find $name -type f -exec cat {} \; | md5sum | cut -f1 -d ' ' >> $all_hash
32
+ done;
33
+ cat $all_hash | md5sum | cut -f1 -d ' '
34
+ }
35
+
36
+ log() {
37
+ echo "$*" >&2
38
+ }
39
+ #!/usr/bin/env bash
40
+
41
+ check_args() {
42
+ if [ -z $2 ] || [ "$1" != "$2" ]; then
43
+ echo >&2 "missing argument $1"
44
+ return 1
45
+ fi
46
+ }
47
+
48
+ check_env_vars() {
49
+ ArgsCount=$1 && shift
50
+ for ((i = 0; i < $ArgsCount; i++)); do
51
+ if [[ -z "${!1}" ]]; then
52
+ echo >&2 "missing ENV $1"
53
+ return 1
54
+ fi
55
+ shift
56
+ done
57
+ }
58
+
59
+ extract_arg() {
60
+ name=$1
61
+ passed=$2
62
+ value=$3
63
+ if [ "--$name" != "$passed" ]; then
64
+ echo "missing argument $name"
65
+ exit 1
66
+ fi
67
+ eval $name='$value'
68
+ }
69
+
70
+ extract_args() {
71
+ declare -a Array_Args
72
+ ArgsCount=$1 && shift
73
+ for ((i = 0; i < $ArgsCount; i++)); do
74
+ Array_Args[i]=$1 && shift
75
+ done
76
+ for ArgName in "${Array_Args[@]}"; do
77
+ extract_arg "$ArgName" $* && shift 2
78
+ done
79
+ }
80
+
81
+ #!/usr/bin/env bash
82
+
83
+ aws_ecr_login() {
84
+ PATH=/root/.local/bin:$PATH
85
+
86
+ aws ecr get-login-password \
87
+ | docker login --username AWS --password-stdin 949316342391.dkr.ecr.eu-west-1.amazonaws.com \
88
+ || (echo "you should update to AWS CLI version 2 https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-mac.html " $(aws ecr get-login --region=eu-west-1 --no-include-email) )
89
+ }
90
+
91
+ aws_ecr_token() {
92
+ aws ecr get-authorization-token --region=eu-west-1 --output text --query 'authorizationData[].authorizationToken'
93
+ }
94
+
95
+ # you will need jq to use these commands. You can install it using "brew install jq"
96
+ # delete_images colisweb_api 8
97
+ # will delete images older than 8 weeks
98
+ delete_images() {
99
+
100
+ REPO=$1
101
+ WEEKS=${2:-16}
102
+
103
+ WEEKS_AGO=$(date -v-${WEEKS}w +%F)
104
+
105
+ #Get all ecr images
106
+ IMAGES=$(aws ecr describe-images --repository-name $REPO --output json)
107
+
108
+ #Filter unnecessary values and map `imagePushedAt` to EPOCH
109
+ NON_LATEST_IMAGES=$(echo $IMAGES | jq '[.imageDetails[] | select(.imageTags | any(endswith("latest")) | not)]')
110
+
111
+ #Filter on EPOCH
112
+ OLD_IMAGES=$(echo $NON_LATEST_IMAGES | jq --arg date $WEEKS_AGO '.[] | select(.imagePushedAt[0:10] < $date).imageDigest')
113
+ while IFS= read -r IMAGE; do
114
+ if [ "$IMAGE" != "" ]; then
115
+ echo "Deleting $IMAGE from $REPO"
116
+ AWS_PAGER="" aws ecr batch-delete-image --repository-name $REPO --image-ids imageDigest=$IMAGE
117
+ fi
118
+ done <<< "$OLD_IMAGES"
119
+ }
120
+
121
+ # delete_images_all_repos 12
122
+ # will delete images in all repositories older than 12 weeks
123
+ delete_images_all_repos() {
124
+ REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[]|.[].repositoryName')
125
+
126
+ while IFS= read -r REPO; do
127
+ echo "processing ECR repository $REPO"
128
+ delete_images $REPO $1
129
+ done <<< "$REPOSITORIES"
130
+ }
131
+
132
+ #!/usr/bin/env bash
133
+
134
+ # If gitlab is down or pipeline are stuck, hotfixes need to be available
135
+ # This script will publish docker images to ECR using your current git HEAD, then deploy them to a given environment.
136
+ # Some local files (git-commit.conf and sentry.properties) will be updated, take caution.
137
+ # No trace of this will appear on Gitlab (no releases, no pipelines, no tags).
138
+ # create_hotfix_scala $ENVIRONMENT $CHART_NAME [ $MODULE_NAME $MODULE_PATH $DEPLOYMENT ]
139
+ # create_hotfix_scala testing crm main modules/3-executables/main crm
140
+ # create_hotfix_scala testing notification \
141
+ # main-http modules/3-executables/main-http notification-http \
142
+ # main-consumer modules/3-executables/main-consumer notification-consumer
143
+
144
+ create_hotfix_scala() {
145
+
146
+ ENVIRONMENT=$1
147
+ CHART_NAME=$2
148
+ shift 2
149
+
150
+ SHORT_SHA=$(git rev-parse --short HEAD)
151
+ HOTFIX_TAG="hotfix-$SHORT_SHA"
152
+
153
+ gum confirm "Preparing $HOTFIX_TAG for $CHART_NAME ?" || exit
154
+ prepare_hotfix_scala $HOTFIX_TAG
155
+
156
+ gum confirm "Building $HOTFIX_TAG for $CHART_NAME ?" || exit
157
+ while [[ $# -gt 2 ]] ; do
158
+ build_hotfix_scala $HOTFIX_TAG "$1" "$2" "$3"
159
+ shift 3
160
+ done
161
+
162
+ gum confirm "Deploying $HOTFIX_TAG for $CHART_NAME ?" || exit
163
+ deploy_hotfix $CHART_NAME $ENVIRONMENT $HOTFIX_TAG
164
+ }
165
+
166
+ # Update local git-commit.conf and sentry.properties files using git short sha
167
+ prepare_hotfix_scala() {
168
+ HOTFIX_TAG=$1
169
+
170
+ git secret reveal -f
171
+ aws_ecr_login
172
+
173
+ COMMIT_CONF_FILES=$(find . -name "git-commit.conf")
174
+ SENTRY_PROPERTIES_FILES=$(find . -name "sentry.properties")
175
+
176
+ for file in $(echo "$COMMIT_CONF_FILES\n$SENTRY_PROPERTIES_FILES"); do
177
+ sed -i '' -e 's&GIT_COMMIT&'"$HOTFIX_TAG&" $file
178
+ done
179
+
180
+ }
181
+
182
+ # Build docker images locally and publish them to AWS ECR.
183
+ build_hotfix_scala() {
184
+
185
+ HOTFIX_TAG=$1
186
+ SBT_MODULE=$2
187
+ DOCKER_PATH=$3
188
+ DEPLOYMENT=$4
189
+
190
+ DOCKER_REGISTRY_ID="949316342391"
191
+ DOCKER_REGISTRY="$DOCKER_REGISTRY_ID.dkr.ecr.eu-west-1.amazonaws.com"
192
+ DOCKER_IMAGE=$DOCKER_REGISTRY/$DEPLOYMENT
193
+ HOTFIX_IMAGE=$DOCKER_IMAGE:$HOTFIX_TAG
194
+
195
+ #Build
196
+ sbt "project $SBT_MODULE" "Docker / stage"
197
+
198
+ #Publish
199
+ docker build --platform "linux/amd64" -t $HOTFIX_IMAGE --cache-from $DOCKER_IMAGE "$DOCKER_PATH/target/docker/stage"
200
+ docker push $HOTFIX_IMAGE
201
+
202
+ echo "Created hotfix $HOTFIX_IMAGE"
203
+ }
204
+
205
+ # Deploy the project in the given environment
206
+ deploy_hotfix() {
207
+ source $colisweb_scripts/ci/helm.sh
208
+
209
+ CHART_NAME=$1
210
+ ENVIRONMENT=$2
211
+ HOTFIX_TAG=$3
212
+
213
+ CONFIG_PATH=deploy
214
+ CHART_PATH=$CONFIG_PATH/$CHART_NAME
215
+ ROOT_PATH=$(pwd)
216
+
217
+ # Unset Kubectl configuration made via the KUBECONFIG env variable
218
+ # it would override the config made by configure_kubectl_for
219
+ # for example, using Gitlab runners in Kubernetes sets this variable and causes conflict
220
+ unset KUBECONFIG
221
+
222
+ # Configure Kubectl
223
+ configure_kubectl_for $ENVIRONMENT
224
+
225
+ # Avoiding "no local-index.yaml" or "empty local-index.yaml" error
226
+ cat > $HOME/Library/Caches/helm/repository/local-index.yaml <<EOT
227
+ apiVersion: v1
228
+ entries:
229
+ cronjob:
230
+ EOT
231
+
232
+ # helm3 stable repo have changed and must be updated manually, in versions < v2.17.0
233
+ helm3 repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
234
+ helm3 repo add stable https://charts.helm.sh/stable --force-update
235
+ helm3 repo update
236
+ helm3 dependency update ${ROOT_PATH}/${CHART_PATH}
237
+
238
+ # Gather values/*.yaml files
239
+ VALUES_PATH="${ROOT_PATH}/${CHART_NAME}/values"
240
+ VALUES_FILES=''
241
+ [ -d $VALUES_PATH ] && VALUES_FILES=$(find $VALUES_PATH -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')
242
+
243
+ # Deploy
244
+ helm3 upgrade --install \
245
+ --namespace ${ENVIRONMENT} \
246
+ ${VALUES_FILES} \
247
+ -f ${ROOT_PATH}/${CONFIG_PATH}/common.yaml \
248
+ -f ${ROOT_PATH}/${CONFIG_PATH}/${ENVIRONMENT}.yaml \
249
+ -f ${ROOT_PATH}/${CONFIG_PATH}/${ENVIRONMENT}-secrets.yaml \
250
+ --set global.version=$HOTFIX_TAG \
251
+ ${CHART_NAME} ${ROOT_PATH}/${CHART_PATH}
252
+
253
+
254
+ verify_deployments_v3 -t 10m $ENVIRONMENT $CHART_NAME
255
+
256
+ }
257
+
258
+ #!/usr/bin/env bash
259
+
260
+ image_exists() {
261
+ set -e
262
+
263
+ REGISTRY=$1
264
+ REPOSITORY=$2
265
+ IMAGE=$3
266
+
267
+ TAGGED_IMAGE="$REGISTRY/$REPOSITORY:$IMAGE"
268
+
269
+ aws ecr describe-images --registry-id $REGISTRY --repository-name $REPOSITORY --image-ids "imageTag=$IMAGE"
270
+
271
+ if [ $? -eq 0 ]
272
+ then
273
+ echo "Image $TAGGED_IMAGE already present in distant repo"
274
+ return 0
275
+ else
276
+ echo "Image $TAGGED_IMAGE NOT present in distant repo"
277
+ return 1
278
+ fi
279
+ }
280
+ #!/usr/bin/env bash
281
+
282
+ gmm() {
283
+ git checkout $1
284
+ git pull
285
+ git checkout $2
286
+ git pull
287
+ git merge $1
288
+ git push
289
+ }
290
+
291
+ git_damn_merge() {
292
+ git checkout $1
293
+ git pull
294
+ git checkout $2
295
+ git dammit
296
+ git merge $1
297
+ git push
298
+ }
299
+
300
+ git_prune_local_branches() {
301
+ git branch -r |
302
+ awk '{print $1}' |
303
+ egrep -v -f /dev/fd/0 <(git branch -vv | grep origin) |
304
+ awk '{print $1}' |
305
+ xargs git branch -d
306
+ }
307
+
308
+ gum_checkout() {
309
+ git branch -a | cut -f3- -d "/" | gum filter | xargs git checkout
310
+ }
311
+
312
+ # useful option :
313
+ # export GIT_SUBLINE_MERGE_NON_INTERACTIVE_MODE=TRUE
314
+ # see https://github.com/paulaltin/git-subline-merge
315
+ setup_subline_merge() {
316
+ location=${1:-"--local"}
317
+
318
+ case $location in
319
+ --local)
320
+ if [ -d ".git" ]; then
321
+ echo "* merge=subline" >>.git/info/attributes
322
+ else
323
+ echo "Cannot use local option, not in a git repository"
324
+ return 1
325
+ fi
326
+ ;;
327
+ --global)
328
+ echo "* merge=subline" >>~/.gitattributes
329
+ ;;
330
+ *)
331
+ echo "unknown argument $location"
332
+ return 2
333
+ ;;
334
+ esac
335
+
336
+ git config $location merge.conflictStyle diff3
337
+ git config $location merge.subline.driver "$colisweb_scripts/shell-session/shell/dev/git-subline-merge %O %A %B %L %P"
338
+ git config $location merge.subline.recursive binary
339
+ }
340
+
341
+ rebase_from_ancestor() {
342
+ set -x
343
+ branch=$1
344
+ tip=$(git rev-parse HEAD)
345
+ ancestor=$(git merge-base $branch $tip)
346
+ commits=$(git log $ancestor..$tip)
347
+ git reset --hard $ancestor
348
+ git merge --squash $tip
349
+ git commit -m "squashed commmits $commits" || echo "nothing committed"
350
+ git rebase $branch -Xtheirs
351
+ }
352
+
353
+ #!/usr/bin/env bash
354
+
355
+ import_all_pgp_keys() {
356
+ echo "importing all PGP keys"
357
+ gpg --import $SCRIPT_FULL_PATH/pgp_keys/*.key
358
+ }
359
+
360
+ remove_all_persons_from_secrets() {
361
+ echo "cleanup git secret"
362
+ WHO_KNOWS=($(git secret whoknows))
363
+ git secret removeperson $WHO_KNOWS
364
+ echo "Removed secrets access for $WHO_KNOWS"
365
+ }
366
+
367
+ all_pgp_emails() {
368
+ gpg --show-key $SCRIPT_FULL_PATH/pgp_keys/*.key | sed -rn "s/.*<(.*)>/\1/p"
369
+ }
370
+
371
+ set_all_secret_keys() {
372
+
373
+ import_all_pgp_keys
374
+
375
+ git secret reveal -f
376
+
377
+ remove_all_persons_from_secrets
378
+
379
+ if [ $# -eq 0 ]; then
380
+ echo "No emails supplied, using dev-tools pgp keys as source"
381
+ IN_THE_KNOW=($(gum choose --no-limit $(all_pgp_emails)))
382
+ else
383
+ IN_THE_KNOW=($*)
384
+ fi
385
+
386
+ git secret tell $IN_THE_KNOW
387
+ git secret hide
388
+ git secret whoknows
389
+
390
+ echo "all secrets updated, you'll need to commit the changes"
391
+ }
392
+
393
+ #!/usr/bin/env bash
394
+
395
+ start_ssh_bastion() {
396
+ ENV=$1
397
+ SSH_LOCAL_PORT=$2
398
+ POD_NAME=ssh-bastion-$USERNAME
399
+ CONFIG_MAP_NAME=ssh-bastion-$USERNAME
400
+ configure_kubectl_for $ENV
401
+ kubectl get pods -o name | grep pod/$POD_NAME
402
+ if [ $? -eq 0 ]; then
403
+ echo "$POD_NAME is already running"
404
+ else
405
+ #configmap
406
+ kubectl get configmap $CONFIG_MAP_NAME && kubectl delete configmap $CONFIG_MAP_NAME
407
+ tempdir=$(mktemp -d)
408
+ cat <<EOF > $tempdir/sshd_config
409
+ AllowTcpForwarding yes
410
+ Port 2222
411
+ PermitRootLogin yes
412
+ AuthorizedKeysFile /etc/ssh/authorized_keys
413
+ EOF
414
+ cp ~/.ssh/id_rsa.pub $tempdir/authorized_keys
415
+ kubectl create configmap $CONFIG_MAP_NAME --from-file=$tempdir
416
+
417
+ #pod
418
+ kubectl get pod $POD_NAME && kubectl delete pod $POD_NAME
419
+ cat <<EOF | kubectl create -f -
420
+
421
+ apiVersion: v1
422
+ kind: Pod
423
+ metadata:
424
+ name: $POD_NAME
425
+ spec:
426
+ containers:
427
+ - name: $POD_NAME
428
+ image: sickp/alpine-sshd:7.4
429
+ ports:
430
+ - containerPort: 2222
431
+ volumeMounts:
432
+ - mountPath: /etc/ssh/sshd_config
433
+ name: ssh-config
434
+ subPath: sshd_config
435
+ - mountPath: /etc/ssh/authorized_keys
436
+ name: ssh-config
437
+ subPath: authorized_keys
438
+ volumes:
439
+ - name: ssh-config
440
+ configMap:
441
+ name: $CONFIG_MAP_NAME
442
+ EOF
443
+
444
+ fi
445
+
446
+ # You need a recent kubectl for wait to work (1.15 works), install or upgrade
447
+ # with brew :
448
+ # brew install kubernetes-cli
449
+ # brew upgrade kubernetes-cli
450
+ kubectl wait --for=condition=Ready pod/$POD_NAME
451
+
452
+ # kube port-forward
453
+ lsof -ti tcp:$SSH_LOCAL_PORT | xargs kill
454
+ kubectl port-forward $POD_NAME $SSH_LOCAL_PORT:2222 &
455
+ while ! nc -z 127.0.0.1 $SSH_LOCAL_PORT; do
456
+ sleep 1
457
+ done
458
+ echo "forwarding ssh via local port $SSH_LOCAL_PORT"
459
+ echo "remember to terminate the bastion with 'stop_ssh_bastion'"
460
+ }
461
+
462
+ stop_ssh_bastion() {
463
+ POD_NAME=ssh-bastion-$USERNAME
464
+ kubectl delete pod $POD_NAME
465
+ }
466
+
467
+ #!/usr/bin/env bash
468
+
469
+ configure_kubectl_for() {
470
+ local infra_env="$1"
471
+ local valid_envs="[testing][staging][production][performance][tests][recette]"
472
+ echo "$valid_envs" | grep -q "\[$infra_env\]"
473
+
474
+ if [ $? -ne 0 ]; then
475
+ echo "Cannot configure kubectl for invalid env : $infra_env"
476
+ echo "choose one of $valid_envs"
477
+ return 1
478
+ fi
479
+
480
+ aws eks update-kubeconfig --name "toutatis-$infra_env-eks" >&2
481
+ }
482
+
483
+ #!/usr/bin/env bash
484
+
485
+ # WARNING : never try to do a dump directly from the database_production_ca
486
+ # this could cause lot of lock database issues.
487
+ # always use database_production_read_replica_ca instead
488
+ database_k8s() {
489
+ MODE=$1
490
+ case $MODE in
491
+ "tests") SSH_LOCAL_PORT=2224;PG_LOCAL_PORT=24440;CA_LOCAL_PORT=25430;ENV="tests";;
492
+ "testing") SSH_LOCAL_PORT=2225;PG_LOCAL_PORT=24441;CA_LOCAL_PORT=25431;ENV="testing";;
493
+ "staging") SSH_LOCAL_PORT=2226;PG_LOCAL_PORT=24442;CA_LOCAL_PORT=25432;ENV="staging";;
494
+ "production") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24443;CA_LOCAL_PORT=25433;ENV="production";;
495
+ "production_rw") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24444;CA_LOCAL_PORT=25434;ENV="production";;
496
+ "recette") SSH_LOCAL_PORT=2228;PG_LOCAL_PORT=24446;CA_LOCAL_PORT=25436;ENV="recette";;
497
+ *) echo "Unsupported ENV : $MODE"; return 1 ;;
498
+ esac
499
+
500
+ start_ssh_bastion $ENV $SSH_LOCAL_PORT
501
+
502
+ lsof -ti tcp:$PG_LOCAL_PORT | xargs kill
503
+
504
+ bastion_config=$(mktemp)
505
+ cat > "$bastion_config" <<EOF
506
+ UserKnownHostsFile /dev/null
507
+ StrictHostKeyChecking no
508
+ User root
509
+ Host bastion_tests
510
+ HostName 127.0.0.1
511
+ Port 2224
512
+ LocalForward 24440 toutatis-tests-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
513
+ Host bastion_testing
514
+ HostName 127.0.0.1
515
+ Port 2225
516
+ LocalForward 24441 toutatis-testing-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
517
+ LocalForward 25431 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
518
+ Host bastion_staging
519
+ HostName 127.0.0.1
520
+ Port 2226
521
+ LocalForward 24442 toutatis-staging-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
522
+ LocalForward 25432 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
523
+ Host bastion_recette
524
+ HostName 127.0.0.1
525
+ Port 2228
526
+ LocalForward 24446 toutatis-recette-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
527
+ LocalForward 25436 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
528
+ Host bastion_production
529
+ HostName 127.0.0.1
530
+ Port 2227
531
+ LocalForward 24443 toutatis-production-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
532
+ LocalForward 25433 api-production-rds-read-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
533
+ LocalForward 25435 archive-ca.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
534
+ EOF
535
+ if [ "$MODE" = "production_rw" ] ; then
536
+ cat >> "$bastion_config" <<EOF
537
+ LocalForward 24444 toutatis-production-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
538
+ LocalForward 25434 api-production-rds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
539
+ EOF
540
+ fi
541
+
542
+ ssh -f -N \
543
+ -F "$bastion_config" \
544
+ "bastion_$ENV"
545
+
546
+ echo "sample command : 'psql postgres://postgres@127.0.0.1:$PG_LOCAL_PORT'"
547
+ echo "sample command : 'mysql -u colisweb -h 127.0.0.1 -P $CA_LOCAL_PORT -p db_name'"
548
+
549
+ echo "run 'kubectl delete pod $POD_NAME' when you have finished"
550
+ }
551
+
552
+ psql_on_k8() {
553
+ NAMESPACE=$1
554
+ SERVICE=$2
555
+ CONNECTION=$3
556
+ shift 3
557
+
558
+ kubectl -n $NAMESPACE run ${SERVICE}-database-init \
559
+ --image jbergknoff/postgresql-client \
560
+ --restart=Never \
561
+ --attach --rm \
562
+ -- \
563
+ postgresql://${CONNECTION} \
564
+ "$*"
565
+ }
566
+
567
+ mysql_on_k8() {
568
+ local namespace=$1
569
+ local db_host=$2
570
+ local db_port=$3
571
+ local db_init_username=$4
572
+ local db_init_password=$5
573
+ local query=$6
574
+
575
+ kubectl -n ${namespace} run datadog-database-init \
576
+ --image widdpim/mysql-client \
577
+ --restart=Never \
578
+ --attach --rm \
579
+ -- \
580
+ mysql --host=$db_host --user=$db_init_username --password=$db_init_password --port=$db_port --execute="$query"
581
+ }
582
+ #!/usr/bin/env bash
583
+
584
+ kube_init_database_once() {
585
+
586
+ extract_args 8 namespace db_host db_port db_init_username db_init_password db_database db_username db_password $*
587
+
588
+ echo "======================="
589
+ echo " Initializing Database '$db_database' for namespace $namespace"
590
+ echo "======================="
591
+
592
+ set -x
593
+
594
+ echo "Checking if Database '$db_database' exists"
595
+ set +e
596
+ psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -lqtA | cut -d\| -f1 | grep "^$db_database$"
597
+ return_code=$?
598
+ set -e
599
+
600
+ if [ ${return_code} -eq 0 ]; then
601
+ echo "Database $db_database already exists - nothing to do"
602
+ else
603
+ echo "Database $db_database does not exist - initializing"
604
+
605
+ psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'CREATE DATABASE '"$db_database"';'
606
+ echo "DB created $db_database"
607
+
608
+ psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'CREATE USER '"$db_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
609
+ echo "USER created $db_username"
610
+
611
+ psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_username"';'
612
+ echo "Granted all privileges for $db_username on $db_database"
613
+ fi
614
+
615
+ echo "======================="
616
+ echo " Database '$db_database' Initialization complete for namespace $namespace"
617
+ echo "======================="
618
+ }
619
+
620
+ kube_init_database_readonly_account() {
621
+
622
+ extract_args 6 namespace service db_connection db_database db_readonly_username db_readonly_password $*
623
+
624
+ echo "======================="
625
+ echo " Initializing Readonly Account '$db_readonly_username' for '$db_database' for namespace $namespace"
626
+ echo "======================="
627
+
628
+ # Print commands before execution, except echo
629
+ trap '[[ $BASH_COMMAND != echo* ]] && echo $BASH_COMMAND' DEBUG
630
+
631
+ echo "Checking if Readonly account '$db_readonly_username' for '$db_database' exists"
632
+ set +e
633
+ psql_on_k8 $namespace $service $db_connection -qtAc 'SELECT rolname FROM pg_roles;' | grep "^$db_readonly_username$"
634
+ return_code=$?
635
+ set -e
636
+
637
+ if [ ${return_code} -eq 0 ]; then
638
+ echo "Account $db_readonly_username already exists - nothing to do"
639
+ else
640
+ echo "Account $db_readonly_username does not exist - creating"
641
+
642
+ psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_readonly_username"' WITH ENCRYPTED PASSWORD '"'$db_readonly_password'"';'
643
+ psql_on_k8 $namespace $service $db_connection -c 'GRANT CONNECT ON DATABASE '"$db_database"' TO '"$db_readonly_username"';'
644
+ psql_on_k8 $namespace $service $db_connection -c 'GRANT USAGE ON SCHEMA public TO '"$db_readonly_username"';'
645
+ psql_on_k8 $namespace $service $db_connection -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO '"$db_readonly_username"';'
646
+ psql_on_k8 $namespace $service $db_connection -c 'ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO '"$db_readonly_username"';'
647
+
648
+ echo "Created user with read-only permissions for $db_readonly_username on $db_database (schema public)"
649
+ fi
650
+ }
651
+
652
+ kube_init_datadog_in_database() {
653
+ extract_args 8 namespace db_host db_port db_init_username db_init_password db_datadog_username db_datadog_password db_datadog_schema $*
654
+
655
+ echo "======================="
656
+ echo " Initializing Datadog Agent Requiement for namespace $namespace"
657
+ echo "======================="
658
+
659
+ echo "Checking if User '$db_datadog_username' exists"
660
+ set +e
661
+ mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'SELECT user FROM mysql.user;' | grep "^$db_datadog_username$"
662
+ return_code=$?
663
+ set -e
664
+
665
+ if [ ${return_code} -eq 0 ]; then
666
+ echo "User $db_datadog_username already exists - nothing to do"
667
+ else
668
+ echo "User $db_datadog_username does not exist - initializing"
669
+
670
+ # All the query come from this docs : https://docs.datadoghq.com/fr/database_monitoring/setup_mysql/selfhosted/?tab=mysql56
671
+
672
+ mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'CREATE USER '"$db_datadog_username"'@"%" IDENTIFIED BY '"'$db_datadog_password'"';'
673
+ echo "USER created $db_datadog_username"
674
+
675
+ mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT REPLICATION CLIENT ON *.* TO datadog@"%" WITH MAX_USER_CONNECTIONS 5;'
676
+ echo "ALTER USER $db_datadog_username"
677
+
678
+ mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT PROCESS ON *.* TO '"$db_datadog_username"'@"%";'
679
+ echo "Granted PROCESS for $db_datadog_username"
680
+
681
+ mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT SELECT ON performance_schema.* TO '"$db_datadog_username"'@"%";'
682
+ echo "Granted SELECT on performance_schema for $db_datadog_username"
683
+
684
+ mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'CREATE SCHEMA IF NOT EXISTS datadog;'
685
+ echo "CREATE SCHEMA datadog"
686
+
687
+ mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT EXECUTE ON datadog.* to '"$db_datadog_username"'@"%";'
688
+ echo "Granted 'GRANT EXECUTE for $db_datadog_username on datadog"
689
+
690
+ mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT CREATE TEMPORARY TABLES ON datadog.* TO '"$db_datadog_username"'@"%";'
691
+ echo "Granted CREATE TEMPORARY TABLES for $db_datadog_username"
692
+
693
+
694
+ mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.explain_statement;
695
+ DELIMITER $$
696
+ CREATE PROCEDURE datadog.explain_statement(IN query TEXT)
697
+ SQL SECURITY DEFINER
698
+ BEGIN
699
+ SET @explain := CONCAT("EXPLAIN FORMAT=json ", query);
700
+ PREPARE stmt FROM @explain;
701
+ EXECUTE stmt;
702
+ DEALLOCATE PREPARE stmt;
703
+ END $$
704
+ DELIMITER ;'
705
+ echo "CREATE PROCEDURE PROCEDURE datadog.explain_statement"
706
+
707
+ mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS '"$db_datadog_username"'.explain_statement;
708
+ DELIMITER $$
709
+ CREATE PROCEDURE '"$db_datadog_username"'.explain_statement(IN query TEXT)
710
+ SQL SECURITY DEFINER
711
+ BEGIN
712
+ SET @explain := CONCAT("EXPLAIN FORMAT=json ", query);
713
+ PREPARE stmt FROM @explain;
714
+ EXECUTE stmt;
715
+ DEALLOCATE PREPARE stmt;
716
+ END $$
717
+ DELIMITER ;
718
+ GRANT EXECUTE ON PROCEDURE '"$db_datadog_username"'.explain_statement TO datadog@"%";'
719
+ echo "CREATE PROCEDURE on SCHEMA $db_datadog_schema for $db_datadog_username"
720
+
721
+ mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.enable_events_statements_consumers;
722
+ DELIMITER $$
723
+ CREATE PROCEDURE datadog.enable_events_statements_consumers()
724
+ SQL SECURITY DEFINER
725
+ BEGIN
726
+ UPDATE performance_schema.setup_consumers SET enabled="YES" WHERE name LIKE "events_statements_%";
727
+ END $$
728
+ DELIMITER ;
729
+ GRANT EXECUTE ON PROCEDURE datadog.enable_events_statements_consumers TO datadog@"%";'
730
+
731
+ echo "CREATE PROCEDURE on datadog.enable_events_statements_consumers"
732
+ fi
733
+
734
+ echo "======================="
735
+ echo " Database '$db_datadog_schema' Initialization complete for namespace $namespace"
736
+ echo "======================="
737
+ }
738
+
739
+ kube_init_datadog_in_postgres_database() {
740
+ extract_args 7 namespace db_host db_port db_init_username db_init_password db_datadog_username db_datadog_password $*
741
+
742
+ local service="datadog"
743
+ local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"
744
+
745
+ echo "======================="
746
+ echo " Initializing $service Agent On PostgresSQL Database Requirement for namespace $namespace"
747
+ echo "======================="
748
+
749
+ echo "Checking if User '$db_datadog_username' exists"
750
+
751
+ set +e
752
+ if psql_on_k8 $namespace $service $db_connection -qtAc 'SELECT usename FROM pg_catalog.pg_user;' | grep "^$db_datadog_username$";
753
+ then
754
+ echo "User $db_datadog_username already exists - nothing to do"
755
+ else
756
+ echo "User $db_datadog_username does not exist - initializing"
757
+
758
+ set -e
759
+ psql_on_k8 $namespace $service $db_connection -qc 'CREATE USER '"$db_datadog_username"' WITH password '"'$db_datadog_password'"';'
760
+ echo "User created $db_datadog_username"
761
+
762
+ psql_on_k8 $namespace $service $db_connection -qc 'CREATE SCHEMA datadog;'
763
+ echo "Schema datadog created"
764
+
765
+ psql_on_k8 $namespace $service $db_connection -qc 'GRANT USAGE ON SCHEMA datadog TO datadog;'
766
+ echo "Granted usage for datadog schema to datadog"
767
+
768
+ psql_on_k8 $namespace $service $db_connection -qc 'GRANT USAGE ON SCHEMA public TO datadog;'
769
+ echo "Granted usage for public schema to datadog"
770
+
771
+ psql_on_k8 $namespace $service $db_connection -qc 'GRANT pg_monitor TO datadog;'
772
+ echo "Granted pg_monitor to datadog"
773
+
774
+ psql_on_k8 $namespace $service $db_connection -qc 'CREATE EXTENSION IF NOT EXISTS pg_stat_statements schema public;'
775
+ echo "Extension pg_stat_statements created"
776
+
777
+ local datadog_function_path="/tmp/datatog-explain-statement-function.sql"
778
+ local datadog_function="CREATE OR REPLACE FUNCTION datadog.explain_statement(
779
+ l_query TEXT,
780
+ OUT explain JSON
781
+ )
782
+ RETURNS SETOF JSON AS
783
+ \\$\\$
784
+ DECLARE
785
+ curs REFCURSOR;
786
+ plan JSON;
787
+
788
+ BEGIN
789
+ OPEN curs FOR EXECUTE pg_catalog.concat('EXPLAIN (FORMAT JSON) ', l_query);
790
+ FETCH curs INTO plan;
791
+ CLOSE curs;
792
+ RETURN QUERY SELECT plan;
793
+ END;
794
+ \\$\\$
795
+ LANGUAGE 'plpgsql'
796
+ RETURNS NULL ON NULL INPUT
797
+ SECURITY DEFINER;"
798
+
799
+ kubectl -n $namespace run $service-database-init \
800
+ --image jbergknoff/postgresql-client \
801
+ --restart=Never \
802
+ --attach --rm \
803
+ --command \
804
+ -- \
805
+ /bin/sh -c "echo -e \"$datadog_function\" > $datadog_function_path; psql postgresql://$db_connection -qf $datadog_function_path"
806
+
807
+ echo "Function datadog.explain_statement created"
808
+ fi
809
+
810
+ echo "======================="
811
+ echo " Database $service Initialization complete for namespace $namespace"
812
+ echo "======================="
813
+ }
814
+
815
# Create a service database and its user on a shared Postgres instance,
# running psql inside the cluster (psql_on_k8). Idempotent: skips creation
# when the database already exists.
# Args (via extract_args): namespace service db_host db_port
#   db_init_username db_init_password db_database db_username db_password
kube_init_service_database() {

  extract_args 9 namespace service db_host db_port db_init_username db_init_password db_database db_username db_password $*

  # Admin connection string used for all bootstrap statements.
  local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"

  set -x

  echo "Checking if Database '$db_database' exists"
  # Tolerate a non-zero grep (database absent) while probing, then restore -e.
  set +e
  psql_on_k8 $namespace $service $db_connection -lqtA | cut -d\| -f1 | grep "^$db_database$"
  return_code=$?
  set -e

  if [ ${return_code} -eq 0 ]; then
    echo "Database $db_database already exists - nothing to do"
  else
    echo "Database $db_database does not exist - initializing"

    psql_on_k8 $namespace $service $db_connection -c 'CREATE DATABASE '"$db_database"';'
    echo "DB created $db_database"

    # BUGFIX: previously referenced the undefined $db_datadog_username; the
    # user to create is the service user extracted as $db_username.
    psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
    echo "USER created $db_username"

    psql_on_k8 $namespace $service $db_connection -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_username"';'
    echo "Granted all privileges for $db_username on $db_database"
  fi

  echo "======================="
  echo " Database '$db_database' Initialization complete for namespace $namespace"
  echo "======================="
}
848
+ #!/usr/bin/env bash
849
+
850
# Port forward on the first matching pod.
# Ex :
#   pod_forward testing notification-http
#   pod_forward testing colisweb-api-web 3333 3000
pod_forward() {
  ENV=$1
  POD_FILTER=$2
  LOCAL_PORT=${3:-8080}   # local listening port (default 8080)
  POD_PORT=${4:-8080}     # target port on the pod (default 8080)

  # Free the local port first if something already listens on it.
  if PID=$(lsof -ti tcp:$LOCAL_PORT); then
    echo "killing process $PID which uses port $LOCAL_PORT"
    kill $PID
  fi

  configure_kubectl_for $ENV

  POD=`pick_pod $ENV $POD_FILTER`

  echo "setting up forwarding to $POD"
  # Run the forward in the background; keep its PID for the teardown hint.
  kubectl -n $ENV port-forward $POD $LOCAL_PORT:$POD_PORT &
  PID=$!

  # Poll with netcat until the tunnel actually accepts connections.
  while ! echo exit | nc localhost $LOCAL_PORT > /dev/null; do
    sleep 1
    echo "waiting for port $LOCAL_PORT to be open locally"
  done
  echo "port $LOCAL_PORT is now available on localhost, forwarding to $ENV $POD:$POD_PORT"
  echo 'you can terminate it with "kill '$PID'" or "kill $(lsof -ti tcp:'$LOCAL_PORT')"'
}
880
+
881
# Run a command (default: bash) interactively inside a pod, optionally
# narrowed down by a pod-name filter.
#   pod_exec testing
#   pod_exec testing bash
#   pod_exec testing bash colisweb-api
pod_exec() {
  ENV=$1
  COMMAND=${2:-bash}
  POD_FILTER=$3
  configure_kubectl_for $ENV
  POD=$(pick_pod $ENV $POD_FILTER)
  echo "running $COMMAND inside $POD"
  kubectl -n $ENV exec -ti $POD -- $COMMAND
}
894
+
895
# Copy a local file into a pod, optionally narrowed down by a pod-name filter.
#   pod_copy_to testing localfile remotefile
#   pod_copy_to testing localfile remotefile colisweb-api
pod_copy_to() {
  ENV=$1
  LOCAL_FILE=$2
  REMOTE_FILE=$3
  POD_FILTER=$4
  configure_kubectl_for $ENV
  POD=$(pick_pod $ENV $POD_FILTER)
  kubectl cp $LOCAL_FILE $ENV/$POD:$REMOTE_FILE
}
907
+
908
+
909
# Resolve a pod name in $ENV. Without a filter, prompt interactively (gum);
# with a filter, print the first pod whose name matches "pod/$2".
pick_pod() {
  ENV=$1
  POD_FILTER="pod/$2"
  configure_kubectl_for $ENV

  if [ -z "$2" ] ; then
    # No filter given: let the user pick from the full pod list.
    kubectl -n $ENV get pods | gum filter | cut -f1 -d" "
  else
    if PODS=$(kubectl -n $ENV get pods -o=name | grep "$POD_FILTER"); then
      # BUGFIX: $PODS must stay multi-line — unquoted 'echo $PODS' collapses
      # it to one line under bash, so 'head -1' returned every match joined
      # together instead of the first pod only.
      printf '%s\n' "$PODS" | head -1 | sed -e 's/pod\///'
    else
      echo "no pods found on $ENV matching $POD_FILTER" >&2
    fi
  fi
}
924
+
925
+ #!/usr/bin/env bash
926
+
927
# Open an SSH tunnel through the environment bastion to the matching
# ElastiCache Redis, then start an interactive redis-cli on the local port.
#   redis_k8s testing|staging|production
redis_k8s() {
  MODE=$1
  case $MODE in
    "testing") SSH_LOCAL_PORT=2225;REDIS_LOCAL_PORT=63791;ENV="testing";;
    "staging") SSH_LOCAL_PORT=2226;REDIS_LOCAL_PORT=63792;ENV="staging";;
    "production") SSH_LOCAL_PORT=2227;REDIS_LOCAL_PORT=63793;ENV="production";;
    *) echo "Unsupported ENV : $MODE"; return 1 ;;
  esac

  start_ssh_bastion $ENV $SSH_LOCAL_PORT

  # Free the local Redis port if a previous tunnel is still alive.
  lsof -ti tcp:$REDIS_LOCAL_PORT | xargs kill

  # Throw-away ssh config declaring one forwarding entry per environment.
  bastion_config=$(mktemp)
  cat > "$bastion_config" <<EOF
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
User root
Host bastion_testing
  HostName 127.0.0.1
  Port 2225
  LocalForward 63791 redis-testing.xufte6.0001.euw1.cache.amazonaws.com:6379
Host bastion_staging
  HostName 127.0.0.1
  Port 2226
  LocalForward 63792 redis-sandbox.xufte6.0001.euw1.cache.amazonaws.com:6379
Host bastion_production
  HostName 127.0.0.1
  Port 2227
  LocalForward 63793 redis-prod.xufte6.0001.euw1.cache.amazonaws.com:6379
EOF

  ssh -f -N \
    -F "$bastion_config" \
    "bastion_$ENV"

  echo "sample command : 'redis-cli -p $REDIS_LOCAL_PORT'"
  # BUGFIX: the old hint referenced an undefined $POD_NAME; what must be torn
  # down when finished is the bastion tunnel.
  echo "run 'stop_ssh_bastion' when you have finished"

  redis-cli -p $REDIS_LOCAL_PORT
}
968
+
969
+ #!/usr/bin/env bash
970
+
971
# Create a k8s CronJob that runs an Ammonite script on a schedule.
# See run_cron_job_k8s -h for the full option list.
run_cron_job_k8s() {

  # default values
  local namespace="testing"
  local name="$USERNAME"
  local SCHEDULE="00 05 * * *"
  local secret=""
  local amm_folder=""
  local amm_script=""

  # -e env, -t cron schedule, -p pod name, -c secret file, -f script folder,
  # -s script (mandatory), -h help.
  while getopts ":e:c:p:f:s:t:h" opt; do
    case $opt in
      e)
        namespace="$OPTARG" >&2
        ;;
      t)
        SCHEDULE="$OPTARG" >&2
        ;;
      p)
        name="$OPTARG" >&2
        ;;
      c)
        secret="$OPTARG" >&2
        ;;
      f)
        amm_folder="$OPTARG" >&2
        ;;
      s)
        amm_script="$OPTARG" >&2
        ;;
      h)
        show_help_cron_job
        return 0
        ;;
      :)
        echo "Option -$OPTARG requires an argument. Run run_cron_job_k8s -h for help" >&2
        return 0
        ;;
      \?)
        echo "Invalid option: -$OPTARG. Run run_cron_job_k8s -h for help" >&2
        return 0
        ;;
    esac
  done

  if [ -z "$amm_script" ]; then
    echo 'Missing -s. Run run_cron_job_k8s -h for help' >&2
    return 0
  fi

  # Drop parsed options; remaining args are forwarded to the script.
  shift "$((OPTIND-1))"

  # Render remaining args as a quoted, comma-separated YAML list body,
  # e.g. "a", "b" — empty when no args were given.
  local script_args=$(
    if [ "$#" -gt 0 ] ; then
      printf '"'
      join_by '", "' $*
      printf '"'
    fi
  )

  local IMAGE="lolhens/ammonite:2.5.4"
  local CRONJOB_NAME="cronjob-ammonite-$name"


  configure_kubectl_for $namespace

  if [[ ! -r "$amm_script" ]]; then
    echo "ammonite script not found $amm_script"
    return 2
  else
    local CONFIG_MAP="config-$CRONJOB_NAME"
    local SECRET_MAP="secret-$CRONJOB_NAME"
    local CONFIG_MAP_DIR="$(mktemp -d)"

    # Stage the script (and optional support folder) into one directory so a
    # single configmap can expose everything under /code in the pod.
    if [[ ! -z $amm_folder && -d $amm_folder ]] ; then
      cp -r "$amm_folder/" "$CONFIG_MAP_DIR"
    fi
    cp "$amm_script" "$CONFIG_MAP_DIR/script.sc"

    # Recreate configmap / secret / cronjob from scratch (delete-if-exists).
    kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
    kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"

    kubectl -n $namespace get secret $SECRET_MAP && kubectl -n $namespace delete secret $SECRET_MAP
    kubectl -n $namespace create secret generic $SECRET_MAP --from-file="$secret"

    kubectl -n $namespace get cronjob $CRONJOB_NAME && kubectl -n $namespace delete cronjob $CRONJOB_NAME

    echo "starting $CRONJOB_NAME with $IMAGE"

    # CronJob manifest; single-quoted segments are literal YAML, unquoted
    # $vars splice the values computed above.
    JOB_DEFINITION='
apiVersion: batch/v1
kind: CronJob
metadata:
  name: '$CRONJOB_NAME'
  namespace: '$namespace'
spec:
  schedule: "'$SCHEDULE'"
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        spec:
          nodeSelector:
            workType: "workers"
          restartPolicy: Never
          volumes:
            - name: config
              configMap:
                name: '$CONFIG_MAP'
            - name: secret
              secret:
                secretName: '$SECRET_MAP'
          containers:
            - name: '$CRONJOB_NAME'
              command: ["amm", "/code/script.sc"]
              image: '$IMAGE'
              imagePullPolicy: IfNotPresent
              args: ['$script_args']
              env:
                - name: POD_NAME
                  valueFrom:
                    fieldRef:
                      apiVersion: v1
                      fieldPath: metadata.name
                - name: POD_NAMESPACE
                  valueFrom:
                    fieldRef:
                      apiVersion: v1
                      fieldPath: metadata.namespace
                - name: HOST_IP
                  valueFrom:
                    fieldRef:
                      apiVersion: v1
                      fieldPath: status.hostIP
              volumeMounts:
                - name: config
                  mountPath: /code
                - name: secret
                  mountPath: /conf
                  readOnly: true
              resources:
                requests:
                  cpu: 500m
                  memory: 256Mi
                limits:
                  cpu: 4000m
                  memory: 512Mi
              envFrom:
                - configMapRef:
                    name: '$CONFIG_MAP'
                - secretRef:
                    name: '$SECRET_MAP'
'

    # NOTE(review): relies on zsh not word-splitting $JOB_DEFINITION here;
    # under plain bash the unquoted echo would flatten the YAML — confirm
    # the intended shell before reusing elsewhere.
    echo $JOB_DEFINITION > /tmp/job.yaml

    kubectl -n $namespace apply -f /tmp/job.yaml

  fi
}
1135
+
1136
# Print usage information for run_cron_job_k8s.
show_help_cron_job() {
  local usage_text="""Usage: run_cron_job_k8s -s SCRIPT [-t TIME] [-e ENV] [-c CONFIG] [-p POD] [-f FOLDER] [ARGS]
Create a k8s cron job that will be run a script regularly

    -h          display this help and exit
    -s SCRIPT   run script SCRIPT on a pod (SCRIPT must be a .sc file)
    -t TIME     opt. time when the job will be launched. TIME should be in CRON syntax (default to 00 05 * * *, ie 5AM UTC)
    -e ENV      opt. set execution environment (default to testing)
    -c CONFIG   opt. secret file needed for the script (must be a .sc file, not a .secret file)
    -p POD      opt. name of the pod to create (default to $USERNAME)
    -f FOLDER   opt. name of the folder containing the scripts to execute (if SCRIPT needs other files)
    ARGS        opt. additional arguments for SCRIPT
  """
  echo "$usage_text"
}
1153
+
1154
+ #!/usr/bin/env bash
1155
+
1156
# Print usage information for run_job_k8s.
show_help_job() {
  local usage_text="""Usage: run_job_k8s -s SCRIPT [-e ENV] [-c CONFIG] [-p POD] [-f FOLDER] [ARGS]
Create a k8s job executing a script

    -h          display this help and exit
    -s SCRIPT   run script SCRIPT on a pod (SCRIPT must be a .sc file)
    -e ENV      opt. set execution environment (default to testing)
    -c CONFIG   opt. secret file needed for the script (must be a .sc file, not a .secret file)
    -p POD      opt. name of the pod to create (default to $USERNAME)
    -f FOLDER   opt. name of the folder containing the scripts to execute (if SCRIPT needs other files)
    ARGS        opt. additional arguments for SCRIPT

The organisation of the files must be the same locally as on the pod :
 - /code containing the script to execute (arg -s) and the other needed files (if the arg -f is used, it must reference this directory)
 - /conf containing the secret file (arg -c if used)
E.g. in the script \"/code/script.sc\", to use a secret file \"/conf/secret.sc\", the import should look like \"import \$file.^.conf.secret.sc\"
  """
  echo "$usage_text"
}
1176
+
1177
# Create a one-shot k8s Job that runs an Ammonite script.
# See show_help_job (run_job_k8s -h) for the option list.
run_job_k8s() {

  # default values
  local namespace="testing"
  local name="$USERNAME"
  local secret=""
  local amm_folder=""
  local amm_script=""

  # -e env, -p pod name, -c secret file, -f script folder, -s script, -h help.
  while getopts ":e:c:p:f:s:h" opt; do
    case $opt in
      e)
        namespace="$OPTARG" >&2
        ;;
      p)
        name="$OPTARG" >&2
        ;;
      c)
        secret="$OPTARG" >&2
        ;;
      f)
        amm_folder="$OPTARG" >&2
        ;;
      s)
        amm_script="$OPTARG" >&2
        ;;
      h)
        show_help_job
        return 0
        ;;
      :)
        echo "Option -$OPTARG requires an argument. Run run_cron_job_k8s -h for help" >&2
        return 0
        ;;
      \?)
        echo "Invalid option: -$OPTARG. Run run_cron_job_k8s -h for help" >&2
        return 0
        ;;
    esac
  done

  if [ -z "$amm_script" ]; then
    echo 'Missing -s. Run run_job_k8s -h for help' >&2
    return 0
  fi

  # Remaining positional args become the script's args list.
  shift "$((OPTIND-1))"

  # Render remaining args as a quoted, comma-separated YAML list body.
  local script_args=$(
    if [ "$#" -gt 0 ] ; then
      printf '"'
      join_by '", "' $*
      printf '"'
    fi
  )

  local IMAGE="lolhens/ammonite:2.5.4"
  local JOB_NAME="job-ammonite-$name"

  if [[ ! -r "$amm_script" ]]; then
    echo "ammonite script not found $amm_script"
    return 2
  else
    local CONFIG_MAP="config-$JOB_NAME"
    local CONFIG_MAP_DIR="$(mktemp -d)"
    local SECRET_MAP="secret-$JOB_NAME"

    configure_kubectl_for $namespace

    # Stage script + optional support folder so one configmap provides /code.
    if [[ ! -z $amm_folder && -d $amm_folder ]] ; then
      cp -r "$amm_folder/" "$CONFIG_MAP_DIR"
    fi
    cp "$amm_script" "$CONFIG_MAP_DIR/script.sc"

    # Recreate configmap / secret / job from scratch (delete-if-exists).
    kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
    kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"

    kubectl -n $namespace get secret $SECRET_MAP && kubectl -n $namespace delete secret $SECRET_MAP
    kubectl -n $namespace create secret generic $SECRET_MAP --from-file="$secret"

    kubectl -n $namespace get job $JOB_NAME && kubectl -n $namespace delete job $JOB_NAME

    echo "starting $JOB_NAME with $IMAGE"
  fi

  # Job manifest; single-quoted segments are literal YAML, unquoted $vars
  # splice the values computed above.
  JOB_DEFINITION='
apiVersion: batch/v1
kind: Job
metadata:
  name: '$JOB_NAME'
  namespace: '$namespace'
spec:
  template:
    spec:
      containers:
        - name: '$JOB_NAME'
          command: ["amm", "/code/script.sc"]
          image: '$IMAGE'
          args: ['$script_args']
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.hostIP
          volumeMounts:
            - name: config
              mountPath: /code
            - name: secret
              mountPath: /conf
              readOnly: true
          resources:
            requests:
              cpu: 500m
              memory: 256Mi
            limits:
              cpu: 4000m
              memory: 1Gi
      nodeSelector:
        workType: workers
      restartPolicy: Never
      volumes:
        - name: config
          configMap:
            name: '$CONFIG_MAP'
        - name: secret
          secret:
            secretName: '$SECRET_MAP'
'


  # NOTE(review): relies on zsh not word-splitting $JOB_DEFINITION here;
  # under plain bash the unquoted echo would flatten the YAML — confirm
  # the intended shell before reusing elsewhere.
  echo $JOB_DEFINITION > /tmp/job.yaml

  kubectl -n $namespace apply -f /tmp/job.yaml

}
1323
+
1324
+
1325
+ #!/usr/bin/env bash
1326
+
1327
# Run a one-off attached pod and delete it on exit.
# Usage: run_task --namespace NS --image IMAGE --name NAME [extra kubectl-run args...]
# check_args (defined elsewhere) validates that each flag literal is present.
run_task() {
  set -e

  check_args "--namespace" $1
  shift
  NAMESPACE=$1
  shift
  check_args "--image" $1
  shift
  IMAGE=$1
  shift
  check_args "--name" $1
  shift
  NAME=$1
  shift

  # Trace the kubectl invocation for CI logs.
  set -x

  # Remaining args ($*) are forwarded verbatim to kubectl run.
  kubectl -n ${NAMESPACE} run ${NAME} \
    --image ${IMAGE} \
    --restart=Never \
    --attach --rm \
    $*
}
1351
# Geocode a free-text address with the Google Maps Geocoding API.
# Requires GOOGLE_API_KEY in the environment.
#   geocode_address 12 rue de la Paix Paris
geocode_address() {
  # NOTE: only spaces are percent-encoded; other reserved chars pass through.
  ADDRESS=$(sed -e 's: :%20:g' <(echo "$*"))
  URL="https://maps.googleapis.com/maps/api/geocode/json?address=${ADDRESS}&key=${GOOGLE_API_KEY}"
  # Quote the URL so the call also survives word-splitting shells (bash).
  curl "$URL"
}
1356
+
1357
# Look up a French business in the public SIRENE v3 dataset.
#   search_business SIREN free text query...
search_business() {
  SIREN=$1
  shift
  # Spaces in the query become '+' for the opendatasoft search endpoint.
  QUERY=$(sed -e 's: :+:g' <(echo "$*"))
  URL="https://data.opendatasoft.com/api/records/1.0/search/?dataset=sirene_v3%40public&q=${QUERY}&sort=datederniertraitementetablissement&facet=trancheeffectifsetablissement&facet=libellecommuneetablissement&facet=departementetablissementi&refine.siren=${SIREN}"
  # Quote the URL so the call also survives word-splitting shells (bash).
  curl "$URL"
}
1364
+
1365
+ #!/bin/bash
1366
+
1367
# Interactive toll-cost report: download production tours for a date range,
# let the user pick projects/tours (gum), then query HERE for toll details.
# Requires HERE_API_KEY in the environment and ./tour_details.sc alongside.
#   source tolls.sh ; tolls antoine.thomas@colisweb.com
function tolls() {
  USER=${1:-first.last@colisweb.com}
  FROM_DATE=${2:-"2023-02-01"}
  TO_DATE=${3:-"2023-02-28"}

  # Confirm/edit credentials interactively, then log in for a session token.
  USER=$(gum input --prompt "username : " --value $USER)
  TOKEN=$(./tour_details.sc login --user $USER --password $(gum input --password --placeholder password))
  [ "$TOKEN" != "" ] && echo "connected" || return 1

  # Confirm/edit the date range, then fetch all route plans into ~/Downloads.
  FROM_DATE=$(gum input --prompt "Date start : " --value $FROM_DATE)
  TO_DATE=$(gum input --prompt "Date end : " --value $TO_DATE)
  FILENAME="tours-${FROM_DATE}-TO-${TO_DATE}.json"
  curl --cookie "session=$TOKEN" "https://api.production.colisweb.com/api/v6/routes-plans/external?from=${FROM_DATE}&to=${TO_DATE}" > ~/Downloads/$FILENAME
  echo "Tournées téléchargées"

  # Interactive narrowing: pick projects, then the tours they contain.
  projectIds=$(./tour_details.sc allProjects --file ~/Downloads/$FILENAME | gum choose --no-limit | cut -d "," -f 2)
  echo "projets sélectionnés : $projectIds"
  tourIds=$(./tour_details.sc allTours --file ~/Downloads/$FILENAME --projectIds "$projectIds")
  echo "tournées sélectionnées : $tourIds"

  # One HERE call per tour; results land in a CSV named after the range.
  TARGET="${FROM_DATE}-TO-${TO_DATE}.csv"
  echo "appels à HERE, écriture dans $TARGET"
  ./tour_details.sc allToursDetails --token $TOKEN --hereApiKey $HERE_API_KEY --routeIds "$tourIds" > "$TARGET"

  echo "terminé"
}
1394
+
1395
+ #!/usr/bin/env bash
1396
+
1397
# Log in to an environment and export the session token as TOKEN_<env>.
# Missing args are prompted for with gum.
# possible syntax:
#   login
#   login testing
#   login testing userid
login() {
  # Each step is &&-chained so a cancelled prompt or failed auth aborts the
  # rest without exporting a stale token.
  ENV=${1:-`gum choose testing staging production recette`} && \
  USER=${2:-`gum input --placeholder username`} && \
  PASSWORD=`gum input --password --placeholder password` && \
  TOKEN=`$SCRIPT_FULL_PATH/scala/auth.sc login --env $ENV --user $USER --password $PASSWORD` && \
  export TOKEN_$ENV=$TOKEN && \
  echo "login success for $USER on $ENV" >&2
}
1409
+
1410
# Recompute a project's tour config; prompts (gum) for any missing argument.
# you need to call login first (see above)
# possible syntax:
#   recompute_tour
#   recompute_tour testing
#   recompute_tour testing draft
#   recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09
#   recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09 TODAY
#   recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09 FRIDAY
recompute_tour() {
  ENV=${1:-`gum choose testing staging production recette`}
  MODE=${2:-`gum choose draft definitive`}
  PROJECT_ID=${3:-`pick_project $ENV`}
  DAY=${4:-`gum choose TODAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY SUNDAY`}
  # jwt_token refreshes/validates $TOKEN for the environment.
  jwt_token $ENV
  scala/tour_config.sc $MODE -t $TOKEN -p $PROJECT_ID -d $DAY
}
1426
+
1427
# Interactively select a project in the given environment and print its id
# (first column of tour_config.sc's listing).
pick_project() {
  ENV=${1:-$(gum choose testing staging production recette)}
  jwt_token $ENV
  scala/tour_config.sc list -t $TOKEN -e $ENV | gum filter | cut -f1
}
1432
+
1433
# Ensure $TOKEN holds a valid session token for the environment, re-running
# login when the cached TOKEN_<env> is missing or expired.
jwt_token() {
  ENV=${1:-`gum choose testing staging production recette`}
  # Dynamic read of TOKEN_<env> (set by login) into TOKEN.
  eval 'TOKEN=$TOKEN_'$ENV
  if ! $SCRIPT_FULL_PATH/scala/auth.sc check -t $TOKEN -e $ENV ; then
    login $ENV
  fi
}
1440
+
1441
+ #!/usr/bin/env bash
1442
+
1443
# Open an sftp session to IKEA's Centiro FTP through the testing bastion:
# local port 25500 is forwarded over SSH to ft.centiro.ikea.com:22.
ftp_ikea_k8s() {
  SSH_LOCAL_PORT=2230
  FTP_LOCAL_PORT=25500
  start_ssh_bastion testing $SSH_LOCAL_PORT

  # Free the local FTP port if a previous tunnel is still alive.
  lsof -ti tcp:$FTP_LOCAL_PORT | xargs kill

  # Throw-away ssh config declaring the port forward through the bastion.
  bastion_config=$(mktemp)
  cat > "$bastion_config" <<EOF
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
User root
Host bastion_ftp
  HostName 127.0.0.1
  Port 2230
  LocalForward 25500 ft.centiro.ikea.com:22
EOF

  ssh -f -N \
    -F "$bastion_config" \
    "bastion_ftp"

  sftp -P $FTP_LOCAL_PORT colisweb.fr@127.0.0.1
}
1467
+
1468
+ #!/usr/bin/env bash
1469
+
1470
# Attach a local jconsole to a pod's JMX port (7199) through a SOCKS proxy
# tunnelled via the environment bastion.
# usage:
#   jconsole_k8s testing colisweb-api-web
jconsole_k8s() {
  ENV=$1
  NAME=$2

  start_ssh_bastion $ENV 2242
  # First pod whose name matches $NAME; we need its cluster IP for JMX/RMI.
  POD_IP=$( \
    kubectl -n $ENV get pods -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIP}{"\n"}{end}' \
    | grep "$NAME" | cut -d' ' -f2 | head -1 \
  )
  echo "selected POD with ip $POD_IP"
  echo "use 'root' as password"
  # SOCKS proxy on 7777 through the bastion; jconsole is pointed at it below.
  ssh -f -N -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -D 7777 root@127.0.0.1 -p 2242

  jconsole \
    -J-DsocksProxyHost=localhost \
    -J-DsocksProxyPort=7777 \
    -J-DsocksNonProxyHosts= \
    service:jmx:rmi:///jndi/rmi://$POD_IP:7199/jmxrmi \
    &

  echo "remember to stop with 'stop_ssh_bastion'"

}
1496
+
1497
+ #!/usr/bin/env bash
1498
+
1499
# Interactive console on an existing pod. See also run_ruby_k8s
# Ex :
#   railsc_k8s_old production
#   railsc_k8s_old production "User.where(email:'toni@colisweb.com')"
railsc_k8s_old() {
  ENV=$1
  COMMAND=$2
  configure_kubectl_for $ENV
  # First colisweb-api-web pod, stripped of the "pod/" prefix.
  POD=$(kubectl -n $ENV get pods -o=name | grep colisweb-api-web | head -1 | sed -e 's/pod\///')
  KUBERAILS="kubectl -n $ENV exec -ti $POD -- /usr/src/app/bin/rails c"
  # Without COMMAND: open the console interactively; with COMMAND: pipe it in.
  [ -z "$COMMAND" ] && eval $KUBERAILS || echo $COMMAND | eval $KUBERAILS
}
1511
+
1512
# Interactive console on an new pod. See also run_ruby_k8s
# Spins up a dedicated colisweb-api pod (sleep infinity), execs `rails c`
# in it, and deletes the pod when the console exits.
# Ex :
#   railsc_k8s production
railsc_k8s() {
  ENV=$1
  # production/staging images are tagged master-latest; other envs <env>-latest.
  [[ $ENV = "production" || $ENV = "staging" ]] && default_tag="master-latest" || default_tag="${ENV}-latest"
  # NOTE(review): the tag override is read from $5 although no 2nd-4th args
  # are documented — confirm whether callers really pass it in position 5.
  local image_tag=${5:-$default_tag}
  local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/colisweb-api:$image_tag"
  local POD_NAME="colisweb-api-rails-console-$image_tag-$USERNAME"

  # NOTE(review): this kubectl call happens before configure_kubectl_for —
  # it may hit the previously configured cluster; verify the ordering.
  kubectl -n $ENV get pod $POD_NAME && kubectl -n $ENV delete pod $POD_NAME

  configure_kubectl_for $ENV
  echo "starting with $IMAGE"

  # The pod just sleeps; the console is exec'ed into it afterwards, so the
  # session survives rails restarts and the pod keeps the app env/secrets.
  kubectl -n $ENV run $POD_NAME \
    --image $IMAGE \
    --restart=Never \
    --overrides='{
      "spec":{
        "nodeSelector":{
          "workType": "workers"
        },
        "containers":[
          {
            "name":"'$POD_NAME'",
            "image":"'$IMAGE'",
            "imagePullPolicy":"Always",
            "command":[
              "sleep",
              "infinity"
            ],
            "resources":{
              "limits":{
                "memory": "2048Mi"
              }
            },
            "envFrom": [ {
              "configMapRef": {
                "name": "colisweb-api"
              }
            }, {
              "secretRef": {
                "name": "colisweb-api"
              }
            }
            ]
          }
        ]
      }
    }
    '

  # Give the pod a moment to be schedulable before exec'ing into it.
  sleep 5
  kubectl -n $ENV exec -it $POD_NAME -- /usr/src/app/bin/rails c

  # 'print' is a zsh builtin — these helpers are sourced into zsh.
  print "End of $POD_NAME "
  kubectl -n $ENV delete pods $POD_NAME
}
1571
+
1572
# Create (or update the password of) a user via a Rails console command.
# Ex :
#   create_user testing claire.lien@colisweb.com super_admin clairemdp
create_user() {
  ENV=$1
  EMAIL=$2
  ROLE=$3
  PASSWORD=$4
  # NOTE(review): railsc_k8s (the new variant above) does not appear to read
  # a command argument — this 2nd parameter may be silently ignored; confirm
  # whether railsc_k8s_old was intended here.
  railsc_k8s $ENV "User.where(email:'$EMAIL', role:'$ROLE').first_or_create.update_attributes!(password: '$PASSWORD')"
}
1581
+
1582
# Delete a user by email via a Rails console command.
# Ex :
#   delete_user testing claire.lien@colisweb.com
delete_user() {
  ENV=$1
  EMAIL=$2
  # NOTE(review): railsc_k8s (the new variant above) does not appear to read
  # a command argument — this 2nd parameter may be silently ignored; confirm
  # whether railsc_k8s_old was intended here.
  railsc_k8s $ENV "User.find_by(email:'$EMAIL').destroy"
}
1589
+
1590
# NON Interactive console on an new pod, for long-running tasks (a few minutes)
# See also railsc_k8s
# The script and data file are mounted via a configmap: the script is run as
# /conf/script.rb and file.txt will be available from /conf/data.txt in the
# ruby code.
# examples :
#   run_ruby_k8s testing demo <(echo "pp JSON.parse(File.read('/conf/data.txt'))") <(echo '{ "content": 123 }')
#   run_ruby_k8s testing demo ~/.oh-my-zsh/custom/dev-tools/shell-session/ruby/demo.rb <(echo '{ "content": 123 }')
run_ruby_k8s() {
  if [ $# -lt 4 ]; then
    echo "usage : run_ruby_k8s production name-for-pod script.rb file.txt"
    return 1
  fi
  local namespace=$1
  local name=$2
  local ruby_script=$3
  local input_data=$4
  # production/staging images are tagged master-latest; other envs <env>-latest.
  [[ $namespace = "production" || $namespace = "staging" ]] && default_tag="master-latest" || default_tag="${namespace}-latest"
  local image_tag=${5:-$default_tag}

  if [ ! -r "$ruby_script" ]; then
    echo "ruby script not found $ruby_script"
    return 2
  fi

  if [ ! -r "$input_data" ]; then
    echo "data not found $input_data"
    return 3
  fi


  local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/colisweb-api:$image_tag"
  local POD_NAME="colisweb-api-script-$name"
  local CONFIG_MAP="config-$POD_NAME"
  local CONFIG_MAP_DIR="$(mktemp -d)"


  configure_kubectl_for $namespace


  # Stage script + data under fixed names so the pod sees /conf/script.rb
  # and /conf/data.txt regardless of the local file names.
  cp "$ruby_script" "$CONFIG_MAP_DIR/script.rb"
  cp "$input_data" "$CONFIG_MAP_DIR/data.txt"

  # Recreate configmap and pod from scratch (delete-if-exists).
  kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
  kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"

  kubectl -n $namespace get pod $POD_NAME && kubectl -n $namespace delete pod $POD_NAME

  echo "starting with $IMAGE"
  # Attached one-shot pod (--rm): `rails r /conf/script.rb` with the app's
  # configmap/secret env injected, configmap mounted at /conf.
  kubectl -n $namespace run $POD_NAME \
    --image $IMAGE \
    -ti \
    --restart=Never \
    --attach \
    --rm \
    --overrides='{
      "spec":{
        "nodeSelector":{
          "workType": "workers"
        },
        "containers":[
          {
            "name":"'$POD_NAME'",
            "image":"'$IMAGE'",
            "imagePullPolicy":"Always",
            "command":[
              "/usr/src/app/bin/rails",
              "r",
              "/conf/script.rb"
            ],
            "resources":{
              "limits":{
                "memory": "4096Mi"
              }
            },
            "volumeMounts":[
              {
                "name":"conf",
                "mountPath":"/conf"
              }
            ],
            "envFrom": [ {
              "configMapRef": {
                "name": "colisweb-api"
              }
            }, {
              "secretRef": {
                "name": "colisweb-api"
              }
            }
            ]
          }
        ],
        "volumes":[
          {
            "name":"conf",
            "configMap":{ "name":"'$CONFIG_MAP'" }
          }
        ]
      }
    }
    '

  # The pod removed itself (--rm); clean up the staged configmap too.
  kubectl -n $namespace delete configmap $CONFIG_MAP
}
1693
+
1694
# Re-run pickup postal-code corrections through run_ruby_k8s.
# example:
#   update_pickup_cp testing <( echo '{"wrong_cp": "59123", "corrected_cp": "59223", "delivery_ids": ["4192421", "4192425"]}' )
update_pickup_cp() {
  local target_env=$1
  local corrections_file=$2
  run_ruby_k8s $target_env update-pickup-cp "$SCRIPT_FULL_PATH/ruby/update_pickup_cp.rb" $corrections_file
}
1699
+
1700
+
1701
+
1702
# Push a JSON price list through run_ruby_k8s, splitting it with jq into
# chunks of 3000 entries when the file exceeds the ~1MB configmap limit.
update_all_prices() {
  local namespace=$1
  local json_prices=$2

  local json_size=$(wc -c < "$json_prices")

  if ((json_size > 940000)); then
    # BUGFIX: the guard used '(echo ... && return 1)' — 'return' inside a
    # (...) subshell only leaves the subshell, so the function carried on
    # without jq. A { } group returns from the function itself.
    command -v jq || { echo "jq not found (use brew install jq)"; return 1; }
    local max_lines=3000
    local total_lines=$(jq '. | length' $json_prices)
    local iterations=$((total_lines / max_lines + 1))
    echo "$json_prices is too big, I'll split it for you in blocks of $max_lines lines. It will take $iterations runs"
    for (( i = 0 ; i < iterations ; i++ )) ; do
      # Half-open slice [start, end) of the top-level JSON array.
      local start=$((i * max_lines))
      local end=$(( (i + 1) * max_lines))
      local split_file=$(mktemp)
      jq -c ".[$start:$end]" $json_prices > $split_file
      local split_lines=$(jq '. | length' $split_file)
      echo "starting iteration $i from $start to $end with $split_file command -v has $split_lines lines"
      run_ruby_k8s $namespace "update-prices-$i" "$SCRIPT_FULL_PATH/ruby/update_prices.rb" $split_file
    done
  else
    run_ruby_k8s $namespace "update-prices" "$SCRIPT_FULL_PATH/ruby/update_prices.rb" $json_prices
  fi
}
1727
+
1728
+
1729
# Re-upload survey/KPI CSV rows through run_ruby_k8s, splitting the file into
# 400-line chunks (header repeated in each) when it exceeds the ~1MB limit.
update_surveys() {
  local namespace=$1
  local csv_surveys=$2

  local csv_size=$(wc -c < "$csv_surveys")


  if ((csv_size > 940000)); then
    local max_lines=400
    local total_lines=$(wc -l < $csv_surveys)
    local iterations=$((total_lines / max_lines + 1))
    echo "$csv_surveys is too big, I'll split it for you in blocks of $max_lines lines. It will take $iterations runs"
    for (( i = 0 ; i < iterations ; i++ )) ; do
      # +2/+1 offsets skip the header row, which is re-added via head -1.
      local start=$((i * max_lines + 2))
      local end=$(( (i + 1) * max_lines + 1))
      local split_file=$(mktemp)
      head -1 $csv_surveys > $split_file
      sed -n ''"$start,${end}p" $csv_surveys >> $split_file


      local split_lines=$(wc -l < $split_file)
      # NOTE(review): "command -v has" in this message looks like a bad
      # search/replace of "which" — confirm the intended wording.
      echo "starting iteration $i from $start to $end with $split_file command -v has $split_lines lines"
      run_ruby_k8s $namespace "reimport-surveys-$i" "$SCRIPT_FULL_PATH/ruby/feedback_kpi_reuploader.rb" $split_file
    done
  else
    run_ruby_k8s $namespace "reimport-surveys" "$SCRIPT_FULL_PATH/ruby/feedback_kpi_reuploader.rb" $csv_surveys
  fi
}
1757
+
1758
+ #!/usr/bin/env bash
1759
+
1760
# Smoke-test the CI's GitLab SSH credentials using throw-away key material:
# write $SSH_PRIVATE_KEY and a fresh known_hosts to a temp dir, attempt an
# authenticated connection, then remove everything.
configure_gitlab_ssh() {
  tmp_dir=$(mktemp -d)
  ssh-keyscan gitlab.com > "$tmp_dir/known_hosts"
  echo "$SSH_PRIVATE_KEY" > "$tmp_dir/id_rsa"
  chmod 600 "$tmp_dir/id_rsa"
  ssh -i "$tmp_dir/id_rsa" -T git@gitlab.com
  rm -Rf "$tmp_dir"
}
1768
+
1769
+
1770
# Install the CI's GitLab SSH credentials into ~/.ssh for subsequent git
# commands, and verify them with an authenticated connection.
configure_gitlab_ssh_home() {
  # BUGFIX: plain 'mkdir' fails when ~/.ssh already exists, which aborts CI
  # jobs running under 'set -e'; -p makes the call idempotent.
  mkdir -p ~/.ssh
  ssh-keyscan gitlab.com >> ~/.ssh/known_hosts
  echo "$SSH_PRIVATE_KEY" > ~/.ssh/id_rsa
  chmod 600 ~/.ssh/id_rsa
  ssh -T git@gitlab.com
}
1777
+ #!/usr/bin/env bash
1778
+
1779
# Schedule a Datadog downtime for each service in the (space-separated) list.
# No-op outside production.
datadog_schedule_downtime() {
  SERVICES=$1
  DOWNTIME_MINUTES=${2:-30}

  # Guard clause: only production deployments get a downtime window.
  if [[ "$ENVIRONMENT" != "production" ]] ; then
    return 0
  fi
  log "scheduling downtime for $SERVICES in $ENVIRONMENT"

  for SERVICE in $SERVICES ; do
    datadog_schedule_downtime_single $SERVICE $DOWNTIME_MINUTES
  done
}
1793
+
1794
# Schedule one Datadog monitor downtime window for a service, starting now
# and lasting DOWNTIME_MINUTES. Requires DD_API_KEY and DD_APP_KEY.
datadog_schedule_downtime_single() {
  local SERVICE=$1
  local DOWNTIME_MINUTES=$2

  # Epoch seconds: start now, end after the requested number of minutes.
  START=$(date +%s)
  END=$((START + 60 * DOWNTIME_MINUTES))

  log "scheduling a downtime on datadog for $SERVICE ($DOWNTIME_MINUTES minutes)"
  # BUGFIX: the JSON body was wrapped in single quotes, so $START, $END,
  # $SERVICE and $DOWNTIME_MINUTES were sent as literal text and the API
  # rejected (or misread) the payload. An expanding heredoc fixes that.
  curl -X POST "https://api.datadoghq.com/api/v1/downtime" \
    -H "Content-Type: application/json" \
    -H "DD-API-KEY: ${DD_API_KEY}" \
    -H "DD-APPLICATION-KEY: ${DD_APP_KEY}" \
    -d "$(cat <<EOF
{
  "active": true,
  "downtime_type": 0,
  "start": $START,
  "end": $END,
  "message": "CA Deployment - performance for $SERVICE may be lower for next $DOWNTIME_MINUTES min",
  "monitor_tags": [
    "service:$SERVICE",
    "performance"
  ],
  "scope": [
    "env:production"
  ],
  "timezone": "Europe/Paris"
}
EOF
)"
}
1824
+ #!/usr/bin/env bash
1825
+
1826
# Build and push the application image tagged with the commit SHA, unless
# that tag already exists in the registry. $1 is a space-separated list of
# env-var names to forward as --build-arg NAME=value.
# Reads: DOCKER_REGISTRY_ID, APPLICATION, CI_COMMIT_SHORT_SHA, DOCKER_IMAGE,
#        DOCKER_IMAGE_SHA, DOCKER_STAGE_PATH.
docker_build_push() {
  read -r -a BUILD_ARGS <<< "$1"
  # Always stamp the image with the current git revision.
  DOCKER_BUILD_ARGS="--build-arg VCS_REF=$(git rev-parse --short HEAD)"
  for ARG_NAME in "${BUILD_ARGS[@]}"
  do
    # ${!ARG_NAME} is bash indirection: the value of the named env var.
    DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS --build-arg $ARG_NAME=${!ARG_NAME}"
  done

  if ! image_exists $DOCKER_REGISTRY_ID $APPLICATION $CI_COMMIT_SHORT_SHA ; then
    # Best-effort pull so --cache-from can reuse layers; ignore a miss.
    docker pull $DOCKER_IMAGE || true
    docker build $DOCKER_BUILD_ARGS -t $DOCKER_IMAGE_SHA --cache-from $DOCKER_IMAGE $DOCKER_STAGE_PATH
    docker push $DOCKER_IMAGE_SHA
  fi
}
1840
+
1841
# Re-tag an image in the registry without pulling it: fetch the manifest of
# OLD_TAG and PUT it back under NEW_TAG via the registry HTTP API.
# inspired by https://dille.name/blog/2018/09/20/how-to-tag-docker-images-without-pulling-them/
docker_promote() {
  # Sanitize tags: any char outside [0-9a-zA-Z-.] becomes '_'.
  OLD_TAG=${1//[^0-9a-zA-Z-.]/_}
  NEW_TAG=${2//[^0-9a-zA-Z-.]/_}
  echo "promoting from $OLD_TAG to $NEW_TAG"
  TOKEN=$(aws_ecr_token)
  CONTENT_TYPE="application/vnd.docker.distribution.manifest.v2+json"
  MANIFESTS_API="https://${DOCKER_REGISTRY}/v2/${APPLICATION}/manifests"

  if MANIFEST=$(curl --fail -H "Authorization: Basic $TOKEN" -H "Accept: ${CONTENT_TYPE}" "$MANIFESTS_API/${OLD_TAG}"); then
    echo "authenticated on $MANIFESTS_API"
  else
    return 1
  fi
  if curl --fail -H "Authorization: Basic $TOKEN" -X PUT -H "Content-Type: ${CONTENT_TYPE}" -d "${MANIFEST}" "$MANIFESTS_API/$NEW_TAG" ; then
    echo "promoted ${APPLICATION} from $OLD_TAG to $NEW_TAG"
  else
    return 2
  fi
}
1861
+
1862
# Verify that every image in a comma-separated list exists in the registry
# at $VERSION; fails fast on the first missing one.
ensure_images_exists() {
  for CANDIDATE_IMAGE in $(tr "," "\n" <<< "$1"); do
    image_exists ${DOCKER_REGISTRY_ID} ${CANDIDATE_IMAGE} ${VERSION} || return 1
  done
}
1867
+ #!/usr/bin/env bash
1868
+
1869
# Resolve a yq path against the layered config files, most specific first:
# <env>-secrets.yaml, then <env>.yaml, then common.yaml. Prints the first
# non-null hit; with --optional an absent key prints "" instead of failing.
# Intended to be called inside $(...) — the 'exit's end that subshell only.
# Usage: extract_yaml_config_variable --environment ENV --configs-path DIR \
#          --variable .path.to.key [--optional]
extract_yaml_config_variable() {
  # Output is captured by callers, so never trace and never abort mid-lookup.
  set +e
  set +x

  check_args "--environment" $1
  shift
  ENVIRONMENT=$1
  shift

  check_args "--configs-path" $1
  shift
  CONFIGS_PATH=$1
  shift

  check_args "--variable" $1
  shift
  VARIABLE=$1
  shift

  [[ "$1" == "--optional" ]] && OPTIONAL=true || OPTIONAL=false

  # All three layers must exist, even if the key ends up in only one.
  if [ ! -f ${CONFIGS_PATH}/common.yaml ]; then
    echo >&2 "Missing $CONFIGS_PATH/common.yaml configuration file"
    exit 1
  fi
  if [ ! -f ${CONFIGS_PATH}/${ENVIRONMENT}.yaml ]; then
    echo >&2 "Missing $CONFIGS_PATH/$ENVIRONMENT.yaml configuration file"
    exit 1
  fi
  if [ ! -f ${CONFIGS_PATH}/${ENVIRONMENT}-secrets.yaml ]; then
    echo >&2 "Missing $CONFIGS_PATH/$ENVIRONMENT-secrets.yaml configuration file"
    exit 1
  fi

  # Fall through the layers: secrets -> env -> common.
  result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/$ENVIRONMENT-secrets.yaml")
  if [ $? -ne 0 ] || [ "$result" = "null" ]; then
    result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/$ENVIRONMENT.yaml")
    if [ $? -ne 0 ] || [ "$result" = "null" ]; then
      result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/common.yaml")
      if [ $? -ne 0 ] || [ "$result" = "null" ]; then
        if [ $OPTIONAL = true ]; then
          echo ""
          exit 0
        else
          echo >&2 "Missing path $VARIABLE in $CONFIGS_PATH/$ENVIRONMENT-secrets.yaml, $CONFIGS_PATH/$ENVIRONMENT.yaml or $CONFIGS_PATH/common.yaml"
          exit 1
        fi
      fi
    fi
  fi
  echo ${result}
}
1921
+ #!/usr/bin/env bash
1922
+
1923
flyway_clean() {
  # Run `flyway clean` (drops every object in the schema) against a Postgres
  # database from inside the cluster, using a throwaway flyway/flyway pod
  # that is attached interactively and removed on completion.
  # $1 host, $2 port, $3 database, $4 user, $5 password
  local host="$1" port="$2" database="$3" user="$4" password="$5"

  kubectl run -it --rm flywayclean \
    --image=flyway/flyway \
    --restart=Never \
    -- \
    -cleanDisabled=false \
    -url="jdbc:postgresql://$host:$port/$database" \
    -user="$user" \
    -password="$password" \
    clean
}
1940
+
1941
#!/usr/bin/env bash

# Tag of the boxfuse/flyway image used by flyway_migrate's migration pod.
FLYWAY_VERSION="5.2.4"
1944
+
1945
+
1946
get_yaml_variable() {
  # Thin wrapper: read a config value for the current $ENVIRONMENT from the
  # ./deploy directory. Extra arguments (e.g. --optional) are forwarded
  # after --variable, hence the deliberately unquoted $@.
  extract_yaml_config_variable --environment ${ENVIRONMENT} --configs-path $(pwd)/deploy --variable $@
}
1949
+
1950
init_migrate_db() {
  # Provision the service database for $APPLICATION in $ENVIRONMENT
  # (main account, optional read-only account) then run Flyway migrations.
  # Requires APPLICATION, ENVIRONMENT, FLYWAY_VERSION, MIGRATION_SQL_PATH.
  set -e

  check_env_vars 4 "APPLICATION" "ENVIRONMENT" "FLYWAY_VERSION" "MIGRATION_SQL_PATH"

  PG_YAML_PATH=".${APPLICATION}config.postgres"

  DB_PORT="5432"
  DB_HOST=$(get_yaml_variable "${PG_YAML_PATH}.host")
  DB_INIT_USERNAME=$(get_yaml_variable "${PG_YAML_PATH}.initUsername")
  DB_INIT_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.initPassword")
  DB_DATABASE=$(get_yaml_variable "${PG_YAML_PATH}.database")
  DB_USER=$(get_yaml_variable "${PG_YAML_PATH}.user")
  DB_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.password")
  DB_URL="jdbc:postgresql://${DB_HOST}:${DB_PORT}/${DB_DATABASE}"

  # Read-only credentials are optional; both must be present to provision.
  DB_RO_USER=$(get_yaml_variable "${PG_YAML_PATH}.readOnlyUser" --optional)
  DB_RO_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.readOnlyPassword" --optional)

  # A runner-provided KUBECONFIG would override configure_kubectl_for_ci's setup.
  unset KUBECONFIG

  configure_kubectl_for_ci ${ENVIRONMENT}

  kube_init_service_database \
    --namespace ${ENVIRONMENT} \
    --service ${APPLICATION} \
    --db_host ${DB_HOST} \
    --db_port ${DB_PORT} \
    --db_init_username ${DB_INIT_USERNAME} \
    --db_init_password ${DB_INIT_PASSWORD} \
    --db_database ${DB_DATABASE} \
    --db_username ${DB_USER} \
    --db_password ${DB_PASSWORD}

  # BUGFIX: the second condition used to re-test DB_RO_USER, so a configured
  # readOnlyUser with a missing readOnlyPassword slipped through and called
  # kube_init_database_readonly_account with an empty password.
  if [[ -n "$DB_RO_USER" ]] && [[ -n "$DB_RO_PASSWORD" ]]; then
    kube_init_database_readonly_account \
      --namespace ${ENVIRONMENT} \
      --service ${APPLICATION} \
      --db_connection "$DB_INIT_USERNAME:$DB_INIT_PASSWORD@$DB_HOST:$DB_PORT" \
      --db_database ${DB_DATABASE} \
      --db_readonly_username ${DB_RO_USER} \
      --db_readonly_password ${DB_RO_PASSWORD}
  fi

  flyway_migrate \
    --environment ${ENVIRONMENT} \
    --namespace ${ENVIRONMENT} \
    --service ${APPLICATION} \
    --db_url ${DB_URL} \
    --db_user ${DB_USER} \
    --db_password ${DB_PASSWORD} \
    --flyway_version ${FLYWAY_VERSION} \
    --flyway_sql_folder $(pwd)/${MIGRATION_SQL_PATH}
}
2004
+
2005
flyway_migrate() {
  # Run Flyway migrations inside the cluster: publish the SQL files as a
  # ConfigMap, mount it into a one-shot boxfuse/flyway pod, run `migrate`
  # attached, then delete the ConfigMap.
  set -e

  extract_args 8 \
    environment namespace service db_url db_user db_password flyway_version flyway_sql_folder $*

  echo "running flyway migrations for service $service in environment $environment namespace $namespace for db_url $db_url with user $db_user"
  echo "migration files expected in $flyway_sql_folder"

  CONFIGMAP_NAME="$service-flyway-migration-sql"
  POD_NAME="$service-flyway-migration"

  configure_kubectl_for_ci $environment

  # Remove leftovers from a previous (possibly failed) run before recreating.
  kubectl -n $namespace delete configmap $CONFIGMAP_NAME --ignore-not-found
  kubectl -n $namespace delete pod $POD_NAME --ignore-not-found
  kubectl -n $namespace create configmap $CONFIGMAP_NAME --from-file=$flyway_sql_folder

  # --image is literally "ignored": the real container spec comes from --overrides.
  kubectl -n $namespace run $POD_NAME --image ignored -ti --restart=Never --attach --rm --overrides='
  {
  "spec":{
  "containers":[
  {
  "name":"'$POD_NAME'",
  "image":"boxfuse/flyway:'$flyway_version'",
  "command":["flyway", "-url='$db_url'", "-user='$db_user'", "-password='$db_password'", "migrate"],
  "volumeMounts":[
  {
  "name":"sql",
  "mountPath":"/flyway/sql"
  }
  ]
  }
  ],
  "volumes":[
  {
  "name":"sql",
  "configMap":{
  "name":"'$CONFIGMAP_NAME'"
  }
  }
  ]
  }
  }
  '

  kubectl -n $namespace delete configmap $CONFIGMAP_NAME
}
2053
+
2054
+ #!/usr/bin/env bash
2055
+
2056
record_git_commit() {
  # Stamp the literal placeholder GIT_COMMIT with "<ref-slug>-<short-sha>"
  # in every file listed (whitespace-separated) in $GIT_COMMIT_FILES.
  # '&' is the sed delimiter, so slashes in branch slugs are safe.
  local target
  for target in $GIT_COMMIT_FILES; do
    sed -i 's&GIT_COMMIT&'"${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHORT_SHA}&" "$target"
  done
}
2061
+
2062
gitlab_import_pgp_key() {
  # Import the CI PGP private key ($GITLAB_PGP_PRIVATE_KEY) into the local
  # gpg keyring. Returns 1 when the variable is not provided.
  if [ "$GITLAB_PGP_PRIVATE_KEY" != "" ]
  then
    # BUGFIX: the original assigned a process substitution to a variable
    # (KEY_FOLDER=<(echo ...)); that /dev/fd path is closed as soon as the
    # assignment statement finishes, so the subsequent `gpg --import` read a
    # dead descriptor. Feed the key to gpg via stdin instead.
    echo "$GITLAB_PGP_PRIVATE_KEY" | gpg --import > /dev/null
  else
    echo '$GITLAB_PGP_PRIVATE_KEY is not set'
    return 1
  fi
}
2072
+
2073
git_reveal() {
  # Import the CI PGP key, then decrypt the given file to stdout.
  # BUGFIX: quote the filename so paths containing spaces or glob
  # characters are not word-split before reaching gpg.
  gitlab_import_pgp_key
  gpg --decrypt "$1"
}
2077
+ #!/usr/bin/env bash
2078
+
2079
helm_deploy_v3() {
  # Convenience wrapper around deploy_chart_v3: deploy chart deploy/<app>
  # into namespace <env>, pinning global.version to <version>.
  # $1 application, $2 environment (also used as namespace), $3 version
  APPLICATION=$1
  ENVIRONMENT=$2
  VERSION=$3
  deploy_chart_v3 \
    --path_configs deploy \
    --path_chart deploy/$APPLICATION \
    --application $APPLICATION \
    --environment $ENVIRONMENT \
    --namespace $ENVIRONMENT \
    --helm_extra_args --set global.version=$VERSION
}
2091
+
2092
deploy_chart_v3() {
  # Deploy a Helm v3 chart using the layered config files
  # common.yaml + <namespace>.yaml + <namespace>-secrets.yaml, then emit a
  # Datadog deploy event. All --flags are positional and mandatory except
  # the trailing --helm_extra_args.
  set -e
  set -x

  # Rigid parsing, but all args are mandatory (expect last) and flexible order is unnecessary
  check_args "--path_configs" $1; shift
  path_configs=$1; shift
  check_args "--path_chart" $1; shift
  path_chart=$1; shift
  check_args "--application" $1; shift
  application=$1; shift
  check_args "--environment" $1; shift
  environment=$1; shift
  check_args "--namespace" $1; shift
  namespace=$1; shift
  if [ $# -ne 0 ]; then
    check_args "--helm_extra_args" $1; shift
    helm_extra_args=$*
  fi

  echo "================================"
  echo " Deploying $application"
  echo " - Environment: $environment"
  echo " - Namespace: $namespace"
  echo "================================"

  root_path=$(pwd)

  # Check the configs exists
  check_config_file ${root_path}/${path_configs}/common.yaml
  check_config_file ${root_path}/${path_configs}/${namespace}.yaml
  check_config_file ${root_path}/${path_configs}/${namespace}-secrets.yaml

  # Check the chart exists
  if [ ! -d ${root_path}/${path_chart} ] || [ ! -f ${root_path}/${path_chart}/Chart.yaml ]; then
    echo "Bad Chart $root_path/$path_chart : does not exists or missing Chart.yaml"
    print_usage
    exit 1
  fi

  # Unset Kubectl configuration made via the KUBECONFIG env variable
  # it would override the config made by configure_kubectl_for
  # for example, using Gitlab runners in Kubernetes sets this variable and causes conflict
  unset KUBECONFIG

  # Configure Kubectl
  configure_kubectl_for_ci ${environment}

  # Configure helm3
  helm3 version --namespace ${namespace} || true
  # helm3 stable repo have changed and must be updated manually, in versions < v2.17.0
  helm3 repo add colisweb s3://colisweb-helm-charts/colisweb
  helm3 repo add stable https://charts.helm.sh/stable
  helm3 repo update
  helm3 dependency update ${root_path}/${path_chart}

  # Gather values/*.yaml files into " -f <file>" arguments for helm.
  values_path="${root_path}/${path_chart}/values"
  values_files=''
  [ -d $values_path ] && values_files=$(find $values_path -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')

  # Deploy — later -f files override earlier ones, secrets last.
  helm3 upgrade --install \
    --namespace ${namespace} \
    ${values_files} \
    -f ${root_path}/${path_configs}/common.yaml \
    -f ${root_path}/${path_configs}/${namespace}.yaml \
    -f ${root_path}/${path_configs}/${namespace}-secrets.yaml \
    ${helm_extra_args} \
    ${application} ${root_path}/${path_chart}

  #send event to dd
  PUBLISHED_VERSION="$CI_COMMIT_REF_NAME-$CI_COMMIT_SHA"
  emit_datadog_deploy_event --environment $environment --service $application --version $PUBLISHED_VERSION

  echo "================================"
  echo " Deployed $application"
  echo " - Environment: $environment"
  echo " - Namespace: $namespace"
  echo "================================"

  set +x
}
2176
+
2177
verify_deployments_v3() {
  # Wait, in parallel, for every Deployment of a Helm release to finish
  # rolling out. Usage:
  #   verify_deployments_v3 staging price
  #   verify_deployments_v3 -t 15m testing price     (default timeout: 5m)
  set -e

  if [ "$1" = "-t" ] ; then
    TIMEOUT=$2
    shift
    shift
  else
    TIMEOUT=5m
  fi

  NAMESPACE=$1
  RELEASE=$2

  # Get all Deployments names from the deployed chart
  DEPLOYMENTS=(
    $(helm3 get manifest --namespace $NAMESPACE $RELEASE | yq -rs '.[] | select(.kind=="Deployment") | .metadata.name')
  )

  echo "verifying on $NAMESPACE deployments ${DEPLOYMENTS[@]} with a timeout of $TIMEOUT"

  # Fan out one rollout-status watcher per deployment.
  PIDS=()
  for D in "${DEPLOYMENTS[@]}"; do
    kubectl -n ${NAMESPACE} rollout status deployment ${D} --timeout=${TIMEOUT} &
    PIDS+=($!)
  done

  # BUGFIX: under `set -e` a bare `wait $P` on a failed job aborted the
  # function before the old `if [ $? -ne 0 ]` check could run, so the
  # diagnostic message was never printed. Test the wait status directly.
  for P in "${PIDS[@]}"; do
    if ! wait "$P"; then
      echo "at least one deployment failed or timed out (after $TIMEOUT)"
      exit 1
    fi
  done
}
2218
+
2219
print_usage() {
  # Print deploy_chart's calling convention (output identical to the
  # original echo chain, including the trailing blank line).
  cat <<'USAGE'
Usage:
deploy_chart \
 --path_configs <path to .yaml namespaces and secret config files>
 --path_chart <path to Helm Chart>
 --application <application name used by Helm>
 --environment <infrastructure environment>
 --namespace <namespace>
 --helm-extra-args <extra args to pass to helm, ex: --set my.value=42 --set your.setting=on>

USAGE
}
2230
+
2231
check_config_file() {
  # Abort the deploy when a required configuration file is absent.
  local filename=$1
  # BUGFIX: the test was `[ ! -f $(unknown) ]` — `unknown` is not a command,
  # its substitution expanded to nothing, and the resulting `[ ! -f ]` is
  # never true, so missing config files were silently accepted. Test the
  # actual argument instead.
  if [ ! -f "$filename" ]; then
    echo "Missing $filename configuration file"
    print_usage
    exit 1
  fi
}
2239
+
2240
+ #!/usr/bin/env bash
2241
+
2242
configure_kubectl_for_ci() {
  # Download the kubeconfig for one of the known infrastructure
  # environments from the infra project's CI artifacts (authenticated with
  # $GITLAB_PAT) and symlink it as the active ~/.kube/config.
  # Exits the shell on any failure.
  if [ -z "${GITLAB_PAT}" ]; then
    echo "Cannot configure kubectl: no GITLAB_PAT configured"
    exit 1
  fi

  infra_env="$1"
  valid_envs="[testing][staging][production][performance][tests][recette]"
  # Quoting fix: an empty/spaced $infra_env can no longer mangle the pattern.
  if ! echo "$valid_envs" | grep -q "\[${infra_env}\]"; then
    echo "Cannot configure kubectl for invalid env : $infra_env"
    echo "choose one of $valid_envs"
    exit 1
  fi

  mkdir -p ~/.kube
  curl -fsS \
    --header "PRIVATE-TOKEN: $GITLAB_PAT" \
    "https://gitlab.com/api/v4/projects/8141053/jobs/artifacts/$infra_env/raw/$infra_env.kubeconfig?job=4_kubernetes_config_output" \
    > ~/.kube/"$infra_env".kubeconfig

  curl_return_code=$?
  if [ ${curl_return_code} -ne 0 ]; then
    echo "Cannot configure kubectl for $infra_env, get configuration failed with code $curl_return_code"
    exit ${curl_return_code}
  fi

  rm -f ~/.kube/config
  ln -s ~/.kube/"$infra_env".kubeconfig ~/.kube/config
  echo "Configured kubectl for env : $infra_env"
}
2274
notify_new_deployment() {
  # Post a deployment status message to a chat webhook.
  # $1: webhook URL (defaults to $DEFAULT_CHAT_URL).
  jq --version || (apt update && apt install -y jq)

  CHAT_URL=${1:-$DEFAULT_CHAT_URL}

  STATUS=$(echo $CI_JOB_STATUS | tr '[:lower:]' '[:upper:]' )
  ENV_NAME=$(echo $ENVIRONMENT | tr '[:lower:]' '[:upper:]' )

  JOB_LINK="<$CI_JOB_URL| $CI_JOB_NAME $CI_JOB_ID>"

  DESCRIPTION="
  $STATUS : Deployment for $CI_PROJECT_NAME on $ENV_NAME
  $JOB_LINK
  $CI_COMMIT_TITLE
  "

  # jq --arg produces a correctly escaped {"text": ...} payload.
  JSON_MESSAGE=$(jq -n --arg text "$DESCRIPTION" '{text: $text }')
  curl -X POST $CHAT_URL \
    --header "Content-Type: application/json" \
    --data "$JSON_MESSAGE"
}
2295
notify_new_version() {
  # Announce a tagged release on a chat webhook, including the release
  # notes fetched from the GitLab Releases API (markdown links rewritten to
  # the chat's <url|label> form). $1: kind label, $2: webhook URL.
  # NOTE(review): `exit 0` (not `return`) when no tag is set — terminates
  # the calling shell, apparently intended for tag-only CI jobs.

  ! test -z $CI_COMMIT_TAG || exit 0

  jq --version || (apt update && apt install -y jq)

  KIND=$1
  CHAT_URL=${2:-$DEFAULT_CHAT_URL}

  STATUS=$(echo $CI_JOB_STATUS | tr '[:lower:]' '[:upper:]' )
  ENV_NAME=$(echo $ENVIRONMENT | tr '[:lower:]' '[:upper:]' )
  TITLE="$ENV_NAME *$STATUS* $KIND for version *$CI_COMMIT_TAG* of *$CI_PROJECT_NAME* "

  RELEASE_URL="https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/releases/$CI_COMMIT_TAG"

  # Strip surrounding quotes, convert [label](url) links, unescape \n.
  NOTES=$(curl --header "PRIVATE-TOKEN: $GITLAB_TOKEN" $RELEASE_URL |
    jq .description |
    sed -e 's/^"//' -e 's/"$//' |
    sed -E 's/\[([^]]+)\]\(([^)]+)\)/<\2|\1>/g' |
    sed -E 's/\\n/\'$'\n/g')

  JOB_LINK="<$CI_JOB_URL| $CI_JOB_NAME $CI_JOB_ID>"

  DESCRIPTION="
  $TITLE
  $JOB_LINK
  $NOTES
  "

  JSON_MESSAGE=$(jq -n --arg text "$DESCRIPTION" '{text: $text }')
  curl -X POST $CHAT_URL \
    --header "Content-Type: application/json" \
    --data "$JSON_MESSAGE"
}
2329
+ #!/usr/bin/env bash
2330
+
2331
skip_sbt_compile_cache() {
  # Succeeds when the sbt compile cache should be skipped: either we are on
  # master/develop, or the `project` directory differs from the comparison
  # branch (MR target branch, falling back to the default branch).
  COMPARED_BRANCH="${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-$CI_DEFAULT_BRANCH}"
  echo "branch to compare to: $COMPARED_BRANCH"
  git fetch origin $COMPARED_BRANCH
  echo "fetched $COMPARED_BRANCH"
  [[ "$CI_COMMIT_REF_NAME" =~ ^(master|develop)$ || $(git diff origin/$COMPARED_BRANCH --exit-code -- project) ]]
}
2338
+ #!/usr/bin/env bash
2339
+
2340
+ # in case of trouble with functions for update history during import
2341
+ # https://stackoverflow.com/questions/56729192/pg-restore-fails-when-trying-to-create-function-referencing-table-that-does-not
2342
+
2343
+ # example: clone_databases --source_env testing --destination_env recette --services "order,notification,parcel,ikea"
2344
clone_databases() {
  # Copy the databases of the listed services from one environment to
  # another: dump everything from --source_env, then import into
  # --destination_env.
  # example: clone_databases --source_env testing --destination_env recette --services "order,notification,parcel,ikea"
  export USERNAME="database-cloner"

  set -e

  extract_args 3 source_env destination_env services $*

  dump_databases "$source_env" "$services"
  import_databases "$destination_env" "$services"
}
2354
+
2355
dump_databases() {
  # pg_dump each service's database from environment $1 to /tmp/db_dump_<service>.sql,
  # reading credentials from the service repo's decrypted deploy configs and
  # connecting through the local port forwarded by database_k8s.
  # $1: environment, $2: comma-separated service list.
  local env="$1"
  local services=$(echo -n "$2" | tr ',' '\n')

  database_k8s_output_dump_path="/tmp/database_k8s_output_dump"

  configure_kubectl_for "$env"
  set +e
  database_k8s "$env" > "$database_k8s_output_dump_path"
  set -e

  source_pg_local_port=$(extract_pg_local_port "$database_k8s_output_dump_path")

  for service in $services
  do
    service_path="/tmp/$service"

    set +e
    git clone "git@gitlab.com:colisweb/back/$service.git" "$service_path"
    set -e

    if cd "$service_path"; then
      echo "dump the database for service $service.."

      git secret reveal -f

      PG_YAML_PATH=".${service}config.postgres"

      # BUGFIX: the flag is --configs-path; --configsPath made
      # extract_yaml_config_variable's check_args fail on every lookup.
      SOURCE_DB_DATABASE=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.database")
      SOURCE_DB_USER=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.user")
      SOURCE_DB_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.password")

      export PGPASSWORD="$SOURCE_DB_PASSWORD"

      DUMP_PATH="/tmp/db_dump_${service}.sql"
      pg_dump --no-owner -h localhost -p "$source_pg_local_port" -U "$SOURCE_DB_USER" "$SOURCE_DB_DATABASE" > "$DUMP_PATH"

      cd ..
      rm -rf "$service_path"
    else
      echo "WARN: failed to clone $service - skipping"
    fi
  done
}
2399
+
2400
import_databases() {
  # Recreate and import each service's database into environment $1 from
  # the dumps produced by dump_databases, after an interactive confirmation
  # (the target database is flyway-cleaned first).
  # $1: environment, $2: comma-separated service list.
  local env="$1"
  local services=$(echo -n "$2" | tr ',' '\n')

  database_k8s_output_import_path="/tmp/database_k8s_output_import"

  configure_kubectl_for "$env"
  set +e
  database_k8s "$env" > "$database_k8s_output_import_path"
  set -e

  destination_pg_local_port=$(extract_pg_local_port "$database_k8s_output_import_path")

  for service in $services
  do
    service_path="/tmp/$service"

    set +e
    git clone "git@gitlab.com:colisweb/back/$service.git" "$service_path"
    set -e

    if cd "$service_path"; then
      echo "create and import database for $service.."

      git secret reveal -f

      PG_YAML_PATH=".${service}config.postgres"

      # BUGFIX: the flag is --configs-path; --configsPath made
      # extract_yaml_config_variable's check_args fail on every lookup.
      DB_PORT="5432"
      DB_HOST=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.host")
      DB_INIT_USERNAME=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.initUsername")
      DB_INIT_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.initPassword")
      DB_DATABASE=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.database")
      DB_USER=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.user")
      DB_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.password")

      kube_init_service_database \
        --namespace ${env} \
        --service ${service} \
        --db_host ${DB_HOST} \
        --db_port ${DB_PORT} \
        --db_init_username ${DB_INIT_USERNAME} \
        --db_init_password ${DB_INIT_PASSWORD} \
        --db_database ${DB_DATABASE} \
        --db_username ${DB_USER} \
        --db_password ${DB_PASSWORD}

      echo "WARN: A complete clean of $DB_DATABASE on $DB_HOST will be operated"
      read -rsn1 -p"Press any key to continue";echo
      flyway_clean "$DB_HOST" "$DB_PORT" "$DB_DATABASE" "$DB_USER" "$DB_PASSWORD"

      DUMP_PATH="/tmp/db_dump_${service}.sql"
      export PGPASSWORD="$DB_PASSWORD"
      set +e
      # BUGFIX: `-p "$DB_DATABASE"` passed the database name as the *port*
      # flag; put the database in the connection URI instead and keep the
      # forwarded local port where it belongs.
      psql "postgres://$DB_USER@127.0.0.1:$destination_pg_local_port/$DB_DATABASE" -f "$DUMP_PATH"
      set -e

      cd ..
      rm -rf "$service_path"
    else
      echo "WARN: failed to clone $service - skipping"
    fi
  done
}
2464
+
2465
extract_pg_local_port() {
  # Print the locally forwarded Postgres port from a database_k8s output
  # file ($1): takes the line containing "postgres@127.0.0.1:<port>" and
  # strips everything around the port. (Useless `cat |` removed.)
  grep 'postgres@127.0.0.1:' "$1" | sed 's/.*postgres@127.0.0.1:\(.*[0-9]\).*/\1/g'
}
2468
+ #!/usr/bin/env bash
2469
+
2470
emit_datadog_deploy_event() {
  # Send a "deployment" event to the Datadog Events API and report whether
  # Datadog acknowledged it. Requires $DD_API_KEY.
  extract_args 3 environment service version $*
  check_env_vars 1 "DD_API_KEY"

  response=$(
    curl -X POST -H "Content-type: application/json" \
      -d '{
      "title": "deploying '"$service"' to '"$environment"'",
      "text": "deploying '"$service"' version '"$version"' to '"$environment"'",
      "priority": "normal",
      "tags": ["service:'"$service"' ", "env:'"$environment"'" ,"action:'"deployment"'"] ,

      "alert_type": "Info"
      }' \
      "https://api.datadoghq.com/api/v1/events?api_key=$DD_API_KEY"
  )

  # A present (non-zero) event id means Datadog accepted the event.
  EventID=$(echo $response | jq ".event.id")
  url=$(echo $response | jq ".event.url")

  if [[ $EventID -ne 0 ]]; then
    echo "event successfully created check in datadog UI : $url"
  else
    echo " failed to create event "
    exit 1
  fi
}
2498
+
2499
+ #!/usr/bin/env bash
2500
+
2501
# DEPRECATED
emit_datadog_error_events() {
  # Send an "Error" event to the Datadog Events API. Requires $DD_API_KEY.
  # --title, --text, --priority, --environment are all mandatory.
  set -e
  extract_args 4 title text priority environment $*
  check_env_vars 1 "DD_API_KEY"

  curl -X POST -H "Content-type: application/json" \
    -d '{
    "title": "'"$title"'",
    "text": "'"$text"'",
    "priority": "'"$priority"'",
    "tags": ["environment:'"$environment"'"],
    "alert_type": "Error"
    }' \
    "https://api.datadoghq.com/api/v1/events?api_key=$DD_API_KEY"
}
2517
+
2518
+ #!/usr/bin/env bash
2519
terraform_init() {
  # Enter the terraform section directory ($1), initialise it, and select
  # (or create) the workspace for environment $2.
  SECTION=$1
  ENV=$2
  # BUGFIX: unquoted, unchecked `cd $SECTION` — on a bad/spaced path the
  # function would keep going and run terraform in the wrong directory.
  cd "$SECTION" || return 1
  terraform init -input=false
  terraform workspace select "$ENV" || terraform workspace new "$ENV"
}