@colisweb/rescript-toolkit 4.14.16 → 4.15.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -97,22 +97,19 @@ aws_ecr_token() {
97
97
  # will delete images older than 8 weeks
98
98
  delete_images() {
99
99
 
100
- aws --version | grep -q "^aws-cli/2" || (echo "You must have aws-cli v2 installed to use this script") ; return 1
101
-
102
100
  REPO=$1
103
101
  WEEKS=${2:-16}
104
102
 
105
- WEEKS_AGO=$(date -j -v-${WEEKS}w +%s)
103
+ WEEKS_AGO=$(date -v-${WEEKS}w +%F)
106
104
 
107
105
  #Get all ecr images
108
106
  IMAGES=$(aws ecr describe-images --repository-name $REPO --output json)
109
107
 
110
108
  #Filter unnecessary values and map `imagePushedAt` to EPOCH
111
- TIMED_IMAGES=$(echo $IMAGES | jq .'[]' | jq "map({imagePushedAt: (.imagePushedAt[0:19]+\"Z\" | fromdateiso8601), imageDigest: .imageDigest}) | sort_by(.imagePushedAt) | .[:-1]")
109
+ NON_LATEST_IMAGES=$(echo $IMAGES | jq '[.imageDetails[] | select(.imageTags | any(endswith("latest")) | not)]')
112
110
 
113
111
  #Filter on EPOCH
114
- OLD_IMAGES=$(echo $TIMED_IMAGES | jq "map(select (.imagePushedAt < $WEEKS_AGO)) | .[] " | jq -r '.imageDigest')
115
-
112
+ OLD_IMAGES=$(echo $NON_LATEST_IMAGES | jq --arg date $WEEKS_AGO '.[] | select(.imagePushedAt[0:10] < $date).imageDigest')
116
113
  while IFS= read -r IMAGE; do
117
114
  if [ "$IMAGE" != "" ]; then
118
115
  echo "Deleting $IMAGE from $REPO"
@@ -132,6 +129,153 @@ delete_images_all_repos() {
132
129
  done <<< "$REPOSITORIES"
133
130
  }
134
131
 
132
+ delete_old_cache() {
133
+ DATE=${1:-$(date -v-1m +%F)}
134
+ CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}
135
+
136
+ echo "deleting from cache $CACHE_BUCKET all older than $DATE"
137
+
138
+ aws_ecr_login
139
+
140
+ while read -r line; do
141
+ datum=$(echo $line | cut -c1-10)
142
+ if [[ "$datum" < "$DATE" ]] ; then
143
+ # Shell Parameter Expansion: ${parameter##word}
144
+ # Expands to the remainder of "parameter" after removing the longest prefix matching "word"
145
+ # Here we need the end of the string after "project/" (corresponding to the S3 gitlab project id and filename)
146
+ TO_DELETE="$CACHE_BUCKET${line##* project/}"
147
+ echo $TO_DELETE
148
+ aws s3 rm $TO_DELETE
149
+ fi
150
+ done < <(aws s3 ls $CACHE_BUCKET --recursive)
151
+ }
152
+
153
+ #!/usr/bin/env bash
154
+
155
+ # If Gitlab is down or pipelines are stuck, hotfixes still need to be deliverable
156
+ # This script will publish docker images to ECR using your current git HEAD, then deploy them to a given environment.
157
+ # Some local files (git-commit.conf and sentry.properties) will be modified; use with caution.
158
+ # No trace of this will appear on Gitlab (no releases, no pipelines, no tags).
159
+ # create_hotfix_scala $ENVIRONMENT $CHART_NAME [ $MODULE_NAME $MODULE_PATH $DEPLOYMENT ]
160
+ # create_hotfix_scala testing crm main modules/3-executables/main crm
161
+ # create_hotfix_scala testing notification \
162
+ # main-http modules/3-executables/main-http notification-http \
163
+ # main-consumer modules/3-executables/main-consumer notification-consumer
164
+
165
+ create_hotfix_scala() {
166
+
167
+ ENVIRONMENT=$1
168
+ CHART_NAME=$2
169
+ shift 2
170
+
171
+ SHORT_SHA=$(git rev-parse --short HEAD)
172
+ HOTFIX_TAG="hotfix-$SHORT_SHA"
173
+
174
+ gum confirm "Preparing $HOTFIX_TAG for $CHART_NAME ?" || exit
175
+ prepare_hotfix_scala $HOTFIX_TAG
176
+
177
+ gum confirm "Building $HOTFIX_TAG for $CHART_NAME ?" || exit
178
+ while [[ $# -gt 2 ]] ; do
179
+ build_hotfix_scala $HOTFIX_TAG "$1" "$2" "$3"
180
+ shift 3
181
+ done
182
+
183
+ gum confirm "Deploying $HOTFIX_TAG for $CHART_NAME ?" || exit
184
+ deploy_hotfix $CHART_NAME $ENVIRONMENT $HOTFIX_TAG
185
+ }
186
+
187
+ # Update local git-commit.conf and sentry.properties files using git short sha
188
+ prepare_hotfix_scala() {
189
+ HOTFIX_TAG=$1
190
+
191
+ git secret reveal -f
192
+ aws_ecr_login
193
+
194
+ COMMIT_CONF_FILES=$(find . -name "git-commit.conf")
195
+ SENTRY_PROPERTIES_FILES=$(find . -name "sentry.properties")
196
+
197
+ for file in $(echo "$COMMIT_CONF_FILES\n$SENTRY_PROPERTIES_FILES"); do
198
+ sed -i '' -e 's&GIT_COMMIT&'"$HOTFIX_TAG&" $file
199
+ done
200
+
201
+ }
202
+
203
+ # Build docker images locally and publish them to AWS ECR.
204
+ build_hotfix_scala() {
205
+
206
+ HOTFIX_TAG=$1
207
+ SBT_MODULE=$2
208
+ DOCKER_PATH=$3
209
+ DEPLOYMENT=$4
210
+
211
+ DOCKER_REGISTRY_ID="949316342391"
212
+ DOCKER_REGISTRY="$DOCKER_REGISTRY_ID.dkr.ecr.eu-west-1.amazonaws.com"
213
+ DOCKER_IMAGE=$DOCKER_REGISTRY/$DEPLOYMENT
214
+ HOTFIX_IMAGE=$DOCKER_IMAGE:$HOTFIX_TAG
215
+
216
+ #Build
217
+ sbt "project $SBT_MODULE" "Docker / stage"
218
+
219
+ #Publish
220
+ docker build --platform "linux/amd64" -t $HOTFIX_IMAGE --cache-from $DOCKER_IMAGE "$DOCKER_PATH/target/docker/stage"
221
+ docker push $HOTFIX_IMAGE
222
+
223
+ echo "Created hotfix $HOTFIX_IMAGE"
224
+ }
225
+
226
+ # Deploy the project in the given environment
227
+ deploy_hotfix() {
228
+ source $colisweb_scripts/ci/helm.sh
229
+
230
+ CHART_NAME=$1
231
+ ENVIRONMENT=$2
232
+ HOTFIX_TAG=$3
233
+
234
+ CONFIG_PATH=deploy
235
+ CHART_PATH=$CONFIG_PATH/$CHART_NAME
236
+ ROOT_PATH=$(pwd)
237
+
238
+ # Unset Kubectl configuration made via the KUBECONFIG env variable
239
+ # it would override the config made by configure_kubectl_for
240
+ # for example, using Gitlab runners in Kubernetes sets this variable and causes conflict
241
+ unset KUBECONFIG
242
+
243
+ # Configure Kubectl
244
+ configure_kubectl_for $ENVIRONMENT
245
+
246
+ # Avoid "no local-index.yaml" or "empty local-index.yaml" errors
247
+ cat > $HOME/Library/Caches/helm/repository/local-index.yaml <<EOT
248
+ apiVersion: v1
249
+ entries:
250
+ cronjob:
251
+ EOT
252
+
253
+ # The helm stable repo has changed and must be updated manually in helm versions < v2.17.0
254
+ helm3 repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
255
+ helm3 repo add stable https://charts.helm.sh/stable --force-update
256
+ helm3 repo update
257
+ helm3 dependency update ${ROOT_PATH}/${CHART_PATH}
258
+
259
+ # Gather values/*.yaml files
260
+ VALUES_PATH="${ROOT_PATH}/${CHART_NAME}/values"
261
+ VALUES_FILES=''
262
+ [ -d $VALUES_PATH ] && VALUES_FILES=$(find $VALUES_PATH -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')
263
+
264
+ # Deploy
265
+ helm3 upgrade --install \
266
+ --namespace ${ENVIRONMENT} \
267
+ ${VALUES_FILES} \
268
+ -f ${ROOT_PATH}/${CONFIG_PATH}/common.yaml \
269
+ -f ${ROOT_PATH}/${CONFIG_PATH}/${ENVIRONMENT}.yaml \
270
+ -f ${ROOT_PATH}/${CONFIG_PATH}/${ENVIRONMENT}-secrets.yaml \
271
+ --set global.version=$HOTFIX_TAG \
272
+ ${CHART_NAME} ${ROOT_PATH}/${CHART_PATH}
273
+
274
+
275
+ verify_deployments_v3 -t 10m $ENVIRONMENT $CHART_NAME
276
+
277
+ }
278
+
135
279
  #!/usr/bin/env bash
136
280
 
137
281
  image_exists() {
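The cache and hotfix helpers above are meant to be run from a shell where the toolkit is sourced; a hedged usage sketch (the date is a placeholder, the bucket falls back to the default shown in delete_old_cache, and the create_hotfix_scala arguments repeat the example from its own comments):

  delete_old_cache 2023-01-01
  create_hotfix_scala testing crm main modules/3-executables/main crm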
@@ -391,28 +535,29 @@ database_k8s() {
391
535
  HostName 127.0.0.1
392
536
  Port 2225
393
537
  LocalForward 24441 toutatis-testing-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
394
- LocalForward 25431 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
538
+ LocalForward 25431 toutatis-testing-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
539
+ LocalForward 25531 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
395
540
  Host bastion_staging
396
541
  HostName 127.0.0.1
397
542
  Port 2226
398
543
  LocalForward 24442 toutatis-staging-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
399
- LocalForward 25432 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
544
+ LocalForward 25432 toutatis-staging-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
400
545
  Host bastion_recette
401
546
  HostName 127.0.0.1
402
547
  Port 2228
403
548
  LocalForward 24446 toutatis-recette-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
404
- LocalForward 25436 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
549
+ LocalForward 25436 toutatis-recette-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
550
+ LocalForward 25536 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
405
551
  Host bastion_production
406
552
  HostName 127.0.0.1
407
553
  Port 2227
408
554
  LocalForward 24443 toutatis-production-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
409
- LocalForward 25433 api-production-rds-read-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
410
- LocalForward 25435 archive-ca.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
555
+ LocalForward 25433 toutatis-production-mysql-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
411
556
  EOF
412
557
  if [ "$MODE" = "production_rw" ] ; then
413
558
  cat >> "$bastion_config" <<EOF
414
559
  LocalForward 24444 toutatis-production-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
415
- LocalForward 25434 api-production-rds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
560
+ LocalForward 25434 toutatis-production-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
416
561
  EOF
417
562
  fi
418
563
 
@@ -432,7 +577,7 @@ psql_on_k8() {
432
577
  CONNECTION=$3
433
578
  shift 3
434
579
 
435
- kubectl -n $NAMESPACE run ${SERVICE}-database-init \
580
+ kubectl -n $NAMESPACE run ${SERVICE}-postgres-init \
436
581
  --image jbergknoff/postgresql-client \
437
582
  --restart=Never \
438
583
  --attach --rm \
@@ -443,13 +588,14 @@ psql_on_k8() {
443
588
 
444
589
  mysql_on_k8() {
445
590
  local namespace=$1
446
- local db_host=$2
447
- local db_port=$3
448
- local db_init_username=$4
449
- local db_init_password=$5
450
- local query=$6
451
-
452
- kubectl -n ${namespace} run datadog-database-init \
591
+ local service=$2
592
+ local db_host=$3
593
+ local db_port=$4
594
+ local db_init_username=$5
595
+ local db_init_password=$6
596
+ local query=$7
597
+
598
+ kubectl -n ${namespace} run ${service}-mysql-init \
453
599
  --image widdpim/mysql-client \
454
600
  --restart=Never \
455
601
  --attach --rm \
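mysql_on_k8 now takes the service name as its second argument and runs the query from a short-lived ${service}-mysql-init pod; a hedged invocation sketch, with host, credentials and query as placeholders:

  mysql_on_k8 testing datadog mysql.internal.example 3306 admin "$ADMIN_PASSWORD" 'SELECT 1;'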
@@ -533,11 +679,11 @@ kube_init_datadog_in_database() {
533
679
  echo " Initializing Datadog Agent Requiement for namespace $namespace"
534
680
  echo "======================="
535
681
 
536
- set -x
537
-
538
- echo "Checking if Database '$db_datadog_username' exists"
682
+ echo "Checking if User '$db_datadog_username' exists"
683
+ local service="datadog"
684
+ found_db_users=$(mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'SELECT user FROM mysql.user;')
539
685
  set +e
540
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'SELECT user FROM mysql.user;' | grep "^$db_datadog_username$"
686
+ echo $found_db_users | grep "^$db_datadog_username$"
541
687
  return_code=$?
542
688
  set -e
543
689
 
@@ -548,29 +694,29 @@ kube_init_datadog_in_database() {
548
694
 
549
695
  # All the queries below come from this doc: https://docs.datadoghq.com/fr/database_monitoring/setup_mysql/selfhosted/?tab=mysql56
550
696
 
551
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'CREATE USER '"$db_datadog_username"'@"%" IDENTIFIED BY '"'$db_datadog_password'"';'
697
+ mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'CREATE USER '"$db_datadog_username"'@"%" IDENTIFIED BY '"'$db_datadog_password'"';'
552
698
  echo "USER created $db_datadog_username"
553
699
 
554
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT REPLICATION CLIENT ON *.* TO datadog@"%" WITH MAX_USER_CONNECTIONS 5;'
700
+ mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT REPLICATION CLIENT ON *.* TO datadog@"%" WITH MAX_USER_CONNECTIONS 5;'
555
701
  echo "ALTER USER $db_datadog_username"
556
702
 
557
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT PROCESS ON *.* TO '"$db_datadog_username"'@"%";'
703
+ mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT PROCESS ON *.* TO '"$db_datadog_username"'@"%";'
558
704
  echo "Granted PROCESS for $db_datadog_username"
559
705
 
560
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT SELECT ON performance_schema.* TO '"$db_datadog_username"'@"%";'
706
+ mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT SELECT ON performance_schema.* TO '"$db_datadog_username"'@"%";'
561
707
  echo "Granted SELECT on performance_schema for $db_datadog_username"
562
708
 
563
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'CREATE SCHEMA IF NOT EXISTS datadog;'
709
+ mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'CREATE SCHEMA IF NOT EXISTS datadog;'
564
710
  echo "CREATE SCHEMA datadog"
565
711
 
566
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT EXECUTE ON datadog.* to '"$db_datadog_username"'@"%";'
712
+ mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT EXECUTE ON datadog.* to '"$db_datadog_username"'@"%";'
567
713
  echo "Granted 'GRANT EXECUTE for $db_datadog_username on datadog"
568
714
 
569
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'GRANT CREATE TEMPORARY TABLES ON datadog.* TO '"$db_datadog_username"'@"%";'
715
+ mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT CREATE TEMPORARY TABLES ON datadog.* TO '"$db_datadog_username"'@"%";'
570
716
  echo "Granted CREATE TEMPORARY TABLES for $db_datadog_username"
571
717
 
572
718
 
573
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.explain_statement;
719
+ mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.explain_statement;
574
720
  DELIMITER $$
575
721
  CREATE PROCEDURE datadog.explain_statement(IN query TEXT)
576
722
  SQL SECURITY DEFINER
@@ -583,7 +729,7 @@ kube_init_datadog_in_database() {
583
729
  DELIMITER ;'
584
730
  echo "CREATE PROCEDURE PROCEDURE datadog.explain_statement"
585
731
 
586
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS '"$db_datadog_username"'.explain_statement;
732
+ mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS '"$db_datadog_username"'.explain_statement;
587
733
  DELIMITER $$
588
734
  CREATE PROCEDURE '"$db_datadog_username"'.explain_statement(IN query TEXT)
589
735
  SQL SECURITY DEFINER
@@ -597,7 +743,7 @@ kube_init_datadog_in_database() {
597
743
  GRANT EXECUTE ON PROCEDURE '"$db_datadog_username"'.explain_statement TO datadog@"%";'
598
744
  echo "CREATE PROCEDURE on SCHEMA $db_datadog_schema for $db_datadog_username"
599
745
 
600
- mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.enable_events_statements_consumers;
746
+ mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.enable_events_statements_consumers;
601
747
  DELIMITER $$
602
748
  CREATE PROCEDURE datadog.enable_events_statements_consumers()
603
749
  SQL SECURITY DEFINER
@@ -615,6 +761,82 @@ kube_init_datadog_in_database() {
615
761
  echo "======================="
616
762
  }
617
763
 
764
+ kube_init_datadog_in_postgres_database() {
765
+ extract_args 7 namespace db_host db_port db_init_username db_init_password db_datadog_username db_datadog_password $*
766
+
767
+ local service="datadog"
768
+ local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"
769
+
770
+ echo "======================="
771
+ echo " Initializing $service Agent On PostgresSQL Database Requirement for namespace $namespace"
772
+ echo "======================="
773
+
774
+ echo "Checking if User '$db_datadog_username' exists"
775
+
776
+ set +e
777
+ if psql_on_k8 $namespace $service $db_connection -qtAc 'SELECT usename FROM pg_catalog.pg_user;' | grep "^$db_datadog_username$";
778
+ then
779
+ echo "User $db_datadog_username already exists - nothing to do"
780
+ else
781
+ echo "User $db_datadog_username does not exist - initializing"
782
+
783
+ set -e
784
+ psql_on_k8 $namespace $service $db_connection -qc 'CREATE USER '"$db_datadog_username"' WITH password '"'$db_datadog_password'"';'
785
+ echo "User created $db_datadog_username"
786
+
787
+ psql_on_k8 $namespace $service $db_connection -qc 'CREATE SCHEMA datadog;'
788
+ echo "Schema datadog created"
789
+
790
+ psql_on_k8 $namespace $service $db_connection -qc 'GRANT USAGE ON SCHEMA datadog TO datadog;'
791
+ echo "Granted usage for datadog schema to datadog"
792
+
793
+ psql_on_k8 $namespace $service $db_connection -qc 'GRANT USAGE ON SCHEMA public TO datadog;'
794
+ echo "Granted usage for public schema to datadog"
795
+
796
+ psql_on_k8 $namespace $service $db_connection -qc 'GRANT pg_monitor TO datadog;'
797
+ echo "Granted pg_monitor to datadog"
798
+
799
+ psql_on_k8 $namespace $service $db_connection -qc 'CREATE EXTENSION IF NOT EXISTS pg_stat_statements schema public;'
800
+ echo "Extension pg_stat_statements created"
801
+
802
+ local datadog_function_path="/tmp/datatog-explain-statement-function.sql"
803
+ local datadog_function="CREATE OR REPLACE FUNCTION datadog.explain_statement(
804
+ l_query TEXT,
805
+ OUT explain JSON
806
+ )
807
+ RETURNS SETOF JSON AS
808
+ \\$\\$
809
+ DECLARE
810
+ curs REFCURSOR;
811
+ plan JSON;
812
+
813
+ BEGIN
814
+ OPEN curs FOR EXECUTE pg_catalog.concat('EXPLAIN (FORMAT JSON) ', l_query);
815
+ FETCH curs INTO plan;
816
+ CLOSE curs;
817
+ RETURN QUERY SELECT plan;
818
+ END;
819
+ \\$\\$
820
+ LANGUAGE 'plpgsql'
821
+ RETURNS NULL ON NULL INPUT
822
+ SECURITY DEFINER;"
823
+
824
+ kubectl -n $namespace run $service-postgres-init \
825
+ --image jbergknoff/postgresql-client \
826
+ --restart=Never \
827
+ --attach --rm \
828
+ --command \
829
+ -- \
830
+ /bin/sh -c "echo -e \"$datadog_function\" > $datadog_function_path; psql postgresql://$db_connection -qf $datadog_function_path"
831
+
832
+ echo "Function datadog.explain_statement created"
833
+ fi
834
+
835
+ echo "======================="
836
+ echo " Database $service Initialization complete for namespace $namespace"
837
+ echo "======================="
838
+ }
839
+
618
840
  kube_init_service_database() {
619
841
 
620
842
  extract_args 9 namespace service db_host db_port db_init_username db_init_password db_database db_username db_password $*
@@ -637,17 +859,18 @@ kube_init_service_database() {
637
859
  psql_on_k8 $namespace $service $db_connection -c 'CREATE DATABASE '"$db_database"';'
638
860
  echo "DB created $db_database"
639
861
 
640
- psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_datadog_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
641
- echo "USER created $db_datadog_username"
862
+ psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
863
+ echo "USER created $db_username"
642
864
 
643
- psql_on_k8 $namespace $service $db_connection -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_datadog_username"';'
644
- echo "Granted all privileges for $db_datadog_username on $db_database"
865
+ psql_on_k8 $namespace $service $db_connection -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_username"';'
866
+ echo "Granted all privileges for $db_username on $db_database"
645
867
  fi
646
868
 
647
869
  echo "======================="
648
- echo " Database '$db_database' Initialization complete for namespace $namespace"
870
+ echo " Database '$db_database' Initialization complete for namespace $namespace"
649
871
  echo "======================="
650
872
  }
873
+
651
874
  #!/usr/bin/env bash
652
875
 
653
876
  # Port forward on the first matching pod
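Once kube_init_datadog_in_postgres_database above has run, the created user and helper function can be spot-checked from any psql session; a hedged sketch, with the connection details as placeholders:

  psql "postgresql://$DB_INIT_USER:$DB_INIT_PASSWORD@$DB_HOST:5432/postgres" -qtAc \
    "SELECT usename FROM pg_catalog.pg_user WHERE usename = 'datadog';"
  psql "postgresql://$DB_INIT_USER:$DB_INIT_PASSWORD@$DB_HOST:5432/postgres" -qtAc \
    "SELECT proname FROM pg_proc WHERE proname = 'explain_statement';"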
@@ -1165,6 +1388,36 @@ search_business() {
1165
1388
  curl $URL
1166
1389
  }
1167
1390
 
1391
+ #!/bin/bash
1392
+
1393
+ # source tolls.sh ; tolls antoine.thomas@colisweb.com
1394
+ function tolls() {
1395
+ USER=${1:-first.last@colisweb.com}
1396
+ FROM_DATE=${2:-"2023-02-01"}
1397
+ TO_DATE=${3:-"2023-02-28"}
1398
+
1399
+ USER=$(gum input --prompt "username : " --value $USER)
1400
+ TOKEN=$(./tour_details.sc login --user $USER --password $(gum input --password --placeholder password))
1401
+ [ "$TOKEN" != "" ] && echo "connected" || return 1
1402
+
1403
+ FROM_DATE=$(gum input --prompt "Date start : " --value $FROM_DATE)
1404
+ TO_DATE=$(gum input --prompt "Date end : " --value $TO_DATE)
1405
+ FILENAME="tours-${FROM_DATE}-TO-${TO_DATE}.json"
1406
+ curl --cookie "session=$TOKEN" "https://api.production.colisweb.com/api/v6/routes-plans/external?from=${FROM_DATE}&to=${TO_DATE}" > ~/Downloads/$FILENAME
1407
+ echo "Tournées téléchargées"
1408
+
1409
+ projectIds=$(./tour_details.sc allProjects --file ~/Downloads/$FILENAME | gum choose --no-limit | cut -d "," -f 2)
1410
+ echo "projets sélectionnés : $projectIds"
1411
+ tourIds=$(./tour_details.sc allTours --file ~/Downloads/$FILENAME --projectIds "$projectIds")
1412
+ echo "tournées sélectionnées : $tourIds"
1413
+
1414
+ TARGET="${FROM_DATE}-TO-${TO_DATE}.csv"
1415
+ echo "appels à HERE, écriture dans $TARGET"
1416
+ ./tour_details.sc allToursDetails --token $TOKEN --hereApiKey $HERE_API_KEY --routeIds "$tourIds" > "$TARGET"
1417
+
1418
+ echo "terminé"
1419
+ }
1420
+
1168
1421
  #!/usr/bin/env bash
1169
1422
 
1170
1423
  # possible syntax:
@@ -1269,17 +1522,67 @@ jconsole_k8s() {
1269
1522
 
1270
1523
  #!/usr/bin/env bash
1271
1524
 
1272
- # Interactive console on an existing pod. See also run_ruby_k8s
1525
+ # Interactive console on a new pod. See also run_ruby_k8s
1273
1526
  # Ex :
1274
1527
  # railsc_k8s production
1275
1528
  # railsc_k8s production "User.where(email:'toni@colisweb.com')"
1276
1529
  railsc_k8s() {
1277
1530
  ENV=$1
1278
1531
  COMMAND=$2
1532
+ [[ $ENV = "production" || $ENV = "staging" ]] && default_tag="master-latest" || default_tag="${ENV}-latest"
1533
+ local image_tag=${5:-$default_tag}
1534
+ local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/colisweb-api:$image_tag"
1535
+ local POD_NAME="colisweb-api-rails-console-$image_tag-$USERNAME"
1536
+
1537
+ kubectl -n $ENV get pod $POD_NAME && kubectl -n $ENV delete pod $POD_NAME
1538
+
1279
1539
  configure_kubectl_for $ENV
1280
- POD=$(kubectl -n $ENV get pods -o=name | grep colisweb-api-web | head -1 | sed -e 's/pod\///')
1281
- KUBERAILS="kubectl -n $ENV exec -ti $POD -- /usr/src/app/bin/rails c"
1540
+ echo "starting with $IMAGE"
1541
+
1542
+ kubectl -n $ENV run $POD_NAME \
1543
+ --image $IMAGE \
1544
+ --restart=Never \
1545
+ --overrides='{
1546
+ "spec":{
1547
+ "nodeSelector":{
1548
+ "workType": "workers"
1549
+ },
1550
+ "containers":[
1551
+ {
1552
+ "name":"'$POD_NAME'",
1553
+ "image":"'$IMAGE'",
1554
+ "imagePullPolicy":"Always",
1555
+ "command":[
1556
+ "sleep",
1557
+ "infinity"
1558
+ ],
1559
+ "resources":{
1560
+ "limits":{
1561
+ "memory": "2048Mi"
1562
+ }
1563
+ },
1564
+ "envFrom": [ {
1565
+ "configMapRef": {
1566
+ "name": "colisweb-api"
1567
+ }
1568
+ }, {
1569
+ "secretRef": {
1570
+ "name": "colisweb-api"
1571
+ }
1572
+ }
1573
+ ]
1574
+ }
1575
+ ]
1576
+ }
1577
+ }
1578
+ '
1579
+
1580
+ sleep 5
1581
+ KUBERAILS="kubectl -n $ENV exec -ti $POD_NAME -- /usr/src/app/bin/rails c"
1282
1582
  [ -z "$COMMAND" ] && eval $KUBERAILS || echo $COMMAND | eval $KUBERAILS
1583
+
1584
+ print "End of $POD_NAME "
1585
+ kubectl -n $ENV delete pods $POD_NAME
1283
1586
  }
1284
1587
 
1285
1588
  # Ex :
@@ -1315,7 +1618,7 @@ run_ruby_k8s() {
1315
1618
  local name=$2
1316
1619
  local ruby_script=$3
1317
1620
  local input_data=$4
1318
- [[ $namespace = "production" ]] && default_tag="master-latest" || default_tag="${namespace}-latest"
1621
+ [[ $namespace = "production" || $namespace = "staging" ]] && default_tag="master-latest" || default_tag="${namespace}-latest"
1319
1622
  local image_tag=${5:-$default_tag}
1320
1623
 
1321
1624
  if [ ! -r "$ruby_script" ]; then
@@ -1546,11 +1849,18 @@ docker_build_push() {
1546
1849
 
1547
1850
  if ! image_exists $DOCKER_REGISTRY_ID $APPLICATION $CI_COMMIT_SHORT_SHA ; then
1548
1851
  docker pull $DOCKER_IMAGE || true
1549
- docker build $DOCKER_BUILD_ARGS -t $DOCKER_IMAGE_SHA --cache-from $DOCKER_IMAGE $DOCKER_STAGE_PATH
1852
+ SOURCE_URL=${CI_PROJECT_URL:8} # without "https://" protocol, like gitlab.com/colisweb-idl/colisweb/back/packing
1853
+ docker build $DOCKER_BUILD_ARGS \
1854
+ -t $DOCKER_IMAGE_SHA \
1855
+ --label org.opencontainers.image.revision=$(git rev-parse HEAD) \
1856
+ --label org.opencontainers.image.source=$SOURCE_URL \
1857
+ --cache-from $DOCKER_IMAGE \
1858
+ $DOCKER_STAGE_PATH
1550
1859
  docker push $DOCKER_IMAGE_SHA
1551
1860
  fi
1552
1861
  }
1553
1862
 
1863
+
1554
1864
  docker_promote() {
1555
1865
  # inspired by https://dille.name/blog/2018/09/20/how-to-tag-docker-images-without-pulling-them/
1556
1866
  OLD_TAG=${1//[^0-9a-zA-Z-.]/_}
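The two OCI labels added to docker_build_push can be read back from the built image; a hedged check, reusing the variable names from that function:

  docker inspect --format '{{ index .Config.Labels "org.opencontainers.image.revision" }}' $DOCKER_IMAGE_SHA
  docker inspect --format '{{ index .Config.Labels "org.opencontainers.image.source" }}' $DOCKER_IMAGE_SHA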
@@ -1577,6 +1887,7 @@ docker_promote() {
1577
1887
  image_exists ${DOCKER_REGISTRY_ID} ${IMAGE_TO_CHECK} ${VERSION} || return 1
1578
1888
  done
1579
1889
  }
1890
+
1580
1891
  #!/usr/bin/env bash
1581
1892
 
1582
1893
  extract_yaml_config_variable() {
@@ -1653,7 +1964,7 @@ flyway_clean() {
1653
1964
 
1654
1965
  #!/usr/bin/env bash
1655
1966
 
1656
- FLYWAY_VERSION="5.2.4"
1967
+ FLYWAY_VERSION="7.4.0"
1657
1968
 
1658
1969
 
1659
1970
  get_yaml_variable() {
@@ -1739,7 +2050,7 @@ flyway_migrate() {
1739
2050
  "containers":[
1740
2051
  {
1741
2052
  "name":"'$POD_NAME'",
1742
- "image":"boxfuse/flyway:'$flyway_version'",
2053
+ "image":"flyway/flyway:'$flyway_version'",
1743
2054
  "command":["flyway", "-url='$db_url'", "-user='$db_user'", "-password='$db_password'", "migrate"],
1744
2055
  "volumeMounts":[
1745
2056
  {
@@ -1764,6 +2075,63 @@ flyway_migrate() {
1764
2075
  kubectl -n $namespace delete configmap $CONFIGMAP_NAME
1765
2076
  }
1766
2077
 
2078
+ #!/usr/bin/env bash
2079
+ flyway_repair() {
2080
+ set -e
2081
+ check_env_vars 4 "APPLICATION" "ENVIRONMENT" "FLYWAY_VERSION" "MIGRATION_SQL_PATH"
2082
+
2083
+ PG_YAML_PATH=".${APPLICATION}config.postgres"
2084
+
2085
+ DB_PORT="5432"
2086
+ DB_HOST=$(get_yaml_variable "${PG_YAML_PATH}.host")
2087
+ DB_DATABASE=$(get_yaml_variable "${PG_YAML_PATH}.database")
2088
+ DB_USER=$(get_yaml_variable "${PG_YAML_PATH}.user")
2089
+ DB_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.password")
2090
+ DB_URL="jdbc:postgresql://${DB_HOST}:${DB_PORT}/${DB_DATABASE}"
2091
+
2092
+ flyway_sql_folder=$(pwd)/${MIGRATION_SQL_PATH}
2093
+
2094
+ configure_kubectl_for_ci "${ENVIRONMENT}"
2095
+ POD_NAME="${APPLICATION}-flyway-repair"
2096
+ CONFIGMAP_NAME="${APPLICATION}-flyway-repair-sql"
2097
+
2098
+ kubectl -n "${ENVIRONMENT}" delete configmap $CONFIGMAP_NAME --ignore-not-found
2099
+ kubectl -n "${ENVIRONMENT}" delete pod $POD_NAME --ignore-not-found
2100
+ kubectl -n "${ENVIRONMENT}" create configmap $CONFIGMAP_NAME --from-file="${flyway_sql_folder}"
2101
+
2102
+ kubectl -n "${ENVIRONMENT}" run --rm -it "${POD_NAME}" \
2103
+ --image=flyway/flyway \
2104
+ --restart=Never \
2105
+ --overrides='
2106
+ {
2107
+ "spec":{
2108
+ "containers":[
2109
+ {
2110
+ "name":"'$POD_NAME'",
2111
+ "image":"flyway/flyway:'${FLYWAY_VERSION}'",
2112
+ "command":["flyway", "-url='$DB_URL'", "-user='$DB_USER'", "-password='$DB_PASSWORD'", "repair"],
2113
+ "volumeMounts":[
2114
+ {
2115
+ "name":"sql",
2116
+ "mountPath":"/flyway/sql"
2117
+ }
2118
+ ]
2119
+ }
2120
+ ],
2121
+ "volumes":[
2122
+ {
2123
+ "name":"sql",
2124
+ "configMap":{
2125
+ "name":"'$CONFIGMAP_NAME'"
2126
+ }
2127
+ }
2128
+ ]
2129
+ }
2130
+ }
2131
+ '
2132
+ kubectl -n "${ENVIRONMENT}" delete configmap $CONFIGMAP_NAME
2133
+ }
2134
+
1767
2135
  #!/usr/bin/env bash
1768
2136
 
1769
2137
  record_git_commit() {
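flyway_repair above takes its configuration from environment variables (per check_env_vars); a hedged invocation sketch, with the application name and SQL path as placeholders:

  APPLICATION=price ENVIRONMENT=testing FLYWAY_VERSION=7.4.0 MIGRATION_SQL_PATH=src/main/resources/db/migration flyway_repair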
@@ -1789,152 +2157,6 @@ git_reveal() {
1789
2157
  }
1790
2158
  #!/usr/bin/env bash
1791
2159
 
1792
- helm_deploy() {
1793
- APPLICATION=$1
1794
- ENVIRONMENT=$2
1795
- VERSION=$3
1796
- deploy_chart \
1797
- --path_configs deploy \
1798
- --path_chart deploy/$APPLICATION \
1799
- --application $APPLICATION \
1800
- --environment $ENVIRONMENT \
1801
- --namespace $ENVIRONMENT \
1802
- --helm_extra_args --set global.version=$VERSION
1803
- }
1804
-
1805
- deploy_chart() {
1806
- set -e
1807
- set -x
1808
-
1809
- # Rigid parsing, but all args are mandatory (expect last) and flexible order is unnecessary
1810
- check_args "--path_configs" $1; shift
1811
- path_configs=$1; shift
1812
-
1813
- check_args "--path_chart" $1; shift
1814
- path_chart=$1; shift
1815
-
1816
- check_args "--application" $1; shift
1817
- application=$1; shift
1818
-
1819
- check_args "--environment" $1; shift
1820
- environment=$1; shift
1821
-
1822
- check_args "--namespace" $1; shift
1823
- namespace=$1; shift
1824
-
1825
- if [ $# -ne 0 ]; then
1826
- check_args "--helm_extra_args" $1; shift
1827
- helm_extra_args=$*
1828
- fi
1829
-
1830
- echo "================================"
1831
- echo " Deploying $application"
1832
- echo " - Environment: $environment"
1833
- echo " - Namespace: $namespace"
1834
- echo "================================"
1835
-
1836
- root_path=$(pwd)
1837
-
1838
- # Check the configs exists
1839
-
1840
- check_config_file ${root_path}/${path_configs}/common.yaml
1841
- check_config_file ${root_path}/${path_configs}/${namespace}.yaml
1842
- check_config_file ${root_path}/${path_configs}/${namespace}-secrets.yaml
1843
-
1844
- # Check the chart exists
1845
- if [ ! -d ${root_path}/${path_chart} ] || [ ! -f ${root_path}/${path_chart}/Chart.yaml ]; then
1846
- echo "Bad Chart $root_path/$path_chart : does not exists or missing Chart.yaml"
1847
- print_usage
1848
- exit 1
1849
- fi
1850
-
1851
- # Unset Kubectl configuration made via the KUBECONFIG env variable
1852
- # it would override the config made by configure_kubectl_for
1853
- # for example, using Gitlab runners in Kubernetes sets this variable and causes conflict
1854
- unset KUBECONFIG
1855
-
1856
- # Configure Kubectl
1857
- configure_kubectl_for_ci ${environment}
1858
-
1859
- # Configure Helm
1860
- helm version --tiller-namespace ${namespace} || true
1861
- # Helm stable repo have changed and must be updated manually, in versions < v2.17.0
1862
- helm init --tiller-namespace ${namespace} --client-only --stable-repo-url https://charts.helm.sh/stable
1863
- helm repo add colisweb s3://colisweb-helm-charts/colisweb
1864
- helm repo update --strict
1865
- helm dependency update --tiller-namespace ${namespace} ${root_path}/${path_chart}
1866
-
1867
- # Gather values/*.yaml files
1868
- values_path="${root_path}/${path_chart}/values"
1869
- values_files=''
1870
- [ -d $values_path ] && values_files=$(find $values_path -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')
1871
-
1872
- # Deploy
1873
- helm upgrade -i \
1874
- --namespace ${namespace} \
1875
- --tiller-namespace ${namespace} \
1876
- ${values_files} \
1877
- -f ${root_path}/${path_configs}/common.yaml \
1878
- -f ${root_path}/${path_configs}/${namespace}.yaml \
1879
- -f ${root_path}/${path_configs}/${namespace}-secrets.yaml \
1880
- ${helm_extra_args} \
1881
- ${application} ${root_path}/${path_chart}
1882
-
1883
- #send event to dd
1884
- PUBLISHED_VERSION="$CI_COMMIT_REF_NAME-$CI_COMMIT_SHA"
1885
- emit_datadog_deploy_event --environment $environment --service $application --version $PUBLISHED_VERSION
1886
-
1887
- echo "================================"
1888
- echo " Deployed $application"
1889
- echo " - Environment: $environment"
1890
- echo " - Namespace: $namespace"
1891
- echo "================================"
1892
-
1893
- set +x
1894
- }
1895
-
1896
- verify_deployments() {
1897
- set -e
1898
-
1899
- # usage :
1900
- # verify_deployments staging price
1901
- # verify_deployments -t 15m testing price
1902
-
1903
- if [ "$1" == "-t" ]; then
1904
- TIMEOUT=$2
1905
- shift
1906
- shift
1907
- else
1908
- TIMEOUT=5m
1909
- fi
1910
-
1911
- NAMESPACE=$1
1912
- RELEASE=$2
1913
-
1914
- # Get all Deployments names from the deployed chart
1915
- DEPLOYMENTS=(
1916
- $(helm get manifest --tiller-namespace $NAMESPACE $RELEASE | yq -rs '.[] | select(.kind=="Deployment") | .metadata.name')
1917
- )
1918
-
1919
- echo "verifying on $NAMESPACE deployments ${DEPLOYMENTS[@]} with a timeout of $TIMEOUT"
1920
-
1921
- PIDS=()
1922
- for D in "${DEPLOYMENTS[@]}"; do
1923
- kubectl -n ${NAMESPACE} rollout status deployment ${D} --timeout=${TIMEOUT} &
1924
- PIDS+=($!)
1925
- done
1926
-
1927
- for P in ${PIDS[@]}; do
1928
- wait $P
1929
-
1930
- if [ $? -ne 0 ]; then
1931
- echo "at least one deployment failed or timed out (after $TIMEOUT)"
1932
- exit 1
1933
- fi
1934
- done
1935
-
1936
- }
1937
-
1938
2160
  helm_deploy_v3() {
1939
2161
  APPLICATION=$1
1940
2162
  ENVIRONMENT=$2
@@ -2040,7 +2262,7 @@ verify_deployments_v3() {
2040
2262
  # verify_deployments staging price
2041
2263
  # verify_deployments -t 15m testing price
2042
2264
 
2043
- if [ "$1" == "-t" ]; then
2265
+ if [ "$1" = "-t" ] ; then
2044
2266
  TIMEOUT=$2
2045
2267
  shift
2046
2268
  shift