@colisweb/rescript-toolkit 4.14.16 → 4.15.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -97,22 +97,19 @@ aws_ecr_token() {
  # will delete images older than 8 weeks
  delete_images() {

- aws --version | grep -q "^aws-cli/2" || (echo "You must have aws-cli v2 installed to use this script") ; return 1
-
  REPO=$1
  WEEKS=${2:-16}

- WEEKS_AGO=$(date -j -v-${WEEKS}w +%s)
+ WEEKS_AGO=$(date -v-${WEEKS}w +%F)

  #Get all ecr images
  IMAGES=$(aws ecr describe-images --repository-name $REPO --output json)

  #Filter unnecessary values and map `imagePushedAt` to EPOCH
- TIMED_IMAGES=$(echo $IMAGES | jq .'[]' | jq "map({imagePushedAt: (.imagePushedAt[0:19]+\"Z\" | fromdateiso8601), imageDigest: .imageDigest}) | sort_by(.imagePushedAt) | .[:-1]")
+ NON_LATEST_IMAGES=$(echo $IMAGES | jq '[.imageDetails[] | select(.imageTags | any(endswith("latest")) | not)]')

  #Filter on EPOCH
- OLD_IMAGES=$(echo $TIMED_IMAGES | jq "map(select (.imagePushedAt < $WEEKS_AGO)) | .[] " | jq -r '.imageDigest')
-
+ OLD_IMAGES=$(echo $NON_LATEST_IMAGES | jq --arg date $WEEKS_AGO '.[] | select(.imagePushedAt[0:10] < $date).imageDigest')
  while IFS= read -r IMAGE; do
  if [ "$IMAGE" != "" ]; then
  echo "Deleting $IMAGE from $REPO"
@@ -132,6 +129,153 @@ delete_images_all_repos() {
  done <<< "$REPOSITORIES"
  }

+ delete_old_cache() {
+ DATE=${1:-$(date -v-1m +%F)}
+ CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}
+
+ echo "deleting from cache $CACHE_BUCKET all older than $DATE"
+
+ aws_ecr_login
+
+ while read -r line; do
+ datum=$(echo $line | cut -c1-10)
+ if [[ "$datum" < "$DATE" ]] ; then
+ # Shell Parameter Expansion: ${parameter##word}
+ # Strips the longest prefix of "$parameter" matching the pattern "word" and returns the rest
+ # Here we keep everything after "project/" (the S3 gitlab project id and filename); see the sketch after this function
+ TO_DELETE="$CACHE_BUCKET${line##* project/}"
+ echo $TO_DELETE
+ aws s3 rm $TO_DELETE
+ fi
+ done < <(aws s3 ls $CACHE_BUCKET --recursive)
+ }
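The ${line##* project/} expansion above is standard shell parameter expansion: ${parameter##word} removes the longest prefix of $parameter matching the pattern word. A rough sketch with a hypothetical "aws s3 ls --recursive" output line:

    line="2024-05-01 10:22:33    1048576 project/1234/cache/default.zip"   # hypothetical listing line
    echo "${line##* project/}"
    # -> 1234/cache/default.zip
    CACHE_BUCKET="s3://gitlab-colisweb-distributed-cache/project/"
    echo "$CACHE_BUCKET${line##* project/}"
    # -> s3://gitlab-colisweb-distributed-cache/project/1234/cache/default.zip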
+
+ #!/usr/bin/env bash
+
+ # If gitlab is down or pipelines are stuck, hotfixes need to be available
+ # This script will publish docker images to ECR using your current git HEAD, then deploy them to a given environment.
+ # Some local files (git-commit.conf and sentry.properties) will be updated, so take care.
+ # No trace of this will appear on Gitlab (no releases, no pipelines, no tags).
+ # create_hotfix_scala $ENVIRONMENT $CHART_NAME [ $MODULE_NAME $MODULE_PATH $DEPLOYMENT ]
+ # create_hotfix_scala testing crm main modules/3-executables/main crm
+ # create_hotfix_scala testing notification \
+ # main-http modules/3-executables/main-http notification-http \
+ # main-consumer modules/3-executables/main-consumer notification-consumer
+
+ create_hotfix_scala() {
+
+ ENVIRONMENT=$1
+ CHART_NAME=$2
+ shift 2
+
+ SHORT_SHA=$(git rev-parse --short HEAD)
+ HOTFIX_TAG="hotfix-$SHORT_SHA"
+
+ gum confirm "Preparing $HOTFIX_TAG for $CHART_NAME ?" || exit
+ prepare_hotfix_scala $HOTFIX_TAG
+
+ gum confirm "Building $HOTFIX_TAG for $CHART_NAME ?" || exit
+ while [[ $# -gt 2 ]] ; do
+ build_hotfix_scala $HOTFIX_TAG "$1" "$2" "$3"
+ shift 3
+ done
+
+ gum confirm "Deploying $HOTFIX_TAG for $CHART_NAME ?" || exit
+ deploy_hotfix $CHART_NAME $ENVIRONMENT $HOTFIX_TAG
+ }
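As a quick illustration of the argument handling in create_hotfix_scala (not from the package): after shift 2 drops the environment and chart name, the while/shift 3 loop consumes the remaining arguments three at a time, one (module, path, deployment) triple per build.

    # hypothetical helper, just to show the pattern
    consume_triples() {
      shift 2
      while [[ $# -gt 2 ]] ; do
        echo "module=$1 path=$2 deployment=$3"
        shift 3
      done
    }
    consume_triples testing notification \
      main-http modules/3-executables/main-http notification-http \
      main-consumer modules/3-executables/main-consumer notification-consumer
    # -> module=main-http path=modules/3-executables/main-http deployment=notification-http
    # -> module=main-consumer path=modules/3-executables/main-consumer deployment=notification-consumer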
+
+ # Update local git-commit.conf and sentry.properties files using git short sha
+ prepare_hotfix_scala() {
+ HOTFIX_TAG=$1
+
+ git secret reveal -f
+ aws_ecr_login
+
+ COMMIT_CONF_FILES=$(find . -name "git-commit.conf")
+ SENTRY_PROPERTIES_FILES=$(find . -name "sentry.properties")
+
+ for file in $(echo "$COMMIT_CONF_FILES\n$SENTRY_PROPERTIES_FILES"); do
+ sed -i '' -e 's&GIT_COMMIT&'"$HOTFIX_TAG&" $file
+ done
+
+ }
+
+ # Build docker images locally and publish them to AWS ECR.
+ build_hotfix_scala() {
+
+ HOTFIX_TAG=$1
+ SBT_MODULE=$2
+ DOCKER_PATH=$3
+ DEPLOYMENT=$4
+
+ DOCKER_REGISTRY_ID="949316342391"
+ DOCKER_REGISTRY="$DOCKER_REGISTRY_ID.dkr.ecr.eu-west-1.amazonaws.com"
+ DOCKER_IMAGE=$DOCKER_REGISTRY/$DEPLOYMENT
+ HOTFIX_IMAGE=$DOCKER_IMAGE:$HOTFIX_TAG
+
+ #Build
+ sbt "project $SBT_MODULE" "Docker / stage"
+
+ #Publish
+ docker build --platform "linux/amd64" -t $HOTFIX_IMAGE --cache-from $DOCKER_IMAGE "$DOCKER_PATH/target/docker/stage"
+ docker push $HOTFIX_IMAGE
+
+ echo "Created hotfix $HOTFIX_IMAGE"
+ }
+
+ # Deploy the project in the given environment
+ deploy_hotfix() {
+ source $colisweb_scripts/ci/helm.sh
+
+ CHART_NAME=$1
+ ENVIRONMENT=$2
+ HOTFIX_TAG=$3
+
+ CONFIG_PATH=deploy
+ CHART_PATH=$CONFIG_PATH/$CHART_NAME
+ ROOT_PATH=$(pwd)
+
+ # Unset Kubectl configuration made via the KUBECONFIG env variable
+ # it would override the config made by configure_kubectl_for
+ # for example, using Gitlab runners in Kubernetes sets this variable and causes conflicts
+ unset KUBECONFIG
+
+ # Configure Kubectl
+ configure_kubectl_for $ENVIRONMENT
+
+ # Avoid the "no local-index.yaml" / "empty local-index.yaml" errors
+ cat > $HOME/Library/Caches/helm/repository/local-index.yaml <<EOT
+ apiVersion: v1
+ entries:
+ cronjob:
+ EOT
+
+ # The helm stable repo has changed and must be updated manually in helm versions < v2.17.0
+ helm3 repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
+ helm3 repo add stable https://charts.helm.sh/stable --force-update
+ helm3 repo update
+ helm3 dependency update ${ROOT_PATH}/${CHART_PATH}
+
+ # Gather values/*.yaml files
+ VALUES_PATH="${ROOT_PATH}/${CHART_NAME}/values"
+ VALUES_FILES=''
+ [ -d $VALUES_PATH ] && VALUES_FILES=$(find $VALUES_PATH -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')
+
+ # Deploy
+ helm3 upgrade --install \
+ --namespace ${ENVIRONMENT} \
+ ${VALUES_FILES} \
+ -f ${ROOT_PATH}/${CONFIG_PATH}/common.yaml \
+ -f ${ROOT_PATH}/${CONFIG_PATH}/${ENVIRONMENT}.yaml \
+ -f ${ROOT_PATH}/${CONFIG_PATH}/${ENVIRONMENT}-secrets.yaml \
+ --set global.version=$HOTFIX_TAG \
+ ${CHART_NAME} ${ROOT_PATH}/${CHART_PATH}
+
+
+ verify_deployments_v3 -t 10m $ENVIRONMENT $CHART_NAME
+
+ }
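The VALUES_FILES line in deploy_hotfix turns every values/*.yaml file into a " -f <file>" fragment on a single line; because the variable is later expanded unquoted in the helm3 command, word splitting turns it back into separate flags. A sketch with hypothetical file names:

    # assuming a values directory deploy/crm/values/ containing api.yaml and worker.yaml (made-up names)
    find deploy/crm/values -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d '\n'
    # ->  -f deploy/crm/values/api.yaml -f deploy/crm/values/worker.yaml
    # the original also pipes through sed 's/%//', presumably to drop a stray '%' if one sneaks in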
+
  #!/usr/bin/env bash

  image_exists() {
@@ -391,28 +535,29 @@ database_k8s() {
  HostName 127.0.0.1
  Port 2225
  LocalForward 24441 toutatis-testing-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
- LocalForward 25431 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
+ LocalForward 25431 toutatis-testing-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
+ LocalForward 25531 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  Host bastion_staging
  HostName 127.0.0.1
  Port 2226
  LocalForward 24442 toutatis-staging-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
- LocalForward 25432 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
+ LocalForward 25432 toutatis-staging-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  Host bastion_recette
  HostName 127.0.0.1
  Port 2228
  LocalForward 24446 toutatis-recette-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
- LocalForward 25436 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
+ LocalForward 25436 toutatis-recette-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
+ LocalForward 25536 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  Host bastion_production
  HostName 127.0.0.1
  Port 2227
  LocalForward 24443 toutatis-production-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
- LocalForward 25433 api-production-rds-read-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
- LocalForward 25435 archive-ca.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
+ LocalForward 25433 toutatis-production-mysql-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  EOF
  if [ "$MODE" = "production_rw" ] ; then
  cat >> "$bastion_config" <<EOF
  LocalForward 24444 toutatis-production-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
- LocalForward 25434 api-production-rds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
+ LocalForward 25434 toutatis-production-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  EOF
  fi

@@ -533,9 +678,7 @@ kube_init_datadog_in_database() {
  echo " Initializing Datadog Agent Requiement for namespace $namespace"
  echo "======================="

- set -x
-
- echo "Checking if Database '$db_datadog_username' exists"
+ echo "Checking if User '$db_datadog_username' exists"
  set +e
  mysql_on_k8 $namespace $db_host $db_port $db_init_username $db_init_password 'SELECT user FROM mysql.user;' | grep "^$db_datadog_username$"
  return_code=$?
@@ -615,6 +758,82 @@ kube_init_datadog_in_database() {
  echo "======================="
  }

+ kube_init_datadog_in_postgres_database() {
+ extract_args 7 namespace db_host db_port db_init_username db_init_password db_datadog_username db_datadog_password $*
+
+ local service="datadog"
+ local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"
+
+ echo "======================="
+ echo " Initializing $service Agent On PostgreSQL Database Requirement for namespace $namespace"
+ echo "======================="
+
+ echo "Checking if User '$db_datadog_username' exists"
+
+ set +e
+ if psql_on_k8 $namespace $service $db_connection -qtAc 'SELECT usename FROM pg_catalog.pg_user;' | grep "^$db_datadog_username$";
+ then
+ echo "User $db_datadog_username already exists - nothing to do"
+ else
+ echo "User $db_datadog_username does not exist - initializing"
+
+ set -e
+ psql_on_k8 $namespace $service $db_connection -qc 'CREATE USER '"$db_datadog_username"' WITH password '"'$db_datadog_password'"';'
+ echo "User created $db_datadog_username"
+
+ psql_on_k8 $namespace $service $db_connection -qc 'CREATE SCHEMA datadog;'
+ echo "Schema datadog created"
+
+ psql_on_k8 $namespace $service $db_connection -qc 'GRANT USAGE ON SCHEMA datadog TO datadog;'
+ echo "Granted usage for datadog schema to datadog"
+
+ psql_on_k8 $namespace $service $db_connection -qc 'GRANT USAGE ON SCHEMA public TO datadog;'
+ echo "Granted usage for public schema to datadog"
+
+ psql_on_k8 $namespace $service $db_connection -qc 'GRANT pg_monitor TO datadog;'
+ echo "Granted pg_monitor to datadog"
+
+ psql_on_k8 $namespace $service $db_connection -qc 'CREATE EXTENSION IF NOT EXISTS pg_stat_statements schema public;'
+ echo "Extension pg_stat_statements created"
+
+ local datadog_function_path="/tmp/datatog-explain-statement-function.sql"
+ local datadog_function="CREATE OR REPLACE FUNCTION datadog.explain_statement(
+ l_query TEXT,
+ OUT explain JSON
+ )
+ RETURNS SETOF JSON AS
+ \\$\\$
+ DECLARE
+ curs REFCURSOR;
+ plan JSON;
+
+ BEGIN
+ OPEN curs FOR EXECUTE pg_catalog.concat('EXPLAIN (FORMAT JSON) ', l_query);
+ FETCH curs INTO plan;
+ CLOSE curs;
+ RETURN QUERY SELECT plan;
+ END;
+ \\$\\$
+ LANGUAGE 'plpgsql'
+ RETURNS NULL ON NULL INPUT
+ SECURITY DEFINER;"
+
+ kubectl -n $namespace run $service-database-init \
+ --image jbergknoff/postgresql-client \
+ --restart=Never \
+ --attach --rm \
+ --command \
+ -- \
+ /bin/sh -c "echo -e \"$datadog_function\" > $datadog_function_path; psql postgresql://$db_connection -qf $datadog_function_path"
+
+ echo "Function datadog.explain_statement created"
+ fi
+
+ echo "======================="
+ echo " Database $service Initialization complete for namespace $namespace"
+ echo "======================="
+ }
+
  kube_init_service_database() {

  extract_args 9 namespace service db_host db_port db_init_username db_init_password db_database db_username db_password $*
@@ -637,17 +856,18 @@ kube_init_service_database() {
  psql_on_k8 $namespace $service $db_connection -c 'CREATE DATABASE '"$db_database"';'
  echo "DB created $db_database"

- psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_datadog_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
- echo "USER created $db_datadog_username"
+ psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
+ echo "USER created $db_username"

- psql_on_k8 $namespace $service $db_connection -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_datadog_username"';'
- echo "Granted all privileges for $db_datadog_username on $db_database"
+ psql_on_k8 $namespace $service $db_connection -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_username"';'
+ echo "Granted all privileges for $db_username on $db_database"
  fi

  echo "======================="
- echo " Database '$db_database' Initialization complete for namespace $namespace"
+ echo " Database '$db_database' Initialization complete for namespace $namespace"
  echo "======================="
  }
+
  #!/usr/bin/env bash

  # Port forward on the first matching pod
@@ -1165,6 +1385,36 @@ search_business() {
  curl $URL
  }

+ #!/bin/bash
+
+ # source tolls.sh ; tolls antoine.thomas@colisweb.com
+ function tolls() {
+ USER=${1:-first.last@colisweb.com}
+ FROM_DATE=${2:-"2023-02-01"}
+ TO_DATE=${3:-"2023-02-28"}
+
+ USER=$(gum input --prompt "username : " --value $USER)
+ TOKEN=$(./tour_details.sc login --user $USER --password $(gum input --password --placeholder password))
+ [ "$TOKEN" != "" ] && echo "connected" || return 1
+
+ FROM_DATE=$(gum input --prompt "Date start : " --value $FROM_DATE)
+ TO_DATE=$(gum input --prompt "Date end : " --value $TO_DATE)
+ FILENAME="tours-${FROM_DATE}-TO-${TO_DATE}.json"
+ curl --cookie "session=$TOKEN" "https://api.production.colisweb.com/api/v6/routes-plans/external?from=${FROM_DATE}&to=${TO_DATE}" > ~/Downloads/$FILENAME
+ echo "Tours downloaded"
+
+ projectIds=$(./tour_details.sc allProjects --file ~/Downloads/$FILENAME | gum choose --no-limit | cut -d "," -f 2)
+ echo "selected projects : $projectIds"
+ tourIds=$(./tour_details.sc allTours --file ~/Downloads/$FILENAME --projectIds "$projectIds")
+ echo "selected tours : $tourIds"
+
+ TARGET="${FROM_DATE}-TO-${TO_DATE}.csv"
+ echo "calling HERE, writing to $TARGET"
+ ./tour_details.sc allToursDetails --token $TOKEN --hereApiKey $HERE_API_KEY --routeIds "$tourIds" > "$TARGET"
+
+ echo "done"
+ }
+
  #!/usr/bin/env bash

  # possible syntax:
@@ -1269,17 +1519,67 @@ jconsole_k8s() {

  #!/usr/bin/env bash

- # Interactive console on an existing pod. See also run_ruby_k8s
+ # Interactive console on a new pod. See also run_ruby_k8s
  # Ex :
  # railsc_k8s production
  # railsc_k8s production "User.where(email:'toni@colisweb.com')"
  railsc_k8s() {
  ENV=$1
  COMMAND=$2
+ [[ $ENV = "production" || $ENV = "staging" ]] && default_tag="master-latest" || default_tag="${ENV}-latest"
+ local image_tag=${5:-$default_tag}
+ local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/colisweb-api:$image_tag"
+ local POD_NAME="colisweb-api-rails-console-$image_tag-$USERNAME"
+
+ kubectl -n $ENV get pod $POD_NAME && kubectl -n $ENV delete pod $POD_NAME
+
  configure_kubectl_for $ENV
- POD=$(kubectl -n $ENV get pods -o=name | grep colisweb-api-web | head -1 | sed -e 's/pod\///')
- KUBERAILS="kubectl -n $ENV exec -ti $POD -- /usr/src/app/bin/rails c"
+ echo "starting with $IMAGE"
+
+ kubectl -n $ENV run $POD_NAME \
+ --image $IMAGE \
+ --restart=Never \
+ --overrides='{
+ "spec":{
+ "nodeSelector":{
+ "workType": "workers"
+ },
+ "containers":[
+ {
+ "name":"'$POD_NAME'",
+ "image":"'$IMAGE'",
+ "imagePullPolicy":"Always",
+ "command":[
+ "sleep",
+ "infinity"
+ ],
+ "resources":{
+ "limits":{
+ "memory": "2048Mi"
+ }
+ },
+ "envFrom": [ {
+ "configMapRef": {
+ "name": "colisweb-api"
+ }
+ }, {
+ "secretRef": {
+ "name": "colisweb-api"
+ }
+ }
+ ]
+ }
+ ]
+ }
+ }
+ '
+
+ sleep 5
+ KUBERAILS="kubectl -n $ENV exec -ti $POD_NAME -- /usr/src/app/bin/rails c"
  [ -z "$COMMAND" ] && eval $KUBERAILS || echo $COMMAND | eval $KUBERAILS
+
+ print "End of $POD_NAME "
+ kubectl -n $ENV delete pods $POD_NAME
  }

  # Ex :
@@ -1315,7 +1615,7 @@ run_ruby_k8s() {
  local name=$2
  local ruby_script=$3
  local input_data=$4
- [[ $namespace = "production" ]] && default_tag="master-latest" || default_tag="${namespace}-latest"
+ [[ $namespace = "production" || $namespace = "staging" ]] && default_tag="master-latest" || default_tag="${namespace}-latest"
  local image_tag=${5:-$default_tag}

  if [ ! -r "$ruby_script" ]; then
@@ -1546,11 +1846,18 @@ docker_build_push() {

  if ! image_exists $DOCKER_REGISTRY_ID $APPLICATION $CI_COMMIT_SHORT_SHA ; then
  docker pull $DOCKER_IMAGE || true
- docker build $DOCKER_BUILD_ARGS -t $DOCKER_IMAGE_SHA --cache-from $DOCKER_IMAGE $DOCKER_STAGE_PATH
+ SOURCE_URL=${CI_PROJECT_URL:8} # without "https://" protocol, like gitlab.com/colisweb-idl/colisweb/back/packing
+ docker build $DOCKER_BUILD_ARGS \
+ -t $DOCKER_IMAGE_SHA \
+ --label org.opencontainers.image.revision=$(git rev-parse HEAD) \
+ --label org.opencontainers.image.source=$SOURCE_URL \
+ --cache-from $DOCKER_IMAGE \
+ $DOCKER_STAGE_PATH
  docker push $DOCKER_IMAGE_SHA
  fi
  }
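The SOURCE_URL assignment above uses bash substring expansion ${parameter:offset}; skipping the first 8 characters drops the "https://" prefix (8 characters) from CI_PROJECT_URL. A quick check with a made-up project URL, not taken from any real CI run:

    CI_PROJECT_URL="https://gitlab.com/colisweb-idl/colisweb/back/packing"   # hypothetical value
    echo "${CI_PROJECT_URL:8}"
    # -> gitlab.com/colisweb-idl/colisweb/back/packing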

+
  docker_promote() {
  # inspired by https://dille.name/blog/2018/09/20/how-to-tag-docker-images-without-pulling-them/
  OLD_TAG=${1//[^0-9a-zA-Z-.]/_}
@@ -1577,6 +1884,7 @@ docker_promote() {
  image_exists ${DOCKER_REGISTRY_ID} ${IMAGE_TO_CHECK} ${VERSION} || return 1
  done
  }
+
  #!/usr/bin/env bash

  extract_yaml_config_variable() {
@@ -1653,7 +1961,7 @@ flyway_clean() {

  #!/usr/bin/env bash

- FLYWAY_VERSION="5.2.4"
+ FLYWAY_VERSION="7.4.0"


  get_yaml_variable() {
@@ -1739,7 +2047,7 @@ flyway_migrate() {
  "containers":[
  {
  "name":"'$POD_NAME'",
- "image":"boxfuse/flyway:'$flyway_version'",
+ "image":"flyway/flyway:'$flyway_version'",
  "command":["flyway", "-url='$db_url'", "-user='$db_user'", "-password='$db_password'", "migrate"],
  "volumeMounts":[
  {
@@ -1764,6 +2072,63 @@ flyway_migrate() {
  kubectl -n $namespace delete configmap $CONFIGMAP_NAME
  }

+ #!/usr/bin/env bash
+ flyway_repair() {
+ set -e
+ check_env_vars 4 "APPLICATION" "ENVIRONMENT" "FLYWAY_VERSION" "MIGRATION_SQL_PATH"
+
+ PG_YAML_PATH=".${APPLICATION}config.postgres"
+
+ DB_PORT="5432"
+ DB_HOST=$(get_yaml_variable "${PG_YAML_PATH}.host")
+ DB_DATABASE=$(get_yaml_variable "${PG_YAML_PATH}.database")
+ DB_USER=$(get_yaml_variable "${PG_YAML_PATH}.user")
+ DB_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.password")
+ DB_URL="jdbc:postgresql://${DB_HOST}:${DB_PORT}/${DB_DATABASE}"
+
+ flyway_sql_folder=$(pwd)/${MIGRATION_SQL_PATH}
+
+ configure_kubectl_for_ci "${ENVIRONMENT}"
+ POD_NAME="${APPLICATION}-flyway-repair"
+ CONFIGMAP_NAME="${APPLICATION}-flyway-repair-sql"
+
+ kubectl -n "${ENVIRONMENT}" delete configmap $CONFIGMAP_NAME --ignore-not-found
+ kubectl -n "${ENVIRONMENT}" delete pod $POD_NAME --ignore-not-found
+ kubectl -n "${ENVIRONMENT}" create configmap $CONFIGMAP_NAME --from-file="${flyway_sql_folder}"
+
+ kubectl -n "${ENVIRONMENT}" run --rm -it "${POD_NAME}" \
+ --image=flyway/flyway \
+ --restart=Never \
+ --overrides='
+ {
+ "spec":{
+ "containers":[
+ {
+ "name":"'$POD_NAME'",
+ "image":"flyway/flyway:'${FLYWAY_VERSION}'",
+ "command":["flyway", "-url='$DB_URL'", "-user='$DB_USER'", "-password='$DB_PASSWORD'", "repair"],
+ "volumeMounts":[
+ {
+ "name":"sql",
+ "mountPath":"/flyway/sql"
+ }
+ ]
+ }
+ ],
+ "volumes":[
+ {
+ "name":"sql",
+ "configMap":{
+ "name":"'$CONFIGMAP_NAME'"
+ }
+ }
+ ]
+ }
+ }
+ '
+ kubectl -n "${ENVIRONMENT}" delete configmap $CONFIGMAP_NAME
+ }
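A hedged usage sketch for flyway_repair (not from the package): the function only checks that the four variables it names are set and reads the database settings through get_yaml_variable, so a CI job would typically export them and call the function after sourcing the toolkit scripts. All values here are made up.

    export APPLICATION="packing"                 # hypothetical application name
    export ENVIRONMENT="testing"
    export FLYWAY_VERSION="7.4.0"
    export MIGRATION_SQL_PATH="migrations/sql"   # hypothetical path to the SQL migration files
    flyway_repair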
+
  #!/usr/bin/env bash

  record_git_commit() {
@@ -1789,152 +2154,6 @@ git_reveal() {
  }
  #!/usr/bin/env bash

- helm_deploy() {
- APPLICATION=$1
- ENVIRONMENT=$2
- VERSION=$3
- deploy_chart \
- --path_configs deploy \
- --path_chart deploy/$APPLICATION \
- --application $APPLICATION \
- --environment $ENVIRONMENT \
- --namespace $ENVIRONMENT \
- --helm_extra_args --set global.version=$VERSION
- }
-
- deploy_chart() {
- set -e
- set -x
-
- # Rigid parsing, but all args are mandatory (expect last) and flexible order is unnecessary
- check_args "--path_configs" $1; shift
- path_configs=$1; shift
-
- check_args "--path_chart" $1; shift
- path_chart=$1; shift
-
- check_args "--application" $1; shift
- application=$1; shift
-
- check_args "--environment" $1; shift
- environment=$1; shift
-
- check_args "--namespace" $1; shift
- namespace=$1; shift
-
- if [ $# -ne 0 ]; then
- check_args "--helm_extra_args" $1; shift
- helm_extra_args=$*
- fi
-
- echo "================================"
- echo " Deploying $application"
- echo " - Environment: $environment"
- echo " - Namespace: $namespace"
- echo "================================"
-
- root_path=$(pwd)
-
- # Check the configs exists
-
- check_config_file ${root_path}/${path_configs}/common.yaml
- check_config_file ${root_path}/${path_configs}/${namespace}.yaml
- check_config_file ${root_path}/${path_configs}/${namespace}-secrets.yaml
-
- # Check the chart exists
- if [ ! -d ${root_path}/${path_chart} ] || [ ! -f ${root_path}/${path_chart}/Chart.yaml ]; then
- echo "Bad Chart $root_path/$path_chart : does not exists or missing Chart.yaml"
- print_usage
- exit 1
- fi
-
- # Unset Kubectl configuration made via the KUBECONFIG env variable
- # it would override the config made by configure_kubectl_for
- # for example, using Gitlab runners in Kubernetes sets this variable and causes conflict
- unset KUBECONFIG
-
- # Configure Kubectl
- configure_kubectl_for_ci ${environment}
-
- # Configure Helm
- helm version --tiller-namespace ${namespace} || true
- # Helm stable repo have changed and must be updated manually, in versions < v2.17.0
- helm init --tiller-namespace ${namespace} --client-only --stable-repo-url https://charts.helm.sh/stable
- helm repo add colisweb s3://colisweb-helm-charts/colisweb
- helm repo update --strict
- helm dependency update --tiller-namespace ${namespace} ${root_path}/${path_chart}
-
- # Gather values/*.yaml files
- values_path="${root_path}/${path_chart}/values"
- values_files=''
- [ -d $values_path ] && values_files=$(find $values_path -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')
-
- # Deploy
- helm upgrade -i \
- --namespace ${namespace} \
- --tiller-namespace ${namespace} \
- ${values_files} \
- -f ${root_path}/${path_configs}/common.yaml \
- -f ${root_path}/${path_configs}/${namespace}.yaml \
- -f ${root_path}/${path_configs}/${namespace}-secrets.yaml \
- ${helm_extra_args} \
- ${application} ${root_path}/${path_chart}
-
- #send event to dd
- PUBLISHED_VERSION="$CI_COMMIT_REF_NAME-$CI_COMMIT_SHA"
- emit_datadog_deploy_event --environment $environment --service $application --version $PUBLISHED_VERSION
-
- echo "================================"
- echo " Deployed $application"
- echo " - Environment: $environment"
- echo " - Namespace: $namespace"
- echo "================================"
-
- set +x
- }
-
- verify_deployments() {
- set -e
-
- # usage :
- # verify_deployments staging price
- # verify_deployments -t 15m testing price
-
- if [ "$1" == "-t" ]; then
- TIMEOUT=$2
- shift
- shift
- else
- TIMEOUT=5m
- fi
-
- NAMESPACE=$1
- RELEASE=$2
-
- # Get all Deployments names from the deployed chart
- DEPLOYMENTS=(
- $(helm get manifest --tiller-namespace $NAMESPACE $RELEASE | yq -rs '.[] | select(.kind=="Deployment") | .metadata.name')
- )
-
- echo "verifying on $NAMESPACE deployments ${DEPLOYMENTS[@]} with a timeout of $TIMEOUT"
-
- PIDS=()
- for D in "${DEPLOYMENTS[@]}"; do
- kubectl -n ${NAMESPACE} rollout status deployment ${D} --timeout=${TIMEOUT} &
- PIDS+=($!)
- done
-
- for P in ${PIDS[@]}; do
- wait $P
-
- if [ $? -ne 0 ]; then
- echo "at least one deployment failed or timed out (after $TIMEOUT)"
- exit 1
- fi
- done
-
- }
-
  helm_deploy_v3() {
  APPLICATION=$1
  ENVIRONMENT=$2
@@ -2040,7 +2259,7 @@ verify_deployments_v3() {
  # verify_deployments staging price
  # verify_deployments -t 15m testing price

- if [ "$1" == "-t" ]; then
+ if [ "$1" = "-t" ] ; then
  TIMEOUT=$2
  shift
  shift