@colisweb/rescript-toolkit 5.46.4 → 5.46.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -62,7 +62,7 @@ extract_arg() {
   value=$3
   if [ "--$name" != "$passed" ]; then
     echo "missing argument $name"
-     exit 1
+     return 1
   fi
   eval $name='$value'
 }
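
Note on the change above: these helpers are sourced into the caller's interactive shell, so `exit 1` would close the whole session while `return 1` only fails the function. A minimal sketch of the difference (the `demo` names are illustrative, not part of the toolkit):

    demo_exit()   { exit 1; }     # when sourced, this kills the calling shell
    demo_return() { return 1; }   # when sourced, this just sets a nonzero status
    demo_return || echo "failed with status $?"   # prints: failed with status 1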
@@ -92,63 +92,6 @@ aws_ecr_token() {
   aws ecr get-authorization-token --region=eu-west-1 --output text --query 'authorizationData[].authorizationToken'
 }

- # you will need jq to use these commands. You can install it using "brew install jq"
- # delete_images colisweb_api 8
- # will delete images older than 8 weeks
- delete_images() {
-
-   REPO=$1
-   WEEKS=${2:-16}
-
-   WEEKS_AGO=$(date -v-${WEEKS}w +%F)
-
-   # Get all ECR images
-   IMAGES=$(aws ecr describe-images --repository-name $REPO --output json)
-
-   # Filter out images tagged "latest"
-   NON_LATEST_IMAGES=$(echo $IMAGES | jq '[.imageDetails[] | select(.imageTags | any(endswith("latest")) | not)]')
-
-   # Keep only images pushed before the cutoff date
-   OLD_IMAGES=$(echo $NON_LATEST_IMAGES | jq --arg date $WEEKS_AGO '.[] | select(.imagePushedAt[0:10] < $date).imageDigest')
-   while IFS= read -r IMAGE; do
-     if [ "$IMAGE" != "" ]; then
-       echo "Deleting $IMAGE from $REPO"
-       AWS_PAGER="" aws ecr batch-delete-image --repository-name $REPO --image-ids imageDigest=$IMAGE
-     fi
-   done <<< "$OLD_IMAGES"
- }
-
- # delete_images_all_repos 12
- # will delete images in all repositories older than 12 weeks
- delete_images_all_repos() {
-   REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[]|.[].repositoryName')
-
-   while IFS= read -r REPO; do
-     echo "processing ECR repository $REPO"
-     delete_images $REPO $1
-   done <<< "$REPOSITORIES"
- }
-
- delete_old_cache() {
-   DATE=${1:-$(date -v-1m +%F)}
-   CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}
-
-   echo "deleting from cache $CACHE_BUCKET all older than $DATE"
-
-   aws_ecr_login
-
-   while read -r line; do
-     datum=$(echo $line | cut -c1-10)
-     if [[ "$datum" < "$DATE" ]] ; then
-       # Shell Parameter Expansion: ${parameter##word}
-       # Strips the longest match of "word" from the front of "parameter" and returns the rest
-       # Here we need the end of the string after "project/" (the S3 GitLab project id and filename)
-       TO_DELETE="$CACHE_BUCKET${line##* project/}"
-       echo $TO_DELETE
-       aws s3 rm $TO_DELETE
-     fi
-   done < <(aws s3 ls $CACHE_BUCKET --recursive)
- }

 #!/usr/bin/env bash

@@ -250,11 +193,11 @@ entries:
   cronjob:
 EOT

- # helm3 stable repo have changed and must be updated manually, in versions < v2.17.0
- helm3 repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
- helm3 repo add stable https://charts.helm.sh/stable --force-update
- helm3 repo update
- helm3 dependency update ${ROOT_PATH}/${CHART_PATH}
+ # the helm stable repo has changed and must be updated manually in versions < v2.17.0
+ helm repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
+ helm repo add stable https://charts.helm.sh/stable --force-update
+ helm repo update
+ helm dependency update ${ROOT_PATH}/${CHART_PATH}

 # Gather values/*.yaml files
 VALUES_PATH="${ROOT_PATH}/${CHART_NAME}/values"
@@ -262,7 +205,7 @@ EOT
 [ -d $VALUES_PATH ] && VALUES_FILES=$(find $VALUES_PATH -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')

 # Deploy
- helm3 upgrade --install \
+ helm upgrade --install \
   --namespace ${ENVIRONMENT} \
   ${VALUES_FILES} \
   -f ${ROOT_PATH}/${CONFIG_PATH}/common.yaml \
@@ -272,7 +215,7 @@ EOT
   ${CHART_NAME} ${ROOT_PATH}/${CHART_PATH}


- verify_deployments_v3 -t 10m $ENVIRONMENT $CHART_NAME
+ verify_deployments -t 10m $ENVIRONMENT $CHART_NAME

 }

@@ -509,12 +452,12 @@ configure_kubectl_for() {
 database_k8s() {
   MODE=$1
   case $MODE in
-     "tests") SSH_LOCAL_PORT=2224;PG_LOCAL_PORT=24440;CA_LOCAL_PORT=25430;ENV="tests";;
-     "testing") SSH_LOCAL_PORT=2225;PG_LOCAL_PORT=24441;CA_LOCAL_PORT=25431;ENV="testing";;
-     "staging") SSH_LOCAL_PORT=2226;PG_LOCAL_PORT=24442;CA_LOCAL_PORT=25432;ENV="staging";;
-     "production") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24443;CA_LOCAL_PORT=25433;ENV="production";;
-     "production_rw") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24444;CA_LOCAL_PORT=25434;ENV="production";;
-     "recette") SSH_LOCAL_PORT=2228;PG_LOCAL_PORT=24446;CA_LOCAL_PORT=25436;ENV="recette";;
+     "tests") SSH_LOCAL_PORT=2224;COMP_LOCAL_PORT=25550;PG_LOCAL_PORT=24440;CA_LOCAL_PORT=25430;ENV="tests";;
+     "testing") SSH_LOCAL_PORT=2225;COMP_LOCAL_PORT=25551;PG_LOCAL_PORT=24441;CA_LOCAL_PORT=25431;ENV="testing";;
+     "staging") SSH_LOCAL_PORT=2226;COMP_LOCAL_PORT=25552;PG_LOCAL_PORT=24442;CA_LOCAL_PORT=25432;ENV="staging";;
+     "production") SSH_LOCAL_PORT=2227;COMP_LOCAL_PORT=25553;PG_LOCAL_PORT=24443;CA_LOCAL_PORT=25433;ENV="production";;
+     "production_rw") SSH_LOCAL_PORT=2227;COMP_LOCAL_PORT=25554;PG_LOCAL_PORT=24444;CA_LOCAL_PORT=25434;ENV="production";;
+     "recette") SSH_LOCAL_PORT=2228;COMP_LOCAL_PORT=25556;PG_LOCAL_PORT=24446;CA_LOCAL_PORT=25436;ENV="recette";;
     *) echo "Unsupported ENV : $MODE"; return 1 ;;
   esac

@@ -535,23 +478,28 @@ database_k8s() {
   HostName 127.0.0.1
   Port 2225
   LocalForward 24441 toutatis-testing-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
+   LocalForward 25551 toutatis-testing-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
   LocalForward 25431 toutatis-testing-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
   LocalForward 25531 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
+   LocalForward 25561 toutatis-testing-oracle-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:1521
 Host bastion_staging
   HostName 127.0.0.1
   Port 2226
   LocalForward 24442 toutatis-staging-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
+   LocalForward 25552 toutatis-staging-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
   LocalForward 25432 toutatis-staging-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
 Host bastion_recette
   HostName 127.0.0.1
   Port 2228
   LocalForward 24446 toutatis-recette-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
+   LocalForward 25556 toutatis-recette-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
   LocalForward 25436 toutatis-recette-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
   LocalForward 25536 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
 Host bastion_production
   HostName 127.0.0.1
   Port 2227
   LocalForward 24443 toutatis-production-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
+   LocalForward 25553 toutatis-production-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
   LocalForward 25433 toutatis-production-mysql-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
 EOF
 if [ "$MODE" = "production_rw" ] ; then
@@ -565,6 +513,7 @@ EOF
   -F "$bastion_config" \
   "bastion_$ENV"

+ echo "sample command (composite) : 'psql postgres://postgres@127.0.0.1:$COMP_LOCAL_PORT'"
 echo "sample command : 'psql postgres://postgres@127.0.0.1:$PG_LOCAL_PORT'"
 echo "sample command : 'mysql -u colisweb -h 127.0.0.1 -P $CA_LOCAL_PORT -p db_name'"

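Once `database_k8s <env>` has the tunnel up, the new composite forward works like the existing samples; a sketch for the testing environment (database name is a placeholder):

    psql postgres://postgres@127.0.0.1:25551/some_database   # composite DB via LocalForward 25551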
@@ -908,14 +857,29 @@ EOF
 }
 #!/usr/bin/env bash

+ function kstatus() {
+   if [ -z "$3" ]
+   then
+     configure_kubectl_for $1 && watch -n 1 "kubectl -n $1 get $2"
+   else
+     configure_kubectl_for $1 && watch -n 1 "kubectl -n $1 get $2 | grep $3"
+   fi
+ }
+
+ #!/usr/bin/env bash
+
 k8_nodes_stats() {
-   kubectl get nodes -o name |
-     xargs kubectl describe |
-     grep "^Name\|workType\|cpu \|memory " |
-     sed -r 's/[ :=]+/\t/g' |
-     sed 's/\tworkType\t//g' |
-     sed -r 's/^Name/---\nName/g' |
-     grep --color "Name\|web\|workers\|cpu\|memory\|---"
+   ENV=${1:-testing}
+
+   configure_kubectl_for "${ENV}"
+
+   kubectl get nodes -o name |
+     xargs kubectl describe |
+     grep "^Name\|workType\|cpu \|memory " |
+     sed -r 's/[ :=]+/\t/g' |
+     sed 's/\tworkType\t//g' |
+     sed -r 's/^Name/---\nName/g' |
+     grep --color "Name\|web\|workers\|cpu\|memory\|---"
 }

 #!/usr/bin/env bash
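
`kstatus` combines `configure_kubectl_for` with a one-second `watch`; a usage sketch (namespace, resource, and filter are illustrative):

    kstatus testing pods          # watch all pods in the testing namespace
    kstatus testing pods api      # same, filtered through grep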
@@ -981,20 +945,59 @@ pod_copy_to() {

 pick_pod() {
   ENV=$1
-   POD_FILTER="pod/$2"
+   POD_FILTER=$2
   configure_kubectl_for $ENV

   if [ -z "$2" ] ; then
     kubectl -n $ENV get pods | gum filter | cut -f1 -d" "
   else
-     if PODS=$(kubectl -n $ENV get pods -o=name | grep "$POD_FILTER"); then
-       echo $PODS | head -1 | sed -e 's/pod\///'
+     if PODS=$(kubectl -n $ENV get pods | grep -m1 "$POD_FILTER" | cut -f1 -d" "); then
+       echo $PODS
     else
       echo "no pods found on $ENV matching $POD_FILTER" >&2
     fi
   fi
 }

+ # pods_resources $ENV
+ # Will output a CSV (;) of all deployments on this environment with cpu and memory requests and limits.
+ # Errors and null outputs are ignored and won't be in the output.
+ pods_resources() {
+   ENV=$1
+   configure_kubectl_for $ENV
+   DEPLOYMENTS=(
+     $(kubectl -n $ENV get deployments | grep -Eo '^[^ ]+' | grep -v 'NAME')
+   )
+   echo "deployment; request_cpu; request_memory; limits_cpu; limits_memory"
+   for D in "${DEPLOYMENTS[@]}"; do
+     info=$(kubectl -n $ENV get deployment -o yaml $D |
+       yq '.spec.template.spec.containers[].resources' |
+       yq '.L = .requests.cpu + "; " + .requests.memory + "; " + .limits.cpu + "; " + .limits.memory' |
+       yq ".L" 2>/dev/null)
+     if ! [ "$info" = "null" ]; then
+       echo "$D; $info"
+     fi
+   done
+ }
+
+ pods_strategies() {
+   ENV=$1
+   configure_kubectl_for $ENV
+   DEPLOYMENTS=(
+     $(kubectl -n $ENV get deployments | grep -Eo '^[^ ]+' | grep -v 'NAME')
+   )
+   echo "deployment; max_surge; max_unavailable"
+   for D in "${DEPLOYMENTS[@]}"; do
+     info=$(kubectl -n $ENV get deployment -o yaml $D |
+       yq '.spec.strategy' |
+       yq '.L = .rollingUpdate.maxSurge + "; " + .rollingUpdate.maxUnavailable' |
+       yq ".L" 2>/dev/null)
+     if ! [ "$info" = "null" ]; then
+       echo "$D; $info"
+     fi
+   done
+ }
+
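Both helpers print semicolon-separated CSV on stdout, so they compose with the usual tools; for instance (environment name illustrative):

    pods_resources testing | column -s ';' -t    # aligned view of requests and limits
    pods_strategies testing > strategies.csv     # keep a snapshot for review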
 #!/usr/bin/env bash

 bastion_config_for_redis_ca() {
@@ -1429,6 +1432,182 @@ spec:
 }


+ #!/usr/bin/env bash
+
+ # Usage info
+ show_help_shell() {
+   local help="""Usage: run_shell_k8s -s SCRIPT [-e ENV] [-c CONFIG] [-p POD] [-f FOLDER] [ARGS]
+ Create a k8s job executing a script
+
+   -h          display this help and exit
+   -s SCRIPT   run script SCRIPT on a pod (SCRIPT must be a .sh file)
+   -e ENV      opt. set execution environment (defaults to testing)
+   -c CONFIG   opt. secret file needed for the script (must be a .sh file, not a .secret file)
+   -p POD      opt. name of the pod to create (defaults to $USERNAME)
+   -f FOLDER   opt. name of the folder containing the scripts to execute (if SCRIPT needs other files)
+   ARGS        opt. additional arguments for SCRIPT
+
+ The organisation of the files must be the same locally as on the pod:
+   - /code contains the script to execute (arg -s) and the other needed files (if the arg -f is used, it must reference this directory)
+   - /conf contains the secret file (arg -c if used)
+ E.g. in the script \"/code/script.sh\", to use a secret file \"/conf/secret.sh\", load it with \"source /conf/secret.sh\"
+ """
+   echo "$help"
+ }
+
+ run_shell_k8s() {
+
+   # default values
+   local namespace="testing"
+   local name="$USERNAME"
+   local secret=""
+   local shell_folder=""
+   local shell_script=""
+
+   while getopts ":e:c:p:f:s:h" opt; do
+     case $opt in
+       e)
+         namespace="$OPTARG"
+         ;;
+       p)
+         name="$OPTARG"
+         ;;
+       c)
+         secret="$OPTARG"
+         ;;
+       f)
+         shell_folder="$OPTARG"
+         ;;
+       s)
+         shell_script="$OPTARG"
+         ;;
+       h)
+         show_help_shell
+         return 0
+         ;;
+       :)
+         echo "Option -$OPTARG requires an argument. Run run_shell_k8s -h for help" >&2
+         return 1
+         ;;
+       \?)
+         echo "Invalid option: -$OPTARG. Run run_shell_k8s -h for help" >&2
+         return 1
+         ;;
+     esac
+   done
+
+   if [ -z "$shell_script" ]; then
+     echo 'Missing -s. Run run_shell_k8s -h for help' >&2
+     return 1
+   fi
+
+   shift "$((OPTIND-1))"
+
+   local script_args=$(
+     if [ "$#" -gt 0 ] ; then
+       printf '"'
+       join_by '", "' $*
+       printf '"'
+     fi
+   )
+
+   local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/docker-infra-builder:v3.1.0"
+   local JOB_NAME="job-shell-$name"
+
+   if [[ ! -r "$shell_script" ]]; then
+     echo "shell script not found $shell_script"
+     return 2
+   else
+     local CONFIG_MAP="config-$JOB_NAME"
+     local CONFIG_MAP_DIR="$(mktemp -d)"
+     local SECRET_MAP="secret-$JOB_NAME"
+
+     configure_kubectl_for $namespace
+
+     if [[ ! -z $shell_folder && -d $shell_folder ]] ; then
+       cp -r "$shell_folder/" "$CONFIG_MAP_DIR"
+     fi
+     cp "$shell_script" "$CONFIG_MAP_DIR/script.sh"
+
+     kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
+     kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"
+
+     kubectl -n $namespace get secret $SECRET_MAP && kubectl -n $namespace delete secret $SECRET_MAP
+     kubectl -n $namespace create secret generic $SECRET_MAP --from-file="$secret"
+
+     kubectl -n $namespace get job $JOB_NAME && kubectl -n $namespace delete job $JOB_NAME
+
+     echo "starting $JOB_NAME with $IMAGE"
+   fi
+
+   JOB_DEFINITION='
+ apiVersion: batch/v1
+ kind: Job
+ metadata:
+   name: '$JOB_NAME'
+   namespace: '$namespace'
+ spec:
+   template:
+     spec:
+       containers:
+         - name: '$JOB_NAME'
+           command: ["bash", "/code/script.sh"]
+           image: '$IMAGE'
+           args: ['$script_args']
+           env:
+             - name: POD_NAME
+               valueFrom:
+                 fieldRef:
+                   apiVersion: v1
+                   fieldPath: metadata.name
+             - name: POD_NAMESPACE
+               valueFrom:
+                 fieldRef:
+                   apiVersion: v1
+                   fieldPath: metadata.namespace
+             - name: HOST_IP
+               valueFrom:
+                 fieldRef:
+                   apiVersion: v1
+                   fieldPath: status.hostIP
+           volumeMounts:
+             - name: config
+               mountPath: /code
+             - name: secret
+               mountPath: /conf
+               readOnly: true
+           resources:
+             requests:
+               cpu: 500m
+               memory: 256Mi
+             limits:
+               cpu: 4000m
+               memory: 1Gi
+       nodeSelector:
+         workType: workers
+       restartPolicy: Never
+       volumes:
+         - name: config
+           configMap:
+             name: '$CONFIG_MAP'
+         - name: secret
+           secret:
+             secretName: '$SECRET_MAP'
+         - name: stockage
+           emptyDir: {} # assumed scratch space; unused by the container
+ '
+
+   echo "$JOB_DEFINITION" > /tmp/job.yaml
+
+   kubectl -n $namespace apply -f /tmp/job.yaml
+ }
+
1611
  #!/usr/bin/env bash
1433
1612
 
1434
1613
  run_task() {
@@ -1547,6 +1726,423 @@ jwt_token() {
1547
1726
 
1548
1727
  #!/usr/bin/env bash
1549
1728
 
1729
+ alias update_devtool="git -C ~/.oh-my-zsh/custom/dev-tools/ pull"
1730
+
1731
+ SCRIPT_PATH=$SCRIPT_FULL_PATH/shell/run
1732
+ PATH="$PATH:$SCRIPT_PATH/script"
1733
+
1734
+ function get_token {
1735
+ local ENV=$1
1736
+ local LOGIN_FILE="$HOME/scriptlogin"
1737
+
1738
+ if [ ! -f "$LOGIN_FILE" ]; then
1739
+ cat > "$LOGIN_FILE" <<-'EOF'
1740
+ #!/bin/bash
1741
+ case $ENV in
1742
+ "testing")
1743
+ local BO_USERNAME=""
1744
+ local BO_PASSWORD=""
1745
+ ;;
1746
+ "recette")
1747
+ local BO_USERNAME=""
1748
+ local BO_PASSWORD=""
1749
+ ;;
1750
+ "staging")
1751
+ local BO_USERNAME=""
1752
+ local BO_PASSWORD=""
1753
+ ;;
1754
+ *)
1755
+ local BO_USERNAME=""
1756
+ local BO_PASSWORD=""
1757
+ echo "ENV ${ENV} inconu"
1758
+ return
1759
+ ;;
1760
+ esac
1761
+ EOF
1762
+ fi
1763
+
1764
+ source "${LOGIN_FILE}"
1765
+
1766
+ if [ -z "$BO_PASSWORD" ] || [ -z "$BO_USERNAME" ]
1767
+ then
1768
+ echo éditer le ficher "$LOGIN_FILE"
1769
+ return 1
1770
+ fi
1771
+
1772
+ curl -o /dev/null -D - "https://api.$ENV.colisweb.com/api/v6/authent/external/session" \
1773
+ --data-raw '{"username":"'"${BO_USERNAME}"'","password":"'"${BO_PASSWORD/\"/\\\"}"'"}' \
1774
+ --compressed 2> /dev/null | grep set-cook | sed -e 's/.*session=//g;s/;.*//g'
1775
+ }
1776
+
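`get_token` prints the session cookie value on stdout, so it composes with the order helpers below; a sketch (credentials must first be filled into ~/scriptlogin):

    TOKEN=$(get_token testing)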
+ function bash_array_to_json {
+   function join {
+     local IFS="$1"
+     shift
+     echo "$*"
+   }
+
+   echo '["'"$(join , $* | sed -e 's/,/","/g')"'"]' | jq
+ }
+
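For example:

    bash_array_to_json a b c    # prints ["a","b","c"], pretty-printed by jq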
+ function get_random_street {
+   local CODE_POSTAUX_ARG=${1:-59000}
+   IFS=',' read -r -a CODE_POSTAUX <<< "$CODE_POSTAUX_ARG"
+   for CODE_POSTAL in "${CODE_POSTAUX[@]}"; do
+     if [[ ! "$CODE_POSTAL" =~ ^[0-9]{5}$ ]]; then
+       echo "each CODE_POSTAL must be exactly 5 digits: $CODE_POSTAL"
+       return 1
+     fi
+   done
+   local CODE_POSTAL=$(echo "${CODE_POSTAUX[@]}" | tr " " "\n" | sort -u -R | head -n 1)
+
+   get_random_street_in_cp $CODE_POSTAL
+ }
+
+ function get_random_street_in_cp {
+   local CODE_POSTAL=$1
+
+   FILENAME="rue-$CODE_POSTAL.lst"
+   if [ ! -f "$FILENAME" ]; then
+     curl --output tmp1.gz https://adresse.data.gouv.fr/data/ban/adresses/latest/csv/adresses-"${CODE_POSTAL:0:2}".csv.gz
+     gzip -d tmp1.gz
+     cut -d\; -f3,5,6,8 tmp1 | sed "/;$CODE_POSTAL;/!d" > "$FILENAME"
+     rm tmp1
+   fi
+
+   sort -R "$FILENAME" | head -n 1
+ }
+
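Each cached line keeps columns 3, 5, 6 and 8 of the BAN CSV (house number, street, postal code, city), so a draw looks like this (output illustrative):

    get_random_street 59000,75001
    # e.g.: 12;Rue Nationale;59000;Lille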
+ function rand_slot {
+
+   local SCENARIO=$2
+   if [ -f "$SCENARIO" ]; then
+     source "$SCENARIO"
+   fi
+   local ORDER_DATE="$1"
+
+   DEFAULT=(
+     "06:00+01:00[Europe/Paris]-08:00+01:00[Europe/Paris]"
+     "08:00+01:00[Europe/Paris]-10:00+01:00[Europe/Paris]"
+     "10:00+01:00[Europe/Paris]-12:00+01:00[Europe/Paris]"
+     "16:00+01:00[Europe/Paris]-18:00+01:00[Europe/Paris]"
+     "18:00+01:00[Europe/Paris]-20:00+01:00[Europe/Paris]"
+   )
+   USAGE=${DELIVERY_SLOTS:-${DEFAULT[@]}}
+
+   IFS="-" read -r start_time end_time < <(echo "${USAGE[@]}" | tr " " "\n" | sort -u -R | head -n 1)
+
+   echo '{"start":"'"${ORDER_DATE}T${start_time}"'", "end":"'"${ORDER_DATE}T${end_time}"'" }'
+ }
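
Given a date, it prints a slot JSON drawn from DELIVERY_SLOTS (or the defaults above); one possible draw (the date is a placeholder):

    rand_slot 2024-05-01
    # {"start":"2024-05-01T06:00+01:00[Europe/Paris]", "end":"2024-05-01T08:00+01:00[Europe/Paris]" }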
+ function call_create_sfh_order {
+   local ENV=$1
+   local TOKEN=$2
+   source "$3"
+   local POS=$4
+   local BARCODES="$5"
+   local CODE_POSTAUX="$6"
+   local PACKAGES=$(echo "$BARCODES" | jq '[{
+     "barcode": .[],
+     "length": 20.0,
+     "height": 15.0,
+     "width": 4.0,
+     "weight": 1.5,
+     "description": "test parcel",
+     "options": [],
+     "productTypology": "Classical",
+     "packageType": "Parcel"
+   }]')
+
+   DELIVERY_OPTIONS_P='['
+   for option in "${DELIVERY_OPTIONS[@]}"; do
+     if [ "$DELIVERY_OPTIONS_P" != '[' ]; then
+       DELIVERY_OPTIONS_P+=", "
+     fi
+     DELIVERY_OPTIONS_P+="\"$option\""
+   done
+   DELIVERY_OPTIONS_P+=']'
+
+   IFS=";" read -r nu rue code_postal ville < <(get_random_street "$CODE_POSTAUX")
+
+   if [ -n "$PICKUP_STORE_CODE" ]; then
+     PICKUP_LOCATION='{
+       "type": "store",
+       "storeCode": "'"$PICKUP_STORE_CODE"'"
+     }'
+   elif [ -n "$PICKUP_WAREHOUSE_CODE" ]; then
+     PICKUP_LOCATION='{
+       "type": "Warehouse",
+       "warehouseCode": "'"$PICKUP_WAREHOUSE_CODE"'"
+     }'
+   else
+     echo "PICKUP_WAREHOUSE_CODE or PICKUP_STORE_CODE must be defined in $3"
+     return 1
+   fi
+   JSON='{
+     "primaryOrderReference": "'"${PRIMARY_REF}${POS}"'",
+     "secondaryOrderReference": null,
+     "stages": [
+       {
+         "type": "Pickup",
+         "packageBarcodes": '"$BARCODES"',
+         "location": '"$PICKUP_LOCATION"'
+       },
+       {
+         "type": "Dropoff",
+         "packageBarcodes": '"$BARCODES"',
+         "location": {
+           "type": "Address",
+           "address": {
+             "address1": "'"$nu $rue"'",
+             "postalCode": "'"$code_postal"'",
+             "city": "'"$ville"'",
+             "country": "France",
+             "floor": 0,
+             "lift": "with_lift"
+           },
+           "contact": {
+             "name": "John Doe",
+             "primaryPhone": "+33606060606"
+           }
+         }
+       }
+     ],
+     "packages": '"$PACKAGES"',
+     "owner": {
+       "accountIdentifier": "'$ACCOUNT_IDENTIFIER'"
+     },
+     "deliveryOptions": '"$DELIVERY_OPTIONS_P"',
+     "ecommerceValidationDate": "'"${ORDER_DATE}"'"
+   }'
+
+   RESULT=$(curl -s -X POST https://api.$ENV.colisweb.com/api/v6/order/external/warehouse/orders -H 'content-type: application/json' --cookie session="$TOKEN" --data-raw "$JSON")
+   ORDER_ID=$(jq ".orderId" -r <<< "$RESULT")
+
+   echo "new order: https://bo.$ENV.colisweb.com/admin/orders/$ORDER_ID" >&2
+
+   echo "$RESULT"
+ }
+
+
+ function call_scan {
+   local ENV=$1
+   local TOKEN=$2
+   source "$3"
+   local BARCODES="$4"
+   local SCAN=$(echo "$BARCODES" | jq '[{"barcode": .[], "context": "shuttle"}]')
+
+   JSON='{"scans":'$SCAN'}'
+
+   curl -X POST https://api.$ENV.colisweb.com/api/v6/parcel/external/units/scans/bulk -H 'content-type: application/json' --cookie session="$TOKEN" --data-raw "$JSON"
+ }
+
+ function call_register_delivery {
+   local ENV=$1
+   local TOKEN=$2
+
+   SCENARIO=$3
+   source "$SCENARIO"
+
+   local ORDER_ID=$4
+   local BARCODES="$5"
+
+   DATA='{
+     "slot": '"$(rand_slot "${DELIVERY_DATE}" "$SCENARIO")"',
+     "storeIdOwner": "'"$STORE_ID_OWNER"'",
+     "pickup": {"type":"hub","code":"'"$HUB"'"},
+     "barcodes": '"$BARCODES"',
+     "price": {"origin":"auto","amount":25.9},
+     "allowCustomerSlotUpdate": false,
+     "withForcedSlot": false
+   }'
+
+   curl -X POST https://api.$ENV.colisweb.com/api/v6/order/external/warehouse/orders/"$ORDER_ID"/deliveries \
+     --cookie session="$TOKEN" --data-raw "$DATA"
+ }
+
+
+
+ function _create_scenario_file_if_not_exist () {
+   if [ ! -f "$SCENARIO" ]
+   then
+     cat > "$SCENARIO" <<-'EOF'
+ DELIVERY_DATE=$(date -v+7d '+%Y-%m-%d') # requested delivery date: today + 7 days
+ # replace -v+7d with -v+1d for a delivery scheduled tomorrow
+ # only used by create_many_sfh_order_and_delivery
+ ENV="testing" # environment the scripts run against
+ # ENV="staging"
+ # ENV="recette"
+
+ ACCOUNT_IDENTIFIER="102" # owner.accountIdentifier used when creating the order
+ # on the api/v6/order/external/warehouse/orders call
+ # (order creation)
+ HUB="duck" # pickup.code parameter (type is "hub")
+ # on the api/v6/order/external/warehouse/orders call
+ STORE_ID_OWNER="184" # pickup.storeIdOwner parameter
+ # on the api/v6/order/external/warehouse/orders call
+ # PICKUP_STORE_CODE="2" # uncomment to pick up from a store
+ PICKUP_WAREHOUSE_CODE="422" # pick up from a warehouse
+
+ BARCODES_COUNT=5 # number of packages
+ PREF="aaaa" # must be 4 characters, used to generate the package barcodes
+
+ CODE_POSTAUX=("59000" "75001") # postal codes a random address will be picked from
+ # (order creation)
+ DELIVERY_SLOTS=( # delivery time slots, one is picked at random
+   "06:00+01:00[Europe/Paris]-08:00+01:00[Europe/Paris]"
+   "08:00+01:00[Europe/Paris]-10:00+01:00[Europe/Paris]"
+   "10:00+01:00[Europe/Paris]-12:00+01:00[Europe/Paris]"
+   "16:00+01:00[Europe/Paris]-18:00+01:00[Europe/Paris]"
+   "18:00+01:00[Europe/Paris]-20:00+01:00[Europe/Paris]"
+ )
+
+ # DELIVERY_OPTIONS=("skill1" "skill2") # list of skill names - uncomment to use
+
+ # normally nothing below needs to change
+ ORDER_DATE=$(date '+%Y-%m-%d') # today's date
+ RAND=$(date +%y%m%d%H%M%S) # pseudo-random value (here based on the date), must be 17 characters
+ BARCODE_PART=0000$RAND # used to generate the barcodes, which run from
+ # {BARCODE_PART}{00000} to {BARCODE_PART}{BARCODES_COUNT}
+ PRIMARY_REF=$PREF$RAND # primaryOrderReference of the order
+ EOF
+     echo "edit the file $SCENARIO"
+     return 1
+   fi
+ }
+
+ #!/usr/bin/env bash
+
+ cleanup_merged_mr() {
+   COLISWEB_IDL_GROUP=3054234
+
+   BEFORE=${1:-$(date -I -v-2y)}
+
+   for (( COUNTER=1; COUNTER<=12; COUNTER+=2 )); do
+     cleanup_grouped_merged_mr $COLISWEB_IDL_GROUP $BEFORE $COUNTER &
+   done
+ }
+
+ cleanup_grouped_merged_mr() {
+   GROUP=$1
+   BEFORE=$2
+   PAGE_COUNT=$3
+   MERGED_MRS=($(curl --header "PRIVATE-TOKEN: $GITLAB_PAT" \
+     --url "https://gitlab.com/api/v4/groups/$GROUP/merge_requests?updated_before=${BEFORE}T08:00:00Z&status=merged&per_page=50&page=$PAGE_COUNT" |
+     jq -r '.[] | {iid: .iid|tostring, pid: .project_id|tostring} | (.pid + "/merge_requests/" + .iid)'))
+
+   for MR in ${MERGED_MRS[@]}; do
+     echo "https://gitlab.com/api/v4/projects/$MR"
+     curl --request DELETE \
+       --header "PRIVATE-TOKEN: $GITLAB_PAT" \
+       --url "https://gitlab.com/api/v4/projects/$MR"
+   done
+ }
+
+ # you will need jq to use these commands. You can install it using "brew install jq"
+ # cleanup_all_ecr_images 12
+ # will delete images in all repositories older than 12 weeks
+ # cleanup_single_ecr_repository YYYY-MM-DD colisweb-api
+ # will delete images pushed before YYYY-MM-DD in the colisweb-api repository
+ cleanup_all_ecr_images() {
+   WEEKS=$1
+
+   # try BSD date first, fall back to GNU date
+   CLEAN_BEFORE=$(date -v-${WEEKS}w +%F || date --date="-${WEEKS} weeks" +'%Y-%m-%d')
+   REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[] | .[].repositoryName')
+
+   while read -r REPOSITORY; do
+     echo "processing ECR repository $REPOSITORY before $CLEAN_BEFORE"
+     cleanup_single_ecr_repository "$CLEAN_BEFORE" "$REPOSITORY"
+   done <<< "$REPOSITORIES"
+ }
+
+ cleanup_single_ecr_repository() {
+   BEFORE=$1
+   REPOSITORY=$2
+
+   echo "getting tags for repository $REPOSITORY before $BEFORE"
+
+   ALL_TAGS=$(aws ecr describe-images --repository-name "$REPOSITORY" --output json |
+     jq '.imageDetails' |
+     jq '. |= sort_by(.imagePushedAt)' |
+     jq --arg date $BEFORE '.[] | select(.imagePushedAt[0:10] < $date)' |
+     jq 'select((.imageTags != null) or (.imageTags == []))' |
+     jq 'select(.imageTags | any(endswith("latest")) | not)' |
+     jq -r '.imageTags | join(" ")' |
+     sort -u)
+
+   if [ -z "${ALL_TAGS}" ]; then
+     echo "no tag to delete for repository $REPOSITORY"
+   else
+     echo "deleting $(echo "$ALL_TAGS" | wc -l) tags for $REPOSITORY"
+
+     while read image_tags; do
+       SINGLE_TAG=$(echo $image_tags | grep -o '^\S*')
+
+       DIGESTS_TO_DELETE=$(docker buildx imagetools inspect \
+         949316342391.dkr.ecr.eu-west-1.amazonaws.com/$REPOSITORY:$SINGLE_TAG --raw |
+         jq -r '[.manifests | .[].digest] | join(" imageDigest=") | "imageDigest=" + .' ||
+         echo "")
+
+       TAGS_TO_DELETE=$(echo "$image_tags" | sed 's/[^ ]* */imageTag=&/g')
+
+       export AWS_PAGER=""
+
+       aws ecr batch-delete-image --repository-name "$REPOSITORY" --image-ids $(echo $TAGS_TO_DELETE) > /dev/null 2>&1
+       test -z "$DIGESTS_TO_DELETE" ||
+         aws ecr batch-delete-image --repository-name "$REPOSITORY" --image-ids $(echo $DIGESTS_TO_DELETE) > /dev/null 2>&1
+     done <<< "$ALL_TAGS"
+
+     echo "deleted $(echo "$ALL_TAGS" | wc -l) tags"
+   fi
+
+ }
+
+
+ cleanup_ci_cache() {
+   DATE=${1:-$(date -v-1m +%F)}
+   CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}
+
+   echo "deleting from cache $CACHE_BUCKET all older than $DATE"
+
+   aws_ecr_login
+
+   while read -r line; do
+     datum=$(echo $line | cut -c1-10)
+     if [[ "$datum" < "$DATE" ]] ; then
+       # Shell Parameter Expansion: ${parameter##word}
+       # Strips the longest match of "word" from the front of "parameter" and returns the rest
+       # Here we need the end of the string after "project/" (the S3 GitLab project id and filename)
+       TO_DELETE="$CACHE_BUCKET${line##* project/}"
+       echo $TO_DELETE
+       aws s3 rm $TO_DELETE
+     fi
+   done < <(aws s3 ls $CACHE_BUCKET --recursive)
+ }
+
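The `${parameter##word}` expansion used above strips the longest prefix matching `word`; a self-contained illustration (sample line invented):

    line="2024-01-15 10:42:11    1234 project/42/some/cache.zip"
    echo "${line##* project/}"    # prints: 42/some/cache.zip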
+ cleanup_batch_definitions() {
+   DEFINITION_NAME=$1
+   ARNs=($(
+     aws batch describe-job-definitions \
+       --status ACTIVE \
+       --job-definition-name "$DEFINITION_NAME" |
+       jq '.jobDefinitions | sort_by(-.revision)' |
+       jq 'del(.[0])' |
+       jq -r '.[] | .jobDefinitionArn'
+   ))
+   for A in ${ARNs[@]}; do
+     echo "deregister $A"
+     aws batch deregister-job-definition --job-definition $A
+   done
+   echo "cleaned up all definitions except latest"
+ }
+ #!/usr/bin/env bash
+
 ftp_ikea_k8s() {
   SSH_LOCAL_PORT=2230
   FTP_LOCAL_PORT=25500
@@ -2000,15 +2596,15 @@ extract_yaml_config_variable() {

   if [ ! -f ${CONFIGS_PATH}/common.yaml ]; then
     echo >&2 "Missing $CONFIGS_PATH/common.yaml configuration file"
-     exit 1
+     return 1
   fi
   if [ ! -f ${CONFIGS_PATH}/${ENVIRONMENT}.yaml ]; then
     echo >&2 "Missing $CONFIGS_PATH/$ENVIRONMENT.yaml configuration file"
-     exit 1
+     return 1
   fi
   if [ ! -f ${CONFIGS_PATH}/${ENVIRONMENT}-secrets.yaml ]; then
     echo >&2 "Missing $CONFIGS_PATH/$ENVIRONMENT-secrets.yaml configuration file"
-     exit 1
+     return 1
   fi

   result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/$ENVIRONMENT-secrets.yaml")
@@ -2019,10 +2615,10 @@ extract_yaml_config_variable() {
   if [ $? -ne 0 ] || [ "$result" = "null" ]; then
     if [ $OPTIONAL = true ]; then
       echo ""
-       exit 0
+       return 0
     else
       echo >&2 "Missing path $VARIABLE in $CONFIGS_PATH/$ENVIRONMENT-secrets.yaml, $CONFIGS_PATH/$ENVIRONMENT.yaml or $CONFIGS_PATH/common.yaml"
-       exit 1
+       return 1
     fi
   fi
 fi
@@ -2244,11 +2840,11 @@ git_reveal() {
 }
 #!/usr/bin/env bash

- helm_deploy_v3() {
+ helm_deploy() {
   APPLICATION=$1
   ENVIRONMENT=$2
   VERSION=$3
-   deploy_chart_v3 \
+   deploy_chart \
     --path_configs deploy \
     --path_chart deploy/$APPLICATION \
     --application $APPLICATION \
@@ -2257,7 +2853,7 @@ helm_deploy_v3() {
     --helm_extra_args --set global.version=$VERSION
 }

- deploy_chart_v3() {
+ deploy_chart() {
   set -e
   set -x

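`helm_deploy` is the short entry point over `deploy_chart`; a sketch (application, environment, and version are placeholders; a chart is expected under deploy/<application>):

    helm_deploy my-api staging 1.4.2   # ends up as: deploy_chart --path_chart deploy/my-api ... --set global.version=1.4.2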
@@ -2295,7 +2891,7 @@ deploy_chart_v3() {
   if [ ! -d ${root_path}/${path_chart} ] || [ ! -f ${root_path}/${path_chart}/Chart.yaml ]; then
     echo "Bad Chart $root_path/$path_chart : does not exist or is missing Chart.yaml"
     print_usage
-     exit 1
+     return 1
   fi

   # Unset Kubectl configuration made via the KUBECONFIG env variable
@@ -2306,13 +2902,13 @@ deploy_chart_v3() {
   # Configure Kubectl
   configure_kubectl_for ${environment}

-   # Configure helm3
-   helm3 version --namespace ${namespace} || true
-   # helm3 stable repo have changed and must be updated manually, in versions < v2.17.0
-   helm3 repo add colisweb s3://colisweb-helm-charts/colisweb
-   helm3 repo add stable https://charts.helm.sh/stable
-   helm3 repo update
-   helm3 dependency update ${root_path}/${path_chart}
+   # Configure helm
+   helm version --namespace ${namespace} || true
+   # the helm stable repo has changed and must be updated manually in versions < v2.17.0
+   helm repo add colisweb s3://colisweb-helm-charts/colisweb
+   helm repo add stable https://charts.helm.sh/stable
+   helm repo update
+   helm dependency update ${root_path}/${path_chart}

   # Gather values/*.yaml files
   values_path="${root_path}/${path_chart}/values"
@@ -2320,7 +2916,7 @@ deploy_chart_v3() {
   [ -d $values_path ] && values_files=$(find $values_path -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')

   # Deploy
-   helm3 upgrade --install \
+   helm upgrade --install \
     --namespace ${namespace} \
     ${values_files} \
     -f ${root_path}/${path_configs}/common.yaml \
@@ -2342,7 +2938,7 @@ deploy_chart_v3() {
   set +x
 }

- verify_deployments_v3() {
+ verify_deployments() {
   set -e

   # usage :
@@ -2362,7 +2958,7 @@ verify_deployments_v3() {

   # Get all Deployments names from the deployed chart
   DEPLOYMENTS=(
-     $(helm3 get manifest --namespace $NAMESPACE $RELEASE | yq -rs '.[] | select(.kind=="Deployment") | .metadata.name')
+     $(helm get manifest --namespace $NAMESPACE $RELEASE | yq --no-doc -r 'select(.kind=="Deployment").metadata.name')
   )

   echo "verifying on $NAMESPACE deployments ${DEPLOYMENTS[@]} with a timeout of $TIMEOUT"
@@ -2378,7 +2974,7 @@ verify_deployments_v3() {

     if [ $? -ne 0 ]; then
       echo "at least one deployment failed or timed out (after $TIMEOUT)"
-       exit 1
+       return 1
     fi
   done

@@ -2401,7 +2997,7 @@ check_config_file() {
   if [ ! -f ${filename} ]; then
     echo "Missing $filename configuration file"
     print_usage
-     exit 1
+     return 1
   fi
 }

@@ -2626,7 +3222,7 @@ emit_datadog_deploy_event() {
     echo "event successfully created, check the Datadog UI : $url"
   else
     echo "failed to create event"
-     exit 1
+     return 1
   fi
 }