@colisweb/rescript-toolkit 5.37.1 → 5.37.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -92,63 +92,6 @@ aws_ecr_token() {
  aws ecr get-authorization-token --region=eu-west-1 --output text --query 'authorizationData[].authorizationToken'
  }

- # you will need jq to use these commands. You can install it using "brew install jq"
- # delete_images colisweb_api 8
- # will delete images older than 8 weeks
- delete_images() {
-
- REPO=$1
- WEEKS=${2:-16}
-
- WEEKS_AGO=$(date -v-${WEEKS}w +%F)
-
- #Get all ecr images
- IMAGES=$(aws ecr describe-images --repository-name $REPO --output json)
-
- #Filter unnecessary values and map `imagePushedAt` to EPOCH
- NON_LATEST_IMAGES=$(echo $IMAGES | jq '[.imageDetails[] | select(.imageTags | any(endswith("latest")) | not)]')
-
- #Filter on EPOCH
- OLD_IMAGES=$(echo $NON_LATEST_IMAGES | jq --arg date $WEEKS_AGO '.[] | select(.imagePushedAt[0:10] < $date).imageDigest')
- while IFS= read -r IMAGE; do
- if [ "$IMAGE" != "" ]; then
- echo "Deleting $IMAGE from $REPO"
- AWS_PAGER="" aws ecr batch-delete-image --repository-name $REPO --image-ids imageDigest=$IMAGE
- fi
- done <<< "$OLD_IMAGES"
- }
-
- # delete_images_all_repos 12
- # will delete images in all repositories older than 12 weeks
- delete_images_all_repos() {
- REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[]|.[].repositoryName')
-
- while IFS= read -r REPO; do
- echo "processing ECR repository $REPO"
- delete_images $REPO $1
- done <<< "$REPOSITORIES"
- }
-
- delete_old_cache() {
- DATE=${1:-$(date -v-1m +%F)}
- CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}
-
- echo "deleting from cache $CACHE_BUCKET all older than $DATE"
-
- aws_ecr_login
-
- while read -r line; do
- datum=$(echo $line | cut -c1-10)
- if [[ "$datum" < "$DATE" ]] ; then
- # Shell Parameter Expansion: ${parameter##word}
- # Allow to return the result from "word" to the end of "parameters"
- # Here we need the end of the string after "project/" (corresponding to the S3 gitlab project id and filename)
- TO_DELETE="$CACHE_BUCKET${line##* project/}"
- echo $TO_DELETE
- aws s3 rm $TO_DELETE
- fi
- done < <(aws s3 ls $CACHE_BUCKET --recursive)
- }

  #!/usr/bin/env bash

@@ -250,11 +193,11 @@ entries:
  cronjob:
  EOT

- # helm3 stable repo have changed and must be updated manually, in versions < v2.17.0
- helm3 repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
- helm3 repo add stable https://charts.helm.sh/stable --force-update
- helm3 repo update
- helm3 dependency update ${ROOT_PATH}/${CHART_PATH}
+ # the helm stable repo URL has changed and must be set manually for helm versions < v2.17.0
+ helm repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
+ helm repo add stable https://charts.helm.sh/stable --force-update
+ helm repo update
+ helm dependency update ${ROOT_PATH}/${CHART_PATH}

  # Gather values/*.yaml files
  VALUES_PATH="${ROOT_PATH}/${CHART_NAME}/values"
@@ -262,7 +205,7 @@ EOT
  [ -d $VALUES_PATH ] && VALUES_FILES=$(find $VALUES_PATH -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')

  # Deploy
- helm3 upgrade --install \
+ helm upgrade --install \
  --namespace ${ENVIRONMENT} \
  ${VALUES_FILES} \
  -f ${ROOT_PATH}/${CONFIG_PATH}/common.yaml \
@@ -272,7 +215,7 @@ EOT
  ${CHART_NAME} ${ROOT_PATH}/${CHART_PATH}


- verify_deployments_v3 -t 10m $ENVIRONMENT $CHART_NAME
+ verify_deployments -t 10m $ENVIRONMENT $CHART_NAME

  }

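Taken together, the deploy step in these hunks reduces to the following flow (a condensed sketch with a placeholder chart name and paths, not the full function):

    helm repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
    helm repo add stable https://charts.helm.sh/stable --force-update
    helm repo update
    helm dependency update ./deploy/my-chart       # hypothetical chart path
    helm upgrade --install --namespace testing \
      -f ./deploy/config/common.yaml \
      my-chart ./deploy/my-chart
    verify_deployments -t 10m testing my-chart     # defined further down in this file
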
@@ -509,12 +452,12 @@ configure_kubectl_for() {
  database_k8s() {
  MODE=$1
  case $MODE in
- "tests") SSH_LOCAL_PORT=2224;PG_LOCAL_PORT=24440;CA_LOCAL_PORT=25430;ENV="tests";;
- "testing") SSH_LOCAL_PORT=2225;PG_LOCAL_PORT=24441;CA_LOCAL_PORT=25431;ENV="testing";;
- "staging") SSH_LOCAL_PORT=2226;PG_LOCAL_PORT=24442;CA_LOCAL_PORT=25432;ENV="staging";;
- "production") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24443;CA_LOCAL_PORT=25433;ENV="production";;
- "production_rw") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24444;CA_LOCAL_PORT=25434;ENV="production";;
- "recette") SSH_LOCAL_PORT=2228;PG_LOCAL_PORT=24446;CA_LOCAL_PORT=25436;ENV="recette";;
+ "tests") SSH_LOCAL_PORT=2224;COMP_LOCAL_PORT=25550;PG_LOCAL_PORT=24440;CA_LOCAL_PORT=25430;ENV="tests";;
+ "testing") SSH_LOCAL_PORT=2225;COMP_LOCAL_PORT=25551;PG_LOCAL_PORT=24441;CA_LOCAL_PORT=25431;ENV="testing";;
+ "staging") SSH_LOCAL_PORT=2226;COMP_LOCAL_PORT=25552;PG_LOCAL_PORT=24442;CA_LOCAL_PORT=25432;ENV="staging";;
+ "production") SSH_LOCAL_PORT=2227;COMP_LOCAL_PORT=25553;PG_LOCAL_PORT=24443;CA_LOCAL_PORT=25433;ENV="production";;
+ "production_rw") SSH_LOCAL_PORT=2227;COMP_LOCAL_PORT=25554;PG_LOCAL_PORT=24444;CA_LOCAL_PORT=25434;ENV="production";;
+ "recette") SSH_LOCAL_PORT=2228;COMP_LOCAL_PORT=25556;PG_LOCAL_PORT=24446;CA_LOCAL_PORT=25436;ENV="recette";;
  *) echo "Unsupported ENV : $MODE"; return 1 ;;
  esac

@@ -535,23 +478,27 @@ database_k8s() {
  HostName 127.0.0.1
  Port 2225
  LocalForward 24441 toutatis-testing-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
+ LocalForward 25551 toutatis-testing-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25431 toutatis-testing-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  LocalForward 25531 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  Host bastion_staging
  HostName 127.0.0.1
  Port 2226
  LocalForward 24442 toutatis-staging-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
+ LocalForward 25552 toutatis-staging-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25432 toutatis-staging-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  Host bastion_recette
  HostName 127.0.0.1
  Port 2228
  LocalForward 24446 toutatis-recette-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
+ LocalForward 25556 toutatis-recette-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25436 toutatis-recette-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  LocalForward 25536 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  Host bastion_production
  HostName 127.0.0.1
  Port 2227
  LocalForward 24443 toutatis-production-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
+ LocalForward 25553 toutatis-production-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25433 toutatis-production-mysql-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  EOF
  if [ "$MODE" = "production_rw" ] ; then
@@ -565,6 +512,7 @@ EOF
  -F "$bastion_config" \
  "bastion_$ENV"

+ echo "sample command (composite) : 'psql postgres://postgres@127.0.0.1:$COMP_LOCAL_PORT'"
  echo "sample command : 'psql postgres://postgres@127.0.0.1:$PG_LOCAL_PORT'"
  echo "sample command : 'mysql -u colisweb -h 127.0.0.1 -P $CA_LOCAL_PORT -p db_name'"

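Once the tunnel is up, the forwarded local ports line up with the case table above; a sample session for staging (host names per the SSH config above):

    database_k8s staging                      # opens the SSH tunnel via the bastion
    psql postgres://postgres@127.0.0.1:24442  # main staging Postgres
    psql postgres://postgres@127.0.0.1:25552  # staging composite Postgres
    mysql -u colisweb -h 127.0.0.1 -P 25432 -p db_name
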
@@ -578,7 +526,7 @@ psql_on_k8() {
  shift 3

  kubectl -n $NAMESPACE run ${SERVICE}-postgres-init \
- --image jbergknoff/postgresql-client \
+ --image postgres:17.5-alpine3.21 \
  --restart=Never \
  --attach --rm \
  -- \
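psql_on_k8 spins up a throwaway psql pod in the target namespace and forwards the remaining arguments to psql; a typical call, mirroring the database checks later in this file (the connection string here is a placeholder, not real credentials):

    psql_on_k8 testing myservice "user:pass@db-host:5432" -lqtA
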
@@ -612,8 +560,6 @@ kube_init_database_once() {
  echo " Initializing Database '$db_database' for namespace $namespace"
  echo "======================="

- set -x
-
  echo "Checking if Database '$db_database' exists"
  set +e
  psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -lqtA | cut -d\| -f1 | grep "^$db_database$"
@@ -822,7 +768,7 @@ kube_init_datadog_in_postgres_database() {
  SECURITY DEFINER;"

  kubectl -n $namespace run $service-postgres-init \
- --image jbergknoff/postgresql-client \
+ --image postgres:17.5-alpine3.21 \
  --restart=Never \
  --attach --rm \
  --command \
@@ -843,8 +789,6 @@ kube_init_service_database() {

  local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"

- set -x
-
  echo "Checking if Database '$db_database' exists"
  set +e
  psql_on_k8 $namespace $service $db_connection -lqtA | cut -d\| -f1 | grep "^$db_database$"
@@ -912,14 +856,29 @@ EOF
  }
  #!/usr/bin/env bash

+ function kstatus() {
+ if [ -z "$3" ]
+ then
+ configure_kubectl_for $1 && watch -n 1 "kubectl -n $1 get $2"
+ else
+ configure_kubectl_for $1 && watch -n 1 "kubectl -n $1 get $2 | grep $3"
+ fi
+ }
+
+ #!/usr/bin/env bash
+
  k8_nodes_stats() {
- kubectl get nodes -o name |
- xargs kubectl describe |
- grep "^Name\|workType\|cpu \|memory " |
- sed -r 's/[ :=]+/\t/g' |
- sed 's/\tworkType\t//g' |
- sed -r 's/^Name/---\nName/g' |
- grep --color "Name\|web\|workers\|cpu\|memory\|---"
+ ENV=${1:-testing}
+
+ configure_kubectl_for "${ENV}"
+
+ kubectl get nodes -o name |
+ xargs kubectl describe |
+ grep "^Name\|workType\|cpu \|memory " |
+ sed -r 's/[ :=]+/\t/g' |
+ sed 's/\tworkType\t//g' |
+ sed -r 's/^Name/---\nName/g' |
+ grep --color "Name\|web\|workers\|cpu\|memory\|---"
  }

  #!/usr/bin/env bash
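kstatus is configure_kubectl_for plus a one-second watch, with an optional grep filter as the third argument; sample invocations (namespace and filter are illustrative):

    kstatus testing pods                 # watch all pods in the testing namespace
    kstatus production deployments api   # watch deployments, filtered through grep api
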
@@ -999,6 +958,45 @@ pick_pod() {
  fi
  }

+ # pods_resources $ENV
+ # Will output a CSV (;) of all deployments on this environment with cpu and memory requests and limits
+ # Errors and null outputs are ignored and won't be in the output.
+ pods_resources() {
+ ENV=$1
+ configure_kubectl_for $ENV
+ DEPLOYMENTS=(
+ $(kubectl -n $ENV get deployments | grep -Eo '^[^ ]+' | grep -v 'NAME')
+ )
+ echo "deployment; request_cpu; request_memory; limits_cpu; limits_memory"
+ for D in "${DEPLOYMENTS[@]}"; do
+ info=$(kubectl -n $ENV get deployment -o yaml $D |
+ yq '.spec.template.spec.containers[].resources' |
+ yq '.L = .requests.cpu + "; " + .requests.memory + "; " + .limits.cpu + "; " + .limits.memory' |
+ yq ".L" 2>/dev/null)
+ if ! [ "$info" = "null" ]; then
+ echo "$D; $info"
+ fi
+ done
+ }
+
+ pods_strategies() {
+ ENV=$1
+ configure_kubectl_for $ENV
+ DEPLOYMENTS=(
+ $(kubectl -n $ENV get deployments | grep -Eo '^[^ ]+' | grep -v 'NAME')
+ )
+ echo "deployment; max_surge; max_unavailable"
+ for D in "${DEPLOYMENTS[@]}"; do
+ info=$(kubectl -n $ENV get deployment -o yaml $D |
+ yq '.spec.strategy' |
+ yq '.L = .rollingUpdate.maxSurge + "; " + .rollingUpdate.maxUnavailable' |
+ yq ".L" 2>/dev/null)
+ if ! [ "$info" = "null" ]; then
+ echo "$D; $info"
+ fi
+ done
+ }
+
  #!/usr/bin/env bash

  bastion_config_for_redis_ca() {
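Both helpers emit semicolon-separated rows, one per deployment that has the relevant fields set; the output is shaped like this (values illustrative):

    deployment; request_cpu; request_memory; limits_cpu; limits_memory
    api; 500m; 512Mi; 1; 1Gi
    worker; 250m; 256Mi; 500m; 512Mi
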
@@ -1551,6 +1549,378 @@ jwt_token() {

  #!/usr/bin/env bash

+ SCRIPT_PATH=$SCRIPT_FULL_PATH/shell/run
+ PATH="$PATH:$SCRIPT_PATH/script"
+
+ function get_token {
+ local ENV=$1
+ local LOGIN_FILE="$HOME/scriptlogin"
+
+ if [ ! -f "$LOGIN_FILE" ]; then
+ cat > "$LOGIN_FILE" <<-'EOF'
+ #!/bin/bash
+ case $ENV in
+ "testing")
+ local BO_USERNAME=""
+ local BO_PASSWORD=""
+ ;;
+ "recette")
+ local BO_USERNAME=""
+ local BO_PASSWORD=""
+ ;;
+ "staging")
+ local BO_USERNAME=""
+ local BO_PASSWORD=""
+ ;;
+ *)
+ local BO_USERNAME=""
+ local BO_PASSWORD=""
+ echo "unknown ENV ${ENV}"
+ return
+ ;;
+ esac
+ EOF
+ fi
+
+ source "${LOGIN_FILE}"
+
+ if [ -z "$BO_PASSWORD" ] || [ -z "$BO_USERNAME" ]
+ then
+ echo "edit the file $LOGIN_FILE"
+ return 1
+ fi
+
+ curl -o /dev/null -D - "https://api.$ENV.colisweb.com/api/v6/authent/external/session" \
+ --data-raw '{"username":"'"${BO_USERNAME}"'","password":"'"${BO_PASSWORD/\"/\\\"}"'"}' \
+ --compressed 2> /dev/null | grep set-cook | sed -e 's/.*session=//g;s/;.*//g'
+ }
+
+ function bash_array_to_json {
+ function join {
+ local IFS="$1"
+ shift
+ echo "$*"
+ }
+
+ echo '["'"$(join , $* | sed -e 's/,/","/g' )"'"]' | jq
+ }
+
+ function get_random_street {
+ local CODE_POSTAL=${1:-59000}
+ if [[ ! "$CODE_POSTAL" =~ ^[0-9]{5}$ ]]; then
+ echo "CODE_POSTAL must be exactly 5 digits"
+ exit 1
+ fi
+
+ FILENAME="rue-$CODE_POSTAL.lst"
+ if [ ! -f "$FILENAME" ]; then
+ curl --output tmp1.gz https://adresse.data.gouv.fr/data/ban/adresses/latest/csv/adresses-"${CODE_POSTAL:0:2}".csv.gz
+ gzip -d tmp1.gz
+ cut -d\; -f3,5,6,8 tmp1 | sed "/;$CODE_POSTAL;/!d" > "$FILENAME"
+ rm tmp1
+ fi
+
+ sort -R "$FILENAME" | head -n 1
+ }
+
+ function rand_slot {
+
+ local SCENARIO=$2
+ if [ -f "$SCENARIO" ]; then
+ source "$SCENARIO"
+ fi
+ local ORDER_DATE="$1"
+
+ DEFAULT=(
+ "06:00+01:00[Europe/Paris]-08:00+01:00[Europe/Paris]"
+ "08:00+01:00[Europe/Paris]-10:00+01:00[Europe/Paris]"
+ "10:00+01:00[Europe/Paris]-12:00+01:00[Europe/Paris]"
+ "16:00+01:00[Europe/Paris]-18:00+01:00[Europe/Paris]"
+ "18:00+01:00[Europe/Paris]-20:00+01:00[Europe/Paris]"
+ )
+ USAGE=${DELIVERY_SLOTS:-${DEFAULT[@]}}
+
+ IFS="-" read -r start_time end_time < <(echo "${USAGE[@]}" | tr " " "\n" | sort -u -R | head -n 1 )
+
+ echo '{"start":"'"${ORDER_DATE}T${start_time}"'", "end":"'"${ORDER_DATE}T${end_time}"'" }'
+ }
+
+ function call_create_sfh_order {
+ local ENV=$1
+ local TOKEN=$2
+ source "$3"
+ local POS=$4
+ local BARCODES="$5"
+ local CODE_POSTAL="$6"
+ local PACKAGES=$(echo "$BARCODES" | jq '[{
+ "barcode": .[],
+ "length": 10.5,
+ "height": 9.0,
+ "width": 9.0,
+ "weight": 10.11,
+ "description": "test parcel",
+ "options": [],
+ "productTypology": "Classical",
+ "packageType": "Parcel"
+ }
+ ]')
+
+ DELIVERY_OPTIONS_P='['
+ for option in "${DELIVERY_OPTIONS[@]}"; do
+ if [ "$DELIVERY_OPTIONS_P" != '[' ]; then
+ DELIVERY_OPTIONS_P+=", "
+ fi
+ DELIVERY_OPTIONS_P+="\"$option\""
+ done
+ DELIVERY_OPTIONS_P+=']'
+
+ IFS=";" read -r nu rue code_postal ville < <(get_random_street "$CODE_POSTAL")
+ JSON='{
+ "primaryOrderReference": "'"${PRIMARY_REF}${POS}"'",
+ "secondaryOrderReference": null,
+ "stages": [
+ {
+ "type": "Pickup",
+ "packageBarcodes": '"$BARCODES"',
+ "location": {
+ "type": "Warehouse",
+ "warehouseCode": "'"$PICKUP_WAREHOUSE_CODE"'"
+ }
+ },
+ {
+ "type": "Dropoff",
+ "packageBarcodes": '"$BARCODES"',
+ "location": {
+ "type": "Address",
+ "address": {
+ "address1": "'"$nu $rue"'",
+ "postalCode": "'"$code_postal"'",
+ "city": "'"$ville"'",
+ "country": "France",
+ "floor": 0,
+ "lift": "with_lift"
+ },
+ "contact": {
+ "name": "John Doe",
+ "primaryPhone": "+33606060606"
+ }
+ }
+ }
+ ],
+ "packages": '"$PACKAGES"',
+ "owner": {
+ "accountIdentifier": "'$ACCOUNT_IDENTIFIER'"
+ },
+ "deliveryOptions": '"$DELIVERY_OPTIONS_P"',
+ "ecommerceValidationDate": "'"${ORDER_DATE}"'"
+ }'
+
+ RESULT=$(curl -s -X POST https://api.$ENV.colisweb.com/api/v6/order/external/warehouse/orders -H 'content-type: application/json' --cookie session="$TOKEN" --data-raw "$JSON")
+ ORDER_ID=$(jq ".orderId" -r <<< "$RESULT")
+
+ echo "new order : https://bo.$ENV.colisweb.com/admin/orders/$ORDER_ID" >&2
+
+ echo "$RESULT"
+ }
+
+
+ function call_scan {
+ local ENV=$1
+ local TOKEN=$2
+ source "$3"
+ local BARCODES="$4"
+ local SCAN=$(echo "$BARCODES" | jq '[{"barcode" :.[], "context": "shuttle"}]')
+
+ JSON='{"scans":'$SCAN'}'
+
+ curl -X POST https://api.$ENV.colisweb.com/api/v6/parcel/external/units/scans/bulk -H 'content-type: application/json' --cookie session="$TOKEN" --data-raw "$JSON"
+ }
+
+
+ function call_register_delivery {
+ local ENV=$1
+ local TOKEN=$2
+
+ SCENARIO=$3
+ source "$SCENARIO"
+
+ local ORDER_ID=$4
+ local BARCODES="$5"
+
+ curl -X POST https://api.$ENV.colisweb.com/api/v6/order/external/warehouse/orders/"$ORDER_ID"/deliveries \
+ --cookie session="$TOKEN" --data-raw '{
+ "slot": '"$(rand_slot "${DELIVERY_DATE}" "$SCENARIO")"',
+ "storeIdOwner":"'"$STORE_ID_OWNER"'",
+ "pickup":{"type":"hub","code":"'"$HUB"'"},
+ "barcodes":'"$BARCODES"',
+ "price":{"origin":"auto","amount":25.9},
+ "allowCustomerSlotUpdate":false
+ }'
+ }
+
+
+
+ function _create_scenario_file_if_not_exist () {
+ if [ ! -f "$SCENARIO" ]
+ then
+ cat > "$SCENARIO" <<-'EOF'
+ DELIVERY_DATE=$(date -v+7d '+%Y-%m-%d') # here we ask for today's date + 7 days
+ # replace -v+7d with -v+1d for a delivery scheduled tomorrow
+ # only used by create_many_sfh_order_and_delivery
+ ENV="testing" # which environment to run the scripts against
+ # ENV="staging"
+ # ENV="recette"
+
+ ACCOUNT_IDENTIFIER="102" # forces owner.accountIdentifier when creating the order
+ # on the api/v6/order/external/warehouse/orders call
+ # (order creation)
+ HUB="duck" # for the api/v6/order/external/warehouse/orders call
+ # pickup.code parameter (type is "hub")
+ STORE_ID_OWNER="184" # pickup.storeIdOwner parameter
+ PICKUP_WAREHOUSE_CODE="422" # on the api/v6/order/external/warehouse/orders call
+ # stages.[0].location.warehouseCode parameter
+
+ BARCODES_COUNT=5 # number of packages
+ PREF="aaaa" # must be exactly 4 characters, used to generate
+ # the package barcodes
+
+ CODE_POSTAL="59000" # postal code from which a random address will be picked
+ # (order creation)
+ DELIVERY_SLOTS=( # list of delivery time slots, one picked at random
+ "06:00+01:00[Europe/Paris]-08:00+01:00[Europe/Paris]"
+ "08:00+01:00[Europe/Paris]-10:00+01:00[Europe/Paris]"
+ "10:00+01:00[Europe/Paris]-12:00+01:00[Europe/Paris]"
+ "16:00+01:00[Europe/Paris]-18:00+01:00[Europe/Paris]"
+ "18:00+01:00[Europe/Paris]-20:00+01:00[Europe/Paris]"
+ )
+
+ # DELIVERY_OPTIONS=("skill1" "skill2") # list of skills - uncomment to use
+
+ # normally no need to modify anything below
+ ORDER_DATE=$(date '+%Y-%m-%d') # today's date
+ RAND=$(date +%y%m%d%H%M%S) # pseudo-random value (here date-based); must be 17 characters
+ BARCODE_PART=0000$RAND # used to generate the barcodes; the barcodes are:
+ # {BARCODE_PART}{00000} to {BARCODE_PART}{BARCODES_COUNT}
+ PRIMARY_REF=$PREF$RAND # the order's primaryOrderReference
+ EOF
+ echo "edit the file $SCENARIO"
+ return 1
+ fi
+ }
+
+ #!/usr/bin/env bash
+
+ cleanup_merged_mr() {
+ COLISWEB_IDL_GROUP=3054234
+
+ BEFORE=${1:-$(date -I -v-2y)}
+
+ for (( COUNTER=1; COUNTER<=12; COUNTER+=2 )); do
+ cleanup_grouped_merged_mr $COLISWEB_IDL_GROUP $BEFORE $COUNTER &
+ done
+
+ }
+
+ cleanup_grouped_merged_mr() {
+ GROUP=$1
+ BEFORE=$2
+ PAGE_COUNT=$3
+ MERGED_MRS=($(curl --header "PRIVATE-TOKEN: $GITLAB_PAT" \
+ --url "https://gitlab.com/api/v4/groups/$GROUP/merge_requests?updated_before=${BEFORE}T08:00:00Z&status=merged&per_page=50&page=$PAGE_COUNT" |
+ jq -r '.[] | {iid: .iid|tostring, pid:.project_id|tostring} | (.pid + "/merge_requests/" + .iid)'))
+
+ for MR in ${MERGED_MRS[@]}; do
+ echo "https://gitlab.com/api/v4/projects/$MR"
+ curl --request DELETE \
+ --header "PRIVATE-TOKEN: $GITLAB_PAT" \
+ --url "https://gitlab.com/api/v4/projects/$MR"
+ done
+ }
+
+ # you will need jq to use these commands. You can install it using "brew install jq"
+ # cleanup_all_ecr_images 12
+ # will delete images in all repositories older than 12 weeks
+ # cleanup_single_ecr_repository 2024-01-01 colisweb-api
+ # will delete images pushed before the given date in the colisweb-api repository
+ cleanup_all_ecr_images() {
+ WEEKS=$1
+
+ # BSD date (-v) first, GNU date (--date) as a fallback, so this works on macOS and Linux
+ CLEAN_BEFORE=$(date -v-${WEEKS}w +%F || date --date="-${WEEKS} weeks" +'%Y-%m-%d')
+ REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[] |.[].repositoryName')
+
+ while read -r REPOSITORY; do
+ echo "processing ECR repository $REPOSITORY before $CLEAN_BEFORE"
+ cleanup_single_ecr_repository "$CLEAN_BEFORE" "$REPOSITORY"
+ done <<< "$REPOSITORIES"
+ }
+
+ cleanup_single_ecr_repository() {
+ BEFORE=$1
+ REPOSITORY=$2
+
+ echo "getting tags for repository $REPOSITORY before $BEFORE"
+
+ ALL_TAGS=$(aws ecr describe-images --repository-name "$REPOSITORY" --output json |
+ jq '.imageDetails' |
+ jq '. |= sort_by(.imagePushedAt)' |
+ jq --arg date $BEFORE '.[] | select(.imagePushedAt[0:10] < $date)' |
+ jq 'select((.imageTags != null) or (.imageTags == []))' |
+ jq 'select(.imageTags | any(endswith("latest")) | not)' |
+ jq -r '.imageTags | join(" ")' |
+ sort -u)
+
+ if [ -z "${ALL_TAGS}" ]; then
+ echo "no tag to delete for repository $REPOSITORY"
+ else
+ echo "deleting $(echo "$ALL_TAGS" | wc -l) tags for $REPOSITORY"
+
+ while read image_tags; do
+ SINGLE_TAG=$(echo $image_tags | grep -o '^\S*')
+
+ DIGESTS_TO_DELETE=$(docker buildx imagetools inspect \
+ 949316342391.dkr.ecr.eu-west-1.amazonaws.com/$REPOSITORY:$SINGLE_TAG --raw |
+ jq -r '[.manifests | .[].digest] | join(" imageDigest=") | "imageDigest=" + .' ||
+ echo "")
+
+ TAGS_TO_DELETE=$(echo "$image_tags" | sed 's/[^ ]* */imageTag=&/g')
+
+ export AWS_PAGER=""
+
+ aws ecr batch-delete-image --repository-name "$REPOSITORY" --image-ids $(echo $TAGS_TO_DELETE) > /dev/null 2>&1
+ test -z "$DIGESTS_TO_DELETE" ||
+ aws ecr batch-delete-image --repository-name "$REPOSITORY" --image-ids $(echo $DIGESTS_TO_DELETE) > /dev/null 2>&1
+ done <<< "$ALL_TAGS"
+
+ echo "deleted $(echo "$ALL_TAGS" | wc -l) tags"
+ fi
+
+ }
+
+
+ cleanup_ci_cache() {
+ DATE=${1:-$(date -v-1m +%F)}
+ CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}
+
+ echo "deleting from cache $CACHE_BUCKET all older than $DATE"
+
+ aws_ecr_login
+
+ while read -r line; do
+ datum=$(echo $line | cut -c1-10)
+ if [[ "$datum" < "$DATE" ]] ; then
+ # Shell Parameter Expansion: ${parameter##word}
+ # strips the longest prefix matching "word" and keeps the rest of "parameter"
+ # Here we need the end of the string after "project/" (corresponding to the S3 gitlab project id and filename)
+ TO_DELETE="$CACHE_BUCKET${line##* project/}"
+ echo $TO_DELETE
+ aws s3 rm $TO_DELETE
+ fi
+ done < <(aws s3 ls $CACHE_BUCKET --recursive)
+ }
+
+ #!/usr/bin/env bash
+
  ftp_ikea_k8s() {
  SSH_LOCAL_PORT=2230
  FTP_LOCAL_PORT=25500
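The order-creation helpers above compose into a small end-to-end flow; a hypothetical driver (not part of the toolkit), assuming a filled-in scenario file as generated by _create_scenario_file_if_not_exist:

    create_one_test_order() {                    # hypothetical driver function
      SCENARIO="$HOME/scenario.env"              # hypothetical scenario path
      _create_scenario_file_if_not_exist || return 1
      source "$SCENARIO"
      local TOKEN=$(get_token "$ENV")
      local BARCODES=$(bash_array_to_json "${BARCODE_PART}00001" "${BARCODE_PART}00002")
      local RESULT=$(call_create_sfh_order "$ENV" "$TOKEN" "$SCENARIO" 1 "$BARCODES" "$CODE_POSTAL")
      local ORDER_ID=$(jq -r '.orderId' <<< "$RESULT")
      call_register_delivery "$ENV" "$TOKEN" "$SCENARIO" "$ORDER_ID" "$BARCODES"
    }
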
@@ -1928,6 +2298,7 @@ datadog_schedule_downtime_single() {
  docker_build_push() {
  read -r -a BUILD_ARGS <<< "$1"
  DOCKER_BUILD_ARGS="--build-arg VCS_REF=$(git rev-parse --short HEAD)"
+
  for ARG_NAME in "${BUILD_ARGS[@]}"
  do
  DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS --build-arg $ARG_NAME=${!ARG_NAME}"
@@ -1936,13 +2307,17 @@ docker_build_push() {
  if ! image_exists $DOCKER_REGISTRY_ID $APPLICATION $CI_COMMIT_SHORT_SHA ; then
  docker pull $DOCKER_IMAGE || true
  SOURCE_URL=${CI_PROJECT_URL:8} # without "https://" protocol, like gitlab.com/colisweb-idl/colisweb/back/packing
- docker build $DOCKER_BUILD_ARGS \
+
+ docker buildx create --use
+
+ docker buildx build $DOCKER_BUILD_ARGS \
  -t $DOCKER_IMAGE_SHA \
+ --platform "linux/arm64,linux/amd64" \
  --label org.opencontainers.image.revision=$(git rev-parse HEAD) \
  --label org.opencontainers.image.source=$SOURCE_URL \
- --cache-from $DOCKER_IMAGE \
+ --provenance=false \
+ --push \
  $DOCKER_STAGE_PATH
- docker push $DOCKER_IMAGE_SHA
  fi
  }

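The buildx invocation builds both architectures and pushes the multi-arch manifest in a single step (--provenance=false keeps a plain manifest list rather than an attestation-wrapped one); a minimal standalone equivalent, with a placeholder image tag:

    docker buildx create --use
    docker buildx build \
      --platform linux/arm64,linux/amd64 \
      --provenance=false \
      -t registry.example.com/app:abc1234 \
      --push .
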
@@ -2078,7 +2453,7 @@ init_migrate_db() {

  unset KUBECONFIG

- configure_kubectl_for_ci ${ENVIRONMENT}
+ configure_kubectl_for ${ENVIRONMENT}

  kube_init_service_database \
  --namespace ${ENVIRONMENT} \
@@ -2124,7 +2499,7 @@ flyway_migrate() {
  CONFIGMAP_NAME="$service-flyway-migration-sql"
  POD_NAME="$service-flyway-migration"

- configure_kubectl_for_ci $environment
+ configure_kubectl_for $environment

  kubectl -n $namespace delete configmap $CONFIGMAP_NAME --ignore-not-found
  kubectl -n $namespace delete pod $POD_NAME --ignore-not-found
@@ -2177,7 +2552,7 @@ flyway_migrate() {

  flyway_sql_folder=$(pwd)/${MIGRATION_SQL_PATH}

- configure_kubectl_for_ci "${ENVIRONMENT}"
+ configure_kubectl_for "${ENVIRONMENT}"
  POD_NAME="${APPLICATION}-flyway-repair"
  CONFIGMAP_NAME="${APPLICATION}-flyway-repair-sql"

@@ -2243,11 +2618,11 @@ git_reveal() {
  }
  #!/usr/bin/env bash

- helm_deploy_v3() {
+ helm_deploy() {
  APPLICATION=$1
  ENVIRONMENT=$2
  VERSION=$3
- deploy_chart_v3 \
+ deploy_chart \
  --path_configs deploy \
  --path_chart deploy/$APPLICATION \
  --application $APPLICATION \
@@ -2256,7 +2631,7 @@ helm_deploy_v3() {
  --helm_extra_args --set global.version=$VERSION
  }

- deploy_chart_v3() {
+ deploy_chart() {
  set -e
  set -x

@@ -2303,15 +2678,15 @@ deploy_chart_v3() {
  unset KUBECONFIG

  # Configure Kubectl
- configure_kubectl_for_ci ${environment}
+ configure_kubectl_for ${environment}

- # Configure helm3
- helm3 version --namespace ${namespace} || true
- # helm3 stable repo have changed and must be updated manually, in versions < v2.17.0
- helm3 repo add colisweb s3://colisweb-helm-charts/colisweb
- helm3 repo add stable https://charts.helm.sh/stable
- helm3 repo update
- helm3 dependency update ${root_path}/${path_chart}
+ # Configure helm
+ helm version --namespace ${namespace} || true
+ # the helm stable repo URL has changed and must be set manually for helm versions < v2.17.0
+ helm repo add colisweb s3://colisweb-helm-charts/colisweb
+ helm repo add stable https://charts.helm.sh/stable
+ helm repo update
+ helm dependency update ${root_path}/${path_chart}

  # Gather values/*.yaml files
  values_path="${root_path}/${path_chart}/values"
@@ -2319,7 +2694,7 @@ deploy_chart_v3() {
  [ -d $values_path ] && values_files=$(find $values_path -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')

  # Deploy
- helm3 upgrade --install \
+ helm upgrade --install \
  --namespace ${namespace} \
  ${values_files} \
  -f ${root_path}/${path_configs}/common.yaml \
@@ -2341,7 +2716,7 @@ deploy_chart_v3() {
  set +x
  }

- verify_deployments_v3() {
+ verify_deployments() {
  set -e

  # usage :
@@ -2361,7 +2736,7 @@ verify_deployments_v3() {

  # Get all Deployments names from the deployed chart
  DEPLOYMENTS=(
- $(helm3 get manifest --namespace $NAMESPACE $RELEASE | yq -rs '.[] | select(.kind=="Deployment") | .metadata.name')
+ $(helm get manifest --namespace $NAMESPACE $RELEASE | yq --no-doc -r 'select(.kind=="Deployment").metadata.name')
  )

  echo "verifying on $NAMESPACE deployments ${DEPLOYMENTS[@]} with a timeout of $TIMEOUT"
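helm get manifest emits one multi-document YAML stream for the release; the yq v4 expression above prints one Deployment name per line (release name and output are illustrative):

    helm get manifest --namespace testing my-release |
      yq --no-doc -r 'select(.kind=="Deployment").metadata.name'
    # my-release-api
    # my-release-worker
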
@@ -2404,40 +2779,6 @@ check_config_file() {
  fi
  }

- #!/usr/bin/env bash
-
- configure_kubectl_for_ci() {
- if [ -z ${GITLAB_PAT} ]; then
- echo "Cannot configure kubectl: no GITLAB_PAT configured"
- exit 1
- fi
-
- infra_env="$1"
- valid_envs="[testing][staging][production][performance][tests][recette]"
- echo "$valid_envs" | grep -q "\[$infra_env\]"
-
- if [ $? -ne 0 ]; then
- echo "Cannot configure kubectl for invalid env : $infra_env"
- echo "choose one of $valid_envs"
- exit 1
- fi
-
- mkdir -p ~/.kube
- curl -fsS \
- --header "PRIVATE-TOKEN: $GITLAB_PAT" \
- "https://gitlab.com/api/v4/projects/8141053/jobs/artifacts/$infra_env/raw/$infra_env.kubeconfig?job=4_kubernetes_config_output" \
- > ~/.kube/$infra_env.kubeconfig
-
- curl_return_code=$?
- if [ ${curl_return_code} -ne 0 ]; then
- echo "Cannot configure kubectl for $infra_env, get configuration failed with code $curl_return_code"
- exit ${curl_return_code}
- fi
-
- rm -f ~/.kube/config
- ln -s ~/.kube/$infra_env.kubeconfig ~/.kube/config
- echo "Configured kubectl for env : $infra_env"
- }

  notify_new_deployment() {
  jq --version || (apt update && apt install -y jq)
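For reference, the helpers above obtain cluster access through configure_kubectl_for; a typical interactive use (environment names follow the database_k8s case table):

    configure_kubectl_for staging && kubectl -n staging get pods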