@colisweb/rescript-toolkit 5.41.1 → 5.42.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -92,63 +92,6 @@ aws_ecr_token() {
   aws ecr get-authorization-token --region=eu-west-1 --output text --query 'authorizationData[].authorizationToken'
  }
 
- # you will need jq to use these commands. You can install it using "brew install jq"
- # delete_images colisweb_api 8
- # will delete images older than 8 weeks
- delete_images() {
-
- REPO=$1
- WEEKS=${2:-16}
-
- WEEKS_AGO=$(date -v-${WEEKS}w +%F)
-
- #Get all ecr images
- IMAGES=$(aws ecr describe-images --repository-name $REPO --output json)
-
- #Filter unnecessary values and map `imagePushedAt` to EPOCH
- NON_LATEST_IMAGES=$(echo $IMAGES | jq '[.imageDetails[] | select(.imageTags | any(endswith("latest")) | not)]')
-
- #Filter on EPOCH
- OLD_IMAGES=$(echo $NON_LATEST_IMAGES | jq --arg date $WEEKS_AGO '.[] | select(.imagePushedAt[0:10] < $date).imageDigest')
- while IFS= read -r IMAGE; do
- if [ "$IMAGE" != "" ]; then
- echo "Deleting $IMAGE from $REPO"
- AWS_PAGER="" aws ecr batch-delete-image --repository-name $REPO --image-ids imageDigest=$IMAGE
- fi
- done <<< "$OLD_IMAGES"
- }
-
- # delete_images_all_repos 12
- # will delete images in all repositories older than 12 weeks
- delete_images_all_repos() {
- REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[]|.[].repositoryName')
-
- while IFS= read -r REPO; do
- echo "processing ECR repository $REPO"
- delete_images $REPO $1
- done <<< "$REPOSITORIES"
- }
-
- delete_old_cache() {
- DATE=${1:-$(date -v-1m +%F)}
- CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}
-
- echo "deleting from cache $CACHE_BUCKET all older than $DATE"
-
- aws_ecr_login
-
- while read -r line; do
- datum=$(echo $line | cut -c1-10)
- if [[ "$datum" < "$DATE" ]] ; then
- # Shell Parameter Expansion: ${parameter##word}
- # Allow to return the result from "word" to the end of "parameters"
- # Here we need the end of the string after "project/" (corresponding to the S3 gitlab project id and filename)
- TO_DELETE="$CACHE_BUCKET${line##* project/}"
- echo $TO_DELETE
- aws s3 rm $TO_DELETE
- fi
- done < <(aws s3 ls $CACHE_BUCKET --recursive)
- }
 
  #!/usr/bin/env bash
 
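
Note: the `${parameter##word}` expansion used in `delete_old_cache` above (and in its renamed successor `cleanup_ci_cache` further down) is easiest to read on a concrete value; a minimal sketch with a made-up `aws s3 ls` output line:

    line="2023-01-02 09:13:45     123456 project/42/archive.zip"
    echo "${line##* project/}"   # strips everything up to and including " project/" -> 42/archive.zip
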
@@ -250,11 +193,11 @@ entries:
  cronjob:
  EOT
 
- # helm3 stable repo have changed and must be updated manually, in versions < v2.17.0
- helm3 repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
- helm3 repo add stable https://charts.helm.sh/stable --force-update
- helm3 repo update
- helm3 dependency update ${ROOT_PATH}/${CHART_PATH}
+ # the helm stable repo has changed and must be updated manually in versions < v2.17.0
+ helm repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
+ helm repo add stable https://charts.helm.sh/stable --force-update
+ helm repo update
+ helm dependency update ${ROOT_PATH}/${CHART_PATH}
 
  # Gather values/*.yaml files
  VALUES_PATH="${ROOT_PATH}/${CHART_NAME}/values"
@@ -262,7 +205,7 @@ EOT
  [ -d $VALUES_PATH ] && VALUES_FILES=$(find $VALUES_PATH -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')
 
  # Deploy
- helm3 upgrade --install \
+ helm upgrade --install \
  --namespace ${ENVIRONMENT} \
  ${VALUES_FILES} \
  -f ${ROOT_PATH}/${CONFIG_PATH}/common.yaml \
@@ -272,7 +215,7 @@ EOT
  ${CHART_NAME} ${ROOT_PATH}/${CHART_PATH}
 
 
- verify_deployments_v3 -t 10m $ENVIRONMENT $CHART_NAME
+ verify_deployments -t 10m $ENVIRONMENT $CHART_NAME
 
  }
 
@@ -509,12 +452,12 @@ configure_kubectl_for() {
  database_k8s() {
  MODE=$1
  case $MODE in
- "tests") SSH_LOCAL_PORT=2224;PG_LOCAL_PORT=24440;CA_LOCAL_PORT=25430;ENV="tests";;
- "testing") SSH_LOCAL_PORT=2225;PG_LOCAL_PORT=24441;CA_LOCAL_PORT=25431;ENV="testing";;
- "staging") SSH_LOCAL_PORT=2226;PG_LOCAL_PORT=24442;CA_LOCAL_PORT=25432;ENV="staging";;
- "production") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24443;CA_LOCAL_PORT=25433;ENV="production";;
- "production_rw") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24444;CA_LOCAL_PORT=25434;ENV="production";;
- "recette") SSH_LOCAL_PORT=2228;PG_LOCAL_PORT=24446;CA_LOCAL_PORT=25436;ENV="recette";;
+ "tests") SSH_LOCAL_PORT=2224;COMP_LOCAL_PORT=25550;PG_LOCAL_PORT=24440;CA_LOCAL_PORT=25430;ENV="tests";;
+ "testing") SSH_LOCAL_PORT=2225;COMP_LOCAL_PORT=25551;PG_LOCAL_PORT=24441;CA_LOCAL_PORT=25431;ENV="testing";;
+ "staging") SSH_LOCAL_PORT=2226;COMP_LOCAL_PORT=25552;PG_LOCAL_PORT=24442;CA_LOCAL_PORT=25432;ENV="staging";;
+ "production") SSH_LOCAL_PORT=2227;COMP_LOCAL_PORT=25553;PG_LOCAL_PORT=24443;CA_LOCAL_PORT=25433;ENV="production";;
+ "production_rw") SSH_LOCAL_PORT=2227;COMP_LOCAL_PORT=25554;PG_LOCAL_PORT=24444;CA_LOCAL_PORT=25434;ENV="production";;
+ "recette") SSH_LOCAL_PORT=2228;COMP_LOCAL_PORT=25556;PG_LOCAL_PORT=24446;CA_LOCAL_PORT=25436;ENV="recette";;
  *) echo "Unsupported ENV : $MODE"; return 1 ;;
  esac
 
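
For reference, a usage sketch of the new composite-database forward (assuming these functions are sourced and SSH access to the bastion is already set up; the ports are the ones from the case table above):

    database_k8s staging                       # opens the bastion tunnel for staging
    psql postgres://postgres@127.0.0.1:24442   # main Postgres database
    psql postgres://postgres@127.0.0.1:25552   # composite database (new COMP_LOCAL_PORT)
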
@@ -535,23 +478,28 @@ database_k8s() {
  HostName 127.0.0.1
  Port 2225
  LocalForward 24441 toutatis-testing-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
+ LocalForward 25551 toutatis-testing-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25431 toutatis-testing-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  LocalForward 25531 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
+ LocalForward 25561 toutatis-testing-oracle-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:1521
  Host bastion_staging
  HostName 127.0.0.1
  Port 2226
  LocalForward 24442 toutatis-staging-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
+ LocalForward 25552 toutatis-staging-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25432 toutatis-staging-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  Host bastion_recette
  HostName 127.0.0.1
  Port 2228
  LocalForward 24446 toutatis-recette-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
+ LocalForward 25556 toutatis-recette-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25436 toutatis-recette-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  LocalForward 25536 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  Host bastion_production
  HostName 127.0.0.1
  Port 2227
  LocalForward 24443 toutatis-production-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
+ LocalForward 25553 toutatis-production-composite-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25433 toutatis-production-mysql-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  EOF
  if [ "$MODE" = "production_rw" ] ; then
@@ -565,6 +513,7 @@ EOF
  -F "$bastion_config" \
  "bastion_$ENV"
 
+ echo "sample command (composite) : 'psql postgres://postgres@127.0.0.1:$COMP_LOCAL_PORT'"
  echo "sample command : 'psql postgres://postgres@127.0.0.1:$PG_LOCAL_PORT'"
  echo "sample command : 'mysql -u colisweb -h 127.0.0.1 -P $CA_LOCAL_PORT -p db_name'"
 
@@ -612,8 +561,6 @@ kube_init_database_once() {
  echo " Initializing Database '$db_database' for namespace $namespace"
  echo "======================="
 
- set -x
-
  echo "Checking if Database '$db_database' exists"
  set +e
  psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -lqtA | cut -d\| -f1 | grep "^$db_database$"
@@ -843,8 +790,6 @@ kube_init_service_database() {
 
  local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"
 
- set -x
-
  echo "Checking if Database '$db_database' exists"
  set +e
  psql_on_k8 $namespace $service $db_connection -lqtA | cut -d\| -f1 | grep "^$db_database$"
@@ -912,14 +857,29 @@ EOF
  }
  #!/usr/bin/env bash
 
+ function kstatus() {
+ if [ -z "$3" ]
+ then
+ configure_kubectl_for $1 && watch -n 1 "kubectl -n $1 get $2"
+ else
+ configure_kubectl_for $1 && watch -n 1 "kubectl -n $1 get $2 | grep $3"
+ fi
+ }
+
+ #!/usr/bin/env bash
+
  k8_nodes_stats() {
- kubectl get nodes -o name |
- xargs kubectl describe |
- grep "^Name\|workType\|cpu \|memory " |
- sed -r 's/[ :=]+/\t/g' |
- sed 's/\tworkType\t//g' |
- sed -r 's/^Name/---\nName/g' |
- grep --color "Name\|web\|workers\|cpu\|memory\|---"
+ ENV=${1:-testing}
+
+ configure_kubectl_for "${ENV}"
+
+ kubectl get nodes -o name |
+ xargs kubectl describe |
+ grep "^Name\|workType\|cpu \|memory " |
+ sed -r 's/[ :=]+/\t/g' |
+ sed 's/\tworkType\t//g' |
+ sed -r 's/^Name/---\nName/g' |
+ grep --color "Name\|web\|workers\|cpu\|memory\|---"
  }
 
  #!/usr/bin/env bash
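
A usage sketch for the new `kstatus` helper, whose optional third argument is a grep filter:

    kstatus staging pods              # watch all pods in the staging namespace
    kstatus staging deployments api   # watch only the lines matching "api"
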
@@ -999,6 +959,45 @@ pick_pod() {
  fi
  }
 
+ # pods_resources $ENV
+ # Will output a CSV (;) of all deployments on this environment with cpu and memory requests and limits
+ # Errors and null outputs are ignored and won't be in the output.
+ pods_resources() {
+ ENV=$1
+ configure_kubectl_for $ENV
+ DEPLOYMENTS=(
+ $(kubectl -n $ENV get deployments | grep -Eo '^[^ ]+' | grep -v 'NAME')
+ )
+ echo "deployment; request_cpu; request_memory; limits_cpu; limits_memory"
+ for D in "${DEPLOYMENTS[@]}"; do
+ info=$(kubectl -n $ENV get deployment -o yaml $D |
+ yq '.spec.template.spec.containers[].resources' |
+ yq '.L = .requests.cpu + "; " + .requests.memory + "; " + .limits.cpu + "; " + .limits.memory' |
+ yq ".L" 2>/dev/null)
+ if ! [ "$info" = "null" ]; then
+ echo "$D; $info"
+ fi
+ done
+ }
+
+ pods_strategies() {
+ ENV=$1
+ configure_kubectl_for $ENV
+ DEPLOYMENTS=(
+ $(kubectl -n $ENV get deployments | grep -Eo '^[^ ]+' | grep -v 'NAME')
+ )
+ echo "deployment; max_surge; max_unavailable"
+ for D in "${DEPLOYMENTS[@]}"; do
+ info=$(kubectl -n $ENV get deployment -o yaml $D |
+ yq '.spec.strategy' |
+ yq '.L = .rollingUpdate.maxSurge + "; " + .rollingUpdate.maxUnavailable' |
+ yq ".L" 2>/dev/null)
+ if ! [ "$info" = "null" ]; then
+ echo "$D; $info"
+ fi
+ done
+ }
+
  #!/usr/bin/env bash
 
  bastion_config_for_redis_ca() {
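
Both helpers emit one CSV line per deployment, so the output pipes straight into a file or a column formatter; a usage sketch (the output values are illustrative, not real):

    pods_resources staging | column -s ';' -t
    # deployment   request_cpu   request_memory   limits_cpu   limits_memory
    # api          500m          512Mi            1            1Gi
    pods_strategies staging > strategies.csv
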
@@ -1551,6 +1550,397 @@ jwt_token() {
 
  #!/usr/bin/env bash
 
+ SCRIPT_PATH=$SCRIPT_FULL_PATH/shell/run
+ PATH="$PATH:$SCRIPT_PATH/script"
+
+ function get_token {
+ local ENV=$1
+ local LOGIN_FILE="$HOME/scriptlogin"
+
+ if [ ! -f "$LOGIN_FILE" ]; then
+ cat > "$LOGIN_FILE" <<-'EOF'
+ #!/bin/bash
+ case $ENV in
+ "testing")
+ local BO_USERNAME=""
+ local BO_PASSWORD=""
+ ;;
+ "recette")
+ local BO_USERNAME=""
+ local BO_PASSWORD=""
+ ;;
+ "staging")
+ local BO_USERNAME=""
+ local BO_PASSWORD=""
+ ;;
+ *)
+ local BO_USERNAME=""
+ local BO_PASSWORD=""
+ echo "unknown ENV ${ENV}"
+ return
+ ;;
+ esac
+ EOF
+ fi
+
+ source "${LOGIN_FILE}"
+
+ if [ -z "$BO_PASSWORD" ] || [ -z "$BO_USERNAME" ]
+ then
+ echo "edit the file $LOGIN_FILE first"
+ return 1
+ fi
+
+ curl -o /dev/null -D - "https://api.$ENV.colisweb.com/api/v6/authent/external/session" \
+ --data-raw '{"username":"'"${BO_USERNAME}"'","password":"'"${BO_PASSWORD/\"/\\\"}"'"}' \
+ --compressed 2> /dev/null | grep set-cook | sed -e 's/.*session=//g;s/;.*//g'
+ }
+
+ function bash_array_to_json {
+ function join {
+ local IFS="$1"
+ shift
+ echo "$*"
+ }
+
+ echo '["'"$(join , $* | sed -e 's/,/","/g')"'"]' | jq
+ }
+
+ function get_random_street {
+ local CODE_POSTAL=${1:-59000}
+ if [[ ! "$CODE_POSTAL" =~ ^[0-9]{5}$ ]]; then
+ echo "CODE_POSTAL must be exactly 5 digits"
+ exit 1
+ fi
+
+ FILENAME="rue-$CODE_POSTAL.lst"
+ if [ ! -f "$FILENAME" ]; then
+ curl --output tmp1.gz https://adresse.data.gouv.fr/data/ban/adresses/latest/csv/adresses-"${CODE_POSTAL:0:2}".csv.gz
+ gzip -d tmp1.gz
+ cut -d\; -f3,5,6,8 tmp1 | sed "/;$CODE_POSTAL;/!d" > "$FILENAME"
+ rm tmp1
+ fi
+
+ sort -R "$FILENAME" | head -n 1
+ }
+
+ function rand_slot {
+
+ local SCENARIO=$2
+ if [ -f "$SCENARIO" ]; then
+ source "$SCENARIO"
+ fi
+ local ORDER_DATE="$1"
+
+ DEFAULT=(
+ "06:00+01:00[Europe/Paris]-08:00+01:00[Europe/Paris]"
+ "08:00+01:00[Europe/Paris]-10:00+01:00[Europe/Paris]"
+ "10:00+01:00[Europe/Paris]-12:00+01:00[Europe/Paris]"
+ "16:00+01:00[Europe/Paris]-18:00+01:00[Europe/Paris]"
+ "18:00+01:00[Europe/Paris]-20:00+01:00[Europe/Paris]"
+ )
+ USAGE=${DELIVERY_SLOTS:-${DEFAULT[@]}}
+
+ IFS="-" read -r start_time end_time < <(echo "${USAGE[@]}" | tr " " "\n" | sort -u -R | head -n 1)
+
+ echo '{"start":"'"${ORDER_DATE}T${start_time}"'", "end":"'"${ORDER_DATE}T${end_time}"'" }'
+ }
+
+ function call_create_sfh_order {
+ local ENV=$1
+ local TOKEN=$2
+ source "$3"
+ local POS=$4
+ local BARCODES="$5"
+ local CODE_POSTAL="$6"
+ local PACKAGES=$(echo "$BARCODES" | jq '[{
+ "barcode": .[],
+ "length": 10.5,
+ "height": 9.0,
+ "width": 9.0,
+ "weight": 10.11,
+ "description": "test parcel",
+ "options": [],
+ "productTypology": "Classical",
+ "packageType": "Parcel"
+ }
+ ]')
+
+ DELIVERY_OPTIONS_P='['
+ for option in "${DELIVERY_OPTIONS[@]}"; do
+ if [ "$DELIVERY_OPTIONS_P" != '[' ]; then
+ DELIVERY_OPTIONS_P+=", "
+ fi
+ DELIVERY_OPTIONS_P+="\"$option\""
+ done
+ DELIVERY_OPTIONS_P+=']'
+
+ # IFS=";" read -r nu rue code_postal ville < <(get_random_street "$CODE_POSTAL")
+ IFS=";" read -r nu rue code_postal ville < <(echo "6;Rue Maxime Gorki;59000;Lille")
+
+ if [ -n "$PICKUP_STORE_CODE" ]; then
+ PICKUP_LOCATION='{
+ "type": "store",
+ "storeCode": "'"$PICKUP_STORE_CODE"'"
+ }'
+ elif [ -n "$PICKUP_WAREHOUSE_CODE" ]; then
+ PICKUP_LOCATION='{
+ "type": "Warehouse",
+ "warehouseCode": "'"$PICKUP_WAREHOUSE_CODE"'"
+ }'
+ else
+ echo "PICKUP_WAREHOUSE_CODE or PICKUP_STORE_CODE must be set in $3"
+ exit 1
+ fi
+
+ JSON='{
+ "primaryOrderReference": "'"${PRIMARY_REF}${POS}"'",
+ "secondaryOrderReference": null,
+ "stages": [
+ {
+ "type": "Pickup",
+ "packageBarcodes": '"$BARCODES"',
+ "location": '"$PICKUP_LOCATION"'
+ },
+ {
+ "type": "Dropoff",
+ "packageBarcodes": '"$BARCODES"',
+ "location": {
+ "type": "Address",
+ "address": {
+ "address1": "'"$nu $rue"'",
+ "postalCode": "'"$code_postal"'",
+ "city": "'"$ville"'",
+ "country": "France",
+ "floor": 0,
+ "lift": "with_lift"
+ },
+ "contact": {
+ "name": "John Doe",
+ "primaryPhone": "+33606060606",
+ "email": "david.adler@colisweb.com"
+ }
+ }
+ }
+ ],
+ "packages": '"$PACKAGES"',
+ "owner": {
+ "accountIdentifier": "'$ACCOUNT_IDENTIFIER'"
+ },
+ "deliveryOptions": '"$DELIVERY_OPTIONS_P"',
+ "ecommerceValidationDate": "'"${ORDER_DATE}"'"
+ }'
+
+ RESULT=$(curl -s -X POST https://api.$ENV.colisweb.com/api/v6/order/external/warehouse/orders -H 'content-type: application/json' --cookie session="$TOKEN" --data-raw "$JSON")
+ ORDER_ID=$(jq ".orderId" -r <<< "$RESULT")
+
+ echo "new order : https://bo.$ENV.colisweb.com/admin/orders/$ORDER_ID" >&2
+
+ echo "$RESULT"
+ }
+
+
+ function call_scan {
+ local ENV=$1
+ local TOKEN=$2
+ source "$3"
+ local BARCODES="$4"
+ local SCAN=$(echo "$BARCODES" | jq '[{"barcode": .[], "context": "shuttle"}]')
+
+ JSON='{"scans":'$SCAN'}'
+
+ curl -X POST https://api.$ENV.colisweb.com/api/v6/parcel/external/units/scans/bulk -H 'content-type: application/json' --cookie session="$TOKEN" --data-raw "$JSON"
+ }
+
+
+ function call_register_delivery {
+ local ENV=$1
+ local TOKEN=$2
+
+ SCENARIO=$3
+ source "$SCENARIO"
+
+ local ORDER_ID=$4
+ local BARCODES="$5"
+
+ DATA='{
+ "slot": '"$(rand_slot "${DELIVERY_DATE}" "$SCENARIO")"',
+ "storeIdOwner":"'"$STORE_ID_OWNER"'",
+ "pickup":{"type":"hub","code":"'"$HUB"'"},
+ "barcodes":'"$BARCODES"',
+ "price":{"origin":"auto","amount":25.9},
+ "allowCustomerSlotUpdate":false,
+ "withForcedSlot": false
+ }'
+
+ curl -X POST https://api.$ENV.colisweb.com/api/v6/order/external/warehouse/orders/"$ORDER_ID"/deliveries \
+ --cookie session="$TOKEN" --data-raw "$DATA"
+ }
+
+
+
+ function _create_scenario_file_if_not_exist () {
+ if [ ! -f "$SCENARIO" ]
+ then
+ cat > "$SCENARIO" <<-'EOF'
+ DELIVERY_DATE=$(date -v+7d '+%Y-%m-%d') # here we ask for today's date + 7 days
+ # replace -v+7d with -v+1d for a delivery scheduled tomorrow
+ # only used by create_many_sfh_order_and_delivery
+ ENV="testing" # which environment the scripts run against
+ # ENV="staging"
+ # ENV="recette"
+
+ ACCOUNT_IDENTIFIER="102" # owner.accountIdentifier used when creating the order
+ # on the api/v6/order/external/warehouse/orders call
+ # (order creation)
+ HUB="duck" # for the api/v6/order/external/warehouse/orders call
+ # pickup.code parameter (type is "hub")
+ STORE_ID_OWNER="184" # pickup.storeIdOwner parameter
+ # on the api/v6/order/external/warehouse/orders call
+ # PICKUP_STORE_CODE="2" # if uncommented, pickup starts from the store
+ PICKUP_WAREHOUSE_CODE="422" # for a warehouse pickup
+
+ BARCODES_COUNT=5 # number of packages
+ PREF="aaaa" # must be 4 characters, used to generate the package
+ # barcodes
+
+ CODE_POSTAL="59000" # postal code in which a random address is picked
+ # (order creation)
+ DELIVERY_SLOTS=( # list of delivery slots, one picked at random
+ "06:00+01:00[Europe/Paris]-08:00+01:00[Europe/Paris]"
+ "08:00+01:00[Europe/Paris]-10:00+01:00[Europe/Paris]"
+ "10:00+01:00[Europe/Paris]-12:00+01:00[Europe/Paris]"
+ "16:00+01:00[Europe/Paris]-18:00+01:00[Europe/Paris]"
+ "18:00+01:00[Europe/Paris]-20:00+01:00[Europe/Paris]"
+ )
+
+ # DELIVERY_OPTIONS=("skill1" "skill2") # list of skill names - uncomment to use
+
+ # normally no need to modify anything below
+ ORDER_DATE=$(date '+%Y-%m-%d') # today's date
+ RAND=$(date +%y%m%d%H%M%S) # pseudo-random value (here based on the date); must be 17 characters
+ BARCODE_PART=0000$RAND # used to generate the barcodes, which run from
+ # {BARCODE_PART}{00000} to {BARCODE_PART}{BARCODES_COUNT}
+ PRIMARY_REF=$PREF$RAND # primaryOrderReference of the order
+ EOF
+ echo "edit the file $SCENARIO"
+ return 1
+ fi
+ }
+
+ #!/usr/bin/env bash
+
+ cleanup_merged_mr() {
+ COLISWEB_IDL_GROUP=3054234
+
+ BEFORE=${1:-$(date -I -v-2y)}
+
+ for (( COUNTER=1; COUNTER<=12; COUNTER+=2 )); do
+ cleanup_grouped_merged_mr $COLISWEB_IDL_GROUP $BEFORE $COUNTER &
+ done
+
+ }
+
+ cleanup_grouped_merged_mr() {
+ GROUP=$1
+ BEFORE=$2
+ PAGE_COUNT=$3
+ MERGED_MRS=($(curl --header "PRIVATE-TOKEN: $GITLAB_PAT" \
+ --url "https://gitlab.com/api/v4/groups/$GROUP/merge_requests?updated_before=${BEFORE}T08:00:00Z&status=merged&per_page=50&page=$PAGE_COUNT" |
+ jq -r '.[] | {iid: .iid|tostring, pid: .project_id|tostring} | (.pid + "/merge_requests/" + .iid)'))
+
+ for MR in ${MERGED_MRS[@]}; do
+ echo "https://gitlab.com/api/v4/projects/$MR"
+ curl --request DELETE \
+ --header "PRIVATE-TOKEN: $GITLAB_PAT" \
+ --url "https://gitlab.com/api/v4/projects/$MR"
+ done
+ }
+
+ # you will need jq to use these commands. You can install it using "brew install jq"
+ # cleanup_all_ecr_images 12
+ # will delete images in all repositories older than 12 weeks
+ # cleanup_single_ecr_repository 2024-01-01 colisweb-api
+ # will delete images pushed before 2024-01-01 in the colisweb-api repository
+ cleanup_all_ecr_images() {
+ WEEKS=$1
+
+ # the || fallback makes this work with both BSD (macOS) and GNU date
+ CLEAN_BEFORE=$(date -v-${WEEKS}w +%F || date --date="-${WEEKS} weeks" +'%Y-%m-%d')
+ REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[] | .[].repositoryName')
+
+ while read -r REPOSITORY; do
+ echo "processing ECR repository $REPOSITORY before $CLEAN_BEFORE"
+ cleanup_single_ecr_repository "$CLEAN_BEFORE" "$REPOSITORY"
+ done <<< "$REPOSITORIES"
+ }
+
+ cleanup_single_ecr_repository() {
+ BEFORE=$1
+ REPOSITORY=$2
+
+ echo "getting tags for repository $REPOSITORY before $BEFORE"
+
+ ALL_TAGS=$(aws ecr describe-images --repository-name "$REPOSITORY" --output json |
+ jq '.imageDetails' |
+ jq '. |= sort_by(.imagePushedAt)' |
+ jq --arg date $BEFORE '.[] | select(.imagePushedAt[0:10] < $date)' |
+ jq 'select((.imageTags != null) or (.imageTags == []))' |
+ jq 'select(.imageTags | any(endswith("latest")) | not)' |
+ jq -r '.imageTags | join(" ")' |
+ sort -u)
+
+ if [ -z "${ALL_TAGS}" ]; then
+ echo "no tag to delete for repository $REPOSITORY"
+ else
+ echo "deleting $(echo "$ALL_TAGS" | wc -l) tags for $REPOSITORY"
+
+ while read image_tags; do
+ SINGLE_TAG=$(echo $image_tags | grep -o '^\S*')
+
+ DIGESTS_TO_DELETE=$(docker buildx imagetools inspect \
+ 949316342391.dkr.ecr.eu-west-1.amazonaws.com/$REPOSITORY:$SINGLE_TAG --raw |
+ jq -r '[.manifests | .[].digest] | join(" imageDigest=") | "imageDigest=" + .' ||
+ echo "")
+
+ TAGS_TO_DELETE=$(echo "$image_tags" | sed 's/[^ ]* */imageTag=&/g')
+
+ export AWS_PAGER=""
+
+ aws ecr batch-delete-image --repository-name "$REPOSITORY" --image-ids $(echo $TAGS_TO_DELETE) > /dev/null 2>&1
+ test -z "$DIGESTS_TO_DELETE" ||
+ aws ecr batch-delete-image --repository-name "$REPOSITORY" --image-ids $(echo $DIGESTS_TO_DELETE) > /dev/null 2>&1
+ done <<< "$ALL_TAGS"
+
+ echo "deleted $(echo "$ALL_TAGS" | wc -l) tags"
+ fi
+
+ }
+
+
+ cleanup_ci_cache() {
+ DATE=${1:-$(date -v-1m +%F)}
+ CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}
+
+ echo "deleting from cache $CACHE_BUCKET all older than $DATE"
+
+ aws_ecr_login
+
+ while read -r line; do
+ datum=$(echo $line | cut -c1-10)
+ if [[ "$datum" < "$DATE" ]] ; then
+ # Shell Parameter Expansion: ${parameter##word}
+ # Strips the longest prefix matching "word" and returns the rest of "parameter"
+ # Here we need the end of the string after "project/" (the S3 gitlab project id and filename)
+ TO_DELETE="$CACHE_BUCKET${line##* project/}"
+ echo $TO_DELETE
+ aws s3 rm $TO_DELETE
+ fi
+ done < <(aws s3 ls $CACHE_BUCKET --recursive)
+ }
+
+ #!/usr/bin/env bash
+
  ftp_ikea_k8s() {
  SSH_LOCAL_PORT=2230
  FTP_LOCAL_PORT=25500
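
Taken together, the helpers above support an order-creation flow roughly like this sketch (hypothetical values; it assumes the scenario file generated by `_create_scenario_file_if_not_exist` has been filled in first):

    SCENARIO="$HOME/scenario-testing"
    _create_scenario_file_if_not_exist        # first run writes a template and asks you to edit it
    source "$SCENARIO"
    TOKEN=$(get_token "$ENV")
    BARCODES=$(bash_array_to_json "${BARCODE_PART}00001" "${BARCODE_PART}00002")
    RESULT=$(call_create_sfh_order "$ENV" "$TOKEN" "$SCENARIO" 1 "$BARCODES" "$CODE_POSTAL")
    ORDER_ID=$(jq -r '.orderId' <<< "$RESULT")
    call_register_delivery "$ENV" "$TOKEN" "$SCENARIO" "$ORDER_ID" "$BARCODES"
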
@@ -1928,6 +2318,7 @@ datadog_schedule_downtime_single() {
  docker_build_push() {
  read -r -a BUILD_ARGS <<< "$1"
  DOCKER_BUILD_ARGS="--build-arg VCS_REF=$(git rev-parse --short HEAD)"
+
  for ARG_NAME in "${BUILD_ARGS[@]}"
  do
  DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS --build-arg $ARG_NAME=${!ARG_NAME}"
@@ -1936,13 +2327,17 @@ docker_build_push() {
  if ! image_exists $DOCKER_REGISTRY_ID $APPLICATION $CI_COMMIT_SHORT_SHA ; then
  docker pull $DOCKER_IMAGE || true
  SOURCE_URL=${CI_PROJECT_URL:8} # without "https://" protocol, like gitlab.com/colisweb-idl/colisweb/back/packing
- docker build $DOCKER_BUILD_ARGS \
+
+ docker buildx create --use
+
+ docker buildx build $DOCKER_BUILD_ARGS \
  -t $DOCKER_IMAGE_SHA \
+ --platform "linux/arm64,linux/amd64" \
  --label org.opencontainers.image.revision=$(git rev-parse HEAD) \
  --label org.opencontainers.image.source=$SOURCE_URL \
- --cache-from $DOCKER_IMAGE \
+ --provenance=false \
+ --push \
  $DOCKER_STAGE_PATH
- docker push $DOCKER_IMAGE_SHA
  fi
  }
 
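
Because `docker buildx build --push` sends the multi-platform result straight to the registry, the image never lands in the local daemon and the separate `docker push` is gone. Stripped of the CI variables, the invocation is roughly this (hypothetical image name; `--provenance=false` presumably keeps the image index free of the extra BuildKit attestation manifests):

    docker buildx create --use               # one-off: builder with multi-platform support
    docker buildx build \
      --platform linux/arm64,linux/amd64 \
      --provenance=false \
      --push \
      -t registry.example.com/my-app:abc1234 .
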
@@ -2243,11 +2638,11 @@ git_reveal() {
  }
  #!/usr/bin/env bash
 
- helm_deploy_v3() {
+ helm_deploy() {
  APPLICATION=$1
  ENVIRONMENT=$2
  VERSION=$3
- deploy_chart_v3 \
+ deploy_chart \
  --path_configs deploy \
  --path_chart deploy/$APPLICATION \
  --application $APPLICATION \
@@ -2256,7 +2651,7 @@ helm_deploy_v3() {
  --helm_extra_args --set global.version=$VERSION
  }
 
- deploy_chart_v3() {
+ deploy_chart() {
  set -e
  set -x
 
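
A usage sketch for the renamed wrapper (hypothetical application name and version):

    helm_deploy my-service staging 1.4.2   # deploys chart deploy/my-service with global.version=1.4.2
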
@@ -2305,13 +2700,13 @@ deploy_chart_v3() {
  # Configure Kubectl
  configure_kubectl_for ${environment}
 
- # Configure helm3
- helm3 version --namespace ${namespace} || true
- # helm3 stable repo have changed and must be updated manually, in versions < v2.17.0
- helm3 repo add colisweb s3://colisweb-helm-charts/colisweb
- helm3 repo add stable https://charts.helm.sh/stable
- helm3 repo update
- helm3 dependency update ${root_path}/${path_chart}
+ # Configure helm
+ helm version --namespace ${namespace} || true
+ # the helm stable repo has changed and must be updated manually in versions < v2.17.0
+ helm repo add colisweb s3://colisweb-helm-charts/colisweb
+ helm repo add stable https://charts.helm.sh/stable
+ helm repo update
+ helm dependency update ${root_path}/${path_chart}
 
  # Gather values/*.yaml files
  values_path="${root_path}/${path_chart}/values"
@@ -2319,7 +2714,7 @@ deploy_chart_v3() {
  [ -d $values_path ] && values_files=$(find $values_path -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')
 
  # Deploy
- helm3 upgrade --install \
+ helm upgrade --install \
  --namespace ${namespace} \
  ${values_files} \
  -f ${root_path}/${path_configs}/common.yaml \
@@ -2341,7 +2736,7 @@ deploy_chart_v3() {
  set +x
  }
 
- verify_deployments_v3() {
+ verify_deployments() {
  set -e
 
  # usage :
@@ -2361,7 +2756,7 @@ verify_deployments_v3() {
 
  # Get all Deployments names from the deployed chart
  DEPLOYMENTS=(
- $(helm3 get manifest --namespace $NAMESPACE $RELEASE | yq -rs '.[] | select(.kind=="Deployment") | .metadata.name')
+ $(helm get manifest --namespace $NAMESPACE $RELEASE | yq --no-doc -r 'select(.kind=="Deployment").metadata.name')
  )
 
  echo "verifying on $NAMESPACE deployments ${DEPLOYMENTS[@]} with a timeout of $TIMEOUT"