@colisweb/rescript-toolkit 5.24.1 → 5.24.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -92,63 +92,6 @@ aws_ecr_token() {
  aws ecr get-authorization-token --region=eu-west-1 --output text --query 'authorizationData[].authorizationToken'
  }

- # you will need jq to use these commands. You can install it using "brew install jq"
- # delete_images colisweb_api 8
- # will delete images older than 8 weeks
- delete_images() {
-
- REPO=$1
- WEEKS=${2:-16}
-
- WEEKS_AGO=$(date -v-${WEEKS}w +%F)
-
- #Get all ecr images
- IMAGES=$(aws ecr describe-images --repository-name $REPO --output json)
-
- #Filter unnecessary values and map `imagePushedAt` to EPOCH
- NON_LATEST_IMAGES=$(echo $IMAGES | jq '[.imageDetails[] | select(.imageTags | any(endswith("latest")) | not)]')
-
- #Filter on EPOCH
- OLD_IMAGES=$(echo $NON_LATEST_IMAGES | jq --arg date $WEEKS_AGO '.[] | select(.imagePushedAt[0:10] < $date).imageDigest')
- while IFS= read -r IMAGE; do
- if [ "$IMAGE" != "" ]; then
- echo "Deleting $IMAGE from $REPO"
- AWS_PAGER="" aws ecr batch-delete-image --repository-name $REPO --image-ids imageDigest=$IMAGE
- fi
- done <<< "$OLD_IMAGES"
- }
-
- # delete_images_all_repos 12
- # will delete images in all repositories older than 12 weeks
- delete_images_all_repos() {
- REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[]|.[].repositoryName')
-
- while IFS= read -r REPO; do
- echo "processing ECR repository $REPO"
- delete_images $REPO $1
- done <<< "$REPOSITORIES"
- }
-
- delete_old_cache() {
- DATE=${1:-$(date -v-1m +%F)}
- CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}
-
- echo "deleting from cache $CACHE_BUCKET all older than $DATE"
-
- aws_ecr_login
-
- while read -r line; do
- datum=$(echo $line | cut -c1-10)
- if [[ "$datum" < "$DATE" ]] ; then
- # Shell Parameter Expansion: ${parameter##word}
- # Allow to return the result from "word" to the end of "parameters"
- # Here we need the end of the string after "project/" (corresponding to the S3 gitlab project id and filename)
- TO_DELETE="$CACHE_BUCKET${line##* project/}"
- echo $TO_DELETE
- aws s3 rm $TO_DELETE
- fi
- done < <(aws s3 ls $CACHE_BUCKET --recursive)
- }

  #!/usr/bin/env bash

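A portability note on the cleanup helpers removed above (they are re-added under new names further down in this diff): `date -v-${WEEKS}w` and `date -v-1m` are BSD/macOS date syntax. A rough GNU coreutils equivalent, offered as an assumption for Linux users and not part of the package:

    date -v-8w +%F              # BSD/macOS date: ISO date 8 weeks ago, as used above
    date -d '8 weeks ago' +%F   # GNU date equivalent (sketch)
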
@@ -250,11 +193,11 @@ entries:
  cronjob:
  EOT

- # helm3 stable repo have changed and must be updated manually, in versions < v2.17.0
- helm3 repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
- helm3 repo add stable https://charts.helm.sh/stable --force-update
- helm3 repo update
- helm3 dependency update ${ROOT_PATH}/${CHART_PATH}
+ # helm stable repo have changed and must be updated manually, in versions < v2.17.0
+ helm repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
+ helm repo add stable https://charts.helm.sh/stable --force-update
+ helm repo update
+ helm dependency update ${ROOT_PATH}/${CHART_PATH}

  # Gather values/*.yaml files
  VALUES_PATH="${ROOT_PATH}/${CHART_NAME}/values"
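The `--force-update` flags make the repo registration idempotent; without them, re-adding a repo whose URL has changed fails on Helm 3.3.2 and later. A minimal sketch of the behavior, assuming Helm >= 3.3.2:

    helm repo add stable https://old-url.example/stable
    helm repo add stable https://charts.helm.sh/stable                  # fails: name already exists
    helm repo add stable https://charts.helm.sh/stable --force-update   # succeeds, replaces the URL
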
@@ -262,7 +205,7 @@ EOT
  [ -d $VALUES_PATH ] && VALUES_FILES=$(find $VALUES_PATH -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')

  # Deploy
- helm3 upgrade --install \
+ helm upgrade --install \
  --namespace ${ENVIRONMENT} \
  ${VALUES_FILES} \
  -f ${ROOT_PATH}/${CONFIG_PATH}/common.yaml \
@@ -272,7 +215,7 @@ EOT
  ${CHART_NAME} ${ROOT_PATH}/${CHART_PATH}


- verify_deployments_v3 -t 10m $ENVIRONMENT $CHART_NAME
+ verify_deployments -t 10m $ENVIRONMENT $CHART_NAME

  }

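verify_deployments is defined near the end of this diff; it resolves the chart's Deployment names and then waits for each rollout. The `-t 10m` budget is roughly comparable to running, per deployment (a sketch, assuming kubectl already targets the right cluster, with a hypothetical deployment name):

    kubectl -n "$ENVIRONMENT" rollout status deployment/some-deployment --timeout=10m
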
@@ -596,7 +539,7 @@ mysql_on_k8() {
  local query=$7

  kubectl -n ${namespace} run ${service}-mysql-init \
- --image widdpim/mysql-client \
+ --image arey/mysql-client \
  --restart=Never \
  --attach --rm \
  -- \
@@ -612,8 +555,6 @@ kube_init_database_once() {
  echo " Initializing Database '$db_database' for namespace $namespace"
  echo "======================="

- set -x
-
  echo "Checking if Database '$db_database' exists"
  set +e
  psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -lqtA | cut -d\| -f1 | grep "^$db_database$"
@@ -676,14 +617,14 @@ kube_init_datadog_in_database() {
  extract_args 8 namespace db_host db_port db_init_username db_init_password db_datadog_username db_datadog_password db_datadog_schema $*

  echo "======================="
- echo " Initializing Datadog Agent Requiement for namespace $namespace"
+ echo " Initializing Datadog Agent Requirement for namespace $namespace"
  echo "======================="

  echo "Checking if User '$db_datadog_username' exists"
  local service="datadog"
  found_db_users=$(mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'SELECT user FROM mysql.user;')
  set +e
- echo $found_db_users | grep "^$db_datadog_username$"
+ echo "$found_db_users" | grep "^$db_datadog_username$"
  return_code=$?
  set -e

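The quoting fix above is load-bearing: unquoted, `echo $found_db_users` lets word splitting collapse the multi-line mysql output onto a single line, so the anchored pattern can never match. A minimal illustration:

    users=$'root\ndatadog\napp'
    echo $users   | grep -c '^datadog$'   # 0: lines are joined by word splitting
    echo "$users" | grep -c '^datadog$'   # 1: newlines are preserved
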
@@ -843,8 +784,6 @@ kube_init_service_database() {

  local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"

- set -x
-
  echo "Checking if Database '$db_database' exists"
  set +e
  psql_on_k8 $namespace $service $db_connection -lqtA | cut -d\| -f1 | grep "^$db_database$"
@@ -873,6 +812,68 @@ kube_init_service_database() {

  #!/usr/bin/env bash

+ # Allow to use JMX connection to retrieve data and metrics from the pods within kubernetes
+ # You will need visualVM to use this tool https://visualvm.github.io/
+ # ex: bind_jmx testing notification
+ bind_jmx() {
+
+ local ENV=$1
+ local SERVICE_NAME=$2
+ local PORT=2242
+
+ start_ssh_bastion $ENV $PORT
+
+ echo "root" | ssh -f -N -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -D 7777 root@127.0.0.1 -p 2242
+ local PODS=$(kubectl -n $ENV get pods -o wide | grep $SERVICE_NAME | grep -Eo '^[^ ]+')
+
+ echo "Choose one of the following pod to get metrics from..."
+ local POD_NAME=$(gum choose $PODS)
+ local POD_IP=$(
+ kubectl -n $ENV get pods -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIP}{"\n"}{end}' |
+ grep $POD_NAME |
+ cut -d' ' -f2 |
+ head -1
+ )
+
+ jconsole -J-DsocksProxyHost=localhost \
+ -J-DsocksProxyPort=7777 \
+ service:jmx:rmi:///jndi/rmi://$POD_IP:7199/jmxrmi \
+ -J-DsocksNonProxyHosts= &
+
+ cat << EOF
+ Now start VisualVM
+ Preferences > Network > Manual Proxy Settings
+ SOCKS Proxy Line: Set 'localhost' and Port '7777'
+ File > Add JMX Connection
+ Set $POD_IP:7199, check 'do not require an SSL connection'
+ Remember to kill you bastion afterward using 'stop_ssh_bastion'
+ EOF
+ }
+ #!/usr/bin/env bash
+
+ function kstatus() {
+ if [ -z "$3" ]
+ then
+ configure_kubectl_for $1 && watch -n 1 "kubectl -n $1 get $2"
+ else
+ configure_kubectl_for $1 && watch -n 1 "kubectl -n $1 get $2 | grep $3"
+ fi
+ }
+
+ #!/usr/bin/env bash
+
+ k8_nodes_stats() {
+ kubectl get nodes -o name |
+ xargs kubectl describe |
+ grep "^Name\|workType\|cpu \|memory " |
+ sed -r 's/[ :=]+/\t/g' |
+ sed 's/\tworkType\t//g' |
+ sed -r 's/^Name/---\nName/g' |
+ grep --color "Name\|web\|workers\|cpu\|memory\|---"
+ }
+
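Hypothetical invocations of the two helpers above (the namespace and service names are placeholders):

    kstatus testing pods                      # watch all pods in the testing namespace
    kstatus testing pods notification-http    # same view, filtered through grep
    k8_nodes_stats                            # one Name/workType/cpu/memory block per node
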
+ #!/usr/bin/env bash
+
  # Port forward on the first matching pod
  # Ex :
  # pod_forward testing notification-http
@@ -948,6 +949,45 @@ pick_pod() {
  fi
  }

+ # pods_settings $ENV
+ # Will output a CSV (;) of all deployments on this environment with cpu and memory request and limits
+ # Errors and null outputs are ignored and won't be in the output.
+ pods_resources() {
+ ENV=$1
+ configure_kubectl_for $ENV
+ DEPLOYMENTS=(
+ $(kubectl -n $ENV get deployments | grep -Eo '^[^ ]+' | grep -v 'NAME')
+ )
+ echo "deployment; request_cpu; request_memory; limits_cpu;limits_memory"
+ for D in "${DEPLOYMENTS[@]}"; do
+ info=$(kubectl -n $ENV get deployment -o yaml $D |
+ yq '.spec.template.spec.containers[].resources' |
+ yq '.L = .requests.cpu + "; " + .requests.memory + "; " + .limits.cpu + "; " + .limits.memory' |
+ yq ".L") 2&>/dev/null
+ if ! [ "$info" = "null" ]; then
+ echo "$D; $info"
+ fi
+ done
+ }
+
+ pods_strategies() {
+ ENV=$1
+ configure_kubectl_for $ENV
+ DEPLOYMENTS=(
+ $(kubectl -n $ENV get deployments | grep -Eo '^[^ ]+' | grep -v 'NAME')
+ )
+ echo "deployment; max_surge; max_unavailable"
+ for D in "${DEPLOYMENTS[@]}"; do
+ info=$(kubectl -n $ENV get deployment -o yaml $D |
+ yq '.spec.strategy' |
+ yq '.L = .rollingUpdate.maxSurge + "; " + .rollingUpdate.maxUnavailable' |
+ yq ".L") 2&>/dev/null
+ if ! [ "$info" = "null" ]; then
+ echo "$D; $info"
+ fi
+ done
+ }
+
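A hypothetical run of pods_resources, with illustrative values; pods_strategies works the same way for the rolling-update settings:

    pods_resources testing > testing-resources.csv
    # deployment; request_cpu; request_memory; limits_cpu;limits_memory
    # notification-http; 250m; 512Mi; 500m; 1Gi
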
  #!/usr/bin/env bash

  bastion_config_for_redis_ca() {
@@ -1498,6 +1538,309 @@ jwt_token() {
  fi
  }

+ #!/bin/bash
+
+ SCRIPT_PATH=$(dirname $(readlink -f $0))
+ PATH="$PATH:$SCRIPT_PATH/script"
+
+ function get_token {
+ local ENV=$1
+ local LOGINFILE="$HOME/scriptlogin"
+
+ if [ ! -f "$LOGINFILE" ]; then
+ cat > "$LOGINFILE" <<-'EOF'
+ #!/bin/bash
+ case $ENV in
+ "testing")
+ local USERLOGIN=""
+ local PASSWORD=""
+ ;;
+ "recette")
+ local USERLOGIN=""
+ local PASSWORD=""
+ ;;
+ "staging")
+ local USERLOGIN=""
+ local PASSWORD=""
+ ;;
+ *)
+ local USERLOGIN=""
+ local PASSWORD=""
+ echo "ENV ${ENV} inconue"
+ return
+ ;;
+ esac
+ EOF
+ fi
+
+ source "${LOGINFILE}"
+
+ if [ -z "$PASSWORD" ] || [ -z "$USERLOGIN" ]
+ then
+ echo éditer le ficher "$LOGINFILE"
+ return 1
+ fi
+
+ curl -o /dev/null -D - "https://api.$ENV.colisweb.com/api/v6/authent/external/session" \
+ --data-raw '{"username":"'"${USERLOGIN}"'","password":"'"${PASSWORD/\"/\\\"}"'"}' \
+ --compressed 2> /dev/null | grep set-cook | sed -e 's/.*session=//g;s/;.*//g'
+ }
+
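Typical use of get_token (a sketch; the credentials are read from the generated ~/scriptlogin file):

    TOKEN=$(get_token testing)
    [ -n "$TOKEN" ] || { echo "no session token returned"; exit 1; }
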
+ function bash_array_to_json {
+ function join {
+ local IFS="$1"
+ shift
+ echo "$*"
+ }
+
+ echo '["'"$(join , $*| sed -e 's/,/","/g' )"'"]' | jq
+ }
+
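bash_array_to_json turns its arguments into a JSON string array, e.g.:

    bash_array_to_json aaa bbb ccc
    # ["aaa","bbb","ccc"]   (pretty-printed by the trailing jq)
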
+ function get_random_street {
+ if [ ! -f "rue.lst" ]; then
+ curl --output tmp1.gz https://adresse.data.gouv.fr/data/ban/adresses/latest/csv/adresses-59.csv.gz
+ gzip -d tmp1.gz
+ cut -d\; -f3,5,6,8 tmp1 | sed '/;Lille/!d' > rue.lst
+ rm tmp
+ fi
+
+ sort -R rue.lst | head -n 1
+ }
+
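get_random_street caches the open BAN address data for département 59, keeps only the Lille rows, and returns one random `;`-separated line (number, street, postal code, city), along the lines of:

    get_random_street
    # 12;Rue Nationale;59000;Lille   (illustrative output)
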
+ function rand_slot {
+ DATE="$1"
+
+ USAGE=$(cat <<-EOF
+ {"start":"${DATE}T06:00:00.000Z", "end":"${DATE}T08:00:00.000Z" }
+ {"start":"${DATE}T08:00:00.000Z", "end":"${DATE}T10:00:00.000Z" }
+ {"start":"${DATE}T10:00:00.000Z", "end":"${DATE}T12:00:00.000Z" }
+ {"start":"${DATE}T16:00:00.000Z", "end":"${DATE}T18:00:00.000Z" }
+ {"start":"${DATE}T18:00:00.000Z", "end":"${DATE}T20:00:00.000Z" }
+ EOF
+ )
+
+ echo "$USAGE" | sort -u -R | head -n 1
+ }
+
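rand_slot picks one of five fixed two-hour delivery windows for the given day, e.g.:

    rand_slot 2024-05-01
    # {"start":"2024-05-01T08:00:00.000Z", "end":"2024-05-01T10:00:00.000Z" }
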
+ function call_create_sfh_order {
+ local ENV=$1
+ local TOKEN=$2
+ source "$3"
+ local POS=$4
+ local BARECODES="$5"
+ local PACKAGES=$(echo "$BARECODES" | jq '[{
+ "barcode": .[],
+ "length": 10.5,
+ "height": 9.0,
+ "width": 9.0,
+ "weight": 10.11,
+ "description": "test parel",
+ "options": [],
+ "productTypology": "Classical",
+ "packageType": "Parcel"
+ }
+ ]')
+
+ IFS=";" read -r nu rue code_postal ville < <(get_random_street)
+ JSON='{
+ "primaryOrderReference": "'"${PRIMARY_REF}${POS}"'",
+ "secondaryOrderReference": null,
+ "stages": [
+ {
+ "type": "Pickup",
+ "packageBarcodes": '"$BARECODES"',
+ "location": {
+ "type": "Warehouse",
+ "warehouseCode": "'"$PICKUP_WAREHOUSE_CODE"'"
+ }
+ },
+ {
+ "type": "Dropoff",
+ "packageBarcodes": '"$BARECODES"',
+ "location": {
+ "type": "Address",
+ "address": {
+ "address1": "'"$nu $rue"'",
+ "postalCode": "'"$code_postal"'",
+ "city": "'"$ville"'",
+ "country": "France",
+ "floor": 0,
+ "lift": "with_lift"
+ },
+ "contact": {
+ "name": "John Doe",
+ "primaryPhone": "+33606060606"
+ }
+ }
+ }
+ ],
+ "packages": '"$PACKAGES"',
+ "owner": {
+ "accountIdentifier": "'$ACCOUNTIDENTIFIER'"
+ },
+ "deliveryOptions": [],
+ "ecommerceValidationDate": "'"${DATE}"'"
+ }'
+
+ curl -X POST https://api.$ENV.colisweb.com/api/v6/order/external/warehouse/orders -H 'content-type: application/json' --cookie session="$TOKEN" --data-raw "$JSON"
+ }
+
+
+ function call_scan {
+ local ENV=$1
+ local TOKEN=$2
+ source "$3"
+ local BARECODES="$4"
+ local SCAN=$(echo "$BARECODES" | jq '[{"barcode" :.[], "context": "shuttle"}]')
+
+ IFS=";" read -r nu rue code_postal ville < <(get_random_street)
+ JSON='{"scans":'$SCAN'}'
+
+ curl -X POST https://api.$ENV.colisweb.com/api/v6/parcel/external/units/scans/bulk -H 'content-type: application/json' --cookie session="$TOKEN" --data-raw "$JSON"
+ }
+
+
+ function call_register_delivery {
+ local ENV=$1
+ local TOKEN=$2
+
+ SCENARIO=$3
+ source "$SCENARIO"
+
+ local ORDERID=$4
+ local BARECODES="$5"
+
+ curl -X POST https://api.$ENV.colisweb.com/api/v6/order/external/warehouse/orders/"$ORDERID"/deliveries \
+ --cookie session="$TOKEN" --data-raw '{
+ "slot": '"$(rand_slot ${DELIVERY_DATE})"',
+ "storeIdOwner":"'"$STOREIDOWNER"'",
+ "pickup":{"type":"hub","code":"'"$HUB"'"},
+ "barcodes":'"$BARECODES"',
+ "price":{"origin":"auto","amount":25.9},
+ "allowCustomerSlotUpdate":false
+ }'
+ }
+
+
+
+ function _create_scenario_file_if_not_exist () {
+ if [ ! -f "$SCENARIO" ]
+ then
+ cat > "$SCENARIO" <<-'EOF'
+ DELIVERY_DATE=$(date -v+7d '+%Y-%m-%d')
+ ENV="testing"
+ # ENV="staging"
+ # ENV="recette"
+
+ ACCOUNTIDENTIFIER="102"
+ HUB="duck"
+ STOREIDOWNER="184"
+
+ PICKUP_WAREHOUSE_CODE="422"
+
+ BARECODES_COUNT=5
+ PREF="aaaa"
+
+
+ DATE=$(date '+%Y-%m-%d')
+ RAND=$(date +%y%m%d%H%M%S)
+ BARECODE_PART=0000$RAND
+ PRIMARY_REF=$PREF$RAND
+ EOF
+ echo "éditer le fichier $SCENARIO"
+ return 1
+ fi
+ }
+
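Taken together, a hypothetical end-to-end run of these helpers could look like the following; every identifier comes from the generated scenario file, and nothing here is prescribed by the scripts themselves:

    SCENARIO="$HOME/scenario"
    _create_scenario_file_if_not_exist || exit 1   # on first run, edit the generated file
    source "$SCENARIO"
    TOKEN=$(get_token "$ENV")
    BARCODES=$(bash_array_to_json "${BARECODE_PART}1" "${BARECODE_PART}2")
    call_create_sfh_order "$ENV" "$TOKEN" "$SCENARIO" 1 "$BARCODES"
    call_scan "$ENV" "$TOKEN" "$SCENARIO" "$BARCODES"
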
+ #!/usr/bin/env bash
+
+ cleanup_merged_mr() {
+ COLISWEB_IDL_GROUP=3054234
+
+ BEFORE=${1:- $(date -I -v -2y)}
+
+ for (( COUNTER=1; COUNTER<=12; COUNTER+=2 )); do
+ cleanup_grouped_merged_mr $COLISWEB_IDL_GROUP $BEFORE $COUNTER &
+ done
+
+ }
+
+ cleanup_grouped_merged_mr() {
+ GROUP=$1
+ BEFORE=$2
+ PAGE_COUNT=$3
+ MERGED_MRS=($(curl --header "PRIVATE-TOKEN: $GITLAB_PAT" \
+ --url "https://gitlab.com/api/v4/groups/$GROUP/merge_requests?updated_before=${BEFORE}T08:00:00Z&status=merged&per_page=50&page=$PAGE_COUNT" |
+ jq -r '.[] | {iid: .iid|tostring, pid:.project_id|tostring} | (.pid + "/merge_requests/" + .iid)'))
+
+ for MR in ${MERGED_MRS[@]}; do
+ echo "https://gitlab.com/api/v4/projects/$MR"
+ curl --request DELETE \
+ --header "PRIVATE-TOKEN: $GITLAB_PAT" \
+ --url "https://gitlab.com/api/v4/projects/$MR"
+ done
+ }
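Hypothetical usage; a GITLAB_PAT with API scope is required, and the loop fans out over result pages 1, 3, 5, 7, 9 and 11 as background jobs:

    export GITLAB_PAT=glpat-xxxxxxxxxxxxxxxxxxxx   # placeholder token
    cleanup_merged_mr 2022-01-01                   # delete merged MRs last updated before this date
    cleanup_merged_mr                              # default cutoff: two years ago (BSD date syntax)
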
+ # FIXME
+ # image index (docker manifest) does not have tags and images are tagged but not marked as related to the index.
+ # Should be fixed using more complex procedure to relate index and images.
+ # you will need jq to use these commands. You can install it using "brew install jq"
+ # cleanup_ecr_images colisweb_api 8
+ # will delete images older than 8 weeks
+ cleanup_ecr_images() {
+
+ REPO=$1
+ WEEKS=${2:-16}
+
+ WEEKS_AGO=$(date -v-${WEEKS}w +%F)
+
+ #Get all ecr images
+ IMAGES=$(aws ecr describe-images --repository-name $REPO --output json)
+
+ #Filter unnecessary values and map `imagePushedAt` to EPOCH
+ NON_LATEST_IMAGES=$(echo $IMAGES | jq '[.imageDetails[] | select(.imageTags | any(endswith("latest")) | not)]')
+
+ #Filter on EPOCH
+ OLD_IMAGES=$(echo $NON_LATEST_IMAGES | jq --arg date $WEEKS_AGO '.[] | select(.imagePushedAt[0:10] < $date).imageDigest')
+ while IFS= read -r IMAGE; do
+ if [ "$IMAGE" != "" ]; then
+ echo "Deleting $IMAGE from $REPO"
+ AWS_PAGER="" aws ecr batch-delete-image --repository-name $REPO --image-ids imageDigest=$IMAGE
+ fi
+ done <<< "$OLD_IMAGES"
+ }
+
+ # cleanup_all_ecr_images 12
+ # will delete images in all repositories older than 12 weeks
+ cleanup_all_ecr_images() {
+ REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[]|.[].repositoryName')
+
+ while IFS= read -r REPO; do
+ echo "processing ECR repository $REPO"
+ cleanup_ecr_images $REPO $1
+ done <<< "$REPOSITORIES"
+ }
+
+ cleanup_ci_cache() {
+ DATE=${1:-$(date -v-1m +%F)}
+ CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}
+
+ echo "deleting from cache $CACHE_BUCKET all older than $DATE"
+
+ aws_ecr_login
+
+ while read -r line; do
+ datum=$(echo $line | cut -c1-10)
+ if [[ "$datum" < "$DATE" ]] ; then
+ # Shell Parameter Expansion: ${parameter##word}
+ # Allow to return the result from "word" to the end of "parameters"
+ # Here we need the end of the string after "project/" (corresponding to the S3 gitlab project id and filename)
+ TO_DELETE="$CACHE_BUCKET${line##* project/}"
+ echo $TO_DELETE
+ aws s3 rm $TO_DELETE
+ fi
+ done < <(aws s3 ls $CACHE_BUCKET --recursive)
+ }
+
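On the `${line##* project/}` expansion above: `##` deletes the longest prefix matching the pattern, which here strips everything up to and including " project/" from an `aws s3 ls --recursive` listing line. With a made-up line:

    line="2023-01-15 10:42:00    1048576 project/123/cache/default.zip"
    echo "${line##* project/}"
    # 123/cache/default.zip
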
  #!/usr/bin/env bash

  ftp_ikea_k8s() {
@@ -1877,6 +2220,7 @@ datadog_schedule_downtime_single() {
  docker_build_push() {
  read -r -a BUILD_ARGS <<< "$1"
  DOCKER_BUILD_ARGS="--build-arg VCS_REF=$(git rev-parse --short HEAD)"
+
  for ARG_NAME in "${BUILD_ARGS[@]}"
  do
  DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS --build-arg $ARG_NAME=${!ARG_NAME}"
@@ -1885,13 +2229,17 @@ docker_build_push() {
  if ! image_exists $DOCKER_REGISTRY_ID $APPLICATION $CI_COMMIT_SHORT_SHA ; then
  docker pull $DOCKER_IMAGE || true
  SOURCE_URL=${CI_PROJECT_URL:8} # without "https://" protocol, like gitlab.com/colisweb-idl/colisweb/back/packing
- docker build $DOCKER_BUILD_ARGS \
+
+ docker buildx create --use
+
+ docker buildx build $DOCKER_BUILD_ARGS \
  -t $DOCKER_IMAGE_SHA \
+ --platform "linux/arm64,linux/amd64" \
  --label org.opencontainers.image.revision=$(git rev-parse HEAD) \
  --label org.opencontainers.image.source=$SOURCE_URL \
- --cache-from $DOCKER_IMAGE \
+ --provenance=false \
+ --push \
  $DOCKER_STAGE_PATH
- docker push $DOCKER_IMAGE_SHA
  fi
  }

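The buildx switch publishes a multi-arch image index in a single `--push` step, which is what the FIXME note on the ECR cleanup helpers earlier in this diff alludes to: the tagged index is accompanied by untagged per-platform manifests. `--provenance=false` additionally keeps BuildKit from attaching provenance attestation manifests. To check what actually landed in the registry (a sketch):

    docker buildx imagetools inspect "$DOCKER_IMAGE_SHA"   # shows the index and its per-platform manifests
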
@@ -2027,7 +2375,7 @@ init_migrate_db() {

  unset KUBECONFIG

- configure_kubectl_for_ci ${ENVIRONMENT}
+ configure_kubectl_for ${ENVIRONMENT}

  kube_init_service_database \
  --namespace ${ENVIRONMENT} \
@@ -2073,7 +2421,7 @@ flyway_migrate() {
  CONFIGMAP_NAME="$service-flyway-migration-sql"
  POD_NAME="$service-flyway-migration"

- configure_kubectl_for_ci $environment
+ configure_kubectl_for $environment

  kubectl -n $namespace delete configmap $CONFIGMAP_NAME --ignore-not-found
  kubectl -n $namespace delete pod $POD_NAME --ignore-not-found
@@ -2126,7 +2474,7 @@ flyway_migrate() {

  flyway_sql_folder=$(pwd)/${MIGRATION_SQL_PATH}

- configure_kubectl_for_ci "${ENVIRONMENT}"
+ configure_kubectl_for "${ENVIRONMENT}"
  POD_NAME="${APPLICATION}-flyway-repair"
  CONFIGMAP_NAME="${APPLICATION}-flyway-repair-sql"

@@ -2192,11 +2540,11 @@ git_reveal() {
  }
  #!/usr/bin/env bash

- helm_deploy_v3() {
+ helm_deploy() {
  APPLICATION=$1
  ENVIRONMENT=$2
  VERSION=$3
- deploy_chart_v3 \
+ deploy_chart \
  --path_configs deploy \
  --path_chart deploy/$APPLICATION \
  --application $APPLICATION \
@@ -2205,7 +2553,7 @@ helm_deploy_v3() {
  --helm_extra_args --set global.version=$VERSION
  }

- deploy_chart_v3() {
+ deploy_chart() {
  set -e
  set -x

@@ -2252,15 +2600,15 @@ deploy_chart_v3() {
  unset KUBECONFIG

  # Configure Kubectl
- configure_kubectl_for_ci ${environment}
+ configure_kubectl_for ${environment}

- # Configure helm3
- helm3 version --namespace ${namespace} || true
- # helm3 stable repo have changed and must be updated manually, in versions < v2.17.0
- helm3 repo add colisweb s3://colisweb-helm-charts/colisweb
- helm3 repo add stable https://charts.helm.sh/stable
- helm3 repo update
- helm3 dependency update ${root_path}/${path_chart}
+ # Configure helm
+ helm version --namespace ${namespace} || true
+ # helm stable repo have changed and must be updated manually, in versions < v2.17.0
+ helm repo add colisweb s3://colisweb-helm-charts/colisweb
+ helm repo add stable https://charts.helm.sh/stable
+ helm repo update
+ helm dependency update ${root_path}/${path_chart}

  # Gather values/*.yaml files
  values_path="${root_path}/${path_chart}/values"
@@ -2268,7 +2616,7 @@ deploy_chart_v3() {
  [ -d $values_path ] && values_files=$(find $values_path -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')

  # Deploy
- helm3 upgrade --install \
+ helm upgrade --install \
  --namespace ${namespace} \
  ${values_files} \
  -f ${root_path}/${path_configs}/common.yaml \
@@ -2290,7 +2638,7 @@ deploy_chart_v3() {
  set +x
  }

- verify_deployments_v3() {
+ verify_deployments() {
  set -e

  # usage :
@@ -2310,7 +2658,7 @@ verify_deployments_v3() {

  # Get all Deployments names from the deployed chart
  DEPLOYMENTS=(
- $(helm3 get manifest --namespace $NAMESPACE $RELEASE | yq -rs '.[] | select(.kind=="Deployment") | .metadata.name')
+ $(helm get manifest --namespace $NAMESPACE $RELEASE | yq --no-doc -r 'select(.kind=="Deployment").metadata.name')
  )

  echo "verifying on $NAMESPACE deployments ${DEPLOYMENTS[@]} with a timeout of $TIMEOUT"
@@ -2353,40 +2701,6 @@ check_config_file() {
  fi
  }

- #!/usr/bin/env bash
-
- configure_kubectl_for_ci() {
- if [ -z ${GITLAB_PAT} ]; then
- echo "Cannot configure kubectl: no GITLAB_PAT configured"
- exit 1
- fi
-
- infra_env="$1"
- valid_envs="[testing][staging][production][performance][tests][recette]"
- echo "$valid_envs" | grep -q "\[$infra_env\]"
-
- if [ $? -ne 0 ]; then
- echo "Cannot configure kubectl for invalid env : $infra_env"
- echo "choose one of $valid_envs"
- exit 1
- fi
-
- mkdir -p ~/.kube
- curl -fsS \
- --header "PRIVATE-TOKEN: $GITLAB_PAT" \
- "https://gitlab.com/api/v4/projects/8141053/jobs/artifacts/$infra_env/raw/$infra_env.kubeconfig?job=4_kubernetes_config_output" \
- > ~/.kube/$infra_env.kubeconfig
-
- curl_return_code=$?
- if [ ${curl_return_code} -ne 0 ]; then
- echo "Cannot configure kubectl for $infra_env, get configuration failed with code $curl_return_code"
- exit ${curl_return_code}
- fi
-
- rm -f ~/.kube/config
- ln -s ~/.kube/$infra_env.kubeconfig ~/.kube/config
- echo "Configured kubectl for env : $infra_env"
- }
  notify_new_deployment() {
  jq --version || (apt update && apt install -y jq)

package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@colisweb/rescript-toolkit",
- "version": "5.24.1",
+ "version": "5.24.2",
  "type": "module",
  "scripts": {
  "clean": "rescript clean",
@@ -0,0 +1,30 @@
+ module Array = {
+ include Belt.Array
+
+ let findMap: (array<'a>, 'a => option<'b>) => option<'b> = (array, findMapper) => {
+ let result = ref(None)
+ let index = ref(0)
+
+ while result.contents->Option.isNone && index.contents < array->Array.length {
+ result := findMapper(array[index.contents]->Option.getExn)
+ index := index.contents + 1
+ }
+
+ result.contents
+ }
+
+ let findMapWithIndex: (array<'a>, (int, 'a) => option<'b>) => option<'b> = (
+ array,
+ findMapper,
+ ) => {
+ let result = ref(None)
+ let index = ref(0)
+
+ while result.contents->Option.isNone && index.contents < array->Array.length {
+ result := findMapper(array[index.contents]->Option.getExn, index.contents)
+ index := index.contents + 1
+ }
+
+ result.contents
+ }
+ }
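A hypothetical use of the new findMap: it returns the first element for which the mapper yields Some, short-circuiting the rest of the array. The new file's module path is not shown in this diff, so assume the Array module above is in scope:

    // first string that parses as an int
    let firstInt = ["a", "2", "3"]->Array.findMap(s => Belt.Int.fromString(s))
    // firstInt == Some(2)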