@colisweb/rescript-toolkit 5.24.3 → 5.24.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -92,63 +92,6 @@ aws_ecr_token() {
   aws ecr get-authorization-token --region=eu-west-1 --output text --query 'authorizationData[].authorizationToken'
 }
 
-# you will need jq to use these commands. You can install it using "brew install jq"
-# delete_images colisweb_api 8
-# will delete images older than 8 weeks
-delete_images() {
-
-  REPO=$1
-  WEEKS=${2:-16}
-
-  WEEKS_AGO=$(date -v-${WEEKS}w +%F)
-
-  #Get all ecr images
-  IMAGES=$(aws ecr describe-images --repository-name $REPO --output json)
-
-  #Filter unnecessary values and map `imagePushedAt` to EPOCH
-  NON_LATEST_IMAGES=$(echo $IMAGES | jq '[.imageDetails[] | select(.imageTags | any(endswith("latest")) | not)]')
-
-  #Filter on EPOCH
-  OLD_IMAGES=$(echo $NON_LATEST_IMAGES | jq --arg date $WEEKS_AGO '.[] | select(.imagePushedAt[0:10] < $date).imageDigest')
-  while IFS= read -r IMAGE; do
-    if [ "$IMAGE" != "" ]; then
-      echo "Deleting $IMAGE from $REPO"
-      AWS_PAGER="" aws ecr batch-delete-image --repository-name $REPO --image-ids imageDigest=$IMAGE
-    fi
-  done <<< "$OLD_IMAGES"
-}
-
-# delete_images_all_repos 12
-# will delete images in all repositories older than 12 weeks
-delete_images_all_repos() {
-  REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[]|.[].repositoryName')
-
-  while IFS= read -r REPO; do
-    echo "processing ECR repository $REPO"
-    delete_images $REPO $1
-  done <<< "$REPOSITORIES"
-}
-
-delete_old_cache() {
-  DATE=${1:-$(date -v-1m +%F)}
-  CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}
-
-  echo "deleting from cache $CACHE_BUCKET all older than $DATE"
-
-  aws_ecr_login
-
-  while read -r line; do
-    datum=$(echo $line | cut -c1-10)
-    if [[ "$datum" < "$DATE" ]] ; then
-      # Shell Parameter Expansion: ${parameter##word}
-      # Allow to return the result from "word" to the end of "parameters"
-      # Here we need the end of the string after "project/" (corresponding to the S3 gitlab project id and filename)
-      TO_DELETE="$CACHE_BUCKET${line##* project/}"
-      echo $TO_DELETE
-      aws s3 rm $TO_DELETE
-    fi
-  done < <(aws s3 ls $CACHE_BUCKET --recursive)
-}
 
 #!/usr/bin/env bash
 
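Two constructs in the deleted block above are worth unpacking (the same code is re-added under new names later in this diff): the `${parameter##word}` expansion and the jq filter that skips `latest`-tagged images. A minimal sketch with a made-up `aws s3 ls` line and a made-up image list:

    # ${line##* project/} strips the longest prefix matching "* project/",
    # leaving the GitLab project id and cache file name:
    line="2023-01-02 10:15:04    1048576 project/12345/default.zip"
    echo "${line##* project/}"      # -> 12345/default.zip

    # The jq filter keeps only images with no tag ending in "latest":
    echo '{"imageDetails":[{"imageTags":["latest"]},{"imageTags":["v1"]}]}' |
      jq '[.imageDetails[] | select(.imageTags | any(endswith("latest")) | not)]'
    # -> [{"imageTags":["v1"]}]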
@@ -250,11 +193,11 @@ entries:
 cronjob:
 EOT
 
-  #
-
-
-
-
+  # helm stable repo have changed and must be updated manually, in versions < v2.17.0
+  helm repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
+  helm repo add stable https://charts.helm.sh/stable --force-update
+  helm repo update
+  helm dependency update ${ROOT_PATH}/${CHART_PATH}
 
   # Gather values/*.yaml files
   VALUES_PATH="${ROOT_PATH}/${CHART_NAME}/values"

@@ -262,7 +205,7 @@ EOT
   [ -d $VALUES_PATH ] && VALUES_FILES=$(find $VALUES_PATH -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')
 
   # Deploy
-
+  helm upgrade --install \
     --namespace ${ENVIRONMENT} \
    ${VALUES_FILES} \
     -f ${ROOT_PATH}/${CONFIG_PATH}/common.yaml \

@@ -272,7 +215,7 @@ EOT
     ${CHART_NAME} ${ROOT_PATH}/${CHART_PATH}
 
 
-
+  verify_deployments -t 10m $ENVIRONMENT $CHART_NAME
 
 }
 

@@ -612,8 +555,6 @@ kube_init_database_once() {
   echo " Initializing Database '$db_database' for namespace $namespace"
   echo "======================="
 
-  set -x
-
   echo "Checking if Database '$db_database' exists"
   set +e
   psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -lqtA | cut -d\| -f1 | grep "^$db_database$"

@@ -843,8 +784,6 @@ kube_init_service_database() {
 
   local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"
 
-  set -x
-
   echo "Checking if Database '$db_database' exists"
   set +e
   psql_on_k8 $namespace $service $db_connection -lqtA | cut -d\| -f1 | grep "^$db_database$"

@@ -912,6 +851,17 @@ EOF
 }
 #!/usr/bin/env bash
 
+function kstatus() {
+  if [ -z "$3" ]
+  then
+    configure_kubectl_for $1 && watch -n 1 "kubectl -n $1 get $2"
+  else
+    configure_kubectl_for $1 && watch -n 1 "kubectl -n $1 get $2 | grep $3"
+  fi
+}
+
+#!/usr/bin/env bash
+
 k8_nodes_stats() {
   kubectl get nodes -o name |
     xargs kubectl describe |
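The new `kstatus` helper takes an environment, a resource type, and an optional grep pattern. A usage sketch (the environment name and pattern are illustrative, not taken from this diff):

    # Watch all pods in the testing namespace, refreshing every second:
    kstatus testing pods

    # Same view, filtered to lines matching "api":
    kstatus testing pods api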
@@ -999,6 +949,45 @@ pick_pod() {
   fi
 }
 
+# pods_settings $ENV
+# Will output a CSV (;) of all deployments on this environment with cpu and memory request and limits
+# Errors and null outputs are ignored and won't be in the output.
+pods_resources() {
+  ENV=$1
+  configure_kubectl_for $ENV
+  DEPLOYMENTS=(
+    $(kubectl -n $ENV get deployments | grep -Eo '^[^ ]+' | grep -v 'NAME')
+  )
+  echo "deployment; request_cpu; request_memory; limits_cpu;limits_memory"
+  for D in "${DEPLOYMENTS[@]}"; do
+    info=$(kubectl -n $ENV get deployment -o yaml $D |
+      yq '.spec.template.spec.containers[].resources' |
+      yq '.L = .requests.cpu + "; " + .requests.memory + "; " + .limits.cpu + "; " + .limits.memory' |
+      yq ".L") 2&>/dev/null
+    if ! [ "$info" = "null" ]; then
+      echo "$D; $info"
+    fi
+  done
+}
+
+pods_strategies() {
+  ENV=$1
+  configure_kubectl_for $ENV
+  DEPLOYMENTS=(
+    $(kubectl -n $ENV get deployments | grep -Eo '^[^ ]+' | grep -v 'NAME')
+  )
+  echo "deployment; max_surge; max_unavailable"
+  for D in "${DEPLOYMENTS[@]}"; do
+    info=$(kubectl -n $ENV get deployment -o yaml $D |
+      yq '.spec.strategy' |
+      yq '.L = .rollingUpdate.maxSurge + "; " + .rollingUpdate.maxUnavailable' |
+      yq ".L") 2&>/dev/null
+    if ! [ "$info" = "null" ]; then
+      echo "$D; $info"
+    fi
+  done
+}
+
 #!/usr/bin/env bash
 
 bastion_config_for_redis_ca() {
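Both helpers added above print one semicolon-separated line per deployment, so the output can be redirected to a file and opened as a CSV. A sketch, assuming a `staging` environment is configured:

    # CPU/memory requests and limits for every deployment in staging:
    pods_resources staging > staging-resources.csv

    # Rolling-update maxSurge/maxUnavailable settings, same format:
    pods_strategies staging > staging-strategies.csv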
@@ -1549,6 +1538,309 @@ jwt_token() {
   fi
 }
 
+#!/bin/bash
+
+SCRIPT_PATH=$(dirname $(readlink -f $0))
+PATH="$PATH:$SCRIPT_PATH/script"
+
+function get_token {
+  local ENV=$1
+  local LOGINFILE="$HOME/scriptlogin"
+
+  if [ ! -f "$LOGINFILE" ]; then
+    cat > "$LOGINFILE" <<-'EOF'
+#!/bin/bash
+case $ENV in
+  "testing")
+    local USERLOGIN=""
+    local PASSWORD=""
+    ;;
+  "recette")
+    local USERLOGIN=""
+    local PASSWORD=""
+    ;;
+  "staging")
+    local USERLOGIN=""
+    local PASSWORD=""
+    ;;
+  *)
+    local USERLOGIN=""
+    local PASSWORD=""
+    echo "ENV ${ENV} inconue"
+    return
+    ;;
+esac
+EOF
+  fi
+
+  source "${LOGINFILE}"
+
+  if [ -z "$PASSWORD" ] || [ -z "$USERLOGIN" ]
+  then
+    echo éditer le ficher "$LOGINFILE"
+    return 1
+  fi
+
+  curl -o /dev/null -D - "https://api.$ENV.colisweb.com/api/v6/authent/external/session" \
+    --data-raw '{"username":"'"${USERLOGIN}"'","password":"'"${PASSWORD/\"/\\\"}"'"}' \
+    --compressed 2> /dev/null | grep set-cook | sed -e 's/.*session=//g;s/;.*//g'
+}
+
+function bash_array_to_json {
+  function join {
+    local IFS="$1"
+    shift
+    echo "$*"
+  }
+
+  echo '["'"$(join , $*| sed -e 's/,/","/g' )"'"]' | jq
+}
+
+function get_random_street {
+  if [ ! -f "rue.lst" ]; then
+    curl --output tmp1.gz https://adresse.data.gouv.fr/data/ban/adresses/latest/csv/adresses-59.csv.gz
+    gzip -d tmp1.gz
+    cut -d\; -f3,5,6,8 tmp1 | sed '/;Lille/!d' > rue.lst
+    rm tmp
+  fi
+
+  sort -R rue.lst | head -n 1
+}
+
+function rand_slot {
+  DATE="$1"
+
+  USAGE=$(cat <<-EOF
+{"start":"${DATE}T06:00:00.000Z", "end":"${DATE}T08:00:00.000Z" }
+{"start":"${DATE}T08:00:00.000Z", "end":"${DATE}T10:00:00.000Z" }
+{"start":"${DATE}T10:00:00.000Z", "end":"${DATE}T12:00:00.000Z" }
+{"start":"${DATE}T16:00:00.000Z", "end":"${DATE}T18:00:00.000Z" }
+{"start":"${DATE}T18:00:00.000Z", "end":"${DATE}T20:00:00.000Z" }
+EOF
+  )
+
+  echo "$USAGE" | sort -u -R | head -n 1
+}
+
+function call_create_sfh_order {
+  local ENV=$1
+  local TOKEN=$2
+  source "$3"
+  local POS=$4
+  local BARECODES="$5"
+  local PACKAGES=$(echo "$BARECODES" | jq '[{
+    "barcode": .[],
+    "length": 10.5,
+    "height": 9.0,
+    "width": 9.0,
+    "weight": 10.11,
+    "description": "test parel",
+    "options": [],
+    "productTypology": "Classical",
+    "packageType": "Parcel"
+  }
+  ]')
+
+  IFS=";" read -r nu rue code_postal ville < <(get_random_street)
+  JSON='{
+    "primaryOrderReference": "'"${PRIMARY_REF}${POS}"'",
+    "secondaryOrderReference": null,
+    "stages": [
+      {
+        "type": "Pickup",
+        "packageBarcodes": '"$BARECODES"',
+        "location": {
+          "type": "Warehouse",
+          "warehouseCode": "'"$PICKUP_WAREHOUSE_CODE"'"
+        }
+      },
+      {
+        "type": "Dropoff",
+        "packageBarcodes": '"$BARECODES"',
+        "location": {
+          "type": "Address",
+          "address": {
+            "address1": "'"$nu $rue"'",
+            "postalCode": "'"$code_postal"'",
+            "city": "'"$ville"'",
+            "country": "France",
+            "floor": 0,
+            "lift": "with_lift"
+          },
+          "contact": {
+            "name": "John Doe",
+            "primaryPhone": "+33606060606"
+          }
+        }
+      }
+    ],
+    "packages": '"$PACKAGES"',
+    "owner": {
+      "accountIdentifier": "'$ACCOUNTIDENTIFIER'"
+    },
+    "deliveryOptions": [],
+    "ecommerceValidationDate": "'"${DATE}"'"
+  }'
+
+  curl -X POST https://api.$ENV.colisweb.com/api/v6/order/external/warehouse/orders -H 'content-type: application/json' --cookie session="$TOKEN" --data-raw "$JSON"
+}
+
+
+function call_scan {
+  local ENV=$1
+  local TOKEN=$2
+  source "$3"
+  local BARECODES="$4"
+  local SCAN=$(echo "$BARECODES" | jq '[{"barcode" :.[], "context": "shuttle"}]')
+
+  IFS=";" read -r nu rue code_postal ville < <(get_random_street)
+  JSON='{"scans":'$SCAN'}'
+
+  curl -X POST https://api.$ENV.colisweb.com/api/v6/parcel/external/units/scans/bulk -H 'content-type: application/json' --cookie session="$TOKEN" --data-raw "$JSON"
+}
+
+
+function call_register_delivery {
+  local ENV=$1
+  local TOKEN=$2
+
+  SCENARIO=$3
+  source "$SCENARIO"
+
+  local ORDERID=$4
+  local BARECODES="$5"
+
+  curl -X POST https://api.$ENV.colisweb.com/api/v6/order/external/warehouse/orders/"$ORDERID"/deliveries \
+    --cookie session="$TOKEN" --data-raw '{
+      "slot": '"$(rand_slot ${DELIVERY_DATE})"',
+      "storeIdOwner":"'"$STOREIDOWNER"'",
+      "pickup":{"type":"hub","code":"'"$HUB"'"},
+      "barcodes":'"$BARECODES"',
+      "price":{"origin":"auto","amount":25.9},
+      "allowCustomerSlotUpdate":false
+    }'
+}
+
+
+
+function _create_scenario_file_if_not_exist () {
+  if [ ! -f "$SCENARIO" ]
+  then
+    cat > "$SCENARIO" <<-'EOF'
+DELIVERY_DATE=$(date -v+7d '+%Y-%m-%d')
+ENV="testing"
+# ENV="staging"
+# ENV="recette"
+
+ACCOUNTIDENTIFIER="102"
+HUB="duck"
+STOREIDOWNER="184"
+
+PICKUP_WAREHOUSE_CODE="422"
+
+BARECODES_COUNT=5
+PREF="aaaa"
+
+
+DATE=$(date '+%Y-%m-%d')
+RAND=$(date +%y%m%d%H%M%S)
+BARECODE_PART=0000$RAND
+PRIMARY_REF=$PREF$RAND
+EOF
+    echo "éditer le fichier $SCENARIO"
+    return 1
+  fi
+}
+
+#!/usr/bin/env bash
+
+cleanup_merged_mr() {
+  COLISWEB_IDL_GROUP=3054234
+
+  BEFORE=${1:- $(date -I -v -2y)}
+
+  for (( COUNTER=1; COUNTER<=12; COUNTER+=2 )); do
+    cleanup_grouped_merged_mr $COLISWEB_IDL_GROUP $BEFORE $COUNTER &
+  done
+
+}
+
+cleanup_grouped_merged_mr() {
+  GROUP=$1
+  BEFORE=$2
+  PAGE_COUNT=$3
+  MERGED_MRS=($(curl --header "PRIVATE-TOKEN: $GITLAB_PAT" \
+    --url "https://gitlab.com/api/v4/groups/$GROUP/merge_requests?updated_before=${BEFORE}T08:00:00Z&status=merged&per_page=50&page=$PAGE_COUNT" |
+    jq -r '.[] | {iid: .iid|tostring, pid:.project_id|tostring} | (.pid + "/merge_requests/" + .iid)'))
+
+  for MR in ${MERGED_MRS[@]}; do
+    echo "https://gitlab.com/api/v4/projects/$MR"
+    curl --request DELETE \
+      --header "PRIVATE-TOKEN: $GITLAB_PAT" \
+      --url "https://gitlab.com/api/v4/projects/$MR"
+  done
+}
+# FIXME
+# image index (docker manifest) does not have tags and images are tagged but not marked as related to the index.
+# Should be fixed using more complex procedure to relate index and images.
+# you will need jq to use these commands. You can install it using "brew install jq"
+# cleanup_ecr_images colisweb_api 8
+# will delete images older than 8 weeks
+cleanup_ecr_images() {
+
+  REPO=$1
+  WEEKS=${2:-16}
+
+  WEEKS_AGO=$(date -v-${WEEKS}w +%F)
+
+  #Get all ecr images
+  IMAGES=$(aws ecr describe-images --repository-name $REPO --output json)
+
+  #Filter unnecessary values and map `imagePushedAt` to EPOCH
+  NON_LATEST_IMAGES=$(echo $IMAGES | jq '[.imageDetails[] | select(.imageTags | any(endswith("latest")) | not)]')
+
+  #Filter on EPOCH
+  OLD_IMAGES=$(echo $NON_LATEST_IMAGES | jq --arg date $WEEKS_AGO '.[] | select(.imagePushedAt[0:10] < $date).imageDigest')
+  while IFS= read -r IMAGE; do
+    if [ "$IMAGE" != "" ]; then
+      echo "Deleting $IMAGE from $REPO"
+      AWS_PAGER="" aws ecr batch-delete-image --repository-name $REPO --image-ids imageDigest=$IMAGE
+    fi
+  done <<< "$OLD_IMAGES"
+}
+
+# cleanup_all_ecr_images 12
+# will delete images in all repositories older than 12 weeks
+cleanup_all_ecr_images() {
+  REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[]|.[].repositoryName')
+
+  while IFS= read -r REPO; do
+    echo "processing ECR repository $REPO"
+    cleanup_ecr_images $REPO $1
+  done <<< "$REPOSITORIES"
+}
+
+cleanup_ci_cache() {
+  DATE=${1:-$(date -v-1m +%F)}
+  CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}
+
+  echo "deleting from cache $CACHE_BUCKET all older than $DATE"
+
+  aws_ecr_login
+
+  while read -r line; do
+    datum=$(echo $line | cut -c1-10)
+    if [[ "$datum" < "$DATE" ]] ; then
+      # Shell Parameter Expansion: ${parameter##word}
+      # Allow to return the result from "word" to the end of "parameters"
+      # Here we need the end of the string after "project/" (corresponding to the S3 gitlab project id and filename)
+      TO_DELETE="$CACHE_BUCKET${line##* project/}"
+      echo $TO_DELETE
+      aws s3 rm $TO_DELETE
+    fi
+  done < <(aws s3 ls $CACHE_BUCKET --recursive)
+}
+
 #!/usr/bin/env bash
 
 ftp_ikea_k8s() {
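Taken together, the helpers added above form a small end-to-end test flow: `get_token` logs in and extracts the session cookie, `bash_array_to_json` turns a list of barcodes into a JSON array, and the `call_*` functions drive the order lifecycle using variables sourced from the scenario file. A hedged sketch of chaining them — the scenario path and barcode values are illustrative, and `_create_scenario_file_if_not_exist` reads the `SCENARIO` global:

    SCENARIO="$HOME/scenario.sh"
    _create_scenario_file_if_not_exist || exit 1   # first run writes a template; edit it, then re-run

    TOKEN=$(get_token testing)
    BARECODES=$(bash_array_to_json 0000123401 0000123402)

    call_create_sfh_order testing "$TOKEN" "$SCENARIO" 1 "$BARECODES"
    call_scan testing "$TOKEN" "$SCENARIO" "$BARECODES"
    # call_register_delivery testing "$TOKEN" "$SCENARIO" <ORDER_ID> "$BARECODES"
    # (the order id comes from the order-creation response)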
@@ -1928,6 +2220,7 @@ datadog_schedule_downtime_single() {
 docker_build_push() {
   read -r -a BUILD_ARGS <<< "$1"
   DOCKER_BUILD_ARGS="--build-arg VCS_REF=$(git rev-parse --short HEAD)"
+
   for ARG_NAME in "${BUILD_ARGS[@]}"
   do
     DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS --build-arg $ARG_NAME=${!ARG_NAME}"

@@ -1936,13 +2229,17 @@ docker_build_push() {
   if ! image_exists $DOCKER_REGISTRY_ID $APPLICATION $CI_COMMIT_SHORT_SHA ; then
     docker pull $DOCKER_IMAGE || true
     SOURCE_URL=${CI_PROJECT_URL:8} # without "https://" protocol, like gitlab.com/colisweb-idl/colisweb/back/packing
-
+
+    docker buildx create --use
+
+    docker buildx build $DOCKER_BUILD_ARGS \
       -t $DOCKER_IMAGE_SHA \
+      --platform "linux/arm64,linux/amd64" \
       --label org.opencontainers.image.revision=$(git rev-parse HEAD) \
       --label org.opencontainers.image.source=$SOURCE_URL \
-      --
+      --provenance=false \
+      --push \
       $DOCKER_STAGE_PATH
-    docker push $DOCKER_IMAGE_SHA
   fi
 }
 
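The change above folds the old build-then-push sequence into a single multi-platform `docker buildx build --push`. The pushed tag then points at an image index (manifest list) covering both architectures rather than a single-arch image, which is plausibly what the FIXME note earlier in this diff about untagged image indexes in ECR refers to. A minimal standalone equivalent, with a placeholder image name and build context:

    # One-time: create and select a buildx builder
    docker buildx create --use

    # Build for both architectures and push in one step
    docker buildx build \
      --platform "linux/arm64,linux/amd64" \
      --provenance=false \
      -t registry.example.com/my-app:latest \
      --push \
      .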
@@ -2078,7 +2375,7 @@ init_migrate_db() {
 
   unset KUBECONFIG
 
-
+  configure_kubectl_for ${ENVIRONMENT}
 
   kube_init_service_database \
     --namespace ${ENVIRONMENT} \

@@ -2124,7 +2421,7 @@ flyway_migrate() {
   CONFIGMAP_NAME="$service-flyway-migration-sql"
   POD_NAME="$service-flyway-migration"
 
-
+  configure_kubectl_for $environment
 
   kubectl -n $namespace delete configmap $CONFIGMAP_NAME --ignore-not-found
   kubectl -n $namespace delete pod $POD_NAME --ignore-not-found

@@ -2177,7 +2474,7 @@ flyway_migrate() {
 
   flyway_sql_folder=$(pwd)/${MIGRATION_SQL_PATH}
 
-
+  configure_kubectl_for "${ENVIRONMENT}"
   POD_NAME="${APPLICATION}-flyway-repair"
   CONFIGMAP_NAME="${APPLICATION}-flyway-repair-sql"
 

@@ -2243,11 +2540,11 @@ git_reveal() {
 }
 #!/usr/bin/env bash
 
-
+helm_deploy() {
   APPLICATION=$1
   ENVIRONMENT=$2
   VERSION=$3
-
+  deploy_chart \
     --path_configs deploy \
     --path_chart deploy/$APPLICATION \
     --application $APPLICATION \

@@ -2256,7 +2553,7 @@ helm_deploy_v3() {
     --helm_extra_args --set global.version=$VERSION
 }
 
-
+deploy_chart() {
   set -e
   set -x
 

@@ -2303,15 +2600,15 @@ deploy_chart_v3() {
   unset KUBECONFIG
 
   # Configure Kubectl
-
+  configure_kubectl_for ${environment}
 
-  # Configure
-
-  #
-
-
-
+  # Configure helm
+  helm version --namespace ${namespace} || true
+  # helm stable repo have changed and must be updated manually, in versions < v2.17.0
+  helm repo add colisweb s3://colisweb-helm-charts/colisweb
+  helm repo add stable https://charts.helm.sh/stable
+  helm repo update
+  helm dependency update ${root_path}/${path_chart}
 
   # Gather values/*.yaml files
   values_path="${root_path}/${path_chart}/values"

@@ -2319,7 +2616,7 @@ deploy_chart_v3() {
   [ -d $values_path ] && values_files=$(find $values_path -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')
 
   # Deploy
-
+  helm upgrade --install \
     --namespace ${namespace} \
     ${values_files} \
     -f ${root_path}/${path_configs}/common.yaml \

@@ -2341,7 +2638,7 @@ deploy_chart_v3() {
   set +x
 }
 
-
+verify_deployments() {
   set -e
 
   # usage :

@@ -2361,7 +2658,7 @@ verify_deployments_v3() {
 
   # Get all Deployments names from the deployed chart
   DEPLOYMENTS=(
-    $(
+    $(helm get manifest --namespace $NAMESPACE $RELEASE | yq --no-doc -r 'select(.kind=="Deployment").metadata.name')
   )
 
   echo "verifying on $NAMESPACE deployments ${DEPLOYMENTS[@]} with a timeout of $TIMEOUT"
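The rebuilt `DEPLOYMENTS` line extracts every Deployment name from the rendered release manifest. A sketch of what the yq filter does, on a hypothetical two-document manifest (this assumes mikefarah's yq v4, where `--no-doc` suppresses the `---` document separators):

    cat > manifest.yaml <<'EOF'
    kind: Deployment
    metadata:
      name: api
    ---
    kind: Service
    metadata:
      name: api-svc
    EOF

    # Keeps only documents whose kind is Deployment and prints their names:
    yq --no-doc 'select(.kind=="Deployment").metadata.name' manifest.yaml   # -> api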
@@ -2404,40 +2701,6 @@ check_config_file() {
   fi
 }
 
-#!/usr/bin/env bash
-
-configure_kubectl_for_ci() {
-  if [ -z ${GITLAB_PAT} ]; then
-    echo "Cannot configure kubectl: no GITLAB_PAT configured"
-    exit 1
-  fi
-
-  infra_env="$1"
-  valid_envs="[testing][staging][production][performance][tests][recette]"
-  echo "$valid_envs" | grep -q "\[$infra_env\]"
-
-  if [ $? -ne 0 ]; then
-    echo "Cannot configure kubectl for invalid env : $infra_env"
-    echo "choose one of $valid_envs"
-    exit 1
-  fi
-
-  mkdir -p ~/.kube
-  curl -fsS \
-    --header "PRIVATE-TOKEN: $GITLAB_PAT" \
-    "https://gitlab.com/api/v4/projects/8141053/jobs/artifacts/$infra_env/raw/$infra_env.kubeconfig?job=4_kubernetes_config_output" \
-    > ~/.kube/$infra_env.kubeconfig
-
-  curl_return_code=$?
-  if [ ${curl_return_code} -ne 0 ]; then
-    echo "Cannot configure kubectl for $infra_env, get configuration failed with code $curl_return_code"
-    exit ${curl_return_code}
-  fi
-
-  rm -f ~/.kube/config
-  ln -s ~/.kube/$infra_env.kubeconfig ~/.kube/config
-  echo "Configured kubectl for env : $infra_env"
-}
 notify_new_deployment() {
   jq --version || (apt update && apt install -y jq)
 
package/package.json CHANGED

@@ -10,7 +10,7 @@ module Footer = {
     <footer
       className={cx([
         className,
-        "flex-none text-gray-600 text-center text-lg pt-4 print:hidden",
+        "flex-none text-gray-600 text-center text-lg pt-4 print:hidden mt-auto",
         hasSidebar ? "lg:ml-20" : "",
       ])}>
       {switch children {