@colisweb/rescript-toolkit 5.46.4 → 5.47.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.secure_files/{ci-functions-v18.0.0 → ci-functions-v20.3.4} +679 -83
- package/.secure_files/{ci-functions-v18.0.1 → ci-functions-v20.4.0} +679 -83
- package/.secure_files/{ci-functions-v17.13.0 → ci-functions-v20.4.1} +699 -103
- package/.secure_files/{ci-functions-v17.14.0 → ci-functions-v20.4.2} +699 -103
- package/.secure_files/ci-functions-v20.4.3 +3255 -0
- package/.secure_files/ci-functions-v20.5.0 +3255 -0
- package/package.json +1 -1
- package/src/form/Toolkit__Form.res +2 -0
- package/src/tailwind/tailwind.config.cjs +22 -0
- package/src/ui/Toolkit__Ui.res +1 -0
- package/src/ui/Toolkit__Ui_Sheet.res +54 -0
- package/src/ui/Toolkit__Ui_Sheet.resi +9 -0
- package/src/vendors/Radix.res +6 -0
- package/.secure_files/ci-functions-v17.12.0 +0 -2658
- package/.secure_files/ci-functions-v17.12.0-feat-add-mysql-service-1.0.7beta +0 -2658
@@ -1,2658 +0,0 @@
#!/usr/bin/env bash

#VARIABLES
export SCRIPT_FULL_PATH=$(dirname "$0")

##FUNCTIONS
# https://stackoverflow.com/questions/1527049/how-can-i-join-elements-of-an-array-in-bash
join_by() {
  local d=${1-} f=${2-}
  if shift 2; then
    printf %s "$f" "${@/#/$d}"
  fi
}

mkstring() {
  local start=$1
  local separator=$2
  local end=$3
  shift 3

  if [ $# -gt 0 ]; then
    printf $start
    join_by $separator $*
    printf $end
  fi
}

md5all() {
  all_hash=$(mktemp)
  for name in $*; do
    find $name -type f -exec cat {} \; | md5sum | cut -f1 -d ' ' >> $all_hash
  done;
  cat $all_hash | md5sum | cut -f1 -d ' '
}

log() {
  echo "$*" >&2
}
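# (Illustrative sketch, not part of the original file; directory names are
# placeholders.) How the helpers above compose: join_by joins its arguments
# with a delimiter, mkstring wraps the joined result, md5all fingerprints trees.
join_by , a b c               # prints: a,b,c
mkstring "(" "," ")" a b c    # prints: (a,b,c)
md5all src assets             # prints one md5 over the contents of both trees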
#!/usr/bin/env bash

check_args() {
  if [ -z $2 ] || [ "$1" != "$2" ]; then
    echo >&2 "missing argument $1"
    return 1
  fi
}

check_env_vars() {
  ArgsCount=$1 && shift
  for ((i = 0; i < $ArgsCount; i++)); do
    if [[ -z "${!1}" ]]; then
      echo >&2 "missing ENV $1"
      return 1
    fi
    shift
  done
}

extract_arg() {
  name=$1
  passed=$2
  value=$3
  if [ "--$name" != "$passed" ]; then
    echo "missing argument $name"
    exit 1
  fi
  eval $name='$value'
}

extract_args() {
  declare -a Array_Args
  ArgsCount=$1 && shift
  for ((i = 0; i < $ArgsCount; i++)); do
    Array_Args[i]=$1 && shift
  done
  for ArgName in "${Array_Args[@]}"; do
    extract_arg "$ArgName" $* && shift 2
  done
}
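# (Illustrative sketch, not part of the original file; demo_extract and its
# values are placeholders.) extract_args expects "--name value" pairs in the
# declared order and evals each value into a shell variable of the same name:
demo_extract() {
  extract_args 2 host port $*
  echo "host=$host port=$port"
}
demo_extract --host db.local --port 5432   # prints: host=db.local port=5432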
#!/usr/bin/env bash

aws_ecr_login() {
  PATH=/root/.local/bin:$PATH

  aws ecr get-login-password \
    | docker login --username AWS --password-stdin 949316342391.dkr.ecr.eu-west-1.amazonaws.com \
    || (echo "you should update to AWS CLI version 2 https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-mac.html " $(aws ecr get-login --region=eu-west-1 --no-include-email) )
}

aws_ecr_token() {
  aws ecr get-authorization-token --region=eu-west-1 --output text --query 'authorizationData[].authorizationToken'
}

# you will need jq to use these commands. You can install it using "brew install jq"
# delete_images colisweb_api 8
# will delete images older than 8 weeks
delete_images() {

  REPO=$1
  WEEKS=${2:-16}

  WEEKS_AGO=$(date -v-${WEEKS}w +%F)

  # Get all ECR images
  IMAGES=$(aws ecr describe-images --repository-name $REPO --output json)

  # Filter out unnecessary values, keeping only non-"latest" images
  NON_LATEST_IMAGES=$(echo $IMAGES | jq '[.imageDetails[] | select(.imageTags | any(endswith("latest")) | not)]')

  # Filter on the `imagePushedAt` date
  OLD_IMAGES=$(echo $NON_LATEST_IMAGES | jq --arg date $WEEKS_AGO '.[] | select(.imagePushedAt[0:10] < $date).imageDigest')
  while IFS= read -r IMAGE; do
    if [ "$IMAGE" != "" ]; then
      echo "Deleting $IMAGE from $REPO"
      AWS_PAGER="" aws ecr batch-delete-image --repository-name $REPO --image-ids imageDigest=$IMAGE
    fi
  done <<< "$OLD_IMAGES"
}

# delete_images_all_repos 12
# will delete images in all repositories older than 12 weeks
delete_images_all_repos() {
  REPOSITORIES=$(aws ecr describe-repositories --output json | jq -r '.[]|.[].repositoryName')

  while IFS= read -r REPO; do
    echo "processing ECR repository $REPO"
    delete_images $REPO $1
  done <<< "$REPOSITORIES"
}

delete_old_cache() {
  DATE=${1:-$(date -v-1m +%F)}
  CACHE_BUCKET=${2:-"s3://gitlab-colisweb-distributed-cache/project/"}

  echo "deleting from cache $CACHE_BUCKET all older than $DATE"

  aws_ecr_login

  while read -r line; do
    datum=$(echo $line | cut -c1-10)
    if [[ "$datum" < "$DATE" ]] ; then
      # Shell Parameter Expansion: ${parameter##word}
      # Removes the longest prefix of "parameter" matching the pattern "word"
      # Here we keep the end of the string after "project/" (the S3 gitlab project id and filename)
      TO_DELETE="$CACHE_BUCKET${line##* project/}"
      echo $TO_DELETE
      aws s3 rm $TO_DELETE
    fi
  done < <(aws s3 ls $CACHE_BUCKET --recursive)
}
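# (Editor's note, not part of the original file.) `date -v-${WEEKS}w` above is
# BSD/macOS-only syntax; if these functions were run with GNU date, the assumed
# equivalent would be:
#   WEEKS_AGO=$(date -d "-${WEEKS} weeks" +%F)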
#!/usr/bin/env bash

# If gitlab is down or pipelines are stuck, hotfixes need to be available.
# This script will publish docker images to ECR using your current git HEAD, then deploy them to a given environment.
# Some local files (git-commit.conf and sentry.properties) will be updated, take caution.
# No trace of this will appear on Gitlab (no releases, no pipelines, no tags).
# create_hotfix_scala $ENVIRONMENT $CHART_NAME [ $MODULE_NAME $MODULE_PATH $DEPLOYMENT ]
# create_hotfix_scala testing crm main modules/3-executables/main crm
# create_hotfix_scala testing notification \
#   main-http modules/3-executables/main-http notification-http \
#   main-consumer modules/3-executables/main-consumer notification-consumer

create_hotfix_scala() {

  ENVIRONMENT=$1
  CHART_NAME=$2
  shift 2

  SHORT_SHA=$(git rev-parse --short HEAD)
  HOTFIX_TAG="hotfix-$SHORT_SHA"

  gum confirm "Preparing $HOTFIX_TAG for $CHART_NAME ?" || exit
  prepare_hotfix_scala $HOTFIX_TAG

  gum confirm "Building $HOTFIX_TAG for $CHART_NAME ?" || exit
  while [[ $# -gt 2 ]] ; do
    build_hotfix_scala $HOTFIX_TAG "$1" "$2" "$3"
    shift 3
  done

  gum confirm "Deploying $HOTFIX_TAG for $CHART_NAME ?" || exit
  deploy_hotfix $CHART_NAME $ENVIRONMENT $HOTFIX_TAG
}

# Update local git-commit.conf and sentry.properties files using the git short sha
prepare_hotfix_scala() {
  HOTFIX_TAG=$1

  git secret reveal -f
  aws_ecr_login

  COMMIT_CONF_FILES=$(find . -name "git-commit.conf")
  SENTRY_PROPERTIES_FILES=$(find . -name "sentry.properties")

  for file in $(echo "$COMMIT_CONF_FILES\n$SENTRY_PROPERTIES_FILES"); do
    sed -i '' -e 's&GIT_COMMIT&'"$HOTFIX_TAG&" $file
  done

}

# Build docker images locally and publish them to AWS ECR.
build_hotfix_scala() {

  HOTFIX_TAG=$1
  SBT_MODULE=$2
  DOCKER_PATH=$3
  DEPLOYMENT=$4

  DOCKER_REGISTRY_ID="949316342391"
  DOCKER_REGISTRY="$DOCKER_REGISTRY_ID.dkr.ecr.eu-west-1.amazonaws.com"
  DOCKER_IMAGE=$DOCKER_REGISTRY/$DEPLOYMENT
  HOTFIX_IMAGE=$DOCKER_IMAGE:$HOTFIX_TAG

  # Build
  sbt "project $SBT_MODULE" "Docker / stage"

  # Publish
  docker build --platform "linux/amd64" -t $HOTFIX_IMAGE --cache-from $DOCKER_IMAGE "$DOCKER_PATH/target/docker/stage"
  docker push $HOTFIX_IMAGE

  echo "Created hotfix $HOTFIX_IMAGE"
}

# Deploy the project in the given environment
deploy_hotfix() {
  source $colisweb_scripts/ci/helm.sh

  CHART_NAME=$1
  ENVIRONMENT=$2
  HOTFIX_TAG=$3

  CONFIG_PATH=deploy
  CHART_PATH=$CONFIG_PATH/$CHART_NAME
  ROOT_PATH=$(pwd)

  # Unset the kubectl configuration made via the KUBECONFIG env variable:
  # it would override the config made by configure_kubectl_for
  # (for example, Gitlab runners in Kubernetes set this variable and cause conflicts)
  unset KUBECONFIG

  # Configure kubectl
  configure_kubectl_for $ENVIRONMENT

  # Avoid "no local-index.yaml" or "empty local-index.yaml" errors
  cat > $HOME/Library/Caches/helm/repository/local-index.yaml <<EOT
apiVersion: v1
entries:
  cronjob:
EOT

  # The helm3 stable repo has changed and must be updated manually in versions < v2.17.0
  helm3 repo add colisweb s3://colisweb-helm-charts/colisweb --force-update
  helm3 repo add stable https://charts.helm.sh/stable --force-update
  helm3 repo update
  helm3 dependency update ${ROOT_PATH}/${CHART_PATH}

  # Gather values/*.yaml files
  VALUES_PATH="${ROOT_PATH}/${CHART_NAME}/values"
  VALUES_FILES=''
  [ -d $VALUES_PATH ] && VALUES_FILES=$(find $VALUES_PATH -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')

  # Deploy
  helm3 upgrade --install \
    --namespace ${ENVIRONMENT} \
    ${VALUES_FILES} \
    -f ${ROOT_PATH}/${CONFIG_PATH}/common.yaml \
    -f ${ROOT_PATH}/${CONFIG_PATH}/${ENVIRONMENT}.yaml \
    -f ${ROOT_PATH}/${CONFIG_PATH}/${ENVIRONMENT}-secrets.yaml \
    --set global.version=$HOTFIX_TAG \
    ${CHART_NAME} ${ROOT_PATH}/${CHART_PATH}

  verify_deployments_v3 -t 10m $ENVIRONMENT $CHART_NAME

}
#!/usr/bin/env bash

image_exists() {
  REGISTRY=$1
  REPOSITORY=$2
  IMAGE=$3

  TAGGED_IMAGE="$REGISTRY/$REPOSITORY:$IMAGE"

  # Test the exit code directly: a `set -e` here would abort the function
  # before the else branch could ever run.
  if aws ecr describe-images --registry-id $REGISTRY --repository-name $REPOSITORY --image-ids "imageTag=$IMAGE"
  then
    echo "Image $TAGGED_IMAGE already present in remote repo"
    return 0
  else
    echo "Image $TAGGED_IMAGE NOT present in remote repo"
    return 1
  fi
}
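# (Illustrative caller, not part of the original file; registry id, repository
# and tag are placeholders.) Gate a push on presence in the remote repository:
if ! image_exists 949316342391 colisweb-api hotfix-ab12cd3; then
  docker push 949316342391.dkr.ecr.eu-west-1.amazonaws.com/colisweb-api:hotfix-ab12cd3
fi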
#!/usr/bin/env bash

gmm() {
  git checkout $1
  git pull
  git checkout $2
  git pull
  git merge $1
  git push
}

git_damn_merge() {
  git checkout $1
  git pull
  git checkout $2
  git dammit
  git merge $1
  git push
}

git_prune_local_branches() {
  git branch -r |
    awk '{print $1}' |
    egrep -v -f /dev/fd/0 <(git branch -vv | grep origin) |
    awk '{print $1}' |
    xargs git branch -d
}

gum_checkout() {
  git branch -a | cut -f3- -d "/" | gum filter | xargs git checkout
}

# useful option:
# export GIT_SUBLINE_MERGE_NON_INTERACTIVE_MODE=TRUE
# see https://github.com/paulaltin/git-subline-merge
setup_subline_merge() {
  location=${1:-"--local"}

  case $location in
  --local)
    if [ -d ".git" ]; then
      echo "* merge=subline" >>.git/info/attributes
    else
      echo "Cannot use local option, not in a git repository"
      return 1
    fi
    ;;
  --global)
    echo "* merge=subline" >>~/.gitattributes
    ;;
  *)
    echo "unknown argument $location"
    return 2
    ;;
  esac

  git config $location merge.conflictStyle diff3
  git config $location merge.subline.driver "$colisweb_scripts/shell-session/shell/dev/git-subline-merge %O %A %B %L %P"
  git config $location merge.subline.recursive binary
}

rebase_from_ancestor() {
  set -x
  branch=$1
  tip=$(git rev-parse HEAD)
  ancestor=$(git merge-base $branch $tip)
  commits=$(git log $ancestor..$tip)
  git reset --hard $ancestor
  git merge --squash $tip
  git commit -m "squashed commits $commits" || echo "nothing committed"
  git rebase $branch -Xtheirs
}
#!/usr/bin/env bash

import_all_pgp_keys() {
  echo "importing all PGP keys"
  gpg --import $SCRIPT_FULL_PATH/pgp_keys/*.key
}

remove_all_persons_from_secrets() {
  echo "cleanup git secret"
  WHO_KNOWS=($(git secret whoknows))
  # expand the whole array, not just its first element
  git secret removeperson "${WHO_KNOWS[@]}"
  echo "Removed secrets access for ${WHO_KNOWS[*]}"
}

all_pgp_emails() {
  gpg --show-key $SCRIPT_FULL_PATH/pgp_keys/*.key | sed -rn "s/.*<(.*)>/\1/p"
}

set_all_secret_keys() {

  import_all_pgp_keys

  git secret reveal -f

  remove_all_persons_from_secrets

  if [ $# -eq 0 ]; then
    echo "No emails supplied, using dev-tools pgp keys as source"
    IN_THE_KNOW=($(gum choose --no-limit $(all_pgp_emails)))
  else
    IN_THE_KNOW=($*)
  fi

  git secret tell "${IN_THE_KNOW[@]}"
  git secret hide
  git secret whoknows

  echo "all secrets updated, you'll need to commit the changes"
}
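# (Illustrative sketch, not part of the original file; emails are placeholders.)
# Typical key rotation: re-encrypt every secret for an explicit recipient list,
# then commit the regenerated .secret files as the final echo reminds you to:
set_all_secret_keys alice@colisweb.com bob@colisweb.com
git commit -am "rotate git-secret recipients"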
#!/usr/bin/env bash

start_ssh_bastion() {
  ENV=$1
  SSH_LOCAL_PORT=$2
  POD_NAME=ssh-bastion-$USERNAME
  CONFIG_MAP_NAME=ssh-bastion-$USERNAME
  configure_kubectl_for $ENV
  kubectl get pods -o name | grep pod/$POD_NAME
  if [ $? -eq 0 ]; then
    echo "$POD_NAME is already running"
  else
    # configmap
    kubectl get configmap $CONFIG_MAP_NAME && kubectl delete configmap $CONFIG_MAP_NAME
    tempdir=$(mktemp -d)
    cat <<EOF > $tempdir/sshd_config
AllowTcpForwarding yes
Port 2222
PermitRootLogin yes
AuthorizedKeysFile /etc/ssh/authorized_keys
EOF
    cp ~/.ssh/id_rsa.pub $tempdir/authorized_keys
    kubectl create configmap $CONFIG_MAP_NAME --from-file=$tempdir

    # pod
    kubectl get pod $POD_NAME && kubectl delete pod $POD_NAME
    cat <<EOF | kubectl create -f -

apiVersion: v1
kind: Pod
metadata:
  name: $POD_NAME
spec:
  containers:
    - name: $POD_NAME
      image: sickp/alpine-sshd:7.4
      ports:
        - containerPort: 2222
      volumeMounts:
        - mountPath: /etc/ssh/sshd_config
          name: ssh-config
          subPath: sshd_config
        - mountPath: /etc/ssh/authorized_keys
          name: ssh-config
          subPath: authorized_keys
  volumes:
    - name: ssh-config
      configMap:
        name: $CONFIG_MAP_NAME
EOF

  fi

  # You need a recent kubectl for wait to work (1.15 works); install or upgrade
  # with brew:
  #   brew install kubernetes-cli
  #   brew upgrade kubernetes-cli
  kubectl wait --for=condition=Ready pod/$POD_NAME

  # kube port-forward
  lsof -ti tcp:$SSH_LOCAL_PORT | xargs kill
  kubectl port-forward $POD_NAME $SSH_LOCAL_PORT:2222 &
  while ! nc -z 127.0.0.1 $SSH_LOCAL_PORT; do
    sleep 1
  done
  echo "forwarding ssh via local port $SSH_LOCAL_PORT"
  echo "remember to terminate the bastion with 'stop_ssh_bastion'"
}

stop_ssh_bastion() {
  POD_NAME=ssh-bastion-$USERNAME
  kubectl delete pod $POD_NAME
}
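# (Illustrative session, not part of the original file.) Ports follow the
# wiring above: the pod's sshd listens on 2222 and is forwarded to the local
# port you pass in:
start_ssh_bastion testing 2225
ssh -p 2225 -o StrictHostKeyChecking=no root@127.0.0.1   # through the tunnel
stop_ssh_bastion                                         # delete the pod when done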
#!/usr/bin/env bash

configure_kubectl_for() {
  local infra_env="$1"
  local valid_envs="[testing][staging][production][performance][tests][recette]"
  echo "$valid_envs" | grep -q "\[$infra_env\]"

  if [ $? -ne 0 ]; then
    echo "Cannot configure kubectl for invalid env : $infra_env"
    echo "choose one of $valid_envs"
    return 1
  fi

  aws eks update-kubeconfig --name "toutatis-$infra_env-eks" >&2
}
#!/usr/bin/env bash

# WARNING: never try to do a dump directly from database_production_ca;
# this could cause a lot of database lock issues.
# Always use database_production_read_replica_ca instead.
database_k8s() {
  MODE=$1
  case $MODE in
    "tests") SSH_LOCAL_PORT=2224;PG_LOCAL_PORT=24440;CA_LOCAL_PORT=25430;ENV="tests";;
    "testing") SSH_LOCAL_PORT=2225;PG_LOCAL_PORT=24441;CA_LOCAL_PORT=25431;ENV="testing";;
    "staging") SSH_LOCAL_PORT=2226;PG_LOCAL_PORT=24442;CA_LOCAL_PORT=25432;ENV="staging";;
    "production") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24443;CA_LOCAL_PORT=25433;ENV="production";;
    "production_rw") SSH_LOCAL_PORT=2227;PG_LOCAL_PORT=24444;CA_LOCAL_PORT=25434;ENV="production";;
    "recette") SSH_LOCAL_PORT=2228;PG_LOCAL_PORT=24446;CA_LOCAL_PORT=25436;ENV="recette";;
    *) echo "Unsupported ENV : $MODE"; return 1 ;;
  esac

  start_ssh_bastion $ENV $SSH_LOCAL_PORT

  lsof -ti tcp:$PG_LOCAL_PORT | xargs kill

  bastion_config=$(mktemp)
  cat > "$bastion_config" <<EOF
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
User root
Host bastion_tests
  HostName 127.0.0.1
  Port 2224
  LocalForward 24440 toutatis-tests-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
Host bastion_testing
  HostName 127.0.0.1
  Port 2225
  LocalForward 24441 toutatis-testing-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25431 toutatis-testing-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  LocalForward 25531 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
Host bastion_staging
  HostName 127.0.0.1
  Port 2226
  LocalForward 24442 toutatis-staging-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25432 toutatis-staging-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
Host bastion_recette
  HostName 127.0.0.1
  Port 2228
  LocalForward 24446 toutatis-recette-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25436 toutatis-recette-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
  LocalForward 25536 testapirds.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
Host bastion_production
  HostName 127.0.0.1
  Port 2227
  LocalForward 24443 toutatis-production-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25433 toutatis-production-mysql-db-replica.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
EOF
  if [ "$MODE" = "production_rw" ] ; then
    cat >> "$bastion_config" <<EOF
  LocalForward 24444 toutatis-production-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:5432
  LocalForward 25434 toutatis-production-mysql-db.ca0rjdmnxf1x.eu-west-1.rds.amazonaws.com:3306
EOF
  fi

  ssh -f -N \
    -F "$bastion_config" \
    "bastion_$ENV"

  echo "sample command : 'psql postgres://postgres@127.0.0.1:$PG_LOCAL_PORT'"
  echo "sample command : 'mysql -u colisweb -h 127.0.0.1 -P $CA_LOCAL_PORT -p db_name'"

  echo "run 'kubectl delete pod $POD_NAME' when you have finished"
}

psql_on_k8() {
  NAMESPACE=$1
  SERVICE=$2
  CONNECTION=$3
  shift 3

  kubectl -n $NAMESPACE run ${SERVICE}-postgres-init \
    --image jbergknoff/postgresql-client \
    --restart=Never \
    --attach --rm \
    -- \
    postgresql://${CONNECTION} \
    "$*"
}

mysql_on_k8() {
  local namespace=$1
  local service=$2
  local db_host=$3
  local db_port=$4
  local db_init_username=$5
  local db_init_password=$6
  local query=$7

  kubectl -n ${namespace} run ${service}-mysql-init \
    --image arey/mysql-client \
    --restart=Never \
    --attach --rm \
    -- \
    mysql --host=$db_host --user=$db_init_username --password=$db_init_password --port=$db_port --execute="$query"
}
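# (Illustrative sketch, not part of the original file; the connection string
# and service name are placeholders.) Connect through the forwards set up by
# database_k8s, or run a one-off query in-cluster with psql_on_k8:
database_k8s staging
psql postgres://postgres@127.0.0.1:24442
psql_on_k8 staging billing "user:pass@db-host:5432" -c 'SELECT 1;'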
#!/usr/bin/env bash

kube_init_database_once() {

  extract_args 8 namespace db_host db_port db_init_username db_init_password db_database db_username db_password $*

  echo "======================="
  echo " Initializing Database '$db_database' for namespace $namespace"
  echo "======================="

  set -x

  echo "Checking if Database '$db_database' exists"
  set +e
  psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -lqtA | cut -d\| -f1 | grep "^$db_database$"
  return_code=$?
  set -e

  if [ ${return_code} -eq 0 ]; then
    echo "Database $db_database already exists - nothing to do"
  else
    echo "Database $db_database does not exist - initializing"

    psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'CREATE DATABASE '"$db_database"';'
    echo "DB created $db_database"

    psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'CREATE USER '"$db_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
    echo "USER created $db_username"

    psql_on_k8 $namespace once "$db_init_username:$db_init_password@$db_host:$db_port" -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_username"';'
    echo "Granted all privileges for $db_username on $db_database"
  fi

  echo "======================="
  echo " Database '$db_database' Initialization complete for namespace $namespace"
  echo "======================="
}

kube_init_database_readonly_account() {

  extract_args 6 namespace service db_connection db_database db_readonly_username db_readonly_password $*

  echo "======================="
  echo " Initializing Readonly Account '$db_readonly_username' for '$db_database' for namespace $namespace"
  echo "======================="

  # Print commands before execution, except echo
  trap '[[ $BASH_COMMAND != echo* ]] && echo $BASH_COMMAND' DEBUG

  echo "Checking if Readonly account '$db_readonly_username' for '$db_database' exists"
  set +e
  psql_on_k8 $namespace $service $db_connection -qtAc 'SELECT rolname FROM pg_roles;' | grep "^$db_readonly_username$"
  return_code=$?
  set -e

  if [ ${return_code} -eq 0 ]; then
    echo "Account $db_readonly_username already exists - nothing to do"
  else
    echo "Account $db_readonly_username does not exist - creating"

    psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_readonly_username"' WITH ENCRYPTED PASSWORD '"'$db_readonly_password'"';'
    psql_on_k8 $namespace $service $db_connection -c 'GRANT CONNECT ON DATABASE '"$db_database"' TO '"$db_readonly_username"';'
    psql_on_k8 $namespace $service $db_connection -c 'GRANT USAGE ON SCHEMA public TO '"$db_readonly_username"';'
    psql_on_k8 $namespace $service $db_connection -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO '"$db_readonly_username"';'
    psql_on_k8 $namespace $service $db_connection -c 'ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO '"$db_readonly_username"';'

    echo "Created user with read-only permissions for $db_readonly_username on $db_database (schema public)"
  fi
}

kube_init_datadog_in_database() {
  extract_args 8 namespace db_host db_port db_init_username db_init_password db_datadog_username db_datadog_password db_datadog_schema $*

  echo "======================="
  echo " Initializing Datadog Agent Requirements for namespace $namespace"
  echo "======================="

  echo "Checking if User '$db_datadog_username' exists"
  local service="datadog"
  found_db_users=$(mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'SELECT user FROM mysql.user;')
  set +e
  echo "$found_db_users" | grep "^$db_datadog_username$"
  return_code=$?
  set -e

  if [ ${return_code} -eq 0 ]; then
    echo "User $db_datadog_username already exists - nothing to do"
  else
    echo "User $db_datadog_username does not exist - initializing"

    # All the queries come from this doc: https://docs.datadoghq.com/fr/database_monitoring/setup_mysql/selfhosted/?tab=mysql56

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'CREATE USER '"$db_datadog_username"'@"%" IDENTIFIED BY '"'$db_datadog_password'"';'
    echo "USER created $db_datadog_username"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT REPLICATION CLIENT ON *.* TO datadog@"%" WITH MAX_USER_CONNECTIONS 5;'
    echo "Granted REPLICATION CLIENT for $db_datadog_username"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT PROCESS ON *.* TO '"$db_datadog_username"'@"%";'
    echo "Granted PROCESS for $db_datadog_username"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT SELECT ON performance_schema.* TO '"$db_datadog_username"'@"%";'
    echo "Granted SELECT on performance_schema for $db_datadog_username"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'CREATE SCHEMA IF NOT EXISTS datadog;'
    echo "CREATE SCHEMA datadog"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT EXECUTE ON datadog.* to '"$db_datadog_username"'@"%";'
    echo "Granted EXECUTE for $db_datadog_username on datadog"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'GRANT CREATE TEMPORARY TABLES ON datadog.* TO '"$db_datadog_username"'@"%";'
    echo "Granted CREATE TEMPORARY TABLES for $db_datadog_username"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.explain_statement;
DELIMITER $$
CREATE PROCEDURE datadog.explain_statement(IN query TEXT)
SQL SECURITY DEFINER
BEGIN
  SET @explain := CONCAT("EXPLAIN FORMAT=json ", query);
  PREPARE stmt FROM @explain;
  EXECUTE stmt;
  DEALLOCATE PREPARE stmt;
END $$
DELIMITER ;'
    echo "CREATE PROCEDURE datadog.explain_statement"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS '"$db_datadog_username"'.explain_statement;
DELIMITER $$
CREATE PROCEDURE '"$db_datadog_username"'.explain_statement(IN query TEXT)
SQL SECURITY DEFINER
BEGIN
  SET @explain := CONCAT("EXPLAIN FORMAT=json ", query);
  PREPARE stmt FROM @explain;
  EXECUTE stmt;
  DEALLOCATE PREPARE stmt;
END $$
DELIMITER ;
GRANT EXECUTE ON PROCEDURE '"$db_datadog_username"'.explain_statement TO datadog@"%";'
    echo "CREATE PROCEDURE on SCHEMA $db_datadog_schema for $db_datadog_username"

    mysql_on_k8 $namespace $service $db_host $db_port $db_init_username $db_init_password 'DROP PROCEDURE IF EXISTS datadog.enable_events_statements_consumers;
DELIMITER $$
CREATE PROCEDURE datadog.enable_events_statements_consumers()
SQL SECURITY DEFINER
BEGIN
  UPDATE performance_schema.setup_consumers SET enabled="YES" WHERE name LIKE "events_statements_%";
END $$
DELIMITER ;
GRANT EXECUTE ON PROCEDURE datadog.enable_events_statements_consumers TO datadog@"%";'

    echo "CREATE PROCEDURE on datadog.enable_events_statements_consumers"
  fi

  echo "======================="
  echo " Database '$db_datadog_schema' Initialization complete for namespace $namespace"
  echo "======================="
}

kube_init_datadog_in_postgres_database() {
  extract_args 7 namespace db_host db_port db_init_username db_init_password db_datadog_username db_datadog_password $*

  local service="datadog"
  local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"

  echo "======================="
  echo " Initializing $service Agent On PostgreSQL Database Requirements for namespace $namespace"
  echo "======================="

  echo "Checking if User '$db_datadog_username' exists"

  set +e
  if psql_on_k8 $namespace $service $db_connection -qtAc 'SELECT usename FROM pg_catalog.pg_user;' | grep "^$db_datadog_username$";
  then
    echo "User $db_datadog_username already exists - nothing to do"
  else
    echo "User $db_datadog_username does not exist - initializing"

    set -e
    psql_on_k8 $namespace $service $db_connection -qc 'CREATE USER '"$db_datadog_username"' WITH password '"'$db_datadog_password'"';'
    echo "User created $db_datadog_username"

    psql_on_k8 $namespace $service $db_connection -qc 'CREATE SCHEMA datadog;'
    echo "Schema datadog created"

    psql_on_k8 $namespace $service $db_connection -qc 'GRANT USAGE ON SCHEMA datadog TO datadog;'
    echo "Granted usage for datadog schema to datadog"

    psql_on_k8 $namespace $service $db_connection -qc 'GRANT USAGE ON SCHEMA public TO datadog;'
    echo "Granted usage for public schema to datadog"

    psql_on_k8 $namespace $service $db_connection -qc 'GRANT pg_monitor TO datadog;'
    echo "Granted pg_monitor to datadog"

    psql_on_k8 $namespace $service $db_connection -qc 'CREATE EXTENSION IF NOT EXISTS pg_stat_statements schema public;'
    echo "Extension pg_stat_statements created"

    local datadog_function_path="/tmp/datadog-explain-statement-function.sql"
    local datadog_function="CREATE OR REPLACE FUNCTION datadog.explain_statement(
  l_query TEXT,
  OUT explain JSON
)
RETURNS SETOF JSON AS
\\$\\$
DECLARE
  curs REFCURSOR;
  plan JSON;

BEGIN
  OPEN curs FOR EXECUTE pg_catalog.concat('EXPLAIN (FORMAT JSON) ', l_query);
  FETCH curs INTO plan;
  CLOSE curs;
  RETURN QUERY SELECT plan;
END;
\\$\\$
LANGUAGE 'plpgsql'
RETURNS NULL ON NULL INPUT
SECURITY DEFINER;"

    kubectl -n $namespace run $service-postgres-init \
      --image jbergknoff/postgresql-client \
      --restart=Never \
      --attach --rm \
      --command \
      -- \
      /bin/sh -c "echo -e \"$datadog_function\" > $datadog_function_path; psql postgresql://$db_connection -qf $datadog_function_path"

    echo "Function datadog.explain_statement created"
  fi

  echo "======================="
  echo " Database $service Initialization complete for namespace $namespace"
  echo "======================="
}

kube_init_service_database() {

  extract_args 9 namespace service db_host db_port db_init_username db_init_password db_database db_username db_password $*

  local db_connection="$db_init_username:$db_init_password@$db_host:$db_port"

  set -x

  echo "Checking if Database '$db_database' exists"
  set +e
  psql_on_k8 $namespace $service $db_connection -lqtA | cut -d\| -f1 | grep "^$db_database$"
  return_code=$?
  set -e

  if [ ${return_code} -eq 0 ]; then
    echo "Database $db_database already exists - nothing to do"
  else
    echo "Database $db_database does not exist - initializing"

    psql_on_k8 $namespace $service $db_connection -c 'CREATE DATABASE '"$db_database"';'
    echo "DB created $db_database"

    psql_on_k8 $namespace $service $db_connection -c 'CREATE USER '"$db_username"' WITH ENCRYPTED PASSWORD '"'$db_password'"';'
    echo "USER created $db_username"

    psql_on_k8 $namespace $service $db_connection -c 'GRANT ALL PRIVILEGES ON DATABASE '"$db_database"' TO '"$db_username"';'
    echo "Granted all privileges for $db_username on $db_database"
  fi

  echo "======================="
  echo " Database '$db_database' Initialization complete for namespace $namespace"
  echo "======================="
}
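# (Illustrative invocation, not part of the original file; every value is a
# placeholder.) extract_args consumes the flags in exactly the declared order:
kube_init_database_once \
  --namespace testing \
  --db_host toutatis-testing-db.example.internal \
  --db_port 5432 \
  --db_init_username postgres \
  --db_init_password initpass \
  --db_database billing \
  --db_username billing \
  --db_password dbpass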
#!/usr/bin/env bash

# Allows using a JMX connection to retrieve data and metrics from pods within Kubernetes.
# You will need VisualVM to use this tool: https://visualvm.github.io/
# ex: bind_jmx testing notification
bind_jmx() {

  local ENV=$1
  local SERVICE_NAME=$2
  local PORT=2242

  start_ssh_bastion $ENV $PORT

  echo "root" | ssh -f -N -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -D 7777 root@127.0.0.1 -p 2242
  local PODS=$(kubectl -n $ENV get pods -o wide | grep $SERVICE_NAME | grep -Eo '^[^ ]+')

  echo "Choose one of the following pods to get metrics from..."
  local POD_NAME=$(gum choose $PODS)
  local POD_IP=$(
    kubectl -n $ENV get pods -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIP}{"\n"}{end}' |
      grep $POD_NAME |
      cut -d' ' -f2 |
      head -1
  )

  jconsole -J-DsocksProxyHost=localhost \
    -J-DsocksProxyPort=7777 \
    service:jmx:rmi:///jndi/rmi://$POD_IP:7199/jmxrmi \
    -J-DsocksNonProxyHosts= &

  cat << EOF
Now start VisualVM
Preferences > Network > Manual Proxy Settings
SOCKS Proxy Line: Set 'localhost' and Port '7777'
File > Add JMX Connection
Set $POD_IP:7199, check 'do not require an SSL connection'
Remember to kill your bastion afterwards using 'stop_ssh_bastion'
EOF
}
#!/usr/bin/env bash

k8_nodes_stats() {
  kubectl get nodes -o name |
    xargs kubectl describe |
    grep "^Name\|workType\|cpu \|memory " |
    sed -r 's/[ :=]+/\t/g' |
    sed 's/\tworkType\t//g' |
    sed -r 's/^Name/---\nName/g' |
    grep --color "Name\|web\|workers\|cpu\|memory\|---"
}
#!/usr/bin/env bash

# Port forward on the first matching pod
# Ex:
#   pod_forward testing notification-http
#   pod_forward testing colisweb-api-web 3333 3000
pod_forward() {
  ENV=$1
  POD_FILTER=$2
  LOCAL_PORT=${3:-8080}
  POD_PORT=${4:-8080}

  if PID=$(lsof -ti tcp:$LOCAL_PORT); then
    echo "killing process $PID which uses port $LOCAL_PORT"
    kill $PID
  fi

  configure_kubectl_for $ENV

  POD=`pick_pod $ENV $POD_FILTER`

  echo "setting up forwarding to $POD"
  kubectl -n $ENV port-forward $POD $LOCAL_PORT:$POD_PORT &
  PID=$!

  while ! echo exit | nc localhost $LOCAL_PORT > /dev/null; do
    sleep 1
    echo "waiting for port $LOCAL_PORT to be open locally"
  done
  echo "port $LOCAL_PORT is now available on localhost, forwarding to $ENV $POD:$POD_PORT"
  echo 'you can terminate it with "kill '$PID'" or "kill $(lsof -ti tcp:'$LOCAL_PORT')"'
}

# prompts to pick a pod and runs a command like bash inside
#   pod_exec testing
#   pod_exec testing bash
#   pod_exec testing bash colisweb-api
pod_exec() {
  ENV=$1
  COMMAND=${2:-bash}
  configure_kubectl_for $ENV
  POD_FILTER=$3
  POD=`pick_pod $ENV $POD_FILTER`
  echo "running $COMMAND inside $POD"
  kubectl -n $ENV exec -ti $POD -- $COMMAND
}

# prompts to pick a pod and copies a local file to the pod
#   pod_copy_to testing localfile remotefile
#   pod_copy_to testing localfile remotefile colisweb-api
pod_copy_to() {
  ENV=$1
  LOCAL_FILE=$2
  REMOTE_FILE=$3
  configure_kubectl_for $ENV
  POD_FILTER=$4
  POD=`pick_pod $ENV $POD_FILTER`
  kubectl cp $LOCAL_FILE $ENV/$POD:$REMOTE_FILE
}


pick_pod() {
  ENV=$1
  POD_FILTER="pod/$2"
  configure_kubectl_for $ENV

  if [ -z "$2" ] ; then
    kubectl -n $ENV get pods | gum filter | cut -f1 -d" "
  else
    if PODS=$(kubectl -n $ENV get pods -o=name | grep "$POD_FILTER"); then
      echo $PODS | head -1 | sed -e 's/pod\///'
    else
      echo "no pods found on $ENV matching $POD_FILTER" >&2
    fi
  fi
}
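# (Illustrative sketch, not part of the original file.) The pod helpers all
# route through pick_pod: with no filter it opens a gum picker, with a filter
# it returns the first matching pod.
pod_forward testing notification-http 8081 8080        # local 8081 -> pod 8080
pod_exec testing sh colisweb-api                       # shell into first match
pod_copy_to testing ./dump.sql /tmp/dump.sql colisweb-api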
#!/usr/bin/env bash

bastion_config_for_redis_ca() {
  ssh_config xufte6.0001.euw1.cache.amazonaws.com redis 2223 63789 tests testing recette-001 sandbox prod > $1
}

bastion_config_for_redis_toutatis() {
  ssh_config xufte6.0001.euw1.cache.amazonaws.com toutatis 2223 63789 tests testing recette staging production > $1
}

ssh_config() {
  host=$1
  host_prefix=$2
  port0=$3
  forward0=$4
  shift 4
  instance_names=("$@") # /!\ indices start at 1 with zsh
  ssh_header

  environments=(tests testing recette staging production)

  length=${#environments[@]}
  for (( i=1; i<=${length}; i++ ));
  do
    bastion_block bastion_${environments[$i]} $(($port0 + $i)) $(($forward0 + $i)) ${host_prefix}-${instance_names[$i]}.$host
  done
}

ssh_header() {
  cat <<EOF
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
User root
EOF
}

bastion_block() {
  cat <<EOF
Host $1
  HostName 127.0.0.1
  Port $2
  LocalForward $3 $4:6379
EOF
}

redis_k8s() {
  MODE=$1
  REDIS_INSTANCE=${2:-ca}
  case $MODE in
    "tests") SSH_LOCAL_PORT=2224;REDIS_LOCAL_PORT=63790;ENV="tests";;
    "testing") SSH_LOCAL_PORT=2225;REDIS_LOCAL_PORT=63791;ENV="testing";;
    "recette") SSH_LOCAL_PORT=2226;REDIS_LOCAL_PORT=63792;ENV="recette";;
    "staging") SSH_LOCAL_PORT=2227;REDIS_LOCAL_PORT=63793;ENV="staging";;
    "production") SSH_LOCAL_PORT=2228;REDIS_LOCAL_PORT=63794;ENV="production";;
    *) echo "Unsupported ENV : $MODE"; return 1 ;;
  esac

  start_ssh_bastion $ENV $SSH_LOCAL_PORT

  lsof -ti tcp:$REDIS_LOCAL_PORT | xargs kill

  bastion_config=$(mktemp)
  case $REDIS_INSTANCE in
    "ca") bastion_config_for_redis_ca "$bastion_config";;
    "toutatis") bastion_config_for_redis_toutatis "$bastion_config";;
    *) echo "Unsupported redis instance (ca or toutatis available) : $REDIS_INSTANCE"; return 1;;
  esac

  ssh -f -N \
    -F "$bastion_config" \
    "bastion_$ENV"

  echo "sample command : 'redis-cli -p $REDIS_LOCAL_PORT'"
  echo "run 'kubectl delete pod $POD_NAME' when you have finished"

  redis-cli -p $REDIS_LOCAL_PORT
}
#!/usr/bin/env bash

# Create a k8s cron job that runs a script regularly
# See run_cron_job_k8s -h for more details

run_cron_job_k8s() {

  # default values
  local namespace="testing"
  local name="$USERNAME"
  local SCHEDULE="00 05 * * *"
  local secret=""
  local amm_folder=""
  local amm_script=""

  while getopts ":e:c:p:f:s:t:h" opt; do
    case $opt in
    e)
      namespace="$OPTARG" >&2
      ;;
    t)
      SCHEDULE="$OPTARG" >&2
      ;;
    p)
      name="$OPTARG" >&2
      ;;
    c)
      secret="$OPTARG" >&2
      ;;
    f)
      amm_folder="$OPTARG" >&2
      ;;
    s)
      amm_script="$OPTARG" >&2
      ;;
    h)
      show_help_cron_job
      return 0
      ;;
    :)
      echo "Option -$OPTARG requires an argument. Run run_cron_job_k8s -h for help" >&2
      return 0
      ;;
    \?)
      echo "Invalid option: -$OPTARG. Run run_cron_job_k8s -h for help" >&2
      return 0
      ;;
    esac
  done

  if [ -z "$amm_script" ]; then
    echo 'Missing -s. Run run_cron_job_k8s -h for help' >&2
    return 0
  fi

  shift "$((OPTIND-1))"

  local script_args=$(
    if [ "$#" -gt 0 ] ; then
      printf '"'
      join_by '", "' $*
      printf '"'
    fi
  )

  local IMAGE="lolhens/ammonite:2.5.4"
  local CRONJOB_NAME="cronjob-ammonite-$name"


  configure_kubectl_for $namespace

  if [[ ! -r "$amm_script" ]]; then
    echo "ammonite script not found $amm_script"
    return 2
  else
    local CONFIG_MAP="config-$CRONJOB_NAME"
    local SECRET_MAP="secret-$CRONJOB_NAME"
    local CONFIG_MAP_DIR="$(mktemp -d)"

    if [[ ! -z $amm_folder && -d $amm_folder ]] ; then
      cp -r "$amm_folder/" "$CONFIG_MAP_DIR"
    fi
    cp "$amm_script" "$CONFIG_MAP_DIR/script.sc"

    kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
    kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"

    kubectl -n $namespace get secret $SECRET_MAP && kubectl -n $namespace delete secret $SECRET_MAP
    kubectl -n $namespace create secret generic $SECRET_MAP --from-file="$secret"

    kubectl -n $namespace get cronjob $CRONJOB_NAME && kubectl -n $namespace delete cronjob $CRONJOB_NAME

    echo "starting $CRONJOB_NAME with $IMAGE"

    JOB_DEFINITION='
apiVersion: batch/v1
kind: CronJob
metadata:
  name: '$CRONJOB_NAME'
  namespace: '$namespace'
spec:
  schedule: "'$SCHEDULE'"
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        spec:
          nodeSelector:
            workType: "workers"
          restartPolicy: Never
          volumes:
            - name: config
              configMap:
                name: '$CONFIG_MAP'
            - name: secret
              secret:
                secretName: '$SECRET_MAP'
          containers:
            - name: '$CRONJOB_NAME'
              command: ["amm", "/code/script.sc"]
              image: '$IMAGE'
              imagePullPolicy: IfNotPresent
              args: ['$script_args']
              env:
                - name: POD_NAME
                  valueFrom:
                    fieldRef:
                      apiVersion: v1
                      fieldPath: metadata.name
                - name: POD_NAMESPACE
                  valueFrom:
                    fieldRef:
                      apiVersion: v1
                      fieldPath: metadata.namespace
                - name: HOST_IP
                  valueFrom:
                    fieldRef:
                      apiVersion: v1
                      fieldPath: status.hostIP
              volumeMounts:
                - name: config
                  mountPath: /code
                - name: secret
                  mountPath: /conf
                  readOnly: true
              resources:
                requests:
                  cpu: 500m
                  memory: 256Mi
                limits:
                  cpu: 4000m
                  memory: 512Mi
              envFrom:
                - configMapRef:
                    name: '$CONFIG_MAP'
                - secretRef:
                    name: '$SECRET_MAP'
'

    echo "$JOB_DEFINITION" > /tmp/job.yaml

    kubectl -n $namespace apply -f /tmp/job.yaml

  fi
}

# Usage info
show_help_cron_job() {
  #p:f:s
  local help="""Usage: run_cron_job_k8s -s SCRIPT [-t TIME] [-e ENV] [-c CONFIG] [-p POD] [-f FOLDER] [ARGS]
Create a k8s cron job that runs a script regularly

    -h          display this help and exit
    -s SCRIPT   run script SCRIPT on a pod (SCRIPT must be a .sc file)
    -t TIME     opt. time when the job will be launched. TIME should be in CRON syntax (defaults to 00 05 * * *, i.e. 5AM UTC)
    -e ENV      opt. set execution environment (defaults to testing)
    -c CONFIG   opt. secret file needed for the script (must be a .sc file, not a .secret file)
    -p POD      opt. name of the pod to create (defaults to $USERNAME)
    -f FOLDER   opt. name of the folder containing the scripts to execute (if SCRIPT needs other files)
    ARGS        opt. additional arguments for SCRIPT
"""
  echo "$help"
}
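# (Illustrative invocation, not part of the original file; paths and schedule
# are placeholders.) Run cleanup.sc on testing every day at 04:00 UTC:
run_cron_job_k8s -e testing -t "00 04 * * *" -p nightly-cleanup \
  -c ./conf/secret.sc -s ./scripts/cleanup.sc someArg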
#!/usr/bin/env bash
|
|
1266
|
-
|
|
1267
|
-
# Usage info
|
|
1268
|
-
show_help_job() {
|
|
1269
|
-
local help="""Usage: run_job_k8s -s SCRIPT [-e ENV] [-c CONFIG] [-p POD] [-f FOLDER] [ARGS]
|
|
1270
|
-
Create a k8s job executing a script
|
|
1271
|
-
|
|
1272
|
-
-h display this help and exit
|
|
1273
|
-
-s SCRIPT run script SCRIPT on a pod (SCRIPT must be a .sc file)
|
|
1274
|
-
-e ENV opt. set execution environment (default to testing)
|
|
1275
|
-
-c CONFIG opt. secret file needed for the script (must be a .sc file, not a .secret file)
|
|
1276
|
-
-p POD opt. name of the pod to create (default to $USERNAME)
|
|
1277
|
-
-f FOLDER opt. name of the folder containing the scripts to execute (if SCRIPT needs other files)
|
|
1278
|
-
ARGS opt. additional arguments for SCRIPT
|
|
1279
|
-
|
|
1280
|
-
The organisation of the files must be the same locally as on the pod :
|
|
1281
|
-
- /code containing the script to execute (arg -s) and the other needed files (if the arg -f is used, it must reference this directory)
|
|
1282
|
-
- /conf containing the secret file (arg -c if used)
|
|
1283
|
-
E.g. in the script \"/code/script.sc\", to use a secret file \"/conf/secret.sc\", the import should look like \"import \$file.^.conf.secret.sc\"
|
|
1284
|
-
"""
|
|
1285
|
-
echo "$help"
|
|
1286
|
-
}
|
|
1287
|
-
|
|
1288
|
-
run_job_k8s() {
|
|
1289
|
-
|
|
1290
|
-
#default values
|
|
1291
|
-
local namespace="testing"
|
|
1292
|
-
local name="$USERNAME"
|
|
1293
|
-
local secret=""
|
|
1294
|
-
local amm_folder=""
|
|
1295
|
-
local amm_script=""
|
|
1296
|
-
|
|
1297
|
-
while getopts ":e:c:p:f:s:h" opt; do
|
|
1298
|
-
case $opt in
|
|
1299
|
-
e)
|
|
1300
|
-
namespace="$OPTARG" >&2
|
|
1301
|
-
;;
|
|
      p)
        name="$OPTARG" >&2
        ;;
      c)
        secret="$OPTARG" >&2
        ;;
      f)
        amm_folder="$OPTARG" >&2
        ;;
      s)
        amm_script="$OPTARG" >&2
        ;;
      h)
        show_help_job
        return 0
        ;;
      :)
        echo "Option -$OPTARG requires an argument. Run run_cron_job_k8s -h for help" >&2
        return 0
        ;;
      \?)
        echo "Invalid option: -$OPTARG. Run run_cron_job_k8s -h for help" >&2
        return 0
        ;;
    esac
  done

  if [ -z "$amm_script" ]; then
    echo 'Missing -s. Run run_cron_job_k8s -h for help' >&2
    return 0
  fi

  shift "$((OPTIND-1))"

  local script_args=$(
    if [ "$#" -gt 0 ] ; then
      printf '"'
      join_by '", "' $*
      printf '"'
    fi
  )

  local IMAGE="lolhens/ammonite:2.5.4"
  local JOB_NAME="job-ammonite-$name"

  if [[ ! -r "$amm_script" ]]; then
    echo "ammonite script not found $amm_script"
    return 2
  else
    local CONFIG_MAP="config-$JOB_NAME"
    local CONFIG_MAP_DIR="$(mktemp -d)"
    local SECRET_MAP="secret-$JOB_NAME"

    configure_kubectl_for $namespace

    if [[ ! -z $amm_folder && -d $amm_folder ]] ; then
      cp -r "$amm_folder/" "$CONFIG_MAP_DIR"
    fi
    cp "$amm_script" "$CONFIG_MAP_DIR/script.sc"

    kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
    kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"

    kubectl -n $namespace get secret $SECRET_MAP && kubectl -n $namespace delete secret $SECRET_MAP
    kubectl -n $namespace create secret generic $SECRET_MAP --from-file="$secret"

    kubectl -n $namespace get job $JOB_NAME && kubectl -n $namespace delete job $JOB_NAME

    echo "starting $JOB_NAME with $IMAGE"
  fi

  JOB_DEFINITION='
apiVersion: batch/v1
kind: Job
metadata:
  name: '$JOB_NAME'
  namespace: '$namespace'
spec:
  template:
    spec:
      containers:
        - name: '$JOB_NAME'
          command: ["amm", "/code/script.sc"]
          image: '$IMAGE'
          args: ['$script_args']
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.hostIP
          volumeMounts:
            - name: config
              mountPath: /code
            - name: secret
              mountPath: /conf
              readOnly: true
          resources:
            requests:
              cpu: 500m
              memory: 256Mi
            limits:
              cpu: 4000m
              memory: 1Gi
      nodeSelector:
        workType: workers
      restartPolicy: Never
      volumes:
        - name: config
          configMap:
            name: '$CONFIG_MAP'
        - name: secret
          secret:
            secretName: '$SECRET_MAP'
'

  echo "$JOB_DEFINITION" > /tmp/job.yaml

  kubectl -n $namespace apply -f /tmp/job.yaml

}
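For reference, the script_args construction above wraps the remaining positional parameters in double quotes and joins them with a quoted comma separator, so they splice into the manifest as a JSON-style array. A hedged sketch (argument values are hypothetical; the unquoted $* means arguments are re-split on whitespace):

# run_cron_job_k8s -s script.sc ... -- foo bar
# script_args  ->  "foo", "bar"
# manifest     ->  args: ["foo", "bar"]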
#!/usr/bin/env bash

run_task() {
  set -e

  check_args "--namespace" $1
  shift
  NAMESPACE=$1
  shift
  check_args "--image" $1
  shift
  IMAGE=$1
  shift
  check_args "--name" $1
  shift
  NAME=$1
  shift

  set -x

  kubectl -n ${NAMESPACE} run ${NAME} \
    --image ${IMAGE} \
    --restart=Never \
    --attach --rm \
    $*
}
geocode_address() {
  ADDRESS=$(sed -e 's: :%20:g' <(echo "$*"))
  URL="https://maps.googleapis.com/maps/api/geocode/json?address=${ADDRESS}&key=${GOOGLE_API_KEY}"
  curl $URL
}

search_business() {
  SIREN=$1
  shift
  QUERY=$(sed -e 's: :+:g' <(echo "$*"))
  URL="https://data.opendatasoft.com/api/records/1.0/search/?dataset=sirene_v3%40public&q=${QUERY}&sort=datederniertraitementetablissement&facet=trancheeffectifsetablissement&facet=libellecommuneetablissement&facet=departementetablissementi&refine.siren=${SIREN}"
  curl $URL
}
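Hedged usage sketches for the helpers above (namespace, image and address are made up; run_task forwards anything after --name's value straight to kubectl run):

run_task --namespace testing --image busybox:1.36 --name one-off -- sh -c 'echo hello'
GOOGLE_API_KEY=xxx geocode_address 10 rue du Molinel Lille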
#!/bin/bash

# source tolls.sh ; tolls antoine.thomas@colisweb.com
function tolls() {
  USER=${1:-first.last@colisweb.com}
  FROM_DATE=${2:-"2023-02-01"}
  TO_DATE=${3:-"2023-02-28"}

  USER=$(gum input --prompt "username : " --value $USER)
  TOKEN=$(./tour_details.sc login --user $USER --password $(gum input --password --placeholder password))
  [ "$TOKEN" != "" ] && echo "connected" || return 1

  FROM_DATE=$(gum input --prompt "Date start : " --value $FROM_DATE)
  TO_DATE=$(gum input --prompt "Date end : " --value $TO_DATE)
  FILENAME="tours-${FROM_DATE}-TO-${TO_DATE}.json"
  curl --cookie "session=$TOKEN" "https://api.production.colisweb.com/api/v6/routes-plans/external?from=${FROM_DATE}&to=${TO_DATE}" > ~/Downloads/$FILENAME
  echo "Tours downloaded"

  projectIds=$(./tour_details.sc allProjects --file ~/Downloads/$FILENAME | gum choose --no-limit | cut -d "," -f 2)
  echo "selected projects: $projectIds"
  tourIds=$(./tour_details.sc allTours --file ~/Downloads/$FILENAME --projectIds "$projectIds")
  echo "selected tours: $tourIds"

  TARGET="${FROM_DATE}-TO-${TO_DATE}.csv"
  echo "calling HERE, writing to $TARGET"
  ./tour_details.sc allToursDetails --token $TOKEN --hereApiKey $HERE_API_KEY --routeIds "$tourIds" > "$TARGET"

  echo "done"
}
#!/usr/bin/env bash

# possible syntax:
# login
# login testing
# login testing userid
login() {
  ENV=${1:-`gum choose testing staging production recette`} && \
  USER=${2:-`gum input --placeholder username`} && \
  PASSWORD=`gum input --password --placeholder password` && \
  TOKEN=`$SCRIPT_FULL_PATH/scala/auth.sc login --env $ENV --user $USER --password $PASSWORD` && \
  export TOKEN_$ENV=$TOKEN && \
  echo "login success for $USER on $ENV" >&2
}

# you need to call login first (see above)
# possible syntax:
# recompute_tour
# recompute_tour testing
# recompute_tour testing draft
# recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09
# recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09 TODAY
# recompute_tour testing draft 28bf9967-b5f3-4294-8855-cfd2fa36ec09 FRIDAY
recompute_tour() {
  ENV=${1:-`gum choose testing staging production recette`}
  MODE=${2:-`gum choose draft definitive`}
  PROJECT_ID=${3:-`pick_project $ENV`}
  DAY=${4:-`gum choose TODAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY SUNDAY`}
  jwt_token $ENV
  scala/tour_config.sc $MODE -t $TOKEN -p $PROJECT_ID -d $DAY
}

pick_project() {
  ENV=${1:-`gum choose testing staging production recette`}
  jwt_token $ENV
  scala/tour_config.sc list -t $TOKEN -e $ENV | gum filter | cut -f1
}

jwt_token() {
  ENV=${1:-`gum choose testing staging production recette`}
  eval 'TOKEN=$TOKEN_'$ENV
  if ! $SCRIPT_FULL_PATH/scala/auth.sc check -t $TOKEN -e $ENV ; then
    login $ENV
  fi
}
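The login/jwt_token pair keeps one token per environment in a dynamically named variable. A standalone sketch of the indirection:

ENV=testing
export TOKEN_$ENV="abc123"   # what login() does after authenticating
eval 'TOKEN=$TOKEN_'$ENV     # what jwt_token() does to read it back
echo "$TOKEN"                # prints abc123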
#!/usr/bin/env bash

ftp_ikea_k8s() {
  SSH_LOCAL_PORT=2230
  FTP_LOCAL_PORT=25500
  start_ssh_bastion testing $SSH_LOCAL_PORT

  lsof -ti tcp:$FTP_LOCAL_PORT | xargs kill

  bastion_config=$(mktemp)
  cat > "$bastion_config" <<EOF
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
User root
Host bastion_ftp
  HostName 127.0.0.1
  Port 2230
  LocalForward 25500 ft.centiro.ikea.com:22
EOF

  ssh -f -N \
    -F "$bastion_config" \
    "bastion_ftp"

  sftp -P $FTP_LOCAL_PORT colisweb.fr@127.0.0.1
}
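The generated config chains two forwards: local port 2230 already points at the bastion (set up by start_ssh_bastion), and the bastion in turn forwards local port 25500 to the IKEA SFTP host. Assuming that same bastion setup, the config file is equivalent to a single command along these lines:

ssh -f -N -p 2230 \
  -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
  -L 25500:ft.centiro.ikea.com:22 root@127.0.0.1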
#!/usr/bin/env bash

# usage:
# jconsole_k8s testing colisweb-api-web

jconsole_k8s() {
  ENV=$1
  NAME=$2

  start_ssh_bastion $ENV 2242
  POD_IP=$( \
    kubectl -n $ENV get pods -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIP}{"\n"}{end}' \
    | grep "$NAME" | cut -d' ' -f2 | head -1 \
  )
  echo "selected POD with ip $POD_IP"
  echo "use 'root' as password"
  ssh -f -N -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -D 7777 root@127.0.0.1 -p 2242

  jconsole \
    -J-DsocksProxyHost=localhost \
    -J-DsocksProxyPort=7777 \
    -J-DsocksNonProxyHosts= \
    service:jmx:rmi:///jndi/rmi://$POD_IP:7199/jmxrmi \
    &

  echo "remember to stop with 'stop_ssh_bastion'"

}
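jconsole has no SSH support of its own, so the function opens a SOCKS proxy (ssh -D 7777) and points the JVM's socksProxy* properties at it; the empty socksNonProxyHosts forces all RMI traffic through the tunnel. If stop_ssh_bastion leaves the proxy behind, the lsof pattern already used elsewhere in this file works as a cleanup sketch:

lsof -ti tcp:7777 | xargs kill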
#!/usr/bin/env bash

# Interactive console on a new pod. See also run_ruby_k8s
# Ex :
# railsc_k8s production
# railsc_k8s production "User.where(email:'toni@colisweb.com')"
railsc_k8s() {
  ENV=$1
  COMMAND=$2
  [[ $ENV = "production" || $ENV = "staging" ]] && default_tag="master-latest" || default_tag="${ENV}-latest"
  local image_tag=${5:-$default_tag}
  local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/colisweb-api:$image_tag"
  local POD_NAME="colisweb-api-rails-console-$image_tag-$USERNAME"

  kubectl -n $ENV get pod $POD_NAME && kubectl -n $ENV delete pod $POD_NAME

  configure_kubectl_for $ENV
  echo "starting with $IMAGE"

  kubectl -n $ENV run $POD_NAME \
    --image $IMAGE \
    --restart=Never \
    --overrides='{
      "spec":{
        "nodeSelector":{
          "workType": "workers"
        },
        "containers":[
          {
            "name":"'$POD_NAME'",
            "image":"'$IMAGE'",
            "imagePullPolicy":"Always",
            "command":[
              "sleep",
              "infinity"
            ],
            "resources":{
              "limits":{
                "memory": "2048Mi"
              }
            },
            "envFrom": [ {
              "configMapRef": {
                "name": "colisweb-api"
              }
            }, {
              "secretRef": {
                "name": "colisweb-api"
              }
            } ]
          }
        ]
      }
    }'

  sleep 5
  KUBERAILS="kubectl -n $ENV exec -ti $POD_NAME -- /usr/src/app/bin/rails c"
  [ -z "$COMMAND" ] && eval $KUBERAILS || echo $COMMAND | eval $KUBERAILS

  echo "End of $POD_NAME"
  kubectl -n $ENV delete pods $POD_NAME
}

# Ex :
# create_user testing claire.lien@colisweb.com super_admin clairemdp
create_user() {
  ENV=$1
  EMAIL=$2
  ROLE=$3
  PASSWORD=$4
  railsc_k8s $ENV "User.where(email:'$EMAIL', role:'$ROLE').first_or_create.update_attributes!(password: '$PASSWORD')"
}

# Ex :
# delete_user testing claire.lien@colisweb.com
delete_user() {
  ENV=$1
  EMAIL=$2
  railsc_k8s $ENV "User.find_by(email:'$EMAIL').destroy"
}
# NON interactive console on a new pod, for long-running tasks (a few minutes)
# See also railsc_k8s
# file.txt will be available from /conf/data.txt in the ruby code
# examples :
# run_ruby_k8s testing demo <(echo "pp JSON.parse(File.read('/conf/data.txt'))") <(echo '{ "content": 123 }')
# run_ruby_k8s testing demo ~/.oh-my-zsh/custom/dev-tools/shell-session/ruby/demo.rb <(echo '{ "content": 123 }')
run_ruby_k8s() {
  if [ $# -lt 4 ]; then
    echo "usage : run_ruby_k8s production name-for-pod script.rb file.txt"
    return 1
  fi
  local namespace=$1
  local name=$2
  local ruby_script=$3
  local input_data=$4
  [[ $namespace = "production" || $namespace = "staging" ]] && default_tag="master-latest" || default_tag="${namespace}-latest"
  local image_tag=${5:-$default_tag}

  if [ ! -r "$ruby_script" ]; then
    echo "ruby script not found $ruby_script"
    return 2
  fi

  if [ ! -r "$input_data" ]; then
    echo "data not found $input_data"
    return 3
  fi

  local IMAGE="949316342391.dkr.ecr.eu-west-1.amazonaws.com/colisweb-api:$image_tag"
  local POD_NAME="colisweb-api-script-$name"
  local CONFIG_MAP="config-$POD_NAME"
  local CONFIG_MAP_DIR="$(mktemp -d)"

  configure_kubectl_for $namespace

  cp "$ruby_script" "$CONFIG_MAP_DIR/script.rb"
  cp "$input_data" "$CONFIG_MAP_DIR/data.txt"

  kubectl -n $namespace get configmap $CONFIG_MAP && kubectl -n $namespace delete configmap $CONFIG_MAP
  kubectl -n $namespace create configmap $CONFIG_MAP --from-file="$CONFIG_MAP_DIR"

  kubectl -n $namespace get pod $POD_NAME && kubectl -n $namespace delete pod $POD_NAME

  echo "starting with $IMAGE"
  kubectl -n $namespace run $POD_NAME \
    --image $IMAGE \
    -ti \
    --restart=Never \
    --attach \
    --rm \
    --overrides='{
      "spec":{
        "nodeSelector":{
          "workType": "workers"
        },
        "containers":[
          {
            "name":"'$POD_NAME'",
            "image":"'$IMAGE'",
            "imagePullPolicy":"Always",
            "command":[
              "/usr/src/app/bin/rails",
              "r",
              "/conf/script.rb"
            ],
            "resources":{
              "limits":{
                "memory": "4096Mi"
              }
            },
            "volumeMounts":[
              {
                "name":"conf",
                "mountPath":"/conf"
              }
            ],
            "envFrom": [ {
              "configMapRef": {
                "name": "colisweb-api"
              }
            }, {
              "secretRef": {
                "name": "colisweb-api"
              }
            } ]
          }
        ],
        "volumes":[
          {
            "name":"conf",
            "configMap":{ "name":"'$CONFIG_MAP'" }
          }
        ]
      }
    }'

  kubectl -n $namespace delete configmap $CONFIG_MAP
}
# example:
# update_pickup_cp testing <( echo '{"wrong_cp": "59123", "corrected_cp": "59223", "delivery_ids": ["4192421", "4192425"]}' )
update_pickup_cp() {
  run_ruby_k8s $1 update-pickup-cp "$SCRIPT_FULL_PATH/ruby/update_pickup_cp.rb" $2
}


update_all_prices() {
  local namespace=$1
  local json_prices=$2

  local json_size=$(wc -c < "$json_prices")

  if ((json_size > 940000)); then
    command -v jq || { echo "jq not found (use brew install jq)"; return 1; }
    local max_lines=3000
    local total_lines=$(jq '. | length' $json_prices)
    local iterations=$((total_lines / max_lines + 1))
    echo "$json_prices is too big, I'll split it for you in blocks of $max_lines lines. It will take $iterations runs"
    for (( i = 0 ; i < iterations ; i++ )) ; do
      local start=$((i * max_lines))
      local end=$(( (i + 1) * max_lines))
      local split_file=$(mktemp)
      jq -c ".[$start:$end]" $json_prices > $split_file
      local split_lines=$(jq '. | length' $split_file)
      echo "starting iteration $i from $start to $end with $split_file which has $split_lines lines"
      run_ruby_k8s $namespace "update-prices-$i" "$SCRIPT_FULL_PATH/ruby/update_prices.rb" $split_file
    done
  else
    run_ruby_k8s $namespace "update-prices" "$SCRIPT_FULL_PATH/ruby/update_prices.rb" $json_prices
  fi
}


update_surveys() {
  local namespace=$1
  local csv_surveys=$2

  local csv_size=$(wc -c < "$csv_surveys")

  if ((csv_size > 940000)); then
    local max_lines=400
    local total_lines=$(wc -l < $csv_surveys)
    local iterations=$((total_lines / max_lines + 1))
    echo "$csv_surveys is too big, I'll split it for you in blocks of $max_lines lines. It will take $iterations runs"
    for (( i = 0 ; i < iterations ; i++ )) ; do
      local start=$((i * max_lines + 2))
      local end=$(( (i + 1) * max_lines + 1))
      local split_file=$(mktemp)
      head -1 $csv_surveys > $split_file
      sed -n "${start},${end}p" $csv_surveys >> $split_file

      local split_lines=$(wc -l < $split_file)
      echo "starting iteration $i from $start to $end with $split_file which has $split_lines lines"
      run_ruby_k8s $namespace "reimport-surveys-$i" "$SCRIPT_FULL_PATH/ruby/feedback_kpi_reuploader.rb" $split_file
    done
  else
    run_ruby_k8s $namespace "reimport-surveys" "$SCRIPT_FULL_PATH/ruby/feedback_kpi_reuploader.rb" $csv_surveys
  fi
}
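A worked example of the chunking arithmetic above, assuming a hypothetical prices.json with 7000 entries and max_lines=3000: iterations = 7000 / 3000 + 1 = 3, giving the slices [0:3000], [3000:6000] and [6000:9000]. jq clamps out-of-range slices, so the last run simply receives the 1000 leftover entries:

jq '.[0:3000]    | length' prices.json   # 3000
jq '.[6000:9000] | length' prices.json   # 1000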
#!/usr/bin/env bash

configure_gitlab_ssh() {
  tmp_dir=$(mktemp -d)
  ssh-keyscan gitlab.com > $tmp_dir/known_hosts
  echo "$SSH_PRIVATE_KEY" > $tmp_dir/id_rsa
  chmod 600 $tmp_dir/id_rsa
  ssh -i $tmp_dir/id_rsa -T git@gitlab.com
  rm -Rf $tmp_dir
}


configure_gitlab_ssh_home() {
  mkdir -p ~/.ssh
  ssh-keyscan gitlab.com >> ~/.ssh/known_hosts
  echo "$SSH_PRIVATE_KEY" > ~/.ssh/id_rsa
  chmod 600 ~/.ssh/id_rsa
  ssh -T git@gitlab.com
}
#!/usr/bin/env bash

datadog_schedule_downtime() {
  SERVICES=$1
  DOWNTIME_MINUTES=${2:-30}

  if [[ "$ENVIRONMENT" == "production" ]] ; then
    log "scheduling downtime for $SERVICES in $ENVIRONMENT"
  else
    return 0
  fi

  for SERVICE in $SERVICES ; do
    datadog_schedule_downtime_single $SERVICE $DOWNTIME_MINUTES
  done
}

datadog_schedule_downtime_single() {
  local SERVICE=$1
  local DOWNTIME_MINUTES=$2

  START=$(date +%s)
  END=$((START + 60 * DOWNTIME_MINUTES))

  log "scheduling a downtime on datadog for $SERVICE ($DOWNTIME_MINUTES minutes)"
  curl -X POST "https://api.datadoghq.com/api/v1/downtime" \
    -H "Content-Type: application/json" \
    -H "DD-API-KEY: ${DD_API_KEY}" \
    -H "DD-APPLICATION-KEY: ${DD_APP_KEY}" \
    -d '
    {
      "active": true,
      "downtime_type": 0,
      "start": '$START',
      "end": '$END',
      "message": "CA Deployment - performance for '$SERVICE' may be lower for next '$DOWNTIME_MINUTES' min",
      "monitor_tags": [
        "service:'$SERVICE'",
        "performance"
      ],
      "scope": [
        "env:production"
      ],
      "timezone": "Europe/Paris"
    }
    '
}
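The downtime window is plain epoch arithmetic. With the default of 30 minutes:

START=$(date +%s)          # e.g. 1700000000
END=$((START + 60 * 30))   # 1700001800, i.e. 30 minutes later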
#!/usr/bin/env bash

docker_build_push() {
  read -r -a BUILD_ARGS <<< "$1"
  DOCKER_BUILD_ARGS="--build-arg VCS_REF=$(git rev-parse --short HEAD)"
  for ARG_NAME in "${BUILD_ARGS[@]}"
  do
    DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS --build-arg $ARG_NAME=${!ARG_NAME}"
  done

  if ! image_exists $DOCKER_REGISTRY_ID $APPLICATION $CI_COMMIT_SHORT_SHA ; then
    docker pull $DOCKER_IMAGE || true
    SOURCE_URL=${CI_PROJECT_URL:8} # without the "https://" protocol, like gitlab.com/colisweb-idl/colisweb/back/packing
    docker build $DOCKER_BUILD_ARGS \
      -t $DOCKER_IMAGE_SHA \
      --label org.opencontainers.image.revision=$(git rev-parse HEAD) \
      --label org.opencontainers.image.source=$SOURCE_URL \
      --cache-from $DOCKER_IMAGE \
      $DOCKER_STAGE_PATH
    docker push $DOCKER_IMAGE_SHA
  fi
}


docker_promote() {
  # inspired by https://dille.name/blog/2018/09/20/how-to-tag-docker-images-without-pulling-them/
  OLD_TAG=${1//[^0-9a-zA-Z-.]/_}
  NEW_TAG=${2//[^0-9a-zA-Z-.]/_}
  echo "promoting from $OLD_TAG to $NEW_TAG"
  TOKEN=$(aws_ecr_token)
  CONTENT_TYPE="application/vnd.docker.distribution.manifest.v2+json"
  MANIFESTS_API="https://${DOCKER_REGISTRY}/v2/${APPLICATION}/manifests"

  if MANIFEST=$(curl --fail -H "Authorization: Basic $TOKEN" -H "Accept: ${CONTENT_TYPE}" "$MANIFESTS_API/${OLD_TAG}"); then
    echo "authenticated on $MANIFESTS_API"
  else
    return 1
  fi
  if curl --fail -H "Authorization: Basic $TOKEN" -X PUT -H "Content-Type: ${CONTENT_TYPE}" -d "${MANIFEST}" "$MANIFESTS_API/$NEW_TAG" ; then
    echo "promoted ${APPLICATION} from $OLD_TAG to $NEW_TAG"
  else
    return 2
  fi
}

ensure_images_exists() {
  for IMAGE_TO_CHECK in $(echo $1 | tr "," "\n"); do
    image_exists ${DOCKER_REGISTRY_ID} ${IMAGE_TO_CHECK} ${VERSION} || return 1
  done
}
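A hedged usage sketch for docker_promote (registry and tags are hypothetical). The parameter expansion replaces every character outside 0-9, a-z, A-Z, '-' and '.' with an underscore, so branch-style tags are normalised first:

DOCKER_REGISTRY=949316342391.dkr.ecr.eu-west-1.amazonaws.com \
APPLICATION=my-service \
  docker_promote "feature/foo" "staging-latest"   # retags feature_foo as staging-latest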
#!/usr/bin/env bash

extract_yaml_config_variable() {
  set +e
  set +x

  check_args "--environment" $1
  shift
  ENVIRONMENT=$1
  shift

  check_args "--configs-path" $1
  shift
  CONFIGS_PATH=$1
  shift

  check_args "--variable" $1
  shift
  VARIABLE=$1
  shift

  [[ "$1" == "--optional" ]] && OPTIONAL=true || OPTIONAL=false

  if [ ! -f ${CONFIGS_PATH}/common.yaml ]; then
    echo >&2 "Missing $CONFIGS_PATH/common.yaml configuration file"
    exit 1
  fi
  if [ ! -f ${CONFIGS_PATH}/${ENVIRONMENT}.yaml ]; then
    echo >&2 "Missing $CONFIGS_PATH/$ENVIRONMENT.yaml configuration file"
    exit 1
  fi
  if [ ! -f ${CONFIGS_PATH}/${ENVIRONMENT}-secrets.yaml ]; then
    echo >&2 "Missing $CONFIGS_PATH/$ENVIRONMENT-secrets.yaml configuration file"
    exit 1
  fi

  result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/$ENVIRONMENT-secrets.yaml")
  if [ $? -ne 0 ] || [ "$result" = "null" ]; then
    result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/$ENVIRONMENT.yaml")
    if [ $? -ne 0 ] || [ "$result" = "null" ]; then
      result=$(yq -r ${VARIABLE} "$CONFIGS_PATH/common.yaml")
      if [ $? -ne 0 ] || [ "$result" = "null" ]; then
        if [ $OPTIONAL = true ]; then
          echo ""
          exit 0
        else
          echo >&2 "Missing path $VARIABLE in $CONFIGS_PATH/$ENVIRONMENT-secrets.yaml, $CONFIGS_PATH/$ENVIRONMENT.yaml or $CONFIGS_PATH/common.yaml"
          exit 1
        fi
      fi
    fi
  fi
  echo ${result}
}
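The lookup order above is: <env>-secrets.yaml first, then <env>.yaml, then common.yaml. A sketch with hypothetical files:

# deploy/common.yaml:           myappconfig: { postgres: { port: 5432 } }
# deploy/testing.yaml:          myappconfig: { postgres: { host: db.testing } }
# deploy/testing-secrets.yaml:  myappconfig: { postgres: { password: s3cret } }
extract_yaml_config_variable --environment testing --configs-path deploy \
  --variable .myappconfig.postgres.host       # -> db.testing
extract_yaml_config_variable --environment testing --configs-path deploy \
  --variable .myappconfig.postgres.password   # -> s3cret (secrets win)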
#!/usr/bin/env bash

flyway_clean() {
  HOST="$1"
  PORT="$2"
  DATABASE="$3"
  USER="$4"
  PASSWORD="$5"

  kubectl run -it --rm flywayclean \
    --image=flyway/flyway \
    --restart=Never \
    -- \
    -cleanDisabled=false \
    -url="jdbc:postgresql://$HOST:$PORT/$DATABASE" \
    -user="$USER" \
    -password="$PASSWORD" \
    clean
}
#!/usr/bin/env bash

FLYWAY_VERSION="7.4.0"


get_yaml_variable() {
  extract_yaml_config_variable --environment ${ENVIRONMENT} --configs-path $(pwd)/deploy --variable $@
}

init_migrate_db() {
  set -e

  check_env_vars 4 "APPLICATION" "ENVIRONMENT" "FLYWAY_VERSION" "MIGRATION_SQL_PATH"

  PG_YAML_PATH=".${APPLICATION}config.postgres"

  DB_PORT="5432"
  DB_HOST=$(get_yaml_variable "${PG_YAML_PATH}.host")
  DB_INIT_USERNAME=$(get_yaml_variable "${PG_YAML_PATH}.initUsername")
  DB_INIT_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.initPassword")
  DB_DATABASE=$(get_yaml_variable "${PG_YAML_PATH}.database")
  DB_USER=$(get_yaml_variable "${PG_YAML_PATH}.user")
  DB_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.password")
  DB_URL="jdbc:postgresql://${DB_HOST}:${DB_PORT}/${DB_DATABASE}"

  DB_RO_USER=$(get_yaml_variable "${PG_YAML_PATH}.readOnlyUser" --optional)
  DB_RO_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.readOnlyPassword" --optional)

  unset KUBECONFIG

  configure_kubectl_for ${ENVIRONMENT}

  kube_init_service_database \
    --namespace ${ENVIRONMENT} \
    --service ${APPLICATION} \
    --db_host ${DB_HOST} \
    --db_port ${DB_PORT} \
    --db_init_username ${DB_INIT_USERNAME} \
    --db_init_password ${DB_INIT_PASSWORD} \
    --db_database ${DB_DATABASE} \
    --db_username ${DB_USER} \
    --db_password ${DB_PASSWORD}

  if [[ ! -z "$DB_RO_USER" ]] && [[ ! -z "$DB_RO_PASSWORD" ]]; then
    kube_init_database_readonly_account \
      --namespace ${ENVIRONMENT} \
      --service ${APPLICATION} \
      --db_connection "$DB_INIT_USERNAME:$DB_INIT_PASSWORD@$DB_HOST:$DB_PORT" \
      --db_database ${DB_DATABASE} \
      --db_readonly_username ${DB_RO_USER} \
      --db_readonly_password ${DB_RO_PASSWORD}
  fi

  flyway_migrate \
    --environment ${ENVIRONMENT} \
    --namespace ${ENVIRONMENT} \
    --service ${APPLICATION} \
    --db_url ${DB_URL} \
    --db_user ${DB_USER} \
    --db_password ${DB_PASSWORD} \
    --flyway_version ${FLYWAY_VERSION} \
    --flyway_sql_folder $(pwd)/${MIGRATION_SQL_PATH}
}

flyway_migrate() {
  set -e

  extract_args 8 \
    environment namespace service db_url db_user db_password flyway_version flyway_sql_folder $*

  echo "running flyway migrations for service $service in environment $environment namespace $namespace for db_url $db_url with user $db_user"
  echo "migration files expected in $flyway_sql_folder"

  CONFIGMAP_NAME="$service-flyway-migration-sql"
  POD_NAME="$service-flyway-migration"

  configure_kubectl_for $environment

  kubectl -n $namespace delete configmap $CONFIGMAP_NAME --ignore-not-found
  kubectl -n $namespace delete pod $POD_NAME --ignore-not-found
  kubectl -n $namespace create configmap $CONFIGMAP_NAME --from-file=$flyway_sql_folder

  kubectl -n $namespace run $POD_NAME --image ignored -ti --restart=Never --attach --rm --overrides='
  {
    "spec":{
      "containers":[
        {
          "name":"'$POD_NAME'",
          "image":"flyway/flyway:'$flyway_version'",
          "command":["flyway", "-url='$db_url'", "-user='$db_user'", "-password='$db_password'", "migrate"],
          "volumeMounts":[
            {
              "name":"sql",
              "mountPath":"/flyway/sql"
            }
          ]
        }
      ],
      "volumes":[
        {
          "name":"sql",
          "configMap":{
            "name":"'$CONFIGMAP_NAME'"
          }
        }
      ]
    }
  }
  '

  kubectl -n $namespace delete configmap $CONFIGMAP_NAME
}
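init_migrate_db is driven entirely by environment variables plus the deploy/*.yaml convention above. A hypothetical invocation for an application named order (it would then read .orderconfig.postgres.* from deploy/{common,testing,testing-secrets}.yaml):

APPLICATION=order \
ENVIRONMENT=testing \
MIGRATION_SQL_PATH=src/main/resources/db/migration \
  init_migrate_db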
#!/usr/bin/env bash
flyway_repair() {
  set -e
  check_env_vars 4 "APPLICATION" "ENVIRONMENT" "FLYWAY_VERSION" "MIGRATION_SQL_PATH"

  PG_YAML_PATH=".${APPLICATION}config.postgres"

  DB_PORT="5432"
  DB_HOST=$(get_yaml_variable "${PG_YAML_PATH}.host")
  DB_DATABASE=$(get_yaml_variable "${PG_YAML_PATH}.database")
  DB_USER=$(get_yaml_variable "${PG_YAML_PATH}.user")
  DB_PASSWORD=$(get_yaml_variable "${PG_YAML_PATH}.password")
  DB_URL="jdbc:postgresql://${DB_HOST}:${DB_PORT}/${DB_DATABASE}"

  flyway_sql_folder=$(pwd)/${MIGRATION_SQL_PATH}

  configure_kubectl_for "${ENVIRONMENT}"
  POD_NAME="${APPLICATION}-flyway-repair"
  CONFIGMAP_NAME="${APPLICATION}-flyway-repair-sql"

  kubectl -n "${ENVIRONMENT}" delete configmap $CONFIGMAP_NAME --ignore-not-found
  kubectl -n "${ENVIRONMENT}" delete pod $POD_NAME --ignore-not-found
  kubectl -n "${ENVIRONMENT}" create configmap $CONFIGMAP_NAME --from-file="${flyway_sql_folder}"

  kubectl -n "${ENVIRONMENT}" run --rm -it "${POD_NAME}" \
    --image=flyway/flyway \
    --restart=Never \
    --overrides='
  {
    "spec":{
      "containers":[
        {
          "name":"'$POD_NAME'",
          "image":"flyway/flyway:'${FLYWAY_VERSION}'",
          "command":["flyway", "-url='$DB_URL'", "-user='$DB_USER'", "-password='$DB_PASSWORD'", "repair"],
          "volumeMounts":[
            {
              "name":"sql",
              "mountPath":"/flyway/sql"
            }
          ]
        }
      ],
      "volumes":[
        {
          "name":"sql",
          "configMap":{
            "name":"'$CONFIGMAP_NAME'"
          }
        }
      ]
    }
  }
  '
  kubectl -n "${ENVIRONMENT}" delete configmap $CONFIGMAP_NAME
}
#!/usr/bin/env bash

record_git_commit() {
  for file in $GIT_COMMIT_FILES; do
    sed -i 's&GIT_COMMIT&'"${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHORT_SHA}&" "$file"
  done
}

gitlab_import_pgp_key() {
  if [ "$GITLAB_PGP_PRIVATE_KEY" != "" ]
  then
    echo "$GITLAB_PGP_PRIVATE_KEY" | gpg --import > /dev/null
  else
    echo '$GITLAB_PGP_PRIVATE_KEY is not set'
    return 1
  fi
}

git_reveal() {
  gitlab_import_pgp_key
  gpg --decrypt $1
}
#!/usr/bin/env bash

helm_deploy_v3() {
  APPLICATION=$1
  ENVIRONMENT=$2
  VERSION=$3
  deploy_chart_v3 \
    --path_configs deploy \
    --path_chart deploy/$APPLICATION \
    --application $APPLICATION \
    --environment $ENVIRONMENT \
    --namespace $ENVIRONMENT \
    --helm_extra_args --set global.version=$VERSION
}

deploy_chart_v3() {
  set -e
  set -x

  # Rigid parsing, but all args are mandatory (except the last) and flexible order is unnecessary
  check_args "--path_configs" $1; shift
  path_configs=$1; shift
  check_args "--path_chart" $1; shift
  path_chart=$1; shift
  check_args "--application" $1; shift
  application=$1; shift
  check_args "--environment" $1; shift
  environment=$1; shift
  check_args "--namespace" $1; shift
  namespace=$1; shift
  if [ $# -ne 0 ]; then
    check_args "--helm_extra_args" $1; shift
    helm_extra_args=$*
  fi

  echo "================================"
  echo " Deploying $application"
  echo " - Environment: $environment"
  echo " - Namespace: $namespace"
  echo "================================"

  root_path=$(pwd)

  # Check the configs exist

  check_config_file ${root_path}/${path_configs}/common.yaml
  check_config_file ${root_path}/${path_configs}/${namespace}.yaml
  check_config_file ${root_path}/${path_configs}/${namespace}-secrets.yaml

  # Check the chart exists
  if [ ! -d ${root_path}/${path_chart} ] || [ ! -f ${root_path}/${path_chart}/Chart.yaml ]; then
    echo "Bad Chart $root_path/$path_chart : does not exist or is missing Chart.yaml"
    print_usage
    exit 1
  fi

  # Unset Kubectl configuration made via the KUBECONFIG env variable:
  # it would override the config made by configure_kubectl_for
  # (for example, using Gitlab runners in Kubernetes sets this variable and causes conflicts)
  unset KUBECONFIG

  # Configure Kubectl
  configure_kubectl_for ${environment}

  # Configure helm3
  helm3 version --namespace ${namespace} || true
  # the helm3 stable repo has changed and must be updated manually in versions < v2.17.0
  helm3 repo add colisweb s3://colisweb-helm-charts/colisweb
  helm3 repo add stable https://charts.helm.sh/stable
  helm3 repo update
  helm3 dependency update ${root_path}/${path_chart}

  # Gather values/*.yaml files
  values_path="${root_path}/${path_chart}/values"
  values_files=''
  [ -d $values_path ] && values_files=$(find $values_path -type f -maxdepth 1 -name "*.yaml" | sed 's/^/ -f /' | tr -d \\n | sed 's/%//')

  # Deploy
  helm3 upgrade --install \
    --namespace ${namespace} \
    ${values_files} \
    -f ${root_path}/${path_configs}/common.yaml \
    -f ${root_path}/${path_configs}/${namespace}.yaml \
    -f ${root_path}/${path_configs}/${namespace}-secrets.yaml \
    ${helm_extra_args} \
    ${application} ${root_path}/${path_chart}

  # send a deploy event to Datadog
  PUBLISHED_VERSION="$CI_COMMIT_REF_NAME-$CI_COMMIT_SHA"
  emit_datadog_deploy_event --environment $environment --service $application --version $PUBLISHED_VERSION

  echo "================================"
  echo " Deployed $application"
  echo " - Environment: $environment"
  echo " - Namespace: $namespace"
  echo "================================"

  set +x
}
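A hedged example of the wrapper above (application name and version are made up). Since later -f flags win in helm, <namespace>-secrets.yaml overrides <namespace>.yaml, which overrides common.yaml:

helm_deploy_v3 my-service testing master-abc1234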
verify_deployments_v3() {
  set -e

  # usage :
  # verify_deployments staging price
  # verify_deployments -t 15m testing price

  if [ "$1" = "-t" ] ; then
    TIMEOUT=$2
    shift
    shift
  else
    TIMEOUT=5m
  fi

  NAMESPACE=$1
  RELEASE=$2

  # Get all Deployment names from the deployed chart
  DEPLOYMENTS=(
    $(helm3 get manifest --namespace $NAMESPACE $RELEASE | yq -rs '.[] | select(.kind=="Deployment") | .metadata.name')
  )

  echo "verifying on $NAMESPACE deployments ${DEPLOYMENTS[@]} with a timeout of $TIMEOUT"

  PIDS=()
  for D in "${DEPLOYMENTS[@]}"; do
    kubectl -n ${NAMESPACE} rollout status deployment ${D} --timeout=${TIMEOUT} &
    PIDS+=($!)
  done

  for P in ${PIDS[@]}; do
    wait $P

    if [ $? -ne 0 ]; then
      echo "at least one deployment failed or timed out (after $TIMEOUT)"
      exit 1
    fi
  done

}
print_usage() {
  echo "Usage:"
  echo "deploy_chart \\"
  echo "  --path_configs <path to .yaml namespaces and secret config files>"
  echo "  --path_chart <path to Helm Chart>"
  echo "  --application <application name used by Helm>"
  echo "  --environment <infrastructure environment>"
  echo "  --namespace <namespace>"
  echo "  --helm_extra_args <extra args to pass to helm, ex: --set my.value=42 --set your.setting=on>"
  echo ""
}

check_config_file() {
  local filename=$1
  if [ ! -f ${filename} ]; then
    echo "Missing $filename configuration file"
    print_usage
    exit 1
  fi
}
notify_new_deployment() {
  jq --version || (apt update && apt install -y jq)

  CHAT_URL=${1:-$DEFAULT_CHAT_URL}

  STATUS=$(echo $CI_JOB_STATUS | tr '[:lower:]' '[:upper:]' )
  ENV_NAME=$(echo $ENVIRONMENT | tr '[:lower:]' '[:upper:]' )

  JOB_LINK="<$CI_JOB_URL| $CI_JOB_NAME $CI_JOB_ID>"

  DESCRIPTION="
  $STATUS : Deployment for $CI_PROJECT_NAME on $ENV_NAME
  $JOB_LINK
  $CI_COMMIT_TITLE
  "

  JSON_MESSAGE=$(jq -n --arg text "$DESCRIPTION" '{text: $text }')
  curl -X POST $CHAT_URL \
    --header "Content-Type: application/json" \
    --data "$JSON_MESSAGE"
}
notify_new_version() {

  ! test -z $CI_COMMIT_TAG || exit 0

  jq --version || (apt update && apt install -y jq)

  KIND=$1
  CHAT_URL=${2:-$DEFAULT_CHAT_URL}

  STATUS=$(echo $CI_JOB_STATUS | tr '[:lower:]' '[:upper:]' )
  ENV_NAME=$(echo $ENVIRONMENT | tr '[:lower:]' '[:upper:]' )
  TITLE="$ENV_NAME *$STATUS* $KIND for version *$CI_COMMIT_TAG* of *$CI_PROJECT_NAME* "

  RELEASE_URL="https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/releases/$CI_COMMIT_TAG"

  NOTES=$(curl --header "PRIVATE-TOKEN: $GITLAB_TOKEN" $RELEASE_URL |
    jq .description |
    sed -e 's/^"//' -e 's/"$//' |
    sed -E 's/\[([^]]+)\]\(([^)]+)\)/<\2|\1>/g' |
    sed -E 's/\\n/\'$'\n/g')

  JOB_LINK="<$CI_JOB_URL| $CI_JOB_NAME $CI_JOB_ID>"

  DESCRIPTION="
  $TITLE
  $JOB_LINK
  $NOTES
  "

  JSON_MESSAGE=$(jq -n --arg text "$DESCRIPTION" '{text: $text }')
  curl -X POST $CHAT_URL \
    --header "Content-Type: application/json" \
    --data "$JSON_MESSAGE"
}
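The jq -n --arg pattern used by both notify helpers is what keeps the payload valid: jq JSON-encodes arbitrary multi-line text instead of it being interpolated into a JSON string by hand. For example:

jq -n --arg text 'line1
"quoted" line2' '{text: $text}'   # {"text":"line1\n\"quoted\" line2"}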
#!/usr/bin/env bash

skip_sbt_compile_cache() {
  COMPARED_BRANCH="${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-$CI_DEFAULT_BRANCH}"
  echo "branch to compare to: $COMPARED_BRANCH"
  git fetch origin $COMPARED_BRANCH
  echo "fetched $COMPARED_BRANCH"
  [[ "$CI_COMMIT_REF_NAME" =~ ^(master|develop)$ || $(git diff origin/$COMPARED_BRANCH --exit-code -- project) ]]
}
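The final test succeeds, i.e. the cache step is skipped, on master/develop or when the project/ directory differs from the compared branch: a non-empty $(git diff ...) output counts as true inside [[ ]]. The same check in isolation, using exit codes instead (branch name hypothetical):

git diff --quiet origin/master -- project && echo "project/ unchanged, sbt cache can be reused"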
#!/usr/bin/env bash

# in case of trouble with functions for update history during import:
# https://stackoverflow.com/questions/56729192/pg-restore-fails-when-trying-to-create-function-referencing-table-that-does-not

# example: clone_databases --source_env testing --destination_env recette --services "order,notification,parcel,ikea"
clone_databases() {
  export USERNAME="database-cloner"

  set -e

  extract_args 3 source_env destination_env services $*

  dump_databases "$source_env" "$services"
  import_databases "$destination_env" "$services"
}

dump_databases() {
  local env="$1"
  local services=$(echo -n "$2" | tr ',' '\n')

  database_k8s_output_dump_path="/tmp/database_k8s_output_dump"

  configure_kubectl_for "$env"
  set +e
  database_k8s "$env" > "$database_k8s_output_dump_path"
  set -e

  source_pg_local_port=$(extract_pg_local_port "$database_k8s_output_dump_path")

  for service in $services
  do
    service_path="/tmp/$service"

    set +e
    git clone "git@gitlab.com:colisweb/back/$service.git" "$service_path"
    set -e

    if cd "$service_path"; then
      echo "dump the database for service $service.."

      git secret reveal -f

      PG_YAML_PATH=".${service}config.postgres"

      SOURCE_DB_DATABASE=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.database")
      SOURCE_DB_USER=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.user")
      SOURCE_DB_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.password")

      export PGPASSWORD="$SOURCE_DB_PASSWORD"

      DUMP_PATH="/tmp/db_dump_${service}.sql"
      pg_dump --no-owner -h localhost -p "$source_pg_local_port" -U "$SOURCE_DB_USER" "$SOURCE_DB_DATABASE" > "$DUMP_PATH"

      cd ..
      rm -rf "$service_path"
    else
      echo "WARN: failed to clone $service - skipping"
    fi
  done
}
import_databases() {
  local env="$1"
  local services=$(echo -n "$2" | tr ',' '\n')

  database_k8s_output_import_path="/tmp/database_k8s_output_import"

  configure_kubectl_for "$env"
  set +e
  database_k8s "$env" > "$database_k8s_output_import_path"
  set -e

  destination_pg_local_port=$(extract_pg_local_port "$database_k8s_output_import_path")

  for service in $services
  do
    service_path="/tmp/$service"

    set +e
    git clone "git@gitlab.com:colisweb/back/$service.git" "$service_path"
    set -e

    if cd "$service_path"; then
      echo "create and import database for $service.."

      git secret reveal -f

      PG_YAML_PATH=".${service}config.postgres"

      DB_PORT="5432"
      DB_HOST=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.host")
      DB_INIT_USERNAME=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.initUsername")
      DB_INIT_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.initPassword")
      DB_DATABASE=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.database")
      DB_USER=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.user")
      DB_PASSWORD=$(extract_yaml_config_variable --environment "$env" --configs-path ./deploy --variable "${PG_YAML_PATH}.password")

      kube_init_service_database \
        --namespace ${env} \
        --service ${service} \
        --db_host ${DB_HOST} \
        --db_port ${DB_PORT} \
        --db_init_username ${DB_INIT_USERNAME} \
        --db_init_password ${DB_INIT_PASSWORD} \
        --db_database ${DB_DATABASE} \
        --db_username ${DB_USER} \
        --db_password ${DB_PASSWORD}

      echo "WARN: a complete clean of $DB_DATABASE on $DB_HOST will be performed"
      read -rsn1 -p"Press any key to continue";echo
      flyway_clean "$DB_HOST" "$DB_PORT" "$DB_DATABASE" "$DB_USER" "$DB_PASSWORD"

      DUMP_PATH="/tmp/db_dump_${service}.sql"
      export PGPASSWORD="$DB_PASSWORD"
      set +e
      psql "postgres://$DB_USER@127.0.0.1:$destination_pg_local_port/$DB_DATABASE" -f "$DUMP_PATH"
      set -e

      cd ..
      rm -rf "$service_path"
    else
      echo "WARN: failed to clone $service - skipping"
    fi
  done
}

extract_pg_local_port() {
  cat "$1" | grep 'postgres@127.0.0.1:' | sed 's/.*postgres@127.0.0.1:\(.*[0-9]\).*/\1/g'
}
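extract_pg_local_port scrapes the locally forwarded port out of the database_k8s output, assuming that output contains a connection hint of the form postgres@127.0.0.1:<port>. A quick self-contained check:

echo "psql postgres://postgres@127.0.0.1:54321" > /tmp/demo_port
extract_pg_local_port /tmp/demo_port   # prints 54321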
#!/usr/bin/env bash

emit_datadog_deploy_event() {
  extract_args 3 environment service version $*
  check_env_vars 1 "DD_API_KEY"

  response=$(
    curl -X POST -H "Content-type: application/json" \
      -d '{
        "title": "deploying '"$service"' to '"$environment"'",
        "text": "deploying '"$service"' version '"$version"' to '"$environment"'",
        "priority": "normal",
        "tags": ["service:'"$service"'", "env:'"$environment"'", "action:deployment"],
        "alert_type": "info"
      }' \
      "https://api.datadoghq.com/api/v1/events?api_key=$DD_API_KEY"
  )

  #echo $response
  EventID=$(echo $response | jq ".event.id")
  url=$(echo $response | jq ".event.url")

  if [[ $EventID -ne 0 ]]; then
    echo "event successfully created, check the datadog UI: $url"
  else
    echo "failed to create event"
    exit 1
  fi
}
#!/usr/bin/env bash

# DEPRECATED
emit_datadog_error_events() {
  set -e
  extract_args 4 title text priority environment $*
  check_env_vars 1 "DD_API_KEY"

  curl -X POST -H "Content-type: application/json" \
    -d '{
      "title": "'"$title"'",
      "text": "'"$text"'",
      "priority": "'"$priority"'",
      "tags": ["environment:'"$environment"'"],
      "alert_type": "error"
    }' \
    "https://api.datadoghq.com/api/v1/events?api_key=$DD_API_KEY"
}

#!/usr/bin/env bash
terraform_init() {
  SECTION=$1
  ENV=$2
  cd $SECTION
  terraform init -input=false
  terraform workspace select $ENV || terraform workspace new $ENV
}