@depup/artillery 2.0.30-depup.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90) hide show
  1. package/README.md +63 -0
  2. package/bin/run +29 -0
  3. package/bin/run.cmd +3 -0
  4. package/changes.json +138 -0
  5. package/console-reporter.js +1 -0
  6. package/lib/artillery-global.js +33 -0
  7. package/lib/cli/banner.js +8 -0
  8. package/lib/cli/common-flags.js +80 -0
  9. package/lib/cli/hooks/version.js +20 -0
  10. package/lib/cmds/dino.js +109 -0
  11. package/lib/cmds/quick.js +122 -0
  12. package/lib/cmds/report.js +34 -0
  13. package/lib/cmds/run-aci.js +91 -0
  14. package/lib/cmds/run-fargate.js +192 -0
  15. package/lib/cmds/run-lambda.js +96 -0
  16. package/lib/cmds/run.js +671 -0
  17. package/lib/console-capture.js +92 -0
  18. package/lib/console-reporter.js +438 -0
  19. package/lib/create-bom/built-in-plugins.js +12 -0
  20. package/lib/create-bom/create-bom.js +301 -0
  21. package/lib/dispatcher.js +9 -0
  22. package/lib/dist.js +222 -0
  23. package/lib/index.js +5 -0
  24. package/lib/launch-platform.js +439 -0
  25. package/lib/load-plugins.js +113 -0
  26. package/lib/platform/aws/aws-cloudwatch.js +106 -0
  27. package/lib/platform/aws/aws-create-sqs-queue.js +58 -0
  28. package/lib/platform/aws/aws-ensure-s3-bucket-exists.js +78 -0
  29. package/lib/platform/aws/aws-get-account-id.js +26 -0
  30. package/lib/platform/aws/aws-get-bucket-region.js +18 -0
  31. package/lib/platform/aws/aws-get-credentials.js +28 -0
  32. package/lib/platform/aws/aws-get-default-region.js +26 -0
  33. package/lib/platform/aws/aws-whoami.js +15 -0
  34. package/lib/platform/aws/constants.js +7 -0
  35. package/lib/platform/aws/iam-cf-templates/aws-iam-fargate-cf-template.yml +219 -0
  36. package/lib/platform/aws/iam-cf-templates/aws-iam-lambda-cf-template.yml +125 -0
  37. package/lib/platform/aws/iam-cf-templates/gh-oidc-fargate.yml +241 -0
  38. package/lib/platform/aws/iam-cf-templates/gh-oidc-lambda.yml +153 -0
  39. package/lib/platform/aws-ecs/ecs.js +247 -0
  40. package/lib/platform/aws-ecs/legacy/aws-util.js +134 -0
  41. package/lib/platform/aws-ecs/legacy/bom.js +528 -0
  42. package/lib/platform/aws-ecs/legacy/constants.js +27 -0
  43. package/lib/platform/aws-ecs/legacy/create-s3-client.js +24 -0
  44. package/lib/platform/aws-ecs/legacy/create-test.js +247 -0
  45. package/lib/platform/aws-ecs/legacy/errors.js +34 -0
  46. package/lib/platform/aws-ecs/legacy/find-public-subnets.js +149 -0
  47. package/lib/platform/aws-ecs/legacy/plugins/artillery-plugin-inspect-script/index.js +27 -0
  48. package/lib/platform/aws-ecs/legacy/plugins/artillery-plugin-sqs-reporter/azure-aqs.js +80 -0
  49. package/lib/platform/aws-ecs/legacy/plugins/artillery-plugin-sqs-reporter/index.js +202 -0
  50. package/lib/platform/aws-ecs/legacy/plugins.js +16 -0
  51. package/lib/platform/aws-ecs/legacy/run-cluster.js +1994 -0
  52. package/lib/platform/aws-ecs/legacy/sqs-reporter.js +401 -0
  53. package/lib/platform/aws-ecs/legacy/tags.js +22 -0
  54. package/lib/platform/aws-ecs/legacy/test-run-status.js +9 -0
  55. package/lib/platform/aws-ecs/legacy/time.js +67 -0
  56. package/lib/platform/aws-ecs/legacy/util.js +97 -0
  57. package/lib/platform/aws-ecs/worker/Dockerfile +64 -0
  58. package/lib/platform/aws-ecs/worker/helpers.sh +80 -0
  59. package/lib/platform/aws-ecs/worker/loadgen-worker +656 -0
  60. package/lib/platform/aws-lambda/dependencies.js +130 -0
  61. package/lib/platform/aws-lambda/index.js +734 -0
  62. package/lib/platform/aws-lambda/lambda-handler/a9-handler-dependencies.js +73 -0
  63. package/lib/platform/aws-lambda/lambda-handler/a9-handler-helpers.js +43 -0
  64. package/lib/platform/aws-lambda/lambda-handler/a9-handler-index.js +235 -0
  65. package/lib/platform/aws-lambda/lambda-handler/package.json +15 -0
  66. package/lib/platform/aws-lambda/prices.js +29 -0
  67. package/lib/platform/az/aci.js +694 -0
  68. package/lib/platform/az/aqs-queue-consumer.js +88 -0
  69. package/lib/platform/az/regions.js +52 -0
  70. package/lib/platform/cloud/api.js +72 -0
  71. package/lib/platform/cloud/cloud.js +448 -0
  72. package/lib/platform/cloud/http-client.js +19 -0
  73. package/lib/platform/local/artillery-worker-local.js +154 -0
  74. package/lib/platform/local/index.js +174 -0
  75. package/lib/platform/local/worker.js +261 -0
  76. package/lib/platform/worker-states.js +13 -0
  77. package/lib/queue-consumer/index.js +56 -0
  78. package/lib/stash.js +41 -0
  79. package/lib/telemetry.js +78 -0
  80. package/lib/util/await-on-ee.js +24 -0
  81. package/lib/util/generate-id.js +9 -0
  82. package/lib/util/parse-tag-string.js +21 -0
  83. package/lib/util/prepare-test-execution-plan.js +216 -0
  84. package/lib/util/sleep.js +7 -0
  85. package/lib/util/validate-script.js +132 -0
  86. package/lib/util.js +294 -0
  87. package/lib/utils-config.js +31 -0
  88. package/package.json +323 -0
  89. package/types.d.ts +317 -0
  90. package/util.js +1 -0
@@ -0,0 +1,656 @@
1
#!/usr/bin/env bash
#
# Artillery loadgen worker entrypoint: syncs test data from S3 / Azure Blob
# Storage, installs npm dependencies, waits for the "go" signal, runs the
# Artillery CLI and reports status over SQS / Azure Queue Storage.

set -euo pipefail
IFS=$'\n\t'

DEBUGX=${DEBUGX:-""}
DEBUG=${DEBUG:-""}

# DEBUGX enables full command tracing
if [[ -n $DEBUGX ]] ; then
  set -x
fi

# shellcheck disable=2155
declare -r DIR=$(cd "$(dirname "$0")" && pwd)

source "$DIR/helpers.sh"

# Exit codes reported back to the controller.
# NOTE(review): ERR_ARGS and ERR_DEP_INSTALL are both 10, so the controller
# cannot distinguish those two failures. Left unchanged because exit codes
# are an external contract; flag for the next major version.
declare -r ERR_ARGS=10
declare -r ERR_TEST_DIR_EMPTY=3
declare -r ERR_SIGNAL_SYNC=4
declare -r ERR_GO_TIMEOUT=5
declare -r ERR_CLI_ERROR=6
declare -r ERR_INTERRUPTED=7
declare -r ERR_UNKNOWN_PLATFORM=8
declare -r ERR_DEP=9
declare -r ERR_DEP_INSTALL=10 # npm install / yarn install failed
declare -r ERR_NO_LICENSE=11
declare -r ERR_CLI_ERROR_EXPECT=21

ERR_EXTRA_INFO=""

# shellcheck disable=2155
declare -r TEST_DATA="$(pwd)/test_data"

# Seconds to wait for the go signal / pre-packed modules before giving up
WAIT_TIMEOUT=${WAIT_TIMEOUT:-600}

# was: 'declare -t' (trace attribute) -- almost certainly a typo; -i makes
# the intent (integer exit code) explicit
declare -i EXIT_CODE=0

CLI_RUNNING="no"
CLI_STATUS=
CLI_PID=

CLEANING_UP="no"

is_azure=
azure_storage_container_name=

s3_test_data_path=
cli_args=()
cli_args_encoded=
aws_region=
sqs_queue_url=
test_run_id=
s3_run_data_base_path=
s3_run_data_path=

# This is set once we know if we're on Azure or AWS
worker_id=
is_leader=${IS_LEADER:-false} # true or false

declare -r DEPENDENCIES=(jq aws az pwgen node npm yarn tree)
65
+
66
# Route a status message to the platform-appropriate queue.
# $1 - body of the message (string)
# $2 - type of the message: debug, leader, ensure
send_message () {
  local msg_body="$1"
  local msg_type="$2"

  case "$is_azure" in
    yes) send_message_aqs "$msg_body" "$msg_type" ;;
    *)   send_message_sqs "$msg_body" "$msg_type" ;;
  esac
}
76
+
77
# Best-effort put of a message onto the Azure Storage queue; failures are
# swallowed (|| true, errexit/pipefail suspended) so reporting can never
# kill the worker.
send_message_aqs () {
  set +e
  set +o pipefail

  local msg="$1"
  local msg_type="$2"

  local payload="{\"msg\":\"$msg\",\"type\":\"$msg_type\"}"
  local attributes="{\"testId\": \"${test_run_id}\", \"workerId\": \"${worker_id}\"}"

  az storage message put \
    --content "{ \"payload\": $payload, \"attributes\": $attributes }" \
    --queue-name "$AQS_QUEUE_NAME" \
    --account-name "$AZURE_STORAGE_ACCOUNT" >/dev/null || true

  set -e
  set -o pipefail
}
95
+
96
# Best-effort send of a status message to the SQS FIFO queue, tagged with the
# test and worker IDs. Failures are swallowed so reporting can never kill the
# worker.
send_message_sqs () {
  set +e
  set +o pipefail

  local msg="$1"
  local msg_type="$2"

  local message_body="{\"msg\":\"$msg\",\"type\":\"$msg_type\"}"
  local message_attributes="{\"testId\": {\"StringValue\": \"${test_run_id}\", \"DataType\": \"String\"}, \"workerId\": {\"StringValue\": \"${worker_id}\", \"DataType\": \"String\"}}"

  aws sqs send-message \
    --queue-url "${sqs_queue_url}" \
    --message-body "$message_body" \
    --message-attributes "$message_attributes" \
    --message-group-id "${test_run_id}" \
    --message-deduplication-id "$(pwgen -A 32 1)" \
    --region "$aws_region" >/dev/null || true

  set -e
  set -o pipefail
}
117
+
118
# Dispatch a structured event payload (JSON string) to the platform queue.
# errexit/pipefail are suspended for the duration so a queue hiccup does not
# abort the worker.
send_event () {
  set +e
  set +o pipefail

  local event_payload="$1"

  case "$is_azure" in
    yes) send_event_aqs "$event_payload" ;;
    *)   send_event_sqs "$event_payload" ;;
  esac

  set -e
  set -o pipefail
}
133
+
134
# Send a raw event payload to SQS. Unlike send_message_sqs there is no
# '|| true' here -- callers (send_event) manage errexit themselves.
send_event_sqs () {
  local event_payload="$1"

  local attrs="{\"testId\": {\"StringValue\": \"${test_run_id}\", \"DataType\": \"String\"}, \"workerId\": {\"StringValue\": \"${worker_id}\", \"DataType\": \"String\"}}"

  aws sqs send-message \
    --queue-url "${sqs_queue_url}" \
    --message-body "$event_payload" \
    --message-attributes "$attrs" \
    --message-group-id "${test_run_id}" \
    --message-deduplication-id "$(pwgen -A 32 1)" \
    --region "$aws_region" >/dev/null
}
147
+
148
# Put a raw event payload onto the Azure Storage queue, wrapped with the
# test/worker attributes the consumer expects.
send_event_aqs () {
  local event_payload="$1"

  local attrs="{\"testId\": \"${test_run_id}\", \"workerId\": \"${worker_id}\"}"

  az storage message put \
    --content "{ \"payload\": $event_payload, \"attributes\": $attrs }" \
    --queue-name "$AQS_QUEUE_NAME" \
    --account-name "$AZURE_STORAGE_ACCOUNT" >/dev/null
}
158
+
159
+
160
+
161
# Install extra npm modules listed in metadata.json, plus whatever the test's
# own package.json / yarn.lock declares.
# Reads:  $METADATA_FILE (set by caller install_dependencies), $TEST_DATA
# On npm failure sets EXIT_CODE=ERR_DEP_INSTALL and exits (the EXIT trap
# reports the code to the controller).
install_npm_dependencies () {
  if [[ $(jq -r .modules "$METADATA_FILE") != "null" ]] ; then
    echo "Installing required npm dependencies"
    local dep
    for dep in $(jq -r '.modules[]' "$METADATA_FILE") ; do
      echo "installing $dep"
      npm install --quiet "$dep"
    done
  else
    echo "No extra npm modules to install"
  fi

  if [[ -f "$TEST_DATA/package.json" ]] ; then
    echo "Installing dependencies in package.json"
    if [[ -f "$TEST_DATA/yarn.lock" ]] ; then
      # TODO: Test yarn's exit code
      yarn install
    else
      set +e
      npm install --loglevel=silent
      local npm_status=$?
      set -e

      # Fail on a non-zero npm exit code too -- previously only the presence
      # of npm-debug.log was checked, which silently missed failures on npm
      # versions that do not write that file.
      if [[ -f "npm-debug.log" || $npm_status -ne 0 ]] ; then
        [[ -f "npm-debug.log" ]] && cat npm-debug.log
        EXIT_CODE="$ERR_DEP_INSTALL"
        exit
      else
        echo "npm install completed"
      fi
    fi
  else
    # No package.json shipped with the test: create a minimal one so later
    # npm operations have a project root
    npm init -y --quiet
  fi
}
194
+
195
# Verify that every binary listed in DEPENDENCIES is on $PATH.
# Exits with ERR_DEP on the first missing tool. The original's set +e/-e
# toggling was dropped: 'command -v' inside an if-condition never trips
# errexit anyway.
check_dependencies () {
  local dep
  for dep in "${DEPENDENCIES[@]}" ; do
    if ! command -v "$dep" > /dev/null ; then
      echo "Error: could not find $dep in \$PATH. Please install $dep."
      exit $ERR_DEP
    fi
  done
}
205
+
206
# Pull the test bundle into $TEST_DATA from the platform's object store.
# NOTE: leaves the shell cd'ed into $TEST_DATA (pushd without popd), which
# later steps rely on.
sync_test_data () {
  mkdir "$TEST_DATA" || true
  pushd "$TEST_DATA" >/dev/null

  echo "is_azure: $is_azure"

  if [[ "$is_azure" != "yes" ]] ; then
    sync_test_data_s3
  else
    sync_test_data_azure
  fi

  debug "$(pwd)"
  debug "$(ls -a)"
}
221
+
222
# Download the test bundle from Azure Blob Storage.
# download-batch recreates the tests/$test_run_id/ prefix locally, so the
# files are moved up to the current directory via a temp dir.
sync_test_data_azure () {
  # TODO: Exclude node_modules_stream.zip
  az storage blob download-batch -d . --account-name "$AZURE_STORAGE_ACCOUNT" -s "$azure_storage_container_name" --pattern "tests/$test_run_id/*"
  # split declaration from assignment so a mktemp failure is not masked
  local tmpdir
  tmpdir="$(mktemp -d)"
  set +e
  # {.,}* also matches dotfiles; $test_run_id and $tmpdir are now quoted to
  # guard against word-splitting (previously unquoted)
  mv "tests/$test_run_id"/{.,}* "$tmpdir"
  # :? aborts rather than running 'rm -rf tests/' if test_run_id is empty
  rm -rf "tests/${test_run_id:?}"
  mv "$tmpdir"/{.,}* .
  set -e
}
234
+
235
# Quietly mirror the S3 test-data prefix into the current directory,
# skipping the streaming modules archive.
sync_test_data_s3 () {
  >/dev/null aws s3 sync --exclude node_modules_stream.zip "$s3_test_data_path" .
}
238
+
239
# Sanity check: the synced test directory must contain at least one entry.
# Sets EXIT_CODE=ERR_TEST_DIR_EMPTY and exits (via the EXIT trap) when empty.
check_test_data () {
  local file_count
  # '|| true' because 'grep -c' exits 1 on zero matches, which would trip
  # set -e before the empty-dir branch below could ever run
  file_count=$(find . -maxdepth 1 -name "*" | grep -v '^.$' -c || true)
  if [[ ! $file_count -gt 0 ]]; then
    echo "$TEST_DATA seems to be empty"
    EXIT_CODE=$ERR_TEST_DIR_EMPTY
    exit
  fi
}
247
+
248
# Prepare node_modules for the test run. The leader installs and zips the
# modules, then publishes node_modules.zip for the group; followers wait for
# the archive to appear and unzip it.
# Side effects: cd's into $TEST_DATA (pushd without popd), extends NODE_PATH,
# appends to ~/.npmrc.
install_dependencies () {
  pushd "$TEST_DATA" >/dev/null

  local METADATA_FILE="metadata.json"

  debug "$(cat $METADATA_FILE || true)"

  # Needed to install all packages to the dir of the test files.
  export NODE_PATH="$TEST_DATA:${NODE_PATH:-""}"

  generate_npmrc >> ~/.npmrc

  if [[ "$is_leader" = "true" ]] ; then
    # Leader: pre-install modules for everyone else
    send_message "leader npm pack start $(date +%s)" "debug"
    install_npm_dependencies

    # Always ship a node_modules dir, even an empty one, so followers can unzip
    if [[ ! -d "node_modules" ]] ; then
      mkdir node_modules
      touch node_modules/.artillery
    fi

    zip -r -q node_modules.zip node_modules
    echo "Modules pre-packaged"

    if [[ "$is_azure" = "yes" ]] ; then
      az storage blob upload --overwrite --account-name "$AZURE_STORAGE_ACCOUNT" --container-name "$azure_storage_container_name" --file node_modules.zip --name "tests/$test_run_id/node_modules.zip"
    else
      aws s3 cp node_modules.zip "$s3_test_data_path/node_modules.zip"
    fi

    send_message "leader npm prepack end $(date +%s)" "debug"
    send_message "prepack_end" "leader"
  else
    # Follower: wait until node_modules.zip is available and unzip, or time out
    # TODO: use aws s3api wait object-exists with a custom timeout
    send_message "follower npm prepack wait start $(date +%s)" "debug"

    local modules_archive="$s3_test_data_path/node_modules.zip"
    if [[ "$is_azure" = "yes" ]] ; then
      modules_archive="tests/$test_run_id/node_modules.zip"
    fi
    wait_for_go "$modules_archive"
    unzip -o -q node_modules.zip
    send_message "follower npm prepack wait end $(date +%s)" "debug"
  fi

  tree -I node_modules
}
299
+
300
# Upload a per-worker "synced" marker so the controller knows this worker is
# ready; exits with ERR_SIGNAL_SYNC if the upload fails.
signal_ready () {
  local synced_filename="synced_${worker_id}.json"
  # '>' (was '>>'): re-running must not append a second JSON object to the
  # marker file
  echo "{ \"worker_id\": \"${worker_id}\" }" > "$synced_filename"
  local synced_dest=
  local cp_status=0

  if [[ "$is_azure" = "yes" ]] ; then

    send_event "{\"event\": \"workerReady\"}"

    synced_dest="${azure_storage_container_name}/$synced_filename"
    # '|| cp_status=$?' -- under set -e a plain failure would terminate the
    # script before cp_status was examined, making the error branch below
    # unreachable
    az storage blob upload --overwrite --account-name "$AZURE_STORAGE_ACCOUNT" --container-name "$azure_storage_container_name" --file "$synced_filename" --name "tests/$test_run_id/$synced_filename" || cp_status=$?
  else
    synced_dest="${s3_run_data_path}/${synced_filename}"
    aws s3 cp "$synced_filename" "$synced_dest" 1>/dev/null 2>/dev/null || cp_status=$?
  fi

  if [[ $cp_status -ne 0 ]]; then
    echo "could not send synced signal (to: $synced_dest)"
    EXIT_CODE=$ERR_SIGNAL_SYNC
    exit
  else
    echo "Worker $worker_id synced up & ready"
  fi
}
327
+
328
# Poll the object store until the given object (default: go.json for this
# test run) can be fetched, sleeping 2s between attempts. Exits with
# ERR_GO_TIMEOUT once WAIT_TIMEOUT seconds have elapsed.
# $1 - optional object path/key to wait for
wait_for_go () {
  local poll_interval=2
  local waited=0
  local objpath=

  if [[ "$is_azure" = "yes" ]] ; then
    objpath="${1:-tests/$test_run_id/go.json}"
  else
    objpath="${1:-$s3_run_data_path/go.json}"
  fi

  echo "Waiting... ($objpath)"

  while true ; do
    local fetch_status=0
    set +e
    if [[ "$is_azure" = "yes" ]] ; then
      az storage blob download --account-name "$AZURE_STORAGE_ACCOUNT" --container-name "$azure_storage_container_name" --name "$objpath" --file "$(basename $objpath)" 1>/dev/null 2>/dev/null
    else
      aws s3 cp "$objpath" . 1>/dev/null 2>/dev/null
    fi
    fetch_status=$?
    set -e

    if [[ $fetch_status -eq 0 ]]; then
      break
    fi

    if [[ $waited -ge $WAIT_TIMEOUT ]]; then
      echo "Timed out waiting for go signal"
      EXIT_CODE=$ERR_GO_TIMEOUT
      exit
    fi

    echo -n "."
    sleep $poll_interval
    (( waited = waited + poll_interval ))
  done
}
369
+
370
# Best-effort notification (Azure queue) that no license was found; never
# fails the caller.
send_no_license_message () {
  set +e

  az storage message put \
    --content "{\"payload\":{\"event\":\"workerError\",\"reason\":\"License not found - https://docs.art/az/license\", \"exitCode\":$ERR_NO_LICENSE},\"attributes\":{\"testId\": \"${test_run_id}\", \"workerId\": \"${worker_id}\"}}" \
    --queue-name "$AQS_QUEUE_NAME" \
    --account-name "$AZURE_STORAGE_ACCOUNT" || true

  set -e
}
380
+
381
# Decode the base64-encoded JSON array of CLI arguments into the global
# cli_args array. Each element is round-tripped through jq's @base64 so that
# whitespace inside an argument survives the for-loop word-splitting.
decode_cli_args () {
  debug "encoded args $cli_args_encoded"
  local decoded_args=
  decoded_args=$(echo "$cli_args_encoded" | base64d)
  debug "decoded: $decoded_args"

  # reuse the already-decoded JSON rather than base64-decoding the blob a
  # second time (as the original did)
  local an_arg
  for an_arg in $(printf '%s' "$decoded_args" | jq -r '.[] | @base64') ; do
    local decoded_arg=
    decoded_arg="$(printf -- "%s" "$an_arg" | base64d)"
    # was: debug "decoded CLI arg: %s" "$decoded_arg" -- a printf-style
    # format that debug never interpolates; interpolate directly instead
    debug "decoded CLI arg: $decoded_arg"
    cli_args+=("$decoded_arg")
  done
}
394
+
395
# Launch the Artillery CLI with the decoded args, wait for it to finish,
# forward the ensure spec (if any) to the controller, upload the worker log
# (AWS only) and exit with a normalized status code via EXIT_CODE.
run_a9 () {
  # NOTE: node_modules is required for plugins to be loaded
  export NODE_PATH="$TEST_DATA/node_modules:${NODE_PATH:-""}"
  export DEBUG=${DEBUG:-"debug:mode:off"} # can set via --launch-config if needed

  export ARTILLERY_PLUGIN_PATH=${ARTILLERY_PLUGIN_PATH:-""}:/artillery/packages/artillery/lib/platform/aws-ecs/legacy/plugins

  export ARTILLERY_PLUGINS="{\"sqs-reporter\":{\"region\": \"${aws_region}\"},\"inspect-script\":{}}"
  export SQS_TAGS="[{\"key\": \"testId\", \"value\": \"${test_run_id}\"},{\"key\":\"workerId\", \"value\":\"${worker_id}\"}]"

  if [[ "$is_azure" = "yes" ]] ; then
    export AZURE_STORAGE_QUEUE_URL=$sqs_queue_url
  else
    export SQS_QUEUE_URL=$sqs_queue_url
    export SQS_REGION=$aws_region
  fi

  export ARTILLERY_DISABLE_ENSURE=true

  debug "CLI args:"
  debug "${cli_args[@]}"

  # set max header size to 32KB -- solves the HPE_HEADER_OVERFLOW error
  # set max old space size to 12GB - max allocatable on Fargate
  MAX_OLD_SPACE_SIZE=${MAX_OLD_SPACE_SIZE:-12288}
  export NODE_OPTIONS="--max-http-header-size=32768 --max-old-space-size=$MAX_OLD_SPACE_SIZE ${NODE_OPTIONS:-""}"

  # Run the CLI in the background; its exit status is persisted to ./exitCode
  # because the pipeline's own status is tee's, not artillery's.
  (set +eu ; ${ARTILLERY_BINARY:-"artillery"} "${cli_args[@]}" ; echo $? > exitCode ; set -eu) | tee output.txt &
  debug "node processes:"
  debug "$(pgrep -lfa node)"
  sleep 5
  # take the first matching PID only -- pgrep may list several node processes
  # (previously all matches were captured, corrupting the kill/poll below)
  CLI_PID=$(pgrep -lfa node | grep artillery | awk 'NR==1 {print $1}')
  CLI_RUNNING="yes"

  debug "CLI pid:"
  debug "$CLI_PID"

  while kill -0 "$CLI_PID" 2> /dev/null ; do
    sleep 5 # signal handler will fire after we wake up
  done

  CLI_RUNNING="no"
  CLI_STATUS=$(cat exitCode)

  printf "Finished with code %s\n" "$CLI_STATUS"

  # grep exit status: 0 = ensure spec present, 1 = absent, >1 = error.
  # Capture it explicitly instead of the original backticks-in-case idiom.
  local ensure_grep_status=0
  grep -q "inspect-script.config.ensure" "output.txt" || ensure_grep_status=$?
  case $ensure_grep_status in
    0)
      # ensure spec found
      echo "got ensure spec"
      local ensure_spec
      ensure_spec=$(grep 'inspect-script.config.ensure' "output.txt" | awk -F 'ensure=' '{print $2}' | head -n 1)
      send_message "$ensure_spec" "ensure"
      ;;
    1)
      # no ensure spec
      echo "no ensure spec" >&2
      ;;
    *)
      # error - ignore
      echo "error while looking for ensure spec, ignoring" >&2
      ;;
  esac

  # TODO: Upload to Storage Blob if on Azure
  if [[ "$is_azure" != "yes" ]] ; then
    aws s3 cp output.txt "${s3_run_data_path}/worker-log-${worker_id}.txt"
    echo "log: ${s3_run_data_path}/worker-log-${worker_id}.txt"
  fi

  if [[ $CLI_STATUS -eq 0 ]] ; then
    EXIT_CODE=0
  elif [[ $CLI_STATUS -eq $ERR_CLI_ERROR_EXPECT ]] ; then
    EXIT_CODE=$ERR_CLI_ERROR_EXPECT
  else
    EXIT_CODE=$ERR_CLI_ERROR
  fi

  exit $EXIT_CODE
}
474
+
475
# Orchestrate the worker lifecycle: decode args, sync test data, install
# dependencies, signal readiness, wait for the go signal, then run the test.
main () {
  debug "$@"

  decode_cli_args

  s3_run_data_path="${s3_run_data_base_path}/${test_run_id}"

  progress "Test run ID = $test_run_id"
  progress "Syncing test data"
  sync_test_data
  check_test_data

  progress "Installing dependencies"
  install_dependencies

  progress "Ready to run"
  signal_ready

  progress "Waiting for green signal"
  wait_for_go

  progress "Off we go!"
  run_a9
}
496
+
497
# Print a one-line usage summary.
usage () {
  printf 'usage: %s - run worker\n' "$0"
}
502
+
503
# ---- Option parsing and startup (top level) ----
# The leading ':' in the optstring enables getopts' silent error mode so the
# ':' (missing argument) and '\?' (unknown option) arms actually fire --
# previously the optstring had no leading ':' and the ':' arm was unreachable.
while getopts ":z:p:a:r:q:i:d:t:h" OPTION
do
  case $OPTION in
    h)
      usage
      exit 0
      ;;
    z)
      is_azure="$OPTARG"
      ;;
    p)
      s3_test_data_path="$OPTARG"
      ;;
    a)
      cli_args_encoded="$OPTARG"
      ;;
    r)
      aws_region="$OPTARG"
      ;;
    q)
      # Can also be AQS queue URL
      sqs_queue_url="$OPTARG"
      ;;
    i)
      test_run_id="$OPTARG"
      ;;
    d)
      s3_run_data_base_path="$OPTARG"
      ;;
    t)
      WAIT_TIMEOUT="$OPTARG"
      ;;
    :)
      echo "Option -$OPTARG requires an argument" >&2
      exit $ERR_ARGS
      ;;
    \?)
      usage
      exit $ERR_ARGS
      ;;
    *)
      echo "Unimplemented option: -$OPTARG" >&2
      exit 1
      ;;
  esac
done

shift $((OPTIND - 1)) # remove all args processed by getopts

if [[ ! $# -eq 0 ]] ; then
  usage
  EXIT_CODE=$ERR_ARGS
  exit
fi

if [[ -z $s3_test_data_path || -z $cli_args_encoded || -z $test_run_id ]] ; then
  echo "Some required argument(s) not provided, aborting" >&2
  EXIT_CODE=$ERR_ARGS
  exit
fi

if [[ "$is_azure" = "yes" ]] ; then
  # Remap for convenience
  azure_storage_container_name="$s3_test_data_path"

  # Credentials are now quoted -- previously unquoted, which would break on
  # values containing shell metacharacters
  az login --service-principal -u "$AZURE_CLIENT_ID" -p "$AZURE_CLIENT_SECRET" --tenant "$AZURE_TENANT_ID"
fi

if [[ "$is_azure" != "yes" ]] ; then
  # On ECS/Fargate, use the task ID from the metadata endpoint as the worker ID
  taskArn=$(curl -s "$ECS_CONTAINER_METADATA_URI_V4/task" \
    | jq -r ".TaskARN" \
    | cut -d "/" -f 3)
fi

worker_id=${WORKER_ID_OVERRIDE:-$(pwgen -A 12 1)}
worker_id=${taskArn:-$worker_id}
# make available to Artillery custom scripts/environment
export WORKER_ID="$worker_id"

progress "============================"
progress "Worker starting up, ID = $worker_id, version = ${WORKER_VERSION:-unknown}, leader = $is_leader"
progress "============================"
581
+
582
# Signal/exit handler: deletes the test's Azure blobs (leader only), stops a
# still-running CLI process, and reports workerDone / workerError before
# exiting with EXIT_CODE. Idempotent via CLEANING_UP (EXIT always fires after
# INT/TERM).
# $1 - name of the signal that triggered the handler (INT, TERM or EXIT)
cleanup () {
  local sig="$1"

  debug "cleanup called, signal:"
  debug "$sig"

  if [[ $CLEANING_UP = "no" ]] ; then
    CLEANING_UP="yes"

    if [[ "$is_azure" = "yes" ]] ; then
      if [[ "$is_leader" = "true" ]] ; then
        if [[ -z "${AZURE_RETAIN_BLOBS:-""}" ]] ; then
          # This exits with 0 regardless of whether the pattern matches any
          # blobs or not so it's OK to run this multiple times
          az storage blob delete-batch \
            --account-name "$AZURE_STORAGE_ACCOUNT" \
            -s "$azure_storage_container_name" \
            --pattern "tests/$test_run_id/*"
        fi
      fi
    fi

    # Abnormal exit:
    if [[ $CLI_RUNNING = "yes" ]] ; then
      printf "Interrupted with %s, stopping\n" "$sig"
      EXIT_CODE=$ERR_INTERRUPTED
      kill -TERM "$CLI_PID"
      set +e
      timeout 20 tail --pid "$CLI_PID" -f /dev/null
      if [[ $? -eq 124 ]] ; then
        # timeout exits with 124 if the process it's waiting on is still running
        # i.e. if tail is still running it means the Artillery CLI did not exit:
        kill -KILL "$CLI_PID"
        CLI_STATUS=143 # SIGTERM (128 + 15)
      else
        # Preserve the exit code of the CLI
        CLI_STATUS=$(cat exitCode)
      fi
      set -e
      CLI_RUNNING="no"
    fi

    local sqs_message_body=
    if [[ $EXIT_CODE -eq 0 ]] ; then
      sqs_message_body='{"event": "workerDone"}'
    else
      # If 137 then something SIGKILL'ed Artillery
      # was: "$$ERR_EXTRA_INFO" -- '$$' expanded to the shell's PID followed
      # by the literal text 'ERR_EXTRA_INFO' instead of the variable's value.
      # Still unused in the message body; kept for future inclusion.
      local extra_info
      extra_info=$(printf "%s" "$ERR_EXTRA_INFO" | jq -sR)
      sqs_message_body="{\"event\": \"workerError\", \"exitCode\": \"$EXIT_CODE\" }"
    fi

    send_event "$sqs_message_body"

    debug "Message body: $sqs_message_body"
    exit $EXIT_CODE
  else
    if [[ ! $sig = "EXIT" ]] ; then
      # EXIT will always fire after a TERM/INT, so if
      # that's the case we don't need to print this message.
      printf "Received %s but cleaning up already\n" "$sig"
    fi
  fi
}
645
+
646
# Register $1 as the handler for each remaining argument (a signal name),
# arranging for the handler to receive the signal's own name when it fires.
set_trap_with_arg () {
  local handler="$1"
  shift
  local sig
  for sig in "$@" ; do
    # shellcheck disable=2064
    trap "$handler $sig" "$sig"
  done
}
653
+
654
# Ensure cleanup runs on interruption and on normal exit alike.
set_trap_with_arg cleanup INT TERM EXIT

main "$@"