hastci 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.envrc +5 -0
- data/.rspec +3 -0
- data/.standard.yml +7 -0
- data/.zed/settings.json +24 -0
- data/CHANGELOG.md +5 -0
- data/CODE_OF_CONDUCT.md +132 -0
- data/LICENSE.txt +21 -0
- data/README.md +43 -0
- data/Rakefile +17 -0
- data/bin/console +11 -0
- data/bin/hastci-rspec +5 -0
- data/bin/setup +8 -0
- data/devenv.lock +171 -0
- data/devenv.nix +23 -0
- data/devenv.yaml +6 -0
- data/lib/hastci/ack_worker.rb +146 -0
- data/lib/hastci/adapters/rspec/runner.rb +205 -0
- data/lib/hastci/api_client.rb +310 -0
- data/lib/hastci/api_error.rb +13 -0
- data/lib/hastci/claim_result.rb +27 -0
- data/lib/hastci/cli.rb +101 -0
- data/lib/hastci/config.rb +112 -0
- data/lib/hastci/configuration_error.rb +5 -0
- data/lib/hastci/error.rb +5 -0
- data/lib/hastci/error_collector.rb +18 -0
- data/lib/hastci/exit_codes.rb +13 -0
- data/lib/hastci/fatal_api_error.rb +5 -0
- data/lib/hastci/heartbeat.rb +84 -0
- data/lib/hastci/pact.rb +11 -0
- data/lib/hastci/queue_drained.rb +5 -0
- data/lib/hastci/retry_exhausted_error.rb +12 -0
- data/lib/hastci/retryable_error.rb +5 -0
- data/lib/hastci/session.rb +259 -0
- data/lib/hastci/task.rb +23 -0
- data/lib/hastci/task_buffer.rb +207 -0
- data/lib/hastci/task_result.rb +37 -0
- data/lib/hastci/version.rb +7 -0
- data/lib/hastci.rb +35 -0
- data/sig/hastci.rbs +4 -0
- data/spec/pacts/hastci_rspec-hastci_api.json +385 -0
- metadata +112 -0

data/lib/hastci/adapters/rspec/runner.rb
ADDED

@@ -0,0 +1,205 @@

# frozen_string_literal: true

require "pathname"
require "rspec/core"

module HastCI
  module Adapters
    module RSpec
      class Runner
        DEFAULT_CLOCK = -> { Process.clock_gettime(Process::CLOCK_MONOTONIC) }
        private_constant :DEFAULT_CLOCK

        def initialize(
          argv:,
          session:,
          err:,
          out:,
          clock: DEFAULT_CLOCK,
          configuration: ::RSpec.configuration,
          world: ::RSpec.world
        )
          @session = session
          @err = err
          @out = out
          @clock = clock

          @options = ::RSpec::Core::ConfigurationOptions.new(argv)
          @configuration = configuration
          @world = world
        end

        def run
          ::RSpec::Core::Runner.disable_autorun!
          setup
          return @configuration.reporter.exit_early(exit_code) if @world.wants_to_quit

          run_specs(@world.ordered_example_groups).tap do
            persist_example_statuses
          end
        end

        private

        def setup
          configure
          return if @world.wants_to_quit

          @configuration.load_spec_files
        ensure
          @world.announce_filters
        end

        def configure
          @configuration.error_stream = @err
          @configuration.output_stream = @out if @configuration.output_stream == $stdout
          @options.configure(@configuration)
        end

        def exit_code(examples_passed = false)
          return @configuration.error_exit_code || @configuration.failure_exit_code if @world.non_example_failure
          return @configuration.failure_exit_code unless examples_passed

          0
        end

        def persist_example_statuses
          return if @configuration.dry_run
          return unless (path = @configuration.example_status_persistence_file_path)

          ::RSpec::Core::ExampleStatusPersister.persist(@world.all_examples, path)
        rescue SystemCallError => e
          ::RSpec.warning "Could not write example statuses to #{path} (configured as " \
            "`config.example_status_persistence_file_path`) due to a " \
            "system error: #{e.inspect}. Please check that the config " \
            "option is set to an accessible, valid file path", call_site: nil
        end

        def run_specs(example_groups)
          examples_count = @world.example_count(example_groups)
          examples_passed = @configuration.reporter.report(examples_count) do |reporter|
            @configuration.with_suite_hooks do
              return @configuration.failure_exit_code if examples_count == 0 && @configuration.fail_if_no_examples

              seed_if_needed(example_groups)
              run_tasks(example_groups, reporter)
            end
          end

          exit_code(examples_passed)
        end

        def seed_if_needed(example_groups)
          return unless @session.seeder?

          @session.seed(tasks: ordered_task_names(example_groups))
        end

        def run_tasks(example_groups, reporter)
          groups_by_file = group_example_groups_by_file(example_groups)
          all_passed = true
          tasks_completed = 0

          @session.each_task do |task|
            break if stop_if_quitting!

            result = run_task(task, groups_by_file, reporter)
            @session.ack(result)
            all_passed &&= result.passed?
            tasks_completed += 1

            HastCI.logger.debug("[HastCI] Task done: #{task.name} (#{result.status}, #{result.duration_s.round(2)}s)")

            break if stop_if_quitting!
          end

          HastCI.logger.info("[HastCI] Completed #{tasks_completed} tasks")
          all_passed
        end

        def run_task(task, groups_by_file, reporter)
          groups = groups_by_file.fetch(task.name) do
            available_files = groups_by_file.keys.sort.first(10).join(", ")
            available_suffix = (groups_by_file.size > 10) ? " (and #{groups_by_file.size - 10} more)" : ""
            raise HastCI::Error,
              "Unknown spec file: #{task.name}. " \
              "Available: #{available_files}#{available_suffix}. " \
              "This may indicate a mismatch between seeded tasks and loaded specs."
          end

          before_failed = reporter.failed_examples.length
          start_time = @clock.call

          group_passed = groups.map { |group| group.run(reporter) }.all?

          duration_s = @clock.call - start_time
          after_failed = reporter.failed_examples.length
          new_failures = reporter.failed_examples[before_failed...after_failed]

          status = (group_passed && new_failures.empty?) ? :passed : :failed

          HastCI::TaskResult.new(
            task_id: task.id,
            status: status,
            duration_s: duration_s,
            logs: build_logs(new_failures)
          )
        end

        def build_logs(failed_examples)
          {
            summary: summary_for(failed_examples),
            failures: failed_examples.map { |example| failure_payload(example) }
          }
        end

        def summary_for(failed_examples)
          return "passed" if failed_examples.empty?

          "failed: #{failed_examples.length}"
        end

        def failure_payload(example)
          exception = example.exception

          {
            file: example.metadata[:file_path],
            line: example.metadata[:line_number],
            message: exception&.message,
            backtrace: Array(exception&.backtrace)
          }
        end

        def ordered_task_names(example_groups)
          example_groups.map { |group| normalize_path(group.metadata.fetch(:file_path)) }.uniq
        end

        def group_example_groups_by_file(example_groups)
          example_groups.group_by { |group| normalize_path(group.metadata.fetch(:file_path)) }
        end

        def stop_if_quitting!
          return false unless @world.wants_to_quit

          @session.request_stop!(:user_interrupt)
          true
        end

        def normalize_path(path)
          path = path.sub(%r{\A\./}, "")
          pathname = Pathname.new(path)
          return path unless pathname.absolute?

          begin
            pathname.relative_path_from(Pathname.pwd).to_s
          # :nocov:
          rescue ArgumentError
            # Different drive on Windows or cannot relativize
            path
          end
          # :nocov:
        end
      end
    end
  end
end
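
For reference, the logs hash that build_logs attaches to a TaskResult when one example fails has the shape below; the spec path, message, and backtrace strings are illustrative values, not taken from the gem:

# Illustrative shape of the `logs` payload for a task with one failing example.
example_logs = {
  summary: "failed: 1",                       # from summary_for
  failures: [
    {
      file: "./spec/models/user_spec.rb",     # example.metadata[:file_path]
      line: 42,                               # example.metadata[:line_number]
      message: "expected true, got false",    # exception&.message
      backtrace: ["./spec/models/user_spec.rb:42:in `block (2 levels)'"]
    }
  ]
}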

data/lib/hastci/api_client.rb
ADDED

@@ -0,0 +1,310 @@

# frozen_string_literal: true

require "json"
require "net/http"
require "securerandom"
require "uri"

module HastCI
  class ApiClient
    DEFAULT_MAX_RETRIES = 5
    DEFAULT_INITIAL_BACKOFF = 0.5
    DEFAULT_MAX_BACKOFF = 30
    DEFAULT_KEEP_ALIVE_TIMEOUT = 30
    DEFAULT_OPEN_TIMEOUT = 10
    DEFAULT_READ_TIMEOUT = 60
    DEFAULT_WRITE_TIMEOUT = 30

    private_constant :DEFAULT_MAX_RETRIES, :DEFAULT_INITIAL_BACKOFF, :DEFAULT_MAX_BACKOFF,
      :DEFAULT_KEEP_ALIVE_TIMEOUT, :DEFAULT_OPEN_TIMEOUT, :DEFAULT_READ_TIMEOUT, :DEFAULT_WRITE_TIMEOUT

    # Separate connections allow concurrent operations without blocking.
    # E.g., heartbeat can continue while the main thread waits on a claim.
    CONNECTION_DEFAULT = :default
    CONNECTION_HEARTBEAT = :heartbeat
    CONNECTION_ACK = :ack

    private_constant :CONNECTION_DEFAULT, :CONNECTION_HEARTBEAT, :CONNECTION_ACK

    def initialize(config:, sleeper: Kernel.method(:sleep), max_retries: nil, random: Random.new)
      @config = config
      @max_retries = max_retries || config.api_max_retries || DEFAULT_MAX_RETRIES
      @sleeper = sleeper
      @random = random

      @base_url = URI.parse(config.api_base_url)
      @api_key = config.api_key

      @connections = {}
      @connection_mutexes = Hash.new { |h, k| h[k] = Mutex.new }
      @global_mutex = Mutex.new
    end

    def disconnect!
      @global_mutex.synchronize do
        @connections.each_value do |conn|
          conn&.finish if conn&.started?
        rescue IOError
        end
        @connections.clear
      end
    end

    def init_run(
      run_key: @config.run_key,
      worker_id: @config.worker_id,
      commit_sha: @config.commit_sha
    )
      response = ensure_hash_response(
        post_json("/runs/init", {
          run_key: run_key,
          worker_id: worker_id,
          commit_sha: commit_sha
        }),
        context: "/runs/init"
      )

      {
        run_id: fetch_required(response, "run_id", context: "/runs/init"),
        status: fetch_required(response, "status", context: "/runs/init").to_sym,
        role: fetch_required(response, "role", context: "/runs/init").to_sym
      }
    end

    def seed(run_id:, tasks:)
      post_json("/runs/#{run_id}/seed", {
        tasks: tasks.map { |name| {name: name} }
      })
    end

    def run_status(run_id:)
      response = ensure_hash_response(get_json("/runs/#{run_id}/status"), context: "/runs/status")

      {
        status: fetch_required(response, "status", context: "/runs/status").to_sym
      }
    end

    def claim(
      run_key: @config.run_key,
      worker_id: @config.worker_id,
      batch: @config.claim_batch_size
    )
      response = ensure_hash_response(
        post_json("/tasks/claim?batch=#{batch}", {
          run_key: run_key,
          worker_id: worker_id
        }),
        context: "/tasks/claim"
      )

      tasks = Array(response.fetch("tasks", [])).map do |task_data|
        Task.new(
          id: fetch_required(task_data, "id", context: "/tasks/claim"),
          name: fetch_required(task_data, "name", context: "/tasks/claim")
        )
      end

      ClaimResult.new(
        tasks: tasks,
        queue_state: response["queue_state"]&.to_sym,
        remaining: response["remaining"],
        should_stop: response["should_stop"]
      )
    end

    def ack(task_id:, status:, duration_s:, logs:)
      post_json("/tasks/#{task_id}/ack", {
        status: status.to_s,
        duration_s: duration_s,
        logs: logs
      }, pool: CONNECTION_ACK)
    end

    def heartbeat(
      run_key: @config.run_key,
      worker_id: @config.worker_id
    )
      post_json("/workers/heartbeat", {
        run_key: run_key,
        worker_id: worker_id
      }, pool: CONNECTION_HEARTBEAT)

      nil
    end

    private

    def get_json(path, pool: CONNECTION_DEFAULT)
      uri = build_uri(path)
      request = Net::HTTP::Get.new(uri)
      request["Authorization"] = "Bearer #{@api_key}"

      execute_request(request, pool: pool)
    end

    def post_json(path, body, pool: CONNECTION_DEFAULT)
      uri = build_uri(path)
      request = Net::HTTP::Post.new(uri)
      request["Content-Type"] = "application/json"
      request["Authorization"] = "Bearer #{@api_key}"
      request.body = JSON.generate(body)

      execute_request(request, pool: pool)
    end

    def build_uri(path)
      base = @base_url.to_s.chomp("/")
      relative_path = path.delete_prefix("/")
      URI.parse("#{base}/#{relative_path}")
    end

    def execute_request(request, pool:)
      request_id = SecureRandom.hex(4) if @config.debug
      start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC) if @config.debug
      log_request(request, request_id) if @config.debug

      with_retry(pool) do |attempt|
        log_retry(request_id, attempt) if @config.debug && attempt > 0

        response = nil
        mutex = connection_mutex(pool)
        mutex.synchronize do
          connection = ensure_connection!(pool)
          response = connection.request(request)
        end

        elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start_time if @config.debug
        log_response(response, request_id, elapsed) if @config.debug
        parse_response(response)
      end
    end

    def log_request(request, request_id)
      HastCI.logger.debug("[#{request_id}] #{request.method} #{request.path}")
      return unless request.body

      body_preview = if request.body.length > 200
        "#{request.body[0, 200]}... (#{request.body.length} bytes)"
      else
        request.body
      end
      HastCI.logger.debug("[#{request_id}] Body: #{body_preview}")
    end

    def log_response(response, request_id, elapsed_s)
      HastCI.logger.debug("[#{request_id}] Response: #{response.code} (#{(elapsed_s * 1000).round}ms)")
    end

    def log_retry(request_id, attempt)
      HastCI.logger.debug("[#{request_id}] Retry attempt #{attempt}")
    end

    def ensure_connection!(pool)
      conn = @connections[pool]
      @connections[pool] = create_connection if conn.nil? || !conn.started?
      @connections[pool]
    end

    def connection_mutex(pool)
      @global_mutex.synchronize do
        @connection_mutexes[pool]
      end
    end

    def create_connection
      http = Net::HTTP.new(@base_url.host, @base_url.port)
      http.use_ssl = @base_url.scheme == "https"
      http.open_timeout = DEFAULT_OPEN_TIMEOUT
      http.read_timeout = DEFAULT_READ_TIMEOUT
      http.write_timeout = DEFAULT_WRITE_TIMEOUT
      http.keep_alive_timeout = DEFAULT_KEEP_ALIVE_TIMEOUT
      http.start
      http
    end

    def parse_response(response)
      status = response.code.to_i

      return parse_success_response(response) if status.between?(200, 299)

      raise_for_status(response, status)
    end

    def ensure_hash_response(response, context:)
      return response if response.is_a?(Hash)

      raise FatalApiError.new("Invalid response for #{context}", response_body: response)
    end

    def fetch_required(hash, key, context:)
      hash.fetch(key)
    rescue KeyError
      raise FatalApiError.new("Missing key in #{context} response: #{key}", response_body: hash)
    end

    def parse_success_response(response)
      body = response.body.to_s

      return {} if body.strip.empty?

      JSON.parse(body)
    rescue JSON::ParserError
      raise FatalApiError.new("Invalid JSON response", response_body: body)
    end

    def raise_for_status(response, status)
      body = response.body

      case status
      when 410
        raise QueueDrained.new("Queue drained", status_code: status, response_body: body)
      when 400..499
        raise FatalApiError.new("API error: #{status}", status_code: status, response_body: body)
      when 500..599
        raise RetryableError.new("Server error: #{status}", status_code: status, response_body: body)
      else
        raise FatalApiError.new("Unexpected response: #{status}", status_code: status, response_body: body)
      end
    end

    def with_retry(pool)
      attempt = 0

      begin
        yield(attempt)
      rescue RetryableError, Errno::ECONNREFUSED, Errno::ETIMEDOUT, Errno::ECONNRESET,
        Net::OpenTimeout, Net::ReadTimeout, Net::WriteTimeout, IOError => e
        attempt += 1
        if attempt > @max_retries
          raise RetryExhaustedError.new(
            "Network error after #{@max_retries} retries: #{e.message}",
            original_error: e
          )
        end

        reset_connection(pool)
        @sleeper.call(backoff_time(attempt))
        retry
      end
    end

    def reset_connection(pool)
      mutex = connection_mutex(pool)
      mutex.synchronize do
        conn = @connections[pool]
        begin
          conn&.finish if conn&.started?
        rescue IOError
        end
        @connections[pool] = nil
      end
    end

    def backoff_time(retry_count)
      base = DEFAULT_INITIAL_BACKOFF * (2**(retry_count - 1))
      jitter = @random.rand * 0.5 * base
      [base + jitter, DEFAULT_MAX_BACKOFF].min
    end
  end
end
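
ApiClient#with_retry sleeps between attempts using backoff_time: exponential growth from 0.5s with up to 50% random jitter, capped at 30s. A standalone sketch (not part of the gem) that reproduces the formula and prints the delay range per attempt:

# Illustration only: mirrors backoff_time with the default constants.
initial = 0.5
cap = 30
(1..5).each do |attempt|
  base = initial * (2**(attempt - 1))
  min_delay = [base, cap].min
  max_delay = [base * 1.5, cap].min      # base plus maximum 50% jitter
  puts format("attempt %d: %.2fs - %.2fs", attempt, min_delay, max_delay)
end
# attempt 1: 0.50s - 0.75s, attempt 2: 1.00s - 1.50s, ... attempt 5: 8.00s - 12.00s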

data/lib/hastci/api_error.rb
ADDED

@@ -0,0 +1,13 @@

# frozen_string_literal: true

module HastCI
  class ApiError < Error
    attr_reader :status_code, :response_body

    def initialize(message, status_code: nil, response_body: nil)
      @status_code = status_code
      @response_body = response_body
      super(message)
    end
  end
end

data/lib/hastci/claim_result.rb
ADDED

@@ -0,0 +1,27 @@

# frozen_string_literal: true

module HastCI
  class ClaimResult
    attr_reader :tasks, :queue_state, :remaining, :should_stop

    def initialize(tasks:, queue_state:, remaining: nil, should_stop: nil)
      @tasks = tasks
      @queue_state = queue_state
      @remaining = remaining
      @should_stop = should_stop
      freeze
    end

    def drained?
      queue_state == :drained
    end

    def empty?
      tasks.empty?
    end

    def cancelled?
      should_stop == true
    end
  end
end
data/lib/hastci/cli.rb
ADDED

@@ -0,0 +1,101 @@

# frozen_string_literal: true

module HastCI
  class CLI
    def self.run(argv:, env:, err:, out:, &runner_block)
      config = Config.load_from_env(env)
      configure_logging(config, err)

      runner_exit_code = ExitCodes::SUCCESS
      interrupted = false

      cleanup_result = HastCI::Session.run(config: config) do |session|
        setup_interrupt_handler(session, err) { interrupted = true }

        session.start!

        runner_exit_code = if session.stopping?
          ExitCodes::SUCCESS
        else
          runner_block.call(session, argv, err, out)
        end
      end

      print_errors(cleanup_result, err: err)

      final_exit_code(
        runner_exit_code: runner_exit_code,
        cleanup_result: cleanup_result,
        interrupted: interrupted
      )
    rescue HastCI::Error => e
      err.puts(e.message)
      exit_code_for_error(e)
    rescue Interrupt
      err.puts("\nInterrupted - shutting down...")
      ExitCodes::CANCELLED
    end

    def self.print_errors(cleanup_result, err:)
      err.puts("Failed to flush ACKs. Exiting with error.") unless cleanup_result[:flush_ok]
      err.puts(cleanup_result[:first_error].message) if cleanup_result[:first_error]
      err.puts("Run was cancelled by the server.") if cleanup_result[:stop_reason] == :server_cancelled
    end
    private_class_method :print_errors

    def self.final_exit_code(runner_exit_code:, cleanup_result:, interrupted:)
      return ExitCodes::CANCELLED if interrupted

      stop_reason = cleanup_result[:stop_reason]
      first_error = cleanup_result[:first_error]
      flush_ok = cleanup_result[:flush_ok]

      return ExitCodes::CANCELLED if stop_reason == :server_cancelled
      return exit_code_for_error(first_error) if first_error
      return ExitCodes::NETWORK_ERROR unless flush_ok
      return ExitCodes::TEST_FAILURES if runner_exit_code != ExitCodes::SUCCESS

      ExitCodes::SUCCESS
    end
    private_class_method :final_exit_code

    def self.exit_code_for_error(error)
      case error
      when ConfigurationError
        ExitCodes::CONFIGURATION_ERROR
      when RetryableError, RetryExhaustedError
        ExitCodes::NETWORK_ERROR
      when ApiError
        ExitCodes::API_ERROR
      else
        ExitCodes::INTERNAL_ERROR
      end
    end
    private_class_method :exit_code_for_error

    def self.configure_logging(config, err)
      HastCI.logger = Logger.new(err)
      HastCI.logger.level = Logger.const_get(config.log_level)
    end
    private_class_method :configure_logging

    # :nocov:
    def self.setup_interrupt_handler(session, err, &on_interrupt)
      handler = lambda do |signal|
        if session.stopping?
          err.puts("\nForce quit!")
          exit!(1)
        else
          on_interrupt&.call
          session.request_stop!(:user_interrupt)
          err.puts("\nReceived #{signal} - shutting down after current task...")
        end
      end

      trap("INT") { handler.call("SIGINT") }
      trap("TERM") { handler.call("SIGTERM") }
    end
    private_class_method :setup_interrupt_handler
    # :nocov:
  end
end
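
CLI.run expects an executable to supply a runner block receiving (session, argv, err, out) and to exit with the returned code. A hypothetical sketch of that wiring; the real bin/hastci-rspec (5 lines, not shown in this diff) may differ, and the require paths are assumptions:

#!/usr/bin/env ruby
# Hypothetical wrapper; illustrates the documented CLI.run and Runner signatures only.
require "hastci"
require "hastci/adapters/rspec/runner"

status = HastCI::CLI.run(argv: ARGV, env: ENV, err: $stderr, out: $stdout) do |session, argv, err, out|
  HastCI::Adapters::RSpec::Runner.new(argv: argv, session: session, err: err, out: out).run
end
exit(status)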