carson 3.22.1 → 3.23.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/API.md +15 -11
- data/MANUAL.md +32 -38
- data/README.md +6 -9
- data/RELEASE.md +15 -0
- data/VERSION +1 -1
- data/carson.gemspec +1 -0
- data/lib/carson/adapters/agent.rb +2 -2
- data/lib/carson/branch.rb +38 -0
- data/lib/carson/cli.rb +13 -14
- data/lib/carson/config.rb +34 -18
- data/lib/carson/delivery.rb +64 -0
- data/lib/carson/ledger.rb +305 -0
- data/lib/carson/repository.rb +47 -0
- data/lib/carson/revision.rb +30 -0
- data/lib/carson/runtime/audit.rb +6 -6
- data/lib/carson/runtime/deliver.rb +112 -169
- data/lib/carson/runtime/govern.rb +232 -368
- data/lib/carson/runtime/housekeep.rb +4 -4
- data/lib/carson/runtime/local/onboard.rb +5 -5
- data/lib/carson/runtime/local/prune.rb +4 -4
- data/lib/carson/runtime/local/template.rb +4 -4
- data/lib/carson/runtime/review/gate_support.rb +14 -12
- data/lib/carson/runtime/review/sweep_support.rb +2 -2
- data/lib/carson/runtime/review/utility.rb +1 -1
- data/lib/carson/runtime/setup.rb +10 -27
- data/lib/carson/runtime/status.rb +87 -226
- data/lib/carson/runtime.rb +25 -2
- data/lib/carson/worktree.rb +5 -5
- data/lib/carson.rb +5 -0
- metadata +27 -2
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
# Passive ledger record for one branch-to-authority delivery attempt.
module Carson
  # Immutable snapshot of one delivery row. Carries identity, lifecycle
  # status, pull-request linkage, and timestamps, and answers simple state
  # predicates; it performs no persistence itself.
  class Delivery
    ACTIVE_STATES = %w[preparing gated queued integrating escalated].freeze
    BLOCKED_STATES = %w[gated escalated].freeze
    READY_STATES = %w[queued].freeze
    TERMINAL_STATES = %w[integrated failed superseded].freeze

    attr_reader :id, :repository, :branch, :head, :worktree_path, :authority, :status,
                :pull_request_number, :pull_request_url, :revision_count, :cause, :summary,
                :created_at, :updated_at, :integrated_at, :superseded_at

    def initialize(
      id:, repository:, branch:, head:, worktree_path:, authority:, status:,
      pull_request_number:, pull_request_url:, revision_count:, cause:, summary:,
      created_at:, updated_at:, integrated_at:, superseded_at:
    )
      # Identity and placement.
      @id = id
      @repository = repository
      @branch = branch
      @head = head
      @worktree_path = worktree_path
      @authority = authority
      # Lifecycle and review linkage.
      @status = status
      @pull_request_number = pull_request_number
      @pull_request_url = pull_request_url
      @revision_count = revision_count
      @cause = cause
      @summary = summary
      # Timestamps.
      @created_at = created_at
      @updated_at = updated_at
      @integrated_at = integrated_at
      @superseded_at = superseded_at
    end

    # True while the delivery is still moving through the pipeline.
    def active?
      ACTIVE_STATES.member?(status)
    end

    # True when the delivery is waiting on a gate or an escalation.
    def blocked?
      BLOCKED_STATES.member?(status)
    end

    # True when the delivery is queued for integration.
    def ready?
      READY_STATES.member?(status)
    end

    def integrated?
      "integrated" == status
    end

    def failed?
      "failed" == status
    end

    def superseded?
      "superseded" == status
    end

    # True once no further state transitions are expected.
    def terminal?
      TERMINAL_STATES.member?(status)
    end
  end
end
|
|
@@ -0,0 +1,305 @@
|
|
|
1
|
+
# SQLite-backed ledger for Carson's deliveries and revisions.
require "fileutils"
require "sqlite3"
require "time"

module Carson
  # Persistence layer behind Carson's delivery pipeline. Every public method
  # opens its own short-lived SQLite connection (see #with_database), so a
  # Ledger instance holds no open handle between calls.
  class Ledger
    # Sentinel used by #update_delivery to distinguish "argument not
    # supplied" from an explicit nil value.
    UNSET = Object.new
    # In-flight delivery states; mirrors Delivery::ACTIVE_STATES.
    ACTIVE_DELIVERY_STATES = %w[preparing gated queued integrating escalated].freeze

    # path: location of the SQLite database file. The schema is ensured
    # eagerly on construction.
    def initialize( path: )
      @path = File.expand_path( path )
      prepare!
    end

    attr_reader :path

    # Ensures the SQLite schema exists before Carson uses the ledger.
    # Idempotent: all DDL below uses IF NOT EXISTS.
    def prepare!
      FileUtils.mkdir_p( File.dirname( path ) )

      with_database do |database|
        database.execute_batch( <<~SQL )
          CREATE TABLE IF NOT EXISTS deliveries (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            repo_path TEXT NOT NULL,
            branch_name TEXT NOT NULL,
            head TEXT NOT NULL,
            worktree_path TEXT,
            authority TEXT NOT NULL,
            status TEXT NOT NULL,
            pr_number INTEGER,
            pr_url TEXT,
            revision_count INTEGER NOT NULL DEFAULT 0,
            cause TEXT,
            summary TEXT,
            created_at TEXT NOT NULL,
            updated_at TEXT NOT NULL,
            integrated_at TEXT,
            superseded_at TEXT
          );

          CREATE UNIQUE INDEX IF NOT EXISTS index_deliveries_on_identity
            ON deliveries ( repo_path, branch_name, head );

          CREATE INDEX IF NOT EXISTS index_deliveries_on_state
            ON deliveries ( repo_path, status, created_at );

          CREATE TABLE IF NOT EXISTS revisions (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            delivery_id INTEGER NOT NULL,
            number INTEGER NOT NULL,
            cause TEXT NOT NULL,
            provider TEXT NOT NULL,
            status TEXT NOT NULL,
            started_at TEXT NOT NULL,
            finished_at TEXT,
            summary TEXT,
            FOREIGN KEY ( delivery_id ) REFERENCES deliveries ( id )
          );

          CREATE UNIQUE INDEX IF NOT EXISTS index_revisions_on_delivery_number
            ON revisions ( delivery_id, number );
        SQL
      end
    end

    # Creates or refreshes a delivery for the same branch head.
    # A row already matching (repo_path, branch_name, head) is updated in
    # place; otherwise any active deliveries for the branch are first marked
    # superseded and a fresh row is inserted with revision_count 0.
    # Returns the resulting Delivery record.
    def upsert_delivery( repository:, branch_name:, head:, worktree_path:, authority:, pr_number:, pr_url:, status:, summary:, cause: )
      timestamp = now_utc

      with_database do |database|
        row = database.get_first_row(
          "SELECT * FROM deliveries WHERE repo_path = ? AND branch_name = ? AND head = ? LIMIT 1",
          [ repository.path, branch_name, head ]
        )

        if row
          database.execute(
            <<~SQL,
              UPDATE deliveries
              SET worktree_path = ?, authority = ?, status = ?, pr_number = ?, pr_url = ?,
                  cause = ?, summary = ?, updated_at = ?
              WHERE id = ?
            SQL
            [ worktree_path, authority, status, pr_number, pr_url, cause, summary, timestamp, row.fetch( "id" ) ]
          )
          return fetch_delivery( database: database, id: row.fetch( "id" ), repository: repository )
        end

        # New head for this branch: retire any still-active deliveries first
        # so only one delivery per branch remains active.
        supersede_branch!( database: database, repository: repository, branch_name: branch_name, timestamp: timestamp )
        database.execute(
          <<~SQL,
            INSERT INTO deliveries (
              repo_path, branch_name, head, worktree_path, authority, status,
              pr_number, pr_url, revision_count, cause, summary, created_at, updated_at
            ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, 0, ?, ?, ?, ? )
          SQL
          [
            repository.path, branch_name, head, worktree_path, authority, status,
            pr_number, pr_url, cause, summary, timestamp, timestamp
          ]
        )
        fetch_delivery( database: database, id: database.last_insert_row_id, repository: repository )
      end
    end

    # Looks up the active delivery for a branch, if one exists.
    # Returns the most recently updated active Delivery, or nil.
    def active_delivery( repo_path:, branch_name: )
      with_database do |database|
        row = database.get_first_row(
          <<~SQL,
            SELECT * FROM deliveries
            WHERE repo_path = ? AND branch_name = ? AND status IN ( #{active_state_placeholders} )
            ORDER BY updated_at DESC
            LIMIT 1
          SQL
          [ repo_path, branch_name, *ACTIVE_DELIVERY_STATES ]
        )
        build_delivery( row: row ) if row
      end
    end

    # Lists active deliveries for a repository in creation order.
    def active_deliveries( repo_path: )
      with_database do |database|
        rows = database.execute(
          <<~SQL,
            SELECT * FROM deliveries
            WHERE repo_path = ? AND status IN ( #{active_state_placeholders} )
            ORDER BY created_at ASC, id ASC
          SQL
          [ repo_path, *ACTIVE_DELIVERY_STATES ]
        )
        rows.map { |row| build_delivery( row: row ) }
      end
    end

    # Lists queued deliveries ready for integration.
    def queued_deliveries( repo_path: )
      with_database do |database|
        database.execute(
          "SELECT * FROM deliveries WHERE repo_path = ? AND status = ? ORDER BY created_at ASC, id ASC",
          [ repo_path, "queued" ]
        ).map { |row| build_delivery( row: row ) }
      end
    end

    # Updates a delivery record in place.
    # Only keywords explicitly passed are written (UNSET sentinels are
    # skipped, so an explicit nil clears a column); updated_at is always
    # refreshed. Returns the re-read Delivery.
    def update_delivery(
      delivery:,
      status: UNSET,
      pr_number: UNSET,
      pr_url: UNSET,
      cause: UNSET,
      summary: UNSET,
      worktree_path: UNSET,
      revision_count: UNSET,
      integrated_at: UNSET,
      superseded_at: UNSET
    )
      updates = {}
      updates[ "status" ] = status unless status.equal?( UNSET )
      updates[ "pr_number" ] = pr_number unless pr_number.equal?( UNSET )
      updates[ "pr_url" ] = pr_url unless pr_url.equal?( UNSET )
      updates[ "cause" ] = cause unless cause.equal?( UNSET )
      updates[ "summary" ] = summary unless summary.equal?( UNSET )
      updates[ "worktree_path" ] = worktree_path unless worktree_path.equal?( UNSET )
      updates[ "revision_count" ] = revision_count unless revision_count.equal?( UNSET )
      updates[ "integrated_at" ] = integrated_at unless integrated_at.equal?( UNSET )
      updates[ "superseded_at" ] = superseded_at unless superseded_at.equal?( UNSET )
      updates[ "updated_at" ] = now_utc

      with_database do |database|
        # Column names come only from the fixed keys above, so interpolating
        # them into the statement is safe; values stay parameterized.
        assignments = updates.keys.map { |key| "#{key} = ?" }.join( ", " )
        database.execute(
          "UPDATE deliveries SET #{assignments} WHERE id = ?",
          updates.values + [ delivery.id ]
        )
        fetch_delivery( database: database, id: delivery.id, repository: delivery.repository )
      end
    end

    # Records one revision cycle against a delivery and bumps the delivery counter.
    # Revision numbers are per-delivery, assigned as MAX(number) + 1.
    # NOTE(review): the MAX+1 read and the INSERT are not wrapped in a
    # transaction; two concurrent writers could race — the unique
    # (delivery_id, number) index would make the loser fail loudly. Confirm
    # whether concurrent recorders are possible.
    def record_revision( delivery:, cause:, provider:, status:, summary: )
      timestamp = now_utc

      with_database do |database|
        next_number = database.get_first_value(
          "SELECT COALESCE( MAX(number), 0 ) + 1 FROM revisions WHERE delivery_id = ?",
          [ delivery.id ]
        ).to_i
        database.execute(
          <<~SQL,
            INSERT INTO revisions ( delivery_id, number, cause, provider, status, started_at, finished_at, summary )
            VALUES ( ?, ?, ?, ?, ?, ?, ?, ? )
          SQL
          [
            delivery.id, next_number, cause, provider, status, timestamp,
            # Terminal revision statuses are stamped finished immediately.
            ( status == "completed" || status == "failed" || status == "stalled" ) ? timestamp : nil,
            summary
          ]
        )
        database.execute(
          "UPDATE deliveries SET revision_count = ?, updated_at = ? WHERE id = ?",
          [ next_number, timestamp, delivery.id ]
        )
        # last_insert_row_id still refers to the INSERT above; the UPDATE
        # does not change it.
        build_revision(
          row: database.get_first_row( "SELECT * FROM revisions WHERE id = ?", [ database.last_insert_row_id ] )
        )
      end
    end

    # Lists revisions for a delivery in ascending order.
    def revisions_for_delivery( delivery_id: )
      with_database do |database|
        database.execute(
          "SELECT * FROM revisions WHERE delivery_id = ? ORDER BY number ASC, id ASC",
          [ delivery_id ]
        ).map { |row| build_revision( row: row ) }
      end
    end

    private

    # Opens a connection configured for this ledger (hash rows, 5s busy
    # timeout, WAL journal mode), yields it, and always closes it.
    def with_database
      database = SQLite3::Database.new( path )
      database.results_as_hash = true
      database.busy_timeout = 5_000
      database.execute( "PRAGMA journal_mode = WAL" )
      yield database
    ensure
      database&.close
    end

    # Re-reads one delivery row by id and wraps it as a Delivery record.
    def fetch_delivery( database:, id:, repository: nil )
      row = database.get_first_row( "SELECT * FROM deliveries WHERE id = ?", [ id ] )
      build_delivery( row: row, repository: repository )
    end

    # Maps a deliveries row hash onto a Delivery record. When no repository
    # is supplied, a passive Repository is reconstructed from the row with
    # runtime: nil. Returns nil for a nil row.
    def build_delivery( row:, repository: nil )
      return nil unless row

      repository ||= Repository.new(
        path: row.fetch( "repo_path" ),
        authority: row.fetch( "authority" ),
        runtime: nil
      )

      Delivery.new(
        id: row.fetch( "id" ),
        repository: repository,
        branch: row.fetch( "branch_name" ),
        head: row.fetch( "head" ),
        worktree_path: row.fetch( "worktree_path" ),
        authority: row.fetch( "authority" ),
        status: row.fetch( "status" ),
        pull_request_number: row.fetch( "pr_number" ),
        pull_request_url: row.fetch( "pr_url" ),
        revision_count: row.fetch( "revision_count" ).to_i,
        cause: row.fetch( "cause" ),
        summary: row.fetch( "summary" ),
        created_at: row.fetch( "created_at" ),
        updated_at: row.fetch( "updated_at" ),
        integrated_at: row.fetch( "integrated_at" ),
        superseded_at: row.fetch( "superseded_at" )
      )
    end

    # Maps a revisions row hash onto a Revision record; nil for a nil row.
    def build_revision( row: )
      return nil unless row

      Revision.new(
        id: row.fetch( "id" ),
        delivery_id: row.fetch( "delivery_id" ),
        number: row.fetch( "number" ).to_i,
        cause: row.fetch( "cause" ),
        provider: row.fetch( "provider" ),
        status: row.fetch( "status" ),
        started_at: row.fetch( "started_at" ),
        finished_at: row.fetch( "finished_at" ),
        summary: row.fetch( "summary" )
      )
    end

    # Marks every still-active delivery for the branch as superseded.
    def supersede_branch!( database:, repository:, branch_name:, timestamp: )
      database.execute(
        <<~SQL,
          UPDATE deliveries
          SET status = ?, superseded_at = ?, updated_at = ?
          WHERE repo_path = ? AND branch_name = ? AND status IN ( #{active_state_placeholders} )
        SQL
        [ "superseded", timestamp, timestamp, repository.path, branch_name, *ACTIVE_DELIVERY_STATES ]
      )
    end

    # "?, ?, ..." fragment sized to ACTIVE_DELIVERY_STATES for IN clauses.
    def active_state_placeholders
      ACTIVE_DELIVERY_STATES.map { "?" }.join( ", " )
    end

    # Current time as an ISO-8601 UTC string (Time#iso8601 via require "time").
    def now_utc
      Time.now.utc.iso8601
    end
  end
end
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
# Passive repository record reconstructed from git state and Carson's ledger.
module Carson
  # Read-only view of one repository under Carson's management: its
  # filesystem path, its delivery authority, and a back-reference to the
  # runtime used for git and ledger access.
  class Repository
    attr_reader :path, :authority

    def initialize(path:, authority:, runtime:)
      @path = File.expand_path(path)
      @authority = authority
      @runtime = runtime
    end

    # Human-readable repository name derived from the filesystem path.
    def name
      File.basename(path)
    end

    # Returns a passive branch record for the given branch name.
    def branch(name)
      Branch.new(repository: self, name: name, runtime: runtime)
    end

    # Lists local branches as passive branch records. Any failure while
    # asking git is treated as "no branches" rather than raised.
    def branches
      raw = runtime.git_capture!("for-each-ref", "--format=%(refname:short)", "refs/heads")
      raw.lines.filter_map do |line|
        candidate = line.strip
        branch(candidate) unless candidate.empty?
      end
    rescue StandardError
      []
    end

    # Reports the repository's delivery-centred state for status surfaces.
    def status
      active = runtime.ledger.active_deliveries(repo_path: path)
      {
        name: name,
        path: path,
        authority: authority,
        branches: active.map(&:branch)
      }
    end

    private

    attr_reader :runtime
  end
end
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
# Passive ledger record for one feedback-driven revision cycle.
module Carson
  # Immutable snapshot of a single revision row attached to a delivery.
  class Revision
    # States in which the revision cycle has not yet produced an outcome.
    OPEN_STATES = %w[queued running].freeze
    # States counted as unsuccessful outcomes.
    FAILURE_STATES = %w[failed stalled].freeze

    attr_reader :id, :delivery_id, :number, :cause, :provider, :status,
                :started_at, :finished_at, :summary

    def initialize(id:, delivery_id:, number:, cause:, provider:, status:, started_at:, finished_at:, summary:)
      @id = id
      @delivery_id = delivery_id
      @number = number
      @cause = cause
      @provider = provider
      @status = status
      @started_at = started_at
      @finished_at = finished_at
      @summary = summary
    end

    # True while the revision is queued or running.
    def open?
      OPEN_STATES.member?(status)
    end

    # True when the revision finished successfully.
    def completed?
      "completed" == status
    end

    # True when the revision failed or stalled.
    def failed?
      FAILURE_STATES.member?(status)
    end
  end
end
|
data/lib/carson/runtime/audit.rb
CHANGED
|
@@ -78,7 +78,7 @@ module Carson
|
|
|
78
78
|
failing_count = checks.fetch( :failing_count )
|
|
79
79
|
pending_count = checks.fetch( :pending_count )
|
|
80
80
|
total = checks.fetch( :required_total )
|
|
81
|
-
fail_names = checks.fetch( :failing ).map {
|
|
81
|
+
fail_names = checks.fetch( :failing ).map { |entry| entry.fetch( :name ) }.join( ", " )
|
|
82
82
|
if failing_count.positive? && pending_count.positive?
|
|
83
83
|
audit_concise_problems << "Checks: #{failing_count} failing (#{fail_names}), #{pending_count} pending of #{total} required."
|
|
84
84
|
elsif failing_count.positive?
|
|
@@ -95,11 +95,11 @@ module Carson
|
|
|
95
95
|
if baseline_status == "block"
|
|
96
96
|
parts = []
|
|
97
97
|
if default_branch_baseline.fetch( :failing_count ).positive?
|
|
98
|
-
names = default_branch_baseline.fetch( :failing ).map {
|
|
98
|
+
names = default_branch_baseline.fetch( :failing ).map { |entry| entry.fetch( :name ) }.join( ", " )
|
|
99
99
|
parts << "#{default_branch_baseline.fetch( :failing_count )} failing (#{names})"
|
|
100
100
|
end
|
|
101
101
|
if default_branch_baseline.fetch( :pending_count ).positive?
|
|
102
|
-
names = default_branch_baseline.fetch( :pending ).map {
|
|
102
|
+
names = default_branch_baseline.fetch( :pending ).map { |entry| entry.fetch( :name ) }.join( ", " )
|
|
103
103
|
parts << "#{default_branch_baseline.fetch( :pending_count )} pending (#{names})"
|
|
104
104
|
end
|
|
105
105
|
parts << "no check-runs for active workflows" if default_branch_baseline.fetch( :no_check_evidence )
|
|
@@ -107,11 +107,11 @@ module Carson
|
|
|
107
107
|
elsif baseline_status == "attention"
|
|
108
108
|
parts = []
|
|
109
109
|
if default_branch_baseline.fetch( :advisory_failing_count ).positive?
|
|
110
|
-
names = default_branch_baseline.fetch( :advisory_failing ).map {
|
|
110
|
+
names = default_branch_baseline.fetch( :advisory_failing ).map { |entry| entry.fetch( :name ) }.join( ", " )
|
|
111
111
|
parts << "#{default_branch_baseline.fetch( :advisory_failing_count )} advisory failing (#{names})"
|
|
112
112
|
end
|
|
113
113
|
if default_branch_baseline.fetch( :advisory_pending_count ).positive?
|
|
114
|
-
names = default_branch_baseline.fetch( :advisory_pending ).map {
|
|
114
|
+
names = default_branch_baseline.fetch( :advisory_pending ).map { |entry| entry.fetch( :name ) }.join( ", " )
|
|
115
115
|
parts << "#{default_branch_baseline.fetch( :advisory_pending_count )} advisory pending (#{names})"
|
|
116
116
|
end
|
|
117
117
|
audit_concise_problems << "Baseline (#{default_branch_baseline.fetch( :default_branch, config.main_branch )}): #{parts.join( ', ' )}."
|
|
@@ -288,7 +288,7 @@ module Carson
|
|
|
288
288
|
return report
|
|
289
289
|
end
|
|
290
290
|
checks_data = JSON.parse( checks_stdout )
|
|
291
|
-
pending = checks_data.select {
|
|
291
|
+
pending = checks_data.select { |entry| entry[ "bucket" ].to_s == "pending" }
|
|
292
292
|
failing = checks_data.select { |entry| check_entry_failing?( entry: entry ) }
|
|
293
293
|
report[ :checks ][ :status ] = checks_success ? "ok" : ( checks_exit == 8 ? "pending" : "attention" )
|
|
294
294
|
report[ :checks ][ :required_total ] = checks_data.count
|