gitlab_quality-test_tooling 2.10.0 → 2.18.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile.lock +3 -3
- data/README.md +22 -5
- data/exe/{feature-readiness-check → feature-readiness-checklist} +2 -2
- data/exe/feature-readiness-evaluation +62 -0
- data/exe/relate-failure-issue +5 -0
- data/lib/gitlab_quality/test_tooling/feature_readiness/analyzed_items/analyzed_epic.rb +94 -0
- data/lib/gitlab_quality/test_tooling/feature_readiness/analyzed_items/analyzed_issue.rb +92 -0
- data/lib/gitlab_quality/test_tooling/feature_readiness/analyzed_items/analyzed_merge_request.rb +139 -0
- data/lib/gitlab_quality/test_tooling/feature_readiness/concerns/work_item_concern.rb +26 -12
- data/lib/gitlab_quality/test_tooling/feature_readiness/evaluation.rb +82 -0
- data/lib/gitlab_quality/test_tooling/feature_readiness/operational_readiness_check.rb +4 -4
- data/lib/gitlab_quality/test_tooling/gitlab_client/issues_client.rb +7 -1
- data/lib/gitlab_quality/test_tooling/gitlab_client/merge_requests_client.rb +21 -0
- data/lib/gitlab_quality/test_tooling/gitlab_client/merge_requests_dry_client.rb +0 -10
- data/lib/gitlab_quality/test_tooling/gitlab_client/work_items_client.rb +71 -34
- data/lib/gitlab_quality/test_tooling/report/concerns/results_reporter.rb +1 -1
- data/lib/gitlab_quality/test_tooling/report/concerns/utils.rb +3 -3
- data/lib/gitlab_quality/test_tooling/report/feature_readiness/report_on_epic.rb +174 -0
- data/lib/gitlab_quality/test_tooling/report/flaky_test_issue.rb +1 -1
- data/lib/gitlab_quality/test_tooling/report/generate_test_session.rb +1 -1
- data/lib/gitlab_quality/test_tooling/report/group_issues/error_message_normalizer.rb +49 -0
- data/lib/gitlab_quality/test_tooling/report/group_issues/error_pattern_matcher.rb +36 -0
- data/lib/gitlab_quality/test_tooling/report/group_issues/failure_processor.rb +73 -0
- data/lib/gitlab_quality/test_tooling/report/group_issues/group_results_in_issues.rb +48 -0
- data/lib/gitlab_quality/test_tooling/report/group_issues/incident_checker.rb +61 -0
- data/lib/gitlab_quality/test_tooling/report/group_issues/issue_base.rb +48 -0
- data/lib/gitlab_quality/test_tooling/report/group_issues/issue_creator.rb +44 -0
- data/lib/gitlab_quality/test_tooling/report/group_issues/issue_finder.rb +79 -0
- data/lib/gitlab_quality/test_tooling/report/group_issues/issue_formatter.rb +83 -0
- data/lib/gitlab_quality/test_tooling/report/group_issues/issue_manager.rb +33 -0
- data/lib/gitlab_quality/test_tooling/report/group_issues/issue_updater.rb +87 -0
- data/lib/gitlab_quality/test_tooling/report/relate_failure_issue.rb +149 -12
- data/lib/gitlab_quality/test_tooling/runtime/env.rb +1 -1
- data/lib/gitlab_quality/test_tooling/test_meta/processor/add_to_blocking_processor.rb +1 -1
- data/lib/gitlab_quality/test_tooling/test_meta/processor/add_to_quarantine_processor.rb +1 -1
- data/lib/gitlab_quality/test_tooling/test_meta/test_meta_updater.rb +38 -8
- data/lib/gitlab_quality/test_tooling/test_result/base_test_result.rb +17 -4
- data/lib/gitlab_quality/test_tooling/version.rb +1 -1
- data/lib/gitlab_quality/test_tooling.rb +2 -0
- metadata +34 -10
data/lib/gitlab_quality/test_tooling/feature_readiness/operational_readiness_check.rb

@@ -14,7 +14,7 @@ module GitlabQuality

         def initialize(token:, project: nil, group: nil, limit_to_minutes: nil, search_labels: [], issue_is_blocking: false, dry_run: false)
           @token = token
-          @project = project
+          @project = "#{group}/#{project}"
           @group = group
           @limit_to_minutes = limit_to_minutes
           @search_labels = search_labels
@@ -26,7 +26,7 @@ module GitlabQuality
           created_after = utc_time_minus_mins(limit_to_minutes)

           epics = work_items_client.paginated_call(:group_work_items,
-            labels: search_labels.concat(BASE_LABELS_FOR_SEARCH), state: 'opened', created_after: created_after, extras: [:work_item_fields])
+            labels: search_labels.concat(BASE_LABELS_FOR_SEARCH).uniq, state: 'opened', created_after: created_after, extras: [:work_item_fields])

           epics.each do |epic|
             process_epic(epic)
@@ -58,11 +58,11 @@ module GitlabQuality
         end

         def process_epic(epic) # rubocop:disable Metrics/AbcSize
-          epic = fetch_work_item(epic[:iid], work_items_client)
+          epic = fetch_work_item(epic[:iid], work_items_client, [:notes, :linked_items, :labels, :hierarchy])

           return if has_a_child_epic?(epic)

-          pre_check_comment = add_operational_readiness_precheck_comment(epic, work_items_client)
+          pre_check_comment = add_operational_readiness_precheck_comment(epic, work_items_client, label_client)

           return unless note_has_emoji?(pre_check_comment, 'white_check_mark') && !has_operational_readiness_issue_linked?(linked_issue_iids(epic), issue_client)

data/lib/gitlab_quality/test_tooling/gitlab_client/issues_client.rb

@@ -72,6 +72,12 @@ module GitlabQuality
           end
         end

+        def related_merge_requests(iid:)
+          handle_gitlab_client_exceptions do
+            client.related_merge_requests(project, iid).auto_paginate
+          end
+        end
+
         def find_issue_discussions(iid:)
           handle_gitlab_client_exceptions do
             client.issue_discussions(project, iid, order_by: 'created_at', sort: 'asc').auto_paginate
@@ -169,7 +175,7 @@ module GitlabQuality
         def find_commit_parent(project, sha)
           handle_gitlab_client_exceptions do
             # In a merged results commit, the first parent is the one from
-            # the
+            # the default branch, and the second parent is from the branch
             # itself (more likely to have caused the issue)
             client.commit(project, sha).parent_ids.last
           end
data/lib/gitlab_quality/test_tooling/gitlab_client/merge_requests_client.rb

@@ -1,5 +1,16 @@
 # frozen_string_literal: true

+require 'gitlab'
+
+module Gitlab
+  # Monkey patch the Gitlab client to allow passing query options
+  class Client
+    def merge_request_diffs(project, merge_request_iid, options = {})
+      get("/projects/#{url_encode(project)}/merge_requests/#{merge_request_iid}/diffs", query: options).auto_paginate
+    end
+  end
+end
+
 module GitlabQuality
   module TestTooling
     module GitlabClient
@@ -10,6 +21,12 @@ module GitlabQuality
           end
         end

+        def merge_request_diffs(merge_request_iid:)
+          handle_gitlab_client_exceptions do
+            client.merge_request_diffs(project, merge_request_iid, per_page: 100)
+          end
+        end
+
         def create_merge_request(title:, source_branch:, target_branch:, description:, labels:, assignee_id: nil, reviewer_ids: [])
           attrs = {
             source_branch: source_branch,
@@ -33,6 +50,10 @@ module GitlabQuality
           merge_request
         end

+        def merge_request(id:, options: {})
+          client.merge_request(project, id, options)
+        end
+
         def find(iid: nil, options: {}, &select)
           select ||= :itself

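For orientation, a usage sketch of the new diff-fetching path. The `MergeRequestsClient` constructor keywords below are assumptions (they are not shown in this diff); `merge_request_diffs` and `merge_request` come from the hunks above, and the monkey-patched `Gitlab::Client#merge_request_diffs` calls `GET /projects/:id/merge_requests/:iid/diffs` with query options and auto-paginates the response.

```ruby
# Sketch only: constructor arguments are assumed, not taken from this diff.
mr_client = GitlabQuality::TestTooling::GitlabClient::MergeRequestsClient.new(
  token: ENV['GITLAB_TOKEN'], project: 'gitlab-org/gitlab' # assumed keyword arguments
)

# Fetched 100 entries per page and auto-paginated into a single collection.
diffs = mr_client.merge_request_diffs(merge_request_iid: 12345)
diffs.each { |diff| puts diff.new_path } # one entry per changed file, per the GitLab /diffs API
```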
data/lib/gitlab_quality/test_tooling/gitlab_client/merge_requests_dry_client.rb

@@ -4,16 +4,6 @@ module GitlabQuality
   module TestTooling
     module GitlabClient
       class MergeRequestsDryClient < MergeRequestsClient
-        def find_merge_request_changes(merge_request_iid:)
-          puts "Finding changes for merge_request_id #{merge_request_iid}"
-          puts "project: #{project}"
-        end
-
-        def merge_request_changed_files(merge_request_iid:)
-          puts "Changed files for #{merge_request_iid}"
-          []
-        end
-
         def find_note(body:, merge_request_iid:)
           puts "Find note for #{merge_request_iid} with body: #{body} for mr_iid: #{merge_request_iid}"
         end
data/lib/gitlab_quality/test_tooling/gitlab_client/work_items_client.rb

@@ -3,15 +3,14 @@
 module GitlabQuality
   module TestTooling
     module GitlabClient
-      # The GitLab client is used for API access: https://github.com/NARKOZ/gitlab
       class WorkItemsClient < GitlabGraphqlClient
-        def work_item(workitem_iid:)
+        def work_item(workitem_iid:, widgets: [:notes, :linked_items, :labels, :hierarchy])
           query = <<~GQL
             query {
               namespace(fullPath: "#{group}") {
                 workItem(iid: "#{workitem_iid}") {
                   #{work_item_fields}
-                  #{work_item_widgets}
+                  #{work_item_widgets(widgets)}
                 }
               }
             }
@@ -19,7 +18,7 @@ module GitlabQuality
           post(query)[:workItem]
         end

-        def group_work_items(labels: [], cursor: '', state: 'opened', created_after: nil, extras: [])
+        def group_work_items(labels: [], cursor: '', state: 'opened', created_after: nil, extras: [:work_item_fields])
           query = <<~GQL
             query {
               group(fullPath: "#{group}") {
@@ -73,6 +72,18 @@ module GitlabQuality
           post(query)
         end

+        def update_note(note_id:, body:)
+          query = <<~GQL
+            mutation UpdateNote {
+              updateNote(input: { body: "#{body}", id: "#{note_id}" }) {
+                clientMutationId
+                errors
+              }
+            }
+          GQL
+          post(query)
+        end
+
         def create_linked_items(work_item_id:, item_ids:, link_type:)
           query = <<~GQL
             mutation WorkItemAddLinkedItems {
@@ -160,50 +171,76 @@ module GitlabQuality
           GQL
         end

-        def
+        def work_item_widget_notes
           <<~GQL
-
-
-
-
-
-
-              #{note_fields}
-            }
+            ... on WorkItemWidgetNotes {
+              discussions(filter: ONLY_COMMENTS) {
+                nodes {
+                  notes {
+                    nodes {
+                      #{note_fields}
                     }
                   }
-            }
-          }
-          ... on WorkItemWidgetLinkedItems {
-            linkedItems {
-              nodes {
-                linkType
-                workItem {
-                  #{work_item_fields}
-                }
                 }
-              }
              }
+            }
+          GQL
+        end

-
-
-
-
-
+        def work_item_widget_linked_items
+          <<~GQL
+            ... on WorkItemWidgetLinkedItems {
+              linkedItems {
+                nodes {
+                  linkType
+                  workItem {
+                    #{work_item_fields}
+                  }
+                }
              }
+            }
+          GQL
+        end
+
+        def work_item_widget_labels
+          <<~GQL
+            ... on WorkItemWidgetLabels{
+              labels{
+                nodes{
+                  title
+                }
              }
+            }
+          GQL
+        end

-
-
-
-
-
-
+        def work_item_widget_hierarchy
+          <<~GQL
+            ... on WorkItemWidgetHierarchy {
+              children {
+                nodes{
+                  #{work_item_fields}
+                }
              }
            }
          GQL
        end

+        def work_item_widgets(widgets = [])
+          <<~GQL
+            widgets(onlyTypes: [#{types_for_widgets(widgets)}]) {
+              #{work_item_widget_notes if widgets.include?(:notes)}
+              #{work_item_widget_linked_items if widgets.include?(:linked_items)}
+              #{work_item_widget_labels if widgets.include?(:labels)}
+              #{work_item_widget_hierarchy if widgets.include?(:hierarchy)}
+            }
+          GQL
+        end
+
+        def types_for_widgets(widgets = [])
+          widgets.map(&:upcase).join(', ')
+        end
+
         # https://docs.gitlab.com/api/graphql/reference/#note
         def note_fields
           <<~GQL
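A brief usage sketch of the widget selection introduced above. The constructor keywords are assumptions (not shown in this diff); `work_item` and the widget symbols come from this file's hunks.

```ruby
# Sketch only: request a subset of widgets for a single work item.
work_items_client = GitlabQuality::TestTooling::GitlabClient::WorkItemsClient.new(
  token: ENV['GITLAB_TOKEN'], group: 'gitlab-org' # assumed keyword arguments
)

# widgets: [:labels, :hierarchy] expands to `widgets(onlyTypes: [LABELS, HIERARCHY])` plus the
# matching inline fragments, so notes and linked items are not fetched for this query.
epic = work_items_client.work_item(workitem_iid: 42, widgets: [:labels, :hierarchy])
```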
data/lib/gitlab_quality/test_tooling/report/concerns/utils.rb

@@ -43,9 +43,9 @@ module GitlabQuality
        # Some of those run in their own project, so CI_PROJECT_NAME is the name we need. Those are:
        # nightly, staging, canary, production, and preprod
        #
-        # MR, master
-        # master
-        # So we assume that we're reporting a master
+        # MR, master, and gitlab tests run in gitlab-qa, but we only want to report tests run on
+        # master because the other pipelines will be monitored by the author of the MR that triggered them.
+        # So we assume that we're reporting a master pipeline if the project name is 'gitlab'.

        @pipeline ||= Runtime::Env.pipeline_from_project_name
      end
data/lib/gitlab_quality/test_tooling/report/feature_readiness/report_on_epic.rb

@@ -0,0 +1,174 @@
+# frozen_string_literal: true
+
+require 'pp'
+require 'stringio'
+
+module GitlabQuality
+  module TestTooling
+    module Report
+      module FeatureReadiness
+        class ReportOnEpic
+          FEATURE_READINESS_REPORT_COMMENT_ID = '<!-- FEATURE READINESS REPORT COMMENT -->'
+
+          class << self
+            include GitlabQuality::TestTooling::FeatureReadiness::Concerns::WorkItemConcern
+
+            def report(analyzed_epic, work_item_client)
+              must_haves_report_rows = generate_report_rows(analyzed_epic, :must_haves)
+              should_haves_report_rows = generate_report_rows(analyzed_epic, :should_haves)
+
+              existing_note = existing_note_containing_text(FEATURE_READINESS_REPORT_COMMENT_ID, analyzed_epic[:epic_iid], work_item_client)
+
+              if existing_note
+                work_item_client.update_note(note_id: existing_note[:id],
+                  body: comment({ must_haves: must_haves_report_rows, should_haves: should_haves_report_rows }, analyzed_epic).tr('"', "'"))
+              else
+                work_item_client.create_discussion(id: analyzed_epic[:epic_id],
+                  note: comment({ must_haves: must_haves_report_rows, should_haves: should_haves_report_rows }, analyzed_epic).tr('"', "'"))
+              end
+            end
+
+            private
+
+            def generate_report_rows(epic, type)
+              status_checks = check_statuses(epic)
+              create_rows(epic, type, status_checks)
+            end
+
+            def create_rows(epic, type, status_checks)
+              if type == :must_haves
+                [
+                  create_documentation_row(epic, status_checks),
+                  create_feature_flag_row(epic, status_checks),
+                  create_unit_tests_coverage_row(status_checks)
+
+                ]
+              else
+                [
+                  create_feature_tests_row(epic, status_checks),
+                  create_e2e_tests_row(epic, status_checks)
+                ]
+              end
+            end
+
+            def create_documentation_row(epic, status_checks)
+              ["Documentation added?", status_icon(status_checks[:has_docs]),
+               prepend_text('Added in:', format_links(epic[:doc_mrs]))]
+            end
+
+            def create_feature_flag_row(epic, status_checks)
+              ["Feature Flag added?", status_icon(status_checks[:feature_flag_added]),
+               prepend_text('Added in:', format_links(epic[:feature_flag_mrs]))]
+            end
+
+            def create_feature_tests_row(epic, status_checks)
+              ["Feature tests added?", status_icon(status_checks[:has_feature_specs]),
+               format_links(epic[:feature_spec_mrs])]
+            end
+
+            def create_e2e_tests_row(epic, status_checks)
+              ["End-to-end tests added?", status_icon(status_checks[:has_e2e_specs]),
+               format_links(epic[:e2e_spec_mrs])]
+            end
+
+            def create_unit_tests_coverage_row(status_checks)
+              ["Unit tests coverage complete?", status_icon(status_checks[:has_complete_unit_tests]),
+               prepend_text('Coverage missing for:', format_links(status_checks[:missing_specs]))]
+            end
+
+            def prepend_text(prepend_text, text)
+              return "#{prepend_text} #{text}" unless text.empty?
+
+              text
+            end
+
+            def check_statuses(epic)
+              {
+                has_docs: epic[:doc_mrs].any?,
+                feature_flag_added: epic[:feature_flag_mrs].any?,
+                has_feature_specs: epic[:feature_spec_mrs].any?,
+                has_e2e_specs: epic[:e2e_spec_mrs].any?,
+                missing_specs: missing_spec_mrs(epic),
+                has_complete_unit_tests: missing_spec_mrs(epic).empty?
+              }
+            end
+
+            def comment(rows, epic)
+              # Generate markdown table
+              must_haves_table_rows = rows[:must_haves].map do |description, status, links|
+                "| #{description} | #{status} | #{links} |"
+              end.join("\n")
+
+              should_haves_table_rows = rows[:should_haves].map do |description, status, links|
+                "| #{description} | #{status} | #{links} |"
+              end.join("\n")
+
+              <<~COMMENT
+                #{FEATURE_READINESS_REPORT_COMMENT_ID}
+
+                # :vertical_traffic_light: Feature Readiness Evaluation Report
+
+                ### :octagonal_sign: Must haves
+
+                | Evaluation | Result | Notes |
+                |------------|--------|-------|
+                #{must_haves_table_rows}
+
+                ### :warning: Should haves
+
+                | Evaluation | Result | Notes |
+                |------------|--------|-------|
+                #{should_haves_table_rows}
+
+                #{data(epic)}
+
+                ---
+
+                _Please note that this automation is under testing. Please add any feedback on [this issue](https://gitlab.com/gitlab-org/quality/quality-engineering/team-tasks/-/issues/3587)._
+
+              COMMENT
+            end
+
+            def status_icon(condition)
+              condition ? ':white_check_mark:' : ':x:'
+            end
+
+            def format_links(data)
+              return '' if data.empty?
+
+              data.map do |item|
+                item.map { |key, url| "[#{key}](#{url})" }.first
+              end.join(", ")
+            end
+
+            def missing_spec_mrs(epic)
+              epic[:issues].flat_map do |issue|
+                issue[:merge_requests].flat_map do |mr|
+                  next [] unless mr[:files_with_missing_specs]&.any?
+
+                  mr[:files_with_missing_specs].map do |file|
+                    { file => mr[:merge_request_web_url] }
+                  end
+                end.compact
+              end
+            end
+
+            def data(epic)
+              output = StringIO.new
+              PP.pp(epic, output)
+              <<~DATA
+                <details><summary>Expand for data</summary>
+
+                ```ruby
+                #{output.string}
+                ```
+
+                </details>
+              DATA
+            end
+          end
+        end
+      end
+    end
+  end
+end
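To illustrate the row data the report builder expects (all values below are invented): `format_links` takes an array of single-pair hashes mapping a link title to a URL, and each row becomes one markdown table line.

```ruby
doc_mrs = [
  { 'Docs: add feature overview' => 'https://gitlab.com/gitlab-org/gitlab/-/merge_requests/111' },
  { 'Docs: follow-up fixes' => 'https://gitlab.com/gitlab-org/gitlab/-/merge_requests/222' }
]

# Mirrors format_links above: "[title](url), [title](url)"
links = doc_mrs.map { |item| item.map { |key, url| "[#{key}](#{url})" }.first }.join(', ')

# A must-haves row then renders in the comment as:
# | Documentation added? | :white_check_mark: | Added in: [Docs: add feature overview](...), [Docs: follow-up fixes](...) |
```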
data/lib/gitlab_quality/test_tooling/report/flaky_test_issue.rb

@@ -16,7 +16,7 @@ module GitlabQuality
      NEW_ISSUE_LABELS = Set.new(['type::maintenance', 'priority::3', 'severity::3', *IDENTITY_LABELS]).freeze
      REPORT_SECTION_HEADER = '### Flakiness reports'
      REPORTS_DOCUMENTATION = <<~DOC
-        Flaky tests were detected. Please refer to the [Flaky tests reproducibility instructions](https://docs.gitlab.com/
+        Flaky tests were detected. Please refer to the [Flaky tests reproducibility instructions](https://docs.gitlab.com/development/testing_guide/unhealthy_tests/#how-to-reproduce-a-flaky-test-locally)
        to learn more about how to reproduce them.
      DOC

data/lib/gitlab_quality/test_tooling/report/generate_test_session.rb

@@ -33,7 +33,7 @@ module GitlabQuality
        issue = gitlab.create_issue(
          title: "#{Time.now.strftime('%Y-%m-%d')} Test session report | #{Runtime::Env.qa_run_type}",
          description: generate_description(tests),
-          labels: ['automation:bot-authored', '
+          labels: ['automation:bot-authored', 'E2E', 'triage report', pipeline_name_label],
          confidential: confidential
        )

data/lib/gitlab_quality/test_tooling/report/group_issues/error_message_normalizer.rb

@@ -0,0 +1,49 @@
+# frozen_string_literal: true
+
+require 'digest'
+require 'openssl'
+
+module GitlabQuality
+  module TestTooling
+    module Report
+      module GroupIssues
+        class ErrorMessageNormalizer
+          NORMALIZATION_PATTERNS = [
+            { pattern: /\d{4}-\d{2}-\d{2}T?[ ]?\d{2}:\d{2}:\d{2}(\.\d+)?Z?/, replacement: "<TIMESTAMP>" },
+            { pattern: /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i, replacement: "<UUID>" },
+            { pattern: /Correlation Id: [\w]+/, replacement: "Correlation Id: <UUID>" },
+            { pattern: /Fabrication of QA::Resource::[A-Za-z:]+/, replacement: "Fabrication of QA::Resource::<RESOURCE>" },
+            { pattern: /\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(:\d+)?\b/, replacement: "<IP>" },
+            { pattern: /user\d+/, replacement: "<USER>" },
+            { pattern: /group\d+/, replacement: "<GROUP>" },
+            { pattern: /project\d+/, replacement: "<PROJECT>" },
+            { pattern: %r{https?://[^/\s]+/[^\s]*}, replacement: "<URL>" },
+            { pattern: %r{/tmp/[^\s]+}, replacement: "<TMPFILE>" },
+            { pattern: %r{/var/[^\s]+}, replacement: "<VARFILE>" },
+            { pattern: /token=[^\s&]+/, replacement: "token=<TOKEN>" },
+            { pattern: /after \d+ seconds/, replacement: "after <N> seconds" },
+            { pattern: /waited \d+ seconds/, replacement: "waited <N> seconds" },
+            { pattern: /\d+ attempts?/, replacement: "<N> attempts" },
+            { pattern: /\s+/, replacement: " " }
+          ].freeze
+
+          def normalize(message)
+            return "" if message.nil? || message.empty?
+
+            result = message.dup.strip
+
+            NORMALIZATION_PATTERNS.each do |pattern_rule|
+              result.gsub!(pattern_rule[:pattern], pattern_rule[:replacement])
+            end
+
+            result.strip
+          end
+
+          def create_fingerprint(normalized_message)
+            OpenSSL::Digest::SHA256.hexdigest(normalized_message.downcase)[0..15]
+          end
+        end
+      end
+    end
+  end
+end
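A usage sketch of the normalizer (the error message is invented): normalization strips volatile details so that repeated environment failures hash to the same fingerprint.

```ruby
normalizer = GitlabQuality::TestTooling::Report::GroupIssues::ErrorMessageNormalizer.new

message = "Fabrication of QA::Resource::Project using the API failed (500) " \
          "at 2024-05-01T12:34:56Z for user42, Correlation Id: abc123def456"

normalizer.normalize(message)
# => "Fabrication of QA::Resource::<RESOURCE> using the API failed (500) at <TIMESTAMP> for <USER>, Correlation Id: <UUID>"

normalizer.create_fingerprint(normalizer.normalize(message))
# => 16-character hex digest, stable across messages that normalize identically
```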
data/lib/gitlab_quality/test_tooling/report/group_issues/error_pattern_matcher.rb

@@ -0,0 +1,36 @@
+# frozen_string_literal: true
+
+module GitlabQuality
+  module TestTooling
+    module Report
+      module GroupIssues
+        class ErrorPatternMatcher
+          ENVIRONMENT_ERROR_PATTERNS = [
+            { name: "http_500_api_fabrication", pattern: /Fabrication of .+ using the API failed \(500\)/i },
+            { name: "http_500_internal_server", pattern: /(500 Internal Server Error|request returned \(500\)|Expected \(200\), request returned \(500\))/i },
+            { name: "http_400_backend_failing", pattern: /failed \(400\) with.+connections to all backends failing/i },
+            { name: "http_503_service_unavailable", pattern: /Unexpected status code 503/i },
+            { name: "pipeline_creation_timeout", pattern: /Wait for pipeline to be created failed after \d+ seconds/i },
+            { name: "event_timeout", pattern: /(Timed out waiting for event|EventNotFoundError: Timed out waiting)/i },
+            { name: "git_rpc_failure", pattern: /error: RPC failed; HTTP 500/i },
+            { name: "repository_fabricate_error", pattern: /Repository fabricate/i }
+          ].freeze
+
+          def match(error_message)
+            return nil if error_message.nil? || error_message.empty?
+
+            ENVIRONMENT_ERROR_PATTERNS.find { |pattern_def| error_message.match?(pattern_def[:pattern]) }
+          end
+
+          def environment_error?(error_message)
+            !match(error_message).nil?
+          end
+
+          def pattern_name(error_message)
+            match(error_message)&.dig(:name)
+          end
+        end
+      end
+    end
+  end
+end
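A usage sketch of the matcher (message invented): it classifies a failure as an environment error and returns the matching pattern's name.

```ruby
matcher = GitlabQuality::TestTooling::Report::GroupIssues::ErrorPatternMatcher.new

message = "Fabrication of QA::Resource::Group using the API failed (500)"

matcher.environment_error?(message) # => true
matcher.pattern_name(message)       # => "http_500_api_fabrication"
```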
data/lib/gitlab_quality/test_tooling/report/group_issues/failure_processor.rb

@@ -0,0 +1,73 @@
+# frozen_string_literal: true
+
+module GitlabQuality
+  module TestTooling
+    module Report
+      module GroupIssues
+        class FailureProcessor
+          DEFAULT_MIN_FAILURES = 2
+
+          def initialize(options = {})
+            @options = options
+            @pattern_matcher = options[:pattern_matcher] || ErrorPatternMatcher.new
+            @normalizer = options[:normalizer] || ErrorMessageNormalizer.new
+            @config = options[:config] || {}
+          end
+
+          def process_failures(failures, &)
+            Runtime::Logger.info "Processing #{failures.size} failures for grouping..."
+            grouped_failures = {}
+
+            failures.each do |failure|
+              process_single_failure(failure, grouped_failures)
+            end
+
+            Runtime::Logger.info "Found #{grouped_failures.size} groups before filtering"
+            grouped_failures.each_value(&)
+          end
+
+          def filter_groups_by_threshold(grouped_failures)
+            min_failures = @config.dig(:thresholds, :min_failures_to_group) || DEFAULT_MIN_FAILURES
+
+            grouped_failures.select! do |_fingerprint, grouped_failure|
+              grouped_failure[:failures].size >= min_failures
+            end
+
+            Runtime::Logger.info "Found #{grouped_failures.size} groups after filtering"
+          end
+
+          private
+
+          def process_single_failure(failure, grouped_failures)
+            error_message = failure.dig(:exception, 'message') || failure.dig(:exceptions, 0, 'message')
+            Runtime::Logger.info "Processing failure: #{failure[:description]}"
+            Runtime::Logger.info "Error message: #{error_message[0..100]}..." if error_message
+
+            return unless error_message && @pattern_matcher.environment_error?(error_message)
+
+            Runtime::Logger.info "Identified as environment error"
+            group_environment_failure(failure, error_message, grouped_failures)
+          end
+
+          def group_environment_failure(failure, error_message, grouped_failures)
+            normalized_message = @normalizer.normalize(error_message)
+            fingerprint = @normalizer.create_fingerprint(normalized_message)
+            pattern_name = @pattern_matcher.pattern_name(error_message)
+
+            grouped_failures[fingerprint] ||= build_grouped_failure(fingerprint, pattern_name, normalized_message)
+            grouped_failures[fingerprint][:failures] << failure
+          end
+
+          def build_grouped_failure(fingerprint, pattern_name, normalized_message)
+            {
+              fingerprint: fingerprint,
+              pattern_name: pattern_name,
+              normalized_message: normalized_message,
+              failures: []
+            }
+          end
+        end
+      end
+    end
+  end
+end
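A sketch of how these pieces appear to fit together (the failure hashes below are invented, and the actual caller — presumably the group_results_in_issues.rb file listed above — is not shown in this excerpt):

```ruby
processor = GitlabQuality::TestTooling::Report::GroupIssues::FailureProcessor.new(
  config: { thresholds: { min_failures_to_group: 2 } }
)

failures = [
  { description: 'creates a project', exception: { 'message' => '500 Internal Server Error' } },
  { description: 'creates a group', exception: { 'message' => '500 Internal Server Error' } }
]

# Both failures normalize to the same fingerprint, so they land in a single group.
processor.process_failures(failures) do |group|
  puts "#{group[:pattern_name]}: #{group[:failures].size} failure(s)"
  # => http_500_internal_server: 2 failure(s)
end
```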