jirametrics 2.17.1 → 2.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 4e9534fd6ca22944557cfe40c63ebf5c30ff111458ec52dd3e35617246315ce8
4
- data.tar.gz: 2feb93d3ae826f133902664751f3cee4e54de3512daaaa882ae78fed47605e65
3
+ metadata.gz: b543d43a9d49fc8e9da3f0a484e37ca8001c2137357c903276ff0998094ab902
4
+ data.tar.gz: 21ab2447b75c14616cdabf685dd844bad51ef5f73babe1625b075f487fd97ae2
5
5
  SHA512:
6
- metadata.gz: 84942ea82c68aa66325299e5db0e5f5a4fe4af95eb909d1fde004435b53898c60ee7dde2251fec0e075f4b2db7ba2f1eade384795370f3b90d7230926e79d2b9
7
- data.tar.gz: 9015a7c5ef2b60726dca272a0b5b07de9f62d76e076038e4c8d45cbae13facf37f7e8f45dac82639c72615bbee549de756d3fb4792d21345fb4d3dc3fb17a32e
6
+ metadata.gz: e042949446b1b6e6419473d75d59116b165c9b2328f3db2a9231ae6a089720dc113b7a6d69a36291fcb26b614a881482fa3a487e339c55ca096cc8e51dc73a22
7
+ data.tar.gz: ffb695346193cf32062fac1622b7da8afe6263c1804986fe71764b391bc21e8df3522fe801047cf27ce09177dd87230deaa9b1a68003fa708b6ebc2a9f107c43
@@ -13,9 +13,9 @@ class AtlassianDocumentFormat
13
13
  input
14
14
  .gsub(/{color:(#\w{6})}([^{]+){color}/, '<span style="color: \1">\2</span>') # Colours
15
15
  .gsub(/\[~accountid:([^\]]+)\]/) { expand_account_id $1 } # Tagged people
16
- .gsub(/\[([^\|]+)\|(https?[^\]]+)\]/, '<a href="\2">\1</a>') # URLs
16
+ .gsub(/\[([^|]+)\|(https?[^\]]+)\]/, '<a href="\2">\1</a>') # URLs
17
17
  .gsub("\n", '<br />')
18
- elsif input['content']
18
+ elsif input&.[]('content')
19
19
  input['content'].collect { |element| adf_node_to_html element }.join("\n")
20
20
  else
21
21
  # We have an actual ADF document with no content.
@@ -25,7 +25,7 @@ class DownloadIssueData
25
25
  end
26
26
 
27
27
  class Downloader
28
- CURRENT_METADATA_VERSION = 4
28
+ CURRENT_METADATA_VERSION = 5
29
29
 
30
30
  attr_accessor :metadata
31
31
  attr_reader :file_system
@@ -284,92 +284,4 @@ class Downloader
284
284
  def file_prefix
285
285
  @download_config.project_config.get_file_prefix
286
286
  end
287
-
288
- def download_issues board:
289
- log " Downloading primary issues for board #{board.id} from #{jira_instance_type}", both: true
290
- path = File.join(@target_path, "#{file_prefix}_issues/")
291
- unless @file_system.dir_exist?(path)
292
- log " Creating path #{path}"
293
- @file_system.mkdir(path)
294
- end
295
-
296
- filter_id = @board_id_to_filter_id[board.id]
297
- jql = make_jql(filter_id: filter_id)
298
- intercept_jql = @download_config.project_config.settings['intercept_jql']
299
- jql = intercept_jql.call jql if intercept_jql
300
-
301
- issue_data_hash = search_for_issues jql: jql, board_id: board.id, path: path
302
-
303
- loop do
304
- related_issue_keys = Set.new
305
- issue_data_hash
306
- .values
307
- .reject { |data| data.up_to_date }
308
- .each_slice(100) do |slice|
309
- slice = bulk_fetch_issues(
310
- issue_datas: slice, board: board, in_initial_query: true
311
- )
312
- slice.each do |data|
313
- @file_system.save_json(
314
- json: data.issue.raw, filename: data.cache_path
315
- )
316
- # Set the timestamp on the file to match the updated one so that we don't have
317
- # to parse the file just to find the timestamp
318
- @file_system.utime time: data.issue.updated, file: data.cache_path
319
-
320
- issue = data.issue
321
- next unless issue
322
-
323
- parent_key = issue.parent_key(project_config: @download_config.project_config)
324
- related_issue_keys << parent_key if parent_key
325
-
326
- # Sub-tasks
327
- issue.raw['fields']['subtasks']&.each do |raw_subtask|
328
- related_issue_keys << raw_subtask['key']
329
- end
330
- end
331
- end
332
-
333
- # Remove all the ones we already downloaded
334
- related_issue_keys.reject! { |key| issue_data_hash[key] }
335
-
336
- related_issue_keys.each do |key|
337
- data = DownloadIssueData.new key: key
338
- data.found_in_primary_query = false
339
- data.up_to_date = false
340
- data.cache_path = File.join(path, "#{key}-#{board.id}.json")
341
- issue_data_hash[key] = data
342
- end
343
- break if related_issue_keys.empty?
344
-
345
- log " Downloading linked issues for board #{board.id}", both: true
346
- end
347
-
348
- delete_issues_from_cache_that_are_not_in_server(
349
- issue_data_hash: issue_data_hash, path: path
350
- )
351
- end
352
-
353
- def delete_issues_from_cache_that_are_not_in_server issue_data_hash:, path:
354
- # The gotcha with deleted issues is that they just stop being returned in queries
355
- # and we have no way to know that they should be removed from our local cache.
356
- # With the new approach, we ask for every issue that Jira knows about (within
357
- # the parameters of the query) and then delete anything that's in our local cache
358
- # but wasn't returned.
359
- @file_system.foreach path do |file|
360
- next if file.start_with? '.'
361
- unless /^(?<key>\w+-\d+)-\d+\.json$/ =~ file
362
- raise "Unexpected filename in #{path}: #{file}"
363
- end
364
- next if issue_data_hash[key] # Still in Jira
365
-
366
- file_to_delete = File.join(path, file)
367
- log " Removing #{file_to_delete} from local cache"
368
- file_system.unlink file_to_delete
369
- end
370
- end
371
-
372
- def last_modified filename:
373
- File.mtime(filename) if File.exist?(filename)
374
- end
375
287
  end
@@ -111,4 +111,92 @@ class DownloaderForCloud < Downloader
111
111
  break if next_page_token.nil?
112
112
  end
113
113
  end
114
+
115
+ def download_issues board:
116
+ log " Downloading primary issues for board #{board.id} from #{jira_instance_type}", both: true
117
+ path = File.join(@target_path, "#{file_prefix}_issues/")
118
+ unless @file_system.dir_exist?(path)
119
+ log " Creating path #{path}"
120
+ @file_system.mkdir(path)
121
+ end
122
+
123
+ filter_id = @board_id_to_filter_id[board.id]
124
+ jql = make_jql(filter_id: filter_id)
125
+ intercept_jql = @download_config.project_config.settings['intercept_jql']
126
+ jql = intercept_jql.call jql if intercept_jql
127
+
128
+ issue_data_hash = search_for_issues jql: jql, board_id: board.id, path: path
129
+
130
+ loop do
131
+ related_issue_keys = Set.new
132
+ issue_data_hash
133
+ .values
134
+ .reject { |data| data.up_to_date }
135
+ .each_slice(100) do |slice|
136
+ slice = bulk_fetch_issues(
137
+ issue_datas: slice, board: board, in_initial_query: true
138
+ )
139
+ slice.each do |data|
140
+ @file_system.save_json(
141
+ json: data.issue.raw, filename: data.cache_path
142
+ )
143
+ # Set the timestamp on the file to match the updated one so that we don't have
144
+ # to parse the file just to find the timestamp
145
+ @file_system.utime time: data.issue.updated, file: data.cache_path
146
+
147
+ issue = data.issue
148
+ next unless issue
149
+
150
+ parent_key = issue.parent_key(project_config: @download_config.project_config)
151
+ related_issue_keys << parent_key if parent_key
152
+
153
+ # Sub-tasks
154
+ issue.raw['fields']['subtasks']&.each do |raw_subtask|
155
+ related_issue_keys << raw_subtask['key']
156
+ end
157
+ end
158
+ end
159
+
160
+ # Remove all the ones we already downloaded
161
+ related_issue_keys.reject! { |key| issue_data_hash[key] }
162
+
163
+ related_issue_keys.each do |key|
164
+ data = DownloadIssueData.new key: key
165
+ data.found_in_primary_query = false
166
+ data.up_to_date = false
167
+ data.cache_path = File.join(path, "#{key}-#{board.id}.json")
168
+ issue_data_hash[key] = data
169
+ end
170
+ break if related_issue_keys.empty?
171
+
172
+ log " Downloading linked issues for board #{board.id}", both: true
173
+ end
174
+
175
+ delete_issues_from_cache_that_are_not_in_server(
176
+ issue_data_hash: issue_data_hash, path: path
177
+ )
178
+ end
179
+
180
+ def delete_issues_from_cache_that_are_not_in_server issue_data_hash:, path:
181
+ # The gotcha with deleted issues is that they just stop being returned in queries
182
+ # and we have no way to know that they should be removed from our local cache.
183
+ # With the new approach, we ask for every issue that Jira knows about (within
184
+ # the parameters of the query) and then delete anything that's in our local cache
185
+ # but wasn't returned.
186
+ @file_system.foreach path do |file|
187
+ next if file.start_with? '.'
188
+ unless /^(?<key>\w+-\d+)-\d+\.json$/ =~ file
189
+ raise "Unexpected filename in #{path}: #{file}"
190
+ end
191
+ next if issue_data_hash[key] # Still in Jira
192
+
193
+ file_to_delete = File.join(path, file)
194
+ log " Removing #{file_to_delete} from local cache"
195
+ file_system.unlink file_to_delete
196
+ end
197
+ end
198
+
199
+ def last_modified filename:
200
+ File.mtime(filename) if File.exist?(filename)
201
+ end
114
202
  end
@@ -5,64 +5,90 @@ class DownloaderForDataCenter < Downloader
5
5
  'Jira DataCenter'
6
6
  end
7
7
 
8
- def search_for_issues jql:, board_id:, path:
8
+ def download_issues board:
9
+ log " Downloading primary issues for board #{board.id}", both: true
10
+ path = File.join(@target_path, "#{file_prefix}_issues/")
11
+ unless Dir.exist?(path)
12
+ log " Creating path #{path}"
13
+ Dir.mkdir(path)
14
+ end
15
+
16
+ filter_id = board_id_to_filter_id[board.id]
17
+ jql = make_jql(filter_id: filter_id)
18
+ jira_search_by_jql(jql: jql, initial_query: true, board: board, path: path)
19
+
20
+ log " Downloading linked issues for board #{board.id}", both: true
21
+ loop do
22
+ @issue_keys_pending_download.reject! { |key| @issue_keys_downloaded_in_current_run.include? key }
23
+ break if @issue_keys_pending_download.empty?
24
+
25
+ keys_to_request = @issue_keys_pending_download[0..99]
26
+ @issue_keys_pending_download.reject! { |key| keys_to_request.include? key }
27
+ jql = "key in (#{keys_to_request.join(', ')})"
28
+ jira_search_by_jql(jql: jql, initial_query: false, board: board, path: path)
29
+ end
30
+ end
31
+
32
+ def jira_search_by_jql jql:, initial_query:, board:, path:
33
+ intercept_jql = @download_config.project_config.settings['intercept_jql']
34
+ jql = intercept_jql.call jql if intercept_jql
35
+
9
36
  log " JQL: #{jql}"
10
37
  escaped_jql = CGI.escape jql
11
38
 
12
- hash = {}
13
39
  max_results = 100
14
40
  start_at = 0
15
41
  total = 1
16
42
  while start_at < total
17
43
  json = @jira_gateway.call_url relative_url: '/rest/api/2/search' \
18
- "?jql=#{escaped_jql}&maxResults=#{max_results}&startAt=#{start_at}&fields=updated"
19
- json['issues'].each do |i|
20
- key = i['key']
21
- cache_path = File.join(path, "#{key}-#{board_id}.json")
22
- last_modified = Time.parse(i['fields']['updated'])
23
- data = DownloadIssueData.new(
24
- key: key,
25
- last_modified: last_modified,
26
- found_in_primary_query: true,
27
- cache_path: cache_path,
28
- up_to_date: last_modified(filename: cache_path) == last_modified
29
- )
30
- hash[key] = data
44
+ "?jql=#{escaped_jql}&maxResults=#{max_results}&startAt=#{start_at}&expand=changelog&fields=*all"
45
+
46
+ json['issues'].each do |issue_json|
47
+ issue_json['exporter'] = {
48
+ 'in_initial_query' => initial_query
49
+ }
50
+ identify_other_issues_to_be_downloaded raw_issue: issue_json, board: board
51
+ file = "#{issue_json['key']}-#{board.id}.json"
52
+
53
+ @file_system.save_json(json: issue_json, filename: File.join(path, file))
31
54
  end
55
+
32
56
  total = json['total'].to_i
33
57
  max_results = json['maxResults']
34
58
 
35
- message = " Found #{json['issues'].count} issues"
59
+ message = " Downloaded #{start_at + 1}-#{[start_at + max_results, total].min} of #{total} issues to #{path} "
36
60
  log message, both: true
37
61
 
38
62
  start_at += json['issues'].size
39
63
  end
40
- hash
41
64
  end
42
65
 
43
- def bulk_fetch_issues issue_datas:, board:, in_initial_query:
44
- log " Downloading #{issue_datas.size} issues", both: true
45
- payload = {
46
- 'expand' => [
47
- 'changelog'
48
- ],
49
- 'fields' => ['*all'],
50
- 'issueIdsOrKeys' => issue_datas.collect(&:key)
51
- }
52
- response = @jira_gateway.post_request(
53
- relative_url: '/rest/api/2/issue/bulkfetch',
54
- payload: JSON.generate(payload)
55
- )
56
- response['issues'].each do |issue_json|
57
- issue_json['exporter'] = {
58
- 'in_initial_query' => in_initial_query
59
- }
60
- issue = Issue.new(raw: issue_json, board: board)
61
- data = issue_datas.find { |d| d.key == issue.key }
62
- data.up_to_date = true
63
- data.last_modified = issue.updated
64
- data.issue = issue
66
+ def make_jql filter_id:, today: Date.today
67
+ segments = []
68
+ segments << "filter=#{filter_id}"
69
+
70
+ start_date = @download_config.start_date today: today
71
+
72
+ if start_date
73
+ @download_date_range = start_date..today.to_date
74
+
75
+ # For an incremental download, we want to query from the end of the previous one, not from the
76
+ # beginning of the full range.
77
+ @start_date_in_query = metadata['date_end'] || @download_date_range.begin
78
+ log " Incremental download only. Pulling from #{@start_date_in_query}", both: true if metadata['date_end']
79
+
80
+ # Catch-all to pick up anything that's been around since before the range started but hasn't
81
+ # had an update during the range.
82
+ catch_all = '((status changed OR Sprint is not EMPTY) AND statusCategory != Done)'
83
+
84
+ # Pick up any issues that had a status change in the range
85
+ start_date_text = @start_date_in_query.strftime '%Y-%m-%d'
86
+ # find_in_range = %((status changed DURING ("#{start_date_text} 00:00","#{end_date_text} 23:59")))
87
+ find_in_range = %(updated >= "#{start_date_text} 00:00")
88
+
89
+ segments << "(#{find_in_range} OR #{catch_all})"
65
90
  end
66
- issue_datas
91
+
92
+ segments.join ' AND '
67
93
  end
68
94
  end
@@ -19,9 +19,10 @@ class Issue
19
19
 
20
20
  # There are cases where we create an Issue of fragments like linked issues and those won't have
21
21
  # changelogs.
22
- return unless @raw['changelog']
22
+ load_history_into_changes if @raw['changelog']
23
23
 
24
- load_history_into_changes
24
+ # As above with fragments, there may not be a fields section
25
+ return unless @raw['fields']
25
26
 
26
27
  # If this is an older pull of data then comments may not be there.
27
28
  load_comments_into_changes if @raw['fields']['comment']
@@ -152,7 +153,7 @@ class Issue
152
153
  # Are we currently in this status? If yes, then return the most recent status change.
153
154
  def currently_in_status *status_names
154
155
  change = most_recent_status_change
155
- return false if change.nil?
156
+ return nil if change.nil?
156
157
 
157
158
  change if change.current_status_matches(*status_names)
158
159
  end
@@ -162,7 +163,7 @@ class Issue
162
163
  category_ids = find_status_category_ids_by_names category_names
163
164
 
164
165
  change = most_recent_status_change
165
- return false if change.nil?
166
+ return nil if change.nil?
166
167
 
167
168
  status = find_or_create_status id: change.value_id, name: change.value
168
169
  change if status && category_ids.include?(status.category.id)
@@ -237,11 +238,11 @@ class Issue
237
238
  end
238
239
 
239
240
  def assigned_to
240
- @raw['fields']&.[]('assignee')&.[]('displayName')
241
+ @raw['fields']['assignee']&.[]('displayName')
241
242
  end
242
243
 
243
244
  def assigned_to_icon_url
244
- @raw['fields']&.[]('assignee')&.[]('avatarUrls')&.[]('16x16')
245
+ @raw['fields']['assignee']&.[]('avatarUrls')&.[]('16x16')
245
246
  end
246
247
 
247
248
  # Many test failures are simply unreadable because the default inspect on this class goes
@@ -763,6 +764,9 @@ class Issue
763
764
  first_status = nil
764
765
  first_status_id = nil
765
766
 
767
+ # There won't be a created timestamp in cases where this was a linked issue
768
+ return unless @raw['fields']['created']
769
+
766
770
  created_time = parse_time @raw['fields']['created']
767
771
  first_change = @changes.find { |change| change.field == field_name }
768
772
  if first_change.nil?
@@ -18,11 +18,15 @@ class JiraGateway
18
18
 
19
19
  def post_request relative_url:, payload:
20
20
  command = make_curl_command url: "#{@jira_url}#{relative_url}", method: 'POST'
21
+ exec_and_parse_response command: command, stdin_data: payload
22
+ end
23
+
24
+ def exec_and_parse_response command:, stdin_data:
21
25
  log_entry = " #{command.gsub(/\s+/, ' ')}"
22
26
  log_entry = sanitize_message log_entry
23
27
  @file_system.log log_entry
24
28
 
25
- stdout, stderr, status = Open3.capture3(command, stdin_data: payload)
29
+ stdout, stderr, status = capture3(command, stdin_data: stdin_data)
26
30
  unless status.success?
27
31
  @file_system.log "Failed call with exit status #{status.exitstatus}!"
28
32
  @file_system.log "Returned (stdout): #{stdout.inspect}"
@@ -31,16 +35,20 @@ class JiraGateway
31
35
  "See #{@file_system.logfile_name} for details"
32
36
  end
33
37
 
34
- @file_system.log "Returned (stderr): #{stderr}" unless stderr == ''
38
+ @file_system.log "Returned (stderr): #{stderr.inspect}" unless stderr == ''
35
39
  raise 'no response from curl on stdout' if stdout == ''
36
40
 
37
41
  parse_response(command: command, result: stdout)
38
42
  end
39
43
 
44
+ def capture3 command, stdin_data:
45
+ # In its own method so we can mock it out in tests
46
+ Open3.capture3(command, stdin_data: stdin_data)
47
+ end
48
+
40
49
  def call_url relative_url:
41
50
  command = make_curl_command url: "#{@jira_url}#{relative_url}"
42
- result = call_command command
43
- parse_response(command: command, result: result)
51
+ exec_and_parse_response command: command, stdin_data: nil
44
52
  end
45
53
 
46
54
  def parse_response command:, result:
@@ -64,20 +72,6 @@ class JiraGateway
64
72
  message.gsub(token, '[API_TOKEN]')
65
73
  end
66
74
 
67
- def call_command command
68
- log_entry = " #{command.gsub(/\s+/, ' ')}"
69
- log_entry = sanitize_message log_entry
70
- @file_system.log log_entry
71
-
72
- result = `#{command}`
73
- @file_system.log result unless $CHILD_STATUS.success?
74
- return result if $CHILD_STATUS.success?
75
-
76
- @file_system.log "Failed call with exit status #{$CHILD_STATUS.exitstatus}."
77
- raise "Failed call with exit status #{$CHILD_STATUS.exitstatus}. " \
78
- "See #{@file_system.logfile_name} for details"
79
- end
80
-
81
75
  def load_jira_config jira_config
82
76
  @jira_url = jira_config['url']
83
77
  raise 'Must specify URL in config' if @jira_url.nil?
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: jirametrics
3
3
  version: !ruby/object:Gem::Version
4
- version: 2.17.1
4
+ version: '2.18'
5
5
  platform: ruby
6
6
  authors:
7
7
  - Mike Bowler