jirametrics 2.17 → 2.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: fe52b964cb227f3dac580d4648a325d61fed7c342a7dd7d7bcb62dddd571af61
-  data.tar.gz: 59eec793880fafb6c5e53b7daf87feb78b53b7624390adfa86ed6ca0be8125f6
+  metadata.gz: b543d43a9d49fc8e9da3f0a484e37ca8001c2137357c903276ff0998094ab902
+  data.tar.gz: 21ab2447b75c14616cdabf685dd844bad51ef5f73babe1625b075f487fd97ae2
 SHA512:
-  metadata.gz: a98b06c724a2fe57511382a952093d73a7dad414c493c5298c8d35c8e889117252a2a3a6d69eadd274ca6dd90ca03cf0231e23da7b18041752d60bbad9b2fc82
-  data.tar.gz: 90fc55a7e86358636797f00e6cd4fa4eb5268b3a993ed48ff3bc8aecd0e3ae7629d721a814f5e520b58feed72075858613d11faccdea1a0548ac4a7c19a31479
+  metadata.gz: e042949446b1b6e6419473d75d59116b165c9b2328f3db2a9231ae6a089720dc113b7a6d69a36291fcb26b614a881482fa3a487e339c55ca096cc8e51dc73a22
+  data.tar.gz: ffb695346193cf32062fac1622b7da8afe6263c1804986fe71764b391bc21e8df3522fe801047cf27ce09177dd87230deaa9b1a68003fa708b6ebc2a9f107c43
@@ -2,11 +2,12 @@
 
 require 'random-word'
 
-class Anonymizer
+class Anonymizer < ChartBase
   # needed for testing
   attr_reader :project_config, :issues
 
   def initialize project_config:, date_adjustment: -200
+    super()
     @project_config = project_config
     @issues = @project_config.issues
     @all_boards = @project_config.all_boards
@@ -130,18 +131,19 @@ class Anonymizer
     end
   end
 
-  def shift_all_dates
-    @file_system.log "Shifting all dates by #{@date_adjustment} days"
+  def shift_all_dates date_adjustment: @date_adjustment
+    adjustment_in_seconds = 60 * 60 * 24 * date_adjustment
+    @file_system.log "Shifting all dates by #{label_days date_adjustment}"
     @issues.each do |issue|
       issue.changes.each do |change|
-        change.time = change.time + @date_adjustment
+        change.time = change.time + adjustment_in_seconds
       end
 
-      issue.raw['fields']['updated'] = (issue.updated + @date_adjustment).to_s
+      issue.raw['fields']['updated'] = (issue.updated + adjustment_in_seconds).to_s
     end
 
     range = @project_config.time_range
-    @project_config.time_range = (range.begin + date_adjustment)..(range.end + date_adjustment)
+    @project_config.time_range = (range.begin + date_adjustment)..(range.end + date_adjustment)
   end
 
   def random_name
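
The seconds conversion above is the substance of this fix: Ruby's Time#+ adds seconds, so the old code shifted timestamps by @date_adjustment seconds rather than days. A minimal sketch of the difference (dates are illustrative):

    require 'time'

    seconds_per_day = 60 * 60 * 24
    t = Time.parse('2024-06-01 09:00:00 UTC')
    t + -200                      # => 2024-06-01 08:56:40 UTC, only 200 seconds earlier
    t + (-200 * seconds_per_day)  # => 2023-11-14 09:00:00 UTC, 200 days earlier as intended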
@@ -13,9 +13,9 @@ class AtlassianDocumentFormat
       input
         .gsub(/{color:(#\w{6})}([^{]+){color}/, '<span style="color: \1">\2</span>') # Colours
         .gsub(/\[~accountid:([^\]]+)\]/) { expand_account_id $1 } # Tagged people
-        .gsub(/\[([^\|]+)\|(https?[^\]]+)\]/, '<a href="\2">\1</a>') # URLs
+        .gsub(/\[([^|]+)\|(https?[^\]]+)\]/, '<a href="\2">\1</a>') # URLs
         .gsub("\n", '<br />')
-    elsif input['content']
+    elsif input&.[]('content')
       input['content'].collect { |element| adf_node_to_html element }.join("\n")
     else
       # We have an actual ADF document with no content.
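
Two small hardening changes here: inside a character class the pipe needs no escape, so [^\|] becomes [^|] with identical behaviour, and the elsif gains a safe-navigation guard so a nil input no longer raises. A quick illustration of the &.[] form (values are illustrative):

    input = nil
    input&.[]('content')                # => nil, where input['content'] would raise NoMethodError
    { 'content' => [] }&.[]('content')  # => []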
@@ -1,8 +1,8 @@
 # frozen_string_literal: true
 
 class ChangeItem
-  attr_reader :field, :value_id, :old_value_id, :raw, :time, :author_raw
-  attr_accessor :value, :old_value
+  attr_reader :field, :value_id, :old_value_id, :raw, :author_raw
+  attr_accessor :value, :old_value, :time
 
   def initialize raw:, author_raw:, time:, artificial: false
     @raw = raw
@@ -25,7 +25,7 @@ class DownloadIssueData
 end
 
 class Downloader
-  CURRENT_METADATA_VERSION = 4
+  CURRENT_METADATA_VERSION = 5
 
   attr_accessor :metadata
   attr_reader :file_system
@@ -284,118 +284,4 @@ class Downloader
   def file_prefix
     @download_config.project_config.get_file_prefix
   end
-
-  def download_issues board:
-    log "  Downloading primary issues for board #{board.id} from #{jira_instance_type}", both: true
-    path = File.join(@target_path, "#{file_prefix}_issues/")
-    unless @file_system.dir_exist?(path)
-      log "  Creating path #{path}"
-      @file_system.mkdir(path)
-    end
-
-    filter_id = @board_id_to_filter_id[board.id]
-    jql = make_jql(filter_id: filter_id)
-    intercept_jql = @download_config.project_config.settings['intercept_jql']
-    jql = intercept_jql.call jql if intercept_jql
-
-    issue_data_hash = search_for_issues jql: jql, board_id: board.id, path: path
-
-    loop do
-      related_issue_keys = Set.new
-      issue_data_hash
-        .values
-        .reject { |data| data.up_to_date }
-        .each_slice(100) do |slice|
-          slice = bulk_fetch_issues(
-            issue_datas: slice, board: board, in_initial_query: true
-          )
-          slice.each do |data|
-            @file_system.save_json(
-              json: data.issue.raw, filename: data.cache_path
-            )
-            # Set the timestamp on the file to match the updated one so that we don't have
-            # to parse the file just to find the timestamp
-            @file_system.utime time: data.issue.updated, file: data.cache_path
-
-            issue = data.issue
-            next unless issue
-
-            parent_key = issue.parent_key(project_config: @download_config.project_config)
-            related_issue_keys << parent_key if parent_key
-
-            # Sub-tasks
-            issue.raw['fields']['subtasks']&.each do |raw_subtask|
-              related_issue_keys << raw_subtask['key']
-            end
-          end
-        end
-
-      # Remove all the ones we already downloaded
-      related_issue_keys.reject! { |key| issue_data_hash[key] }
-
-      related_issue_keys.each do |key|
-        data = DownloadIssueData.new key: key
-        data.found_in_primary_query = false
-        data.up_to_date = false
-        data.cache_path = File.join(path, "#{key}-#{board.id}.json")
-        issue_data_hash[key] = data
-      end
-      break if related_issue_keys.empty?
-
-      log "  Downloading linked issues for board #{board.id}", both: true
-    end
-
-    delete_issues_from_cache_that_are_not_in_server(
-      issue_data_hash: issue_data_hash, path: path
-    )
-  end
-
-  def bulk_fetch_issues issue_datas:, board:, in_initial_query:
-    log "  Downloading #{issue_datas.size} issues", both: true
-    payload = {
-      'expand' => [
-        'changelog'
-      ],
-      'fields' => ['*all'],
-      'issueIdsOrKeys' => issue_datas.collect(&:key)
-    }
-    response = @jira_gateway.post_request(
-      relative_url: issue_bulk_fetch_api,
-      payload: JSON.generate(payload)
-    )
-    response['issues'].each do |issue_json|
-      issue_json['exporter'] = {
-        'in_initial_query' => in_initial_query
-      }
-      issue = Issue.new(raw: issue_json, board: board)
-      data = issue_datas.find { |d| d.key == issue.key }
-      data.up_to_date = true
-      data.last_modified = issue.updated
-      data.issue = issue
-    end
-    issue_datas
-  end
-
-  def delete_issues_from_cache_that_are_not_in_server issue_data_hash:, path:
-    # The gotcha with deleted issues is that they just stop being returned in queries
-    # and we have no way to know that they should be removed from our local cache.
-    # With the new approach, we ask for every issue that Jira knows about (within
-    # the parameters of the query) and then delete anything that's in our local cache
-    # but wasn't returned.
-    @file_system.foreach path do |file|
-      next if file.start_with? '.'
-      unless /^(?<key>\w+-\d+)-\d+\.json$/ =~ file
-        raise "Unexpected filename in #{path}: #{file}"
-      end
-      next if issue_data_hash[key] # Still in Jira
-
-      file_to_delete = File.join(path, file)
-      log "  Removing #{file_to_delete} from local cache"
-      file_system.unlink file_to_delete
-    end
-  end
-
-  def last_modified filename:
-    File.mtime(filename) if File.exist?(filename)
-  end
 end
@@ -44,7 +44,159 @@ class DownloaderForCloud < Downloader
     hash
   end
 
-  def issue_bulk_fetch_api
-    '/rest/api/3/issue/bulkfetch'
+  def bulk_fetch_issues issue_datas:, board:, in_initial_query:
+    # We used to use the expand option to pull in the changelog directly. Unfortunately
+    # that only returns the "recent" changes, not all of them. So now we get the issue
+    # without changes and then make a second call for the changes. Then we insert them
+    # into the raw issue as if they had been there all along.
+    log "  Downloading #{issue_datas.size} issues", both: true
+    payload = {
+      'fields' => ['*all'],
+      'issueIdsOrKeys' => issue_datas.collect(&:key)
+    }
+    response = @jira_gateway.post_request(
+      relative_url: '/rest/api/3/issue/bulkfetch',
+      payload: JSON.generate(payload)
+    )
+
+    attach_changelog_to_issues issue_datas: issue_datas, issue_jsons: response['issues']
+
+    response['issues'].each do |issue_json|
+      issue_json['exporter'] = {
+        'in_initial_query' => in_initial_query
+      }
+      issue = Issue.new(raw: issue_json, board: board)
+      data = issue_datas.find { |d| d.key == issue.key }
+      data.up_to_date = true
+      data.last_modified = issue.updated
+      data.issue = issue
+    end
+
+    issue_datas
+  end
+
+  def attach_changelog_to_issues issue_datas:, issue_jsons:
+    max_results = 10_000 # The max jira accepts is 10K
+    payload = {
+      'issueIdsOrKeys' => issue_datas.collect(&:key),
+      'maxResults' => max_results
+    }
+    loop do
+      response = @jira_gateway.post_request(
+        relative_url: '/rest/api/3/changelog/bulkfetch',
+        payload: JSON.generate(payload)
+      )
+
+      response['issueChangeLogs'].each do |issue_change_log|
+        issue_id = issue_change_log['issueId']
+        json = issue_jsons.find { |json| json['id'] == issue_id }
+
+        unless json['changelog']
+          # If this is our first time in, there won't be a changelog section
+          json['changelog'] = {
+            'startAt' => 0,
+            'maxResults' => max_results,
+            'total' => 0,
+            'histories' => []
+          }
+        end
+
+        new_changes = issue_change_log['changeHistories']
+        json['changelog']['total'] += new_changes.size
+        json['changelog']['histories'] += new_changes
+      end
+
+      next_page_token = response['nextPageToken']
+      payload['nextPageToken'] = next_page_token
+      break if next_page_token.nil?
+    end
+  end
+
+  def download_issues board:
+    log "  Downloading primary issues for board #{board.id} from #{jira_instance_type}", both: true
+    path = File.join(@target_path, "#{file_prefix}_issues/")
+    unless @file_system.dir_exist?(path)
+      log "  Creating path #{path}"
+      @file_system.mkdir(path)
+    end
+
+    filter_id = @board_id_to_filter_id[board.id]
+    jql = make_jql(filter_id: filter_id)
+    intercept_jql = @download_config.project_config.settings['intercept_jql']
+    jql = intercept_jql.call jql if intercept_jql
+
+    issue_data_hash = search_for_issues jql: jql, board_id: board.id, path: path
+
+    loop do
+      related_issue_keys = Set.new
+      issue_data_hash
+        .values
+        .reject { |data| data.up_to_date }
+        .each_slice(100) do |slice|
+          slice = bulk_fetch_issues(
+            issue_datas: slice, board: board, in_initial_query: true
+          )
+          slice.each do |data|
+            @file_system.save_json(
+              json: data.issue.raw, filename: data.cache_path
+            )
+            # Set the timestamp on the file to match the updated one so that we don't have
+            # to parse the file just to find the timestamp
+            @file_system.utime time: data.issue.updated, file: data.cache_path
+
+            issue = data.issue
+            next unless issue
+
+            parent_key = issue.parent_key(project_config: @download_config.project_config)
+            related_issue_keys << parent_key if parent_key
+
+            # Sub-tasks
+            issue.raw['fields']['subtasks']&.each do |raw_subtask|
+              related_issue_keys << raw_subtask['key']
+            end
+          end
+        end
+
+      # Remove all the ones we already downloaded
+      related_issue_keys.reject! { |key| issue_data_hash[key] }
+
+      related_issue_keys.each do |key|
+        data = DownloadIssueData.new key: key
+        data.found_in_primary_query = false
+        data.up_to_date = false
+        data.cache_path = File.join(path, "#{key}-#{board.id}.json")
+        issue_data_hash[key] = data
+      end
+      break if related_issue_keys.empty?
+
+      log "  Downloading linked issues for board #{board.id}", both: true
+    end
+
+    delete_issues_from_cache_that_are_not_in_server(
+      issue_data_hash: issue_data_hash, path: path
+    )
+  end
+
+  def delete_issues_from_cache_that_are_not_in_server issue_data_hash:, path:
+    # The gotcha with deleted issues is that they just stop being returned in queries
+    # and we have no way to know that they should be removed from our local cache.
+    # With the new approach, we ask for every issue that Jira knows about (within
+    # the parameters of the query) and then delete anything that's in our local cache
+    # but wasn't returned.
+    @file_system.foreach path do |file|
+      next if file.start_with? '.'
+      unless /^(?<key>\w+-\d+)-\d+\.json$/ =~ file
+        raise "Unexpected filename in #{path}: #{file}"
+      end
+      next if issue_data_hash[key] # Still in Jira
+
+      file_to_delete = File.join(path, file)
+      log "  Removing #{file_to_delete} from local cache"
+      file_system.unlink file_to_delete
+    end
+  end
+
+  def last_modified filename:
+    File.mtime(filename) if File.exist?(filename)
   end
 end
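
The pagination loop in attach_changelog_to_issues drives off three fields of the response. Reconstructed from only what the code reads, a trimmed response would look roughly like this (the shape is illustrative, not the full API contract):

    # Hypothetical trimmed response from POST /rest/api/3/changelog/bulkfetch
    response = {
      'issueChangeLogs' => [
        { 'issueId' => '10001', 'changeHistories' => [] } # history entries elided
      ],
      'nextPageToken' => 'abc123' # nil or absent on the last page, which ends the loop
    }
    response['issueChangeLogs'].first['issueId'] # => "10001"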
@@ -5,42 +5,90 @@ class DownloaderForDataCenter < Downloader
     'Jira DataCenter'
   end
 
-  def search_for_issues jql:, board_id:, path:
+  def download_issues board:
+    log "  Downloading primary issues for board #{board.id}", both: true
+    path = File.join(@target_path, "#{file_prefix}_issues/")
+    unless Dir.exist?(path)
+      log "  Creating path #{path}"
+      Dir.mkdir(path)
+    end
+
+    filter_id = board_id_to_filter_id[board.id]
+    jql = make_jql(filter_id: filter_id)
+    jira_search_by_jql(jql: jql, initial_query: true, board: board, path: path)
+
+    log "  Downloading linked issues for board #{board.id}", both: true
+    loop do
+      @issue_keys_pending_download.reject! { |key| @issue_keys_downloaded_in_current_run.include? key }
+      break if @issue_keys_pending_download.empty?
+
+      keys_to_request = @issue_keys_pending_download[0..99]
+      @issue_keys_pending_download.reject! { |key| keys_to_request.include? key }
+      jql = "key in (#{keys_to_request.join(', ')})"
+      jira_search_by_jql(jql: jql, initial_query: false, board: board, path: path)
+    end
+  end
+
+  def jira_search_by_jql jql:, initial_query:, board:, path:
+    intercept_jql = @download_config.project_config.settings['intercept_jql']
+    jql = intercept_jql.call jql if intercept_jql
+
     log "  JQL: #{jql}"
     escaped_jql = CGI.escape jql
 
-    hash = {}
     max_results = 100
     start_at = 0
     total = 1
     while start_at < total
       json = @jira_gateway.call_url relative_url: '/rest/api/2/search' \
-        "?jql=#{escaped_jql}&maxResults=#{max_results}&startAt=#{start_at}&fields=updated"
-      json['issues'].each do |i|
-        key = i['key']
-        cache_path = File.join(path, "#{key}-#{board_id}.json")
-        last_modified = Time.parse(i['fields']['updated'])
-        data = DownloadIssueData.new(
-          key: key,
-          last_modified: last_modified,
-          found_in_primary_query: true,
-          cache_path: cache_path,
-          up_to_date: last_modified(filename: cache_path) == last_modified
-        )
-        hash[key] = data
+        "?jql=#{escaped_jql}&maxResults=#{max_results}&startAt=#{start_at}&expand=changelog&fields=*all"
+
+      json['issues'].each do |issue_json|
+        issue_json['exporter'] = {
+          'in_initial_query' => initial_query
+        }
+        identify_other_issues_to_be_downloaded raw_issue: issue_json, board: board
+        file = "#{issue_json['key']}-#{board.id}.json"
+
+        @file_system.save_json(json: issue_json, filename: File.join(path, file))
       end
+
       total = json['total'].to_i
       max_results = json['maxResults']
 
-      message = "  Found #{json['issues'].count} issues"
+      message = "  Downloaded #{start_at + 1}-#{[start_at + max_results, total].min} of #{total} issues to #{path} "
       log message, both: true
 
       start_at += json['issues'].size
     end
-    hash
   end
 
-  def issue_bulk_fetch_api
-    '/rest/api/2/issue/bulkfetch'
+  def make_jql filter_id:, today: Date.today
+    segments = []
+    segments << "filter=#{filter_id}"
+
+    start_date = @download_config.start_date today: today
+
+    if start_date
+      @download_date_range = start_date..today.to_date
+
+      # For an incremental download, we want to query from the end of the previous one, not from the
+      # beginning of the full range.
+      @start_date_in_query = metadata['date_end'] || @download_date_range.begin
+      log "  Incremental download only. Pulling from #{@start_date_in_query}", both: true if metadata['date_end']
+
+      # Catch-all to pick up anything that's been around since before the range started but hasn't
+      # had an update during the range.
+      catch_all = '((status changed OR Sprint is not EMPTY) AND statusCategory != Done)'
+
+      # Pick up any issues that had a status change in the range
+      start_date_text = @start_date_in_query.strftime '%Y-%m-%d'
+      # find_in_range = %((status changed DURING ("#{start_date_text} 00:00","#{end_date_text} 23:59")))
+      find_in_range = %(updated >= "#{start_date_text} 00:00")
+
+      segments << "(#{find_in_range} OR #{catch_all})"
+    end
+
+    segments.join ' AND '
   end
 end
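
To make the incremental logic concrete: for filter 123, with a previous download that ended on 2024-01-01, the make_jql above would produce a query along these lines (values are illustrative):

    filter=123 AND (updated >= "2024-01-01 00:00" OR ((status changed OR Sprint is not EMPTY) AND statusCategory != Done))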
@@ -65,14 +65,14 @@ class Exporter
     puts "Full output from downloader in #{file_system.logfile_name}"
   end
 
-  def info keys, name_filter:
+  def info key, name_filter:
     selected = []
     each_project_config(name_filter: name_filter) do |project|
       project.evaluate_next_level
 
       project.run load_only: true
       project.issues.each do |issue|
-        selected << [project, issue] if keys.include? issue.key
+        selected << [project, issue] if key == issue.key
       end
     rescue => e # rubocop:disable Style/RescueStandardError
       # This happens when we're attempting to load an aggregated project because it hasn't been
@@ -81,7 +81,7 @@ class Exporter
     end
 
     if selected.empty?
-      file_system.log "No issues found to match #{keys.collect(&:inspect).join(', ')}"
+      file_system.log "No issues found to match #{key.inspect}"
     else
       selected.each do |project, issue|
         file_system.log "\nProject #{project.name}", also_write_to_stderr: true
@@ -19,9 +19,10 @@ class Issue
 
     # There are cases where we create an Issue of fragments like linked issues and those won't have
     # changelogs.
-    return unless @raw['changelog']
+    load_history_into_changes if @raw['changelog']
 
-    load_history_into_changes
+    # As above with fragments, there may not be a fields section
+    return unless @raw['fields']
 
     # If this is an older pull of data then comments may not be there.
     load_comments_into_changes if @raw['fields']['comment']
@@ -152,7 +153,7 @@ class Issue
   # Are we currently in this status? If yes, then return the most recent status change.
   def currently_in_status *status_names
     change = most_recent_status_change
-    return false if change.nil?
+    return nil if change.nil?
 
     change if change.current_status_matches(*status_names)
   end
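
The switch from false to nil keeps the method's contract uniform: it always returns either the matching change or nothing, so the result still works in boolean position and can be used directly. An illustrative caller pattern (the issue variable is assumed):

    change = issue.currently_in_status 'In Progress', 'Review'
    puts change.time if change # one nil check covers both "no changes yet" and "no match"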
@@ -162,7 +163,7 @@ class Issue
     category_ids = find_status_category_ids_by_names category_names
 
     change = most_recent_status_change
-    return false if change.nil?
+    return nil if change.nil?
 
     status = find_or_create_status id: change.value_id, name: change.value
     change if status && category_ids.include?(status.category.id)
@@ -212,7 +213,11 @@ class Issue
   end
 
   def parse_time text
-    Time.parse(text).getlocal(@timezone_offset)
+    if text.is_a? String
+      Time.parse(text).getlocal(@timezone_offset)
+    else
+      Time.at(text / 1000).getlocal(@timezone_offset)
+    end
   end
 
   def created
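
The numeric branch covers timestamps that arrive as epoch milliseconds rather than ISO-8601 strings; dividing by 1000 yields the epoch seconds that Time.at expects. A minimal check of the conversion (values are illustrative):

    require 'time'

    Time.at(86_400_000 / 1000).utc      # => 1970-01-02 00:00:00 UTC (one day of milliseconds)
    Time.parse('1970-01-02T00:00:00Z')  # the string form taken by the first branch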
@@ -233,11 +238,11 @@ class Issue
   end
 
   def assigned_to
-    @raw['fields']&.[]('assignee')&.[]('displayName')
+    @raw['fields']['assignee']&.[]('displayName')
   end
 
   def assigned_to_icon_url
-    @raw['fields']&.[]('assignee')&.[]('avatarUrls')&.[]('16x16')
+    @raw['fields']['assignee']&.[]('avatarUrls')&.[]('16x16')
   end
 
   # Many test failures are simply unreadable because the default inspect on this class goes
@@ -759,6 +764,9 @@ class Issue
     first_status = nil
     first_status_id = nil
 
+    # There won't be a created timestamp in cases where this was a linked issue
+    return unless @raw['fields']['created']
+
     created_time = parse_time @raw['fields']['created']
     first_change = @changes.find { |change| change.field == field_name }
     if first_change.nil?
@@ -7,7 +7,7 @@ require 'open3'
 
 class JiraGateway
   attr_accessor :ignore_ssl_errors
-  attr_reader :jira_url, :settings
+  attr_reader :jira_url, :settings, :file_system
 
   def initialize file_system:, jira_config:, settings:
     @file_system = file_system
@@ -18,21 +18,37 @@ class JiraGateway
 
   def post_request relative_url:, payload:
     command = make_curl_command url: "#{@jira_url}#{relative_url}", method: 'POST'
-    stdout, stderr, status = Open3.capture3(command, stdin_data: payload)
-    @file_system.log "Error: #{stderr}" unless stderr == ''
-    raise 'no response' if stdout == ''
-    return parse_response(command: command, result: stdout) if status.success?
-
-    @file_system.log result
-    @file_system.log "Failed call with exit status #{status.exitstatus}."
-    raise "Failed call with exit status #{status.exitstatus}. " \
-      "See #{@file_system.logfile_name} for details"
+    exec_and_parse_response command: command, stdin_data: payload
+  end
+
+  def exec_and_parse_response command:, stdin_data:
+    log_entry = "  #{command.gsub(/\s+/, ' ')}"
+    log_entry = sanitize_message log_entry
+    @file_system.log log_entry
+
+    stdout, stderr, status = capture3(command, stdin_data: stdin_data)
+    unless status.success?
+      @file_system.log "Failed call with exit status #{status.exitstatus}!"
+      @file_system.log "Returned (stdout): #{stdout.inspect}"
+      @file_system.log "Returned (stderr): #{stderr.inspect}"
+      raise "Failed call with exit status #{status.exitstatus}. " \
+        "See #{@file_system.logfile_name} for details"
+    end
+
+    @file_system.log "Returned (stderr): #{stderr.inspect}" unless stderr == ''
+    raise 'no response from curl on stdout' if stdout == ''
+
+    parse_response(command: command, result: stdout)
+  end
+
+  def capture3 command, stdin_data:
+    # In its own method so we can mock it out in tests
+    Open3.capture3(command, stdin_data: stdin_data)
   end
 
   def call_url relative_url:
     command = make_curl_command url: "#{@jira_url}#{relative_url}"
-    result = call_command command
-    parse_response(command: command, result: result)
+    exec_and_parse_response command: command, stdin_data: nil
   end
 
   def parse_response command:, result:
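
Isolating Open3.capture3 behind the one-line capture3 method is what makes the gateway testable without shelling out to curl. A hypothetical test double (not from the gem's test suite; FakeStatus and the canned JSON are invented for illustration):

    # Stands in for the Process::Status that Open3.capture3 returns
    class FakeStatus
      def success?
        true
      end

      def exitstatus
        0
      end
    end

    class StubbedGateway < JiraGateway
      def capture3 _command, stdin_data:
        # Return [stdout, stderr, status], just as Open3.capture3 would
        ['{"issues":[]}', '', FakeStatus.new]
      end
    end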
@@ -53,21 +69,7 @@ class JiraGateway
     token = @jira_api_token || @jira_personal_access_token
     raise 'Neither Jira API Token or personal access token has been set' unless token
 
-    message.gsub(@jira_api_token, '[API_TOKEN]')
-  end
-
-  def call_command command
-    log_entry = "  #{command.gsub(/\s+/, ' ')}"
-    log_entry = sanitize_message log_entry if @jira_api_token
-    @file_system.log log_entry
-
-    result = `#{command}`
-    @file_system.log result unless $CHILD_STATUS.success?
-    return result if $CHILD_STATUS.success?
-
-    @file_system.log "Failed call with exit status #{$CHILD_STATUS.exitstatus}."
-    raise "Failed call with exit status #{$CHILD_STATUS.exitstatus}. " \
-      "See #{@file_system.logfile_name} for details"
+    message.gsub(token, '[API_TOKEN]')
   end
 
   def load_jira_config jira_config
data/lib/jirametrics.rb CHANGED
@@ -47,9 +47,9 @@ class JiraMetrics < Thor
 
   option :config
   desc 'info', 'Dump information about one issue'
-  def info keys
+  def info key
     load_config options[:config]
-    Exporter.instance.info(keys, name_filter: options[:name] || '*')
+    Exporter.instance.info(key, name_filter: options[:name] || '*')
   end
 
   no_commands do
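
Since info now takes exactly one issue key, an invocation looks like this (the key and config filename are illustrative):

    jirametrics info ABC-123 --config my_config.rb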
metadata CHANGED
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: jirametrics
 version: !ruby/object:Gem::Version
-  version: '2.17'
+  version: '2.18'
 platform: ruby
 authors:
 - Mike Bowler
 bindir: bin
 cert_chain: []
-date: 2025-09-20 00:00:00.000000000 Z
+date: 1980-01-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: random-word
@@ -159,7 +159,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.6.2
+rubygems_version: 3.7.2
 specification_version: 4
 summary: Extract Jira metrics
 test_files: []