oneacct-export 0.2.7 → 0.3.0

Files changed (92)
  1. checksums.yaml +4 -4
  2. data/.gitignore +2 -0
  3. data/.rspec +1 -0
  4. data/.travis.yml +1 -21
  5. data/bin/oneacct-export +7 -6
  6. data/config/conf.yml +25 -7
  7. data/lib/data_validators/apel_data_validator.rb +99 -0
  8. data/lib/data_validators/data_compute.rb +57 -0
  9. data/lib/data_validators/data_validator.rb +12 -0
  10. data/lib/data_validators/data_validator_helper.rb +15 -0
  11. data/lib/data_validators/logstash_data_validator.rb +82 -0
  12. data/lib/data_validators/pbs_data_validator.rb +86 -0
  13. data/lib/errors/not_implemented_error.rb +3 -0
  14. data/lib/errors/validation_error.rb +3 -0
  15. data/lib/errors.rb +2 -0
  16. data/lib/input_validator.rb +12 -2
  17. data/lib/one_data_accessor.rb +11 -10
  18. data/lib/one_worker.rb +109 -137
  19. data/lib/oneacct_exporter/version.rb +1 -1
  20. data/lib/oneacct_exporter.rb +9 -7
  21. data/lib/oneacct_opts.rb +36 -13
  22. data/lib/output_types.rb +5 -0
  23. data/lib/redis_conf.rb +2 -2
  24. data/lib/settings.rb +3 -3
  25. data/lib/templates/apel-0.2.erb +6 -6
  26. data/lib/templates/logstash-0.1.erb +3 -0
  27. data/lib/templates/pbs-0.1.erb +6 -0
  28. data/mock/{one_worker_vm8.xml → one_worker_vm_dn01.xml} +76 -74
  29. data/mock/one_worker_vm_dn02.xml +174 -0
  30. data/mock/{one_worker_DISK_missing.xml → one_worker_vm_empty_disk_records.xml} +10 -6
  31. data/mock/one_worker_vm_empty_history_records.xml +131 -0
  32. data/mock/one_worker_vm_image_name01.xml +175 -0
  33. data/mock/{one_worker_valid_machine.xml → one_worker_vm_image_name02.xml} +35 -7
  34. data/mock/one_worker_vm_image_name03.xml +167 -0
  35. data/mock/{one_worker_vm2.xml → one_worker_vm_image_name04.xml} +38 -9
  36. data/mock/{one_worker_vm1.xml → one_worker_vm_image_name05.xml} +36 -8
  37. data/mock/{one_worker_vm9.xml → one_worker_vm_image_name06.xml} +8 -5
  38. data/oneacct-export.gemspec +1 -0
  39. data/spec/data_validators/apel_data_validator_spec.rb +497 -0
  40. data/spec/data_validators/data_compute_spec.rb +193 -0
  41. data/spec/data_validators/data_validator_helper_spec.rb +66 -0
  42. data/spec/data_validators/data_validator_spec.rb +14 -0
  43. data/spec/data_validators/logstash_data_validator_spec.rb +469 -0
  44. data/spec/data_validators/pbs_data_validator_spec.rb +353 -0
  45. data/spec/one_worker_spec.rb +234 -542
  46. data/spec/oneacct_exporter_spec.rb +1 -41
  47. data/spec/oneacct_opts_spec.rb +135 -32
  48. data/spec/spec_helper.rb +18 -1
  49. metadata +51 -52
  50. data/mock/one_worker_DEPLOY_ID_missing.xml +0 -136
  51. data/mock/one_worker_DISK_SIZE_nan.xml +0 -147
  52. data/mock/one_worker_ETIME_0.xml +0 -137
  53. data/mock/one_worker_ETIME_missing.xml +0 -136
  54. data/mock/one_worker_ETIME_nan.xml +0 -137
  55. data/mock/one_worker_GID_missing.xml +0 -136
  56. data/mock/one_worker_GNAME_missing.xml +0 -136
  57. data/mock/one_worker_HISTORY_RECORDS_missing.xml +0 -91
  58. data/mock/one_worker_HISTORY_many.xml +0 -137
  59. data/mock/one_worker_HISTORY_missing.xml +0 -93
  60. data/mock/one_worker_HISTORY_one.xml +0 -115
  61. data/mock/one_worker_IMAGE_ID_missing.xml +0 -136
  62. data/mock/one_worker_MEMORY_0.xml +0 -137
  63. data/mock/one_worker_MEMORY_missing.xml +0 -135
  64. data/mock/one_worker_MEMORY_nan.xml +0 -137
  65. data/mock/one_worker_NET_RX_0.xml +0 -137
  66. data/mock/one_worker_NET_RX_missing.xml +0 -136
  67. data/mock/one_worker_NET_RX_nan.xml +0 -137
  68. data/mock/one_worker_NET_TX_0.xml +0 -137
  69. data/mock/one_worker_NET_TX_missing.xml +0 -136
  70. data/mock/one_worker_NET_TX_nan.xml +0 -137
  71. data/mock/one_worker_RETIME_0_RUNNING.xml +0 -115
  72. data/mock/one_worker_RETIME_0_STOPPED.xml +0 -115
  73. data/mock/one_worker_RETIME_missing.xml +0 -114
  74. data/mock/one_worker_RSTIME_0.xml +0 -115
  75. data/mock/one_worker_RSTIME_>_RETIME.xml +0 -115
  76. data/mock/one_worker_RSTIME_missing.xml +0 -114
  77. data/mock/one_worker_STATE_missing.xml +0 -136
  78. data/mock/one_worker_STATE_out_of_range.xml +0 -137
  79. data/mock/one_worker_STIME_>_ETIME.xml +0 -137
  80. data/mock/one_worker_STIME_missing.xml +0 -136
  81. data/mock/one_worker_STIME_nan.xml +0 -137
  82. data/mock/one_worker_TEMPLATE_missing.xml +0 -79
  83. data/mock/one_worker_UID_missing.xml +0 -136
  84. data/mock/one_worker_VCPU_0.xml +0 -137
  85. data/mock/one_worker_VCPU_missing.xml +0 -136
  86. data/mock/one_worker_VCPU_nan.xml +0 -137
  87. data/mock/one_worker_malformed_vm.xml +0 -136
  88. data/mock/one_worker_vm3.xml +0 -137
  89. data/mock/one_worker_vm4.xml +0 -106
  90. data/mock/one_worker_vm5.xml +0 -106
  91. data/mock/one_worker_vm6.xml +0 -107
  92. data/mock/one_worker_vm7.xml +0 -147
data/lib/one_worker.rb CHANGED
@@ -7,29 +7,43 @@ require 'one_writer'
  require 'sidekiq_conf'
  require 'oneacct_exporter/log'
  require 'settings'
+ require 'data_validators/apel_data_validator'
+ require 'data_validators/pbs_data_validator'
+ require 'data_validators/logstash_data_validator'
+ require 'output_types'
+ require 'errors'

  # Sidekiq worker class
  class OneWorker
    include Sidekiq::Worker
+   include OutputTypes
+   include Errors
+
+   sidekiq_options retry: 5, dead: false, \
+                   queue: (Settings['sidekiq'] && Settings.sidekiq['queue']) ? Settings.sidekiq['queue'].to_sym : :default
+
+   # Prepare data that are specific for output type and common for every virtual machine
+   def output_type_specific_data
+     data = {}
+     if Settings.output['output_type'] == PBS_OT && Settings.output['pbs']
+       data['realm'] = Settings.output.pbs['realm']
+       data['pbs_queue'] = Settings.output.pbs['queue']
+       data['scratch_type'] = Settings.output.pbs['scratch_type']
+       data['host'] = Settings.output.pbs['host_identifier']
+     end

-   sidekiq_options retry: 5, dead: false,\
-                   queue: (Settings['sidekiq'] && Settings.sidekiq['queue']) ? Settings.sidekiq['queue'].to_sym : :default
-
-   B_IN_GB = 1_073_741_824
-
-   STRING = /[[:print:]]+/
-   NUMBER = /[[:digit:]]+/
-   NON_ZERO = /[1-9][[:digit:]]*/
-   STATES = %w(started started suspended started suspended suspended completed completed suspended)
+     if Settings.output['output_type'] == APEL_OT
+       data['endpoint'] = Settings.output.apel['endpoint'].chomp('/')
+       data['site_name'] = Settings.output.apel['site_name']
+       data['cloud_type'] = Settings.output.apel['cloud_type']
+     end

-   # Prepare data that are common for every virtual machine
-   def common_data
-     common_data = {}
-     common_data['endpoint'] = Settings['endpoint'].chomp('/')
-     common_data['site_name'] = Settings['site_name']
-     common_data['cloud_type'] = Settings['cloud_type']
+     if Settings.output['output_type'] == LOGSTASH_OT
+       data['host'] = Settings.output.logstash['host']
+       data['port'] = Settings.output.logstash['port']
+     end

-     common_data
+     data
    end

    # Create mapping of user ID and specified element
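
Note on configuration: the hunk above drops the flat top-level endpoint/site_name/cloud_type settings in favour of per-output-type sections read through Settingslogic. A minimal sketch of the matching `output` section of conf.yml, assuming the key names used by output_type_specific_data; paths and logstash values are illustrative placeholders, while the pbs values shown are the defaults applied later in oneacct_opts.rb:

    # Hypothetical conf.yml fragment; key names follow the Settings
    # lookups above, values are examples only.
    output:
      output_type: apel-0.2              # or pbs-0.1, logstash-0.1
      output_dir: /var/spool/oneacct-export
      apel:
        site_name: Example-Site
        cloud_type: OpenNebula
        endpoint: https://apel.example.org
      pbs:
        realm: META                      # defaults from oneacct_opts.rb
        queue: cloud
        scratch_type: local
        host_identifier: on_localhost
      logstash:
        host: logstash.example.org
        port: 5000
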
@@ -53,7 +67,7 @@ class OneWorker
      oda.mapping(pool_type, mapping)
    rescue => e
      msg = "Couldn't create map: #{e.message}. "\
-           'Stopping to avoid malformed records.'
+          'Stopping to avoid malformed records.'
      logger.error(msg)
      raise msg
    end
@@ -68,80 +82,75 @@ class OneWorker
      return nil
    end

-   # Obtain and parse required data from vm
+   # Obtain and parse required data from vm
    #
    # @return [Hash] required data from virtual machine
    def process_vm(vm, user_map, image_map)
-     data = common_data.clone
+     data = output_type_specific_data
+
+     data['vm_uuid'] = vm['ID']
+     data['start_time'] = vm['STIME']
+     data['end_time'] = vm['ETIME']
+     data['machine_name'] = vm['DEPLOY_ID']
+     data['user_id'] = vm['UID']
+     data['group_id'] = vm['GID']
+     data['user_dn'] = vm['USER_TEMPLATE/USER_X509_DN']
+     data['user_dn'] ||= user_map[data['user_id']]
+     data['user_name'] = vm['UNAME']
+     data['group_name'] = vm['GNAME']
+     data['status_code'] = vm['STATE']
+     data['status'] = vm.state_str
+     data['cpu_count'] = vm['TEMPLATE/VCPU']
+     data['network_inbound'] = vm['NET_TX']
+     data['network_outbound'] = vm['NET_RX']
+     data['memory'] = vm['TEMPLATE/MEMORY']
+     data['image_name'] = vm['TEMPLATE/DISK[1]/VMCATCHER_EVENT_AD_MPURI']
+     data['image_name'] ||= image_map[vm['TEMPLATE/DISK[1]/IMAGE_ID']]
+     data['image_name'] ||= mixin(vm)
+     data['image_name'] ||= vm['TEMPLATE/DISK[1]/IMAGE_ID']
+     data['history'] = history_records(vm)
+     data['disks'] = disk_records(vm)

-     data['vm_uuid'] = parse(vm['ID'], STRING)
-     unless vm['STIME']
-       logger.error('Skipping a malformed record. '\
-                    "VM with id #{data['vm_uuid']} has no StartTime.")
-       return nil
-     end
+     data
+   end

-     data['start_time'] = Time.at(parse(vm['STIME'], NUMBER).to_i)
-     start_time = data['start_time'].to_i
-     if start_time == 0
-       logger.error('Skipping a malformed record. '\
-                    "VM with id #{data['vm_uuid']} has malformed StartTime.")
-       return nil
-     end
-     data['end_time'] = parse(vm['ETIME'], NON_ZERO)
-     end_time = data['end_time'].to_i
-     data['end_time'] = Time.at(end_time) if end_time != 0
-
-     if end_time != 0 && start_time > end_time
-       logger.error('Skipping malformed record. '\
-                    "VM with id #{data['vm_uuid']} has wrong time entries.")
-       return nil
+   # Returns an array of history records from vm
+   #
+   # @param [OpenNebula::VirtualMachine] vm virtual machine
+   #
+   # @return [Array] array of hashes representing vm's history records
+   def history_records(vm)
+     history = []
+     vm.each 'HISTORY_RECORDS/HISTORY' do |h|
+       history_record = {}
+       history_record['start_time'] = h['STIME']
+       history_record['end_time'] = h['ETIME']
+       history_record['rstart_time'] = h['RSTIME']
+       history_record['rend_time'] = h['RETIME']
+       history_record['seq'] = h['SEQ']
+       history_record['hostname'] = h['HOSTNAME']
+
+       history << history_record
      end

-     data['machine_name'] = parse(vm['DEPLOY_ID'], STRING, "one-#{data['vm_uuid']}")
-     data['user_id'] = parse(vm['UID'], STRING)
-     data['group_id'] = parse(vm['GID'], STRING)
-     data['user_name'] = parse(vm['USER_TEMPLATE/USER_X509_DN'], STRING, nil)
-     data['user_name'] = parse(user_map[data['user_id']], STRING) unless data['user_name']
-     data['fqan'] = parse(vm['GNAME'], STRING, nil)
-
-     if vm['STATE']
-       data['status'] = parse(STATES[vm['STATE'].to_i], STRING)
-     else
-       data['status'] = 'NULL'
-     end
+     history
+   end

-     unless vm['HISTORY_RECORDS/HISTORY[1]']
-       logger.warn('Skipping malformed record. '\
-                   "VM with id #{data['vm_uuid']} has no history records.")
-       return nil
+   # Returns an array of disk records from vm
+   #
+   # @param [OpenNebula::VirtualMachine] vm virtual machine
+   #
+   # @return [Array] array of hashes representing vm's disk records
+   def disk_records(vm)
+     disks = []
+     vm.each 'TEMPLATE/DISK' do |d|
+       disk = {}
+       disk['size'] = d['SIZE']
+
+       disks << disk
      end

-     rstime = sum_rstime(vm)
-     return nil unless rstime
-
-     data['duration'] = parse(rstime.to_s, NON_ZERO)
-
-     suspend = (end_time - start_time) - data['duration'].to_i unless end_time == 0
-     data['suspend'] = parse(suspend.to_s, NUMBER)
-
-     data['cpu_count'] = parse(vm['TEMPLATE/VCPU'], NON_ZERO, '1')
-
-     net_tx = parse(vm['NET_TX'], NUMBER, 0)
-     data['network_inbound'] = (net_tx.to_i / B_IN_GB).round
-     net_rx = parse(vm['NET_RX'], NUMBER, 0)
-     data['network_outbound'] = (net_rx.to_i / B_IN_GB).round
-
-     data['memory'] = parse(vm['TEMPLATE/MEMORY'], NUMBER, '0')
-
-     data['image_name'] = parse(vm['TEMPLATE/DISK[1]/VMCATCHER_EVENT_AD_MPURI'], STRING, nil)
-     data['image_name'] = parse(image_map[vm['TEMPLATE/DISK[1]/IMAGE_ID']], STRING, nil) unless data['image_name']
-     data['image_name'] = parse(mixin(vm), STRING, nil) unless data['image_name']
-     data['image_name'] = parse(vm['TEMPLATE/DISK[1]/IMAGE_ID'], STRING) unless data['image_name']
-
-     data['disk_size'] = sum_disk_size(vm)
-
-     data
+     disks
    end

    # Look for 'os_tpl' OCCI mixin to better identifie virtual machine's image
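
With this change process_vm only collects raw string values from the VM's XML document; parsing, defaulting and sanity checks move to the new DataValidators classes. A sketch of one record as it leaves process_vm, with assumed example values (every scalar is still an unparsed string at this point):

    # Illustrative only; keys come from the assignments above.
    record = {
      'vm_uuid'      => '42',
      'start_time'   => '1404810000',
      'end_time'     => '0',
      'machine_name' => 'one-42',
      'user_id'      => '3',
      'group_id'     => '1',
      'status_code'  => '6',
      'status'       => 'DONE',             # vm.state_str
      'cpu_count'    => '1',
      'memory'       => '1024',
      'history'      => [{ 'start_time' => '1404810000', 'end_time' => '0',
                           'rstart_time' => '1404810042', 'rend_time' => '0',
                           'seq' => '0', 'hostname' => 'node01.example.org' }],
      'disks'        => [{ 'size' => '10240' }]
    }
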
@@ -163,56 +172,10 @@ class OneWorker
      nil # nothing found
    end

-   # Sums RSTIME (time when virtual machine was actually running)
-   #
-   # @param [OpenNebula::VirtualMachine] vm virtual machine
-   #
-   # @return [Integer] RSTIME
-   def sum_rstime(vm)
-     rstime = 0
-     vm.each 'HISTORY_RECORDS/HISTORY' do |h|
-       next unless h['RSTIME'] && h['RETIME'] && h['RSTIME'] != '0'
-       if h['RETIME'] == '0' && STATES[vm['STATE'].to_i] != 'completed'
-         rstime += Time.now.to_i - h['RSTIME'].to_i
-         next
-       end
-       if h['RSTIME'].to_i > h['RETIME'].to_i
-         logger.warn('Skipping malformed record. '\
-                     "VM with id #{vm['ID']} has wrong CpuDuration.")
-         rstime = nil
-         break
-       end
-       rstime += h['RETIME'].to_i - h['RSTIME'].to_i
-     end
-
-     rstime
-   end
-
-   # Sums disk size of all disks within the virtual machine
-   #
-   # @param [OpenNebula::VirtualMachine] vm virtual machine
-   #
-   # @return [Integer] sum of disk sizes in GB rounded up
-   def sum_disk_size(vm)
-     disk_size = 'NULL'
-     vm.each 'TEMPLATE/DISK' do |disk|
-       return 'NULL' unless disk['SIZE']
-
-       size = parse(disk['SIZE'], NUMBER, nil)
-       unless size
-         logger.warn("Disk size invalid for VM with id #{vm['ID']}.")
-         return 'NULL'
-       end
-       disk_size = disk_size.to_i + size.to_i
-     end
-
-     disk_size = (disk_size/1000.0).ceil unless disk_size.to_i == 0
-     disk_size
-   end
-
    # Sidekiq specific method, specifies the purpose of the worker
    #
-   # @param [String] vms IDs of virtual machines to process in form of numbers separated by '|' (easier for cooperation with redis)
+   # @param [String] vms IDs of virtual machines to process in form of numbers separated by '|'
+   #                 (easier for cooperation with redis)
    # @param [String] file_number number of the output file
    def perform(vms, file_number)
      OneacctExporter::Log.setup_log_level(logger)
@@ -229,18 +192,31 @@ class OneWorker
        vm = load_vm(vm_id, oda)
        next unless vm

-       logger.debug("Processing vm with id: #{vm_id}.")
-       vm_data = process_vm(vm, user_map, image_map)
-       next unless vm_data
+       begin
+         logger.debug("Processing vm with id: #{vm_id}.")
+         vm_data = process_vm(vm, user_map, image_map)
+
+         validator = DataValidators::ApelDataValidator.new(logger) if Settings.output['output_type'] == APEL_OT
+         validator = DataValidators::PbsDataValidator.new(logger) if Settings.output['output_type'] == PBS_OT
+         validator = DataValidators::LogstashDataValidator.new(logger) if Settings.output['output_type'] == LOGSTASH_OT
+
+         vm_data = validator.validate_data(vm_data) if validator
+       rescue Errors::ValidationError => e
+         logger.error("Error occured during processing of vm with id: #{vm_id}. #{e.message}")
+         next
+       end

        logger.debug("Adding vm with data: #{vm_data} for export.")
        data << vm_data
      end

-     write_data(data, file_number)
+     write_data(data, file_number) unless data.empty?
    end

    # Write processed data into output directory
+   #
+   # @param [Hash] data data to be written into file
+   # @param [Fixnum] file_number sequence number of file data will be written to
    def write_data(data, file_number)
      logger.debug('Creating writer...')
      ow = OneWriter.new(data, file_number, logger)
@@ -250,8 +226,4 @@ class OneWorker
      logger.error(msg)
      raise msg
    end
-
-   def parse(value, regex, substitute = 'NULL')
-     regex =~ value ? value : substitute
-   end
  end
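
Taken together, perform now runs a two-stage pipeline: extract raw data with process_vm, then parse and check it with a validator chosen by output type. A condensed sketch of that selection logic; the table-driven lookup below is a rephrasing of the three conditional assignments above, not code from the gem:

    require 'data_validators/apel_data_validator'
    require 'data_validators/pbs_data_validator'
    require 'data_validators/logstash_data_validator'
    require 'errors'
    require 'output_types'

    include OutputTypes

    # Output type => validator class, equivalent to the if-chain in #perform.
    VALIDATORS = {
      APEL_OT     => DataValidators::ApelDataValidator,
      PBS_OT      => DataValidators::PbsDataValidator,
      LOGSTASH_OT => DataValidators::LogstashDataValidator
    }.freeze

    def validate(vm_data, output_type, logger)
      klass = VALIDATORS[output_type]
      return vm_data unless klass   # unknown output type: pass data through
      klass.new(logger).validate_data(vm_data)
    rescue Errors::ValidationError => e
      logger.error("Validation failed: #{e.message}")
      nil                           # caller skips nil records
    end
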
data/lib/oneacct_exporter/version.rb CHANGED
@@ -1,3 +1,3 @@
  class OneacctExporter
-   VERSION = '0.2.7'
+   VERSION = '0.3.0'
  end
data/lib/oneacct_exporter.rb CHANGED
@@ -8,12 +8,13 @@ require 'sidekiq/api'
  #
  # @attr_reader [any logger] log logger for the class
  # @attr_reader [Hash] range range of dates, requesting only virtual machines within the range
- # @attr_reader [Hash] groups user groups, requesting only virtual machines with owners that belong to one of the group
+ # @attr_reader [Hash] groups user groups, requesting only virtual machines with owners that
+ #   belong to one of the group
  # @attr_reader [TrueClass, FalseClass] blocking says whether to run export in blocking mode or not
  # @attr_reader [Integer] timeout timeout for blocking mode
- # @attr_reader [TrueClass, FalseClass] compatibility says whether to run export in compatibility mode or not
+ # @attr_reader [TrueClass, FalseClass] compatibility says whether to run export in compatibility
+ #   mode or not
  class OneacctExporter
-
    attr_reader :log, :range, :groups, :blocking, :timeout, :compatibility

    def initialize(options, log)
@@ -36,11 +37,11 @@ class OneacctExporter
      oda = OneDataAccessor.new(@compatibility, @log)

      vms = []
-     #load records of virtual machines in batches
+     # load records of virtual machines in batches
      while vms = oda.vms(batch_number, @range, @groups)
        @log.info("Starting worker with batch number: #{batch_number}.")
        unless vms.empty?
-         #add a new job for every batch to the Sidekiq's queue
+         # add a new job for every batch to the Sidekiq's queue
          OneWorker.perform_async(vms.join('|'), new_file_number)
          new_file_number += 1
        end
@@ -94,8 +95,9 @@ class OneacctExporter
    # Clean output directory of previous entries
    def clean_output_dir
      output_dir = Dir.new(Settings.output['output_dir'])
-     output_dir.entries.each do |entry|
-       File.delete("#{Settings.output['output_dir']}/#{entry}") if /[0-9]{14}/ =~ entry
+     entries = output_dir.entries.select { |entry| entry != '.' && entry != '..' }
+     entries.each do |entry|
+       File.delete("#{output_dir.path}/#{entry}")
      end
    end
  end
data/lib/oneacct_opts.rb CHANGED
@@ -6,6 +6,8 @@ require 'settings'

  # Class for parsing command line arguments
  class OneacctOpts
+   include OutputTypes
+
    BLOCKING_DEFAULT = false
    TIMEOUT_DEFAULT = 60 * 60
    COMPATIBILITY_DEFAULT = false
@@ -84,9 +86,7 @@ class OneacctOpts
    # Set default values for not specified options
    def self.set_defaults(options)
      options.blocking = BLOCKING_DEFAULT unless options.blocking
-     unless options.timeout
-       options.timeout = TIMEOUT_DEFAULT if options.blocking
-     end
+     options.timeout = TIMEOUT_DEFAULT if options.blocking unless options.timeout
      options.compatibility = COMPATIBILITY_DEFAULT unless options.compatibility
    end

@@ -97,24 +97,24 @@ class OneacctOpts

    # Make sure command line parameters are sane
    def self.check_options_restrictions(options)
-     #make sure date range make sense
+     # make sure date range make sense
      if options.records_from && options.records_to && options.records_from >= options.records_to
        fail ArgumentError, 'Wrong time range for records retrieval.'
      end

-     #make sure only one group restriction is used
+     # make sure only one group restriction is used
      if options.include_groups && options.exclude_groups
        fail ArgumentError, 'Mixing of group options is not possible.'
      end

-     #make sure group file option is not used without specifying group restriction type
+     # make sure group file option is not used without specifying group restriction type
      unless options.include_groups || options.exclude_groups
        if options.groups_file
          fail ArgumentError, 'Cannot use group file without specifying group restriction type.'
        end
      end

-     #make sure that timeout option is not used without blocking option
+     # make sure that timeout option is not used without blocking option
      if options.timeout && !options.blocking
        fail ArgumentError, 'Cannot set timeout without a blocking mode.'
      end
@@ -122,22 +122,45 @@ class OneacctOpts

    # Make sure configuration is sane
    def self.check_settings_restrictions
-     #make sure all mandatory parameters are set
-     unless Settings['site_name'] && Settings['cloud_type'] && Settings['endpoint'] &&
-            Settings['output'] && Settings.output['output_dir'] && Settings.output['output_type']
+     # make sure all mandatory parameters are set
+     unless Settings['output'] && Settings.output['output_dir'] && Settings.output['output_type']
        fail ArgumentError, 'Missing some mandatory parameters. Check your configuration file.'
      end

-     #make sure log file is specified while loggin to file
+     # make sure log file is specified while loggin to file
      if Settings['logging'] && Settings.logging['log_type'] == 'file' &&
-        !Settings.logging['log_file']
+         !Settings.logging['log_file']
        fail ArgumentError, 'Missing file for logging. Check your configuration file.'
      end

-     #make sure specified template really exists
+     check_output_type_specific_settings
+
+     # make sure specified template really exists
      template_filename = OneWriter.template_filename(Settings.output['output_type'])
      unless File.exist?(template_filename)
        fail ArgumentError, "Non-existing template #{Settings.output['output_type']}."
      end
    end
+
+   def self.check_output_type_specific_settings
+     if Settings.output['output_type'] == APEL_OT
+       unless Settings.output['apel'] && Settings.output.apel['site_name'] &&
+              Settings.output.apel['cloud_type'] && Settings.output.apel['endpoint']
+         fail ArgumentError, 'Missing some mandatory parameters for APEL output type. Check your configuration file.'
+       end
+     end
+
+     if Settings.output['output_type'] == PBS_OT && Settings.output['pbs']
+       Settings.output.pbs['realm'] ||= 'META'
+       Settings.output.pbs['queue'] ||= 'cloud'
+       Settings.output.pbs['scratch_type'] ||= 'local'
+       Settings.output.pbs['host_identifier'] ||= 'on_localhost'
+     end
+
+     if Settings.output['output_type'] == LOGSTASH_OT
+       unless Settings.output['logstash'] && Settings.output.logstash['host'] && Settings.output.logstash['port']
+         fail ArgumentError, 'Missing some mandatory parameters for logstash output type. Check your configuration file.'
+       end
+     end
+   end
  end
data/lib/output_types.rb ADDED
@@ -0,0 +1,5 @@
+ module OutputTypes
+   APEL_OT = 'apel-0.2'
+   PBS_OT = 'pbs-0.1'
+   LOGSTASH_OT = 'logstash-0.1'
+ end
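
The constant values double as template base names: OneWriter.template_filename resolves the configured output_type to one of the bundled ERB templates (apel-0.2.erb, pbs-0.1.erb, logstash-0.1.erb), which is what lets check_settings_restrictions verify the template exists up front. A small sketch of that naming convention; the template directory path here is an assumption for illustration:

    require 'output_types'
    include OutputTypes

    # Assumed convention: '<output_type>.erb' under the gem's template dir.
    templates_dir = File.join('lib', 'templates')
    [APEL_OT, PBS_OT, LOGSTASH_OT].each do |type|
      puts "#{type} -> #{File.join(templates_dir, "#{type}.erb")}"
    end
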
data/lib/redis_conf.rb CHANGED
@@ -8,7 +8,7 @@ class RedisConf

  # Read and parse Redis server configuration options
  #
- # @return [Hash] redis server options ready for use
+ # @return [Hash] redis server options ready for use
  def self.options
    options = {}
    if Settings['redis']
@@ -20,7 +20,7 @@ class RedisConf
    options[:url] ||= 'redis://localhost:6379'

    fail ArgumentError, "#{options[:url]} is not a valid URL."\
-      unless is_uri?(options[:url])
+      unless uri?(options[:url])

    if Settings['redis'] && Settings.redis['password']
      fail ArgumentError, 'Redis password cannot be empty'\
data/lib/settings.rb CHANGED
@@ -1,11 +1,11 @@
  require 'settingslogic'

- # Class representing OneacctExport settings
+ # Class representing OneacctExport settings
  class Settings < Settingslogic
    CONF_NAME = 'conf.yml'

-   #three possible configuration file locations in order by preference
-   #if configuration file is found rest of the locations are ignored
+   # three possible configuration file locations in order by preference
+   # if configuration file is found rest of the locations are ignored
    source "#{ENV['HOME']}/.oneacct-export/#{CONF_NAME}"\
      if File.exist?("#{ENV['HOME']}/.oneacct-export/#{CONF_NAME}")
    source "/etc/oneacct-export/#{CONF_NAME}"\
data/lib/templates/apel-0.2.erb CHANGED
@@ -5,9 +5,9 @@ SiteName: <%= vm['site_name']%>
  MachineName: <%= vm['machine_name']%>
  LocalUserId: <%= vm['user_id']%>
  LocalGroupId: <%= vm['group_id']%>
- GlobalUserName: <%= vm['user_name']%>
- <% if vm['fqan']-%>
- FQAN: /<%= vm['fqan']%>/Role=NULL/Capability=NULL
+ GlobalUserName: <%= vm['user_dn']%>
+ <% if vm['group_name']-%>
+ FQAN: /<%= vm['group_name']%>/Role=NULL/Capability=NULL
  <% else -%>
  FQAN: NULL
  <% end -%>
@@ -15,14 +15,14 @@ Status: <%= vm['status']%>
  StartTime: <%= vm['start_time'].to_i%>
  EndTime: <%= vm['end_time'].to_s == vm['end_time'] ? vm['end_time'] : vm['end_time'].to_i%>
  SuspendDuration: <%= vm['suspend']%>
- WallDuration: <%= vm['duration']%>
- CpuDuration: <%= vm['duration']%>
+ WallDuration: <%= vm['duration'].to_i != 0 ? vm['duration'].to_i : 'NULL'%>
+ CpuDuration: <%= vm['duration'].to_i != 0 ? vm['duration'].to_i : 'NULL'%>
  CpuCount: <%= vm['cpu_count']%>
  NetworkType: NULL
  NetworkInbound: <%= vm['network_inbound']%>
  NetworkOutbound: <%= vm['network_outbound']%>
  Memory: <%= vm['memory']%>
- Disk: <%= vm['disk_size']%>
+ Disk: <%= vm['disk_size'].to_i != 0 ? (vm['disk_size']/1000.0).ceil : vm['disk_size']%>
  StorageRecordId: NULL
  ImageId: <%= vm['image_name']%>
  CloudType: OpenNebula
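
The Disk field now performs the MB-to-GB conversion that previously lived in the removed sum_disk_size helper: the validators hand over the summed disk size in megabytes and the template rounds it up to whole gigabytes at render time. A worked example of the rendered expression:

    disk_size = 10240              # MB, summed over the VM's TEMPLATE/DISK entries
    (disk_size / 1000.0).ceil      # => 11, the value emitted for Disk
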
data/lib/templates/logstash-0.1.erb ADDED
@@ -0,0 +1,3 @@
+ <% for vm in @data -%>
+ { "@source": "<%= vm['host'] %>_<%= vm['port'] %>", "@tags": [ "history" ], "@fields": <%= vm.to_json %>, "@timestamp": "<%= Time.at(vm['start_time']).utc.strftime('%FT%T%:z') %>" }
+ <% end -%>
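
For illustration, one rendered line of this template could look as follows; all field values are hypothetical and the @fields JSON (the whole validated record, via vm.to_json) is abridged:

    { "@source": "logstash.example.org_5000", "@tags": [ "history" ], "@fields": {"vm_uuid":"42","user_id":"3","start_time":1404810000}, "@timestamp": "2014-07-08T09:00:00+00:00" }
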
data/lib/templates/pbs-0.1.erb ADDED
@@ -0,0 +1,6 @@
+ <%- for vm in @data -%>
+ <%- for history_record in vm['history'] -%>
+ <%= history_record['start_time'].strftime('%D %T') %>;<%= history_record['state']%>;<%= vm['host']%>_<%= vm['machine_name']%>-<%= history_record['seq']%>;user=<%= vm['user_name']%> group=<%= vm['group_name']%> jobname=<%= vm['machine_name']%> queue=<%= vm['pbs_queue']%> ctime=<%= history_record['start_time'].to_i%> qtime=<%= history_record['start_time'].to_i%> etime=<%= history_record['start_time'].to_i%> start=<%= history_record['start_time'].to_i%> end=<%= history_record['end_time'].to_i%> owner=<%= vm['user_name']%>@<%= vm['realm']%> sched_nodespec=host=<%= history_record['hostname']%>:ppn=<%= vm['cpu_count']%>:mem=<%= vm['memory'].to_i*1024%>KB:vmem=<%= vm['memory'].to_i*1024%>KB<% if vm['scratch_type'] -%>:scratch_type=<%= vm['scratch_type']%><% end -%><% if vm['disk_size'] -%>:scratch_volume=<%= vm['disk_size']%><% end -%>mb Resource_List.mem=<%= vm['memory']%>mb Resource_List.nodect=1 <% if vm['disk_size'] -%>Resource_List.scratch=<%= vm['disk_size']%>mb <% end -%>Resource_List.vmem=<%= (vm['memory'].to_i/1024.0).ceil%>gb Resource_List.walltime=<%= (vm['duration'].to_i/3600).floor%>:<%= vm['duration'].utc.strftime('%M:%S')%> resc_req_total.mem=<%= vm['memory'].to_i*1024%>kb resc_req_total.nodect=1 resc_req_total.procs=<%= vm['cpu_count']%> resc_req_total.scratch=<%= vm['disk_size']%>mb resc_req_total.vmem=<%= vm['memory'].to_i*1024%>kb resc_req_total.walltime=<%= (vm['duration'].to_i/3600).floor%>:<%= vm['duration'].utc.strftime('%M:%S')%> Exit_status=0 resources_used.cput=<%= (vm['duration'].to_i/3600).floor%>:<%= vm['duration'].utc.strftime('%M:%S')%> resources_used.mem=<%= vm['memory'].to_i*1024%>kb resources_used.vmem=<%= vm['memory'].to_i*1024%>kb resources_used.walltime=<%= (vm['duration'].to_i/3600).floor%>:<%= vm['duration'].utc.strftime('%M:%S')%>
+ <%- end -%>
+ <%- end -%>
+