oneacct-export 0.3.0 → 0.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37):
  1. checksums.yaml +4 -4
  2. data/README.md +9 -1
  3. data/bin/oneacct-export +15 -3
  4. data/config/conf.yml +1 -0
  5. data/examples/usr/bin/oneacct-export-cron +31 -7
  6. data/lib/data_validators/apel_data_validator.rb +17 -5
  7. data/lib/data_validators/data_validator_helper.rb +1 -0
  8. data/lib/input_validator.rb +5 -0
  9. data/lib/one_data_accessor.rb +62 -10
  10. data/lib/one_worker.rb +89 -10
  11. data/lib/one_writer.rb +3 -1
  12. data/lib/oneacct_exporter.rb +3 -5
  13. data/lib/oneacct_exporter/version.rb +1 -1
  14. data/lib/oneacct_opts.rb +13 -3
  15. data/lib/output_types.rb +3 -3
  16. data/lib/templates/apel-0.2.erb +1 -1
  17. data/lib/templates/apel-0.4.erb +34 -0
  18. data/mock/one_data_accessor_cluster_01.xml +27 -0
  19. data/mock/one_data_accessor_cluster_02.xml +23 -0
  20. data/mock/one_data_accessor_host_01.xml +65 -0
  21. data/mock/one_data_accessor_host_02.xml +66 -0
  22. data/mock/one_data_accessor_host_03.xml +63 -0
  23. data/mock/one_data_accessor_host_04.xml +63 -0
  24. data/mock/one_data_accessor_host_05.xml +63 -0
  25. data/mock/one_worker_vm_number_of_public_ips_01.xml +275 -0
  26. data/mock/one_worker_vm_number_of_public_ips_02.xml +164 -0
  27. data/mock/one_worker_vm_number_of_public_ips_03.xml +189 -0
  28. data/mock/one_worker_vm_search_benchmark_01.xml +175 -0
  29. data/mock/one_worker_vm_search_benchmark_02.xml +175 -0
  30. data/oneacct-export.gemspec +6 -5
  31. data/spec/data_validators/apel_data_validator_spec.rb +72 -0
  32. data/spec/one_data_accessor_spec.rb +114 -43
  33. data/spec/one_worker_spec.rb +148 -18
  34. data/spec/oneacct_exporter_spec.rb +3 -13
  35. data/spec/oneacct_opts_spec.rb +22 -0
  36. data/spec/spec_helper.rb +2 -1
  37. metadata +40 -13
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: e33fa6806cd2ea1a65ac02313c0a3aa3cf549095
4
- data.tar.gz: 3691a9b9a6eea6c22987c3f78934c199dc7e6596
3
+ metadata.gz: cbb6f93cdabe253f898feaff7aa924dc5bbbffa7
4
+ data.tar.gz: 57bb740a6f19b3656a1c06c420d6ea3810535ba5
5
5
  SHA512:
6
- metadata.gz: 0b049a0279a7cbdb37b002faa9c554596201aee6965a13789f0b234657421f4e32ce03add5708458540cfc37d80307a922837acf10028ecb60e311bdb98da60a
7
- data.tar.gz: bf8ad5f4cce2931091cfcd5851780b2cbf6c42eb0f5d7e949135147387bb657653fec11cc7b8e0bfcb76fe5af69773aef608b18ece6ab873d438d9ae2978d685
6
+ metadata.gz: 5eb5405b239359b3bda92ea4a895bd8eb5cb409e273918a9ea8a437579164b9877e4d8d80cab34f60dd71649b71a724e7c6f54853abea5cddf825d2aaddf51ab
7
+ data.tar.gz: 0863fe3000a3d01131846d3d128d62e6977e39775e887a2fe25c5694c356803de5184d7ba0d8a169cab74cd30d16c51b55c876a0b5d9cccea33256b1d30e0b09
data/README.md CHANGED
@@ -165,9 +165,17 @@ Usage oneacct-export [options]
165
165
  ##Continuous integration
166
166
  [Continuous integration for OneacctExport by Travis-CI](http://travis-ci.org/EGI-FCTF/oneacct_export/)
167
167
 
168
- ## Contributing
168
+ ##Development
169
+ ###Contributing
169
170
  1. Fork it ( https://github.com/EGI-FCTF/oneacct_export/fork )
170
171
  2. Create your feature branch (`git checkout -b my-new-feature`)
171
172
  3. Commit your changes (`git commit -am 'Add some feature'`)
172
173
  4. Push to the branch (`git push origin my-new-feature`)
173
174
  5. Create a new Pull Request
175
+
176
+ ###Debugging
177
+ To change the log level of `oneacct-export` and `sidekiq` you have to set the environment variable **ONEACCT_EXPORT_LOG_LEVEL** to log level you need. Supported log levels are `DEBUG`, `INFO`, `WARN` and `ERROR`.
178
+ ```bash
179
+ export ONEACCT_EXPORT_LOG_LEVEL=DEBUG
180
+ ```
181
+
@@ -8,6 +8,9 @@ require 'settings'
8
8
  require 'fileutils'
9
9
  require 'json'
10
10
  require 'oneacct_opts'
11
+ require 'chronic_duration'
12
+
13
+ ChronicDuration.raise_exceptions = true
11
14
 
12
15
  # parse options from command line
13
16
  options = OneacctOpts.parse(ARGV)
@@ -31,9 +34,18 @@ end
31
34
 
32
35
  OneacctExporter::Log.setup_log_level(log)
33
36
 
34
- range = {}
35
- range[:from] = options.records_from
36
- range[:to] = options.records_to
37
+ begin
38
+ range = {}
39
+ range[:from] = Time.now - ChronicDuration.parse(options.records_for) if options.records_for
40
+
41
+ if options.records_from || options.records_to
42
+ range[:from] = options.records_from
43
+ range[:to] = options.records_to
44
+ end
45
+ rescue ChronicDuration::DurationParseError => e
46
+ puts "Cannot parse a time period: #{e.message} Quitting."
47
+ exit
48
+ end
37
49
 
38
50
  groups = {}
39
51
  groups[:include] = options.include_groups if options.include_groups
@@ -8,6 +8,7 @@ defaults: &defaults
8
8
  site_name: Undefined # Usually a short provider name, e.g. CESNET
9
9
  cloud_type: OpenNebula # CMF type, only OpenNebula is supported
10
10
  endpoint: https://occi.localhost.com:11443/ # URL of your OCCI endpoint, e.g. https://fqdn.example.com:11443/
11
+ # cloud_compute_service: # Name identifying cloud resource within the site. Allows multiple cloud resources within a site. i.e. a level of granularity.
11
12
  pbs: # Options for pbs output format
12
13
  realm: REALM # Owner's realm, e.g. META
13
14
  queue: cloud # Queue name
@@ -28,17 +28,41 @@
28
28
  conf_dir="/etc/oneacct-export"
29
29
  omnibus_base_dir="/opt/oneacct-export"
30
30
 
31
- week_start_epoch=$((`date +%s`-604800))
32
- week_start_date=`date -d @$week_start_epoch +%F`
33
-
34
- if [ "x$1" = "x--all" ] || [ "x$1" = "x-all" ]; then
31
+ if [ "x$1" = "x--all" ] || [ "x$1" = "x-a" ]; then
35
32
  # export all available accounting records
36
- options="--blocking --timeout 3600"
33
+ options=""
34
+ blocking_timeout="3600"
35
+ elif [ "x$1" = "x--two-weeks" ]; then
36
+ # export only records from the last two weeks
37
+ options="--records-for '2 weeks'"
38
+ blocking_timeout="1800"
39
+ elif [ "x$1" = "x--month" ] || [ "x$1" = "x-m" ]; then
40
+ # export only records from the last month
41
+ options="--records-for month"
42
+ blocking_timeout="2700"
43
+ elif [ "x$1" = "x--two-months" ]; then
44
+ # export only records from the last two months
45
+ options="--records-for '2 months'"
46
+ blocking_timeout="2700"
47
+ elif [ "x$1" = "x--six-months" ]; then
48
+ # export only records from the last six months
49
+ options="--records-for '6 months'"
50
+ blocking_timeout="3600"
51
+ elif [ "x$1" = "x--year" ] || [ "x$1" = "x-y" ]; then
52
+ # export only records from the last year
53
+ options="--records-for year"
54
+ blocking_timeout="3600"
55
+ elif [ "x$1" = "x" ] || [ "x$1" = "x--week" ] || [ "x$1" = "x-w" ]; then
56
+ # export only records from the last week (default)
57
+ options="--records-for week"
58
+ blocking_timeout="1800"
37
59
  else
38
- # export only records from the last 7 days
39
- options="--records-from $week_start_date --blocking --timeout 1800"
60
+ echo "Unknown option $1."
61
+ exit 1
40
62
  fi
41
63
 
64
+ options="$options --blocking --timeout $blocking_timeout"
65
+
42
66
  if [ -f "$conf_dir/compat.one" ]; then
43
67
  options="$options --compatibility-mode"
44
68
  fi
@@ -42,7 +42,8 @@ module DataValidators
42
42
  # valid_data['network_outbound'] - defaults to 0
43
43
  # valid_data['memory'] - defaults to 0
44
44
  # valid_data['image_name'] - defaults to NULL
45
- # valid_data['disk_size'] -defaults to NULL
45
+ # valid_data['disk_size'] - defaults to NULL
46
+ # valid_data['cloud_compute_service'] - defaults to NULL
46
47
  def validate_data(data = nil)
47
48
  unless data
48
49
  fail Errors::ValidationError, 'Skipping a malformed record. '\
@@ -61,7 +62,7 @@ module DataValidators
61
62
  valid_data['start_time'] = Time.at(start_time)
62
63
  fail_validation 'EndTime' unless number?(data['end_time'])
63
64
  end_time = data['end_time'].to_i
64
- valid_data['end_time'] = end_time == 0 ? 'NULL' : Time.at(end_time)
65
+ valid_data['end_time'] = end_time == 0 ? DEFAULT_VALUE : Time.at(end_time)
65
66
  fail_validation 'EndTime' if end_time != 0 && valid_data['start_time'] > valid_data['end_time']
66
67
 
67
68
  valid_data['machine_name'] = default(data['machine_name'], :string, "one-#{valid_data['vm_uuid']}")
@@ -76,13 +77,13 @@ module DataValidators
76
77
  status = status.to_i
77
78
  fail_validation 'Status' unless status.to_s == data['status_code'] && status < STATES.size && status >= 0
78
79
  end
79
- valid_data['status'] = status ? STATES[status] : 'NULL'
80
+ valid_data['status'] = status ? STATES[status] : DEFAULT_VALUE
80
81
 
81
82
  fail_validation 'HISTORY_RECORDS' if (!data['history']) || data['history'].empty?
82
83
 
83
84
  duration = sum_rstime(data['history'], valid_data['status'] == 'completed', valid_data['vm_uuid'])
84
85
  valid_data['duration'] = Time.at(duration)
85
- valid_data['suspend'] = end_time == 0 ? 'NULL' : (end_time - start_time) - duration
86
+ valid_data['suspend'] = end_time == 0 ? DEFAULT_VALUE : (end_time - start_time) - duration
86
87
  valid_data['cpu_count'] = default(data['cpu_count'], :nzn, '1')
87
88
 
88
89
  valid_data['network_inbound'] = (default(data['network_inbound'], :number, 0).to_i / B_IN_GB).round
@@ -91,7 +92,18 @@ module DataValidators
91
92
  valid_data['memory'] = default(data['memory'], :number, '0')
92
93
  valid_data['image_name'] = default(data['image_name'], :string, DEFAULT_VALUE)
93
94
  disk_size_sum = sum_disk_size(data['disks'], valid_data['vm_uuid'])
94
- valid_data['disk_size'] = disk_size_sum ? disk_size_sum : 'NULL'
95
+ valid_data['disk_size'] = disk_size_sum ? disk_size_sum : DEFAULT_VALUE
96
+
97
+ valid_data['cloud_compute_service'] = default(data['cloud_compute_service'], :string, DEFAULT_VALUE)
98
+
99
+ valid_data['number_of_public_ips'] = default(data['number_of_public_ips'], :number, 0)
100
+
101
+ valid_data['benchmark_type'] = default(data['benchmark_type'], :string, DEFAULT_VALUE)
102
+ valid_data['benchmark_value'] = default(data['benchmark_value'], :decimal, DEFAULT_VALUE)
103
+ if valid_data['benchmark_type'] == DEFAULT_VALUE || valid_data['benchmark_value'] == DEFAULT_VALUE
104
+ valid_data['benchmark_type'] = DEFAULT_VALUE
105
+ valid_data['benchmark_value'] = DEFAULT_VALUE
106
+ end
95
107
 
96
108
  valid_data
97
109
  end
@@ -9,6 +9,7 @@ module DataValidators
9
9
  def default(value, condition_method, default_value)
10
10
  return string?(value) ? value : default_value if condition_method == :string
11
11
  return number?(value) ? value : default_value if condition_method == :number
12
+ return decimal?(value) ? value : default_value if condition_method == :decimal
12
13
  return non_zero_number?(value) ? value : default_value if condition_method == :nzn
13
14
  end
14
15
  end
@@ -4,6 +4,7 @@ require 'uri'
4
4
  module InputValidator
5
5
  URI_RE = /\A#{URI.regexp}\z/
6
6
  NUMBER_RE = /\A[[:digit:]]+\z/
7
+ DECIMAL_RE = /\A[[:digit:]]+\.[[:digit:]]+\z/
7
8
  STRING_RE = /\A[[:print:]]+\z/
8
9
  NON_ZERO_NUMBER_RE = /\A[1-9][[:digit:]]*\z/
9
10
 
@@ -15,6 +16,10 @@ module InputValidator
15
16
  is?(object, NUMBER_RE)
16
17
  end
17
18
 
19
+ def decimal?(object)
20
+ is?(object, DECIMAL_RE) || number?(object)
21
+ end
22
+
18
23
  def uri?(object)
19
24
  is?(object, URI_RE)
20
25
  end
@@ -18,6 +18,7 @@ class OneDataAccessor
18
18
  STATE_DONE = '6'
19
19
 
20
20
  attr_reader :log, :batch_size, :client, :compatibility
21
+ attr_accessor :start_vm_id
21
22
 
22
23
  def initialize(compatibility, log = nil)
23
24
  @log = log ? log : Logger.new(STDOUT)
@@ -27,6 +28,7 @@ class OneDataAccessor
27
28
  fail ArgumentError, 'Wrong number of vms per file.' unless number?(@batch_size)
28
29
 
29
30
  @compatibility_vm_pool = nil
31
+ @start_vm_id = 0
30
32
 
31
33
  initialize_client
32
34
  end
@@ -87,15 +89,14 @@ class OneDataAccessor
87
89
 
88
90
  # Retrieve IDs of specified virtual machines
89
91
  #
90
- # @param [Integer] batch_number
91
92
  # @param [Hash] range date range into which virtual machine has to belong
92
93
  # @param [Hash] groups groups into one of which owner of the virtual machine has to belong
93
94
  #
94
95
  # @return [Array] array with virtual machines' IDs
95
- def vms(batch_number, range, groups)
96
+ def vms(range, groups)
96
97
  vms = []
97
98
  # load specific batch
98
- vm_pool = load_vm_pool(batch_number)
99
+ vm_pool = load_vm_pool
99
100
  return nil if vm_pool.count == 0
100
101
 
101
102
  @log.debug("Searching for vms based on range: #{range} and groups: #{groups}.")
@@ -145,11 +146,11 @@ class OneDataAccessor
145
146
  # Load part of virtual machine pool
146
147
  #
147
148
  # @param [Integer] batch_number
148
- def load_vm_pool(batch_number)
149
- fail ArgumentError, "#{batch_number} is not a valid number" unless number?(batch_number)
150
- @log.debug("Loading vm pool with batch number: #{batch_number}.")
151
- from = batch_number * @batch_size
152
- to = (batch_number + 1) * @batch_size - 1
149
+ def load_vm_pool
150
+ @log.debug("Loading vm pool from id: #{start_vm_id}.")
151
+ from = @start_vm_id
152
+ how_many = @batch_size
153
+ to = from + how_many - 1
153
154
 
154
155
  # if in compatibility mode, whole virtual machine pool has to be loaded for the first time
155
156
  if @compatibility
@@ -160,12 +161,17 @@ class OneDataAccessor
160
161
  @compatibility_vm_pool = vm_pool.to_a
161
162
  end
162
163
 
163
- return @compatibility_vm_pool[from..to] || []
164
+ pool = @compatibility_vm_pool[from..to] || []
165
+ @start_vm_id = pool.last.id + 1 unless pool.empty?
166
+
167
+ return pool
164
168
  else
165
169
  vm_pool = OpenNebula::VirtualMachinePool.new(@client)
166
- rc = vm_pool.info(OpenNebula::Pool::INFO_ALL, from, to, OpenNebula::VirtualMachinePool::INFO_ALL_VM)
170
+ rc = vm_pool.info(OpenNebula::Pool::INFO_ALL, from, -how_many, OpenNebula::VirtualMachinePool::INFO_ALL_VM)
167
171
  check_retval(rc, Errors::ResourceRetrievalError)
168
172
 
173
+ @start_vm_id = vm_pool.entries.last.id + 1 unless vm_pool.count == 0
174
+
169
175
  return vm_pool
170
176
  end
171
177
  end
@@ -186,4 +192,50 @@ class OneDataAccessor
186
192
  fail e_klass, rc.message
187
193
  end
188
194
  end
195
+
196
+ # Check all hosts and gain benchmark name and value.
197
+ #
198
+ # @return [Hash] hosts' IDs and hash with benchmark name and value
199
+ def benchmark_map
200
+ host_pool = OpenNebula::HostPool.new(@client)
201
+ rc = host_pool.info
202
+ check_retval(rc, Errors::ResourceRetrievalError)
203
+
204
+ bench_map = {}
205
+
206
+ host_pool.each do |host|
207
+ structure = {}
208
+ benchmark_values = nil
209
+ benchmark_type = nil
210
+
211
+ if (benchmark_type = host['TEMPLATE/BENCHMARK_TYPE'])
212
+ benchmark_values = host['TEMPLATE/BENCHMARK_VALUES'].split(/\s*\n\s*/)
213
+ else
214
+ cluster_id = host['CLUSTER_ID'].to_i
215
+
216
+ unless cluster_id == -1
217
+ searched_cluster = OpenNebula::Cluster.new(OpenNebula::Cluster.build_xml(cluster_id), @client)
218
+ rc = searched_cluster.info
219
+ check_retval(rc, Errors::ResourceRetrievalError)
220
+
221
+ if (benchmark_type = searched_cluster['TEMPLATE/BENCHMARK_TYPE'])
222
+ benchmark_values = searched_cluster['TEMPLATE/BENCHMARK_VALUES'].split(/\s*\n\s*/)
223
+ end
224
+ end
225
+ end
226
+
227
+ if benchmark_values
228
+ mixins = {}
229
+ benchmark_values.each do |value|
230
+ values = value.split(/\s+/, 2)
231
+ mixins[values[0]] = values[1]
232
+ end
233
+ structure = { :benchmark_type => benchmark_type, :mixins => mixins }
234
+ end
235
+
236
+ bench_map[host['ID']] = structure
237
+ end
238
+
239
+ bench_map
240
+ end
189
241
  end
@@ -6,12 +6,14 @@ require 'one_data_accessor'
6
6
  require 'one_writer'
7
7
  require 'sidekiq_conf'
8
8
  require 'oneacct_exporter/log'
9
+ require 'oneacct_exporter/version'
9
10
  require 'settings'
10
11
  require 'data_validators/apel_data_validator'
11
12
  require 'data_validators/pbs_data_validator'
12
13
  require 'data_validators/logstash_data_validator'
13
14
  require 'output_types'
14
15
  require 'errors'
16
+ require 'ipaddr'
15
17
 
16
18
  # Sidekiq worker class
17
19
  class OneWorker
@@ -22,23 +24,34 @@ class OneWorker
22
24
  sidekiq_options retry: 5, dead: false, \
23
25
  queue: (Settings['sidekiq'] && Settings.sidekiq['queue']) ? Settings.sidekiq['queue'].to_sym : :default
24
26
 
27
+ IGNORED_NETWORKS=["10.0.0.0/8","172.16.0.0/12","192.168.0.0/16"].map {|x| IPAddr.new x}
28
+
29
+ # Prepare data that are common for all the output types
30
+ def common_data
31
+ data = {}
32
+ data['oneacct_export_version'] = ::OneacctExporter::VERSION
33
+
34
+ data
35
+ end
36
+
25
37
  # Prepare data that are specific for output type and common for every virtual machine
26
38
  def output_type_specific_data
27
39
  data = {}
28
- if Settings.output['output_type'] == PBS_OT && Settings.output['pbs']
40
+ if PBS_OT.include?(Settings.output['output_type']) && Settings.output['pbs']
29
41
  data['realm'] = Settings.output.pbs['realm']
30
42
  data['pbs_queue'] = Settings.output.pbs['queue']
31
43
  data['scratch_type'] = Settings.output.pbs['scratch_type']
32
44
  data['host'] = Settings.output.pbs['host_identifier']
33
45
  end
34
46
 
35
- if Settings.output['output_type'] == APEL_OT
47
+ if APEL_OT.include?(Settings.output['output_type'])
36
48
  data['endpoint'] = Settings.output.apel['endpoint'].chomp('/')
37
49
  data['site_name'] = Settings.output.apel['site_name']
38
50
  data['cloud_type'] = Settings.output.apel['cloud_type']
51
+ data['cloud_compute_service'] = Settings.output.apel['cloud_compute_service']
39
52
  end
40
53
 
41
- if Settings.output['output_type'] == LOGSTASH_OT
54
+ if LOGSTASH_OT.include?(Settings.output['output_type'])
42
55
  data['host'] = Settings.output.logstash['host']
43
56
  data['port'] = Settings.output.logstash['port']
44
57
  end
@@ -62,6 +75,11 @@ class OneWorker
62
75
  create_map(OpenNebula::ImagePool, 'TEMPLATE/VMCATCHER_EVENT_AD_MPURI', oda)
63
76
  end
64
77
 
78
+ def create_cluster_map(oda)
79
+ logger.debug('Creating cluster map.')
80
+ create_map(OpenNebula::ClusterPool, 'TEMPLATE/APEL_SITE_NAME', oda)
81
+ end
82
+
65
83
  # Generic method for mapping creation
66
84
  def create_map(pool_type, mapping, oda)
67
85
  oda.mapping(pool_type, mapping)
@@ -85,8 +103,9 @@ class OneWorker
85
103
  # Obtain and parse required data from vm
86
104
  #
87
105
  # @return [Hash] required data from virtual machine
88
- def process_vm(vm, user_map, image_map)
89
- data = output_type_specific_data
106
+ def process_vm(vm, user_map, image_map, cluster_map, benchmark_map)
107
+ data = common_data
108
+ data.merge! output_type_specific_data
90
109
 
91
110
  data['vm_uuid'] = vm['ID']
92
111
  data['start_time'] = vm['STIME']
@@ -110,6 +129,14 @@ class OneWorker
110
129
  data['image_name'] ||= vm['TEMPLATE/DISK[1]/IMAGE_ID']
111
130
  data['history'] = history_records(vm)
112
131
  data['disks'] = disk_records(vm)
132
+ data['number_of_public_ips'] = number_of_public_ips(vm)
133
+
134
+ benchmark = search_benchmark(vm, benchmark_map)
135
+ data['benchmark_type'] = benchmark[:benchmark_type]
136
+ data['benchmark_value'] = benchmark[:benchmark_value]
137
+
138
+ site_name = cluster_map[vm['HISTORY_RECORDS/HISTORY[1]/CID']]
139
+ data['site_name'] = site_name if site_name
113
140
 
114
141
  data
115
142
  end
@@ -153,7 +180,23 @@ class OneWorker
153
180
  disks
154
181
  end
155
182
 
156
- # Look for 'os_tpl' OCCI mixin to better identifie virtual machine's image
183
+ # Returns number of unique public ip addresses of vm
184
+ #
185
+ # @param [OpenNebula::VirtualMachine] vm virtual machine
186
+ #
187
+ # @return [Integer] number of unique public ip addresses represented by integer
188
+ def number_of_public_ips(vm)
189
+ all_ips = []
190
+ vm.each 'TEMPLATE/NIC' do |nic|
191
+ nic.each 'IP' do |ip|
192
+ all_ips << ip.text if ip_public?(ip)
193
+ end
194
+ end
195
+
196
+ all_ips.uniq.length
197
+ end
198
+
199
+ # Look for 'os_tpl' OCCI mixin to better identify virtual machine's image
157
200
  #
158
201
  # @param [OpenNebula::VirtualMachine] vm virtual machine
159
202
  #
@@ -185,6 +228,8 @@ class OneWorker
185
228
  oda = OneDataAccessor.new(false, logger)
186
229
  user_map = create_user_map(oda)
187
230
  image_map = create_image_map(oda)
231
+ cluster_map = create_cluster_map(oda)
232
+ benchmark_map = oda.benchmark_map
188
233
 
189
234
  data = []
190
235
 
@@ -194,11 +239,11 @@ class OneWorker
194
239
 
195
240
  begin
196
241
  logger.debug("Processing vm with id: #{vm_id}.")
197
- vm_data = process_vm(vm, user_map, image_map)
242
+ vm_data = process_vm(vm, user_map, image_map, cluster_map, benchmark_map)
198
243
 
199
- validator = DataValidators::ApelDataValidator.new(logger) if Settings.output['output_type'] == APEL_OT
200
- validator = DataValidators::PbsDataValidator.new(logger) if Settings.output['output_type'] == PBS_OT
201
- validator = DataValidators::LogstashDataValidator.new(logger) if Settings.output['output_type'] == LOGSTASH_OT
244
+ validator = DataValidators::ApelDataValidator.new(logger) if APEL_OT.include?(Settings.output['output_type'])
245
+ validator = DataValidators::PbsDataValidator.new(logger) if PBS_OT.include?(Settings.output['output_type'])
246
+ validator = DataValidators::LogstashDataValidator.new(logger) if LOGSTASH_OT.include?(Settings.output['output_type'])
202
247
 
203
248
  vm_data = validator.validate_data(vm_data) if validator
204
249
  rescue Errors::ValidationError => e
@@ -226,4 +271,38 @@ class OneWorker
226
271
  logger.error(msg)
227
272
  raise msg
228
273
  end
274
+
275
+ # Search benchmark type and value for a virtual machine.
276
+ #
277
+ # @param [OpenNebula::VirtualMachine] vm virtual machine
278
+ # @param [Hash] benchmark_map map of all hosts' benchmarks
279
+ #
280
+ # @return [Hash] benchmark type and value; both may be nil
281
+ def search_benchmark(vm, benchmark_map)
282
+ nil_benchmark = { :benchmark_type => nil, :benchmark_value => nil }
283
+ map = benchmark_map[vm['HISTORY_RECORDS/HISTORY[last()]/HID']]
284
+ return nil_benchmark unless map
285
+ return nil_benchmark unless vm['USER_TEMPLATE/OCCI_COMPUTE_MIXINS']
286
+
287
+ occi_compute_mixins = vm['USER_TEMPLATE/OCCI_COMPUTE_MIXINS'].split(/\s+/)
288
+ occi_compute_mixins.each do |mixin|
289
+ return { :benchmark_type => map[:benchmark_type], :benchmark_value => map[:mixins][mixin] } if map[:mixins].has_key?(mixin)
290
+ end
291
+ nil_benchmark
292
+ end
293
+
294
+ private
295
+
296
+ # Check if IP is public
297
+ #
298
+ # @param [String] ip address
299
+ #
300
+ # @return [Bool] true or false
301
+ def ip_public?(ip)
302
+ ip_obj = IPAddr.new(ip.text)
303
+ IGNORED_NETWORKS.each do |net|
304
+ return false if net.include? ip_obj
305
+ end
306
+ true
307
+ end
229
308
  end