cosmos 4.4.0 → 4.4.1

Files changed (98)
  1. checksums.yaml +5 -5
  2. data/.dockerignore +2 -0
  3. data/.gitignore +1 -0
  4. data/.travis.yml +6 -6
  5. data/Dockerfile +65 -0
  6. data/Manifest.txt +12 -2
  7. data/README.md +5 -0
  8. data/Rakefile +52 -0
  9. data/appveyor.yml +18 -8
  10. data/autohotkey/config/tools/cmd_sequence/cmd_sequence.txt +2 -0
  11. data/autohotkey/lib/cmd_sequence_exporter.rb +52 -0
  12. data/autohotkey/procedures/collect.rb +2 -2
  13. data/autohotkey/procedures/collect_util.rb +1 -1
  14. data/autohotkey/procedures/script_test.rb +1 -1
  15. data/autohotkey/tools/CmdSenderAHK2 +18 -0
  16. data/autohotkey/tools/cmd_sender.ahk +34 -6
  17. data/autohotkey/tools/cmd_sender2.ahk +4 -0
  18. data/autohotkey/tools/cmd_sequence.ahk +21 -8
  19. data/autohotkey/tools/config_editor.ahk +4 -4
  20. data/bin/cstol_converter +1 -1
  21. data/cosmos.gemspec +1 -1
  22. data/data/config/command_modifiers.yaml +16 -1
  23. data/data/config/param_item_modifiers.yaml +5 -0
  24. data/data/config/system.yaml +31 -1
  25. data/data/config/telemetry_modifiers.yaml +16 -1
  26. data/data/crc.txt +415 -410
  27. data/demo/config/dart/Gemfile +1 -6
  28. data/demo/config/data/crc.txt +244 -243
  29. data/demo/config/system/system.txt +3 -0
  30. data/demo/config/system/system2.txt +3 -0
  31. data/demo/config/system/system_alt_ports.txt +3 -0
  32. data/demo/config/targets/INST/cmd_tlm/inst_cmds.txt +3 -3
  33. data/demo/config/targets/INST/cmd_tlm/inst_tlm.txt +4 -0
  34. data/demo/config/targets/INST/cmd_tlm/inst_tlm_override.txt +12 -0
  35. data/demo/config/targets/INST/lib/sim_inst.rb +2 -2
  36. data/demo/config/targets/INST/target.txt +1 -0
  37. data/demo/procedures/cosmos_api_test.rb +8 -8
  38. data/install/config/dart/Gemfile +2 -7
  39. data/install/config/data/crc.txt +143 -143
  40. data/install/config/system/system.txt +3 -0
  41. data/lib/cosmos/dart/config/boot.rb +1 -1
  42. data/lib/cosmos/dart/config/database.yml +2 -0
  43. data/lib/cosmos/dart/lib/dart_common.rb +11 -4
  44. data/lib/cosmos/dart/lib/dart_constants.rb +15 -0
  45. data/lib/cosmos/dart/lib/dart_decom_query.rb +5 -6
  46. data/lib/cosmos/dart/lib/dart_decommutator.rb +66 -56
  47. data/lib/cosmos/dart/lib/dart_master_query.rb +71 -0
  48. data/lib/cosmos/dart/lib/dart_reducer_worker_thread.rb +165 -134
  49. data/lib/cosmos/dart/processes/dart.rb +4 -2
  50. data/lib/cosmos/dart/processes/dart_decom_server.rb +2 -2
  51. data/lib/cosmos/dart/processes/dart_ingester.rb +38 -1
  52. data/lib/cosmos/dart/processes/dart_master.rb +44 -0
  53. data/lib/cosmos/dart/processes/dart_util.rb +115 -0
  54. data/lib/cosmos/gui/widgets/dart_meta_frame.rb +21 -2
  55. data/lib/cosmos/interfaces/protocols/length_protocol.rb +5 -0
  56. data/lib/cosmos/io/json_drb.rb +3 -3
  57. data/lib/cosmos/io/posix_serial_driver.rb +1 -1
  58. data/lib/cosmos/io/win32_serial_driver.rb +23 -2
  59. data/lib/cosmos/packet_logs/packet_log_reader.rb +2 -2
  60. data/lib/cosmos/packets/packet.rb +1 -1
  61. data/lib/cosmos/packets/packet_config.rb +26 -8
  62. data/lib/cosmos/packets/structure.rb +17 -0
  63. data/lib/cosmos/packets/structure_item.rb +5 -1
  64. data/lib/cosmos/packets/telemetry.rb +7 -1
  65. data/lib/cosmos/system/system.rb +115 -48
  66. data/lib/cosmos/tools/cmd_sender/cmd_params.rb +360 -0
  67. data/lib/cosmos/tools/cmd_sender/cmd_sender.rb +23 -319
  68. data/lib/cosmos/tools/cmd_sequence/cmd_sequence.rb +14 -17
  69. data/lib/cosmos/tools/cmd_sequence/sequence_item.rb +43 -331
  70. data/lib/cosmos/tools/cmd_sequence/sequence_list.rb +16 -11
  71. data/lib/cosmos/tools/cmd_tlm_server/cmd_tlm_server_gui.rb +1 -0
  72. data/lib/cosmos/tools/config_editor/config_editor.rb +33 -2
  73. data/lib/cosmos/tools/config_editor/config_editor_frame.rb +8 -9
  74. data/lib/cosmos/tools/config_editor/system_config_dialog.rb +158 -0
  75. data/lib/cosmos/tools/script_runner/script_runner_frame.rb +2 -2
  76. data/lib/cosmos/tools/test_runner/test.rb +5 -2
  77. data/lib/cosmos/tools/test_runner/test_runner.rb +2 -2
  78. data/lib/cosmos/tools/tlm_extractor/tlm_extractor_processor.rb +17 -13
  79. data/lib/cosmos/tools/tlm_grapher/tabbed_plots_tool/tabbed_plots_dart_thread.rb +20 -16
  80. data/lib/cosmos/tools/tlm_grapher/tlm_grapher.rb +18 -11
  81. data/lib/cosmos/tools/tlm_viewer/tlm_viewer.rb +16 -5
  82. data/lib/cosmos/utilities/ruby_lex_utils.rb +34 -30
  83. data/lib/cosmos/version.rb +4 -4
  84. data/lib/cosmos/win32/excel.rb +23 -17
  85. data/run_gui_tests.bat +1 -0
  86. data/spec/core_ext/socket_spec.rb +1 -1
  87. data/spec/install/yaml_docs_spec.rb +26 -6
  88. data/spec/interfaces/protocols/length_protocol_spec.rb +39 -0
  89. data/spec/io/json_drb_spec.rb +14 -0
  90. data/spec/io/win32_serial_driver_spec.rb +16 -2
  91. data/spec/packet_logs/packet_log_reader_spec.rb +2 -2
  92. data/spec/packets/structure_spec.rb +52 -2
  93. data/spec/packets/telemetry_spec.rb +29 -1
  94. data/spec/system/system_spec.rb +2 -2
  95. data/spec/utilities/message_log_spec.rb +6 -3
  96. data/tasks/gemfile_stats.rake +22 -13
  97. metadata +17 -7
  98. data/lib/cosmos/dart/Gemfile +0 -69
@@ -14,6 +14,7 @@ LISTEN_HOST REPLAY_PREIDENTIFIED 0.0.0.0 # 127.0.0.1 is more secure if you don't
  LISTEN_HOST REPLAY_CMD_ROUTER 0.0.0.0 # 127.0.0.1 is more secure if you don't need external connections
  LISTEN_HOST DART_STREAM 0.0.0.0 # 127.0.0.1 is more secure if you don't need external connections
  LISTEN_HOST DART_DECOM 0.0.0.0 # 127.0.0.1 is more secure if you don't need external connections
+ LISTEN_HOST DART_MASTER 0.0.0.0 # 127.0.0.1 is more secure if you don't need external connections

  # Connect Hosts - Ip addresses or hostnames to connect to when running the tools
  CONNECT_HOST CTS_API 127.0.0.1
@@ -25,6 +26,7 @@ CONNECT_HOST REPLAY_PREIDENTIFIED 127.0.0.1
  CONNECT_HOST REPLAY_CMD_ROUTER 127.0.0.1
  CONNECT_HOST DART_STREAM 127.0.0.1
  CONNECT_HOST DART_DECOM 127.0.0.1
+ CONNECT_HOST DART_MASTER 127.0.0.1

  # Ethernet Ports
  PORT CTS_API 7777
@@ -36,6 +38,7 @@ PORT REPLAY_PREIDENTIFIED 7879
  PORT REPLAY_CMD_ROUTER 7880
  PORT DART_STREAM 8777
  PORT DART_DECOM 8779
+ PORT DART_MASTER 8780

  # Default Packet Log Writer and Reader
  DEFAULT_PACKET_LOG_WRITER packet_log_writer.rb
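
The three DART_MASTER additions above follow the same LISTEN_HOST / CONNECT_HOST / PORT pattern as the existing DART_STREAM and DART_DECOM services. For reference, this is how a client resolves the new endpoint through Cosmos::System; a minimal sketch (the require lines are assumptions, the two lookup calls appear verbatim in the dart_decommutator.rb diff below):

```ruby
require 'cosmos'
require 'cosmos/io/json_drb_object'

# CONNECT_HOST DART_MASTER 127.0.0.1 and PORT DART_MASTER 8780
# are exposed through the Cosmos::System configuration accessors.
host = Cosmos::System.connect_hosts['DART_MASTER']
port = Cosmos::System.ports['DART_MASTER']

# JsonDRbObject proxies method calls to the JsonDRb server at host:port
master = Cosmos::JsonDRbObject.new(host, port)
```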
@@ -1,3 +1,3 @@
- ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../Gemfile', __dir__)
+ #ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../Gemfile', __dir__)

  require 'bundler/setup' # Set up gems listed in the Gemfile.
@@ -1,6 +1,8 @@
  default: &default
  template: template0
  encoding: SQL_ASCII
+ collation: C
+ ctype: C
  adapter: postgresql
  pool: 50
  timeout: 5000
@@ -10,6 +10,7 @@

  require 'cosmos/script'
  require 'optparse'
+ require 'dart_constants'

  # Autoload models here to remove problems loading within Cosmos namespace
  Target
@@ -20,6 +21,8 @@ PacketLogEntry
  # Implement methods common to DART (Data Archival Retrieval and Trending).
  # Most of these methods handle accessing the DART database.
  module DartCommon
+ include DartConstants
+
  # @return [Integer] Maximimum byte size of strings in the database
  MAX_STRING_BYTE_SIZE = 191 # Works well with mysql utf8mb4 if we want to support mysql in the future
  # @return [Integer] Maximimum bit size of strings in the database
@@ -184,7 +187,7 @@ module DartCommon
  end
  end
  t.index :time
- t.index :reduced_state, :where => "reduced_state < 2"
+ t.index :reduced_state, :where => "reduced_state < #{REDUCED}"
  end
  create_reduction_table("t#{packet_config.id}_#{table_index}_h", table_data_types, table_index) # hour
  create_reduction_table("t#{packet_config.id}_#{table_index}_m", table_data_types, table_index) # month
@@ -288,7 +291,11 @@ module DartCommon
  reader.open(packet_log.filename)
  @plr_cache[packet_log.id] = reader
  end
- return reader.read_at_offset(ple.data_offset)
+ packet = reader.read_at_offset(ple.data_offset)
+ unless packet
+ Cosmos::Logger.error("Failed to read at offset #{ple.data_offset} (file offset: #{reader.bytes_read}) with file size #{reader.size}")
+ end
+ return packet
  rescue Exception => error
  Cosmos::Logger.error("Error Reading Packet Log Entry:\n#{error.formatted}")
  return nil
@@ -319,7 +326,7 @@ module DartCommon
  reduction,
  reduction_modifier,
  item_name_modifier,
- limit = 10000,
+ limit = MAX_DECOM_RESULTS,
  offset = 0,
  meta_ids = [])

@@ -687,7 +694,7 @@ module DartCommon
  end
  end
  t.index :start_time
- t.index :reduced_state, :where => "reduced_state < 2"
+ t.index :reduced_state, :where => "reduced_state < #{REDUCED}"
  end
  end

@@ -0,0 +1,15 @@
+ # encoding: ascii-8bit
+
+ # Copyright 2018 Ball Aerospace & Technologies Corp.
+ # All Rights Reserved.
+ #
+ # This program is free software; you can modify and/or redistribute it
+ # under the terms of the GNU General Public License
+ # as published by the Free Software Foundation; version 3 with
+ # attribution addendums as found in the LICENSE.txt
+
+ # Implement constants common to DART (Data Archival Retrieval and Trending).
+ module DartConstants
+ # @return [Integer] maximum number of results returned from a decommutation query
+ MAX_DECOM_RESULTS = 10000
+ end
@@ -43,10 +43,9 @@ class DartDecomQuery
  # meta_ids => Optional IDs related to the meta data you want to filter by. This requires
  # making a separate request for the particular meta data in question and recording
  # the returned meta_ids for use in a subsequent request.
- # limit => Maximum number of data items to return, must be less than 10000
- # offset => Offset into the data stream. Since the maximum number of values allowed
- # is 10000, you can set the offset to 10000, then 20000, etc to get additional values.
- # By default the offset is 0.
+ # limit => Maximum number of data items to return, must be less than DartCommon::MAX_DECOM_RESULTS
+ # offset => Offset into the data stream. Use this to get more than the DartCommon::MAX_DECOM_RESULTS
+ # by making multipe requests with multiples of the DartCommon::MAX_DECOM_RESULTS value.
  # cmd_tlm => Whether the item is a command or telemetry. Default is telemetry.
  # @return [Array<Array<String, Integer, Integer, Integer, Integer>>] Array of arrays containing
  # the item name, item seconds, item microseconds, samples (always 1 for NONE reduction, varies
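
Since limit is now capped at DartCommon::MAX_DECOM_RESULTS, large queries are fetched a page at a time by stepping offset, as the rewritten comment describes. A sketch of such a paging loop against the DART decom server; the query method name and the item/reduction request keys are assumptions for illustration, while 'limit' and 'offset' match the request parsing in the next hunk:

```ruby
require 'cosmos'
require 'cosmos/io/json_drb_object'

MAX = 10_000 # DartCommon::MAX_DECOM_RESULTS
server = Cosmos::JsonDRbObject.new(Cosmos::System.connect_hosts['DART_DECOM'],
                                   Cosmos::System.ports['DART_DECOM'])
offset = 0
loop do
  # 'limit'/'offset' mirror the parsing shown below; the other keys
  # and the method name are hypothetical for this sketch.
  results = server.query('item'      => ['INST', 'HEALTH_STATUS', 'TEMP1'],
                         'reduction' => 'NONE',
                         'limit'     => MAX,
                         'offset'    => offset)
  break if results.empty?
  results.each { |name, secs, usecs, samples, value| puts "#{name}: #{value}" }
  offset += MAX # step in multiples of MAX_DECOM_RESULTS
end
```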
@@ -143,7 +142,7 @@ class DartDecomQuery
  end

  limit = request['limit'].to_i
- limit = 10000 if limit <= 0 or limit > 10000
+ limit = MAX_DECOM_RESULTS if limit <= 0 or limit > MAX_DECOM_RESULTS

  offset = request['offset'].to_i
  offset = 0 if offset < 0
@@ -168,7 +167,7 @@ class DartDecomQuery
  # @param is_tlm true or false
  # @return [Array<String>] Array of item names
  def item_names(target_name, packet_name, is_tlm = true)
- Cosmos::Logger.info("#{time.formatted}: item_names")
+ Cosmos::Logger.info("#{Time.now.formatted}: item_names")

  target = Target.where("name = ?", target_name).first
  raise "Target #{target_name} not found" unless target
@@ -11,6 +11,7 @@
  require 'dart_common'
  require 'dart_logging'
  require 'packet_log_entry'
+ require 'cosmos/io/json_drb_object'

  class DartDecommutatorStatus
  attr_accessor :count
@@ -35,25 +36,41 @@ class DartDecommutator

  # Wait 60s before giving up on the PacketConfig becoming ready
  PACKET_CONFIG_READY_TIMEOUT = 60
-
+
+ # Delay between updating the DART status packet. Simply throttles this rarely viewed status
+ STATUS_UPDATE_PERIOD_SECONDS = 60.seconds
+
  def initialize(worker_id = 0, num_workers = 1)
  sync_targets_and_packets()
  @worker_id = worker_id
  @num_workers = num_workers
  @status = DartDecommutatorStatus.new
+ @master = Cosmos::JsonDRbObject.new(Cosmos::System.connect_hosts['DART_MASTER'], Cosmos::System.ports['DART_MASTER'])
+ end
+
+ def timeit(message, &block)
+ time_start = Time.now
+ yield
+ Cosmos::Logger.info("#{Time.now - time_start}s #{message}")
  end

  # Run forever looking for data to decommutate
  def run
- status_time = Time.now + 60.seconds
+ status_time = Time.now + STATUS_UPDATE_PERIOD_SECONDS
  while true
- time_start = Time.now # Remember start time so we can throttle
- # Get all entries that are ready and decommutation hasn't started
- PacketLogEntry.where("decom_state = #{PacketLogEntry::NOT_STARTED} and ready = true").
- # Mod the ID to allow distribution of effort, in_batches processes 1000 at a time
- where("id % #{@num_workers} = #{@worker_id}").in_batches do |group|
- group.each do |ple|
+ ple_id = nil
+ start_time = nil
+ end_time = nil
+ begin
+ ple_ids = @master.get_decom_ple_ids()
+ rescue DRb::DRbConnError
+ sleep(1)
+ next
+ end
+ if ple_ids and ple_ids.length > 0
+ ple_ids.each do |ple_id|
  begin
+ ple = PacketLogEntry.find(ple_id)
  meta_ple = get_meta_ple(ple)
  next unless meta_ple
  system_meta = get_system_meta(ple, meta_ple)
@@ -67,7 +84,7 @@ class DartDecommutator
  # If we timeout this code will simply exit the application
  wait_for_ready_packet_config(packet_config)
  decom_packet(ple, packet, packet_config)
-
+
  # Update status
  if Time.now > status_time
  status_time = Time.now + 60.seconds
@@ -86,15 +103,14 @@ class DartDecommutator
  rescue => err
  handle_error("PLE:#{ple.id}:ERROR\n#{err.formatted}")
  end
- end # each ple
- end # batches
-
- # Throttle to no faster than 1 Hz
- delta = Time.now - time_start
- sleep(1 - delta) if delta < 1 && delta > 0
+ end
+ else
+ sleep(1)
+ end
  end
  rescue Interrupt
  Cosmos::Logger.info("Dart Worker Closing From Signal...")
+ @master.shutdown
  end

  protected
@@ -238,51 +254,45 @@ class DartDecommutator
  end

  def decom_packet(ple, packet, packet_config)
- # Update packet config times
- if !packet_config.start_time or (packet.packet_time < packet_config.start_time)
- packet_config.start_time = packet.packet_time
- packet_config.save!
- end
- if !packet_config.end_time or (packet.packet_time > packet_config.end_time)
- packet_config.end_time = packet.packet_time
- packet_config.save!
- end
+ ActiveRecord::Base.transaction do
+ # Update packet config times
+ if !packet_config.start_time or (packet.packet_time < packet_config.start_time)
+ packet_config.start_time = packet.packet_time
+ packet_config.save!
+ end
+ if !packet_config.end_time or (packet.packet_time > packet_config.end_time)
+ packet_config.end_time = packet.packet_time
+ packet_config.save!
+ end

- # Mark the log entry IN_PROGRESS as we decommutate the data
- ple.decom_state = PacketLogEntry::IN_PROGRESS
- ple.save!
- values = get_values(packet)
+ values = get_values(packet)

- table_index = 0
- rows = []
- # Create rows in the decommutation table model
- values.each_slice(MAX_COLUMNS_PER_TABLE) do |table_values|
- model = get_decom_table_model(packet_config.id, table_index)
- row = model.new
- row.time = ple.time
- row.ple_id = ple.id
- row.packet_log_id = ple.packet_log_id
- row.meta_id = ple.meta_id
- row.reduced_state = INITIALIZING
- table_values.each_with_index do |value, index|
- item_index = (table_index * MAX_COLUMNS_PER_TABLE) + index
- row.write_attribute("i#{item_index}", value)
+ table_index = 0
+ rows = []
+ # Create rows in the decommutation table model
+ values.each_slice(MAX_COLUMNS_PER_TABLE) do |table_values|
+ model = get_decom_table_model(packet_config.id, table_index)
+ row = model.new
+ row.time = ple.time
+ row.ple_id = ple.id
+ row.packet_log_id = ple.packet_log_id
+ row.meta_id = ple.meta_id
+ row.reduced_state = READY_TO_REDUCE
+ table_values.each_with_index do |value, index|
+ item_index = (table_index * MAX_COLUMNS_PER_TABLE) + index
+ row.write_attribute("i#{item_index}", value)
+ end
+ row.save!
+ rows << row
+ table_index += 1
  end
- row.save!
- rows << row
- table_index += 1
- end
- # Mark ready to reduce
- rows.each do |row|
- row.reduced_state = READY_TO_REDUCE
- row.save!
- end

- # The log entry has been decommutated, mark COMPLETE
- ple.decom_state = PacketLogEntry::COMPLETE
- ple.save!
- @status.count += 1
- Cosmos::Logger.debug("PLE:#{ple.id}:#{ple.decom_state_string}")
+ # The log entry has been decommutated, mark COMPLETE
+ ple.decom_state = PacketLogEntry::COMPLETE
+ ple.save!
+ @status.count += 1
+ Cosmos::Logger.debug("PLE:#{ple.id}:#{ple.decom_state_string}")
+ end
  end

  def handle_error(message)
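
Two things stand out in the decommutator rework: workers now pull batches of PacketLogEntry ids from the DART master over JsonDRb instead of scanning the database with a modulo filter, and the entire decommutation of an entry is wrapped in one ActiveRecord transaction, which is why the separate IN_PROGRESS marking and second ready-to-reduce pass could be dropped. The new timeit helper is a small block-timing utility; a usage sketch (the message and block body here are illustrative, not from the diff):

```ruby
# timeit logs the wall-clock duration of the block it yields to,
# e.g. "0.0123s decom PLE 42" via Cosmos::Logger.info
timeit("decom PLE #{ple.id}") do
  decom_packet(ple, packet, packet_config)
end
```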
@@ -0,0 +1,71 @@
+ # encoding: ascii-8bit
+
+ # Copyright 2018 Ball Aerospace & Technologies Corp.
+ # All Rights Reserved.
+ #
+ # This program is free software; you can modify and/or redistribute it
+ # under the terms of the GNU General Public License
+ # as published by the Free Software Foundation; version 3 with
+ # attribution addendums as found in the LICENSE.txt
+
+ require 'dart_common'
+ require 'dart_logging'
+ require 'thread'
+
+ # Rails Json screws up COSMOS handling of Nan, etc.
+ require "active_support/core_ext/object/json"
+ module ActiveSupport
+ module ToJsonWithActiveSupportEncoder # :nodoc:
+ def to_json(options = nil)
+ super(options)
+ end
+ end
+ end
+
+ # JsonDRb server which responds to queries for decommutated and reduced data
+ # from the database.
+ class DartMasterQuery
+ include DartCommon
+
+ def initialize(ples_per_request = 5)
+ # Keep a thread to make sure we have the current list of items to decom
+ @ples_per_request = ples_per_request
+ @mutex = Mutex.new
+ @decom_list = []
+ @thread = Thread.new do
+ loop do
+ # Get all entries that are ready and decommutation hasn't started
+ if @decom_list.length <= 0
+ @mutex.synchronize do
+ begin
+ @decom_list.replace(PacketLogEntry.where("decom_state = #{PacketLogEntry::NOT_STARTED} and ready = true").order("id ASC").limit(1000).pluck(:id))
+ rescue Exception => error
+ Cosmos::Logger.error("Error getting packets to decom\n#{error.formatted}")
+ end
+ end
+ else
+ sleep(1)
+ end
+ end
+ end
+ end
+
+ # Returns the id of a ple that needs to be decommed next
+ #
+ def get_decom_ple_ids()
+ begin
+ @mutex.synchronize do
+ result = []
+ @ples_per_request.times do
+ ple_id = @decom_list.shift
+ result << ple_id if ple_id
+ end
+ return result
+ end
+ rescue Exception => error
+ msg = "Master Error: #{error.message}"
+ raise $!, msg, $!.backtrace
+ end
+ end
+
+ end
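
DartMasterQuery is the serving half of the new work-distribution scheme: a background thread refills @decom_list with up to 1000 ready PacketLogEntry ids, and get_decom_ple_ids hands them out @ples_per_request at a time under the mutex. A sketch of how the new dart_master process might publish it over JsonDRb, modeled on the other DART servers (the start_service wiring and listen_hosts lookup are assumptions; the actual setup lives in data/lib/cosmos/dart/processes/dart_master.rb, which is not shown in this diff):

```ruby
require 'cosmos'
require 'cosmos/io/json_drb'

# Serve DartMasterQuery on the DART_MASTER port so decommutator
# workers can call get_decom_ple_ids() remotely.
query = DartMasterQuery.new(5) # hand out 5 PLE ids per request
json_drb = Cosmos::JsonDRb.new
json_drb.start_service(Cosmos::System.listen_hosts['DART_MASTER'],
                       Cosmos::System.ports['DART_MASTER'],
                       query)
```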
@@ -14,6 +14,11 @@ ItemToDecomTableMapping

  # Thread which performs data reduction in the DART database.
  class DartReducerWorkerThread
+ # This constant controls how much spread there must be in the data before doing a reduction. Since our minimum
+ # reduction is 1 minute, we will wait until we have at least two minutes of spread. Not as important for higher order
+ # reductions but also ensures that there is a spread in the data points.
+ HOLD_OFF_TIME = 2.minutes
+
  # Create a new thread and start it
  #
  # @param master_queue [Queue] Queue which the new thread will be added to
@@ -61,145 +66,44 @@ class DartReducerWorkerThread

  time_delta, base_model_time_column, time_method = job_attributes(job_type)
  rows = []
+ done = false
  # Find all the rows in the decommutation table which are ready to reduce
- base_model.where("reduced_state = #{DartCommon::READY_TO_REDUCE}").order("meta_id ASC, #{base_model_time_column} ASC").find_each do |row|
- rows << row
- first_row_time = rows[0].send(base_model_time_column)
- last_row_time = rows[-1].send(base_model_time_column)
- # Ensure we have conditions to process the reduction data
- next unless (last_row_time - first_row_time) > time_delta || # Enough samples or
- # The time attribute (min, hour, day) has changed or
- first_row_time.send(time_method) != last_row_time.send(time_method) ||
- rows[0].meta_id != rows[-1].meta_id # New meta data
-
- # Sample from the start to the second to last row because the last row
- # is where we detected a change. The last row will be part of a new sample set.
- sample_rows = rows[0..-2]
- new_row = reduction_model.new
- new_row.start_time = first_row_time
- new_row.num_samples = sample_rows.length
- new_row.meta_id = sample_rows[0].meta_id
- new_row.packet_log_id = sample_rows[0].packet_log_id
- # Process each of the ItemToDecomTableMapping to get the item to be reduced
- mappings.each do |mapping|
- item_name = "i#{mapping.item_index}"
- min_item_name = "i#{mapping.item_index}min"
- max_item_name = "i#{mapping.item_index}max"
- avg_item_name = "i#{mapping.item_index}avg"
- stddev_item_name = "i#{mapping.item_index}stddev"
- min_value = nil
- max_value = nil
- total_samples = 0 # s0
- avg_value = 0.0 # s1
- s2 = 0.0
- stddev_value = 0.0
- min_nan_found = false
- max_nan_found = false
- avg_nan_found = false
- stddev_nan_found = false
- # Process each of the rows in the base model which is the decommutation table
- # or a lesser reduction table (the minute or hour table).
- sample_rows.each do |row_to_reduce|
- # If we processing minute data we're reading from the base decommutation table
- # thus there is only raw values to read
- if job_type == :MINUTE
- value = row_to_reduce.read_attribute(item_name)
- min_sample = value
- max_sample = value
- avg_sample = value
- if value.nil?
- handle_error("#{item_name} is nil in #{row_to_reduce.class}:#{row_to_reduce.id}")
- next
- end
- else # :HOUR or :DAY
- # We're processing hour or day data so we're reducing previously reduced data
- # thus there are min, max, and average values to read
- min_sample = row_to_reduce.read_attribute(min_item_name)
- max_sample = row_to_reduce.read_attribute(max_item_name)
- avg_sample = row_to_reduce.read_attribute(avg_item_name)
- stddev_sample = row_to_reduce.read_attribute(stddev_item_name)
- if min_sample.nil?
- handle_error("#{min_item_name} is nil in #{row_to_reduce.class}:#{row_to_reduce.id}")
- next
- end
- if max_sample.nil?
- handle_error("#{max_item_name} is nil in #{row_to_reduce.class}:#{row_to_reduce.id}")
- next
- end
- if avg_sample.nil?
- handle_error("#{avg_item_name} is nil in #{row_to_reduce.class}:#{row_to_reduce.id}")
- next
- end
- if stddev_sample.nil?
- handle_error("#{stddev_item_name} is nil in #{row_to_reduce.class}:#{row_to_reduce.id}")
- next
- end
- end
-
- if nan_value?(min_sample)
- min_nan_found = true
- else
- if !min_value or min_sample < min_value
- min_value = min_sample
- end
- end
-
- if nan_value?(max_sample)
- max_nan_found = true
- else
- if !max_value or max_sample > max_value
- max_value = max_sample
- end
- end
-
- if nan_value?(avg_sample)
- avg_nan_found = true
- else
- # MINUTE data is reducing the decommutated values
- if job_type == :MINUTE
- total_samples += 1 # s0
- avg_value += avg_sample # s1
- s2 += (avg_sample * avg_sample)
- else # :HOUR or :DAY
- # Aggregated Stddev
- # See https://math.stackexchange.com/questions/1547141/aggregating-standard-deviation-to-a-summary-point
- total_samples += row_to_reduce.num_samples # s0
- avg_value += (avg_sample * row_to_reduce.num_samples) # s1
- s2 += row_to_reduce.num_samples * (avg_sample * avg_sample + stddev_sample * stddev_sample)
+ row_ids = base_model.where("reduced_state = #{DartCommon::READY_TO_REDUCE}").order("meta_id ASC, #{base_model_time_column} ASC").pluck(:id)
+ if row_ids.length > 0
+ first_row = base_model.find(row_ids[0])
+ last_row = base_model.find(row_ids[-1])
+ first_query_row_time = first_row.send(base_model_time_column)
+ last_query_row_time = last_row.send(base_model_time_column)
+ # Require at least a 2 minute spread to ensure a full minute of context is available
+ if (last_query_row_time - first_query_row_time) > HOLD_OFF_TIME
+ row_ids.in_groups_of(1000, false).each do |group_row_ids|
+ break if done
+ query_rows = base_model.order("meta_id ASC, #{base_model_time_column} ASC").where(id: group_row_ids)
+ query_rows.each do |row|
+ rows << row
+ first_row_time = rows[0].send(base_model_time_column)
+ last_row_time = rows[-1].send(base_model_time_column)
+
+ # Break if we are near the end of a minute
+ if (last_query_row_time - last_row_time) < 1.minute
+ done = true
+ break
  end
+
+ # Ensure we have conditions to process the reduction data
+ next unless (last_row_time - first_row_time) > time_delta || # Enough samples or
+ # The time attribute (min, hour, day) has changed or
+ first_row_time.send(time_method) != last_row_time.send(time_method) ||
+ rows[0].meta_id != rows[-1].meta_id # New meta data
+
+ # Sample from the start to the second to last row because the last row
+ # is where we detected a change. The last row will be part of a new sample set.
+ sample_rows = rows[0..-2]
+ create_reduced_row(sample_rows, base_model, reduction_model, base_model_time_column, mappings, job_type)
+ rows = rows[-1..-1] # Start a new sample with the last item in the previous sample
  end
  end
- if total_samples != 0
- # Aggregated Stddev
- # See https://math.stackexchange.com/questions/1547141/aggregating-standard-deviation-to-a-summary-point
- avg_value = avg_value.to_f / total_samples
- # Note: For very large numbers with very small deviations this sqrt can fail. If so then just set the stddev to 0.
- begin
- stddev_value = sqrt((s2 / total_samples) - (avg_value * avg_value))
- rescue Exception
- stddev_value = 0.0
- end
- end
- min_value = Float::NAN if min_nan_found and !min_value
- max_value = Float::NAN if max_nan_found and !max_value
- if avg_nan_found and total_samples == 0
- avg_value = Float::NAN
- stddev_value = Float::NAN
- end
- new_row.write_attribute(min_item_name, min_value)
- new_row.write_attribute(max_item_name, max_value)
- new_row.write_attribute(avg_item_name, avg_value)
- new_row.write_attribute(stddev_item_name, stddev_value)
  end
- base_model.where(id: sample_rows.map(&:id)).update_all(:reduced_state => DartCommon::REDUCED)
- new_row.save! # Create the reduced data row in the database
- base_model.where(id: sample_rows.map(&:id)).update_all(:reduced_id => new_row.id)
- new_row.reduced_state = DartCommon::READY_TO_REDUCE
- new_row.save!
- @status.count += 1
-
- rows = rows[-1..-1] # Start a new sample with the last item in the previous sample
- Cosmos::Logger.debug("Created #{new_row.class}:#{new_row.id} with #{mappings.length} items from #{new_row.num_samples} samples")
  end
  complete_job(job_type, packet_config_id, table_index)
  end # while @running
@@ -208,6 +112,133 @@ class DartReducerWorkerThread
  handle_error("Reducer Thread Unexpectedly Died: #{error.formatted}")
  end

+ def create_reduced_row(sample_rows, base_model, reduction_model, base_model_time_column, mappings, job_type)
+ new_row = reduction_model.new
+ new_row.start_time = sample_rows[0].send(base_model_time_column)
+ new_row.num_samples = sample_rows.length
+ new_row.meta_id = sample_rows[0].meta_id
+ new_row.packet_log_id = sample_rows[0].packet_log_id
+ # Process each of the ItemToDecomTableMapping to get the item to be reduced
+ mappings.each do |mapping|
+ item_name = "i#{mapping.item_index}"
+ min_item_name = "i#{mapping.item_index}min"
+ max_item_name = "i#{mapping.item_index}max"
+ avg_item_name = "i#{mapping.item_index}avg"
+ stddev_item_name = "i#{mapping.item_index}stddev"
+ min_value = nil
+ max_value = nil
+ total_samples = 0 # s0
+ avg_value = 0.0 # s1
+ s2 = 0.0
+ stddev_value = 0.0
+ min_nan_found = false
+ max_nan_found = false
+ avg_nan_found = false
+ stddev_nan_found = false
+ # Process each of the rows in the base model which is the decommutation table
+ # or a lesser reduction table (the minute or hour table).
+ sample_rows.each do |row_to_reduce|
+ # If we processing minute data we're reading from the base decommutation table
+ # thus there is only raw values to read
+ if job_type == :MINUTE
+ value = row_to_reduce.read_attribute(item_name)
+ min_sample = value
+ max_sample = value
+ avg_sample = value
+ if value.nil?
+ handle_error("#{item_name} is nil in #{row_to_reduce.class}:#{row_to_reduce.id}")
+ next
+ end
+ else # :HOUR or :DAY
+ # We're processing hour or day data so we're reducing previously reduced data
+ # thus there are min, max, and average values to read
+ min_sample = row_to_reduce.read_attribute(min_item_name)
+ max_sample = row_to_reduce.read_attribute(max_item_name)
+ avg_sample = row_to_reduce.read_attribute(avg_item_name)
+ stddev_sample = row_to_reduce.read_attribute(stddev_item_name)
+ if min_sample.nil?
+ handle_error("#{min_item_name} is nil in #{row_to_reduce.class}:#{row_to_reduce.id}")
+ next
+ end
+ if max_sample.nil?
+ handle_error("#{max_item_name} is nil in #{row_to_reduce.class}:#{row_to_reduce.id}")
+ next
+ end
+ if avg_sample.nil?
+ handle_error("#{avg_item_name} is nil in #{row_to_reduce.class}:#{row_to_reduce.id}")
+ next
+ end
+ if stddev_sample.nil?
+ handle_error("#{stddev_item_name} is nil in #{row_to_reduce.class}:#{row_to_reduce.id}")
+ next
+ end
+ end
+
+ if nan_value?(min_sample)
+ min_nan_found = true
+ else
+ if !min_value or min_sample < min_value
+ min_value = min_sample
+ end
+ end
+
+ if nan_value?(max_sample)
+ max_nan_found = true
+ else
+ if !max_value or max_sample > max_value
+ max_value = max_sample
+ end
+ end
+
+ if nan_value?(avg_sample)
+ avg_nan_found = true
+ else
+ # MINUTE data is reducing the decommutated values
+ if job_type == :MINUTE
+ total_samples += 1 # s0
+ avg_value += avg_sample # s1
+ s2 += (avg_sample * avg_sample)
+ else # :HOUR or :DAY
+ # Aggregated Stddev
+ # See https://math.stackexchange.com/questions/1547141/aggregating-standard-deviation-to-a-summary-point
+ total_samples += row_to_reduce.num_samples # s0
+ avg_value += (avg_sample * row_to_reduce.num_samples) # s1
+ s2 += row_to_reduce.num_samples * (avg_sample * avg_sample + stddev_sample * stddev_sample)
+ end
+ end
+ end
+ if total_samples != 0
+ # Aggregated Stddev
+ # See https://math.stackexchange.com/questions/1547141/aggregating-standard-deviation-to-a-summary-point
+ avg_value = avg_value.to_f / total_samples
+ # Note: For very large numbers with very small deviations this sqrt can fail. If so then just set the stddev to 0.
+ begin
+ stddev_value = sqrt((s2 / total_samples) - (avg_value * avg_value))
+ rescue Exception
+ stddev_value = 0.0
+ end
+ end
+ min_value = Float::NAN if min_nan_found and !min_value
+ max_value = Float::NAN if max_nan_found and !max_value
+ if avg_nan_found and total_samples == 0
+ avg_value = Float::NAN
+ stddev_value = Float::NAN
+ end
+ new_row.write_attribute(min_item_name, min_value)
+ new_row.write_attribute(max_item_name, max_value)
+ new_row.write_attribute(avg_item_name, avg_value)
+ new_row.write_attribute(stddev_item_name, stddev_value)
+ end
+ base_model.where(id: sample_rows.map(&:id)).update_all(:reduced_state => DartCommon::REDUCED)
+ new_row.save! # Create the reduced data row in the database
+ base_model.where(id: sample_rows.map(&:id)).update_all(:reduced_id => new_row.id)
+ new_row.reduced_state = DartCommon::READY_TO_REDUCE
+ new_row.save!
+ @status.count += 1
+
+ Cosmos::Logger.debug("Created #{new_row.class}:#{new_row.id} with #{mappings.length} items from #{new_row.num_samples} samples")
+ end
+
  # Shutdown the worker thread
  def shutdown
  @running = false
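
For reference, the s0/s1/s2 accumulators in create_reduced_row implement the aggregated mean and standard deviation from the math.stackexchange link cited in the code. In LaTeX form, where n_i, x̄_i, and σ_i are the sample count, mean, and stddev of the i-th input row for HOUR/DAY jobs (MINUTE jobs are the n_i = 1, σ_i = 0 case):

```latex
s_0 = \sum_i n_i, \qquad
s_1 = \sum_i n_i \bar{x}_i, \qquad
s_2 = \sum_i n_i \left( \bar{x}_i^2 + \sigma_i^2 \right)

\bar{x} = \frac{s_1}{s_0}, \qquad
\sigma = \sqrt{\frac{s_2}{s_0} - \bar{x}^2}
```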