openc3 7.0.0 → 7.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79) hide show
  1. checksums.yaml +4 -4
  2. data/bin/openc3cli +105 -13
  3. data/bin/pipinstall +38 -6
  4. data/data/config/command_modifiers.yaml +1 -0
  5. data/data/config/item_modifiers.yaml +2 -1
  6. data/data/config/microservice.yaml +12 -1
  7. data/data/config/parameter_modifiers.yaml +49 -7
  8. data/data/config/table_parameter_modifiers.yaml +3 -1
  9. data/data/config/target.yaml +11 -0
  10. data/data/config/target_config.yaml +6 -2
  11. data/lib/openc3/accessors/template_accessor.rb +9 -0
  12. data/lib/openc3/api/cmd_api.rb +2 -1
  13. data/lib/openc3/api/metrics_api.rb +11 -1
  14. data/lib/openc3/api/tlm_api.rb +21 -6
  15. data/lib/openc3/core_ext/faraday.rb +1 -1
  16. data/lib/openc3/interfaces/interface.rb +1 -6
  17. data/lib/openc3/io/json_api.rb +1 -1
  18. data/lib/openc3/logs/log_writer.rb +3 -1
  19. data/lib/openc3/microservices/decom_common.rb +128 -0
  20. data/lib/openc3/microservices/decom_microservice.rb +27 -96
  21. data/lib/openc3/microservices/interface_decom_common.rb +28 -10
  22. data/lib/openc3/microservices/interface_microservice.rb +16 -9
  23. data/lib/openc3/microservices/log_microservice.rb +1 -1
  24. data/lib/openc3/microservices/microservice.rb +3 -2
  25. data/lib/openc3/microservices/queue_microservice.rb +1 -1
  26. data/lib/openc3/microservices/scope_cleanup_microservice.rb +60 -46
  27. data/lib/openc3/microservices/text_log_microservice.rb +1 -2
  28. data/lib/openc3/models/cvt_model.rb +24 -13
  29. data/lib/openc3/models/db_sharded_model.rb +110 -0
  30. data/lib/openc3/models/interface_model.rb +9 -0
  31. data/lib/openc3/models/interface_status_model.rb +33 -3
  32. data/lib/openc3/models/metric_model.rb +96 -37
  33. data/lib/openc3/models/microservice_model.rb +7 -0
  34. data/lib/openc3/models/microservice_status_model.rb +30 -3
  35. data/lib/openc3/models/plugin_model.rb +9 -1
  36. data/lib/openc3/models/python_package_model.rb +1 -1
  37. data/lib/openc3/models/reaction_model.rb +27 -9
  38. data/lib/openc3/models/reingest_job_model.rb +153 -0
  39. data/lib/openc3/models/scope_model.rb +3 -2
  40. data/lib/openc3/models/script_status_model.rb +4 -20
  41. data/lib/openc3/models/target_model.rb +113 -100
  42. data/lib/openc3/models/trigger_model.rb +24 -7
  43. data/lib/openc3/packets/packet_config.rb +4 -1
  44. data/lib/openc3/script/api_shared.rb +39 -2
  45. data/lib/openc3/script/calendar.rb +32 -10
  46. data/lib/openc3/script/extract.rb +46 -13
  47. data/lib/openc3/script/script.rb +2 -2
  48. data/lib/openc3/script/script_runner.rb +4 -4
  49. data/lib/openc3/script/telemetry.rb +3 -3
  50. data/lib/openc3/script/web_socket_api.rb +29 -22
  51. data/lib/openc3/system/system.rb +20 -3
  52. data/lib/openc3/topics/command_decom_topic.rb +4 -2
  53. data/lib/openc3/topics/command_topic.rb +8 -5
  54. data/lib/openc3/topics/decom_interface_topic.rb +31 -11
  55. data/lib/openc3/topics/interface_topic.rb +88 -27
  56. data/lib/openc3/topics/limits_event_topic.rb +62 -41
  57. data/lib/openc3/topics/router_topic.rb +61 -21
  58. data/lib/openc3/topics/system_events_topic.rb +18 -1
  59. data/lib/openc3/topics/telemetry_decom_topic.rb +2 -1
  60. data/lib/openc3/topics/telemetry_topic.rb +4 -2
  61. data/lib/openc3/topics/topic.rb +77 -5
  62. data/lib/openc3/utilities/aws_bucket.rb +2 -0
  63. data/lib/openc3/utilities/cli_generator.rb +3 -2
  64. data/lib/openc3/utilities/ctrf.rb +231 -0
  65. data/lib/openc3/utilities/metric.rb +15 -1
  66. data/lib/openc3/utilities/questdb_client.rb +177 -40
  67. data/lib/openc3/utilities/reingest_job.rb +377 -0
  68. data/lib/openc3/utilities/ruby_lex_utils.rb +2 -0
  69. data/lib/openc3/utilities/store_autoload.rb +78 -52
  70. data/lib/openc3/utilities/store_queued.rb +20 -12
  71. data/lib/openc3/version.rb +5 -5
  72. data/templates/plugin/plugin.gemspec +13 -1
  73. data/templates/tool_angular/package.json +2 -2
  74. data/templates/tool_react/package.json +1 -1
  75. data/templates/tool_svelte/package.json +1 -1
  76. data/templates/tool_vue/package.json +3 -4
  77. data/templates/tool_vue/src/router.js +2 -2
  78. data/templates/widget/package.json +2 -2
  79. metadata +8 -3
@@ -0,0 +1,377 @@
1
+ # encoding: ascii-8bit
2
+
3
+ # Copyright 2026 OpenC3, Inc.
4
+ # All Rights Reserved.
5
+ #
6
+ # This program is distributed in the hope that it will be useful,
7
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
8
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
9
+ # See LICENSE.md for more details.
10
+ #
11
+ # This file may also be used under the terms of a commercial license
12
+ # if purchased from OpenC3, Inc.
13
+
14
+ require 'fileutils'
15
+ require 'tmpdir'
16
+ require 'openc3/system/system'
17
+ require 'openc3/utilities/bucket'
18
+ require 'openc3/utilities/bucket_utilities'
19
+ require 'openc3/utilities/logger'
20
+ require 'openc3/utilities/questdb_client'
21
+ require 'openc3/logs/packet_log_reader'
22
+ require 'openc3/microservices/decom_common'
23
+ require 'openc3/models/reingest_job_model'
24
+
25
+ module OpenC3
26
+ class ReingestJobError < StandardError; end
27
+
28
+ # Replays raw .bin.gz log files from a bucket, decommutating each packet via
29
+ # DecomCommon.decom_and_publish(check_limits: false) so historical data
30
+ # reaches QuestDB without re-firing limits events.
31
+ #
32
+ # Runs synchronously (caller wraps in a Thread). Tracks state in a
33
+ # ReingestJobModel. DEDUP is enabled on affected tables during the job and
34
+ # disabled in an ensure block on completion (or after a cooldown window so
35
+ # in-flight WAL commits are covered).
36
+ #
37
+ # target_version:
38
+ # - 'as_logged' (default): each file is decoded with the target config hash
39
+ # that was in effect when the packets were originally logged. Files are
40
+ # grouped by their embedded target_id and System is rebuilt per group.
41
+ # - 'current': all files are decoded with the latest target config.
42
+ # - <hash>: explicit hash, used for every file in the job.
43
+ class ReingestJob
44
+ # How often to persist progress during the ingest pass (write every N packets)
45
+ STATUS_UPDATE_EVERY = 500
46
+ # How often to tick the heartbeat during the cooldown sleep
47
+ HEARTBEAT_INTERVAL_SEC = 10
48
+
49
+ # Reingest rebuilds the process-global System singleton. Serialize all
50
+ # reingest jobs running in this process so they don't stomp each other.
51
+ @@run_mutex = Mutex.new
52
+
53
+ def initialize(job_id:, files:, path:, bucket:, scope:,
54
+ target_version: 'as_logged',
55
+ dedup_cooldown_seconds: ENV.fetch('OPENC3_REINGEST_DEDUP_COOLDOWN', 60).to_i,
56
+ logger: Logger)
57
+ @job_id = job_id
58
+ @files = files
59
+ @path = path
60
+ @bucket_env = bucket
61
+ @scope = scope
62
+ @target_version = target_version
63
+ @dedup_cooldown_seconds = dedup_cooldown_seconds
64
+ @logger = logger
65
+ end
66
+
67
+ def run
68
+ tmp_dir = Dir.mktmpdir
69
+ job = load_job
70
+ dedup_enabled_by_us = []
71
+ db_shard = 0
72
+ @@run_mutex.synchronize do
73
+ begin
74
+ mark(job, state: 'Running', progress_phase: 'downloading',
75
+ started_at: Time.now.utc.iso8601,
76
+ progress_total: @files.length)
77
+
78
+ # Parse target from path, e.g. "DEFAULT/raw_logs/tlm/INST/20260421/"
79
+ # → "INST". Fail fast if the path doesn't encode one — otherwise
80
+ # ingest would run against whatever System was loaded in this process
81
+ # from a prior job (or raise opaquely inside PacketLogReader), and
82
+ # the job could mark Complete with rows written under the wrong
83
+ # target config.
84
+ path_parts = @path.to_s.split('/').reject(&:empty?)
85
+ unless path_parts.length >= 4 && path_parts[1] == 'raw_logs'
86
+ raise ReingestJobError, "Cannot determine target from path '#{@path}'; expected '{scope}/raw_logs/{tlm|cmd}/{target}/'"
87
+ end
88
+ target = path_parts[3]
89
+ db_shard = QuestDBClient.db_shard_for_target(target, scope: @scope)
90
+
91
+ local_files = download_and_uncompress(job, tmp_dir)
92
+
93
+ # Pass 1: read raw (no System required) to discover table names and
94
+ # each file's embedded target hash. File hashes are what the "as
95
+ # logged" mode uses to pick the right target_version per file.
96
+ mark(job, progress_phase: 'enabling_dedup', progress_current: 0,
97
+ progress_total: 0)
98
+ table_names, file_versions = discover_tables_and_versions(local_files)
99
+ mark(job, table_names: table_names, progress_total: table_names.length)
100
+
101
+ dedup_enabled_by_us, preexisting = enable_dedup(job, table_names, db_shard)
102
+ mark(job,
103
+ dedup_enabled_by_us: dedup_enabled_by_us,
104
+ dedup_preexisting: preexisting,
105
+ dedup_enabled_at: Time.now.utc.iso8601)
106
+
107
+ # Pass 2: group files by the target_version we'll load for them,
108
+ # then ingest each group under its own System instance.
109
+ groups = group_files_by_version(local_files, file_versions)
110
+ mark(job, versions_used: groups.keys,
111
+ progress_phase: 'ingesting', progress_current: 0,
112
+ progress_total: 0, packets_written: 0)
113
+ ingest_all_groups(job, groups, target)
114
+
115
+ mark(job, progress_phase: 'dedup_cooldown')
116
+ cooldown(job)
117
+
118
+ mark(job, progress_phase: 'disabling_dedup')
119
+ disabled = disable_dedup(job, dedup_enabled_by_us, db_shard)
120
+ mark(job, dedup_disabled_tables: disabled,
121
+ dedup_disabled_at: Time.now.utc.iso8601,
122
+ state: 'Complete',
123
+ finished_at: Time.now.utc.iso8601)
124
+ rescue Exception => e
125
+ @logger.error("Reingest job #{@job_id} failed: #{e.message}\n#{e.backtrace.first(10).join("\n")}")
126
+ # Always try to revert DEDUP even on crash so user tables are not left altered
127
+ disabled_on_crash = []
128
+ begin
129
+ disabled_on_crash = disable_dedup(job, dedup_enabled_by_us, db_shard)
130
+ rescue => de
131
+ @logger.error("Reingest job #{@job_id} failed to disable DEDUP during crash cleanup: #{de.message}")
132
+ end
133
+ mark(job,
134
+ dedup_disabled_tables: disabled_on_crash,
135
+ dedup_disabled_at: Time.now.utc.iso8601,
136
+ state: 'Crashed',
137
+ error: e.message,
138
+ finished_at: Time.now.utc.iso8601)
139
+ ensure
140
+ FileUtils.remove_entry_secure(tmp_dir, true) if tmp_dir && File.directory?(tmp_dir)
141
+ end
142
+ end
143
+ end
144
+
145
+ private
146
+
147
+ def load_job
148
+ ReingestJobModel.get_model(name: @job_id, scope: @scope) or
149
+ raise ReingestJobError, "ReingestJobModel #{@job_id} not found in scope #{@scope}"
150
+ end
151
+
152
+ # Merge attrs into the model and persist. Model#update refreshes updated_at,
153
+ # which doubles as the heartbeat used by the stale-check.
154
+ def mark(job, **attrs)
155
+ attrs.each { |k, v| job.send("#{k}=", v) }
156
+ job.update
157
+ end
158
+
159
+ def download_and_uncompress(job, tmp_dir)
160
+ bucket_name = ENV.fetch(@bucket_env) { |name| raise ReingestJobError, "Unknown bucket #{name}" }
161
+ bucket_client = Bucket.getClient()
162
+ local_files = []
163
+ tmp_root = File.expand_path(tmp_dir) + File::SEPARATOR
164
+ @files.each_with_index do |filename, i|
165
+ key = "#{@path}#{filename}"
166
+ temp_file = File.expand_path(File.join(tmp_dir, filename))
167
+ # Defense-in-depth: the controller validates filenames, but refuse to
168
+ # write outside tmp_dir if any caller (tests, future callers) bypasses it.
169
+ unless temp_file.start_with?(tmp_root)
170
+ raise ReingestJobError, "Invalid filename escapes tmp dir: #{filename}"
171
+ end
172
+ FileUtils.mkdir_p(File.dirname(temp_file))
173
+ bucket_client.get_object(bucket: bucket_name, key: key, path: temp_file)
174
+ if File.extname(filename) == '.gz'
175
+ decompressed = BucketUtilities.uncompress_file(temp_file)
176
+ File.delete(temp_file)
177
+ local_files << decompressed
178
+ else
179
+ local_files << temp_file
180
+ end
181
+ mark(job, progress_current: i + 1)
182
+ end
183
+ local_files
184
+ end
185
+
186
+ # Read each file in raw mode (identify_and_define=false) to collect
187
+ # {scope}__TLM__{target}__{packet} table names and the first target
188
+ # declaration hash embedded in each file. Returns [table_names, file_versions]
189
+ # where file_versions maps local_file_path → hex hash (or nil if the file
190
+ # has no hash, which happens for pre-6.x log files).
191
+ def discover_tables_and_versions(local_files)
192
+ table_names = Set.new
193
+ file_versions = {}
194
+ local_files.each do |local_file|
195
+ reader = PacketLogReader.new
196
+ reader.each(local_file, false) do |packet|
197
+ next unless packet.target_name && packet.packet_name
198
+ cmd_or_tlm = packet.cmd_or_tlm == :CMD ? 'CMD' : 'TLM'
199
+ table_names.add("#{@scope}__#{cmd_or_tlm}__#{packet.target_name}__#{packet.packet_name}")
200
+ end
201
+ ids = reader.instance_variable_get(:@target_ids) || []
202
+ file_versions[local_file] = ids.first ? ids.first.unpack1('H*') : nil
203
+ end
204
+ [table_names.to_a, file_versions]
205
+ end
206
+
207
+ # Returns a Hash of target_version → [local_file, ...]. The value at key
208
+ # 'current' means "use System with the latest config"; any other value is
209
+ # a specific hash string used as target_version in System.setup_targets.
210
+ def group_files_by_version(local_files, file_versions)
211
+ groups = Hash.new { |h, k| h[k] = [] }
212
+ case @target_version
213
+ when 'current'
214
+ groups['current'] = local_files.dup
215
+ when 'as_logged', nil
216
+ local_files.each do |file|
217
+ version = file_versions[file] || 'current'
218
+ groups[version] << file
219
+ end
220
+ else
221
+ # Caller passed an explicit hash; use it for every file.
222
+ groups[@target_version] = local_files.dup
223
+ end
224
+ groups
225
+ end
226
+
227
+ # For each version group, rebuild System under that version and ingest
228
+ # the group's files. `@@run_mutex` in `run` protects other threads from
229
+ # seeing a transient nil @@instance.
230
+ #
231
+ # If the requested target archive (a specific hash) is missing from the
232
+ # config bucket — which happens in dev setups where every `openc3.sh start`
233
+ # regenerates the target archive with a fresh timestamp-appended gem
234
+ # version — we fall back to 'current' and record a warning on the job so
235
+ # the UI can surface it. This matters because the old historical archive
236
+ # the log file references may no longer exist.
237
+ def ingest_all_groups(job, groups, target)
238
+ packets_written = 0
239
+ last_status_at = 0
240
+ warnings = (job.warnings || []).dup
241
+ groups.each do |version, files|
242
+ resolved = load_system_with_fallback(target, version, warnings)
243
+ unless resolved
244
+ # Even the 'current' fallback failed; skip this group rather than
245
+ # publish empty json_data for every packet.
246
+ mark(job, warnings: warnings)
247
+ next
248
+ end
249
+ mark(job, warnings: warnings) if warnings.any?
250
+ files.each do |file|
251
+ packets_written, last_status_at = ingest_file(job, file, packets_written, last_status_at)
252
+ end
253
+ end
254
+ mark(job, packets_written: packets_written, warnings: warnings)
255
+ end
256
+
257
+ # Returns the target_version that was actually loaded, or nil if even the
258
+ # 'current' fallback failed. Appends human-readable entries to `warnings`
259
+ # for any fallback or failure.
260
+ def load_system_with_fallback(target, version, warnings)
261
+ begin
262
+ load_system(target, version)
263
+ return version
264
+ rescue => e
265
+ if version == 'current'
266
+ # Caller explicitly requested 'current' and that failed; no further
267
+ # fallback exists — propagate so the outer rescue marks Crashed.
268
+ raise
269
+ end
270
+ @logger.warn("Reingest job #{@job_id}: target archive for #{target} version '#{version}' unavailable (#{e.class}: #{e.message}); falling back to 'current'")
271
+ warnings << "Version '#{version}' archive missing; used 'current' instead"
272
+ end
273
+
274
+ begin
275
+ load_system(target, 'current')
276
+ 'current'
277
+ rescue => e
278
+ @logger.error("Reingest job #{@job_id}: fallback to 'current' also failed: #{e.class}: #{e.message}")
279
+ warnings << "Version '#{version}' archive missing and 'current' also failed (#{e.message})"
280
+ nil
281
+ end
282
+ end
283
+
284
+ def load_system(target, version)
285
+ System.reset_instance!
286
+ System.setup_targets([target], Dir.tmpdir, scope: @scope, target_version: version)
287
+ end
288
+
289
+ def ingest_file(job, local_file, packets_written, last_status_at)
290
+ reader = PacketLogReader.new
291
+ reader.each(local_file, true) do |packet|
292
+ next unless packet.target_name && packet.packet_name
293
+ packet.stored = true
294
+ DecomCommon.decom_and_publish(
295
+ packet,
296
+ scope: @scope,
297
+ target_names: [packet.target_name],
298
+ logger: @logger,
299
+ name: "REINGEST:#{@job_id}",
300
+ check_limits: false,
301
+ )
302
+ packets_written += 1
303
+ if packets_written - last_status_at >= STATUS_UPDATE_EVERY
304
+ mark(job, packets_written: packets_written)
305
+ last_status_at = packets_written
306
+ end
307
+ end
308
+ [packets_written, last_status_at]
309
+ end
310
+
311
+ # Returns [enabled_by_us, preexisting]. Only tables we enable are recorded
312
+ # in enabled_by_us; pre-existing DEDUP tables are left untouched on teardown.
313
+ def enable_dedup(job, table_names, db_shard)
314
+ enabled_by_us = []
315
+ preexisting = []
316
+ conn = QuestDBClient.connection(db_shard: db_shard)
317
+ table_names.each_with_index do |table_name, i|
318
+ begin
319
+ already = dedup_already_enabled?(conn, table_name)
320
+ if already
321
+ preexisting << table_name
322
+ else
323
+ conn.exec("ALTER TABLE '#{table_name}' DEDUP ENABLE UPSERT KEYS(PACKET_TIMESECONDS)")
324
+ enabled_by_us << table_name
325
+ end
326
+ rescue => e
327
+ @logger.warn("Failed to enable DEDUP on #{table_name}: #{e.message}")
328
+ end
329
+ mark(job, progress_current: i + 1)
330
+ end
331
+ [enabled_by_us, preexisting]
332
+ end
333
+
334
+ # QuestDB exposes per-table dedup status via tables() function.
335
+ # Falls back to false (treat as not-enabled, will issue ALTER) on any error.
336
+ def dedup_already_enabled?(conn, table_name)
337
+ result = conn.exec_params(
338
+ "SELECT dedup FROM tables() WHERE table_name = $1",
339
+ [table_name],
340
+ )
341
+ return false if result.ntuples == 0
342
+ value = result[0]['dedup']
343
+ value == true || value == 't' || value.to_s.downcase == 'true'
344
+ rescue => e
345
+ @logger.warn("Could not query DEDUP status for #{table_name}: #{e.message}")
346
+ false
347
+ end
348
+
349
+ # Sleep dedup_cooldown_seconds, ticking the heartbeat so the stale-check
350
+ # doesn't misfire during the wait. This gives the Python TsdbMicroservice
351
+ # and QuestDB WAL time to commit reingested rows while DEDUP is still on.
352
+ def cooldown(job)
353
+ remaining = @dedup_cooldown_seconds
354
+ while remaining > 0
355
+ step = [HEARTBEAT_INTERVAL_SEC, remaining].min
356
+ sleep(step)
357
+ remaining -= step
358
+ mark(job) # heartbeat only
359
+ end
360
+ end
361
+
362
+ def disable_dedup(job, tables, db_shard)
363
+ disabled = []
364
+ conn = QuestDBClient.connection(db_shard: db_shard)
365
+ tables.each_with_index do |table_name, i|
366
+ begin
367
+ conn.exec("ALTER TABLE '#{table_name}' DEDUP DISABLE")
368
+ disabled << table_name
369
+ rescue => e
370
+ @logger.warn("Failed to disable DEDUP on #{table_name}: #{e.message}")
371
+ end
372
+ mark(job, progress_current: i + 1, progress_total: tables.length)
373
+ end
374
+ disabled
375
+ end
376
+ end
377
+ end
@@ -16,7 +16,9 @@
16
16
  # if purchased from OpenC3, Inc.
17
17
 
18
18
  require 'irb/ruby-lex'
19
+ require 'ripper'
19
20
  require 'prism'
21
+ require 'ripper'
20
22
 
21
23
  class RubyLexUtils
22
24
  OPENING_DELIMITER_TYPES = %i(PARENTHESIS_LEFT BRACKET_LEFT BRACE_LEFT BRACKET_LEFT_ARRAY)
@@ -20,27 +20,16 @@ require 'hiredis-client'
20
20
  require 'json'
21
21
  require 'connection_pool'
22
22
 
23
- if ENV['OPENC3_REDIS_CLUSTER']
24
- require 'openc3-enterprise/utilities/store'
25
- $openc3_redis_cluster = true
26
- else
27
- $openc3_redis_cluster = false
28
- end
29
-
30
23
  module OpenC3
31
24
  class StoreConnectionPool < ConnectionPool
32
25
  def pipelined
33
- if $openc3_redis_cluster
34
- yield # TODO: Update keys to support pipelining in cluster
35
- else
36
- with do |redis|
37
- redis.pipelined do |pipeline|
38
- Thread.current[:pipeline] = pipeline
39
- begin
40
- yield
41
- ensure
42
- Thread.current[:pipeline] = nil
43
- end
26
+ with do |redis|
27
+ redis.pipelined do |pipeline|
28
+ Thread.current[:pipeline] = pipeline
29
+ begin
30
+ yield
31
+ ensure
32
+ Thread.current[:pipeline] = nil
44
33
  end
45
34
  end
46
35
  end
@@ -57,8 +46,13 @@ module OpenC3
57
46
  end
58
47
 
59
48
  class Store
60
- # Variable that holds the singleton instance
61
- @instance = nil
49
+ # Variable that holds the singleton instances per db_shard
50
+ @instances = []
51
+
52
+ # DB_Shard cache: { "scope__target_name" => [db_shard_number, Time] }
53
+ @@db_shard_cache = {}
54
+ @@db_shard_cache_mutex = Mutex.new
55
+ DB_SHARD_CACHE_TIMEOUT = 60 # seconds
62
56
 
63
57
  # Mutex used to ensure that only one instance is created
64
58
  @@instance_mutex = Mutex.new
@@ -66,14 +60,48 @@ module OpenC3
66
60
  attr_reader :redis_url
67
61
  attr_reader :redis_pool
68
62
 
63
+ # Look up the db_shard number for a target with a 1-minute cache.
64
+ # Reads directly from Redis db_shard 0 to avoid circular deps with TargetModel.
65
+ # Non-target-specific data (nil target_name) always returns db_shard 0.
66
+ def self.db_shard_for_target(target_name, scope: "DEFAULT")
67
+ return 0 unless target_name
68
+
69
+ cache_key = "#{scope}__#{target_name}"
70
+ now = Time.now
71
+
72
+ @@db_shard_cache_mutex.synchronize do
73
+ cached = @@db_shard_cache[cache_key]
74
+ if cached
75
+ db_shard, cached_at = cached
76
+ return db_shard if (now - cached_at) < DB_SHARD_CACHE_TIMEOUT
77
+ end
78
+ end
79
+
80
+ begin
81
+ json = Store.instance(db_shard: 0).hget("#{scope}__openc3_targets", target_name)
82
+ db_shard = json ? JSON.parse(json)['db_shard'].to_i : 0
83
+ rescue
84
+ db_shard = 0
85
+ end
86
+
87
+ @@db_shard_cache_mutex.synchronize do
88
+ @@db_shard_cache[cache_key] = [db_shard, now]
89
+ end
90
+
91
+ db_shard
92
+ end
93
+
69
94
  # Get the singleton instance
70
- def self.instance(pool_size = 100)
95
+ def self.instance(pool_size = 100, db_shard: 0)
71
96
  # Logger.level = Logger::DEBUG
72
- return @instance if @instance
97
+ @instances ||= []
98
+ the_instance = @instances[db_shard]
99
+ return the_instance if the_instance
73
100
 
74
101
  @@instance_mutex.synchronize do
75
- @instance ||= self.new(pool_size)
76
- return @instance
102
+ @instances ||= []
103
+ @instances[db_shard] ||= self.new(pool_size, db_shard: db_shard)
104
+ return @instances[db_shard]
77
105
  end
78
106
  end
79
107
 
@@ -87,17 +115,16 @@ module OpenC3
87
115
  @redis_pool.with { |redis| redis.public_send(message, *args, **kwargs, &block) }
88
116
  end
89
117
 
90
- def initialize(pool_size = 10)
118
+ def initialize(pool_size = 10, db_shard: 0)
91
119
  @redis_username = ENV['OPENC3_REDIS_USERNAME']
92
120
  @redis_key = ENV['OPENC3_REDIS_PASSWORD']
93
- @redis_url = "redis://#{ENV['OPENC3_REDIS_HOSTNAME']}:#{ENV['OPENC3_REDIS_PORT']}"
121
+ hostname = ENV['OPENC3_REDIS_HOSTNAME'].to_s.gsub("SHARDNUM", db_shard.to_s)
122
+ @redis_url = "redis://#{hostname}:#{ENV.fetch('OPENC3_REDIS_PORT', 6379)}"
94
123
  @redis_pool = StoreConnectionPool.new(size: pool_size) { build_redis() }
95
124
  end
96
125
 
97
- unless $openc3_redis_cluster
98
- def build_redis
99
- return Redis.new(url: @redis_url, username: @redis_username, password: @redis_key)
100
- end
126
+ def build_redis
127
+ return Redis.new(url: @redis_url, username: @redis_username, password: @redis_key)
101
128
  end
102
129
 
103
130
  ###########################################################################
@@ -160,30 +187,28 @@ module OpenC3
160
187
  return offsets
161
188
  end
162
189
 
163
- unless $openc3_redis_cluster
164
- def read_topics(topics, offsets = nil, timeout_ms = 1000, count = nil)
165
- return {} if topics.empty?
166
- Thread.current[:topic_offsets] ||= {}
167
- topic_offsets = Thread.current[:topic_offsets]
168
- begin
169
- # Logger.debug "read_topics: #{topics}, #{offsets} pool:#{@redis_pool}"
170
- @redis_pool.with do |redis|
171
- offsets = update_topic_offsets(topics) unless offsets
172
- result = redis.xread(topics, offsets, block: timeout_ms, count: count)
173
- if result and result.length > 0
174
- result.each do |topic, messages|
175
- messages.each do |msg_id, msg_hash|
176
- topic_offsets[topic] = msg_id
177
- yield topic, msg_id, msg_hash, redis if block_given?
178
- end
190
+ def read_topics(topics, offsets = nil, timeout_ms = 1000, count = nil)
191
+ return {} if topics.empty?
192
+ Thread.current[:topic_offsets] ||= {}
193
+ topic_offsets = Thread.current[:topic_offsets]
194
+ begin
195
+ # Logger.debug "read_topics: #{topics}, #{offsets} pool:#{@redis_pool}"
196
+ @redis_pool.with do |redis|
197
+ offsets = update_topic_offsets(topics) unless offsets
198
+ result = redis.xread(topics, offsets, block: timeout_ms, count: count)
199
+ if result and result.length > 0
200
+ result.each do |topic, messages|
201
+ messages.each do |msg_id, msg_hash|
202
+ topic_offsets[topic] = msg_id
203
+ yield topic, msg_id, msg_hash, redis if block_given?
179
204
  end
180
205
  end
181
- # Logger.debug "result:#{result}" if result and result.length > 0
182
- return result
183
206
  end
184
- rescue Redis::TimeoutError
185
- return {} # Should return an empty hash not array - xread returns a hash
207
+ # Logger.debug "result:#{result}" if result and result.length > 0
208
+ return result
186
209
  end
210
+ rescue Redis::TimeoutError
211
+ return {} # Should return an empty hash not array - xread returns a hash
187
212
  end
188
213
  end
189
214
 
@@ -233,9 +258,10 @@ module OpenC3
233
258
  end
234
259
 
235
260
  class EphemeralStore < Store
236
- def initialize(pool_size = 10)
261
+ def initialize(pool_size = 10, db_shard: 0)
237
262
  super(pool_size)
238
- @redis_url = "redis://#{ENV['OPENC3_REDIS_EPHEMERAL_HOSTNAME']}:#{ENV['OPENC3_REDIS_EPHEMERAL_PORT']}"
263
+ hostname = ENV['OPENC3_REDIS_EPHEMERAL_HOSTNAME'].to_s.gsub("SHARDNUM", db_shard.to_s)
264
+ @redis_url = "redis://#{hostname}:#{ENV.fetch('OPENC3_REDIS_EPHEMERAL_PORT', 6380)}"
239
265
  @redis_pool = StoreConnectionPool.new(size: pool_size) { build_redis() }
240
266
  end
241
267
  end
@@ -18,30 +18,34 @@ module OpenC3
18
18
  class StoreQueued
19
19
  attr_reader :update_interval
20
20
 
21
- # Variable that holds the singleton instance
22
- @instance = nil
21
+ # Variable that holds the singleton instances per db_shard
22
+ @instances = []
23
23
 
24
24
  # Mutex used to ensure that only one instance is created
25
25
  @@instance_mutex = Mutex.new
26
26
 
27
- # Get the singleton instance
27
+ # Get the singleton instance for the given db_shard
28
28
  # Sets the update interval to 1 second by default
29
- def self.instance(update_interval = 1) # seconds
30
- return @instance if @instance
29
+ def self.instance(update_interval = 1, db_shard: 0) # seconds
30
+ @instances ||= []
31
+ the_instance = @instances[db_shard]
32
+ return the_instance if the_instance
31
33
 
32
34
  @@instance_mutex.synchronize do
33
- @instance ||= self.new(update_interval)
34
- return @instance
35
+ @instances ||= []
36
+ @instances[db_shard] ||= self.new(update_interval, db_shard: db_shard)
37
+ return @instances[db_shard]
35
38
  end
36
39
  end
37
40
 
38
- # Delegate all unknown class methods to delegate to the instance
41
+ # Delegate all unknown class methods to delegate to the instance (db_shard 0)
39
42
  def self.method_missing(message, *args, **kwargs, &)
40
43
  self.instance.public_send(message, *args, **kwargs, &)
41
44
  end
42
45
 
43
- def initialize(update_interval)
46
+ def initialize(update_interval, db_shard: 0)
44
47
  @update_interval = update_interval
48
+ @db_shard = db_shard
45
49
  @store = store_instance()
46
50
  # Queue to hold the store requests
47
51
  @store_queue = Queue.new
@@ -80,7 +84,11 @@ module OpenC3
80
84
  while true
81
85
  start_time = Time.now
82
86
 
83
- process_queue()
87
+ begin
88
+ process_queue()
89
+ rescue => e
90
+ puts "StoreQueued thread error (db_shard=#{@db_shard}):\n#{e.formatted}"
91
+ end
84
92
 
85
93
  # Only check whether to update at a set interval
86
94
  run_time = Time.now - start_time
@@ -107,7 +115,7 @@ module OpenC3
107
115
 
108
116
  # Returns the store we're working with
109
117
  def store_instance
110
- Store.instance
118
+ Store.instance(db_shard: @db_shard)
111
119
  end
112
120
 
113
121
  def graceful_kill
@@ -117,7 +125,7 @@ module OpenC3
117
125
 
118
126
  class EphemeralStoreQueued < StoreQueued
119
127
  def store_instance
120
- EphemeralStore.instance
128
+ EphemeralStore.instance(db_shard: @db_shard)
121
129
  end
122
130
  end
123
131
  end
@@ -1,14 +1,14 @@
1
1
  # encoding: ascii-8bit
2
2
 
3
- OPENC3_VERSION = '7.0.0'
3
+ OPENC3_VERSION = '7.1.0'
4
4
  module OpenC3
5
5
  module Version
6
6
  MAJOR = '7'
7
- MINOR = '0'
7
+ MINOR = '1'
8
8
  PATCH = '0'
9
9
  OTHER = ''
10
- BUILD = '463726bb8dab631febe2e502ec0462a460bd326c'
10
+ BUILD = '1074049d7a87d4b4d8cdc31e3512ab495b7492bb'
11
11
  end
12
- VERSION = '7.0.0'
13
- GEM_VERSION = '7.0.0'
12
+ VERSION = '7.1.0'
13
+ GEM_VERSION = '7.1.0'
14
14
  end