ruby_tdms 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50) hide show
  1. checksums.yaml +7 -0
  2. data/.gitignore +1 -0
  3. data/.travis.yml +7 -0
  4. data/CODE_OF_CONDUCT.md +13 -0
  5. data/Gemfile +4 -0
  6. data/Gemfile.lock +31 -0
  7. data/LICENSE.txt +26 -0
  8. data/README.md +28 -0
  9. data/Rakefile +10 -0
  10. data/demo.rb +14 -0
  11. data/doc/data_types.txt +23 -0
  12. data/doc/example_disasm.txt +47 -0
  13. data/doc/tdms_format.txt +101 -0
  14. data/doc/usage.txt +48 -0
  15. data/lib/ruby_tdms.rb +2 -0
  16. data/lib/ruby_tdms/aggregate_channel.rb +53 -0
  17. data/lib/ruby_tdms/aggregate_channel_enumerator.rb +50 -0
  18. data/lib/ruby_tdms/channel_enumerator.rb +33 -0
  19. data/lib/ruby_tdms/data_types.rb +22 -0
  20. data/lib/ruby_tdms/data_types/base.rb +19 -0
  21. data/lib/ruby_tdms/data_types/boolean.rb +15 -0
  22. data/lib/ruby_tdms/data_types/double.rb +19 -0
  23. data/lib/ruby_tdms/data_types/double_with_unit.rb +19 -0
  24. data/lib/ruby_tdms/data_types/int16.rb +19 -0
  25. data/lib/ruby_tdms/data_types/int32.rb +19 -0
  26. data/lib/ruby_tdms/data_types/int64.rb +19 -0
  27. data/lib/ruby_tdms/data_types/int8.rb +15 -0
  28. data/lib/ruby_tdms/data_types/single.rb +19 -0
  29. data/lib/ruby_tdms/data_types/single_with_unit.rb +19 -0
  30. data/lib/ruby_tdms/data_types/string.rb +15 -0
  31. data/lib/ruby_tdms/data_types/timestamp.rb +15 -0
  32. data/lib/ruby_tdms/data_types/u_int16.rb +19 -0
  33. data/lib/ruby_tdms/data_types/u_int32.rb +19 -0
  34. data/lib/ruby_tdms/data_types/u_int64.rb +19 -0
  35. data/lib/ruby_tdms/data_types/u_int8.rb +15 -0
  36. data/lib/ruby_tdms/document.rb +105 -0
  37. data/lib/ruby_tdms/file.rb +13 -0
  38. data/lib/ruby_tdms/object_parser.rb +47 -0
  39. data/lib/ruby_tdms/objects/base.rb +47 -0
  40. data/lib/ruby_tdms/objects/channel.rb +105 -0
  41. data/lib/ruby_tdms/objects/file.rb +11 -0
  42. data/lib/ruby_tdms/objects/group.rb +22 -0
  43. data/lib/ruby_tdms/path.rb +97 -0
  44. data/lib/ruby_tdms/property.rb +16 -0
  45. data/lib/ruby_tdms/segment.rb +107 -0
  46. data/lib/ruby_tdms/streaming.rb +124 -0
  47. data/lib/ruby_tdms/string_channel_enumerator.rb +49 -0
  48. data/lib/ruby_tdms/version.rb +3 -0
  49. data/ruby_tdms.gemspec +38 -0
  50. metadata +185 -0
@@ -0,0 +1,13 @@
1
+ require_relative 'streaming'
2
+
3
module RubyTDMS
  # Thin ::File subclass that mixes in the TDMS stream-reading helpers and
  # provides a one-shot entry point for parsing a TDMS file from disk.
  class File < ::File
    include Streaming

    # Open +filename+ in binary mode and hand the stream to a new Document.
    # @return [Document] the parsed TDMS document.
    def self.parse(filename)
      Document.new open(filename, 'rb')
    end
  end
end
@@ -0,0 +1,47 @@
1
+ require_rel 'objects'
2
+
3
module RubyTDMS
  # Parses the object entries found in a segment's meta data section and
  # instantiates the matching Objects::* class for each one.
  class ObjectParser
    # Raw-data-index sentinel values (NI TDMS internal format spec).
    NO_RAW_DATA = 0xFFFFFFFF            # object carries no raw data in this segment
    DAQMX_FORMAT_CHANGING = 0x69120000  # "DAQmx Format Changing scaler"
    DAQMX_DIGITAL_LINE = 0x69130000     # "DAQmx Digital Line scaler"
    SAME_AS_PREVIOUS = 0x00000000       # raw data index identical to a previous segment

    class << self
      # Given a +Segment+, parse the next object from +stream+.
      # Always returns a parsed object or raises — despite what older comments
      # claimed, there is no nil return for "no objects remain"; the caller
      # (Segment#parse_meta_data) drives the loop by object count.
      #
      # @param stream [Streaming] the TDMS byte stream.
      # @param document [Document] the document being assembled.
      # @param segment [Segment] the segment this object belongs to.
      # @return [Objects::Base] the parsed object.
      # @raise [NotImplementedError] for DAQmx scaler objects.
      def parse_stream(stream, document, segment)
        path = Path.new raw: stream.read_utf8_string
        raw_index = stream.read_u32

        # The root path "/" denotes the file object itself.
        if path == '/'
          return Objects::File.new(path, document, segment).tap { |obj| obj.parse_stream stream }
        end

        case raw_index
        when NO_RAW_DATA
          # No data stored, indicating a group object.
          # NOTE(review): per the TDMS spec this flag means "no raw data in this
          # segment", which a data-less channel could also carry — confirm that
          # treating it as a group is always correct for files in the wild.
          Objects::Group.new(path, document, segment).tap { |obj| obj.parse_stream stream }
        when DAQMX_FORMAT_CHANGING
          # TODO: Implement support for this
          raise NotImplementedError, 'DAQmx Format Changing scaler support is not implemented'
        when DAQMX_DIGITAL_LINE
          # TODO: Implement support for this
          raise NotImplementedError, 'DAQmx Digital Line scaler support is not implemented'
        when SAME_AS_PREVIOUS
          # Identical to a previous segment: clone the most recent channel with
          # this path and have it update its streaming parameters.
          # reverse_each avoids copying the whole objects array just to search backwards.
          previous_channel = document.objects.reverse_each.find { |object| object.path == path }
          Objects::Channel.new(path, document, segment).tap do |obj|
            obj.continue_stream stream, raw_index, previous_channel
            attach_to_group obj, document, path
          end
        else
          # Ordinary channel with a full raw data index.
          Objects::Channel.new(path, document, segment).tap do |obj|
            obj.parse_stream stream, raw_index
            attach_to_group obj, document, path
          end
        end
      end

      private

      # Register +channel+ with its parent group object, if that group has
      # already been parsed into the document.
      def attach_to_group(channel, document, path)
        group_path = Path.new parts: path.to_a[0..-2]
        group = document.objects.find { |object| object.is_a? Objects::Group and object.path == group_path }
        group.channels << channel if group
      end
    end
  end
end
@@ -0,0 +1,47 @@
1
module RubyTDMS
  module Objects
    # TDMS object base.
    # All objects hold a reference to their segment since objects can be
    # striped across segments; properties are accumulated as they are parsed.
    class Base
      attr_reader :path, :properties, :segment, :stream


      def initialize(path, document, segment)
        @path = path
        @document = document
        @segment = segment
        @stream = document.stream

        @properties = []
      end


      # Called instead of #parse_stream when this object continues one from a
      # previous segment; only the property list is (re)read here.
      def continue_stream(stream, previous_channel)
        parse_properties stream
      end


      # Read this object's meta data (just the properties at this level).
      def parse_stream(stream)
        parse_properties stream
      end


      # JSON-ready hash: the path plus a {name => value} map of all properties.
      def as_json
        flattened = properties.each_with_object({}) do |property, acc|
          acc[property.name.to_s.to_sym] = property.value
        end

        { path: path.to_s, properties: flattened }
      end


      protected

      # Read the property count, then that many properties, honoring the
      # segment's endianness.
      def parse_properties(stream)
        @properties_length = stream.read_u32
        @properties_length.times { @properties << stream.read_property(@segment.big_endian?) }
      end
    end
  end
end
@@ -0,0 +1,105 @@
1
+ require_relative '../data_types'
2
+ require_relative '../channel_enumerator'
3
+ require_relative '../string_channel_enumerator'
4
+
5
module RubyTDMS
  module Objects
    # A TDMS channel: a leaf object that owns raw values in one or more chunks
    # of a segment. Holds the data-type meta data read from the raw data index
    # and computes the byte offsets needed to locate its values in the stream.
    class Channel < Base
      attr_reader :chunk_offsets, :chunk_value_count, :data_length, :data_type, :data_type_id, :dimensions, :chunk_length, :raw_data_offset, :value_count, :value_offset


      def initialize(path, document, segment)
        super
        @chunk_offsets = []  # absolute stream offsets of this channel's data, one per chunk (filled by #calculate_offsets)
        @value_count = 0     # total number of values across all chunks (set by #calculate_offsets)
      end


      # Channel name: the last component of the object path.
      def name
        path.to_a.last
      end


      # JSON-ready hash. Note: serializing +values+ forces a full read of the
      # channel's raw data from the stream.
      def as_json
        super.merge({
          name: name,
          data_type: data_type.name.split('::').last,
          dimensions: dimensions,
          values: values.to_a
        })
      end


      # Get all data from the stream to configure ourself with data type, number of values, etc.
      # +raw_index+ is the (non-sentinel) raw data index read by ObjectParser; it is
      # not consumed here beyond having selected this parse path.
      def parse_stream(stream, raw_index)
        @data_type_id = stream.read_u32
        @dimensions = stream.read_u32
        @chunk_value_count = stream.read_u64

        @data_type = DataTypes.find_by_id @data_type_id
        # Get the data length for variable length types (when DataTypes::LENGTH_IN_BYTES is nil)
        @data_length = @data_type::LENGTH_IN_BYTES || stream.read_u64

        # Chunk length is the same as data length for variable length types
        @chunk_length = @data_type::LENGTH_IN_BYTES.nil? ? @data_length : @data_length * @dimensions * @chunk_value_count # Size of the data for this channel in a given chunk.

        super stream
      end


      # When a channel is continued in a new segment, this method is called rather than #parse_stream.
      # All type/shape meta data is copied from +previous_channel+ (the matching
      # channel object from an earlier segment).
      def continue_stream(stream, raw_index, previous_channel)
        @chunk_value_count = previous_channel.chunk_value_count
        @data_length = previous_channel.data_length
        @data_type = previous_channel.data_type
        @data_type_id = previous_channel.data_type_id
        @dimensions = previous_channel.dimensions

        @chunk_length = @data_length * @dimensions * @chunk_value_count # Size of the data for this channel in a given chunk.

        super stream, previous_channel
      end


      # After all channels in a segment have been read, we have to determine our raw data starting offset and
      # the offsets for all individual values, based on the number of chunks in the segment as well as whether
      # the segment is interleaved.
      def calculate_offsets
        previous_channel = nil
        channels = @segment.raw_channels
        me = channels.index self
        previous_channel = channels[me - 1] if me and me > 0

        if @segment.interleaved_data?
          # NOTE(review): for interleaved data this appears to be the record
          # stride (sum of every channel's value size), not a per-channel
          # offset — confirm against the value enumerators that consume it.
          @value_offset = @segment.raw_channels.map(&:data_length).reduce :+
        else
          @value_offset = @data_length
        end

        # Our data starts where the previous channel's ends: one value after it
        # when interleaved, one whole chunk after it otherwise. The first
        # channel starts at the segment's raw data offset.
        @raw_data_offset = @segment.raw_data_offset
        if previous_channel
          @raw_data_offset = previous_channel.raw_data_offset
          @raw_data_offset += @segment.interleaved_data? ? previous_channel.data_length : previous_channel.chunk_length
        end

        # NOTE(review): successive chunk offsets advance by this channel's own
        # @chunk_length; with multiple channels per chunk the step between
        # chunks would plausibly be the segment-wide chunk length instead —
        # verify with a multi-channel, multi-chunk file.
        @segment.chunk_count.times do
          @chunk_offsets << @raw_data_offset + @chunk_offsets.length * @chunk_length
        end
        @value_count = @chunk_value_count * @segment.chunk_count
      end


      # Lazy enumerator over this channel's decoded values. Variable-length
      # types (LENGTH_IN_BYTES nil, i.e. strings) need the string-aware
      # enumerator; fixed-size types use the generic one.
      def values
        @values ||= begin
          klass = if @data_type::LENGTH_IN_BYTES.nil?
            StringChannelEnumerator
          else
            ChannelEnumerator
          end

          klass.new self
        end
      end
    end
  end
end
@@ -0,0 +1,11 @@
1
+ require_relative 'base'
2
+
3
module RubyTDMS
  module Objects
    # The root ("/") TDMS object, which describes file-level properties.
    class File < Base
      # JSON-ready hash. The root path carries no information, so it is
      # stripped from the base representation.
      def as_json
        json = super
        json.reject { |key, _value| key == :path }
      end
    end
  end
end
@@ -0,0 +1,22 @@
1
+ require_relative 'base'
2
+
3
module RubyTDMS
  module Objects
    # A TDMS group object: a named container that collects the channels
    # parsed underneath it.
    class Group < Base
      attr_reader :channels


      def initialize(*args)
        super
        @channels = []
      end


      # JSON-ready hash: the base representation plus how many channels the
      # group currently holds.
      def as_json
        super.merge(channel_count: @channels.length)
      end
    end
  end
end
@@ -0,0 +1,97 @@
1
module RubyTDMS
  # A TDMS object path such as "/'group'/'channel'".
  #
  # Three representations are supported:
  # * parts — array of decoded component strings (components may contain "/" or "'")
  # * path  — "/a/b" form, where "/" inside a component is escaped as "\/"
  # * raw   — "/'a'/'b'" form as stored in TDMS files, with "'" doubled inside quotes
  class Path
    # Splits a +path+ string on "/" except where escaped as "\/".
    PATH_MATCHER = /(?:(?<!\\))\//
    # Splits a +raw+ string on "/" at the string boundaries or between quoted components.
    RAW_MATCHER = /(?:^|(?<='))\/(?:(?=')|$)/

    # Can initialize with parts, path, or raw. Elements can contain / only in raw or parts forms.
    # @raise [ArgumentError] if more than one initialization option is given.
    def initialize(options = {})
      raise ArgumentError, 'Initialize with at most one of +parts+, +path+, or +raw+.' if options.length > 1
      @parts = options[:parts] || []
      self.path = options[:path] if options.has_key? :path
      self.raw = options[:raw] if options.has_key? :raw
    end


    # Marshal-style hook; identical to #to_s.
    def dump
      to_s
    end


    # Hash by string form so equal paths collide in Hash keys (see #eql?).
    def hash
      to_s.hash
    end


    def inspect
      "#<#{self.class.name}:#{self.object_id} path=#{path.inspect}>"
    end


    # Marshal-style hook; re-initializes this path from +string+.
    def load(string)
      self.path = string
    end


    # Escaped "/a/b" form; "/" inside a component becomes "\/".
    def path
      '/' + @parts.map { |part| part.gsub('/', '\/') }.join('/')
    end


    def path=(value)
      # (value || '') tolerates nil input; the previous code called the
      # non-standard String#_? helper, which is not defined anywhere in this gem.
      @parts = (value || '').split(PATH_MATCHER).reject { |x| x.length == 0 }.map { |part| decode_part part }
    end


    # Quoted "/'a'/'b'" form as stored in TDMS meta data.
    def raw
      '/' + @parts.map { |part| encode_part part }.join('/')
    end


    def raw=(value)
      # (value || '') tolerates nil input; see #path= for why _? was replaced.
      @parts = (value || '').split(RAW_MATCHER).reject { |x| x.length == 0 }.map { |part| decode_raw_part part }
    end


    def to_a
      @parts
    end


    def to_s
      path
    end


    # Equal to a String with the same #to_s, or to another Path with the same parts.
    def ==(other)
      if other.is_a? String
        self.to_s == other
      elsif other.is_a? self.class
        self.dump == other.dump
      else
        super
      end
    end


    alias eql? ==


    protected

    # Escaped component -> pure component: "a\/b" -> "a/b"
    def decode_part(part)
      part.gsub(/\\\//, '/')
    end


    # Raw quoted component -> pure component: "'it''s'" -> "it's"
    def decode_raw_part(part)
      part.gsub(/(^'|'$)/, '').gsub(/''/, "'")
    end


    # Pure part representation -> raw encoded representation
    # "my / part's / awesomeness" -> "'my / part''s / awesomeness'"
    def encode_part(part)
      "'#{part.gsub(/'/, "''")}'"
    end
  end
end
@@ -0,0 +1,16 @@
1
module RubyTDMS
  # A named TDMS property. +data+ is the typed wrapper (a DataTypes::Base
  # instance) whose decoded value is exposed through #value.
  class Property
    attr_accessor :name, :data


    def initialize(name = nil, data = nil)
      @name = name
      @data = data
    end


    # The decoded value held by the typed data wrapper.
    def value
      data.value
    end
  end
end
@@ -0,0 +1,107 @@
1
+ require_relative 'object_parser'
2
+
3
module RubyTDMS
  # Implements the TDMS segment, including a Segment factory and stream parser.
  # TODO: Refactor the parser out? Too much coupling between Segment and Document!
  class Segment
    # Lead-in "table of contents" bit flags (NI TDMS internal format spec).
    FLAG_META_DATA = 1 << 1
    FLAG_RAW_DATA = 1 << 3
    FLAG_DAQMX_RAW_DATA = 1 << 7
    FLAG_INTERLEAVED_DATA = 1 << 5
    FLAG_BIG_ENDIAN = 1 << 6
    FLAG_NEW_OBJECT_LIST = 1 << 2

    # Sentinel "next segment offset" marking a segment that was never closed
    # properly: all the rest of the file is this segment's data (NI docs).
    UNCLOSED_SEGMENT_LENGTH = 0xFFFFFFFFFFFFFFFF

    attr_reader :document
    attr_reader :objects, :tag, :version, :length, :meta_data_length, :meta_data_offset, :raw_data_offset, :chunk_count


    def initialize(document)
      @document = document
      @objects = []
    end


    class << self
      # Parse one segment (lead-in plus optional meta data) from +stream+ and
      # register it with +document+.
      # @return [Segment] the newly parsed segment.
      def parse_stream(stream, document)
        new(document).tap do |segment|
          segment.parse_lead_in stream
          document.segments << segment # TODO: smelly
          segment.parse_meta_data stream if segment.meta_data?
        end
      end
    end


    # The channel objects in this segment — the only objects that carry raw data.
    def raw_channels
      objects.select { |object| object.is_a? Objects::Channel }
    end


    # Checks if the segment flags have +flag+ set.
    # @param flag [Fixnum] The flag mask to check, like FLAG_META_DATA or FLAG_RAW_DATA.
    # @return [Boolean] Whether the segment has the flag in question set.
    def flag?(flag)
      !!(@flags & flag == flag)
    end


    def meta_data?
      flag? FLAG_META_DATA
    end


    def raw_data?
      flag? FLAG_RAW_DATA
    end


    def daqmx_data?
      flag? FLAG_DAQMX_RAW_DATA
    end


    def interleaved_data?
      flag? FLAG_INTERLEAVED_DATA
    end


    def big_endian?
      flag? FLAG_BIG_ENDIAN
    end


    def new_object_list?
      flag? FLAG_NEW_OBJECT_LIST
    end


    # protected

    # Read the fixed-size segment lead-in and derive the stream offsets for
    # the meta data and raw data regions.
    def parse_lead_in(stream)
      @tag = stream.read 4
      @flags = stream.read_u32
      @version = stream.read_u32
      @length = stream.read_u64 # Overall length of segment minus length of lead-in, aka "Next segment offset" in NI docs.
      @meta_data_length = stream.read_u64 # Overall length of meta information, aka "Raw data offset" in NI docs.

      @meta_data_offset = @meta_data_length > 0 ? stream.pos : nil
      @raw_data_offset = stream.pos + @meta_data_length # Stream offset at which raw data for this segment begins.
      # NI docs: an all-ones segment length means the file was not closed properly
      # and everything after the lead-in/meta data is raw data. read_u64 yields the
      # unsigned form, so the old `@length == -1` comparison could never match;
      # accept both spellings to be safe.
      unclosed = @length == UNCLOSED_SEGMENT_LENGTH || @length == -1
      @raw_data_length = unclosed ? stream.length - @raw_data_offset : @length - @meta_data_length # Number of bytes raw data occupies.
    end


    # Read the meta data section: the object list, then derive chunk counts so
    # each channel can compute its raw data offsets.
    def parse_meta_data(stream)
      @number_of_objects = stream.read_u32

      @number_of_objects.times do
        object = ObjectParser.parse_stream stream, document, self
        @objects << object
      end

      # Length of an individual data chunk (summation of each channel's raw data length).
      # reduce(0, :+) keeps this 0 — not nil — for segments with no channel objects
      # (e.g. only file/group meta data), which previously crashed the division below.
      @chunk_length = raw_channels.map(&:chunk_length).reduce(0, :+)
      # A segment without channel data has no chunks; guard the division by zero.
      @chunk_count = @chunk_length.zero? ? 0 : @raw_data_length / @chunk_length
      raw_channels.each { |object| object.calculate_offsets }
    end
  end
end