ceph-ruby-livelink 1.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,39 @@
+ module CephRuby
+   # Rados BlockDevice helper Methods
+   module RadosBlockDeviceHelper
+     def self.parse_stat(stat)
+       Hash[[:size, :obj_size, :num_objs, :order].map { |k| [k, stat[k]] }]
+         .tap do |hash|
+           hash[:block_name_prefix] = stat[:block_name_prefix].to_ptr.read_string
+         end
+     end
+
+     def self.close_handle(handle)
+       Lib::Rbd.rbd_close(handle)
+       true
+     end
+
+     def self.parse_dst_pool(dst_pool, pool)
+       if dst_pool.is_a? String
+         dst_pool = cluster.pool(dst_pool)
+       elsif dst_pool.nil?
+         dst_pool = pool
+       end
+       dst_pool.ensure_open
+       dst_pool
+     end
+
+     def open?
+       !handle.nil?
+     end
+
+     def ensure_open
+       return if open?
+       open
+     end
+
+     def log(message)
+       CephRuby.log("rbd image #{pool.name}/#{name} #{message}")
+     end
+   end
+ end
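RadosBlockDeviceHelper.parse_stat flattens an FFI image-info struct (size, obj_size, num_objs, order and a block_name_prefix char pointer, matching the librbd rbd_image_info_t layout) into a plain Hash. A minimal sketch of the shape it returns; the `stat` struct here is assumed to come from an rbd stat call elsewhere in the gem:

    # Sketch only: `stat` is an FFI struct exposing the fields parse_stat reads.
    info = CephRuby::RadosBlockDeviceHelper.parse_stat(stat)
    info[:size]               # image size in bytes
    info[:order]              # object size as a power of two
    info[:block_name_prefix]  # string read out of the struct's char pointer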
@@ -0,0 +1,124 @@
+ module CephRuby
+   # An Object in Ceph
+   class RadosObject
+     attr_accessor :pool, :name
+
+     def initialize(pool, name)
+       self.pool = pool
+       self.name = name
+       yield(self) if block_given?
+     end
+
+     def exists?
+       log('exists?')
+       !stat.nil?
+     rescue SystemCallError => e
+       return false if e.errno == Errno::ENOENT::Errno
+       raise e
+     end
+
+     def overwrite(data)
+       size = data.bytesize
+       log("overwrite size #{size}")
+       ret = Lib::Rados.rados_write_full(pool.handle, name, data, size)
+       raise SystemCallError.new("overwrite of #{size} bytes to '#{name}'"\
+                                 ' failed', -ret) if ret < 0
+     end
+
+     def write(offset, data)
+       size = data.bytesize
+       log("write offset #{offset}, size #{size}")
+       ret = Lib::Rados.rados_write(pool.handle, name, data, size, offset)
+       raise SystemCallError.new("write of #{size} bytes to '#{name}'"\
+                                 " at #{offset} failed", -ret) if ret < 0
+     end
+
+     def append(data)
+       size = data.bytesize
+       log("append #{size}B")
+       ret = Lib::Rados.rados_append(pool.handle, name, data, size)
+       raise SystemCallError.new("appendment of #{size} bytes to '#{name}'"\
+                                 ' failed', -ret) if ret < 0
+     end
+
+     alias exist? exists?
+
+     def read(offset, size)
+       log("read offset #{offset}, size #{size}")
+       data_p = FFI::MemoryPointer.new(:char, size)
+       ret = Lib::Rados.rados_read(pool.handle, name, data_p, size, offset)
+       raise SystemCallError.new("read of #{size} bytes from '#{name}'"\
+                                 " at #{offset} failed", -ret) if ret < 0
+       data_p.get_bytes(0, ret)
+     end
+
+     def read_full
+       log('read_full')
+       read 0, size
+     end
+
+     def destroy
+       log('destroy')
+       ret = Lib::Rados.rados_remove(pool.handle, name)
+       raise SystemCallError.new("destroy of '#{name}' failed", -ret) if ret < 0
+     end
+
+     def resize(size)
+       log("resize size #{size}")
+       ret = Lib::Rados.rados_trunc(pool.handle, name, size)
+       raise SystemCallError.new("resize of '#{name}'"\
+                                 " to #{size} failed", -ret) if ret < 0
+     end
+
+     def stat
+       log('stat')
+       size_p = FFI::MemoryPointer.new(:uint64)
+       mtime_p = FFI::MemoryPointer.new(:uint64)
+       ret = Lib::Rados.rados_stat(pool.handle, name, size_p, mtime_p)
+       raise SystemCallError.new("stat of '#{name}' failed", -ret) if ret < 0
+       RadosObject.stat_hash(size_p, mtime_p)
+     end
+
+     class << self
+       def stat_hash(size_p, mtime_p)
+         {
+           size: size_p.get_uint64(0),
+           mtime: Time.at(mtime_p.get_uint64(0))
+         }
+       end
+     end
+
+     def size
+       stat[:size]
+     end
+
+     def mtime
+       stat[:mtime]
+     end
+
+     def xattr(name = nil)
+       Xattr.new(self, name)
+     end
+
+     def xattr_enumerator
+       ::CephRuby::XattrEnumerator.new(self)
+     end
+
+     def <=>(other)
+       pool_check = pool <=> other.pool
+       return pool_check unless pool_check == 0
+       other.name <=> name
+     end
+
+     def eql?(other)
+       return false unless other.class == self.class
+       self == other
+     end
+
+     # helper methods below
+
+     def log(message)
+       CephRuby.log("rados object #{pool.name}/#{name} #{message}")
+     end
+   end
+ end
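Taken together, RadosObject gives a RADOS object a small file-like API over the librados calls above. A minimal usage sketch follows; the config value and the pool name 'my_pool' are placeholders, not part of this package:

    # Sketch only: config and pool name are assumptions.
    cluster = CephRuby::Cluster.new(config)
    cluster.pool('my_pool') do |pool|
      pool.ensure_open                             # RadosObject works on pool.handle
      obj = CephRuby::RadosObject.new(pool, 'greeting')
      obj.overwrite('hello world')                 # rados_write_full
      obj.append('!')                              # rados_append
      puts obj.read_full                           # reads stat[:size] bytes from offset 0
      obj.destroy                                  # rados_remove
    end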
@@ -0,0 +1,89 @@
+ module CephRuby
+   # Enumerator for Ceph Rados Objects
+   class RadosObjectEnumerator
+     include Enumerable
+
+     class << self
+       attr_accessor :limit
+     end
+
+     attr_accessor :pool
+     attr_reader :handle, :page
+
+     def initialize(pool)
+       self.pool = pool
+       @page = 0
+
+       open
+     end
+
+     def paginate(page = 0)
+       @page = page ||= 0
+       to = CephRuby::RadosObjectEnumerator.limit
+       to = 0 if to.nil?
+       seek page * to
+     end
+
+     def seek(to)
+       ret = Lib::Rados.rados_nobjects_list_seek(handle, to)
+       raise SystemCallError('unable to seek to position', -ret) if ret < 0
+       self
+     end
+
+     def position
+       Lib::Rados.rados_nobjects_list_get_pg_hash_position(handle)
+     end
+
+     def close
+       Lib::Rados.rados_nobjects_close(handle)
+       @handle = nil
+     end
+
+     def open?
+       !handle.nil?
+     end
+
+     def open
+       return if open?
+       pool.ensure_open
+       handle_p = FFI::MemoryPointer.new(:pointer)
+       ret = Lib::Rados.rados_nobjects_list_open(pool.handle, handle_p)
+       raise SystemCallError('unable to open object list', -ret) if ret < 0
+       @handle = handle_p.get_pointer(0)
+     end
+
+     def each
+       return enum_for(:each) unless block_given?
+       while within_limit
+         obj = next_rados_object
+         return if obj.nil?
+         yield obj
+       end
+     ensure
+       paginate(page)
+     end
+
+     def within_limit
+       return true if CephRuby::RadosObjectEnumerator.limit.nil?
+       position < (CephRuby::RadosObjectEnumerator.limit * (page + 1))
+     end
+
+     private
+
+     def next_rados_object
+       entry_buffer = FFI::MemoryPointer.new(:pointer, 1)
+       ret = Lib::Rados.rados_nobjects_list_next(handle, entry_buffer,
+                                                 nil, nil)
+       return unless within_limit
+       return if ret == -Errno::ENOENT::Errno
+       raise SystemCallError.new('unable to fetch next object', -ret) if ret < 0
+       next_object(entry_buffer)
+     end
+
+     def next_object(entry_buffer)
+       str_ptr = entry_buffer.read_pointer
+       return if str_ptr.null?
+       RadosObject.new(pool, str_ptr.read_string)
+     end
+   end
+ end
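RadosObjectEnumerator wraps the rados_nobjects_list_* calls and honours an optional class-wide page limit. A hedged sketch of driving it directly, assuming `pool` is an already-opened CephRuby::Pool:

    # Sketch only: `pool` is assumed to be an open CephRuby::Pool.
    CephRuby::RadosObjectEnumerator.limit = 100      # optional page size, class-wide
    enum = CephRuby::RadosObjectEnumerator.new(pool) # opens the object list
    enum.paginate(1)                                 # seek to page * limit
    enum.each { |obj| puts obj.name }                # yields CephRuby::RadosObject
    enum.close                                       # rados_nobjects_close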
@@ -0,0 +1,3 @@
+ module CephRuby
+   VERSION = '1.5.1'.freeze
+ end
@@ -0,0 +1,67 @@
+ module CephRuby
+   # Representation of a File extended Attribute
+   class Xattr
+     attr_accessor :rados_object, :name, :pool
+
+     def initialize(rados_object, name)
+       raise Errno::ENOENT, 'RadosObject is nil' unless rados_object.exists?
+       raise SystemCallError.new(
+         'xattr name cannot be nil',
+         Errno::ENOENT::Errno
+       ) if name.nil?
+       self.rados_object = rados_object
+       self.pool = rados_object.pool
+       self.name = name
+       yield(self) if block_given?
+     end
+
+     def value(size = 4096)
+       read size
+     end
+
+     def value=(value)
+       write value
+     end
+
+     def destroy
+       log('destroy')
+       ret = Lib::Rados.rados_rmxattr(pool.handle,
+                                      rados_object.name,
+                                      name)
+       raise SystemCallError.new("destruction of xattr '#{name}' failed",
+                                 -ret) if ret < 0
+     end
+
+     def to_s
+       read
+     end
+
+     def log(message)
+       CephRuby.log('rados obj xattr '\
+                    "#{rados_object.name}/#{name} #{message}")
+     end
+
+     private
+
+     def read(size)
+       log("read #{size}b")
+       data_p = FFI::MemoryPointer.new(:char, size)
+       ret = Lib::Rados.rados_getxattr(pool.handle,
+                                       rados_object.name,
+                                       name, data_p, size)
+       raise SystemCallError.new("read of xattr '#{name}' failed",
+                                 -ret) if ret < 0
+       data_p.get_bytes(0, ret)
+     end
+
+     def write(data)
+       size = data.bytesize
+       log("write size #{size}")
+       ret = Lib::Rados.rados_setxattr(pool.handle,
+                                       rados_object.name,
+                                       name, data, size)
+       raise SystemCallError.new("write of xattr '#{name}' failed",
+                                 -ret) if ret < 0
+     end
+   end
+ end
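Xattr reads and writes a single extended attribute through rados_getxattr/rados_setxattr; value defaults to a 4096-byte read buffer. A hedged sketch, assuming `obj` is an existing RadosObject and the attribute name is a placeholder:

    # Sketch only: the object must already exist (Xattr#initialize checks exists?).
    attr = obj.xattr('mime-type')    # same as CephRuby::Xattr.new(obj, 'mime-type')
    attr.value = 'text/plain'        # rados_setxattr
    puts attr.value                  # rados_getxattr, up to 4096 bytes by default
    attr.destroy                     # rados_rmxattr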
@@ -0,0 +1,62 @@
+ module CephRuby
+   # Enumerator for Ceph Rados Objects Xattr
+   class XattrEnumerator
+     include Enumerable
+
+     attr_accessor :object, :pool
+     attr_reader :handle
+
+     def initialize(object)
+       self.object = object
+       self.pool = object.pool
+       open
+     end
+
+     def close
+       Lib::Rados.rados_getxattrs_end(handle)
+       @handle = nil
+     end
+
+     def open?
+       !handle.nil?
+     end
+
+     def open
+       return if open?
+       pool.ensure_open
+       handle_p = FFI::MemoryPointer.new(:pointer)
+       ret = Lib::Rados.rados_getxattrs(pool.handle, object.name, handle_p)
+       raise SystemCallError.new('unable to open xattr list', -ret) if ret < 0
+       @handle = handle_p.get_pointer(0)
+     end
+
+     def each
+       return enum_for(:each) unless block_given?
+       open
+       loop do
+         obj = next_xattr_object
+         break if obj.nil?
+         yield obj
+       end
+       close
+     end
+
+     private
+
+     def next_xattr_object
+       key_buffer = FFI::MemoryPointer.new(:pointer, 1)
+       val_buffer = FFI::MemoryPointer.new(:pointer, 1)
+       size_t_buffer = FFI::MemoryPointer.new(:size_t)
+       ret = Lib::Rados.rados_getxattrs_next(handle, key_buffer,
+                                             val_buffer, size_t_buffer)
+       raise SystemCallError.new('unable to fetch next object', -ret) if ret < 0
+       next_xattr(key_buffer)
+     end
+
+     def next_xattr(key_buffer)
+       str_ptr = key_buffer.read_pointer
+       return if str_ptr.null?
+       Xattr.new(object, str_ptr.read_string)
+     end
+   end
+ end
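XattrEnumerator walks an object's attributes with rados_getxattrs/rados_getxattrs_next and yields a CephRuby::Xattr per key. A short hedged sketch, again assuming an existing RadosObject `obj`:

    # Sketch only: each yielded entry is rebuilt as an Xattr from the key name.
    obj.xattr_enumerator.each do |xattr|
      puts "#{xattr.name} => #{xattr.value}"   # value re-reads via rados_getxattr
    end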
@@ -0,0 +1,101 @@
+ require 'spec_helper'
+ describe CephRuby::Cluster do
+   let(:config) { cluster_config }
+   let(:cluster) { ::CephRuby::Cluster.new(config) }
+   subject { cluster }
+
+   describe 'should respond to' do
+     it 'shutdown' do
+       expect(subject).to respond_to :shutdown
+     end
+
+     it 'connect' do
+       expect(subject).to respond_to :connect
+     end
+
+     it 'setup_using_file' do
+       expect(subject).to respond_to :setup_using_file
+     end
+
+     it 'log' do
+       expect(subject).to respond_to :log
+     end
+
+     it 'pool' do
+       expect(subject).to respond_to :pool
+     end
+
+     it 'pools' do
+       expect(subject).to respond_to :pools
+     end
+
+     it 'pool_id_by_name' do
+       expect(subject).to respond_to :pool_id_by_name
+     end
+
+     it 'pool_name_by_id' do
+       expect(subject).to respond_to :pool_name_by_id
+     end
+
+     it 'status' do
+       expect(subject).to respond_to :status
+     end
+
+     it 'fsid' do
+       expect(subject).to respond_to :fsid
+     end
+   end
+
+   describe 'fsid' do
+     subject { cluster.fsid }
+     it 'should return a 36 byte string' do
+       expect(subject.length).to be 36
+     end
+   end
+
+   describe 'status' do
+     subject { cluster.status }
+     it 'should return a hash' do
+       expect(subject).to be_a Hash
+     end
+
+     it 'should have the correct keys' do
+       expect(subject.key?(:kb)).to be true
+       expect(subject.key?(:kb_used)).to be true
+       expect(subject.key?(:kb_avail)).to be true
+       expect(subject.key?(:num_objects)).to be true
+     end
+   end
+
+   # Starter pool functionality, check the cluster methods exist
+   describe 'pool functions' do
+     describe 'pool method' do
+       it 'should return a pool object' do
+         expect(subject.pool(config[:pool][:name])).to be_a ::CephRuby::Pool
+       end
+
+       describe 'when passed a block' do
+         it 'should pass a pool into the block' do
+           obj = nil
+           subject.pool(config[:pool][:name]) { |p| obj = p }
+           expect(obj).to be_a ::CephRuby::Pool
+         end
+       end
+     end
+
+     describe 'pools method' do
+       it 'should return a PoolEnumerator' do
+         expect(subject.pools).to be_a ::CephRuby::PoolEnumerator
+       end
+
+       describe 'when passed a block into each', require_cluster_read: true do
+         it 'should pass as many pools into the block as there are' do
+           subject.pools.each do |p|
+             # This won't prove anything unless there are some pools
+             expect(p).to be_a ::CephRuby::Pool
+           end
+         end
+       end
+     end
+   end
+ end