ceph-ruby-livelink 1.5.1

checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA1:
+   metadata.gz: 156dbabee9228fe85292b28c7ab54b152d69d14d
+   data.tar.gz: 4f5e24209a2641ae76764375d34914cd32a21e9a
+ SHA512:
+   metadata.gz: 919c6f08edeb98c704e37f3bc5a2c5fdac24e0ab489f1bf1e44a06c46111bacf9c2ee57b6cb57656e9e6d5fd4a45ccf55f2b462100b80a4b3e5e71043037a51b
+   data.tar.gz: e1c66b0327860078379b2971e4658bfa465cd24a3b3b542363ce6cfa3095fbdac0781bbd3dd3925e4b603b28d6029f6434c519ba7a1fdf6388cbc4fee8cf4bab
@@ -0,0 +1,19 @@
+ ---
+ :cluster: ceph
+ :config_dir: "/etc/ceph"
+ :user: client.admin
+ :flags: 0
+ :readable: true # User has +r mon cap
+ :writable: true # User has +w mon cap
+ :pool:
+   :name: rspec_test
+   :create_delete: true # Means you can run create and delete on this pool {beware of dragons!}
+                        # If you accidentally set this and you have a pool with the same name,
+                        # it will be deleted at the end of the rspec test!
+                        # This only works if the user has +w mon cap
+   :rule_id: 0 # CRUSH ruleset id that determines where pools are placed
+   :writable: true # Means this pool has read/write permissions {beware of dragons!}
+                   # If you accidentally set this to true and your pool contains an object
+                   # named object_name that you want to keep, it will be overwritten and then deleted!
+                   # This only works if the user has +w osd cap, at least on pool[:name]
+   :object_name: rspec_test_object
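These keys are consumed by the gem's rspec suite. A minimal sketch of how a spec_helper might read them, assuming the file is saved as `.cluster.yml` in the project root (the name excluded by the `.gitignore` below); `CLUSTER_CONFIG` and the helper method are illustrative, not part of the gem:

```ruby
# spec_helper sketch: load the test-cluster description shown above.
require 'yaml'

# Keys written as `:cluster:` parse as Ruby symbols. Note: on Psych 4+
# (Ruby 3.1+) you would need YAML.unsafe_load_file or
# permitted_classes: [Symbol] to allow symbol keys.
CLUSTER_CONFIG = YAML.load_file(File.expand_path('.cluster.yml', Dir.pwd))

# Guard destructive examples behind the explicit opt-in flag.
def pool_deletion_allowed?
  CLUSTER_CONFIG[:pool][:create_delete] == true
end
```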
data/.gitignore ADDED
@@ -0,0 +1,18 @@
+ *.gem
+ *.rbc
+ .bundle
+ .config
+ .yardoc
+ Gemfile.lock
+ InstalledFiles
+ _yardoc
+ coverage
+ doc/
+ lib/bundler/man
+ pkg
+ rdoc
+ spec/reports
+ .cluster.yml
+ test/tmp
+ test/version_tmp
+ tmp
data/.rspec ADDED
@@ -0,0 +1,2 @@
+ --color
+ --require spec_helper
data/.rubocop.yml ADDED
@@ -0,0 +1,2 @@
+ Style/FileName:
+   Enabled: false
data/Gemfile ADDED
@@ -0,0 +1,4 @@
+ source 'https://rubygems.org'
+
+ # Specify your gem's dependencies in ceph-ruby.gemspec
+ gemspec
data/Guardfile ADDED
@@ -0,0 +1,20 @@
+ # A sample Guardfile
+ # More info at https://github.com/guard/guard#readme
+
+ ## Uncomment and set this to only include directories you want to watch
+ # directories %w(app lib config test spec features) \
+ #   .select{|d| Dir.exists?(d) ? d : UI.warning("Directory #{d} does not exist")}
+
+ ## Note: if you are using the `directories` clause above and you are not
+ ## watching the project directory ('.'), then you will want to move
+ ## the Guardfile to a watched dir and symlink it back, e.g.
+ #
+ #  $ mkdir config
+ #  $ mv Guardfile config/
+ #  $ ln -s config/Guardfile .
+ #
+ # and, you'll have to watch "config/Guardfile" instead of "Guardfile"
+
+ guard 'rake', task: 'dev_test' do
+   watch(%r{^(spec|lib/.*)/.*\.rb$})
+ end
data/LICENSE.txt ADDED
@@ -0,0 +1,22 @@
+ Copyright (c) 2012 - 2013 Netskin GmbH
+
+ MIT License
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.md ADDED
@@ -0,0 +1 @@
+ Ceph-ruby is now the official Ruby binding and has moved to the ceph organisation: https://github.com/ceph/ceph-ruby
data/Rakefile ADDED
@@ -0,0 +1,7 @@
+ require 'bundler/gem_tasks'
+
+ Dir.glob('tasks/**/*.rake').each(&method(:import))
+
+ task test: [:rubocop_test, :spec]
+
+ task dev_test: [:rubocop_dev, :spec]
data/ceph-ruby.gemspec ADDED
@@ -0,0 +1,31 @@
+ # -*- encoding: utf-8 -*-
+ lib = File.expand_path('../lib', __FILE__)
+ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
+ require 'English' # defines $RS, used to split `git ls-files` below
+ require 'ceph-ruby/version'
+
+ Gem::Specification.new do |gem|
+   gem.name = 'ceph-ruby-livelink'
+   gem.version = CephRuby::VERSION
+   gem.authors = ['Netskin GmbH', 'Corin Langosch', 'LivelinkTechnology LTD', 'Stuart Harland']
+   gem.email = ['info@netskin.com', 'info@corinlangosch.com', 'infraops@livelinktechnology.net', 's.harland@livelinktechnology.net']
+   gem.description = 'Easy management of Ceph'
+   gem.summary = 'Easy management of Ceph Distributed Storage System'
+   gem.homepage = 'http://github.com/livelink/ceph-ruby'
+   gem.license = 'MIT'
+
+   gem.files = `git ls-files`.split($RS)
+   gem.executables = gem.files.grep(%r{^bin/}).map { |f| File.basename(f) }
+   gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
+   gem.require_paths = ['lib']
+
+   gem.add_dependency 'ffi', '~> 1.9'
+   gem.add_dependency 'activesupport', '~> 3.0'
+   gem.add_development_dependency 'guard', '~> 2.13'
+   gem.add_development_dependency 'guard-rake', '~> 1.0'
+   gem.add_development_dependency 'guard-rspec', '~> 4.6'
+   gem.add_development_dependency 'guard-bundler', '~> 2.1'
+   gem.add_development_dependency 'rubocop', '~> 0.39'
+   gem.add_development_dependency 'rspec', '~> 3.4'
+   gem.add_development_dependency 'rake', '~> 11.1'
+   gem.add_development_dependency 'bundler', '~> 1.3'
+ end
data/lib/ceph-ruby.rb ADDED
@@ -0,0 +1,32 @@
+ require 'active_support/core_ext/module/delegation'
+ require 'active_support/core_ext/module/attribute_accessors'
+
+ require 'ffi'
+
+ require 'ceph-ruby/lib/rados'
+ require 'ceph-ruby/lib/rbd'
+
+ require 'ceph-ruby/version'
+ require 'ceph-ruby/cluster_helper'
+ require 'ceph-ruby/pool_enumerator'
+ require 'ceph-ruby/cluster'
+ require 'ceph-ruby/rados_object_enumerator'
+ require 'ceph-ruby/pool_helper'
+ require 'ceph-ruby/pool'
+ require 'ceph-ruby/rados_block_device_helper'
+ require 'ceph-ruby/rados_block_device'
+ require 'ceph-ruby/rados_object'
+ require 'ceph-ruby/xattr'
+ require 'ceph-ruby/xattr_enumerator'
+
+ # Ceph::Ruby
+ #
+ # Easy management of Ceph Distributed Storage System
+ # (rbd images, rados objects) using ruby.
+ module CephRuby
+   mattr_accessor :logger
+
+   def self.log(message)
+     return unless logger
+     logger.info("CephRuby: #{message}")
+   end
+ end
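`CephRuby.log` is a no-op until `logger` is assigned. A minimal sketch of wiring it up with the standard-library `Logger`, using nothing beyond what the module above defines:

```ruby
require 'logger'
require 'ceph-ruby'

# mattr_accessor generates CephRuby.logger=; any Logger-like object works.
CephRuby.logger = Logger.new($stdout)

CephRuby.log('hello') # logs "CephRuby: hello" at INFO level
```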
data/lib/ceph-ruby/cluster.rb ADDED
@@ -0,0 +1,129 @@
+ module CephRuby
+   # = Cluster
+   #
+   # == Synopsis
+   # A cluster object will connect to a Ceph monitor to
+   # carry out tasks or access objects from ceph
+   #
+   # == How to connect
+   #   clusterA = ::CephRuby::Cluster.new
+   #   clusterB = ::CephRuby::Cluster.new('/path/to/config/dir')
+   #   clusterC = ::CephRuby::Cluster.new('/path/to/config/dir', options)
+   #   clusterD = ::CephRuby::Cluster.new(options)
+   # === Options (with defaults)
+   #   {
+   #     config_path: '/etc/ceph',
+   #     cluster: 'ceph',
+   #     user: 'client.admin',
+   #     flags: 0
+   #   }
+   class Cluster
+     extend CephRuby::ClusterHelper
+     include CephRuby::ClusterHelper
+     include ::Comparable
+     attr_reader :options
+     attr_accessor :handle
+
+     def initialize(config = {}, opts = {})
+       setup(config, opts)
+
+       connect
+
+       if block_given?
+         begin
+           yield(self)
+         ensure
+           shutdown
+         end
+       end
+     end
+
+     def shutdown
+       return unless handle
+       log('shutdown')
+       Lib::Rados.rados_shutdown(handle)
+       self.handle = nil
+     end
+
+     def pool(name, &block)
+       Pool.new(self, name, &block)
+     end
+
+     def pools
+       PoolEnumerator.new(self)
+     end
+
+     def pool_name_by_id(id, size = 512)
+       data_p = FFI::MemoryPointer.new(:char, size)
+       ret = Lib::Rados.rados_pool_reverse_lookup(handle,
+                                                  id,
+                                                  data_p,
+                                                  size)
+       raise Errno::ERANGE,
+             'buffer size too small' if ret == -Errno::ERANGE::Errno
+       raise SystemCallError.new('read of pool name failed', -ret) if ret < 0
+       data_p.get_bytes(0, ret)
+     end
+
+     def pool_id_by_name(name)
+       ret = Lib::Rados.rados_pool_lookup(handle, name)
+       raise Errno::ENOENT if ret == -Errno::ENOENT::Errno
+       raise SystemCallError.new('read of pool id failed', -ret) if ret < 0
+       ret
+     end
+
+     def connect
+       log('connect')
+       ret = Lib::Rados.rados_connect(handle)
+       raise SystemCallError.new('connect to cluster failed', -ret) if ret < 0
+     end
+
+     def setup_using_file
+       log("setup_using_file #{options[:config_path]}")
+       ret = Lib::Rados.rados_conf_read_file(handle, options[:config_path])
+       raise SystemCallError.new('setup of cluster from config file'\
+         " '#{options[:config_path]}' failed", -ret) if ret < 0
+     end
+
+     def status
+       log('stat')
+       stat_s = Lib::Rados::MonitorStatStruct.new
+       ret = Lib::Rados.rados_cluster_stat(handle, stat_s)
+       raise SystemCallError.new('retrieve cluster status failed',
+                                 -ret) if ret < 0
+       stat_s.to_hash
+     end
+
+     def fsid
+       log('fsid')
+       data_p = FFI::MemoryPointer.new(:char, 37)
+       ret = Lib::Rados.rados_cluster_fsid(handle, data_p, 37)
+       raise SystemCallError.new('cluster fsid failed', -ret) if ret < 0
+       data_p.get_bytes(0, ret)
+     end
+
+     private
+
+     def setup(config, opts)
+       log("init lib rados #{Lib::Rados.version_string},"\
+         " lib rbd #{Lib::Rbd.version_string}")
+       setup_options(config, opts)
+       self.handle = Cluster.setup_handle(options)
+       setup_using_file
+     end
+
+     def setup_options(config, opts)
+       @options = Cluster.default_options
+       if config.is_a?(::Hash)
+         @options.merge! config
+       else
+         @options.merge! opts
+         @options[:config_path] = config
+       end
+
+       @options[:flags] = 0 unless Cluster.uint?(@options[:flags])
+       @options.freeze
+     end
+   end
+ end
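A short usage sketch of the class above. It assumes a reachable cluster, a readable config at the resolved `config_path`, and sufficient caps for `client.admin`; the pool name is illustrative:

```ruby
require 'ceph-ruby'

# Block form: #initialize yields self and guarantees shutdown via ensure.
CephRuby::Cluster.new(cluster: 'ceph', user: 'client.admin') do |cluster|
  puts cluster.fsid                    # cluster id string
  puts cluster.status[:kb_used]        # from rados_cluster_stat
  puts cluster.pool_id_by_name('rbd')  # raises Errno::ENOENT if absent
end
```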
data/lib/ceph-ruby/cluster_helper.rb ADDED
@@ -0,0 +1,39 @@
+ module CephRuby
+   # Helper Methods for CephRuby::Cluster
+   module ClusterHelper
+     def setup_handle(options)
+       handle_p = FFI::MemoryPointer.new(:pointer)
+       ret = Lib::Rados.rados_create2(handle_p,
+                                      options[:cluster],
+                                      options[:user],
+                                      options[:flags])
+       raise SystemCallError.new('open of cluster failed', -ret) if ret < 0
+       handle_p.get_pointer(0)
+     end
+
+     def default_options
+       {
+         config_path: '/etc/ceph',
+         user: 'client.admin',
+         cluster: 'ceph'
+       }
+     end
+
+     def uint?(value)
+       value.is_a?(Integer) && value >= 0
+     end
+
+     def log(message)
+       CephRuby.log("cluster #{message}")
+     end
+
+     def <=>(other)
+       other.options <=> options
+     end
+
+     def eql?(other)
+       return false if other.class != self.class
+       self == other
+     end
+   end
+ end
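Because `Cluster` both `extend`s and `include`s this module, the helpers work at class and instance level, and `<=>` plus `Comparable` give option-based equality. A sketch, assuming two connectable handles (`Hash#<=>` requires Ruby 2.3+):

```ruby
require 'ceph-ruby'

a = CephRuby::Cluster.new(cluster: 'ceph')
b = CephRuby::Cluster.new(cluster: 'ceph')

a == b    # => true: Comparable#== delegates to <=> over the frozen options
a.eql?(b) # => true: same class, and == above holds

[a, b].each(&:shutdown)
```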
data/lib/ceph-ruby/lib/rados.rb ADDED
@@ -0,0 +1,158 @@
+ require 'ffi'
+
+ # see https://github.com/ceph/ceph/blob/v0.48.2argonaut/src/pybind/rados.py
+
+ module CephRuby
+   module Lib
+     # Ruby bindings for librados
+     module Rados
+       extend FFI::Library
+
+       ffi_lib ['rados', 'librados.so.2']
+
+       attach_function 'rados_version', [:pointer, :pointer, :pointer], :void
+
+       attach_function 'rados_create', [:pointer, :string], :int
+
+       attach_function 'rados_create2', [:pointer, :string, :string,
+                                         :uint64], :int
+
+       attach_function 'rados_connect', [:pointer], :int
+
+       attach_function 'rados_conf_read_file', [:pointer, :string], :int
+
+       attach_function 'rados_shutdown', [:pointer], :void
+
+       attach_function 'rados_cluster_stat', [:pointer, :pointer], :int
+
+       attach_function 'rados_cluster_fsid', [:pointer, :buffer_out,
+                                              :size_t], :int
+
+       attach_function 'rados_pool_list', [:pointer, :buffer_out, :size_t], :int
+
+       attach_function 'rados_pool_lookup', [:pointer, :string], :int
+
+       attach_function 'rados_pool_reverse_lookup', [:pointer, :int,
+                                                     :buffer_out, :size_t], :int
+
+       attach_function 'rados_pool_create', [:pointer, :string], :int
+
+       attach_function 'rados_pool_create_with_auid', [:pointer, :string,
+                                                       :uint64], :int
+
+       attach_function 'rados_pool_create_with_crush_rule', [:pointer, :string,
+                                                             :uint8], :int
+
+       attach_function 'rados_pool_create_with_all', [:pointer, :string,
+                                                      :uint64, :uint8], :int
+
+       attach_function 'rados_pool_delete', [:pointer, :string], :int
+
+       attach_function 'rados_ioctx_pool_set_auid', [:pointer, :uint64], :int
+
+       attach_function 'rados_ioctx_pool_get_auid', [:pointer, :pointer], :int
+
+       attach_function 'rados_ioctx_pool_stat', [:pointer, :pointer], :int
+
+       attach_function 'rados_ioctx_get_id', [:pointer], :int
+
+       attach_function 'rados_ioctx_get_pool_name', [:pointer, :buffer_out,
+                                                     :size_t], :int
+
+       attach_function 'rados_ioctx_set_namespace', [:pointer, :string], :void
+
+       attach_function 'rados_ioctx_create', [:pointer, :string, :pointer], :int
+
+       attach_function 'rados_ioctx_destroy', [:pointer], :void
+
+       attach_function 'rados_write', [:pointer, :string, :buffer_in,
+                                       :size_t, :off_t], :int
+
+       attach_function 'rados_write_full', [:pointer, :string, :buffer_in,
+                                            :size_t], :int
+
+       attach_function 'rados_read', [:pointer, :string, :buffer_out,
+                                      :size_t, :off_t], :int
+       attach_function 'rados_append', [:pointer, :string, :buffer_in,
+                                        :size_t], :int
+       attach_function 'rados_remove', [:pointer, :string], :int
+
+       attach_function 'rados_trunc', [:pointer, :string, :size_t], :int
+
+       attach_function 'rados_stat', [:pointer, :string, :pointer,
+                                      :pointer], :int
+
+       attach_function 'rados_getxattr', [:pointer, :string, :string,
+                                          :buffer_out, :size_t], :int
+       attach_function 'rados_setxattr', [:pointer, :string, :string,
+                                          :buffer_in, :size_t], :int
+       attach_function 'rados_rmxattr', [:pointer, :string, :string], :int
+
+       attach_function 'rados_getxattrs', [:pointer, :string, :pointer], :int
+
+       attach_function 'rados_getxattrs_next', [:pointer, :pointer, :pointer,
+                                                :pointer], :int
+       attach_function 'rados_getxattrs_end', [:pointer], :void
+
+       attach_function 'rados_nobjects_list_open', [:pointer, :pointer], :int
+
+       attach_function 'rados_nobjects_list_seek', [:pointer, :uint32], :uint32
+
+       attach_function 'rados_nobjects_list_next', [:pointer, :pointer,
+                                                    :pointer, :pointer], :int
+
+       attach_function 'rados_nobjects_list_close', [:pointer], :void
+
+       attach_function 'rados_nobjects_list_get_pg_hash_position',
+                       [:pointer], :uint32
+
+       class MonitorStatStruct < FFI::Struct #:nodoc:
+         layout :kb, :uint64,
+                :kb_used, :uint64,
+                :kb_avail, :uint64,
+                :num_objects, :uint64
+
+         def to_hash
+           return {} if members.empty?
+           Hash[*members.collect { |m| [m, self[m]] }.flatten!]
+         end
+       end
+
+       class PoolStatStruct < FFI::Struct #:nodoc:
+         layout :num_bytes, :uint64,
+                :num_kb, :uint64,
+                :num_objects, :uint64,
+                :num_object_clones, :uint64,
+                :num_object_copies, :uint64,
+                :num_objects_missing_on_primary, :uint64,
+                :num_objects_unfound, :uint64,
+                :num_objects_degraded, :uint64,
+                :num_rd, :uint64,
+                :num_rd_kb, :uint64,
+                :num_wr, :uint64,
+                :num_wr_kb, :uint64
+
+         def to_hash
+           return {} if members.empty?
+           Hash[*members.collect { |m| [m, self[m]] }.flatten!]
+         end
+       end
+
+       def self.version
+         major = FFI::MemoryPointer.new(:int)
+         minor = FFI::MemoryPointer.new(:int)
+         extra = FFI::MemoryPointer.new(:int)
+         rados_version(major, minor, extra)
+         {
+           major: major.get_int(0),
+           minor: minor.get_int(0),
+           extra: extra.get_int(0)
+         }
+       end
+
+       def self.version_string
+         "#{version[:major]}.#{version[:minor]}.#{version[:extra]}"
+       end
+     end
+   end
+ end
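A quick check that the bindings load, using only what is defined above; `rados_version` needs no cluster connection:

```ruby
require 'ceph-ruby'

puts CephRuby::Lib::Rados.version_string # e.g. "0.94.10"

# The same information via the raw FFI call:
major = FFI::MemoryPointer.new(:int)
minor = FFI::MemoryPointer.new(:int)
extra = FFI::MemoryPointer.new(:int)
CephRuby::Lib::Rados.rados_version(major, minor, extra)
puts format('librados %d.%d.%d',
            major.get_int(0), minor.get_int(0), extra.get_int(0))
```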