ceph-ruby 1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA1:
+   metadata.gz: 86b36cd836036d69386f3b989e1dba9fb2dd0020
+   data.tar.gz: 2599313fda868e6d09918be709afcbf3262856c4
+ SHA512:
+   metadata.gz: 1675940967805d52134ceaf366145a8ea68beba04d61dcb761e7b80ae020982c789c844dea342730272326fbc26a3bed9ac423f7ed4865e9469ed33d367fde3a
+   data.tar.gz: ba9dbcedb6df0c3c3109a11523cb2ac74edf73bc9111572a9ebc5c6383954797aa44c11e46c0e3829db4fce1a17f1a79c4b03f4029c9bd5503e1c4fd6fb8281d
data/.gitignore ADDED
@@ -0,0 +1,17 @@
+ *.gem
+ *.rbc
+ .bundle
+ .config
+ .yardoc
+ Gemfile.lock
+ InstalledFiles
+ _yardoc
+ coverage
+ doc/
+ lib/bundler/man
+ pkg
+ rdoc
+ spec/reports
+ test/tmp
+ test/version_tmp
+ tmp
data/Gemfile ADDED
@@ -0,0 +1,4 @@
+ source 'https://rubygems.org'
+
+ # Specify your gem's dependencies in ceph-ruby.gemspec
+ gemspec
data/LICENSE.txt ADDED
@@ -0,0 +1,22 @@
+ Copyright (c) 2012 - 2013 Netskin GmbH
+
+ MIT License
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.md ADDED
@@ -0,0 +1,70 @@
+ # Ceph::Ruby
+
+ Easy management of Ceph Distributed Storage System (rbd, images, rados objects) using ruby.
+
+
+ ## Installation
+
+ Add this line to your application's Gemfile:
+
+     gem 'ceph-ruby'
+
+ And then execute:
+
+     $ bundle
+
+ Or install it yourself as:
+
+     $ gem install ceph-ruby
+
+
+ ## Usage
+
+     require "ceph-ruby"
+
+     # version information
+     puts CephRuby::Lib::Rados.version_string
+     puts CephRuby::Lib::Rbd.version_string
+
+     # connect to cluster and open a pool
+     cluster = CephRuby::Cluster.new
+     pool = cluster.pool("my-pool-xyz")
+     pool.open
+
+     # simple example for using rados objects
+     object = pool.rados_object("my-object-xyz")
+     object.write(0, "This is a Test!")
+     puts object.size
+
+     # simple example for using rbd images
+     image = pool.rados_block_device("my-image-xyz")
+     puts image.exists?
+     image.create(10.gigabytes)
+     puts image.exists?
+     puts image.size
+     image.write(0, "This is a Test!")
+     pp image.stat
+     image.close
+
+     # clean up
+     pool.close
+     cluster.shutdown
+
+
+ ## Known bugs
+
+ * Many features provided by ceph are not implemented yet. Please contribute!
+
+
+ ## Contributing
+
+ 1. Fork it
+ 2. Create your feature branch (`git checkout -b my-new-feature`)
+ 3. Commit your changes (`git commit -am 'Add some feature'`)
+ 4. Push to the branch (`git push origin my-new-feature`)
+ 5. Create a new Pull Request
+
+
+ ## Copyright
+
+ Copyright (c) 2012 - 2013 [Netskin GmbH](http://www.netskin.com). Released under the MIT license.
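Worth noting alongside the README example: the constructors of Cluster, Pool, RadosObject and RadosBlockDevice (diffed below) all yield themselves when given a block and handle cleanup (shutdown/close) afterwards. A minimal sketch of that block style, not part of the packaged README, reusing the pool and object names from the usage example above:

    require "ceph-ruby"

    # Block form: Cluster#initialize yields itself and calls shutdown when the
    # block returns; Pool#initialize does the same with close.
    CephRuby::Cluster.new do |cluster|
      cluster.pool("my-pool-xyz") do |pool|
        pool.rados_object("my-object-xyz") do |object|
          object.write(0, "This is a Test!")
          puts object.read(0, object.size)
        end
      end
    end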
data/Rakefile ADDED
@@ -0,0 +1 @@
+ require "bundler/gem_tasks"
data/ceph-ruby.gemspec ADDED
@@ -0,0 +1,22 @@
+ # -*- encoding: utf-8 -*-
+ lib = File.expand_path('../lib', __FILE__)
+ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
+ require 'ceph-ruby/version'
+
+ Gem::Specification.new do |gem|
+   gem.name = "ceph-ruby"
+   gem.version = CephRuby::VERSION
+   gem.authors = ["Netskin GmbH", "Corin Langosch"]
+   gem.email = ["info@netskin.com", "info@corinlangosch.com"]
+   gem.description = %q{Easy management of Ceph}
+   gem.summary = %q{Easy management of Ceph Distributed Storage System using ruby}
+   gem.license = "MIT"
+
+   gem.files = `git ls-files`.split($/)
+   gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
+   gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
+   gem.require_paths = ["lib"]
+
+   gem.add_dependency('ffi', '~> 1.1.5')
+   gem.add_dependency('activesupport', '>= 3.0.0')
+ end
data/lib/ceph-ruby.rb ADDED
@@ -0,0 +1,22 @@
+ require "active_support/core_ext/module/delegation"
+ require "active_support/core_ext/module/attribute_accessors"
+
+ require "ffi"
+
+ require "ceph-ruby/lib/rados"
+ require "ceph-ruby/lib/rbd"
+
+ require "ceph-ruby/version"
+ require "ceph-ruby/cluster"
+ require "ceph-ruby/pool"
+ require "ceph-ruby/rados_block_device"
+ require "ceph-ruby/rados_object"
+
+ module CephRuby
+   mattr_accessor :logger
+
+   def self.log(message)
+     return unless logger
+     logger.info("CephRuby: #{message}")
+   end
+ end
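Since logger is a mattr_accessor, assigning any object that responds to #info turns on the log calls used throughout the classes below; with no logger set, CephRuby.log is a no-op. A small sketch using the standard library Logger (the pool name is only an illustration):

    require "logger"
    require "ceph-ruby"

    # Send CephRuby's internal messages to STDOUT; each call to CephRuby.log
    # then appears as e.g. "CephRuby: cluster connect".
    CephRuby.logger = Logger.new($stdout)

    CephRuby::Cluster.new do |cluster|
      cluster.pool("my-pool-xyz") { |pool| puts pool.exists? }
    end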
data/lib/ceph-ruby/cluster.rb ADDED
@@ -0,0 +1,52 @@
+ module CephRuby
+   class Cluster
+     attr_accessor :handle
+
+     def initialize(config_path = "/etc/ceph/ceph.conf")
+       log("init lib rados #{Lib::Rados.version_string}, lib rbd #{Lib::Rbd.version_string}")
+
+       handle_p = FFI::MemoryPointer.new(:pointer)
+       ret = Lib::Rados.rados_create(handle_p, nil)
+       raise SystemCallError.new("open of cluster failed", -ret) if ret < 0
+       self.handle = handle_p.get_pointer(0)
+
+       setup_using_file(config_path)
+
+       connect
+
+       if block_given?
+         yield(self)
+         shutdown
+       end
+     end
+
+     def shutdown
+       return unless handle
+       log("shutdown")
+       Lib::Rados.rados_shutdown(handle)
+       self.handle = nil
+     end
+
+     def pool(name, &block)
+       Pool.new(self, name, &block)
+     end
+
+     # helper methods below
+
+     def connect
+       log("connect")
+       ret = Lib::Rados.rados_connect(handle)
+       raise SystemCallError.new("connect to cluster failed", -ret) if ret < 0
+     end
+
+     def setup_using_file(path)
+       log("setup_using_file #{path}")
+       ret = Lib::Rados.rados_conf_read_file(handle, path)
+       raise SystemCallError.new("setup of cluster from config file '#{path}' failed", -ret) if ret < 0
+     end
+
+     def log(message)
+       CephRuby.log("cluster #{message}")
+     end
+   end
+ end
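The error handling above relies on the librados convention that calls return 0 (or a positive count) on success and a negative errno on failure; negating the return value and handing it to SystemCallError lets Ruby pick the matching Errno subclass. A standalone illustration of that mapping (the error code is arbitrary, purely for demonstration):

    # SystemCallError.new(message, errno) instantiates the matching Errno class.
    err = SystemCallError.new("connect to cluster failed", Errno::ECONNREFUSED::Errno)
    puts err.class    # => Errno::ECONNREFUSED
    puts err.message  # => "Connection refused - connect to cluster failed"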
data/lib/ceph-ruby/lib/rados.rb ADDED
@@ -0,0 +1,47 @@
+ require "ffi"
+
+ # see https://github.com/ceph/ceph/blob/v0.48.2argonaut/src/pybind/rados.py
+
+ module CephRuby
+   module Lib
+     module Rados
+       extend FFI::Library
+
+       ffi_lib ['rados', 'librados.so.2']
+
+       attach_function 'rados_version', [:pointer, :pointer, :pointer], :void
+
+       attach_function 'rados_create', [:pointer, :string], :int
+       attach_function 'rados_connect', [:pointer], :int
+       attach_function 'rados_conf_read_file', [:pointer, :string], :int
+       attach_function 'rados_shutdown', [:pointer], :void
+
+       attach_function 'rados_pool_lookup', [:pointer, :string], :int
+
+       attach_function 'rados_ioctx_create', [:pointer, :string, :pointer], :int
+       attach_function 'rados_ioctx_destroy', [:pointer], :void
+
+       attach_function 'rados_write', [:pointer, :string, :buffer_in, :size_t, :off_t], :int
+       attach_function 'rados_read', [:pointer, :string, :buffer_out, :size_t, :off_t], :int
+       attach_function 'rados_remove', [:pointer, :string], :int
+       attach_function 'rados_trunc', [:pointer, :string, :size_t], :int
+       attach_function 'rados_stat', [:pointer, :string, :pointer, :pointer], :int
+
+       def self.version
+         major = FFI::MemoryPointer.new(:int)
+         minor = FFI::MemoryPointer.new(:int)
+         extra = FFI::MemoryPointer.new(:int)
+         rados_version(major, minor, extra)
+         {
+           :major => major.get_int(0),
+           :minor => minor.get_int(0),
+           :extra => extra.get_int(0),
+         }
+       end
+
+       def self.version_string
+         "#{version[:major]}.#{version[:minor]}.#{version[:extra]}"
+       end
+     end
+   end
+ end
data/lib/ceph-ruby/lib/rbd.rb ADDED
@@ -0,0 +1,55 @@
+ require "ffi"
+
+ # see https://github.com/ceph/ceph/blob/v0.48.2argonaut/src/pybind/rbd.py
+
+ module CephRuby
+   module Lib
+     module Rbd
+       extend FFI::Library
+
+       ffi_lib ['rbd', 'librbd.so.1']
+
+       attach_function 'rbd_version', [:pointer, :pointer, :pointer], :void
+
+       attach_function 'rbd_create2', [:pointer, :string, :size_t, :uint64, :pointer], :int
+       attach_function 'rbd_remove', [:pointer, :string], :int
+
+       attach_function 'rbd_open', [:pointer, :string, :pointer, :string], :int
+       attach_function 'rbd_close', [:pointer], :void
+
+       attach_function 'rbd_write', [:pointer, :off_t, :size_t, :buffer_in], :int
+       attach_function 'rbd_read', [:pointer, :off_t, :size_t, :buffer_out], :int
+       attach_function 'rbd_stat', [:pointer, :pointer, :size_t], :int
+       attach_function 'rbd_resize', [:pointer, :size_t], :int
+
+       attach_function 'rbd_copy', [:pointer, :pointer, :string], :int
+       attach_function 'rbd_copy_with_progress', [:pointer, :pointer, :string, :pointer, :pointer], :int
+
+       class StatStruct < FFI::Struct
+         layout :size, :uint64,
+                :obj_size, :uint64,
+                :num_objs, :uint64,
+                :order, :int,
+                :block_name_prefix, [:char, 24],
+                :parent_pool, :int,        # deprecated
+                :parent_name, [:char, 96]  # deprecated
+       end
+
+       def self.version
+         major = FFI::MemoryPointer.new(:int)
+         minor = FFI::MemoryPointer.new(:int)
+         extra = FFI::MemoryPointer.new(:int)
+         rbd_version(major, minor, extra)
+         {
+           :major => major.get_int(0),
+           :minor => minor.get_int(0),
+           :extra => extra.get_int(0),
+         }
+       end
+
+       def self.version_string
+         "#{version[:major]}.#{version[:minor]}.#{version[:extra]}"
+       end
+     end
+   end
+ end
data/lib/ceph-ruby/pool.rb ADDED
@@ -0,0 +1,63 @@
+ module CephRuby
+   class Pool
+     attr_accessor :cluster, :name, :handle
+
+     def initialize(cluster, name)
+       self.cluster = cluster
+       self.name = name
+       if block_given?
+         yield(self)
+         close
+       end
+     end
+
+     def exists?
+       log("exists?")
+       ret = Lib::Rados.rados_pool_lookup(cluster.handle, name)
+       return true if ret >= 0
+       return false if ret == -Errno::ENOENT::Errno
+       raise SystemCallError.new("lookup of '#{name}' failed", -ret) if ret < 0
+     end
+
+     def open
+       return if open?
+       log("open")
+       handle_p = FFI::MemoryPointer.new(:pointer)
+       ret = Lib::Rados.rados_ioctx_create(cluster.handle, name, handle_p)
+       raise SystemCallError.new("creation of io context for '#{name}' failed", -ret) if ret < 0
+       self.handle = handle_p.get_pointer(0)
+     end
+
+     def close
+       return unless open?
+       log("close")
+       Lib::Rados.rados_ioctx_destroy(handle)
+       self.handle = nil
+     end
+
+     def rados_object(name, &block)
+       ensure_open
+       RadosObject.new(self, name, &block)
+     end
+
+     def rados_block_device(name, &block)
+       ensure_open
+       RadosBlockDevice.new(self, name, &block)
+     end
+
+     # helper methods below
+
+     def open?
+       !!handle
+     end
+
+     def ensure_open
+       return if open?
+       open
+     end
+
+     def log(message)
+       CephRuby.log("pool #{name} #{message}")
+     end
+   end
+ end
data/lib/ceph-ruby/rados_block_device.rb ADDED
@@ -0,0 +1,132 @@
+ module CephRuby
+   class RadosBlockDevice
+     attr_accessor :pool, :name, :handle
+
+     delegate :cluster, :to => :pool
+
+     def initialize(pool, name)
+       self.pool = pool
+       self.name = name
+       if block_given?
+         yield(self)
+         close
+       end
+     end
+
+     def exists?
+       log("exists?")
+       handle_p = FFI::MemoryPointer.new(:pointer)
+       ret = Lib::Rbd.rbd_open(pool.handle, name, handle_p, nil)
+       case ret
+       when 0
+         handle = handle_p.get_pointer(0)
+         Lib::Rbd.rbd_close(handle)
+         true
+       when -Errno::ENOENT::Errno
+         false
+       else
+         raise SystemCallError.new("open of '#{name}' failed", -ret) if ret < 0
+       end
+     end
+
+     def create(size, features = 0, order = 26)
+       log("create size #{size}, features #{features}, order #{order}")
+       order_p = FFI::MemoryPointer.new(:int)
+       order_p.put_int(0, order)
+       ret = Lib::Rbd.rbd_create2(pool.handle, name, size, features, order_p)
+       raise SystemCallError.new("creation of '#{name}' failed", -ret) if ret < 0
+     end
+
+     def open
+       return if open?
+       log("open")
+       handle_p = FFI::MemoryPointer.new(:pointer)
+       ret = Lib::Rbd.rbd_open(pool.handle, name, handle_p, nil)
+       raise SystemCallError.new("open of '#{name}' failed", -ret) if ret < 0
+       self.handle = handle_p.get_pointer(0)
+     end
+
+     def close
+       return unless open?
+       log("close")
+       Lib::Rbd.rbd_close(handle)
+       self.handle = nil
+     end
+
+     def destroy
+       close if open?
+       log("destroy")
+       ret = Lib::Rbd.rbd_remove(pool.handle, name)
+       raise SystemCallError.new("destroy of '#{name}' failed", -ret) if ret < 0
+     end
+
+     def write(offset, data)
+       ensure_open
+       size = data.bytesize
+       log("write offset #{offset}, size #{size}")
+       ret = Lib::Rbd.rbd_write(handle, offset, size, data)
+       raise SystemCallError.new("write of #{size} bytes to '#{name}' at #{offset} failed", -ret) if ret < 0
+       raise Errno::EIO.new("wrote only #{ret} of #{size} bytes to '#{name}' at #{offset}") if ret < size
+     end
+
+     def read(offset, size)
+       ensure_open
+       log("read offset #{offset}, size #{size}")
+       data_p = FFI::MemoryPointer.new(:char, size)
+       ret = Lib::Rbd.rbd_read(handle, offset, size, data_p)
+       raise SystemCallError.new("read of #{size} bytes from '#{name}' at #{offset} failed", -ret) if ret < 0
+       data_p.get_bytes(0, ret)
+     end
+
+     def stat
+       ensure_open
+       log("stat")
+       stat = Lib::Rbd::StatStruct.new
+       ret = Lib::Rbd.rbd_stat(handle, stat, stat.size)
+       raise SystemCallError.new("stat of '#{name}' failed", -ret) if ret < 0
+       Hash[[:size, :obj_size, :num_objs, :order].map{ |k| [k, stat[k]] }].tap do |hash|
+         hash[:block_name_prefix] = stat[:block_name_prefix].to_ptr.read_string
+       end
+     end
+
+     def resize(size)
+       ensure_open
+       log("resize size #{size}")
+       ret = Lib::Rbd.rbd_resize(handle, size)
+       raise SystemCallError.new("resize of '#{name}' to #{size} failed", -ret) if ret < 0
+     end
+
+     def size
+       stat[:size]
+     end
+
+     def copy_to(dst_name, dst_pool = nil)
+       ensure_open
+       case dst_pool
+       when String
+         dst_pool = cluster.pool(dst_pool)
+       when nil
+         dst_pool = pool
+       end
+       dst_pool.ensure_open
+       log("copy_to #{dst_pool.name}/#{dst_name}")
+       ret = Lib::Rbd.rbd_copy(handle, dst_pool.handle, dst_name)
+       raise SystemCallError.new("copy of '#{name}' to '#{dst_pool.name}/#{dst_name}' failed", -ret) if ret < 0
+     end
+
+     # helper methods below
+
+     def open?
+       !!handle
+     end
+
+     def ensure_open
+       return if open?
+       open
+     end
+
+     def log(message)
+       CephRuby.log("rbd image #{pool.name}/#{name} #{message}")
+     end
+   end
+ end
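As a usage note for the class above: copy_to accepts a Pool object, a pool name (looked up via the delegated cluster), or nil for a copy within the same pool. A short sketch, where the destination names are hypothetical and sizes are plain byte counts rather than ActiveSupport's 10.gigabytes:

    CephRuby::Cluster.new do |cluster|
      cluster.pool("my-pool-xyz") do |pool|
        pool.rados_block_device("my-image-xyz") do |image|
          image.create(1024 * 1024 * 1024) unless image.exists?  # 1 GiB
          image.resize(2 * 1024 * 1024 * 1024)                   # grow to 2 GiB
          image.copy_to("my-image-copy", "my-other-pool")        # hypothetical destination
        end
      end
    end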
data/lib/ceph-ruby/rados_object.rb ADDED
@@ -0,0 +1,71 @@
+ module CephRuby
+   class RadosObject
+     attr_accessor :pool, :name
+
+     def initialize(pool, name)
+       self.pool = pool
+       self.name = name
+       if block_given?
+         yield(self)
+       end
+     end
+
+     def exists?
+       log("exists?")
+       !!stat
+     rescue SystemCallError => e
+       return false if e.errno == Errno::ENOENT::Errno
+       raise e
+     end
+
+     def write(offset, data)
+       size = data.bytesize
+       log("write offset #{offset}, size #{size}")
+       ret = Lib::Rados.rados_write(pool.handle, name, data, size, offset)
+       raise SystemCallError.new("write of #{size} bytes to '#{name}' at #{offset} failed", -ret) if ret < 0
+       raise Errno::EIO.new("wrote only #{ret} of #{size} bytes to '#{name}' at #{offset}") if ret < size
+     end
+
+     def read(offset, size)
+       log("read offset #{offset}, size #{size}")
+       data_p = FFI::MemoryPointer.new(:char, size)
+       ret = Lib::Rados.rados_read(pool.handle, name, data_p, size, offset)
+       raise SystemCallError.new("read of #{size} bytes from '#{name}' at #{offset} failed", -ret) if ret < 0
+       data_p.get_bytes(0, ret)
+     end
+
+     def destroy
+       log("destroy")
+       ret = Lib::Rados.rados_remove(pool.handle, name)
+       raise SystemCallError.new("destroy of '#{name}' failed", -ret) if ret < 0
+     end
+
+     def resize(size)
+       log("resize size #{size}")
+       ret = Lib::Rados.rados_trunc(pool.handle, name, size)
+       raise SystemCallError.new("resize of '#{name}' to #{size} failed", -ret) if ret < 0
+     end
+
+     def stat
+       log("stat")
+       size_p = FFI::MemoryPointer.new(:uint64)
+       mtime_p = FFI::MemoryPointer.new(:uint64)
+       ret = Lib::Rados.rados_stat(pool.handle, name, size_p, mtime_p)
+       raise SystemCallError.new("stat of '#{name}' failed", -ret) if ret < 0
+       {
+         :size => size_p.get_uint64(0),
+         :mtime => Time.at(mtime_p.get_uint64(0)),
+       }
+     end
+
+     def size
+       stat[:size]
+     end
+
+     # helper methods below
+
+     def log(message)
+       CephRuby.log("rados object #{pool.name}/#{name} #{message}")
+     end
+   end
+ end
data/lib/ceph-ruby/version.rb ADDED
@@ -0,0 +1,3 @@
+ module CephRuby
+   VERSION = "1.0"
+ end
metadata ADDED
@@ -0,0 +1,88 @@
+ --- !ruby/object:Gem::Specification
+ name: ceph-ruby
+ version: !ruby/object:Gem::Version
+   version: '1.0'
+ platform: ruby
+ authors:
+ - Netskin GmbH
+ - Corin Langosch
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2013-12-20 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: ffi
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: 1.1.5
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: 1.1.5
+ - !ruby/object:Gem::Dependency
+   name: activesupport
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: 3.0.0
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: 3.0.0
+ description: Easy management of Ceph
+ email:
+ - info@netskin.com
+ - info@corinlangosch.com
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - .gitignore
+ - Gemfile
+ - LICENSE.txt
+ - README.md
+ - Rakefile
+ - ceph-ruby.gemspec
+ - lib/ceph-ruby.rb
+ - lib/ceph-ruby/cluster.rb
+ - lib/ceph-ruby/lib/rados.rb
+ - lib/ceph-ruby/lib/rbd.rb
+ - lib/ceph-ruby/pool.rb
+ - lib/ceph-ruby/rados_block_device.rb
+ - lib/ceph-ruby/rados_object.rb
+ - lib/ceph-ruby/version.rb
+ homepage:
+ licenses:
+ - MIT
+ metadata: {}
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 2.1.11
+ signing_key:
+ specification_version: 4
+ summary: Easy management of Ceph Distributed Storage System using ruby
+ test_files: []