process_shared 0.0.3 → 0.0.4

data/README.rdoc CHANGED
@@ -59,12 +59,35 @@ Install the gem with:
 
   puts "value should be zero: #{mem.get_int(0)}"
 
+ == Transfer Objects Across Processes
+
+   # allocate a sufficiently large memory block
+   mem = ProcessShared::SharedMemory.new(1024)
+
+   # a sub-process can write (serialize) an object into the memory block (with bounds checking)
+   pid = fork do
+     mem.write_object(['a', 'b'])
+   end
+
+   Process.wait(pid)
+
+   # the parent process can read the object back (synchronizing access
+   # with a Mutex is left as an exercise for the reader)
+
+   mem.read_object.must_equal ['a', 'b']
+
   == Todo
 
- * Implement ConditionVariable
+ * Test ConditionVariable
   * Implement optional override of core Thread/Mutex classes
   * Extend libpsem to win32? (See Python's processing library)
   * Break out tests that use PSem.getvalue() (which isn't supported on Mac OS X)
     so that the test suite will pass
   * Add finalizer to Mutex? (finalizer on Semaphore objects may be enough) or a method to
-   explicitly close and release resources?
+   explicitly close and release resources?
+ * Test the semantics of crashing processes that still hold locks, etc.
+ * Is SharedArray with the Enumerable mixin a sufficient Array-like interface?
+ * Remove bsem from libpsem as it is of little use and doesn't work on Mac OS X
+ * Possibly implement BoundedSemaphore with an arbitrary bound (in Ruby
+   rather than relying on sem_getvalue()), but this is of little
+   utility beyond extra error checking.
@@ -68,7 +68,6 @@ psem_free(psem_t *psem) {
      error_new((err), E_SOURCE_SYSTEM, errno); \
      return ERROR; \
    } \
-   return OK; \
  } while (0)
 
  #define errcheck(expr, err) errcheck_val((expr), -1, (err))
@@ -79,52 +78,83 @@ psem_open(psem_t *psem, const char *name, unsigned int value, error_t **err)
    errcheck_val(psem->sem = sem_open(name, O_CREAT | O_EXCL, 0600, value),
                 SEM_FAILED,
                 err);
+   return OK;
  }
 
  int
  psem_close(psem_t *psem, error_t **err)
  {
    errcheck(sem_close(psem->sem), err);
+   return OK;
  }
 
  int
  psem_unlink(const char *name, error_t **err)
  {
    errcheck(sem_unlink(name), err);
+   return OK;
  }
 
  int
  psem_post(psem_t *psem, error_t **err)
  {
    errcheck(sem_post(psem->sem), err);
+   return OK;
  }
 
  int
  psem_wait(psem_t *psem, error_t **err)
  {
    errcheck(sem_wait(psem->sem), err);
+   return OK;
  }
 
  int
  psem_trywait(psem_t *psem, error_t **err)
  {
    errcheck(sem_trywait(psem->sem), err);
+   return OK;
  }
 
+ #define NS_PER_S (1000 * 1000 * 1000)
+ #define US_PER_NS (1000)
+ #define TV_NSEC_MAX (NS_PER_S - 1)
+
  int
  psem_timedwait(psem_t *psem, float timeout_s, error_t **err)
  {
+   struct timeval now;
    struct timespec abs_timeout;
 
-   abs_timeout.tv_sec = floorf(timeout_s);
-   abs_timeout.tv_nsec =
-     floorf((timeout_s - abs_timeout.tv_sec) * (1000 * 1000 * 1000));
+   errcheck(gettimeofday(&now, NULL), err);
+   abs_timeout.tv_sec = now.tv_sec;
+   abs_timeout.tv_nsec = now.tv_usec * US_PER_NS;
+
+   /* Fun with rounding: be careful adding the relative timeout to the absolute time */
+   {
+     time_t sec;                /* relative timeout */
+     long nsec;
+
+     sec = floorf(timeout_s);
+     nsec = floorf((timeout_s - floorf(timeout_s)) * NS_PER_S);
+
+     abs_timeout.tv_sec += sec;
+     abs_timeout.tv_nsec += nsec;
+
+     while (abs_timeout.tv_nsec > TV_NSEC_MAX) {
+       abs_timeout.tv_sec += 1;
+       abs_timeout.tv_nsec -= NS_PER_S;
+     }
+   }
 
    errcheck(sem_timedwait(psem->sem, &abs_timeout), err);
+   return OK;
  }
 
  int
  psem_getvalue(psem_t *psem, int *sval, error_t **err)
  {
    errcheck(sem_getvalue(psem->sem, sval), err);
+   return OK;
  }
+
@@ -8,23 +8,43 @@ module ProcessShared
    include ProcessShared::PSem
    public
 
-   # Generate a name for a semaphore.
+   # Generate a name for a semaphore. If +name+ is given, it is used
+   # as the name (and so a semaphore could be shared by arbitrary
+   # processes not forked from one another). Otherwise, a name is
+   # generated containing +middle+ and the process id.
+   #
+   # @param [String] middle arbitrary string used in the middle of the generated name
+   # @param [String] name if given, used as the name
+   # @return [String] name, or the generated name
    def self.gen_name(middle, name = nil)
      if name
        name
      else
        @count ||= 0
        @count += 1
-       "ps-#{middle}-#{Process.pid}-#{@count}"
+       "ps-#{middle}-#{::Process.pid}-#{@count}"
      end
    end
 
+   # Make a Proc suitable for use as a finalizer that will call
+   # +psem_unlink+ on +name+ and ignore system errors.
+   #
+   # @return [Proc] a finalizer
    def self.make_finalizer(name)
      proc { ProcessShared::PSem.psem_unlink(name, nil) }
    end
 
    # private_class_method :new
 
+   def synchronize
+     wait
+     begin
+       yield
+     ensure
+       post
+     end
+   end
+
    protected
 
    attr_reader :sem, :err
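
Not part of the diff: a minimal sketch of how the new synchronize helper (inherited by Semaphore from AbstractSemaphore) might be used to serialize updates to a shared counter across forked processes. The require paths mirror those used elsewhere in this diff; the loop counts are illustrative.

  require 'process_shared/semaphore'
  require 'process_shared/shared_memory'

  sem = ProcessShared::Semaphore.new(1)        # semaphore used as a simple lock
  mem = ProcessShared::SharedMemory.new(:int)  # shared counter
  mem.put_int(0, 0)

  pids = 2.times.map do
    fork do
      1000.times do
        sem.synchronize { mem.put_int(0, mem.get_int(0) + 1) }
      end
    end
  end
  pids.each { |pid| Process.wait(pid) }

  puts mem.get_int(0)   # 2000 when every increment is serialized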
@@ -0,0 +1,48 @@
+ require 'process_shared/psem'
+ require 'process_shared/semaphore'
+ require 'process_shared/process_error'
+
+ module ProcessShared
+   # BinarySemaphore is identical to Semaphore except that its value is
+   # not permitted to rise above one (it may be either zero or one).
+   # When the value is at the maximum, calls to #post will raise an
+   # exception.
+   #
+   # In other words, this is a Semaphore with extra error checking.
+   class BinarySemaphore < Semaphore
+     # Create a new semaphore with initial value +value+. After
+     # {Kernel#fork}, the semaphore will be shared across two (or more)
+     # processes. The semaphore must be closed with {#close} in each
+     # process that no longer needs the semaphore.
+     #
+     # (An object finalizer is registered that will close the semaphore
+     # to avoid memory leaks, but this should be considered a last
+     # resort).
+     #
+     # @param [Integer] value the initial semaphore value
+     # @param [String] name not currently supported
+     def initialize(value = 1, name = nil)
+       raise ArgumentError, 'value must be 0 or 1' if (value < 0 or value > 1)
+       super(value, name)
+     end
+
+     # Increment from zero to one.
+     #
+     # First, attempt to decrement. If this fails with EAGAIN, the
+     # semaphore was at zero, so continue with the post. If it
+     # succeeds, the semaphore was not at zero, so increment back to
+     # one and raise {ProcessError} (multiple workers may have acquired
+     # the semaphore at this point).
+     def post
+       begin
+         try_wait
+         # oops, value was not zero...
+         psem_post(sem, err)
+         raise ProcessError, 'post would raise value over bound'
+       rescue Errno::EAGAIN
+         # ok, value was zero
+         psem_post(sem, err)
+       end
+     end
+   end
+ end
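
Not part of the diff: a sketch of the extra error checking BinarySemaphore adds, assuming the new file is required as process_shared/binary_semaphore in line with the gem's other require paths.

  require 'process_shared/binary_semaphore'

  sem = ProcessShared::BinarySemaphore.new(1)

  sem.wait    # value drops from one to zero
  sem.post    # back up to one

  begin
    sem.post  # value is already one, so the over-post is rejected
  rescue ProcessShared::ProcessError => e
    puts "bound exceeded: #{e.message}"
  end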
@@ -8,9 +8,9 @@ module ProcessShared
    class BoundedSemaphore < Semaphore
      # With no associated block, open is a synonym for
      # Semaphore.new. If the optional code block is given, it will be
-     # passed `sem` as an argument, and the Semaphore object will
+     # passed +sem+ as an argument, and the Semaphore object will
      # automatically be closed when the block terminates. In this
-     # instance, Semaphore.open returns the value of the block.
+     # instance, BoundedSemaphore.open returns the value of the block.
      #
      # @param [Integer] value the initial semaphore value
      # @param [String] name not currently supported
@@ -18,9 +18,9 @@ module ProcessShared
        new(maxvalue, value, name).with_self(&block)
      end
 
-     # Create a new semaphore with initial value `value`. After
-     # Kernel#fork, the semaphore will be shared across two (or more)
-     # processes. The semaphore must be closed with #close in each
+     # Create a new semaphore with initial value +value+. After
+     # {Kernel#fork}, the semaphore will be shared across two (or more)
+     # processes. The semaphore must be closed with {#close} in each
      # process that no longer needs the semaphore.
      #
      # (An object finalizer is registered that will close the semaphore
@@ -1,14 +1,18 @@
  require 'process_shared/semaphore'
 
  module ProcessShared
-   # TODO: implement this
    class ConditionVariable
      def initialize
-       @sem = Semaphore.new
+       @internal = Semaphore.new(1)
+       @waiting = SharedMemory.new(:int)
+       @waiting.write_int(0)
+       @sem = Semaphore.new(0)
      end
 
      def broadcast
-       @sem.post
+       @internal.synchronize do
+         @waiting.read_int.times { @sem.post }
+       end
      end
 
      def signal
@@ -18,10 +22,32 @@ module ProcessShared
      def wait(mutex, timeout = nil)
        mutex.unlock
        begin
-         @sem.wait
+         inc_waiting
+         if timeout
+           begin
+             @sem.try_wait(timeout)
+           rescue Errno::EAGAIN, Errno::ETIMEDOUT
+             # the timeout expired without a signal; return normally
+           end
+         else
+           @sem.wait
+         end
+         dec_waiting
        ensure
          mutex.lock
        end
      end
+
+     private
+
+     def inc_waiting(val = 1)
+       @internal.synchronize do
+         @waiting.write_int(@waiting.read_int + val)
+       end
+     end
+
+     def dec_waiting
+       inc_waiting(-1)
+     end
    end
  end
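
Not part of the diff: a sketch of the reworked ConditionVariable used with a Mutex to wait for a flag set by a child process. The require paths and the 1-second timeout are assumptions; the loop re-checks the predicate because a timed wait may return without a signal.

  require 'process_shared/mutex'
  require 'process_shared/condition_variable'
  require 'process_shared/shared_memory'

  mutex = ProcessShared::Mutex.new
  cond  = ProcessShared::ConditionVariable.new
  ready = ProcessShared::SharedMemory.new(:int)
  ready.put_int(0, 0)

  pid = fork do
    mutex.lock
    begin
      ready.put_int(0, 1)   # publish the flag, then wake any waiter
      cond.signal
    ensure
      mutex.unlock
    end
  end

  mutex.lock
  begin
    cond.wait(mutex, 1.0) until ready.get_int(0) == 1
  ensure
    mutex.unlock
  end
  Process.wait(pid)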
Binary file
@@ -1,24 +1,24 @@
- require 'process_shared/bounded_semaphore'
+ require 'process_shared/semaphore'
  require 'process_shared/with_self'
  require 'process_shared/shared_memory'
  require 'process_shared/process_error'
 
  module ProcessShared
-   # This Mutex class is implemented as a BoundedSemaphore with a
-   # maximum value of 1. Additionally, the locking process is tracked,
-   # and ProcessError is raised if either #unlock is called by a
-   # process different from the locking process, or if #lock is called
-   # while the process already holds the lock (i.e. the mutex is not
-   # re-entrant). This tracking is not without performance cost, of
-   # course (current implementation uses an additional BoundedSemaphore
-   # and SharedMemory segment).
+   # This Mutex class is implemented as a Semaphore, with a second
+   # internal Semaphore used to track the locking process.
+   # {ProcessError} is raised if either {#unlock} is called by a
+   # process different from the locking process, or if {#lock} is
+   # called while the process already holds the lock (i.e. the mutex is
+   # not re-entrant). This tracking is not without performance cost,
+   # of course (the current implementation uses the additional {Semaphore}
+   # and a {SharedMemory} segment).
    #
-   # The API is intended to be identical to the ::Mutex in the core
+   # The API is intended to be identical to the {::Mutex} in the core
    # Ruby library.
    #
    # TODO: the core Ruby api has no #close method, but this Mutex must
-   # release its BoundedSemaphore and SharedMemory resources. For now,
-   # we rely on the object finalizers of those objects...
+   # release its {Semaphore} and {SharedMemory} resources. For now,
+   # rely on the object finalizers of those objects...
    class Mutex
      # include WithSelf
 
@@ -27,10 +27,10 @@ module ProcessShared
      # end
 
      def initialize
-       @internal_sem = BoundedSemaphore.new(1)
+       @internal_sem = Semaphore.new
        @locked_by = SharedMemory.new(:int)
 
-       @sem = BoundedSemaphore.new(1)
+       @sem = Semaphore.new
      end
 
      # @return [Mutex]
@@ -68,7 +68,7 @@ module ProcessShared
        if @locked_by.get_int(0) > 0
          false # was locked
        else
-         @sem.wait
+         @sem.wait # should return immediately
          self.locked_by = ::Process.pid
          true
        end
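
Not part of the diff: a short sketch of the error checking described in the comment above, namely that the mutex is not re-entrant and may only be unlocked by the process that locked it.

  mutex = ProcessShared::Mutex.new

  mutex.lock
  begin
    mutex.lock              # not re-entrant: the second lock is refused
  rescue ProcessShared::ProcessError => e
    puts "re-entrant lock refused: #{e.message}"
  end

  pid = fork do
    begin
      mutex.unlock          # only the locking process may unlock
    rescue ProcessShared::ProcessError => e
      puts "unlock from non-owner refused: #{e.message}"
    end
  end
  Process.wait(pid)

  mutex.unlock              # the owner releases the lock normally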
@@ -2,9 +2,9 @@
 
  module ProcessShared
    module PosixCall
-     # Replace methods in `syms` with error checking wrappers that
-     # invoke the original method and raise a SystemCallError with the
-     # current errno if the return value is an error.
+     # Replace methods in +syms+ with error checking wrappers that
+     # invoke the original method and raise a {SystemCallError} with
+     # the current errno if the return value is an error.
      #
      # Errors are detected if the block returns true when called with
      # the original method's return value.
@@ -23,6 +23,9 @@ module ProcessShared
      # Replace methods in `syms` with error checking wrappers that
      # invoke the original psem method and raise an appropriate
      # error.
+     #
+     # The last argument is assumed to be a pointer to a pointer
+     # where either a psem error or NULL will be stored.
      def psem_error_check(*syms)
        syms.each do |sym|
          method = self.method(sym)
@@ -32,7 +35,8 @@ module ProcessShared
            errp = args[-1]
            unless errp.nil?
              begin
-               err = Error.new(errp.get_pointer(0))
+               err = Error.new(errp.read_pointer)
+               errp.write_pointer(nil)
                if err[:source] == PSem.e_source_system
                  raise SystemCallError.new("error in #{sym}", err[:errno])
                else
@@ -86,7 +90,7 @@ module ProcessShared
    attach_function :psem_post, [:pointer, :pointer], :int
    attach_function :psem_wait, [:pointer, :pointer], :int
    attach_function :psem_trywait, [:pointer, :pointer], :int
-   attach_function :psem_timedwait, [:pointer, :pointer, :pointer], :int
+   attach_function :psem_timedwait, [:pointer, :float, :pointer], :int
    attach_function :psem_getvalue, [:pointer, :pointer, :pointer], :int
 
    psem_error_check(:psem_open, :psem_close, :psem_unlink, :psem_post,
@@ -100,7 +104,7 @@ module ProcessShared
    attach_function :bsem_post, [:pointer, :pointer], :int
    attach_function :bsem_wait, [:pointer, :pointer], :int
    attach_function :bsem_trywait, [:pointer, :pointer], :int
-   attach_function :bsem_timedwait, [:pointer, :pointer, :pointer], :int
+   attach_function :bsem_timedwait, [:pointer, :float, :pointer], :int
    attach_function :bsem_getvalue, [:pointer, :pointer, :pointer], :int
 
    psem_error_check(:bsem_open, :bsem_close, :bsem_unlink, :bsem_post,
@@ -5,7 +5,7 @@ module ProcessShared
    class Semaphore < AbstractSemaphore
      # With no associated block, open is a synonym for
      # Semaphore.new. If the optional code block is given, it will be
-     # passed `sem` as an argument, and the Semaphore object will
+     # passed +sem+ as an argument, and the Semaphore object will
      # automatically be closed when the block terminates. In this
      # instance, Semaphore.open returns the value of the block.
      #
@@ -15,7 +15,7 @@ module ProcessShared
        new(value, name).with_self(&block)
      end
 
-     # Create a new semaphore with initial value `value`. After
+     # Create a new semaphore with initial value +value+. After
      # Kernel#fork, the semaphore will be shared across two (or more)
      # processes. The semaphore must be closed with #close in each
      # process that no longer needs the semaphore.
@@ -33,18 +33,36 @@ module ProcessShared
      end
 
      # Decrement the value of the semaphore. If the value is zero,
-     # wait until another process increments via #post.
+     # wait until another process increments via {#post}.
      def wait
        psem_wait(sem, err)
      end
 
+     # Decrement the value of the semaphore if it can be done
+     # immediately (i.e. if it was non-zero). Otherwise, wait up to
+     # +timeout+ seconds until another process increments via {#post}.
+     #
+     # @param timeout [Numeric] the maximum number of seconds to wait, or nil to not wait
+     #
+     # @raise [Errno::EAGAIN] if +timeout+ is nil and the semaphore
+     #   cannot be decremented immediately
+     # @raise [Errno::ETIMEDOUT] if +timeout+ passes before the
+     #   semaphore can be decremented
+     def try_wait(timeout = nil)
+       if timeout
+         psem_timedwait(sem, timeout, err)
+       else
+         psem_trywait(sem, err)
+       end
+     end
+
      # Increment the value of the semaphore. If other processes are
      # waiting on this semaphore, one will be woken.
      def post
        psem_post(sem, err)
      end
 
-     # Get the current value of the semaphore. Raises Errno::NOTSUP on
+     # Get the current value of the semaphore. Raises {Errno::NOTSUP} on
      # platforms that don't support this (e.g. Mac OS X).
      #
      # @return [Integer] the current value of the semaphore.
@@ -55,7 +73,7 @@ module ProcessShared
      end
 
      # Release the resources associated with this semaphore. Calls to
-     # other methods are undefined after #close has been called.
+     # other methods are undefined after {#close} has been called.
      #
      # Close must be called when the semaphore is no longer needed. An
      # object finalizer will close the semaphore as a last resort.
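
Not part of the diff: a sketch of the two try_wait modes documented above; the sleep and timeout values are illustrative.

  require 'process_shared/semaphore'

  sem = ProcessShared::Semaphore.new(0)

  begin
    sem.try_wait            # no timeout: fails immediately while the value is zero
  rescue Errno::EAGAIN
    puts 'semaphore was at zero'
  end

  pid = fork do
    sleep 0.1
    sem.post
  end

  begin
    sem.try_wait(5.0)       # wait up to five seconds for the child to post
    puts 'decremented before the timeout'
  rescue Errno::ETIMEDOUT
    puts 'timed out'
  end
  Process.wait(pid)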
@@ -0,0 +1,69 @@
+ module ProcessShared
+   class SharedArray < SharedMemory
+     include Enumerable
+
+     # A fixed-size array in shared memory. Processes forked from this
+     # one will be able to read and write shared data to the array.
+     # Access should be synchronized using a {Mutex}, {Semaphore}, or
+     # other means.
+     #
+     # Note that {Enumerable} methods such as {#map}, {#sort},
+     # etc. return new {Array} objects rather than modifying the shared
+     # array.
+     #
+     # @param [Symbol] type_or_count the data type as a symbol
+     #   understood by FFI (e.g. :int, :double)
+     #
+     # @param [Integer] count number of array elements
+     def initialize(type_or_count = 1, count = 1)
+       super(type_or_count, count)
+
+       # See https://github.com/ffi/ffi/issues/118
+       ffi_type = FFI.find_type(self.type)
+
+       name = if ffi_type.inspect =~ /FFI::Type::Builtin:(\w+)*/
+                # name will be something like int32
+                $1.downcase
+              end
+
+       unless name
+         raise ArgumentError, "could not find FFI::Type for #{self.type}"
+       end
+
+       getter = "get_#{name}"
+       setter = "put_#{name}"
+
+       # singleton class
+       sclass = class << self; self; end
+
+       unless sclass.method_defined?(getter)
+         raise ArgumentError, "no element getter for #{self.type} (#{getter})"
+       end
+
+       unless sclass.method_defined?(setter)
+         raise ArgumentError, "no element setter for #{self.type} (#{setter})"
+       end
+
+       sclass.send(:alias_method, :get_type, getter)
+       sclass.send(:alias_method, :put_type, setter)
+     end
+
+     def each
+       # NOTE: using @count because Enumerable defines its own count
+       # method...
+       @count.times { |i| yield self[i] }
+     end
+
+     def each_with_index
+       @count.times { |i| yield self[i], i }
+     end
+
+     def [](i)
+       get_type(i * self.type_size)
+     end
+
+     def []=(i, val)
+       put_type(i * self.type_size, val)
+     end
+   end
+ end
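
Not part of the diff: a sketch of a SharedArray shared between a parent and a forked child, assuming the new file is required as process_shared/shared_array in line with the gem's other require paths.

  require 'process_shared/shared_array'

  arr = ProcessShared::SharedArray.new(:int, 4)
  4.times { |i| arr[i] = 0 }

  pid = fork do
    4.times { |i| arr[i] = i * i }   # writes are visible to the parent
  end
  Process.wait(pid)

  p arr.to_a                 # => [0, 1, 4, 9]
  p arr.map { |x| x + 1 }    # Enumerable methods return plain Arrays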
@@ -1,25 +1,37 @@
  require 'process_shared/rt'
  require 'process_shared/libc'
  require 'process_shared/with_self'
+ require 'process_shared/shared_memory_io'
 
  module ProcessShared
-   # Memory block shared across processes. TODO: finalizer that closes...
+   # Memory block shared across processes.
    class SharedMemory < FFI::Pointer
      include WithSelf
 
-     attr_reader :size, :fd
+     attr_reader :size, :type, :type_size, :count, :fd
 
      def self.open(size, &block)
        new(size).with_self(&block)
      end
 
-     def initialize(size)
-       @size = case size
-               when Symbol
-                 FFI.type_size(size)
-               else
-                 size
-               end
+     def self.make_finalizer(addr, size, fd)
+       proc do
+         pointer = FFI::Pointer.new(addr)
+         LibC.munmap(pointer, size)
+         LibC.close(fd)
+       end
+     end
+
+     def initialize(type_or_count = 1, count = 1)
+       @type, @count = case type_or_count
+                       when Symbol
+                         [type_or_count, count]
+                       else
+                         [:uchar, type_or_count]
+                       end
+
+       @type_size = FFI.type_size(@type)
+       @size = @type_size * @count
 
        name = "/ps-shm#{rand(10000)}"
        @fd = RT.shm_open(name,
@@ -33,13 +45,69 @@ module ProcessShared
                           LibC::PROT_READ | LibC::PROT_WRITE,
                           LibC::MAP_SHARED,
                           @fd,
-                          0)
+                          0).
+                  slice(0, size) # slice to get an FFI::Pointer that knows its size
+                                 # (and thus does bounds checking)
+
+       @finalize = self.class.make_finalizer(@pointer.address, @size, @fd)
+       ObjectSpace.define_finalizer(self, @finalize)
+
        super(@pointer)
      end
 
+     # Write the serialization of +obj+ (using Marshal.dump) to this
+     # shared memory object at +offset+ (in bytes).
+     #
+     # Raises IndexError if there is insufficient space.
+     def put_object(offset, obj)
+       # FIXME: This is a workaround for an issue seen in 1.8.7-p352
+       # (not tested in other 1.8's). If the 1.9 code path below is
+       # used, then inside SharedMemoryIO#write the passed string
+       # object is 'terminated' (garbage collected?) and won't respond
+       # to any methods... This way is less efficient since it involves
+       # the creation of an intermediate string, but it works in
+       # 1.8.7-p352.
+       if RUBY_VERSION =~ /^1.8/
+         str = Marshal.dump(obj)
+         return put_bytes(offset, str, 0, str.size)
+       end
+
+       io = SharedMemoryIO.new(self)
+       io.seek(offset)
+       Marshal.dump(obj, io)
+     end
+
+     # Read the serialized object at +offset+ (in bytes) using
+     # Marshal.load.
+     #
+     # @return [Object]
+     def get_object(offset)
+       io = to_shm_io
+       io.seek(offset)
+       Marshal.load(io)
+     end
+
+     # Equivalent to {#put_object}(0, obj)
+     def write_object(obj)
+       put_object(0, obj)
+     end
+
+     # Equivalent to {#get_object}(0)
+     #
+     # @return [Object]
+     def read_object
+       Marshal.load(to_shm_io)
+     end
+
      def close
-       LibC.munmap(@pointer, @size)
-       LibC.close(@fd)
+       ObjectSpace.undefine_finalizer(self)
+       @finalize.call
+     end
+
+     private
+
+     def to_shm_io
+       SharedMemoryIO.new(self)
      end
    end
  end
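
Not part of the diff: a sketch of the new offset-based object helpers. The block size, offsets, and the expectation of an IndexError on overflow follow the doc comments above and are illustrative assumptions.

  require 'process_shared/shared_memory'

  mem = ProcessShared::SharedMemory.new(1024)

  mem.put_object(0,   { 'x' => 1 })   # serialize at the start of the block
  mem.put_object(512, [:a, :b])       # a second object at byte offset 512

  p mem.get_object(0)                 # => {"x"=>1}
  p mem.get_object(512)               # => [:a, :b]

  begin
    mem.put_object(1020, 'does not fit')  # only 4 bytes remain in the block
  rescue IndexError => e
    puts "bounds check: #{e.message}"
  end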