aerospike 2.24.0 → 2.25.0

@@ -13,26 +13,44 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
 
- require 'aerospike/policy/priority'
- require 'aerospike/policy/consistency_level'
- require 'aerospike/policy/replica'
-
+ require "aerospike/policy/priority"
+ require "aerospike/policy/consistency_level"
+ require "aerospike/policy/replica"
 
  module Aerospike
 
  # Container object for client policy command.
  class Policy
-
- attr_accessor :priority, :timeout, :max_retries, :sleep_between_retries, :consistency_level,
+ attr_accessor :filter_exp, :priority, :timeout, :max_retries, :sleep_between_retries, :consistency_level,
  :predexp, :fail_on_filtered_out, :replica, :use_compression
 
  alias total_timeout timeout
  alias total_timeout= timeout=
 
- def initialize(opt={})
+ def initialize(opt = {})
  # Container object for transaction policy attributes used in all database
  # operation calls.
 
+ # Optional expression filter. If filter_exp exists and evaluates to false, the
+ # transaction is ignored.
+ #
+ # Default: nil
+ #
+ # ==== Examples:
+ #
+ # p = Policy.new
+ # p.filter_exp = Exp.build(Exp.eq(Exp.int_bin("a"), Exp.int_val(11)));
+ @filter_exp = opt[:filter_exp]
+
+ # Throw exception if {#filter_exp} is defined and that filter evaluates
+ # to false (transaction ignored). The {AerospikeException}
+ # will contain result code {ResultCode::FILTERED_OUT}.
+ #
+ # This field is not applicable to batch, scan or query commands.
+ #
+ # Default: false
+ @fail_on_filtered_out = opt[:fail_on_filtered_out] || false
+
  # Priority of request relative to other transactions.
  # Currently, only used for scans.
  @priority = opt[:priority] || Priority::DEFAULT
@@ -74,7 +92,6 @@ module Aerospike
  # ]
  @predexp = opt[:predexp] || nil
 
-
  # Throw exception if @predexp is defined and that filter evaluates
  # to false (transaction ignored). The Aerospike::Exceptions::Aerospike
  # will contain result code Aerospike::ResultCode::FILTERED_OUT.
@@ -86,7 +103,6 @@ module Aerospike
  # read operation.
  @consistency_level = opt[:consistency_level] || Aerospike::ConsistencyLevel::CONSISTENCY_ONE
 
-
  # Send read commands to the node containing the key's partition replica type.
  # Write commands are not affected by this setting, because all writes are directed
  # to the node containing the key's master partition.
@@ -118,8 +134,5 @@ module Aerospike
  # timeout was not exceeded. Enter zero to skip sleep.
  @sleep_between_retries = opt[:sleep_between_retries] || 0.5
  end
-
-
  end # class
-
  end # module
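
The new filter_exp option accepts a server-side expression built with the Exp helpers shown in the doc comment above, and fail_on_filtered_out controls whether filtered transactions raise instead of being silently ignored. A minimal usage sketch (host, namespace, set, bin names, and the client.get option-passing style are assumptions, not taken from this diff):

    require "aerospike"

    # Only records whose integer bin "a" equals 11 pass the filter; with
    # fail_on_filtered_out set, filtered transactions raise FILTERED_OUT.
    policy = Aerospike::Policy.new(
      filter_exp: Aerospike::Exp.build(
        Aerospike::Exp.eq(Aerospike::Exp.int_bin("a"), Aerospike::Exp.int_val(11))
      ),
      fail_on_filtered_out: true
    )

    client = Aerospike::Client.new("127.0.0.1")        # placeholder host
    key = Aerospike::Key.new("test", "demo", "user1")  # placeholder namespace/set/key

    begin
      record = client.get(key, nil, policy)            # assumed call signature
      puts record.bins.inspect
    rescue Aerospike::Exceptions::Aerospike => e
      # result_code is FILTERED_OUT when the expression evaluates to false
      raise unless e.result_code == Aerospike::ResultCode::FILTERED_OUT
    end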
@@ -17,28 +17,29 @@
 
  module Aerospike
  class QueryExecutor # :nodoc:
-
  def self.query_partitions(cluster, policy, tracker, statement, recordset)
  interval = policy.sleep_between_retries
 
  should_retry = false
 
  loop do
+ # reset last_expn
+ @last_expn = nil
+
  list = tracker.assign_partitions_to_nodes(cluster, statement.namespace)
 
  if policy.concurrent_nodes
  threads = []
  # Use a thread per node
  list.each do |node_partition|
-
  threads << Thread.new do
  Thread.current.abort_on_exception = true
  command = QueryPartitionCommand.new(node_partition.node, tracker, policy, statement, recordset, node_partition)
  begin
  command.execute
  rescue => e
+ @last_expn = e unless e == QUERY_TERMINATED_EXCEPTION
  should_retry ||= command.should_retry(e)
- # puts "should retry: #{should_retry}"
  Aerospike.logger.error(e.backtrace.join("\n")) unless e == QUERY_TERMINATED_EXCEPTION
  end
  end
@@ -51,23 +52,20 @@ module Aerospike
  begin
  command.execute
  rescue => e
+ @last_expn = e unless e == QUERY_TERMINATED_EXCEPTION
  should_retry ||= command.should_retry(e)
  Aerospike.logger.error(e.backtrace.join("\n")) unless e == QUERY_TERMINATED_EXCEPTION
  end
  end
  end
 
- complete = tracker.complete?(@cluster, policy)
-
- if complete || !should_retry
- recordset.thread_finished
+ if tracker.complete?(@cluster, policy) || !should_retry
+ recordset.thread_finished(@last_expn)
  return
  end
  sleep(interval) if policy.sleep_between_retries > 0
  statement.reset_task_id
  end
  end
-
  end
-
  end
@@ -14,15 +14,13 @@
  # License for the specific language governing permissions and limitations under
  # the License.
 
- require 'aerospike/query/stream_command'
- require 'aerospike/query/recordset'
+ require "aerospike/query/stream_command"
+ require "aerospike/query/recordset"
 
  module Aerospike
-
  private
 
  class QueryPartitionCommand < QueryCommand #:nodoc:
-
  def initialize(node, tracker, policy, statement, recordset, node_partitions)
  super(node, policy, statement, recordset, @node_partitions)
  @node_partitions = node_partitions
@@ -39,29 +37,29 @@ module Aerospike
 
  if @statement.namespace
  @data_offset += @statement.namespace.bytesize + FIELD_HEADER_SIZE
- field_count+=1
+ field_count += 1
  end
 
  if @statement.set_name
  @data_offset += @statement.set_name.bytesize + FIELD_HEADER_SIZE
- field_count+=1
+ field_count += 1
  end
 
  # Estimate recordsPerSecond field size. This field is used in new servers and not used
  # (but harmless to add) in old servers.
  if @policy.records_per_second > 0
  @data_offset += 4 + FIELD_HEADER_SIZE
- field_count+=1
+ field_count += 1
  end
 
  # Estimate socket timeout field size. This field is used in new servers and not used
  # (but harmless to add) in old servers.
  @data_offset += 4 + FIELD_HEADER_SIZE
- field_count+=1
+ field_count += 1
 
  # Estimate task_id field.
  @data_offset += 8 + FIELD_HEADER_SIZE
- field_count+=1
+ field_count += 1
 
  filter = @statement.filters[0]
  bin_names = @statement.bin_names
@@ -73,16 +71,16 @@ module Aerospike
  # Estimate INDEX_TYPE field.
  if col_type > 0
  @data_offset += FIELD_HEADER_SIZE + 1
- field_count+=1
+ field_count += 1
  end
 
  # Estimate INDEX_RANGE field.
  @data_offset += FIELD_HEADER_SIZE
- filter_size+=1 # num filters
+ filter_size += 1 # num filters
  filter_size += filter.estimate_size
 
  @data_offset += filter_size
- field_count+=1
+ field_count += 1
 
  # TODO: Implement
  # packed_ctx = filter.packed_ctx
@@ -102,13 +100,18 @@ module Aerospike
  field_count += 1
  end
 
+ unless @policy.filter_exp.nil?
+ exp_size = estimate_expression_size(@policy.filter_exp)
+ field_count += 1 if exp_size > 0
+ end
+
  # Estimate aggregation/background function size.
  if @statement.function_name
  @data_offset += FIELD_HEADER_SIZE + 1 # udf type
  @data_offset += @statement.package_name.bytesize + FIELD_HEADER_SIZE
  @data_offset += @statement.function_name.bytesize + FIELD_HEADER_SIZE
 
- function_arg_buffer=''
+ function_arg_buffer = ""
  if @statement.function_args && @statement.function_args.length > 0
  function_arg_buffer = Value.of(@statement.function_args).to_bytes
  end
@@ -133,24 +136,24 @@ module Aerospike
 
  if parts_full_size > 0
  @data_offset += parts_full_size + FIELD_HEADER_SIZE
- field_count+=1
+ field_count += 1
  end
 
  if parts_partial_digest_size > 0
  @data_offset += parts_partial_digest_size + FIELD_HEADER_SIZE
- field_count+=1
+ field_count += 1
  end
 
  if parts_partial_bval_size > 0
  @data_offset += parts_partial_bval_size + FIELD_HEADER_SIZE
- field_count+=1
+ field_count += 1
  end
 
  # Estimate max records field size. This field is used in new servers and not used
  # (but harmless to add) in old servers.
  if max_records > 0
  @data_offset += 8 + FIELD_HEADER_SIZE
- field_count+=1
+ field_count += 1
  end
 
  operation_count = 0
@@ -180,6 +183,8 @@ module Aerospike
  # Write records per second.
  write_field_int(@policy.records_per_second, FieldType::RECORDS_PER_SECOND) if @policy.records_per_second > 0
 
+ write_filter_exp(@policy.filter_exp, exp_size)
+
  # Write socket idle timeout.
  write_field_int(@policy.socket_timeout, FieldType::SOCKET_TIMEOUT)
 
@@ -260,7 +265,5 @@ module Aerospike
  # !! converts nil to false
  !!@tracker&.should_retry(@node_partitions, e)
  end
-
  end # class
-
  end # module
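
Because Policy#filter_exp is now estimated and written as a command field, the same expression applies to partition queries. A rough sketch of issuing one (the Statement constructor and the options form accepted by client.query are assumed from the gem's public API, not shown in this diff):

    require "aerospike"

    client = Aerospike::Client.new("127.0.0.1")      # placeholder host
    stmt = Aerospike::Statement.new("test", "demo")  # placeholder namespace/set

    # Only stream back records whose integer bin "a" equals 11.
    filter = Aerospike::Exp.build(
      Aerospike::Exp.eq(Aerospike::Exp.int_bin("a"), Aerospike::Exp.int_val(11))
    )

    recordset = client.query(stmt, filter_exp: filter)  # assumed options form
    recordset.each { |rec| puts rec.bins.inspect }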
@@ -22,7 +22,6 @@ module Aerospike
  # so the production and the consumption are decoupled
  # there can be an unlimited count of producer threads and consumer threads
  class Recordset
-
  attr_reader :records
 
  def initialize(queue_size = 5000, thread_count = 1, type)
@@ -66,18 +65,21 @@ module Aerospike
 
  # this is called by working threads to signal their job is finished
  # it decreases the count of active threads and puts an EOF on queue when all threads are finished
- def thread_finished
+ # expn is an exception that happened in the executor, outside of the threads themselves
+ def thread_finished(expn = nil)
  @active_threads.update do |v|
  v -= 1
  @records.enq(nil) if v == 0
  v
  end
+
+ raise expn unless expn.nil?
  end
 
  # this is called by a thread that faced an exception to signal that the whole operation should terminate
  # it also may be called by the user to terminate the command in the middle of fetching records from server nodes
  # it clears the queue so that any threads waiting on the queue get unblocked and find out about the cancellation
- def cancel(expn=nil)
+ def cancel(expn = nil)
  set_exception(expn)
  @cancelled.set(true)
  @records.clear
@@ -104,18 +106,16 @@ module Aerospike
  @filters.nil? || @filters.empty?
  end
 
- private
+ private
 
- def set_exception(expn=nil)
+ def set_exception(expn = nil)
  expn ||= (@type == :scan ? SCAN_TERMINATED_EXCEPTION : QUERY_TERMINATED_EXCEPTION)
  @thread_exception.set(expn)
  end
-
  end
 
  private
 
- SCAN_TERMINATED_EXCEPTION = Aerospike::Exceptions::ScanTerminated.new()
- QUERY_TERMINATED_EXCEPTION = Aerospike::Exceptions::QueryTerminated.new()
-
+ SCAN_TERMINATED_EXCEPTION = Aerospike::Exceptions::ScanTerminated.new()
+ QUERY_TERMINATED_EXCEPTION = Aerospike::Exceptions::QueryTerminated.new()
  end
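
With thread_finished now re-raising the executor's last captured exception, a failure that exhausts the retry loop can surface to the code draining the recordset rather than being silently logged. A hedged consumer sketch (Recordset#each and the exception class are assumed from the gem's public usage):

    # client and stmt as in the query sketch above (placeholders)
    recordset = client.query(stmt)

    begin
      recordset.each do |record|
        puts record.bins.inspect
      end
    rescue Aerospike::Exceptions::Aerospike => e
      # With this release a failure captured by the executor can surface here
      # once retries are exhausted, instead of only being written to the logger.
      Aerospike.logger.error("query failed: #{e}")
    end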
@@ -17,26 +17,28 @@
 
  module Aerospike
  class ScanExecutor # :nodoc:
-
  def self.scan_partitions(policy, cluster, tracker, namespace, set_name, recordset, bin_names = nil)
  interval = policy.sleep_between_retries
 
  should_retry = false
 
  loop do
+ # reset last_expn
+ @last_expn = nil
+
  list = tracker.assign_partitions_to_nodes(cluster, namespace)
 
  if policy.concurrent_nodes
  threads = []
  # Use a thread per node
  list.each do |node_partition|
-
  threads << Thread.new do
  Thread.current.abort_on_exception = true
  command = ScanPartitionCommand.new(policy, tracker, node_partition, namespace, set_name, bin_names, recordset)
  begin
  command.execute
  rescue => e
+ @last_expn = e unless e == SCAN_TERMINATED_EXCEPTION
  should_retry ||= command.should_retry(e)
  Aerospike.logger.error(e.backtrace.join("\n")) unless e == SCAN_TERMINATED_EXCEPTION
  end
@@ -50,6 +52,7 @@ module Aerospike
  begin
  command.execute
  rescue => e
+ @last_expn = e unless e == SCAN_TERMINATED_EXCEPTION
  should_retry ||= command.should_retry(e)
  Aerospike.logger.error(e.backtrace.join("\n")) unless e == SCAN_TERMINATED_EXCEPTION
  end
@@ -57,13 +60,12 @@ module Aerospike
  end
 
  if tracker.complete?(@cluster, policy) || !should_retry
- recordset.thread_finished
+ recordset.thread_finished(@last_expn)
  return
  end
  sleep(interval) if policy.sleep_between_retries > 0
+ statement.reset_task_id
  end
  end
-
  end
-
  end
@@ -13,7 +13,6 @@
  # limitations under the License.
 
  module Aerospike
-
  private
 
  # ExecuteTask is used to poll for long running server execute job completion.
@@ -29,19 +28,24 @@ module Aerospike
  self
  end
 
- # IsDone queries all nodes for task completion status.
+ # queries all nodes for task completion status.
  def all_nodes_done?
-
- if @scan
- command = 'scan-list'
- else
- command = 'query-list'
- end
+ modul = @scan ? "scan" : "query"
+ cmd1 = "query-show:trid=#{@task_id}"
+ cmd2 = modul + "-show:trid=#{@task_id}"
+ cmd3 = "jobs:module=" + modul + ";cmd=get-job;trid=#{@task_id}"
 
  nodes = @cluster.nodes
  done = false
 
  nodes.each do |node|
+ command = cmd3
+ if node.supports_feature?(Aerospike::Features::PARTITION_QUERY)
+ command = cmd1
+ elsif node.supports_feature?(Aerospike::Features::QUERY_SHOW)
+ command = cmd2
+ end
+
  conn = node.get_connection(0)
  responseMap, _ = Info.request(conn, command)
  node.put_connection(conn)
@@ -58,28 +62,27 @@ module Aerospike
 
  b = index + find.length
  response = response[b, response.length]
- find = 'job_status='
+ find = "job_status="
  index = response.index(find)
 
  next unless index
 
  b = index + find.length
  response = response[b, response.length]
- e = response.index(':')
+ e = response.index(":")
  status = response[0, e]
 
  case status
- when 'ABORTED'
+ when "ABORTED"
  raise Aerospike::Exceptions::QueryTerminated
- when 'IN PROGRESS'
+ when "IN PROGRESS"
  return false
- when 'DONE'
+ when "DONE"
  done = true
  end
  end
 
  done
  end
-
  end
  end
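
The polling command is now chosen per node from its advertised features. For an example task id of 12345 the three candidate info strings produced by the code above look like this (reusing the diff's own variable names):

    task_id = 12345
    modul = "scan"                                       # or "query", per @scan

    cmd1 = "query-show:trid=#{task_id}"                  # nodes with PARTITION_QUERY
    cmd2 = modul + "-show:trid=#{task_id}"               # nodes with QUERY_SHOW
    cmd3 = "jobs:module=" + modul + ";cmd=get-job;trid=#{task_id}"  # older servers

    # cmd1 => "query-show:trid=12345"
    # cmd2 => "scan-show:trid=12345"
    # cmd3 => "jobs:module=scan;cmd=get-job;trid=12345"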
@@ -17,36 +17,34 @@
  # License for the specific language governing permissions and limitations under
  # the License.
 
- require 'aerospike/utils/pool'
+ require "aerospike/utils/pool"
 
  module Aerospike
-
  private
 
  # Buffer class to ease the work around
  class Buffer #:nodoc:
-
  @@buf_pool = Pool.new
  @@buf_pool.create_proc = Proc.new { Buffer.new }
 
  attr_accessor :buf
 
- INT16 = 's>'
- UINT16 = 'n'
- UINT16LE = 'v'
- INT32 = 'l>'
- UINT32 = 'N'
- INT64 = 'q>'
- UINT64 = 'Q>'
- UINT64LE = 'Q'
- DOUBLE = 'G'
+ INT16 = "s>"
+ UINT16 = "n"
+ UINT16LE = "v"
+ INT32 = "l>"
+ UINT32 = "N"
+ INT64 = "q>"
+ UINT64 = "Q>"
+ UINT64LE = "Q"
+ DOUBLE = "G"
 
  DEFAULT_BUFFER_SIZE = 16 * 1024
  MAX_BUFFER_SIZE = 10 * 1024 * 1024
 
- def initialize(size=DEFAULT_BUFFER_SIZE, buf = nil)
+ def initialize(size = DEFAULT_BUFFER_SIZE, buf = nil)
  @buf = (buf ? buf : ("%0#{size}d" % 0))
- @buf.force_encoding('binary')
+ @buf.force_encoding("binary")
  @slice_end = @buf.bytesize
  end
 
@@ -61,6 +59,7 @@ module Aerospike
  def size
  @buf.bytesize
  end
+
  alias_method :length, :size
 
  def eat!(n)
@@ -135,7 +134,7 @@ module Aerospike
  8
  end
 
- def read(offset, len=nil)
+ def read(offset, len = nil)
  if len
  @buf[offset, len]
  else
@@ -144,37 +143,37 @@ module Aerospike
  end
 
  def read_int16(offset)
- vals = @buf[offset..offset+1]
+ vals = @buf[offset..offset + 1]
  vals.unpack(INT16)[0]
  end
 
  def read_uint16(offset)
- vals = @buf[offset..offset+1]
+ vals = @buf[offset..offset + 1]
  vals.unpack(UINT16)[0]
  end
 
  def read_int32(offset)
- vals = @buf[offset..offset+3]
+ vals = @buf[offset..offset + 3]
  vals.unpack(INT32)[0]
  end
 
  def read_uint32(offset)
- vals = @buf[offset..offset+3]
+ vals = @buf[offset..offset + 3]
  vals.unpack(UINT32)[0]
  end
 
  def read_int64(offset)
- vals = @buf[offset..offset+7]
+ vals = @buf[offset..offset + 7]
  vals.unpack(INT64)[0]
  end
 
  def read_uint64_little_endian(offset)
- vals = @buf[offset..offset+7]
+ vals = @buf[offset..offset + 7]
  vals.unpack(UINT64LE)[0]
  end
 
  def read_uint64(offset)
- vals = @buf[offset..offset+7]
+ vals = @buf[offset..offset + 7]
  vals.unpack(UINT64)[0]
  end
 
@@ -183,14 +182,14 @@ module Aerospike
  i = 0
  while i < len
  val <<= 8
- val |= @buf[offset+i].ord & 0xFF
+ val |= @buf[offset + i].ord & 0xFF
  i = i.succ
  end
  val
  end
 
  def read_double(offset)
- vals = @buf[offset..offset+7]
+ vals = @buf[offset..offset + 7]
  vals.unpack(DOUBLE)[0]
  end
 
@@ -199,39 +198,48 @@ module Aerospike
  end
 
  def to_s
- @buf[0..@slice_end-1]
+ @buf[0..@slice_end - 1]
  end
 
  def reset
- for i in 0..@buf.size-1
- @buf[i] = ' '
+ for i in 0..@buf.size - 1
+ @buf[i] = " "
  end
  end
 
- def dump(start=0, finish=nil)
+ def dump(start = 0, finish = nil)
+ buf ||= @buf.bytes
  finish ||= @slice_end - 1
  width = 16
 
- ascii = '|'
+ ascii = "|"
  counter = 0
 
- print '%06x ' % start
+ print "%08x " % start
  @buf.bytes[start...finish].each do |c|
  if counter >= start
- print '%02x ' % c
+ print "%02x " % c
  ascii << (c.between?(32, 126) ? c : ?.)
- if ascii.length >= width
- ascii << '|'
+ print " " if ascii.length == (width / 2 + 1)
+ if ascii.length > width
+ ascii << "|"
  puts ascii
- ascii = '|'
- print '%06x ' % (counter + 1)
+ ascii = "|"
+ print "%08x " % (counter + 1)
  end
  end
  counter += 1
  end
- puts
- end
 
+ # print the remainder in buffer
+ if ascii.length.positive?
+ fill_size = ((width - ascii.length + 1) * 3)
+ fill_size += 1 if ascii.length <= (width / 2)
+ filler = " " * fill_size
+ print filler
+ ascii << "|"
+ puts ascii
+ end
+ end
  end # buffer
-
  end # module
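
The pack-format constants above drive the read helpers. A tiny illustrative sketch of what they decode (Buffer is an internal #:nodoc: class, so this is for illustration only):

    buf = Aerospike::Buffer.new(8)        # 8-byte buffer, filled with ASCII zeroes
    buf.buf[0, 4] = [1969].pack("l>")     # big-endian int32, same format as INT32
    buf.read_int32(0)                     # => 1969

    buf.buf[0, 2] = [0xFFFE].pack("n")    # network byte order, same format as UINT16
    buf.read_uint16(0)                    # => 65534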
@@ -14,13 +14,11 @@
  # License for the specific language governing permissions and limitations under
  # the License.
 
- require 'msgpack'
- require 'aerospike/utils/pool'
+ require "msgpack"
+ require "aerospike/utils/pool"
 
  module Aerospike
-
  class Packer < MessagePack::Packer #:nodoc:
-
  AS_EXT_TYPE = -1
 
  @@pool = Pool.new
@@ -44,9 +42,12 @@ module Aerospike
  buffer << [val].pack("S>")
  end
 
+ def write_raw(buf)
+ buffer.write(buf)
+ end
+
  def bytes
- self.to_s.force_encoding('binary')
+ self.to_s.force_encoding("binary")
  end
  end
-
  end
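
The new write_raw appends already-encoded bytes to the underlying MessagePack buffer without re-encoding them. A rough sketch (Packer instances are normally checked out of the class pool; constructing one directly here is an assumption made for illustration):

    packer = Aerospike::Packer.new
    packer.write(3)               # msgpack-encodes the integer as usual
    packer.write_raw("\xC0".b)    # appends the raw byte verbatim (an already-encoded nil)
    packer.bytes                  # => binary string containing both encodings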