tablestore-ruby-sdk 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/lib/consts.rb +44 -0
- data/lib/protobuf/ots.proto +374 -0
- data/lib/protobuf/ots_filiter.proto +48 -0
- data/lib/protobuf/ots_filiter_pb.rb +55 -0
- data/lib/protobuf/ots_pb.rb +307 -0
- data/lib/tablestore-ruby-sdk.rb +99 -0
- data/lib/tablestore/connection.rb +22 -0
- data/lib/tablestore/crc8_auto.rb +36 -0
- data/lib/tablestore/error.rb +57 -0
- data/lib/tablestore/main.rb +51 -0
- data/lib/tablestore/metadata.rb +413 -0
- data/lib/tablestore/ots.rb +481 -0
- data/lib/tablestore/plain_buffer_coded_input_stream.rb +217 -0
- data/lib/tablestore/plain_buffer_coded_output_stream.rb +252 -0
- data/lib/tablestore/plain_buffer_crc8.rb +21 -0
- data/lib/tablestore/plain_buffer_input_stream.rb +92 -0
- data/lib/tablestore/plain_buffer_output_stream.rb +59 -0
- data/lib/tablestore/protocol.rb +161 -0
- metadata +117 -0
|
@@ -0,0 +1,481 @@
|
|
|
1
|
+
require 'protobuf'
|
|
2
|
+
require 'os'
|
|
3
|
+
require 'protobuf/ots_pb'
|
|
4
|
+
require 'protobuf/ots_filiter_pb'
|
|
5
|
+
require 'consts'
|
|
6
|
+
require 'tablestore/plain_buffer_coded_output_stream'
|
|
7
|
+
require 'tablestore/plain_buffer_output_stream'
|
|
8
|
+
require 'tablestore/plain_buffer_coded_input_stream'
|
|
9
|
+
require 'tablestore/plain_buffer_input_stream'
|
|
10
|
+
|
|
11
|
+
class OTS
|
|
12
|
+
# Builds the serialized GetRangeRequest protobuf body for a GetRange call.
#
# request - Hash with :table_name, :direction, :inclusive_start_primary_key,
#           :exclusive_end_primary_key, :max_version and :limit keys.
#
# Returns the encoded protobuf String with the byte pair following the table
# name patched (see note below).
def encode_get_range_request(request)
  proto = GetRangeRequest.new
  proto.table_name = request[:table_name]
  proto.direction = request[:direction]
  proto.inclusive_start_primary_key = serialize_primary_key(request[:inclusive_start_primary_key])
  proto.exclusive_end_primary_key = serialize_primary_key(request[:exclusive_end_primary_key])
  proto.max_versions = request[:max_version]
  proto.limit = request[:limit]
  proto_string = GetRangeRequest.encode(proto)
  # NOTE(review): patches "\x10\x02" after the table name to "\x10\x00" —
  # presumably forcing a varint field the server expects as 0; confirm against
  # the .proto definition.
  # BUG FIX: the original wrapped this in `if proto_string.match(...)` whose
  # inner `sub` result was discarded — dead code, since `sub` is non-mutating
  # and is a no-op anyway when the pattern is absent.
  proto_string.sub("#{request[:table_name]}\x10\x02", "#{request[:table_name]}\x10\x00")
end
|
|
26
|
+
|
|
27
|
+
# Parses a GetRange response body into a list of decoded rows.
#
# api_name and headers are accepted for interface parity with the other
# decode hooks but are unused here.
#
# Returns an Array of rows as produced by PlainBufferCodedInputStream#read_rows
# (empty when the response carried no row payload).
def decode_get_range_request(api_name, headers, body)
  proto = GetRangeResponse.decode(body)
  # NOTE(review): consumed capacity, next_start_primary_key and next_token
  # from the response are intentionally ignored — only the rows are surfaced.
  # (The original kept an unused `next_start_pk = nil` local and large
  # commented-out parsing code; both removed.)

  row_list = []
  if proto.rows.length != 0
    input_stream = PlainBufferInputStream.new(proto.rows)
    coded_input_stream = PlainBufferCodedInputStream.new(input_stream)
    row_list = coded_input_stream.read_rows
  end

  row_list
end
|
|
50
|
+
|
|
51
|
+
# Builds the serialized PutRowRequest protobuf body.
#
# table_name - String table to write to.
# row        - object exposing #primary_key and #attribute_columns.
# condition  - Metadata::Condition or nil (defaults to IGNORE existence).
#
# Returns the encoded protobuf String, with a trailing "\x03" byte patched
# to "\x00" (see note below).
def encode_put_row(table_name, row, condition)
  proto = PutRowRequest.new
  proto.table_name = table_name
  # BUG FIX: the original wrote `Condition(RowExistenceExpectation::IGNORE, nil)`,
  # calling `Condition` as a function on unqualified constants — a NameError at
  # runtime. Construct the default the same way make_put_row_item does.
  condition = Metadata::Condition.new(Metadata::RowExistenceExpectation::IGNORE, nil) if condition.nil?
  condition_proto = Condition.new
  proto.condition = make_condition(condition_proto, condition)
  proto.row = serialize_for_put_row(row.primary_key, row.attribute_columns)
  proto_string = PutRowRequest.encode(proto)
  # NOTE(review): rewrites a trailing enum byte \x03 to \x00 — presumably the
  # row-existence value the server expects; confirm against the .proto schema.
  proto_string = proto_string[0..-2] + [0].pack('C') if proto_string[-1] == "\x03"
  proto_string
end
|
|
62
|
+
|
|
63
|
+
# Builds the serialized GetRowRequest protobuf body for a single-row read.
#
# table_name     - String table to read from.
# primary_key    - primary key as a list of [name, value] pairs.
# columns_to_get - Array of column names or nil (all columns).
# column_filter  - optional column condition; encoded into proto.filter.
# max_version    - optional maximum number of versions to return.
def encode_get_row(table_name, primary_key, columns_to_get, column_filter, max_version)
  proto = GetRowRequest.new
  proto.table_name = table_name
  make_repeated_column_names(proto.columns_to_get, columns_to_get)

  proto.filter = Filter.encode(make_column_condition(column_filter)) if column_filter.present?

  proto.primary_key = serialize_primary_key(primary_key)
  proto.max_versions = max_version if max_version.present?
  GetRowRequest.encode(proto)
end
|
|
75
|
+
|
|
76
|
+
# Parses a GetRow response body.
#
# Returns whatever PlainBufferCodedInputStream#read_row yields for the row
# payload, or nil when the response carried no row.
def decode_get_row(body)
  proto = GetRowResponse.decode(body)
  return nil unless proto.row.length > 0

  stream = PlainBufferInputStream.new(proto.row)
  PlainBufferCodedInputStream.new(stream).read_row
end
|
|
87
|
+
|
|
88
|
+
# Parses a PutRow response body.
#
# Returns the decoded row echoed back by the server (e.g. when return_content
# was requested), or nil when the response carried no row payload.
def decode_put_row(body)
  proto = PutRowResponse.decode(body)
  return nil if proto.row.length == 0

  stream = PlainBufferInputStream.new(proto.row)
  PlainBufferCodedInputStream.new(stream).read_row
end
|
|
98
|
+
|
|
99
|
+
# Builds the serialized BatchGetRowRequest protobuf body.
#
# request - object whose #items yields [key, table_value] pairs; each
#           table_value describes one table's rows/columns/filters to fetch.
#
# Returns the encoded protobuf String.
def make_batch_get_row(request)
  proto = BatchGetRowRequest.new
  request.items.each do |item|
    table_value = item[1]
    table_item = TableInBatchGetRowRequest.new
    table_item.table_name = table_value.table_name
    make_repeated_column_names(table_item.columns_to_get, table_value.columns_to_get)

    if table_value.column_filter.present?
      pb_filter = make_column_condition(table_value.column_filter)
      table_item.filter = Filter.encode(pb_filter)
    end

    table_value.primary_keys.each do |pk|
      table_item.primary_key << serialize_primary_key(pk)
    end

    if table_value.token.present?
      table_value.token.each { |tk| table_item.token << tk }
    end

    table_item.max_versions = table_value.max_version if table_value.max_version.present?

    if table_value.time_range.present?
      if table_value.time_range.is_a?(Array)
        # [start, end] pair
        table_item.time_range.start_time = table_value.time_range[0]
        table_item.time_range.end_time = table_value.time_range[1]
      elsif table_value.time_range.is_a?(Integer)
        # BUG FIX: the original tested `table_value.is_a?(Fixnum)` — the wrong
        # receiver (the container, not the time_range) and a class removed in
        # Ruby 3 — so a scalar time_range was silently dropped.
        table_item.time_range.specific_time = table_value.time_range
      end
    end

    table_item.start_column = table_value.start_column if table_value.start_column.present?
    table_item.end_column = table_value.end_column if table_value.end_column.present?
    proto.tables << table_item
  end
  BatchGetRowRequest.encode(proto)
end
|
|
142
|
+
|
|
143
|
+
# Parses a BatchGetRow response body.
#
# Returns an Array with one entry per table, each the decoded row list
# produced by parse_get_row_item.
def decode_batch_get_row(body)
  proto = BatchGetRowResponse.decode(body)
  proto.tables.map { |table_item| parse_get_row_item(table_item.rows) }
end
|
|
151
|
+
|
|
152
|
+
# Builds the serialized BatchWriteRowRequest protobuf body.
#
# request - object whose #items yields [key, table_value] pairs; each
#           table_value carries a table_name and row_items tagged PUT/UPDATE.
#
# Returns the encoded protobuf String with a byte patch applied (see note).
def make_batch_write_row(request)
  proto = BatchWriteRowRequest.new
  request.items.each do |item|
    table_value = item[1]
    table_item = TableInBatchWriteRowRequest.new
    table_item.table_name = table_value.table_name

    table_value.row_items.each do |row_item|
      if row_item.type == Metadata::BatchWriteRowType::PUT
        table_item.rows << make_put_row_item(RowInBatchWriteRowRequest.new, row_item)
      elsif row_item.type == Metadata::BatchWriteRowType::UPDATE
        table_item.rows << make_update_row_item(RowInBatchWriteRowRequest.new, row_item)
      end
    end

    proto.tables << table_item
  end

  encoded = BatchWriteRowRequest.encode(proto)
  # NOTE(review): rewrites every "\x08\x03" byte pair to "\x08\x00" in the
  # encoded blob — presumably patching an enum field the server expects as 0.
  # A global gsub can also hit row payload bytes that happen to match; verify.
  encoded.gsub("\x08\x03", "\x08\x00")
end
|
|
174
|
+
|
|
175
|
+
# Populates a RowInBatchWriteRowRequest proto for a PUT row item and
# returns it.
#
# proto        - fresh RowInBatchWriteRowRequest to fill in.
# put_row_item - item exposing #condition, #return_type and #row.
def make_put_row_item(proto, put_row_item)
  cond = put_row_item.condition
  # Default to ignoring row existence when no condition was supplied.
  cond = Metadata::Condition.new(Metadata::RowExistenceExpectation::IGNORE, nil) if cond.nil?
  proto.condition = make_condition(Condition.new, cond)

  # NOTE(review): sibling make_update_row_item compares against
  # Metadata::ReturnType::RT_PK; confirm this unqualified ReturnType resolves
  # to the intended enum.
  proto.return_content.return_type = :RT_PK if put_row_item.return_type == ReturnType::RT_PK

  proto.row_change = serialize_for_put_row(put_row_item.row.primary_key, put_row_item.row.attribute_columns)
  proto.type = :PUT
  proto
end
|
|
190
|
+
|
|
191
|
+
# Populates a RowInBatchWriteRowRequest proto for an UPDATE row item and
# returns it.
#
# proto           - fresh RowInBatchWriteRowRequest to fill in.
# update_row_item - item exposing #condition, #return_type and #row.
def make_update_row_item(proto, update_row_item)
  condition = update_row_item.condition
  if condition.nil?
    # BUG FIX: the original wrote `Metadata::Condition(...)` — a method call
    # on the module, not a constructor — raising NoMethodError. Construct the
    # default the same way make_put_row_item does.
    condition = Metadata::Condition.new(Metadata::RowExistenceExpectation::IGNORE, nil)
  end

  # BUG FIX: the original called `make_condition(proto.condition, condition)`
  # — proto.condition is nil on a fresh proto — and discarded the return
  # value, so the condition was never attached. Build and assign a Condition
  # message, mirroring make_put_row_item.
  proto.condition = make_condition(Condition.new, condition)

  if update_row_item.return_type == Metadata::ReturnType::RT_PK
    proto.return_content.return_type = :RT_PK
  end

  proto.row_change = serialize_for_update_row(update_row_item.row.primary_key, update_row_item.row.attribute_columns)
  proto.type = :UPDATE
  proto
end
|
|
207
|
+
|
|
208
|
+
# Appends each requested column name onto the proto's repeated field.
# A nil columns_to_get is a no-op (meaning "all columns").
def make_repeated_column_names(proto, columns_to_get)
  return if columns_to_get.nil?

  columns_to_get.each { |name| proto << name }
end
|
|
217
|
+
|
|
218
|
+
# Copies a Metadata::Condition onto a Condition protobuf message.
#
# proto     - Condition protobuf message to fill in.
# condition - Metadata::Condition carrying the row-existence expectation and
#             an optional column condition.
#
# Returns the filled proto. Raises TableStoreClientError when condition is
# not a Metadata::Condition or carries an unknown expectation.
def make_condition(proto, condition)
  raise TableStoreClientError.new("condition should be an instance of Condition, not #{condition.class}") unless condition.is_a?(Metadata::Condition)
  expectation_str = condition.get_row_existence_expectation
  proto.row_existence = expectation_str
  # BUG FIX: the original message interpolated a bare `join(', ')` — a
  # NoMethodError on this object — so raising here crashed with the wrong
  # error. State the valid values directly instead.
  raise TableStoreClientError.new("row_existence_expectation should be one of [IGNORE, EXPECT_EXIST, EXPECT_NOT_EXIST], not #{expectation_str}") if proto.row_existence.nil?

  # NOTE(review): presence was originally checked via get_column_condition but
  # the value read via column_condition; use the getter for both.
  column_condition = condition.get_column_condition
  if column_condition.present?
    pb_filter = make_column_condition(column_condition)
    proto.column_condition = Filter.encode(pb_filter)
  end
  proto
end
|
|
230
|
+
|
|
231
|
+
# Converts a metadata column condition into a Filter protobuf message.
# Returns nil when no condition is given; raises TableStoreClientError for
# unsupported condition classes.
def make_column_condition(column_condition)
  return if column_condition.nil?

  proto = Filter.new
  proto.type = column_condition.get_type

  # Encode the concrete condition payload by class.
  proto.filter =
    case column_condition
    when Metadata::CompositeColumnCondition
      make_composite_condition(column_condition)
    when Metadata::SingleColumnCondition
      make_relation_condition(column_condition)
    else
      raise TableStoreClientError.new("expect CompositeColumnCondition, SingleColumnCondition but not #{column_condition.class}")
    end

  proto
end
|
|
246
|
+
|
|
247
|
+
# Encodes a composite (AND/OR/NOT) column condition, recursively encoding
# each sub-condition, and returns the serialized filter bytes.
def make_composite_condition(condition)
  proto = CompositeColumnValueFilter.new
  proto.combinator = condition.get_combinator

  condition.sub_conditions.each do |sub_condition|
    proto.sub_filters << make_column_condition(sub_condition)
  end

  CompositeColumnValueFilter.encode(proto)
end
|
|
257
|
+
|
|
258
|
+
# Encodes a single-column relational condition into serialized
# SingleColumnValueFilter bytes, then post-patches the encoding so that the
# two boolean fields are always present on the wire (see below).
#
# condition - Metadata::SingleColumnCondition exposing comparator, column
#             name/value, pass_if_missing and latest_version_only.
#
# Returns the (possibly patched) filter byte String.
def make_relation_condition(condition)
  proto = SingleColumnValueFilter.new
  proto.comparator = condition.get_comparator

  proto.column_name = condition.get_column_name
  proto.column_value = serialize_column_value(condition.get_column_value)
  # Wire field is the inverse of the user-facing flag.
  proto.filter_if_missing = !condition.pass_if_missing
  proto.latest_version_only = condition.latest_version_only
  filter_string = SingleColumnValueFilter.encode(proto)

  # The encoder omits false-valued booleans, but the server presumably
  # requires both fields explicitly. The byte pairs below look like protobuf
  # varint tags with value 0: 32 = (4<<3)|0 (field 4), 40 = (5<<3)|0
  # (field 5) — TODO confirm against ots_filiter.proto.
  if proto.filter_if_missing.blank? && proto.latest_version_only.blank?
    filter_string += [32, 0, 40, 0].pack("C*")
  elsif proto.filter_if_missing.blank?
    # Splice field 4 in just before the trailing two bytes so field order is
    # preserved — NOTE(review): assumes field 5 encoded as exactly 2 bytes.
    filter_string.insert(-3, [32, 0].pack("C*"))
  elsif proto.latest_version_only.blank?
    filter_string += [40, 0].pack("C*")
  end

  filter_string
end
|
|
278
|
+
|
|
279
|
+
# Decodes the per-row results of one table in a BatchGetRow response.
#
# proto - repeated row-result messages, each with #is_ok and #row bytes.
#
# Returns an Array of {pk:, attr:} hashes for successfully decoded rows.
# Failed rows (and their error codes / consumed capacity) are ignored.
def parse_get_row_item(proto)
  rows = []
  proto.each do |row_item|
    pk_columns = nil
    attr_columns = nil

    if row_item.is_ok && row_item.row.length != 0
      stream = PlainBufferInputStream.new(row_item.row)
      pk_columns, attr_columns = PlainBufferCodedInputStream.new(stream).read_row
    end

    rows << { pk: pk_columns, attr: attr_columns } if pk_columns.present?
  end
  rows
end
|
|
309
|
+
|
|
310
|
+
# Decodes a BatchWriteRow response into a Hash mapping each table name to
# the list of per-row results from parse_write_row_item.
def parse_batch_write_row(proto)
  proto.each_with_object({}) do |table_item, result|
    result[table_item.table_name] =
      table_item.rows.map { |row_item| parse_write_row_item(row_item) }
  end
end
|
|
324
|
+
|
|
325
|
+
# Extracts the primary-key columns echoed back for one written row.
#
# row_item - per-row result with #is_ok and #row bytes.
#
# Returns the decoded primary-key columns, or nil when the write failed or
# no row payload was returned. Error code/message and consumed capacity from
# the response are intentionally ignored.
def parse_write_row_item(row_item)
  primary_key_columns = nil

  # BUG FIX: the original assigned error_code/error_message (and captured
  # attribute_columns) into locals that were never used — dead code removed.
  if row_item.is_ok && row_item.row.length != 0
    input_stream = PlainBufferInputStream.new(row_item.row)
    coded_input_stream = PlainBufferCodedInputStream.new(input_stream)
    # read_row yields [primary_key, attributes]; only the key is surfaced.
    primary_key_columns, = coded_input_stream.read_row
  end

  primary_key_columns
end
|
|
342
|
+
|
|
343
|
+
private
|
|
344
|
+
# Serializes a primary key (list of [name, value] pairs) into the
# PlainBuffer wire format: header, key columns, then a row checksum byte.
#
# Returns the serialized bytes as a String.
def serialize_primary_key(primary_key)
  buf_size = LITTLE_ENDIAN_SIZE
  buf_size += compute_primary_key_size(primary_key)
  buf_size += 2 # presumably checksum tag + value byte — TODO confirm
  output_stream = PlainBufferOutputStream.new(buf_size)
  coded_output_stream = PlainBufferCodedOutputStream.new(output_stream)
  row_checksum = 0
  coded_output_stream.write_header

  row_checksum = coded_output_stream.write_primary_key(primary_key, row_checksum)
  # Fold a trailing 0 into the row CRC before emitting it.
  # NOTE(review): serialize_for_put_row does this via PlainBufferCrc8.crc_int8;
  # confirm the coded stream exposes an equivalent crc_int8 method.
  row_checksum = coded_output_stream.crc_int8(row_checksum, 0)
  coded_output_stream.write_row_checksum(row_checksum)
  output_stream.get_buffer.join('')
end
|
|
358
|
+
|
|
359
|
+
# Serializes a single column value into its PlainBuffer byte form (used by
# filter encoding) and returns the bytes as a String.
def serialize_column_value(value)
  stream = PlainBufferOutputStream.new(compute_variant_value_size(value))
  coded = PlainBufferCodedOutputStream.new(stream)
  coded.write_column_value(value)
  stream.get_buffer.join('')
end
|
|
367
|
+
|
|
368
|
+
# Size of a bare (variant) column value: the primary-key-value encoding
# minus the 4-byte little-endian length prefix and one tag byte.
def compute_variant_value_size(value)
  compute_primary_key_value_size(value) - LITTLE_ENDIAN_SIZE - 1
end
|
|
371
|
+
|
|
372
|
+
# Converts a consumed-capacity protobuf message into a CapacityUnit value
# object, or nil when no capacity information is present.
def parse_capacity_unit(proto)
  return nil if proto.nil?

  # BUG FIX: the original called proto.HasField('read') — that is the Python
  # protobuf API and raises NoMethodError under the Ruby protobuf runtime —
  # and then invoked `CapacityUnit(...)` as a method instead of constructing
  # the object. Missing numeric fields read back as nil/0, so default to 0.
  cu_read = proto.read.nil? ? 0 : proto.read
  cu_write = proto.write.nil? ? 0 : proto.write
  # NOTE(review): confirm the class path — this may live under Metadata::.
  CapacityUnit.new(cu_read, cu_write)
end
|
|
382
|
+
|
|
383
|
+
# Total encoded size of a primary key: one leading tag byte plus the size
# of each [name, value] column.
def compute_primary_key_size(primary_key)
  1 + primary_key.sum { |name, value| compute_primary_key_column_size(name, value) }
end
|
|
390
|
+
|
|
391
|
+
# Encoded size of one primary-key column: cell tag byte, name tag plus
# 4-byte length prefix, the name bytes, the encoded value, and two trailing
# bytes (checksum tag + value).
def compute_primary_key_column_size(pk_name, pk_value)
  2 + LITTLE_ENDIAN_SIZE +
    pk_name.length +
    compute_primary_key_value_size(pk_value) +
    2
end
|
|
399
|
+
|
|
400
|
+
# Encoded size of a primary-key value: fixed tag/length overhead plus a
# payload that depends on the value's type. The sentinel strings INF_MIN,
# INF_MAX and PK_AUTO_INCR encode as a single marker byte and are checked
# before the general String case.
def compute_primary_key_value_size(value)
  base = 2 + LITTLE_ENDIAN_SIZE
  return base + 1 if %w[INF_MIN INF_MAX PK_AUTO_INCR].include?(value)

  case value
  when Numeric
    base + 8
  when String
    base + LITTLE_ENDIAN_SIZE + value.length
  else
    base
  end
end
|
|
415
|
+
|
|
416
|
+
# Serializes a full row (primary key + attribute columns) into the
# PlainBuffer wire format used by PutRow: header, primary-key cells,
# attribute cells, then a row checksum byte.
#
# Returns the serialized bytes as a String.
def serialize_for_put_row(primary_key, attribute_columns)
  buf_size = compute_put_row_size(primary_key, attribute_columns)
  output_stream = PlainBufferOutputStream.new(buf_size)
  coded_output_stream = PlainBufferCodedOutputStream.new(output_stream)

  row_checksum = 0
  coded_output_stream.write_header
  row_checksum = coded_output_stream.write_primary_key(primary_key, row_checksum)
  row_checksum = coded_output_stream.write_columns(attribute_columns, row_checksum)
  # Fold a trailing 0 (presumably a "no delete marker" flag — TODO confirm)
  # into the CRC before emitting the checksum byte.
  row_checksum = PlainBufferCrc8.crc_int8(row_checksum, 0)
  coded_output_stream.write_row_checksum(row_checksum)

  output_stream.get_buffer.join('')
end
|
|
430
|
+
|
|
431
|
+
# Total PlainBuffer size of a PUT row: length prefix, primary key, optional
# attribute section (tag byte + each column, with or without timestamp),
# plus two trailing checksum bytes.
def compute_put_row_size(primary_key, attribute_columns)
  size = LITTLE_ENDIAN_SIZE + compute_primary_key_size(primary_key)

  unless attribute_columns.length == 0
    size += 1 # attribute-section tag byte
    size += attribute_columns.sum do |attr|
      # attr is [name, value] or [name, value, timestamp]
      compute_column_size(*attr.first(3))
    end
  end

  size + 2
end
|
|
448
|
+
|
|
449
|
+
# Encoded size of one attribute column: cell tag, name tag + 4-byte length
# prefix, name bytes, optional value, optional timestamp (tag + 8 bytes),
# plus two trailing checksum bytes.
def compute_column_size(column_name, column_value, timestamp = nil)
  size = 2 + LITTLE_ENDIAN_SIZE + column_name.length
  size += compute_column_value_size(column_value) unless column_value.nil?
  size += 1 + LITTLE_ENDIAN_64_SIZE unless timestamp.nil?
  size + 2
end
|
|
462
|
+
|
|
463
|
+
# Encoded size of an attribute cell value: tag + length prefix + type byte,
# plus a type-dependent payload (1 byte for booleans, 8 for integers and
# floats, 4-byte length prefix + bytes for strings).
#
# Raises TableStoreClientError for unsupported value classes.
def compute_column_value_size(value)
  size = 1
  size += LITTLE_ENDIAN_SIZE + 1

  if value.is_a?(TrueClass) || value.is_a?(FalseClass)
    size += 1
  elsif value.is_a?(Integer)
    # BUG FIX: originally tested Fixnum, which is removed in Ruby 3.2+
    # (NameError at runtime).
    size += LITTLE_ENDIAN_64_SIZE
  elsif value.is_a?(String)
    size += LITTLE_ENDIAN_SIZE
    size += value.length
  elsif value.is_a?(Float)
    size += LITTLE_ENDIAN_64_SIZE
  else
    # BUG FIX: originally `raise TableStoreClientError("..." + value.class)` —
    # a method call instead of a constructor, and String + Class raises
    # TypeError before the intended error could surface.
    raise TableStoreClientError.new("Unsupported column type: #{value.class}")
  end
  size
end
|
|
481
|
+
end
|