tablestore-ruby-sdk 0.0.5 → 0.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 393f04a53a6fc7a96b9dd44076b620f52b59508b
+  data.tar.gz: f9a9535ba93b0ad7b0e2188d379d050ea156ae66
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3e20aebbf6e247c5165bf23d633afa2f4b6328db19504215ec039442c4531edb663ae4d6aa9f9b25e3b6f3b7d51371979d266ebecb26d45a8f3eaf63b835d093
+  data.tar.gz: 56181e730229258de469909715ab42570ce972fb5d992a79614a192af30bdfbed9eee2f103f692cd10382b9b9d6c89fd560a7eac59369d15ec5ab93d640eb534
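Both digest sections are now populated. As a rough illustration of what they protect: inside the .gem archive, checksums.yaml (shipped gzipped as checksums.yaml.gz) records the digests of metadata.gz and data.tar.gz, which a consumer can recompute with Ruby's Digest and compare. The file paths below are illustrative and assume the archive has already been unpacked.

require 'digest'
require 'yaml'

sums = YAML.safe_load(File.read('checksums.yaml'))
%w[metadata.gz data.tar.gz].each do |file|
  actual = Digest::SHA512.file(file).hexdigest   # the SHA1 entries can be checked the same way
  puts "#{file}: #{actual == sums['SHA512'][file] ? 'ok' : 'MISMATCH'}"
end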
data/lib/tablestore/metadata.rb
CHANGED
@@ -40,6 +40,39 @@ ALL = [
 ]
 
 module Metadata
+  class TableMeta
+    attr_accessor :table_name, :schema_of_primary_key
+    def initialize(table_name, schema_of_primary_key)
+      # schema_of_primary_key: [('PK0', 'STRING'), ('PK1', 'INTEGER'), ...]
+      self.table_name = table_name
+      self.schema_of_primary_key = schema_of_primary_key
+    end
+  end
+
+  class TableOptions
+    attr_accessor :time_to_live, :max_version, :max_time_deviation
+    def initialize(time_to_live = -1, max_version = 1, max_time_deviation = 86400)
+      self.time_to_live = time_to_live
+      self.max_version = max_version
+      self.max_time_deviation = max_time_deviation
+    end
+  end
+
+  class CapacityUnit
+    attr_accessor :read, :write
+    def initialize(read=0, write=0)
+      self.read = read
+      self.write = write
+    end
+  end
+
+  class ReservedThroughput
+    attr_accessor :capacity_unit
+    def initialize(capacity_unit)
+      self.capacity_unit = capacity_unit
+    end
+  end
+
   class RowExistenceExpectation
     IGNORE = "IGNORE"
     EXPECT_EXIST = "EXPECT_EXIST"
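The four new value classes describe a table to be created. A minimal sketch of how they fit together (assuming the gem is loaded; the string type names follow the inline comment in the diff, and 'demo_table' is just a placeholder):

table_meta = Metadata::TableMeta.new('demo_table',
                                     [['PK0', 'STRING'], ['PK1', 'INTEGER']])
options    = Metadata::TableOptions.new(-1, 1, 86_400)   # ttl, max versions, max time deviation (the defaults)
throughput = Metadata::ReservedThroughput.new(Metadata::CapacityUnit.new(0, 0))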
@@ -104,70 +137,38 @@ module Metadata
   end
 
   class Condition
+    attr_accessor :row_existence_expectation, :column_condition
+
     def initialize(row_existence_expectation, column_condition = nil)
-
-
+      self.row_existence_expectation = nil
+      self.column_condition = column_condition
 
       set_row_existence_expectation(row_existence_expectation)
-
-      set_column_condition(column_condition) if @column_condition
     end
 
     def set_row_existence_expectation(row_existence_expectation)
       raise TableStoreClientError.new("Expect input row_existence_expectation should be one of #{RowExistenceExpectation::MEMBERS.to_s}, but #{row_existence_expectation}") unless RowExistenceExpectation::VALUES.include? row_existence_expectation
-
+      self.row_existence_expectation = row_existence_expectation
     end
 
-    def set_column_condition(column_condition)
-      @column_condition = column_condition
-    end
-
-    def get_row_existence_expectation
-      @row_existence_expectation
-    end
-
-    def get_column_condition
-      @column_condition
-    end
   end
 
   class Row
+    attr_accessor :primary_key, :attribute_columns
     def initialize(primary_key, attribute_columns=nil)
-
-
+      self.primary_key = primary_key
+      self.attribute_columns = attribute_columns
     end
 
-    def primary_key
-      @primary_key
-    end
-
-    def attribute_columns
-      @attribute_columns
-    end
   end
 
   class RowItem
+    attr_accessor :type, :row, :condition, :return_type
     def initialize(row_type, row, condition, return_type = nil)
-
-
-
-
-    end
-
-    def type
-      @type
-    end
-
-    def condition
-      @condition
-    end
-
-    def row
-      @row
-    end
-
-    def return_type
-      @return_type
+      self.type = row_type
+      self.condition = condition
+      self.row = row
+      self.return_type = return_type
     end
   end
 
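Condition, Row and RowItem lose their hand-written getters and setters in favour of attr_accessor, so callers read and assign the fields directly. A brief sketch (the primary-key and attribute structures are only illustrative; the diff does not show the exact shapes the serializers expect):

condition = Metadata::Condition.new(Metadata::RowExistenceExpectation::IGNORE)
condition.row_existence_expectation          # direct reader, replaces get_row_existence_expectation
condition.column_condition = nil             # direct writer, replaces set_column_condition

row = Metadata::Row.new({ 'PK0' => 'a' }, { 'col1' => 'v' })
row.primary_key                              # replaces the old primary_key getter method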
@@ -306,23 +307,18 @@ module Metadata
   end
 
   class TableInBatchWriteRowItem
+    attr_accessor :table_name, :row_items
     def initialize(table_name, row_items)
-
-
-    end
-
-    def row_items
-      @row_items
-    end
-
-    def table_name
-      @table_name
+      self.table_name = table_name
+      self.row_items = row_items
     end
   end
 
   class BatchWriteRowRequest
+    attr_accessor :items
+
     def initialize
-
+      self.items = {}
     end
 
     def add(table_item)
@@ -335,12 +331,9 @@ module Metadata
         raise TableStoreClientError.new("The input table_item should be an instance of TableInBatchWriteRowItem, not #{table_item.class}")
       end
 
-
+      self.items[table_item.table_name] = table_item
     end
 
-    def items
-      @items
-    end
   end
 
   class TableInBatchGetRowItem
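BatchWriteRowRequest now keeps its table items in a plain hash keyed by table name. A sketch of the refactored containers (row_items would be Metadata::RowItem instances, built as shown further down in this diff):

request = Metadata::BatchWriteRowRequest.new                       # items starts out as {}
table_item = Metadata::TableInBatchWriteRowItem.new('demo_table', row_items)
request.add(table_item)                                            # stored under items['demo_table']
request.items.keys                                                 # => ['demo_table']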
@@ -168,11 +168,18 @@ class PlainBufferCodedOutputStream
         cell_check_sum = write_column_value_with_checksum(column_value, cell_check_sum)
       end
     end
+    if update_type == "DELETE"
+      write_tag(TAG_CELL_TYPE)
+      @output_stream.write_raw_byte(DELETE_ONE_VERSION)
+      cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, DELETE_ONE_VERSION)
+    elsif update_type == "DELETE_ALL"
+      write_tag(TAG_CELL_TYPE)
+      @output_stream.write_raw_byte(DELETE_ALL_VERSION)
+      cell_check_sum = PlainBufferCrc8.crc_int8(cell_check_sum, DELETE_ALL_VERSION)
+    end
     if timestamp
       write_tag(TAG_CELL_TIMESTAMP)
       @output_stream.write_raw_little_endian64(timestamp)
-    end
-    if timestamp
       cell_check_sum = PlainBufferCrc8.crc_int64(cell_check_sum, timestamp)
     end
     write_tag(TAG_CELL_CHECKSUM)
@@ -215,7 +222,7 @@ class PlainBufferCodedOutputStream
       elsif column.length == 3
         row_check_sum = write_update_column(update_type, column[0], [column[1], column[2]], row_check_sum)
       else
-
+        raise TableStoreClientError.new("Unsupported column format: #{column.to_s}")
       end
     end
   end
@@ -74,7 +74,9 @@ class PlainBufferInputStream
     raise TableStoreClientError.new("Read bytes encountered EOF.") if @buffer.length - @cur_pos < size
     utf_str = @buffer[@cur_pos, size]
     @cur_pos += size
-
+    if utf_str.is_a?(String)
+      utf_str = utf_str.force_encoding('UTF-8')
+    end
     utf_str
   end
 
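The new guard re-declares the encoding of the sliced bytes. Why it matters, in isolation: a slice of the raw plain-buffer comes back tagged ASCII-8BIT, so a UTF-8 column value would not compare equal to its UTF-8 literal until force_encoding is applied.

raw = "\xE4\xB8\xAD".b                 # binary string, like @buffer[@cur_pos, size]
raw == "中"                             # => false, encodings differ
raw.force_encoding('UTF-8') == "中"     # => true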
@@ -8,6 +8,52 @@ require 'tablestore/plain_buffer_coded_input_stream'
 require 'tablestore/plain_buffer_input_stream'
 
 class TableStoreClient
+
+  ##Encode
+  def encode_create_table(table_meta, table_options, reserved_throughput)
+    proto = CreateTableRequest.new
+
+    meta_proto = TableMeta.new
+    meta_proto.table_name = table_meta.table_name
+    meta_proto.table_name = table_meta.table_name
+    table_meta.schema_of_primary_key.each do |primary_key|
+      meta_proto.primary_key << make_schemas_with_list(primary_key)
+    end
+    proto.table_meta = meta_proto
+
+    proto.table_options = make_table_options(table_options)
+
+    rt_proto = ReservedThroughput.new
+    rt_proto.capacity_unit = make_capacity_unit(reserved_throughput.capacity_unit)
+    proto.reserved_throughput = rt_proto
+    proto.serialize_to_string
+  end
+
+  def encode_list_table
+    proto = ListTableRequest.new
+    proto.serialize_to_string
+  end
+
+  def encode_update_table(table_name, table_options, reserved_throughput)
+    proto = UpdateTableRequest.new
+    proto.table_name = table_name
+    if reserved_throughput
+      rt_proto = ReservedThroughput.new
+      rt_proto.capacity_unit = make_capacity_unit(reserved_throughput.capacity_unit)
+      proto.reserved_throughput = rt_proto
+    end
+    if table_options
+      proto.table_options = make_table_options(table_options)
+    end
+    proto.serialize_to_string
+  end
+
+  def encode_delete_table(table_name)
+    proto = DeleteTableRequest.new
+    proto.table_name = table_name
+    proto.serialize_to_string
+  end
+
   def encode_get_range_request(request)
     proto = GetRangeRequest.new
     proto.table_name = request[:table_name]
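Each encode_* helper builds the corresponding protobuf request and returns its serialized wire form; the wrappers added to tablestore-ruby-sdk.rb later in this diff post that body. For example, with the Metadata objects sketched earlier:

body = TableStoreClient.new.encode_create_table(table_meta, options, throughput)
body.class   # => String, the serialized CreateTableRequest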
@@ -22,31 +68,6 @@ class TableStoreClient
     proto.serialize_to_string
   end
 
-  def decode_get_range_request(api_name, headers, body)
-    proto = GetRangeResponse.new
-    proto.parse_from_string(body)
-    #capacity_unit = parse_capacity_unit(proto.consumed.capacity_unit)
-
-    next_start_pk = nil
-    row_list = []
-
-    # if proto.next_start_primary_key.length != 0
-    #   inputStream = PlainBufferInputStream.new(proto.next_start_primary_key)
-    #   codedInputStream = PlainBufferCodedInputStream.new(inputStream)
-    #   next_start_pk, att = codedInputStream.read_row
-    # end
-    if proto.rows.length != 0
-      inputStream = PlainBufferInputStream.new(proto.rows)
-      codedInputStream = PlainBufferCodedInputStream.new(inputStream)
-      row_list = codedInputStream.read_rows
-    end
-
-    #next_token = proto.next_token
-
-    row_list
-    #return capacity_unit, next_start_pk, row_list, next_token
-  end
-
   def encode_put_row(table_name, row, condition)
     proto = PutRowRequest.new
     proto.table_name = table_name
@@ -71,14 +92,57 @@ class TableStoreClient
 
   def encode_update_row(teble_name, row, condition)
     proto = UpdateRowRequest.new
-    proto.table_name =
+    proto.table_name = teble_name
     condition = Condition.new(RowExistenceExpectation::IGNORE) if condition.nil?
-
+    condition_proto = Condition.new
+    proto.condition = make_condition(condition_proto, condition)
     if return_type == ReturnType::RT_PK
-
+      return_content = ReturnContent.new
+      return_content.return_type = RT_PK
+      proto.return_content = return_content
     end
     proto.row_change = serialize_for_update_row(row.primary_key, row.attribute_columns)
-    proto
+    proto.serialize_to_string
+  end
+
+  def encode_delete_row(table_name, row, condition)
+    proto = DeleteRowRequest.new
+    proto.table_name = table_name
+    condition = Condition.new(RowExistenceExpectation::IGNORE) if condition.nil?
+    condition_proto = Condition.new
+    proto.condition = make_condition(condition_proto, condition)
+    proto.primary_key = serialize_for_delete_row(row.primary_key)
+    proto.serialize_to_string
+  end
+
+  ##Decode
+  def decode_list_table(body)
+    proto = ListTableResponse.new
+    proto.parse_from_string(body)
+    names = proto.table_names
+    names
+  end
+
+  def decode_get_range_request(body)
+    proto = GetRangeResponse.new
+    proto.parse_from_string(body)
+
+    next_start_pk = nil
+    row_list = []
+
+    if proto.next_start_primary_key.length != 0
+      inputStream = PlainBufferInputStream.new(proto.next_start_primary_key)
+      codedInputStream = PlainBufferCodedInputStream.new(inputStream)
+      next_start_pk, att = codedInputStream.read_row
+    end
+
+    if proto.rows.length != 0
+      inputStream = PlainBufferInputStream.new(proto.rows)
+      codedInputStream = PlainBufferCodedInputStream.new(inputStream)
+      row_list = codedInputStream.read_rows
+    end
+
+    return next_start_pk, row_list
   end
 
   def decode_get_row(body)
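decode_get_range_request, moved here and re-enabled, now returns the continuation key together with the rows, which makes range paging possible. A hypothetical caller:

next_start_pk, rows = TableStoreClient.new.decode_get_range_request(response.body)
# keep issuing GetRange requests from next_start_pk until it comes back nil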
@@ -106,6 +170,17 @@ class TableStoreClient
     return_row
   end
 
+  def decode_batch_get_row(body)
+    proto = BatchGetRowResponse.new
+    proto.parse_from_string(body)
+    rows = []
+    proto.tables.each do |table_item|
+      rows << parse_get_row_item(table_item.rows)
+    end
+    rows
+  end
+
+  ##Make
   def make_batch_get_row(request)
     proto = BatchGetRowRequest.new
     request.items.each do |item|
@@ -122,11 +197,6 @@ class TableStoreClient
         table_item.primary_key << serialize_primary_key(pk)
       end
 
-      if table_value.token
-        table_value.token.each do |tk|
-          table_item.token << tk
-        end
-      end
       if table_value.max_version
         table_item.max_versions = table_value.max_version
       end
@@ -149,16 +219,6 @@ class TableStoreClient
     proto.serialize_to_string
   end
 
-  def decode_batch_get_row(body)
-    proto = BatchGetRowResponse.new
-    proto.parse_from_string(body)
-    rows = []
-    proto.tables.each do |table_item|
-      rows << parse_get_row_item(table_item.rows)
-    end
-    rows
-  end
-
   def make_batch_write_row(request)
     proto = BatchWriteRowRequest.new
     request.items.each do |item|
@@ -175,6 +235,10 @@ class TableStoreClient
           row = RowInBatchWriteRowRequest.new
           table_item.rows << make_update_row_item(row, row_item)
         end
+        if row_item.type == Metadata::BatchWriteRowType::DELETE
+          row = RowInBatchWriteRowRequest.new
+          table_item.rows << make_delete_row_item(row, row_item)
+        end
       end
       proto.tables << table_item
     end
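With this branch, a batch write can carry delete entries. A hypothetical delete item, built from the Metadata classes earlier in this diff (the primary-key structure is an assumption):

delete_item = Metadata::RowItem.new(Metadata::BatchWriteRowType::DELETE,
                                    Metadata::Row.new({ 'PK0' => 'a' }),   # key-only row
                                    nil)                                   # condition defaults to IGNORE later
table_item = Metadata::TableInBatchWriteRowItem.new('demo_table', [delete_item])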
@@ -184,12 +248,14 @@ class TableStoreClient
   def make_put_row_item(proto, put_row_item)
     condition = put_row_item.condition
     if condition.nil?
-      condition =
+      condition = Condition.new(Metadata::RowExistenceExpectation::IGNORE)
     end
     condition_proto = Condition.new
     proto.condition = make_condition(condition_proto, condition)
     if put_row_item.return_type == ReturnType::RT_PK
-
+      return_content = ReturnContent.new
+      return_content.return_type = RT_PK
+      proto.return_content = return_content
     end
 
     proto.row_change = serialize_for_put_row(put_row_item.row.primary_key, put_row_item.row.attribute_columns)
@@ -200,20 +266,42 @@ class TableStoreClient
   def make_update_row_item(proto, update_row_item)
     condition = update_row_item.condition
     if condition.nil?
-      condition = Condition.new(RowExistenceExpectation::IGNORE
+      condition = Condition.new(RowExistenceExpectation::IGNORE)
     end
     condition_proto = Condition.new
     proto.condition = make_condition(condition_proto, condition)
 
     if update_row_item.return_type == ReturnType::RT_PK
-
+      return_content = ReturnContent.new
+      return_content.return_type = RT_PK
+      proto.return_content = return_content
     end
-
+    update_row_item.row.attribute_columns
     proto.row_change = serialize_for_update_row(update_row_item.row.primary_key, update_row_item.row.attribute_columns)
     proto.type = UPDATE
     proto
   end
 
+  def make_delete_row_item(proto, delete_row_item)
+    condition = delete_row_item.condition
+    if condition.nil?
+      condition = Metadata::Condition.new(RowExistenceExpectation::IGNORE)
+    end
+    condition_proto = Condition.new
+    proto.condition = make_condition(condition_proto, condition)
+
+    if delete_row_item.return_type == ReturnType::RT_PK
+      return_content = ReturnContent.new
+      return_content.return_type = RT_PK
+      proto.return_content = return_content
+    end
+
+    proto.row_change = serialize_for_delete_row(delete_row_item.row.primary_key)
+    proto.type = DELETE
+    proto
+
+  end
+
   def make_repeated_column_names(proto, columns_to_get)
     if columns_to_get.nil?
       return
@@ -226,11 +314,11 @@ class TableStoreClient
 
   def make_condition(proto, condition)
     raise TableStoreClientError.new("condition should be an instance of Condition, not #{condition.class}") unless condition.is_a?(Metadata::Condition)
-    expectation_str = condition.
+    expectation_str = condition.row_existence_expectation
     proto.row_existence = expectation_str
     raise TableStoreClientError.new("row_existence_expectation should be one of [#{join(', ')}], not #{expectation_str}") if proto.row_existence.nil?
 
-    if condition.
+    if condition.column_condition
       proto.column_condition = make_column_condition(condition.column_condition).serialize_to_string
     end
     proto
@@ -272,9 +360,52 @@ class TableStoreClient
     proto.filter_if_missing = !condition.pass_if_missing
     proto.latest_version_only = condition.latest_version_only
     proto.serialize_to_string
+  end
 
+  def make_schemas_with_list(schema)
+    schema_proto = PrimaryKeySchema.new
+    schema_proto.name = schema[0]
+    schema_proto.type = schema[1]
+    if schema.size == 3
+      schema_proto.option = 1
+    end
+    schema_proto
+  end
+
+  def make_table_options(options)
+    option_proto = TableOptions.new
+    unless options.is_a?(Metadata::TableOptions)
+      raise TableStoreClientError.new("table_option should be an instance of Meta::TableOptions, not #{options.class}" )
+    end
+    if options.time_to_live
+      unless options.time_to_live.is_a?(Fixnum)
+        raise TableStoreClientError("time_to_live should be an instance of int, not #{options.time_to_live.class}")
+      end
+      option_proto.time_to_live = options.time_to_live
+    end
+    if options.max_version
+      unless options.max_version.is_a?(Fixnum)
+        raise TableStoreClientError("max_version should be an instance of int, not #{options.max_version.class}")
+      end
+      option_proto.max_versions = options.max_version
+    end
+    if options.max_time_deviation
+      unless options.max_time_deviation.is_a?(Fixnum)
+        raise TableStoreClientError("max_time_deviation should be an instance of int, not #{options.max_version.class}")
+      end
+      option_proto.deviation_cell_version_in_sec = options.max_time_deviation
+    end
+    option_proto
+  end
+
+  def make_capacity_unit(capacity_unit)
+    proto = CapacityUnit.new
+    proto.read = capacity_unit.read if capacity_unit.read
+    proto.write = capacity_unit.write if capacity_unit.write
+    proto
   end
 
+  ##Parse
   def parse_get_row_item(proto)
     row_list = []
     proto.each do |row_item|
@@ -374,7 +505,7 @@ class TableStoreClient
           raise TableStoreClientError.new("the columns value of update-row must be hash, but is #{attribute_columns[key].class}")
         end
         attribute_columns[key].each do |cell|
-          if cell.is_a?(Array)
+          if key.upcase != "DELETE" and key.upcase != "DELETE_ALL" && !cell.is_a?(Array)
             raise TableStoreClientError.new("the cell of update-row must be array, but is #{cell.class}")
           end
         end
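The relaxed check means update-row attribute hashes may now mix column updates with deletions. A hedged sketch of the shape this validation implies; only the DELETE / DELETE_ALL key handling is taken from the diff, while the 'put' key name and cell layout are assumptions:

attribute_columns = {
  'put'        => [['col1', 'new value']],   # ordinary keys: cells must be arrays
  'DELETE'     => ['col2'],                  # delete one version: bare column names allowed
  'DELETE_ALL' => ['col3']                   # delete all versions
}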
@@ -455,6 +586,21 @@ class TableStoreClient
     output_stream.get_buffer.join('')
   end
 
+  def serialize_for_delete_row(primary_key)
+    buf_size = compute_delete_row_size(primary_key)
+    output_stream = PlainBufferOutputStream.new(buf_size)
+    coded_output_stream = PlainBufferCodedOutputStream.new(output_stream)
+
+    row_checksum = 0
+    coded_output_stream.write_header
+    row_checksum = coded_output_stream.write_primary_key(primary_key, row_checksum)
+    row_checksum = coded_output_stream.write_delete_marker(row_checksum)
+    coded_output_stream.write_row_checksum(row_checksum)
+
+    output_stream.get_buffer.join('')
+  end
+
+  ##Compute
   def compute_put_row_size(primary_key, attribute_columns)
     size = LITTLE_ENDIAN_SIZE
     size += compute_primary_key_size(primary_key)
@@ -534,7 +680,17 @@ class TableStoreClient
   end
 
   def compute_column_size2(column_name, column_value, update_type)
-    compute_column_size(column_name, column_value)
+    size = compute_column_size(column_name, column_value)
+    if update_type == "DELETE" || update_type == "DELETE_ALL"
+      size += 2
+    end
+    size
   end
 
+  def compute_delete_row_size(primary_key)
+    size = LITTLE_ENDIAN_SIZE
+    size += compute_primary_key_size(primary_key)
+    size += 3
+    size
+  end
 end
data/lib/tablestore-ruby-sdk.rb
CHANGED
@@ -39,11 +39,47 @@ class TableStore
     # client = TableStoreClient('your_instance_endpoint', 'your_user_id', 'your_user_key', 'your_instance_name')
   end
 
+  ##tables
+  def _create_table(table_meta, table_option, reserved_throughput)
+    api_name = 'CreateTable'
+    body = TableStoreClient.new.encode_create_table(table_meta, table_option, reserved_throughput)
+    response = post_request(body, api_name)
+    if response.code == 200
+      "create table #{table_meta.table_name} succeed!"
+    end
+  end
+
+  def _list_table
+    api_name = "ListTable"
+    body = TableStoreClient.new.encode_list_table
+    response = post_request(body, api_name)
+    TableStoreClient.new.decode_list_table(response.body)
+  end
+
+  def _update_table(table_name, table_option, reserved_throughput=nil)
+    api_name = 'UpdateTable'
+    body = TableStoreClient.new.encode_update_table(table_name, table_option, reserved_throughput)
+    response = post_request(body, api_name)
+    if response.code == 200
+      "update table #{table_name} succeed!"
+    end
+  end
+
+  def _delete_table(table_name)
+    api_name = 'DeleteTable'
+    body = TableStoreClient.new.encode_delete_table(table_name)
+    response = post_request(body, api_name)
+    if response.code == 200
+      "delete table #{table_name} succeed!"
+    end
+  end
+
+  ##rows
   def _get_range(request)
     api_name = 'GetRange'
     body = TableStoreClient.new.encode_get_range_request(request)
     response = post_request(body, api_name)
-    TableStoreClient.new.decode_get_range_request(
+    TableStoreClient.new.decode_get_range_request(response.body)
   end
 
   def _put_row(table_name, row, condition)
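Putting the new table-management wrappers together on a configured client (ts and its construction are assumed here; the commented-out line near the top of this file only hints at it):

ts._create_table(table_meta, options, throughput)   # => "create table demo_table succeed!" on HTTP 200
ts._list_table                                      # => decoded list of table names
ts._update_table('demo_table', Metadata::TableOptions.new(86_400))
ts._delete_table('demo_table')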
@@ -71,6 +107,15 @@ class TableStore
     end
   end
 
+  def _delete_row(table_name, row, condition)
+    api_name = 'DeleteRow'
+    body = TableStoreClient.new.encode_delete_row(table_name, row, condition)
+    response = post_request(body, api_name)
+    if response.code == 200
+      'delete succeed!'
+    end
+  end
+
   def _batch_get_row(request)
     api_name = 'BatchGetRow'
     body = TableStoreClient.new.make_batch_get_row(request)
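A hypothetical single-row delete through the new wrapper; passing nil as the condition lets encode_delete_row fall back to RowExistenceExpectation::IGNORE:

ts._delete_row('demo_table', Metadata::Row.new({ 'PK0' => 'a' }), nil)   # => 'delete succeed!' on HTTP 200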
@@ -112,7 +157,7 @@ class TableStore
     signature_string += headers_string + "\n"
     salt1 = OpenSSL::HMAC.digest('sha1', access_key_secret, signature_string)
     signature = Base64.encode64(salt1).gsub(/\n/, '')
-    headers.merge!({'User-Agent': 'aliyun-tablestore-sdk-ruby', 'x-ots-signature': signature})
+    headers.merge!({'User-Agent': 'aliyun-tablestore-sdk-ruby', 'x-ots-signature': signature, "Content-Type": 'application/x-www-form-urlencoded',})
     headers
   end
 end
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: tablestore-ruby-sdk
 version: !ruby/object:Gem::Version
-  version: 0.0.5
+  version: 0.0.6
 platform: ruby
 authors:
 - seveninches
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-01-
+date: 2018-01-18 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rest-client