riak-client 1.0.2 → 1.0.3
- data/RELEASE_NOTES.md +32 -3
- data/erl_src/riak_kv_test_backend.beam +0 -0
- data/erl_src/riak_kv_test_backend.erl +255 -120
- data/erl_src/riak_search_test_backend.beam +0 -0
- data/erl_src/riak_search_test_backend.erl +3 -3
- data/lib/riak/client/excon_backend.rb +40 -7
- data/lib/riak/client/net_http_backend.rb +2 -3
- data/lib/riak/node/configuration.rb +6 -0
- data/lib/riak/node/console.rb +0 -7
- data/lib/riak/robject.rb +12 -10
- data/lib/riak/test_server.rb +7 -1
- data/lib/riak/version.rb +1 -1
- data/riak-client.gemspec +1 -1
- data/spec/integration/riak/http_backends_spec.rb +51 -0
- data/spec/integration/riak/node_spec.rb +16 -0
- data/spec/riak/excon_backend_spec.rb +18 -13
- data/spec/riak/node_spec.rb +1 -1
- data/spec/riak/robject_spec.rb +8 -0
- data/spec/support/unified_backend_examples.rb +28 -0
- data/spec/support/version_filter.rb +6 -3
- metadata +664 -5
data/RELEASE_NOTES.md
CHANGED
@@ -1,5 +1,34 @@
 # Riak Ruby Client Release Notes
 
+## 1.0.3 Patch/Bugfix Release - 2012-04-17
+
+Release 1.0.3 fixes some bugs and adds support for secondary indexes
+when using `Riak::TestServer`.
+
+* Added tests for secondary index features to the unified backend
+  examples.
+* Added secondary index support to `riak_kv_test_backend`. Full
+  support for this feature will be available via
+  `riak_kv_memory_backend` in the next major Riak release. See
+  [riak_kv #314](https://github.com/basho/riak_kv/pull/314).
+* The console log (`lager_console_backend`) is now enabled on
+  generated nodes.
+* `Riak::Node::Console` no longer overrides the `SIGWINCH` signal
+  handler.
+* [Excon](http://rubygems.org/gems/excon) versions >= 0.7.0 are now
+  supported.
+* IO-style objects will now be emitted properly when using the
+  `NetHTTPBackend`. [#1](https://github.com/basho/riak-ruby-client/issues/1)
+* The Riak version filter for integration specs is now more correct.
+* `Riak::RObject#url` has been removed because its accuracy cannot be
+  maintained when connected to multiple Riak nodes or to Riak via
+  PBC. [#3](https://github.com/basho/riak-ruby-client/issues/3)
+* Index entries on `Riak::RObject` can be mass-overwritten using
+  `Riak::RObject#indexes=` while maintaining the proper internal
+  semantics. [#17](https://github.com/basho/riak-ruby-client/issues/17)
+* Nodes should now generate properly when the `riak` script is a
+  symlink (e.g. Homebrew). [#26](https://github.com/basho/riak-ruby-client/issues/26)
+
 ## 1.0.2 Repackaging - 2012-04-02
 
 Release 1.0.2 relaxes the multi_json dependency so that the client
@@ -97,16 +126,16 @@ The new gem and repository locations are below:
 * [`ripple`](http://rubygems.org/gems/ripple) —
   [seancribbs/ripple](https://github.com/seancribbs/ripple)
 * [`riak-sessions`](http://rubygems.org/gems/riak-sessions) —
-  [seancribbs/riak-sessions](https://github.com/seancribbs/riak-sessions)
+  [seancribbs/riak-sessions](https://github.com/seancribbs/riak-sessions)
 * [`riak-cache`](http://rubygems.org/gems/riak-cache) —
-  [seancribbs/riak-cache](https://github.com/seancribbs/riak-cache)
+  [seancribbs/riak-cache](https://github.com/seancribbs/riak-cache)
 
 ### Significant Known Issues
 
 Attempting to use the Protocol Buffers transport with a 0.14.x cluster
 may cause the connection to dump because of incompatibilities in
 certain protocol messages. This will be addressed in a future
-patch/bugfix release.
+patch/bugfix release.
 
 The new node generation and test server intermittently fails on JRuby,
 specifically from deadlocks related to blocking opens for the console
data/erl_src/riak_kv_test_backend.beam
CHANGED
Binary file
data/erl_src/riak_kv_test_backend.erl
CHANGED
@@ -1,6 +1,6 @@
 %% -------------------------------------------------------------------
 %%
-%%
+%% riak_kv_test_backend: storage engine using ETS tables, for use in testing.
 %%
 %% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved.
 %%
@@ -32,6 +32,7 @@
 %% <ul>
 %% <li>`ttl' - The time in seconds that an object should live before being expired.</li>
 %% <li>`max_memory' - The amount of memory in megabytes to limit the backend to.</li>
+%% <li>`test' - When `true', exposes the internal ETS tables so that they can be efficiently cleared using {@link reset/3}.</li>
 %% </ul>
 %%
 
@@ -40,6 +41,8 @@
 
 %% KV Backend API
 -export([api_version/0,
+         capabilities/1,
+         capabilities/2,
          start/2,
          stop/1,
         get/3,
@@ -51,20 +54,30 @@
          fold_objects/4,
          is_empty/1,
          status/1,
-         callback/3
-
-
-
+         callback/3]).
+
+%% "Testing" backend API
+-export([reset/0]).
 
 -ifdef(TEST).
 -include_lib("eunit/include/eunit.hrl").
 -endif.
 
 -define(API_VERSION, 1).
--define(CAPABILITIES, [async_fold]).
+-define(CAPABILITIES, [async_fold, indexes]).
+
+%% Macros for working with indexes
+-define(DELETE_PTN(B,K), {{B,'_','_',K},'_'}).
 
-
-
+%% ETS table name macros so we can break encapsulation for testing
+%% mode
+-define(DNAME(P), list_to_atom("riak_kv_"++integer_to_list(P))).
+-define(INAME(P), list_to_atom("riak_kv_"++integer_to_list(P)++"_i")).
+-define(TNAME(P), list_to_atom("riak_kv_"++integer_to_list(P)++"_t")).
+
+-record(state, {data_ref :: ets:tid(),
+                index_ref :: ets:tid(),
+                time_ref :: ets:tid(),
                 max_memory :: undefined | integer(),
                 used_memory=0 :: integer(),
                 ttl :: integer()}).
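The macros and record above set up the secondary-index storage: every index entry becomes one posting keyed {Bucket, Field, Value, Key} in the ordered_set index table, and ?DELETE_PTN is the match pattern used later to wipe all postings for one bucket/key without knowing its index specs. For illustration only (not part of the diff; bucket, field, and key names are invented), a standalone sketch of that layout:

-module(delete_ptn_sketch).
-export([run/0]).

%% Same pattern as the backend's ?DELETE_PTN: match any field/value
%% posting for the given bucket and key.
-define(DELETE_PTN(B,K), {{B,'_','_',K},'_'}).

run() ->
    IndexRef = ets:new(index_sketch, [ordered_set]),
    %% One posting per {add, Field, Value} index spec.
    true = ets:insert(IndexRef, {{<<"users">>, <<"age_int">>, 30, <<"bob">>}, <<>>}),
    true = ets:insert(IndexRef, {{<<"users">>, <<"email_bin">>, <<"bob@example.com">>, <<"bob">>}, <<>>}),
    true = ets:insert(IndexRef, {{<<"users">>, <<"age_int">>, 25, <<"alice">>}, <<>>}),
    %% Blindly drop every posting for "bob"; the '_' wildcards match
    %% any field and value.
    true = ets:match_delete(IndexRef, ?DELETE_PTN(<<"users">>, <<"bob">>)),
    ets:tab2list(IndexRef). %% only alice's posting remains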
@@ -76,20 +89,11 @@
 %% Public API
 %% ===================================================================
 
-%% TestServer reset
-
--spec reset() -> ok | {error, timeout}.
-reset() ->
-    {ok, Ring} = riak_core_ring_manager:get_my_ring(),
-    [ catch ets:delete_all_objects(list_to_atom("kv" ++ integer_to_list(P))) ||
-        P <- riak_core_ring:my_indices(Ring) ],
-    ok.
-
 %% KV Backend API
 
 %% @doc Return the major version of the
-%% current API
--spec api_version() -> {ok, integer()}
+%% current API.
+-spec api_version() -> {ok, integer()}.
 api_version() ->
     case lists:member({capabilities, 1}, riak_kv_backend:behaviour_info(callbacks)) of
         true -> % Using 1.1 API or later
@@ -110,23 +114,33 @@ capabilities(_, _) ->
 
 %% @doc Start the memory backend
 -spec start(integer(), config()) -> {ok, state()}.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+%% Bug in riak_kv_vnode in 1.0
+start(Partition, [{async_folds,_}=AFolds, Rest]) when is_list(Rest) ->
+    start(Partition, [AFolds|Rest]);
+start(Partition, Config) ->
+    TTL = app_helper:get_prop_or_env(ttl, Config, memory_backend),
+    MemoryMB = app_helper:get_prop_or_env(max_memory, Config, memory_backend),
+    TableOpts = case app_helper:get_prop_or_env(test, Config, memory_backend) of
+                    true ->
+                        [ordered_set, public, named_table];
+                    _ ->
+                        [ordered_set]
+                end,
+    case MemoryMB of
+        undefined ->
+            MaxMemory = undefined,
+            TimeRef = undefined;
+        _ ->
+            MaxMemory = MemoryMB * 1024 * 1024,
+            TimeRef = ets:new(?TNAME(Partition), TableOpts)
+    end,
+    IndexRef = ets:new(?INAME(Partition), TableOpts),
+    DataRef = ets:new(?DNAME(Partition), TableOpts),
+    {ok, #state{data_ref=DataRef,
+                index_ref=IndexRef,
+                max_memory=MaxMemory,
+                time_ref=TimeRef,
+                ttl=TTL}}.
 
 %% @doc Stop the memory backend
 -spec stop(state()) -> ok.
@@ -148,14 +162,27 @@ stop(#state{data_ref=DataRef,
                  {ok, not_found, state()} |
                  {error, term(), state()}.
 get(Bucket, Key, State=#state{data_ref=DataRef,
+                              index_ref=IndexRef,
+                              used_memory=UsedMemory,
+                              max_memory=MaxMemory,
                               ttl=TTL}) ->
     case ets:lookup(DataRef, {Bucket, Key}) of
         [] -> {error, not_found, State};
-        [{{Bucket, Key}, {{ts, Timestamp}, Val}}] ->
+        [{{Bucket, Key}, {{ts, Timestamp}, Val}}=Object] ->
             case exceeds_ttl(Timestamp, TTL) of
                 true ->
-
-
+                    %% Because we do not have the IndexSpecs, we must
+                    %% delete the object directly and all index
+                    %% entries blindly using match_delete.
+                    ets:delete(DataRef, {Bucket, Key}),
+                    ets:match_delete(IndexRef, ?DELETE_PTN(Bucket, Key)),
+                    case MaxMemory of
+                        undefined ->
+                            UsedMemory1 = UsedMemory;
+                        _ ->
+                            UsedMemory1 = UsedMemory - object_size(Object)
+                    end,
+                    {error, not_found, State#state{used_memory=UsedMemory1}};
                 false ->
                     {ok, Val, State}
             end;
@@ -166,18 +193,15 @@ get(Bucket, Key, State=#state{data_ref=DataRef,
     end.
 
 %% @doc Insert an object into the memory backend.
-%% NOTE: The memory backend does not currently
-%% support secondary indexing and the _IndexSpecs
-%% parameter is ignored.
 -type index_spec() :: {add, Index, SecondaryKey} | {remove, Index, SecondaryKey}.
 -spec put(riak_object:bucket(), riak_object:key(), [index_spec()], binary(), state()) ->
-         {ok, state()}
-
-
-
-
-
-
+         {ok, state()}.
+put(Bucket, PrimaryKey, IndexSpecs, Val, State=#state{data_ref=DataRef,
+                                                       index_ref=IndexRef,
+                                                       max_memory=MaxMemory,
+                                                       time_ref=TimeRef,
+                                                       ttl=TTL,
+                                                       used_memory=UsedMemory}) ->
     Now = now(),
     case TTL of
         undefined ->
@@ -185,36 +209,29 @@ put(Bucket, PrimaryKey, _IndexSpecs, Val, State=#state{data_ref=DataRef,
         _ ->
             Val1 = {{ts, Now}, Val}
     end,
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            end,
-            {ok, State#state{used_memory=UsedMemory1}};
-        {error, Reason} ->
-            {error, Reason, State}
-    end.
+    {ok, Size} = do_put(Bucket, PrimaryKey, Val1, IndexSpecs, DataRef, IndexRef),
+    case MaxMemory of
+        undefined ->
+            UsedMemory1 = UsedMemory;
+        _ ->
+            time_entry(Bucket, PrimaryKey, Now, TimeRef),
+            Freed = trim_data_table(MaxMemory,
+                                    UsedMemory + Size,
+                                    DataRef,
+                                    TimeRef,
+                                    IndexRef,
+                                    0),
+            UsedMemory1 = UsedMemory + Size - Freed
+    end,
+    {ok, State#state{used_memory=UsedMemory1}}.
 
 %% @doc Delete an object from the memory backend
-%% NOTE: The memory backend does not currently
-%% support secondary indexing and the _IndexSpecs
-%% parameter is ignored.
 -spec delete(riak_object:bucket(), riak_object:key(), [index_spec()], state()) ->
              {ok, state()}.
-delete(Bucket, Key,
-
-
+delete(Bucket, Key, IndexSpecs, State=#state{data_ref=DataRef,
+                                             index_ref=IndexRef,
+                                             time_ref=TimeRef,
+                                             used_memory=UsedMemory}) ->
     case TimeRef of
         undefined ->
             UsedMemory1 = UsedMemory;
@@ -231,6 +248,7 @@ delete(Bucket, Key, _IndexSpecs, State=#state{data_ref=DataRef,
             UsedMemory1 = UsedMemory
         end
     end,
+    update_indexes(Bucket, Key, IndexSpecs, IndexRef),
     ets:delete(DataRef, {Bucket, Key}),
     {ok, State#state{used_memory=UsedMemory1}}.
 
@@ -259,15 +277,33 @@ fold_buckets(FoldBucketsFun, Acc, Opts, #state{data_ref=DataRef}) ->
                 any(),
                 [{atom(), term()}],
                 state()) -> {ok, term()} | {async, fun()}.
-fold_keys(FoldKeysFun, Acc, Opts, #state{data_ref=DataRef
-
-
+fold_keys(FoldKeysFun, Acc, Opts, #state{data_ref=DataRef,
+                                          index_ref=IndexRef}) ->
+
+    %% Figure out how we should limit the fold: by bucket, by
+    %% secondary index, or neither (fold across everything.)
+    Bucket = lists:keyfind(bucket, 1, Opts),
+    Index = lists:keyfind(index, 1, Opts),
+
+    %% Multiple limiters may exist. Take the most specific limiter,
+    %% get an appropriate folder function.
+    Folder = if
+                 Index /= false ->
+                     FoldFun = fold_keys_fun(FoldKeysFun, Index),
+                     get_index_folder(FoldFun, Acc, Index, DataRef, IndexRef);
+                 Bucket /= false ->
+                     FoldFun = fold_keys_fun(FoldKeysFun, Bucket),
+                     get_folder(FoldFun, Acc, DataRef);
+                 true ->
+                     FoldFun = fold_keys_fun(FoldKeysFun, undefined),
+                     get_folder(FoldFun, Acc, DataRef)
+             end,
+
     case lists:member(async_fold, Opts) of
         true ->
-            {async,
+            {async, Folder};
         false ->
-
-            {ok, Acc0}
+            {ok, Folder()}
     end.
 
 %% @doc Fold over all the objects for one or all buckets.
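fold_keys/4 above picks its folder from the options it is given: a {bucket, B} limiter, an {index, B, Query} limiter, or neither. For illustration only (not part of the diff; bucket and field names are invented), option lists in the shapes those clauses expect:

-module(fold_opts_sketch).
-export([examples/0]).

%% Hypothetical fold option lists; the tuple shapes mirror what
%% fold_keys/4 and fold_keys_fun/2 handle.
examples() ->
    [[async_fold, {bucket, <<"users">>}],                                       %% keys in one bucket
     [async_fold, {index, <<"users">>, {eq, <<"email_bin">>, <<"a@b.com">>}}],  %% 2i exact match
     [async_fold, {index, <<"users">>, {range, <<"age_int">>, 21, 40}}],        %% 2i range
     [async_fold, {index, <<"users">>, {eq, <<"$bucket">>, <<"users">>}}],      %% special $bucket index
     [async_fold, {index, <<"users">>, {range, <<"$key">>, <<"a">>, <<"m">>}}]].%% special $key range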
@@ -289,8 +325,10 @@ fold_objects(FoldObjectsFun, Acc, Opts, #state{data_ref=DataRef}) ->
 %% @doc Delete all objects from this memory backend
 -spec drop(state()) -> {ok, state()}.
 drop(State=#state{data_ref=DataRef,
+                  index_ref=IndexRef,
                   time_ref=TimeRef}) ->
     ets:delete_all_objects(DataRef),
+    ets:delete_all_objects(IndexRef),
     case TimeRef of
         undefined ->
             ok;
@@ -308,14 +346,18 @@ is_empty(#state{data_ref=DataRef}) ->
 %% @doc Get the status information for this memory backend
 -spec status(state()) -> [{atom(), term()}].
 status(#state{data_ref=DataRef,
+              index_ref=IndexRef,
               time_ref=TimeRef}) ->
     DataStatus = ets:info(DataRef),
+    IndexStatus = ets:info(IndexRef),
     case TimeRef of
         undefined ->
-            [{data_table_status, DataStatus}
+            [{data_table_status, DataStatus},
+             {index_table_status, IndexStatus}];
         _ ->
             TimeStatus = ets:info(TimeRef),
             [{data_table_status, DataStatus},
+             {index_table_status, IndexStatus},
              {time_table_status, TimeStatus}]
     end.
 
@@ -324,6 +366,24 @@ status(#state{data_ref=DataRef,
 callback(_Ref, _Msg, State) ->
     {ok, State}.
 
+%% @doc Resets state of all running memory backends on the local
+%% node. The `riak_kv' environment variable `memory_backend' must
+%% contain the `test' property, set to `true' for this to work.
+-spec reset() -> ok | {error, reset_disabled}.
+reset() ->
+    reset(app_helper:get_env(memory_backend, test, app_helper:get_env(riak_kv, test)), app_helper:get_env(riak_kv, storage_backend)).
+
+reset(true, ?MODULE) ->
+    {ok, Ring} = riak_core_ring_manager:get_my_ring(),
+    [ begin
+          catch ets:delete_all_objects(?DNAME(I)),
+          catch ets:delete_all_objects(?INAME(I)),
+          catch ets:delete_all_objects(?TNAME(I))
+      end || I <- riak_core_ring:my_indices(Ring) ],
+    ok;
+reset(_, _) ->
+    {error, reset_disabled}.
+
 %% ===================================================================
 %% Internal functions
 %% ===================================================================
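reset/0 only clears the tables when the node is configured with the test backend and the `test' flag, which is how Riak::TestServer wipes data between test runs without restarting the node. A hedged example of calling it by hand from an attached console (the exact invocation the gem drives over the console pipe is not shown here):

%% riak attach, on a node configured as in the doc comment above:
riak_kv_test_backend:reset().
%% => ok
%% On any other node it refuses:
%% => {error, reset_disabled}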
@@ -349,32 +409,43 @@ fold_buckets_fun(FoldBucketsFun) ->
 %% Return a function to fold over keys on this backend
 fold_keys_fun(FoldKeysFun, undefined) ->
     fun({{Bucket, Key}, _}, Acc) ->
-            FoldKeysFun(Bucket, Key, Acc)
+            FoldKeysFun(Bucket, Key, Acc);
+       (_, Acc) ->
+            Acc
     end;
-fold_keys_fun(FoldKeysFun,
-    fun({{
-
-
-
-
-
-
+fold_keys_fun(FoldKeysFun, {bucket, FilterBucket}) ->
+    fun({{Bucket, Key}, _}, Acc) when Bucket == FilterBucket ->
+            FoldKeysFun(Bucket, Key, Acc);
+       (_, Acc) ->
+            Acc
+    end;
+fold_keys_fun(FoldKeysFun, {index, FilterBucket, {eq, <<"$bucket">>, _}}) ->
+    %% 2I exact match query on special $bucket field...
+    fold_keys_fun(FoldKeysFun, {bucket, FilterBucket});
+fold_keys_fun(FoldKeysFun, {index, FilterBucket, {range, <<"$key">>, _, _}}) ->
+    %% 2I range query on special $key field...
+    fold_keys_fun(FoldKeysFun, {bucket, FilterBucket});
+fold_keys_fun(FoldKeysFun, {index, _FilterBucket, _Query}) ->
+    fun({{Bucket, _FilterField, _FilterTerm, Key}, _}, Acc) ->
+            FoldKeysFun(Bucket, Key, Acc);
+       (_, Acc) ->
+            Acc
     end.
 
+
 %% @private
 %% Return a function to fold over keys on this backend
 fold_objects_fun(FoldObjectsFun, undefined) ->
     fun({{Bucket, Key}, Value}, Acc) ->
-            FoldObjectsFun(Bucket, Key, Value, Acc)
+            FoldObjectsFun(Bucket, Key, Value, Acc);
+       (_, Acc) ->
+            Acc
     end;
-fold_objects_fun(FoldObjectsFun,
-    fun({{
-
-
-
-        false ->
-            Acc
-    end
+fold_objects_fun(FoldObjectsFun, FilterBucket) ->
+    fun({{Bucket, Key}, Value}, Acc) when Bucket == FilterBucket->
+            FoldObjectsFun(Bucket, Key, Value, Acc);
+       (_, Acc) ->
+            Acc
     end.
 
 %% @private
@@ -384,29 +455,90 @@ get_folder(FoldFun, Acc, DataRef) ->
     end.
 
 %% @private
-
-
-
-
+get_index_folder(Folder, Acc0, {index, Bucket, {eq, <<"$bucket">>, _}}, DataRef, _) ->
+    %% For the special $bucket index, turn it into a fold over the
+    %% data table.
+    fun() ->
+            key_range_folder(Folder, Acc0, DataRef, {Bucket, <<>>}, Bucket)
+    end;
+get_index_folder(Folder, Acc0, {index, Bucket, {range, <<"$key">>, Min, Max}}, DataRef, _) ->
+    %% For the special range lookup on the $key index, turn it into a
+    %% fold on the data table
+    fun() ->
+            key_range_folder(Folder, Acc0, DataRef, {Bucket, Min}, {Bucket, Min, Max})
+    end;
+get_index_folder(Folder, Acc0, {index, Bucket, {eq, Field, Term}}, _, IndexRef) ->
+    fun() ->
+            index_range_folder(Folder, Acc0, IndexRef, {Bucket, Field, Term, undefined}, {Bucket, Field, Term, Term})
+    end;
+get_index_folder(Folder, Acc0, {index, Bucket, {range, Field, Min, Max}}, _, IndexRef) ->
+    fun() ->
+            index_range_folder(Folder, Acc0, IndexRef, {Bucket, Field, Min, undefined}, {Bucket, Field, Min, Max})
+    end.
+
 
+%% Iterates over a range of keys, for the special $key and $bucket
+%% indexes.
 %% @private
-
-
+-spec key_range_folder(function(), term(), ets:tid(), {riak_object:bucket(), riak_object:key()}, binary() | {riak_object:bucket(), term(), term()}) -> term().
+key_range_folder(Folder, Acc0, DataRef, {B,_}=DataKey, B) ->
+    case ets:lookup(DataRef, DataKey) of
+        [] ->
+            key_range_folder(Folder, Acc0, DataRef, ets:next(DataRef, DataKey), B);
+        [Object] ->
+            Acc = Folder(Object, Acc0),
+            key_range_folder(Folder, Acc, DataRef, ets:next(DataRef, DataKey), B)
+    end;
+key_range_folder(Folder, Acc0, DataRef, {B,K}=DataKey, {B, Min, Max}=Query) when K >= Min, K =< Max ->
+    case ets:lookup(DataRef, DataKey) of
+        [] ->
+            key_range_folder(Folder, Acc0, DataRef, ets:next(DataRef, DataKey), Query);
+        [Object] ->
+            Acc = Folder(Object, Acc0),
+            key_range_folder(Folder, Acc, DataRef, ets:next(DataRef, DataKey), Query)
+    end;
+key_range_folder(_Folder, Acc, _DataRef, _DataKey, _Query) ->
+    Acc.
+
+%% Iterates over a range of index postings
+index_range_folder(Folder, Acc0, IndexRef, {B, I, V, _K}=IndexKey, {B, I, Min, Max}=Query) when V >= Min, V =< Max ->
+    case ets:lookup(IndexRef, IndexKey) of
+        [] ->
+            %% This will happen on the first iteration, where the key
+            %% does not exist. In all other cases, ETS will give us a
+            %% real key from next/2.
+            index_range_folder(Folder, Acc0, IndexRef, ets:next(IndexRef, IndexKey), Query);
+        [Posting] ->
+            Acc = Folder(Posting, Acc0),
+            index_range_folder(Folder, Acc, IndexRef, ets:next(IndexRef, IndexKey), Query)
+    end;
+index_range_folder(_Folder, Acc, _IndexRef, _IndexKey, _Query) ->
+    Acc.
+
 
 %% @private
-
-
-
-
-
-            Value
-    end.
+do_put(Bucket, Key, Val, IndexSpecs, DataRef, IndexRef) ->
+    Object = {{Bucket, Key}, Val},
+    true = ets:insert(DataRef, Object),
+    update_indexes(Bucket, Key, IndexSpecs, IndexRef),
+    {ok, object_size(Object)}.
 
 %% Check if this timestamp is past the ttl setting.
 exceeds_ttl(Timestamp, TTL) ->
     Diff = (timer:now_diff(now(), Timestamp) / 1000 / 1000),
     Diff > TTL.
 
+update_indexes(_Bucket, _Key, undefined, _IndexRef) ->
+    ok;
+update_indexes(_Bucket, _Key, [], _IndexRef) ->
+    ok;
+update_indexes(Bucket, Key, [{remove, Field, Value}|Rest], IndexRef) ->
+    true = ets:delete(IndexRef, {Bucket, Field, Value, Key}),
+    update_indexes(Bucket, Key, Rest, IndexRef);
+update_indexes(Bucket, Key, [{add, Field, Value}|Rest], IndexRef) ->
+    true = ets:insert(IndexRef, {{Bucket, Field, Value, Key}, <<>>}),
+    update_indexes(Bucket, Key, Rest, IndexRef).
+
 %% @private
 time_entry(Bucket, Key, Now, TimeRef) ->
     ets:insert(TimeRef, {Now, {Bucket, Key}}).
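index_range_folder/5 above leans on the ordered_set table: it starts from a synthetic low key (the atom `undefined' sorts before any binary object key) and walks forward with ets:next/2 until the indexed value leaves the requested range. A standalone sketch of that traversal, not part of the diff, with invented bucket, field, and values:

-module(range_walk_sketch).
-export([run/0]).

%% Walk postings keyed {Bucket, Field, Value, Key} the same way
%% index_range_folder/5 does.
run() ->
    T = ets:new(postings_sketch, [ordered_set]),
    [true = ets:insert(T, {{<<"users">>, <<"age_int">>, V, K}, <<>>}) ||
        {V, K} <- [{21, <<"alice">>}, {25, <<"bob">>}, {40, <<"carol">>}]],
    %% Collect keys whose age_int value lies in [20, 30], starting
    %% below the range with an `undefined' key sentinel.
    walk(T, {<<"users">>, <<"age_int">>, 20, undefined}, 20, 30, []).
    %% => [<<"alice">>, <<"bob">>]

walk(T, Key, Min, Max, Acc) ->
    case ets:lookup(T, Key) of
        [{{_, _, V, K}, _}] when V >= Min, V =< Max ->
            step(T, Key, Min, Max, [K | Acc]);
        [] ->
            %% First iteration only: the sentinel key is not in the table.
            step(T, Key, Min, Max, Acc);
        _ ->
            %% The value left the range; stop.
            lists:reverse(Acc)
    end.

step(T, Key, Min, Max, Acc) ->
    case ets:next(T, Key) of
        '$end_of_table' -> lists:reverse(Acc);
        Next -> walk(T, Next, Min, Max, Acc)
    end.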
@@ -414,20 +546,21 @@ time_entry(Bucket, Key, Now, TimeRef) ->
 %% @private
 %% @doc Dump some entries if the max memory size has
 %% been breached.
-trim_data_table(MaxMemory, UsedMemory, _, _, Freed) when
+trim_data_table(MaxMemory, UsedMemory, _, _, _, Freed) when
     (UsedMemory - Freed) =< MaxMemory ->
     Freed;
-trim_data_table(MaxMemory, UsedMemory, DataRef, TimeRef, Freed) ->
+trim_data_table(MaxMemory, UsedMemory, DataRef, TimeRef, IndexRef, Freed) ->
     %% Delete the oldest object
-    OldestSize = delete_oldest(DataRef, TimeRef),
+    OldestSize = delete_oldest(DataRef, TimeRef, IndexRef),
     trim_data_table(MaxMemory,
                     UsedMemory,
                     DataRef,
                     TimeRef,
+                    IndexRef,
                     Freed + OldestSize).
 
 %% @private
-delete_oldest(DataRef, TimeRef) ->
+delete_oldest(DataRef, TimeRef, IndexRef) ->
     OldestTime = ets:first(TimeRef),
     case OldestTime of
         '$end_of_table' ->
@@ -437,8 +570,10 @@ delete_oldest(DataRef, TimeRef) ->
             ets:delete(TimeRef, OldestTime),
             case ets:lookup(DataRef, OldestKey) of
                 [] ->
-                    delete_oldest(DataRef, TimeRef);
+                    delete_oldest(DataRef, TimeRef, IndexRef);
                 [Object] ->
+                    {Bucket, Key} = OldestKey,
+                    ets:match_delete(IndexRef, ?DELETE_PTN(Bucket, Key)),
                     ets:delete(DataRef, OldestKey),
                     object_size(Object)
             end
@@ -522,8 +657,8 @@ eqc_test_() ->
       [
        {timeout, 60000,
         [?_assertEqual(true,
-
-
+                       backend_eqc:test(?MODULE, true))]}
+      ]}]}]}.
 
 setup() ->
     application:load(sasl),