better-riak-client 1.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/LICENSE +16 -0
- data/README.markdown +198 -0
- data/RELEASE_NOTES.md +211 -0
- data/better-riak-client.gemspec +61 -0
- data/erl_src/riak_kv_test014_backend.beam +0 -0
- data/erl_src/riak_kv_test014_backend.erl +189 -0
- data/erl_src/riak_kv_test_backend.beam +0 -0
- data/erl_src/riak_kv_test_backend.erl +697 -0
- data/erl_src/riak_search_test_backend.beam +0 -0
- data/erl_src/riak_search_test_backend.erl +175 -0
- data/lib/riak/bucket.rb +221 -0
- data/lib/riak/client/beefcake/messages.rb +213 -0
- data/lib/riak/client/beefcake/object_methods.rb +111 -0
- data/lib/riak/client/beefcake_protobuffs_backend.rb +226 -0
- data/lib/riak/client/decaying.rb +36 -0
- data/lib/riak/client/excon_backend.rb +162 -0
- data/lib/riak/client/feature_detection.rb +88 -0
- data/lib/riak/client/http_backend/configuration.rb +211 -0
- data/lib/riak/client/http_backend/key_streamer.rb +43 -0
- data/lib/riak/client/http_backend/object_methods.rb +106 -0
- data/lib/riak/client/http_backend/request_headers.rb +34 -0
- data/lib/riak/client/http_backend/transport_methods.rb +201 -0
- data/lib/riak/client/http_backend.rb +340 -0
- data/lib/riak/client/net_http_backend.rb +82 -0
- data/lib/riak/client/node.rb +115 -0
- data/lib/riak/client/protobuffs_backend.rb +173 -0
- data/lib/riak/client/search.rb +91 -0
- data/lib/riak/client.rb +540 -0
- data/lib/riak/cluster.rb +151 -0
- data/lib/riak/core_ext/blank.rb +53 -0
- data/lib/riak/core_ext/deep_dup.rb +13 -0
- data/lib/riak/core_ext/extract_options.rb +7 -0
- data/lib/riak/core_ext/json.rb +15 -0
- data/lib/riak/core_ext/slice.rb +18 -0
- data/lib/riak/core_ext/stringify_keys.rb +10 -0
- data/lib/riak/core_ext/symbolize_keys.rb +10 -0
- data/lib/riak/core_ext/to_param.rb +31 -0
- data/lib/riak/core_ext.rb +7 -0
- data/lib/riak/encoding.rb +6 -0
- data/lib/riak/failed_request.rb +81 -0
- data/lib/riak/i18n.rb +5 -0
- data/lib/riak/json.rb +52 -0
- data/lib/riak/link.rb +94 -0
- data/lib/riak/locale/en.yml +53 -0
- data/lib/riak/locale/fr.yml +52 -0
- data/lib/riak/map_reduce/filter_builder.rb +103 -0
- data/lib/riak/map_reduce/phase.rb +98 -0
- data/lib/riak/map_reduce.rb +225 -0
- data/lib/riak/map_reduce_error.rb +7 -0
- data/lib/riak/node/configuration.rb +293 -0
- data/lib/riak/node/console.rb +133 -0
- data/lib/riak/node/control.rb +207 -0
- data/lib/riak/node/defaults.rb +83 -0
- data/lib/riak/node/generation.rb +106 -0
- data/lib/riak/node/log.rb +34 -0
- data/lib/riak/node/version.rb +43 -0
- data/lib/riak/node.rb +38 -0
- data/lib/riak/robject.rb +318 -0
- data/lib/riak/search.rb +3 -0
- data/lib/riak/serializers.rb +74 -0
- data/lib/riak/stamp.rb +77 -0
- data/lib/riak/test_server.rb +89 -0
- data/lib/riak/util/escape.rb +76 -0
- data/lib/riak/util/headers.rb +53 -0
- data/lib/riak/util/multipart/stream_parser.rb +62 -0
- data/lib/riak/util/multipart.rb +52 -0
- data/lib/riak/util/tcp_socket_extensions.rb +58 -0
- data/lib/riak/util/translation.rb +19 -0
- data/lib/riak/version.rb +3 -0
- data/lib/riak/walk_spec.rb +105 -0
- data/lib/riak.rb +21 -0
- metadata +348 -0
data/erl_src/riak_kv_test014_backend.beam: Binary file
data/erl_src/riak_kv_test014_backend.erl
@@ -0,0 +1,189 @@
+%% -------------------------------------------------------------------
+%%
+%% riak_kv_test_backend: storage engine based on ETS tables
+%%
+%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%%   http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+%%
+%% -------------------------------------------------------------------
+
+% @doc riak_kv_test_backend is a Riak storage backend using ets that
+% exposes a reset function for efficiently clearing stored data.
+
+-module(riak_kv_test014_backend).
+-behavior(riak_kv_backend).
+-behavior(gen_server).
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
+-export([start/2,stop/1,get/2,put/3,list/1,list_bucket/2,delete/2,
+         is_empty/1, drop/1, fold/3, callback/3, reset/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+         terminate/2, code_change/3]).
+
+
+% @type state() = term().
+-record(state, {t, p}).
+
+% @spec start(Partition :: integer(), Config :: proplist()) ->
+%         {ok, state()} | {{error, Reason :: term()}, state()}
+start(Partition, _Config) ->
+    gen_server:start_link(?MODULE, [Partition], []).
+
+% @spec reset() -> ok | {error, timeout}
+reset() ->
+    Pids = lists:foldl(fun(Item, Acc) ->
+                           case lists:prefix("test_backend", atom_to_list(Item)) of
+                               true -> [whereis(Item)|Acc];
+                               _ -> Acc
+                           end
+                       end, [], registered()),
+    [gen_server:cast(Pid,{reset, self()})|| Pid <- Pids],
+    receive_reset(Pids).
+
+receive_reset([]) -> ok;
+receive_reset(Pids) ->
+    receive
+        {reset, Pid} ->
+            receive_reset(lists:delete(Pid, Pids))
+    after 1000 ->
+            {error, timeout}
+    end.
+
+%% @private
+init([Partition]) ->
+    PName = list_to_atom("test_backend" ++ integer_to_list(Partition)),
+    P = list_to_atom(integer_to_list(Partition)),
+    register(PName, self()),
+    {ok, #state{t=ets:new(P,[]), p=P}}.
+
+%% @private
+handle_cast({reset,From}, State) ->
+    ets:delete_all_objects(State#state.t),
+    From ! {reset, self()},
+    {noreply, State};
+handle_cast(_, State) -> {noreply, State}.
+
+%% @private
+handle_call(stop,_From,State) -> {reply, srv_stop(State), State};
+handle_call({get,BKey},_From,State) -> {reply, srv_get(State,BKey), State};
+handle_call({put,BKey,Val},_From,State) ->
+    {reply, srv_put(State,BKey,Val),State};
+handle_call({delete,BKey},_From,State) -> {reply, srv_delete(State,BKey),State};
+handle_call(list,_From,State) -> {reply, srv_list(State), State};
+handle_call({list_bucket,Bucket},_From,State) ->
+    {reply, srv_list_bucket(State, Bucket), State};
+handle_call(is_empty, _From, State) ->
+    {reply, ets:info(State#state.t, size) =:= 0, State};
+handle_call(drop, _From, State) ->
+    ets:delete(State#state.t),
+    {reply, ok, State};
+handle_call({fold, Fun0, Acc}, _From, State) ->
+    Fun = fun({{B,K}, V}, AccIn) -> Fun0({B,K}, V, AccIn) end,
+    Reply = ets:foldl(Fun, Acc, State#state.t),
+    {reply, Reply, State}.
+
+% @spec stop(state()) -> ok | {error, Reason :: term()}
+stop(SrvRef) -> gen_server:call(SrvRef,stop).
+srv_stop(State) ->
+    true = ets:delete(State#state.t),
+    ok.
+
+% get(state(), riak_object:bkey()) ->
+%   {ok, Val :: binary()} | {error, Reason :: term()}
+% key must be 160b
+get(SrvRef, BKey) -> gen_server:call(SrvRef,{get,BKey}).
+srv_get(State, BKey) ->
+    case ets:lookup(State#state.t,BKey) of
+        [] -> {error, notfound};
+        [{BKey,Val}] -> {ok, Val};
+        Err -> {error, Err}
+    end.
+
+% put(state(), riak_object:bkey(), Val :: binary()) ->
+%   ok | {error, Reason :: term()}
+% key must be 160b
+put(SrvRef, BKey, Val) -> gen_server:call(SrvRef,{put,BKey,Val}).
+srv_put(State,BKey,Val) ->
+    true = ets:insert(State#state.t, {BKey,Val}),
+    ok.
+
+% delete(state(), riak_object:bkey()) ->
+%   ok | {error, Reason :: term()}
+% key must be 160b
+delete(SrvRef, BKey) -> gen_server:call(SrvRef,{delete,BKey}).
+srv_delete(State, BKey) ->
+    true = ets:delete(State#state.t, BKey),
+    ok.
+
+% list(state()) -> [riak_object:bkey()]
+list(SrvRef) -> gen_server:call(SrvRef,list).
+srv_list(State) ->
+    MList = ets:match(State#state.t,{'$1','_'}),
+    list(MList,[]).
+list([],Acc) -> Acc;
+list([[K]|Rest],Acc) -> list(Rest,[K|Acc]).
+
+% list_bucket(term(), Bucket :: riak_object:bucket()) -> [Key :: binary()]
+list_bucket(SrvRef, Bucket) ->
+    gen_server:call(SrvRef,{list_bucket, Bucket}).
+srv_list_bucket(State, {filter, Bucket, Fun}) ->
+    MList = lists:filter(Fun, ets:match(State#state.t,{{Bucket,'$1'},'_'})),
+    list(MList,[]);
+srv_list_bucket(State, Bucket) ->
+    case Bucket of
+        '_' -> MatchSpec = {{'$1','_'},'_'};
+        _ -> MatchSpec = {{Bucket,'$1'},'_'}
+    end,
+    MList = ets:match(State#state.t,MatchSpec),
+    list(MList,[]).
+
+is_empty(SrvRef) -> gen_server:call(SrvRef, is_empty).
+
+drop(SrvRef) -> gen_server:call(SrvRef, drop).
+
+fold(SrvRef, Fun, Acc0) -> gen_server:call(SrvRef, {fold, Fun, Acc0}, infinity).
+
+%% Ignore callbacks for other backends so multi backend works
+callback(_State, _Ref, _Msg) ->
+    ok.
+
+%% @private
+handle_info(_Msg, State) -> {noreply, State}.
+
+%% @private
+terminate(_Reason, _State) -> ok.
+
+%% @private
+code_change(_OldVsn, State, _Extra) -> {ok, State}.
+
+%%
+%% Test
+%%
+-ifdef(TEST).
+
+% @private
+simple_test() ->
+    riak_kv_backend:standard_test(?MODULE, []).
+
+-ifdef(EQC).
+%% @private
+eqc_test() ->
+    ?assertEqual(true, backend_eqc:test(?MODULE, true)).
+
+-endif. % EQC
+-endif. % TEST
data/erl_src/riak_kv_test_backend.beam: Binary file
data/erl_src/riak_kv_test_backend.erl
@@ -0,0 +1,697 @@
+%% -------------------------------------------------------------------
+%%
+%% riak_kv_test_backend: storage engine using ETS tables, for use in testing.
+%%
+%% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%%   http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+%%
+%% -------------------------------------------------------------------
+
+%% @doc riak_kv_memory_backend is a Riak storage backend that uses ets
+%% tables to store all data in memory.
+%%
+%% === Configuration Options ===
+%%
+%% The following configuration options are available for the memory backend.
+%% The options should be specified in the `memory_backend' section of your
+%% app.config file.
+%%
+%% <ul>
+%% <li>`ttl' - The time in seconds that an object should live before being expired.</li>
+%% <li>`max_memory' - The amount of memory in megabytes to limit the backend to.</li>
+%% <li>`test' - When `true', exposes the internal ETS tables so that they can be efficiently cleared using {@link reset/3}.</li>
+%% </ul>
+%%
+
+-module(riak_kv_test_backend).
+-behavior(riak_kv_backend).
+
+%% KV Backend API
+-export([api_version/0,
+         capabilities/1,
+         capabilities/2,
+         start/2,
+         stop/1,
+         get/3,
+         put/5,
+         delete/4,
+         drop/1,
+         fold_buckets/4,
+         fold_keys/4,
+         fold_objects/4,
+         is_empty/1,
+         status/1,
+         callback/3]).
+
+%% "Testing" backend API
+-export([reset/0]).
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
+
+-define(API_VERSION, 1).
+-define(CAPABILITIES, [async_fold, indexes]).
+
+%% Macros for working with indexes
+-define(DELETE_PTN(B,K), {{B,'_','_',K},'_'}).
+
+%% ETS table name macros so we can break encapsulation for testing
+%% mode
+-define(DNAME(P), list_to_atom("riak_kv_"++integer_to_list(P))).
+-define(INAME(P), list_to_atom("riak_kv_"++integer_to_list(P)++"_i")).
+-define(TNAME(P), list_to_atom("riak_kv_"++integer_to_list(P)++"_t")).
+
+-record(state, {data_ref :: ets:tid(),
+                index_ref :: ets:tid(),
+                time_ref :: ets:tid(),
+                max_memory :: undefined | integer(),
+                used_memory=0 :: integer(),
+                ttl :: integer()}).
+
+-type state() :: #state{}.
+-type config() :: [].
+
+%% ===================================================================
+%% Public API
+%% ===================================================================
+
+%% KV Backend API
+
+%% @doc Return the major version of the
+%% current API.
+-spec api_version() -> {ok, integer()}.
+api_version() ->
+    case lists:member({capabilities, 1}, riak_kv_backend:behaviour_info(callbacks)) of
+        true -> % Using 1.1 API or later
+            {ok, ?API_VERSION};
+        _ -> % Using 1.0 API
+            {?API_VERSION, ?CAPABILITIES}
+    end.
+
+%% @doc Return the capabilities of the backend.
+-spec capabilities(state()) -> {ok, [atom()]}.
+capabilities(_) ->
+    {ok, ?CAPABILITIES}.
+
+%% @doc Return the capabilities of the backend.
+-spec capabilities(riak_object:bucket(), state()) -> {ok, [atom()]}.
+capabilities(_, _) ->
+    {ok, ?CAPABILITIES}.
+
+%% @doc Start the memory backend
+-spec start(integer(), config()) -> {ok, state()}.
+%% Bug in riak_kv_vnode in 1.0
+start(Partition, [{async_folds,_}=AFolds, Rest]) when is_list(Rest) ->
+    start(Partition, [AFolds|Rest]);
+start(Partition, Config) ->
+    TTL = get_prop_or_env(ttl, Config, memory_backend),
+    MemoryMB = get_prop_or_env(max_memory, Config, memory_backend),
+    TableOpts = case get_prop_or_env(test, Config, memory_backend) of
+                    true ->
+                        [ordered_set, public, named_table];
+                    _ ->
+                        [ordered_set]
+                end,
+    case MemoryMB of
+        undefined ->
+            MaxMemory = undefined,
+            TimeRef = undefined;
+        _ ->
+            MaxMemory = MemoryMB * 1024 * 1024,
+            TimeRef = ets:new(?TNAME(Partition), TableOpts)
+    end,
+    IndexRef = ets:new(?INAME(Partition), TableOpts),
+    DataRef = ets:new(?DNAME(Partition), TableOpts),
+    {ok, #state{data_ref=DataRef,
+                index_ref=IndexRef,
+                max_memory=MaxMemory,
+                time_ref=TimeRef,
+                ttl=TTL}}.
+
+%% @doc Stop the memory backend
+-spec stop(state()) -> ok.
+stop(#state{data_ref=DataRef,
+            index_ref=IndexRef,
+            max_memory=MaxMemory,
+            time_ref=TimeRef}) ->
+    catch ets:delete(DataRef),
+    catch ets:delete(IndexRef),
+    case MaxMemory of
+        undefined ->
+            ok;
+        _ ->
+            catch ets:delete(TimeRef)
+    end,
+    ok.
+
+%% @doc Retrieve an object from the memory backend
+-spec get(riak_object:bucket(), riak_object:key(), state()) ->
+                 {ok, any(), state()} |
+                 {ok, not_found, state()} |
+                 {error, term(), state()}.
+get(Bucket, Key, State=#state{data_ref=DataRef,
+                              index_ref=IndexRef,
+                              used_memory=UsedMemory,
+                              max_memory=MaxMemory,
+                              ttl=TTL}) ->
+    case ets:lookup(DataRef, {Bucket, Key}) of
+        [] -> {error, not_found, State};
+        [{{Bucket, Key}, {{ts, Timestamp}, Val}}=Object] ->
+            case exceeds_ttl(Timestamp, TTL) of
+                true ->
+                    %% Because we do not have the IndexSpecs, we must
+                    %% delete the object directly and all index
+                    %% entries blindly using match_delete.
+                    ets:delete(DataRef, {Bucket, Key}),
+                    ets:match_delete(IndexRef, ?DELETE_PTN(Bucket, Key)),
+                    case MaxMemory of
+                        undefined ->
+                            UsedMemory1 = UsedMemory;
+                        _ ->
+                            UsedMemory1 = UsedMemory - object_size(Object)
+                    end,
+                    {error, not_found, State#state{used_memory=UsedMemory1}};
+                false ->
+                    {ok, Val, State}
+            end;
+        [{{Bucket, Key}, Val}] ->
+            {ok, Val, State};
+        Error ->
+            {error, Error, State}
+    end.
+
+%% @doc Insert an object into the memory backend.
+-type index_spec() :: {add, Index, SecondaryKey} | {remove, Index, SecondaryKey}.
+-spec put(riak_object:bucket(), riak_object:key(), [index_spec()], binary(), state()) ->
+                 {ok, state()}.
+put(Bucket, PrimaryKey, IndexSpecs, Val, State=#state{data_ref=DataRef,
+                                                      index_ref=IndexRef,
+                                                      max_memory=MaxMemory,
+                                                      time_ref=TimeRef,
+                                                      ttl=TTL,
+                                                      used_memory=UsedMemory}) ->
+    Now = now(),
+    case TTL of
+        undefined ->
+            Val1 = Val;
+        _ ->
+            Val1 = {{ts, Now}, Val}
+    end,
+    {ok, Size} = do_put(Bucket, PrimaryKey, Val1, IndexSpecs, DataRef, IndexRef),
+    case MaxMemory of
+        undefined ->
+            UsedMemory1 = UsedMemory;
+        _ ->
+            time_entry(Bucket, PrimaryKey, Now, TimeRef),
+            Freed = trim_data_table(MaxMemory,
+                                    UsedMemory + Size,
+                                    DataRef,
+                                    TimeRef,
+                                    IndexRef,
+                                    0),
+            UsedMemory1 = UsedMemory + Size - Freed
+    end,
+    {ok, State#state{used_memory=UsedMemory1}}.
+
+%% @doc Delete an object from the memory backend
+-spec delete(riak_object:bucket(), riak_object:key(), [index_spec()], state()) ->
+                    {ok, state()}.
+delete(Bucket, Key, IndexSpecs, State=#state{data_ref=DataRef,
+                                             index_ref=IndexRef,
+                                             time_ref=TimeRef,
+                                             used_memory=UsedMemory}) ->
+    case TimeRef of
+        undefined ->
+            UsedMemory1 = UsedMemory;
+        _ ->
+            %% Lookup the object so we can delete its
+            %% entry from the time table and account
+            %% for the memory used.
+            [Object] = ets:lookup(DataRef, {Bucket, Key}),
+            case Object of
+                {_, {{ts, Timestamp}, _}} ->
+                    ets:delete(TimeRef, Timestamp),
+                    UsedMemory1 = UsedMemory - object_size(Object);
+                _ ->
+                    UsedMemory1 = UsedMemory
+            end
+    end,
+    update_indexes(Bucket, Key, IndexSpecs, IndexRef),
+    ets:delete(DataRef, {Bucket, Key}),
+    {ok, State#state{used_memory=UsedMemory1}}.
+
+%% @doc Fold over all the buckets.
+-spec fold_buckets(riak_kv_backend:fold_buckets_fun(),
+                   any(),
+                   [],
+                   state()) -> {ok, any()}.
+fold_buckets(FoldBucketsFun, Acc, Opts, #state{data_ref=DataRef}) ->
+    FoldFun = fold_buckets_fun(FoldBucketsFun),
+    case lists:member(async_fold, Opts) of
+        true ->
+            BucketFolder =
+                fun() ->
+                        {Acc0, _} = ets:foldl(FoldFun, {Acc, sets:new()}, DataRef),
+                        Acc0
+                end,
+            {async, BucketFolder};
+        false ->
+            {Acc0, _} = ets:foldl(FoldFun, {Acc, sets:new()}, DataRef),
+            {ok, Acc0}
+    end.
+
+%% @doc Fold over all the keys for one or all buckets.
+-spec fold_keys(riak_kv_backend:fold_keys_fun(),
+                any(),
+                [{atom(), term()}],
+                state()) -> {ok, term()} | {async, fun()}.
+fold_keys(FoldKeysFun, Acc, Opts, #state{data_ref=DataRef,
+                                         index_ref=IndexRef}) ->
+
+    %% Figure out how we should limit the fold: by bucket, by
+    %% secondary index, or neither (fold across everything.)
+    Bucket = lists:keyfind(bucket, 1, Opts),
+    Index = lists:keyfind(index, 1, Opts),
+
+    %% Multiple limiters may exist. Take the most specific limiter,
+    %% get an appropriate folder function.
+    Folder = if
+                 Index /= false ->
+                     FoldFun = fold_keys_fun(FoldKeysFun, Index),
+                     get_index_folder(FoldFun, Acc, Index, DataRef, IndexRef);
+                 Bucket /= false ->
+                     FoldFun = fold_keys_fun(FoldKeysFun, Bucket),
+                     get_folder(FoldFun, Acc, DataRef);
+                 true ->
+                     FoldFun = fold_keys_fun(FoldKeysFun, undefined),
+                     get_folder(FoldFun, Acc, DataRef)
+             end,
+
+    case lists:member(async_fold, Opts) of
+        true ->
+            {async, Folder};
+        false ->
+            {ok, Folder()}
+    end.
+
+%% @doc Fold over all the objects for one or all buckets.
+-spec fold_objects(riak_kv_backend:fold_objects_fun(),
+                   any(),
+                   [{atom(), term()}],
+                   state()) -> {ok, any()} | {async, fun()}.
+fold_objects(FoldObjectsFun, Acc, Opts, #state{data_ref=DataRef}) ->
+    Bucket = proplists:get_value(bucket, Opts),
+    FoldFun = fold_objects_fun(FoldObjectsFun, Bucket),
+    case lists:member(async_fold, Opts) of
+        true ->
+            {async, get_folder(FoldFun, Acc, DataRef)};
+        false ->
+            Acc0 = ets:foldl(FoldFun, Acc, DataRef),
+            {ok, Acc0}
+    end.
+
+%% @doc Delete all objects from this memory backend
+-spec drop(state()) -> {ok, state()}.
+drop(State=#state{data_ref=DataRef,
+                  index_ref=IndexRef,
+                  time_ref=TimeRef}) ->
+    ets:delete_all_objects(DataRef),
+    ets:delete_all_objects(IndexRef),
+    case TimeRef of
+        undefined ->
+            ok;
+        _ ->
+            ets:delete_all_objects(TimeRef)
+    end,
+    {ok, State}.
+
+%% @doc Returns true if this memory backend contains any
+%% non-tombstone values; otherwise returns false.
+-spec is_empty(state()) -> boolean().
+is_empty(#state{data_ref=DataRef}) ->
+    ets:info(DataRef, size) =:= 0.
+
+%% @doc Get the status information for this memory backend
+-spec status(state()) -> [{atom(), term()}].
+status(#state{data_ref=DataRef,
+              index_ref=IndexRef,
+              time_ref=TimeRef}) ->
+    DataStatus = ets:info(DataRef),
+    IndexStatus = ets:info(IndexRef),
+    case TimeRef of
+        undefined ->
+            [{data_table_status, DataStatus},
+             {index_table_status, IndexStatus}];
+        _ ->
+            TimeStatus = ets:info(TimeRef),
+            [{data_table_status, DataStatus},
+             {index_table_status, IndexStatus},
+             {time_table_status, TimeStatus}]
+    end.
+
+%% @doc Register an asynchronous callback
+-spec callback(reference(), any(), state()) -> {ok, state()}.
+callback(_Ref, _Msg, State) ->
+    {ok, State}.
+
+%% @doc Resets state of all running memory backends on the local
+%% node. The `riak_kv' environment variable `memory_backend' must
+%% contain the `test' property, set to `true' for this to work.
+-spec reset() -> ok | {error, reset_disabled}.
+reset() ->
+    reset(app_helper:get_env(memory_backend, test, app_helper:get_env(riak_kv, test)), app_helper:get_env(riak_kv, storage_backend)).
+
+reset(true, ?MODULE) ->
+    {ok, Ring} = riak_core_ring_manager:get_my_ring(),
+    [ begin
+          catch ets:delete_all_objects(?DNAME(I)),
+          catch ets:delete_all_objects(?INAME(I)),
+          catch ets:delete_all_objects(?TNAME(I))
+      end || I <- riak_core_ring:my_indices(Ring) ],
+    ok;
+reset(_, _) ->
+    {error, reset_disabled}.
+
+%% ===================================================================
+%% Internal functions
+%% ===================================================================
+
+%% @TODO Some of these implementations may be suboptimal.
+%% Need to do some measuring and testing to refine the
+%% implementations.
+
+%% @private
+%% Return a function to fold over the buckets on this backend
+fold_buckets_fun(FoldBucketsFun) ->
+    fun({{Bucket, _}, _}, {Acc, BucketSet}) ->
+            case sets:is_element(Bucket, BucketSet) of
+                true ->
+                    {Acc, BucketSet};
+                false ->
+                    {FoldBucketsFun(Bucket, Acc),
+                     sets:add_element(Bucket, BucketSet)}
+            end
+    end.
+
+%% @private
+%% Return a function to fold over keys on this backend
+fold_keys_fun(FoldKeysFun, undefined) ->
+    fun({{Bucket, Key}, _}, Acc) ->
+            FoldKeysFun(Bucket, Key, Acc);
+       (_, Acc) ->
+            Acc
+    end;
+fold_keys_fun(FoldKeysFun, {bucket, FilterBucket}) ->
+    fun({{Bucket, Key}, _}, Acc) when Bucket == FilterBucket ->
+            FoldKeysFun(Bucket, Key, Acc);
+       (_, Acc) ->
+            Acc
+    end;
+fold_keys_fun(FoldKeysFun, {index, FilterBucket, {eq, <<"$bucket">>, _}}) ->
+    %% 2I exact match query on special $bucket field...
+    fold_keys_fun(FoldKeysFun, {bucket, FilterBucket});
+fold_keys_fun(FoldKeysFun, {index, FilterBucket, {range, <<"$key">>, _, _}}) ->
+    %% 2I range query on special $key field...
+    fold_keys_fun(FoldKeysFun, {bucket, FilterBucket});
+fold_keys_fun(FoldKeysFun, {index, _FilterBucket, _Query}) ->
+    fun({{Bucket, _FilterField, _FilterTerm, Key}, _}, Acc) ->
+            FoldKeysFun(Bucket, Key, Acc);
+       (_, Acc) ->
+            Acc
+    end.
+
+
+%% @private
+%% Return a function to fold over keys on this backend
+fold_objects_fun(FoldObjectsFun, undefined) ->
+    fun({{Bucket, Key}, Value}, Acc) ->
+            FoldObjectsFun(Bucket, Key, Value, Acc);
+       (_, Acc) ->
+            Acc
+    end;
+fold_objects_fun(FoldObjectsFun, FilterBucket) ->
+    fun({{Bucket, Key}, Value}, Acc) when Bucket == FilterBucket->
+            FoldObjectsFun(Bucket, Key, Value, Acc);
+       (_, Acc) ->
+            Acc
+    end.
+
+%% @private
+get_folder(FoldFun, Acc, DataRef) ->
+    fun() ->
+            ets:foldl(FoldFun, Acc, DataRef)
+    end.
+
+%% @private
+get_index_folder(Folder, Acc0, {index, Bucket, {eq, <<"$bucket">>, _}}, DataRef, _) ->
+    %% For the special $bucket index, turn it into a fold over the
+    %% data table.
+    fun() ->
+            key_range_folder(Folder, Acc0, DataRef, {Bucket, <<>>}, Bucket)
+    end;
+get_index_folder(Folder, Acc0, {index, Bucket, {range, <<"$key">>, Min, Max}}, DataRef, _) ->
+    %% For the special range lookup on the $key index, turn it into a
+    %% fold on the data table
+    fun() ->
+            key_range_folder(Folder, Acc0, DataRef, {Bucket, Min}, {Bucket, Min, Max})
+    end;
+get_index_folder(Folder, Acc0, {index, Bucket, {eq, Field, Term}}, _, IndexRef) ->
+    fun() ->
+            index_range_folder(Folder, Acc0, IndexRef, {Bucket, Field, Term, undefined}, {Bucket, Field, Term, Term})
+    end;
+get_index_folder(Folder, Acc0, {index, Bucket, {range, Field, Min, Max}}, _, IndexRef) ->
+    fun() ->
+            index_range_folder(Folder, Acc0, IndexRef, {Bucket, Field, Min, undefined}, {Bucket, Field, Min, Max})
+    end.
+
+
+%% Iterates over a range of keys, for the special $key and $bucket
+%% indexes.
+%% @private
+-spec key_range_folder(function(), term(), ets:tid(), {riak_object:bucket(), riak_object:key()}, binary() | {riak_object:bucket(), term(), term()}) -> term().
+key_range_folder(Folder, Acc0, DataRef, {B,_}=DataKey, B) ->
+    case ets:lookup(DataRef, DataKey) of
+        [] ->
+            key_range_folder(Folder, Acc0, DataRef, ets:next(DataRef, DataKey), B);
+        [Object] ->
+            Acc = Folder(Object, Acc0),
+            key_range_folder(Folder, Acc, DataRef, ets:next(DataRef, DataKey), B)
+    end;
+key_range_folder(Folder, Acc0, DataRef, {B,K}=DataKey, {B, Min, Max}=Query) when K >= Min, K =< Max ->
+    case ets:lookup(DataRef, DataKey) of
+        [] ->
+            key_range_folder(Folder, Acc0, DataRef, ets:next(DataRef, DataKey), Query);
+        [Object] ->
+            Acc = Folder(Object, Acc0),
+            key_range_folder(Folder, Acc, DataRef, ets:next(DataRef, DataKey), Query)
+    end;
+key_range_folder(_Folder, Acc, _DataRef, _DataKey, _Query) ->
+    Acc.
+
+%% Iterates over a range of index postings
+index_range_folder(Folder, Acc0, IndexRef, {B, I, V, _K}=IndexKey, {B, I, Min, Max}=Query) when V >= Min, V =< Max ->
+    case ets:lookup(IndexRef, IndexKey) of
+        [] ->
+            %% This will happen on the first iteration, where the key
+            %% does not exist. In all other cases, ETS will give us a
+            %% real key from next/2.
+            index_range_folder(Folder, Acc0, IndexRef, ets:next(IndexRef, IndexKey), Query);
+        [Posting] ->
+            Acc = Folder(Posting, Acc0),
+            index_range_folder(Folder, Acc, IndexRef, ets:next(IndexRef, IndexKey), Query)
+    end;
+index_range_folder(_Folder, Acc, _IndexRef, _IndexKey, _Query) ->
+    Acc.
+
+
+%% @private
+do_put(Bucket, Key, Val, IndexSpecs, DataRef, IndexRef) ->
+    Object = {{Bucket, Key}, Val},
+    true = ets:insert(DataRef, Object),
+    update_indexes(Bucket, Key, IndexSpecs, IndexRef),
+    {ok, object_size(Object)}.
+
+%% Check if this timestamp is past the ttl setting.
+exceeds_ttl(Timestamp, TTL) ->
+    Diff = (timer:now_diff(now(), Timestamp) / 1000 / 1000),
+    Diff > TTL.
+
+update_indexes(_Bucket, _Key, undefined, _IndexRef) ->
+    ok;
+update_indexes(_Bucket, _Key, [], _IndexRef) ->
+    ok;
+update_indexes(Bucket, Key, [{remove, Field, Value}|Rest], IndexRef) ->
+    true = ets:delete(IndexRef, {Bucket, Field, Value, Key}),
+    update_indexes(Bucket, Key, Rest, IndexRef);
+update_indexes(Bucket, Key, [{add, Field, Value}|Rest], IndexRef) ->
+    true = ets:insert(IndexRef, {{Bucket, Field, Value, Key}, <<>>}),
+    update_indexes(Bucket, Key, Rest, IndexRef).
+
+%% @private
+time_entry(Bucket, Key, Now, TimeRef) ->
+    ets:insert(TimeRef, {Now, {Bucket, Key}}).
+
+%% @private
+%% @doc Dump some entries if the max memory size has
+%% been breached.
+trim_data_table(MaxMemory, UsedMemory, _, _, _, Freed) when
+      (UsedMemory - Freed) =< MaxMemory ->
+    Freed;
+trim_data_table(MaxMemory, UsedMemory, DataRef, TimeRef, IndexRef, Freed) ->
+    %% Delete the oldest object
+    OldestSize = delete_oldest(DataRef, TimeRef, IndexRef),
+    trim_data_table(MaxMemory,
+                    UsedMemory,
+                    DataRef,
+                    TimeRef,
+                    IndexRef,
+                    Freed + OldestSize).
+
+%% @private
+delete_oldest(DataRef, TimeRef, IndexRef) ->
+    OldestTime = ets:first(TimeRef),
+    case OldestTime of
+        '$end_of_table' ->
+            0;
+        _ ->
+            OldestKey = ets:lookup_element(TimeRef, OldestTime, 2),
+            ets:delete(TimeRef, OldestTime),
+            case ets:lookup(DataRef, OldestKey) of
+                [] ->
+                    delete_oldest(DataRef, TimeRef, IndexRef);
+                [Object] ->
+                    {Bucket, Key} = OldestKey,
+                    ets:match_delete(IndexRef, ?DELETE_PTN(Bucket, Key)),
+                    ets:delete(DataRef, OldestKey),
+                    object_size(Object)
+            end
+    end.
+
+%% @private
+object_size(Object) ->
+    case Object of
+        {{Bucket, Key}, {{ts, _}, Val}} ->
+            ok;
+        {{Bucket, Key}, Val} ->
+            ok
+    end,
+    size(Bucket) + size(Key) + size(Val).
+
+%% Copied from riak_core 1.2 app_helper module
+%% @private
+%% @doc Retrieve value for Key from Properties if it exists, otherwise
+%%      return from the application's env.
+-spec get_prop_or_env(atom(), [{atom(), term()}], atom()) -> term().
+get_prop_or_env(Key, Properties, App) ->
+    get_prop_or_env(Key, Properties, App, undefined).
+
+%% @private
+%% @doc Return the value for Key in Properties if it exists, otherwise return
+%%      the value from the application's env, or Default.
+-spec get_prop_or_env(atom(), [{atom(), term()}], atom(), term()) -> term().
+get_prop_or_env(Key, Properties, App, Default) ->
+    case proplists:get_value(Key, Properties) of
+        undefined ->
+            app_helper:get_env(App, Key, Default);
+        Value ->
+            Value
+    end.
+
+%% ===================================================================
+%% EUnit tests
+%% ===================================================================
+
+-ifdef(TEST).
+
+simple_test_() ->
+    riak_kv_backend:standard_test(?MODULE, []).
+
+ttl_test_() ->
+    Config = [{ttl, 15}],
+    {ok, State} = start(42, Config),
+
+    Bucket = <<"Bucket">>,
+    Key = <<"Key">>,
+    Value = <<"Value">>,
+
+    [
+     %% Put an object
+     ?_assertEqual({ok, State}, put(Bucket, Key, [], Value, State)),
+     %% Wait 1 second to access it
+     ?_assertEqual(ok, timer:sleep(1000)),
+     ?_assertEqual({ok, Value, State}, get(Bucket, Key, State)),
+     %% Wait 3 seconds and access it again
+     ?_assertEqual(ok, timer:sleep(3000)),
+     ?_assertEqual({ok, Value, State}, get(Bucket, Key, State)),
+     %% Wait 15 seconds and it should expire
+     {timeout, 30000, ?_assertEqual(ok, timer:sleep(15000))},
+     %% This time it should be gone
+     ?_assertEqual({error, not_found, State}, get(Bucket, Key, State))
+    ].
+
+%% @private
+max_memory_test_() ->
+    %% Set max size to 1.5kb
+    Config = [{max_memory, 1.5 * (1 / 1024)}],
+    {ok, State} = start(42, Config),
+
+    Bucket = <<"Bucket">>,
+    Key1 = <<"Key1">>,
+    Value1 = list_to_binary(string:copies("1", 1024)),
+    Key2 = <<"Key2">>,
+    Value2 = list_to_binary(string:copies("2", 1024)),
+
+    %% Write Key1 to the datastore
+    {ok, State1} = put(Bucket, Key1, [], Value1, State),
+    timer:sleep(timer:seconds(1)),
+    %% Write Key2 to the datastore
+    {ok, State2} = put(Bucket, Key2, [], Value2, State1),
+
+    [
+     %% Key1 should be kicked out
+     ?_assertEqual({error, not_found, State2}, get(Bucket, Key1, State2)),
+     %% Key2 should still be present
+     ?_assertEqual({ok, Value2, State2}, get(Bucket, Key2, State2))
+    ].
+
+-ifdef(EQC).
+
+eqc_test_() ->
+    {spawn,
+     [{inorder,
+       [{setup,
+         fun setup/0,
+         fun cleanup/1,
+         [
+          {timeout, 60000,
+           [?_assertEqual(true,
+                          backend_eqc:test(?MODULE, true))]}
+         ]}]}]}.
+
+setup() ->
+    application:load(sasl),
+    application:set_env(sasl, sasl_error_logger, {file, "riak_kv_memory_backend_eqc_sasl.log"}),
+    error_logger:tty(false),
+    error_logger:logfile({open, "riak_kv_memory_backend_eqc.log"}),
+    ok.
+
+cleanup(_) ->
+    ok.
+
+-endif. % EQC
+
+-endif. % TEST
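Usage note (illustrative sketch, not part of the packaged diff): the reset/0 exported by riak_kv_test_backend above only clears data when the node runs this module as its storage backend with the test flag enabled; any other combination returns {error, reset_disabled}. The snippet below assumes a node whose app.config already selects riak_kv_test_backend, using the same env keys that reset/0 reads via app_helper:

    %% app.config excerpt (assumed layout):
    %% {riak_kv, [
    %%     {storage_backend, riak_kv_test_backend},
    %%     {test, true}
    %% ]}
    %%
    %% From the node's console, or via rpc:call/4 from a test harness, wipe
    %% every local partition's data, index, and time ETS tables between runs:
    ok = riak_kv_test_backend:reset().

Because the tables are only emptied rather than the node restarted, this is the kind of fast between-run cleanup that the gem's test-server support (data/lib/riak/test_server.rb, listed above) is built around.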